From 63004e3de1473254c3b2a75f1e06afaabdacd7dd Mon Sep 17 00:00:00 2001
From: Todd Gamblin <tgamblin@llnl.gov>
Date: Sun, 19 Aug 2018 18:11:52 -0700
Subject: [PATCH] yaml: use ruamel.yaml instead of pyyaml

- ruamel.yaml allows round-tripping comments from/to files (see sketch below)
- ruamel.yaml is single-source, python2/python3 compatible
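
A sketch of the round-trip behavior this buys us (hypothetical snippet,
assuming the vendored 0.11.x API; not part of the patch itself):

    import ruamel.yaml

    text = "# compilers, best first\ncompiler: gcc  # system default\n"
    data = ruamel.yaml.round_trip_load(text)
    data['compiler'] = 'clang'
    # comments and key order survive the dump; pyyaml would drop them
    print(ruamel.yaml.round_trip_dump(data))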
---
 bin/spack                                     |    7 -
 lib/spack/external/__init__.py                |   13 +-
 lib/spack/external/ruamel/__init__.py         |    0
 .../external/ruamel/yaml/.ruamel/__init__.py  |    2 +
 lib/spack/external/ruamel/yaml/LICENSE        |   21 +
 lib/spack/external/ruamel/yaml/README.rst     |   38 +
 lib/spack/external/ruamel/yaml/__init__.py    |   85 +
 lib/spack/external/ruamel/yaml/comments.py    |  481 ++++++
 lib/spack/external/ruamel/yaml/compat.py      |  120 ++
 .../{yaml/lib => ruamel}/yaml/composer.py     |   95 +-
 .../external/ruamel/yaml/configobjwalker.py   |    9 +
 lib/spack/external/ruamel/yaml/constructor.py | 1167 +++++++++++++
 lib/spack/external/ruamel/yaml/dumper.py      |  102 ++
 .../{yaml/lib => ruamel}/yaml/emitter.py      |  340 ++--
 .../{yaml/lib => ruamel}/yaml/error.py        |   36 +-
 .../{yaml/lib => ruamel}/yaml/events.py       |   68 +-
 lib/spack/external/ruamel/yaml/loader.py      |   61 +
 .../yaml/__init__.py => ruamel/yaml/main.py}  |  189 ++-
 lib/spack/external/ruamel/yaml/nodes.py       |   86 +
 .../{yaml/lib => ruamel}/yaml/parser.py       |  242 ++-
 .../{yaml/lib3 => ruamel}/yaml/reader.py      |   99 +-
 lib/spack/external/ruamel/yaml/representer.py |  888 ++++++++++
 lib/spack/external/ruamel/yaml/resolver.py    |  397 +++++
 .../external/ruamel/yaml/scalarstring.py      |   60 +
 .../{yaml/lib => ruamel}/yaml/scanner.py      |  524 ++++--
 lib/spack/external/ruamel/yaml/serializer.py  |  178 ++
 lib/spack/external/ruamel/yaml/setup.cfg      |    5 +
 lib/spack/external/ruamel/yaml/tokens.py      |  195 +++
 lib/spack/external/ruamel/yaml/util.py        |  139 ++
 lib/spack/external/yaml/LICENSE               |   19 -
 lib/spack/external/yaml/README                |   35 -
 lib/spack/external/yaml/lib/yaml/__init__.py  |  315 ----
 .../external/yaml/lib/yaml/constructor.py     |  675 --------
 lib/spack/external/yaml/lib/yaml/cyaml.py     |   85 -
 lib/spack/external/yaml/lib/yaml/dumper.py    |   62 -
 lib/spack/external/yaml/lib/yaml/loader.py    |   40 -
 lib/spack/external/yaml/lib/yaml/nodes.py     |   49 -
 lib/spack/external/yaml/lib/yaml/reader.py    |  190 ---
 .../external/yaml/lib/yaml/representer.py     |  486 ------
 lib/spack/external/yaml/lib/yaml/resolver.py  |  227 ---
 .../external/yaml/lib/yaml/serializer.py      |  111 --
 lib/spack/external/yaml/lib/yaml/tokens.py    |  104 --
 lib/spack/external/yaml/lib3/yaml/composer.py |  139 --
 .../external/yaml/lib3/yaml/constructor.py    |  686 --------
 lib/spack/external/yaml/lib3/yaml/cyaml.py    |   85 -
 lib/spack/external/yaml/lib3/yaml/dumper.py   |   62 -
 lib/spack/external/yaml/lib3/yaml/emitter.py  | 1137 -------------
 lib/spack/external/yaml/lib3/yaml/error.py    |   75 -
 lib/spack/external/yaml/lib3/yaml/events.py   |   86 -
 lib/spack/external/yaml/lib3/yaml/loader.py   |   40 -
 lib/spack/external/yaml/lib3/yaml/nodes.py    |   49 -
 lib/spack/external/yaml/lib3/yaml/parser.py   |  589 -------
 .../external/yaml/lib3/yaml/representer.py    |  387 -----
 lib/spack/external/yaml/lib3/yaml/resolver.py |  227 ---
 lib/spack/external/yaml/lib3/yaml/scanner.py  | 1444 -----------------
 .../external/yaml/lib3/yaml/serializer.py     |  111 --
 lib/spack/external/yaml/lib3/yaml/tokens.py   |  104 --
 lib/spack/spack/binary_distribution.py        |    2 +-
 lib/spack/spack/config.py                     |    4 +-
 lib/spack/spack/database.py                   |    2 +-
 lib/spack/spack/directory_layout.py           |    3 +-
 lib/spack/spack/provider_index.py             |    2 +-
 lib/spack/spack/repo.py                       |    2 +-
 lib/spack/spack/spec.py                       |    6 +-
 lib/spack/spack/test/config.py                |    2 +-
 lib/spack/spack/test/conftest.py              |    2 +-
 lib/spack/spack/test/modules/conftest.py      |    2 +-
 lib/spack/spack/util/spack_yaml.py            |    8 +-
 68 files changed, 5165 insertions(+), 8136 deletions(-)
 create mode 100644 lib/spack/external/ruamel/__init__.py
 create mode 100644 lib/spack/external/ruamel/yaml/.ruamel/__init__.py
 create mode 100644 lib/spack/external/ruamel/yaml/LICENSE
 create mode 100644 lib/spack/external/ruamel/yaml/README.rst
 create mode 100644 lib/spack/external/ruamel/yaml/__init__.py
 create mode 100644 lib/spack/external/ruamel/yaml/comments.py
 create mode 100644 lib/spack/external/ruamel/yaml/compat.py
 rename lib/spack/external/{yaml/lib => ruamel}/yaml/composer.py (53%)
 create mode 100644 lib/spack/external/ruamel/yaml/configobjwalker.py
 create mode 100644 lib/spack/external/ruamel/yaml/constructor.py
 create mode 100644 lib/spack/external/ruamel/yaml/dumper.py
 rename lib/spack/external/{yaml/lib => ruamel}/yaml/emitter.py (77%)
 rename lib/spack/external/{yaml/lib => ruamel}/yaml/error.py (67%)
 rename lib/spack/external/{yaml/lib => ruamel}/yaml/events.py (55%)
 create mode 100644 lib/spack/external/ruamel/yaml/loader.py
 rename lib/spack/external/{yaml/lib3/yaml/__init__.py => ruamel/yaml/main.py} (57%)
 create mode 100644 lib/spack/external/ruamel/yaml/nodes.py
 rename lib/spack/external/{yaml/lib => ruamel}/yaml/parser.py (72%)
 rename lib/spack/external/{yaml/lib3 => ruamel}/yaml/reader.py (69%)
 create mode 100644 lib/spack/external/ruamel/yaml/representer.py
 create mode 100644 lib/spack/external/ruamel/yaml/resolver.py
 create mode 100644 lib/spack/external/ruamel/yaml/scalarstring.py
 rename lib/spack/external/{yaml/lib => ruamel}/yaml/scanner.py (73%)
 create mode 100644 lib/spack/external/ruamel/yaml/serializer.py
 create mode 100644 lib/spack/external/ruamel/yaml/setup.cfg
 create mode 100644 lib/spack/external/ruamel/yaml/tokens.py
 create mode 100644 lib/spack/external/ruamel/yaml/util.py
 delete mode 100644 lib/spack/external/yaml/LICENSE
 delete mode 100644 lib/spack/external/yaml/README
 delete mode 100644 lib/spack/external/yaml/lib/yaml/__init__.py
 delete mode 100644 lib/spack/external/yaml/lib/yaml/constructor.py
 delete mode 100644 lib/spack/external/yaml/lib/yaml/cyaml.py
 delete mode 100644 lib/spack/external/yaml/lib/yaml/dumper.py
 delete mode 100644 lib/spack/external/yaml/lib/yaml/loader.py
 delete mode 100644 lib/spack/external/yaml/lib/yaml/nodes.py
 delete mode 100644 lib/spack/external/yaml/lib/yaml/reader.py
 delete mode 100644 lib/spack/external/yaml/lib/yaml/representer.py
 delete mode 100644 lib/spack/external/yaml/lib/yaml/resolver.py
 delete mode 100644 lib/spack/external/yaml/lib/yaml/serializer.py
 delete mode 100644 lib/spack/external/yaml/lib/yaml/tokens.py
 delete mode 100644 lib/spack/external/yaml/lib3/yaml/composer.py
 delete mode 100644 lib/spack/external/yaml/lib3/yaml/constructor.py
 delete mode 100644 lib/spack/external/yaml/lib3/yaml/cyaml.py
 delete mode 100644 lib/spack/external/yaml/lib3/yaml/dumper.py
 delete mode 100644 lib/spack/external/yaml/lib3/yaml/emitter.py
 delete mode 100644 lib/spack/external/yaml/lib3/yaml/error.py
 delete mode 100644 lib/spack/external/yaml/lib3/yaml/events.py
 delete mode 100644 lib/spack/external/yaml/lib3/yaml/loader.py
 delete mode 100644 lib/spack/external/yaml/lib3/yaml/nodes.py
 delete mode 100644 lib/spack/external/yaml/lib3/yaml/parser.py
 delete mode 100644 lib/spack/external/yaml/lib3/yaml/representer.py
 delete mode 100644 lib/spack/external/yaml/lib3/yaml/resolver.py
 delete mode 100644 lib/spack/external/yaml/lib3/yaml/scanner.py
 delete mode 100644 lib/spack/external/yaml/lib3/yaml/serializer.py
 delete mode 100644 lib/spack/external/yaml/lib3/yaml/tokens.py

diff --git a/bin/spack b/bin/spack
index 80e47caa1a..22a7032daa 100755
--- a/bin/spack
+++ b/bin/spack
@@ -49,13 +49,6 @@ if sys.version_info[:2] == (2, 6):
 
 sys.path.insert(0, spack_external_libs)
 
-# Handle vendoring of YAML specially, as it has two versions.
-if sys.version_info[0] == 2:
-    spack_yaml_libs = os.path.join(spack_external_libs, "yaml/lib")
-else:
-    spack_yaml_libs = os.path.join(spack_external_libs, "yaml/lib3")
-sys.path.insert(0, spack_yaml_libs)
-
 # Once we've set up the system path, run the spack main method
 import spack.main  # noqa
 sys.exit(spack.main.main())
diff --git a/lib/spack/external/__init__.py b/lib/spack/external/__init__.py
index 578a21df93..0f81f37802 100644
--- a/lib/spack/external/__init__.py
+++ b/lib/spack/external/__init__.py
@@ -118,12 +118,17 @@
   vendored copy ever needs to be updated again:
   https://github.com/spack/spack/pull/6801/commits/ff513c39f2c67ff615de5cbc581dd69a8ec96526
 
-pyyaml
-------
+ruamel.yaml
+-----------
 
-* Homepage: https://pypi.python.org/pypi/PyYAML
-* Usage: Used for config files.
-* Version: 3.12
+* Homepage: https://yaml.readthedocs.io/
+* Usage: Used for config files. Ruamel is based on PyYAML but is more
+  actively maintained and has more features, including round-tripping
+  comments read from config files.
+* Version: 0.11.15 (last version supporting Python 2.6)
+* Note: This package has been slightly modified to improve Python 2.6
+  compatibility -- some ``{}`` format strings were replaced, and the
+  import for ``OrderedDict`` was tweaked.
 
 six
 ---
diff --git a/lib/spack/external/ruamel/__init__.py b/lib/spack/external/ruamel/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/lib/spack/external/ruamel/yaml/.ruamel/__init__.py b/lib/spack/external/ruamel/yaml/.ruamel/__init__.py
new file mode 100644
index 0000000000..ece379ce2f
--- /dev/null
+++ b/lib/spack/external/ruamel/yaml/.ruamel/__init__.py
@@ -0,0 +1,2 @@
+import pkg_resources
+pkg_resources.declare_namespace(__name__)
diff --git a/lib/spack/external/ruamel/yaml/LICENSE b/lib/spack/external/ruamel/yaml/LICENSE
new file mode 100644
index 0000000000..f6f753a366
--- /dev/null
+++ b/lib/spack/external/ruamel/yaml/LICENSE
@@ -0,0 +1,21 @@
+ 
+ The MIT License (MIT)
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+     
+ The above copyright notice and this permission notice shall be included in
+ all copies or substantial portions of the Software.
+ 
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 
+ THE SOFTWARE.
+
diff --git a/lib/spack/external/ruamel/yaml/README.rst b/lib/spack/external/ruamel/yaml/README.rst
new file mode 100644
index 0000000000..993cf35542
--- /dev/null
+++ b/lib/spack/external/ruamel/yaml/README.rst
@@ -0,0 +1,38 @@
+
+ruamel.yaml
+===========
+
+``ruamel.yaml`` is a YAML 1.2 loader/dumper package for Python.
+
+* `Overview <http://yaml.readthedocs.org/en/latest/overview.html>`_
+* `Installing <http://yaml.readthedocs.org/en/latest/install.html>`_
+* `Details <http://yaml.readthedocs.org/en/latest/detail.html>`_
+* `Examples <http://yaml.readthedocs.org/en/latest/example.html>`_
+* `Differences with PyYAML <http://yaml.readthedocs.org/en/latest/pyyaml.html>`_
+
+.. image:: https://readthedocs.org/projects/yaml/badge/?version=stable
+   :target: https://yaml.readthedocs.org/en/stable
+
+ChangeLog
+=========
+
+::
+
+  0.11.15 (2016-XX-XX):
+    - Change to prevent FutureWarning in NumPy, as reported by tgehring
+    ("comparison to None will result in an elementwise object comparison in the future")
+
+  0.11.14 (2016-07-06):
+    - fix preserve_quotes missing on original Loaders (as reported
+      by Leynos, bitbucket issue 38)
+
+  0.11.13 (2016-07-06):
+    - documentation only, automated linux wheels
+
+  0.11.12 (2016-07-06):
+    - added support for roundtrip of single/double quoted scalars using:
+      ruamel.yaml.round_trip_load(stream, preserve_quotes=True)
+
+  0.11.0 (2016-02-18):
+    - RoundTripLoader loads 1.2 by default (no sexagesimals, 012 octals nor
+      yes/no/on/off booleans)
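
For reference, the preserve_quotes round-trip mentioned in the 0.11.12
changelog entry above looks like this (a minimal sketch, assuming the
vendored API):

    import ruamel.yaml

    src = 'name: "0.11"\n'
    data = ruamel.yaml.round_trip_load(src, preserve_quotes=True)
    # the original double quotes should be kept on output
    print(ruamel.yaml.round_trip_dump(data))  # -> name: "0.11"
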
diff --git a/lib/spack/external/ruamel/yaml/__init__.py b/lib/spack/external/ruamel/yaml/__init__.py
new file mode 100644
index 0000000000..b77032fc83
--- /dev/null
+++ b/lib/spack/external/ruamel/yaml/__init__.py
@@ -0,0 +1,85 @@
+# coding: utf-8
+
+from __future__ import print_function
+from __future__ import absolute_import
+
+# install_requires of ruamel.base is not really required but the old
+# ruamel.base installed __init__.py, and thus a new version should
+# be installed at some point
+
+_package_data = dict(
+    full_package_name="ruamel.yaml",
+    version_info=(0, 11, 15),
+    author="Anthon van der Neut",
+    author_email="a.van.der.neut@ruamel.eu",
+    description="ruamel.yaml is a YAML parser/emitter that supports roundtrip preservation of comments, seq/map flow style, and map key order",  # NOQA
+    entry_points=None,
+    install_requires=dict(
+        any=[],
+        py26=["ruamel.ordereddict"],
+        py27=["ruamel.ordereddict"]
+    ),
+    ext_modules=[dict(
+        name="_ruamel_yaml",
+        src=["ext/_ruamel_yaml.c", "ext/api.c", "ext/writer.c", "ext/dumper.c",
+                "ext/loader.c", "ext/reader.c", "ext/scanner.c", "ext/parser.c",
+                "ext/emitter.c"],
+        lib=[],
+        # test='#include "ext/yaml.h"\n\nint main(int argc, char* argv[])\n{\nyaml_parser_t parser;\nparser = parser;  /* prevent warning */\nreturn 0;\n}\n'  # NOQA
+        )
+    ],
+    classifiers=[
+        "Programming Language :: Python :: 2.6",
+        "Programming Language :: Python :: 2.7",
+        "Programming Language :: Python :: 3.3",
+        "Programming Language :: Python :: 3.4",
+        "Programming Language :: Python :: 3.5",
+        "Programming Language :: Python :: Implementation :: CPython",
+        "Programming Language :: Python :: Implementation :: PyPy",
+        "Programming Language :: Python :: Implementation :: Jython",
+        "Topic :: Software Development :: Libraries :: Python Modules",
+        "Topic :: Text Processing :: Markup"
+    ],
+    windows_wheels=True,
+    read_the_docs='yaml',
+    many_linux='libyaml-devel',
+)
+
+
+# < from ruamel.util.new import _convert_version
+def _convert_version(tup):
+    """create a PEP 386 pseudo-format conformant string from tuple tup"""
+    ret_val = str(tup[0])  # first is always digit
+    next_sep = "."  # separator for next extension, can be "" or "."
+    for x in tup[1:]:
+        if isinstance(x, int):
+            ret_val += next_sep + str(x)
+            next_sep = '.'
+            continue
+        first_letter = x[0].lower()
+        next_sep = ''
+        if first_letter in 'abcr':
+            ret_val += 'rc' if first_letter == 'r' else first_letter
+        elif first_letter in 'pd':
+            ret_val += '.post' if first_letter == 'p' else '.dev'
+    return ret_val
+
+
+# <
+version_info = _package_data['version_info']
+__version__ = _convert_version(version_info)
+
+del _convert_version
+
+try:
+    from .cyaml import *                               # NOQA
+    __with_libyaml__ = True
+except (ImportError, ValueError):  # for Jython
+    __with_libyaml__ = False
+
+
+# body extracted to main.py
+try:
+    from .main import *                               # NOQA
+except ImportError:
+    from ruamel.yaml.main import *                               # NOQA
diff --git a/lib/spack/external/ruamel/yaml/comments.py b/lib/spack/external/ruamel/yaml/comments.py
new file mode 100644
index 0000000000..4a99931615
--- /dev/null
+++ b/lib/spack/external/ruamel/yaml/comments.py
@@ -0,0 +1,481 @@
+# coding: utf-8
+
+from __future__ import absolute_import
+from __future__ import print_function
+
+"""
+stuff to deal with comments and formatting on dict/list/ordereddict/set
+these are not really related, formatting could be factored out as
+a separate base
+"""
+
+from collections import MutableSet
+
+__all__ = ["CommentedSeq", "CommentedMap", "CommentedOrderedMap",
+           "CommentedSet", 'comment_attrib', 'merge_attrib']
+
+
+try:
+    from .compat import ordereddict
+except ImportError:
+    from ruamel.yaml.compat import ordereddict
+
+comment_attrib = '_yaml_comment'
+format_attrib = '_yaml_format'
+line_col_attrib = '_yaml_line_col'
+anchor_attrib = '_yaml_anchor'
+merge_attrib = '_yaml_merge'
+tag_attrib = '_yaml_tag'
+
+
+class Comment(object):
+    # sys.getsizeof() testing showed that __slots__ makes Comment objects
+    # bigger, and that adding self.end did not matter
+    attrib = comment_attrib
+
+    def __init__(self):
+        self.comment = None  # [post, [pre]]
+        # map key (mapping/omap/dict) or index (sequence/list) to a  list of
+        # dict: post_key, pre_key, post_value, pre_value
+        # list: pre item, post item
+        self._items = {}
+        # self._start = [] # should not put these on first item
+        self._end = []  # end of document comments
+
+    def __str__(self):
+        if self._end:
+            end = ',\n  end=' + str(self._end)
+        else:
+            end = ''
+        return "Comment(comment={0},\n  items={1}{2})".format(
+            self.comment, self._items, end)
+
+    @property
+    def items(self):
+        return self._items
+
+    @property
+    def end(self):
+        return self._end
+
+    @end.setter
+    def end(self, value):
+        self._end = value
+
+    @property
+    def start(self):
+        return self._start
+
+    @start.setter
+    def start(self, value):
+        self._start = value
+
+
+# to distinguish key from None
+def NoComment():
+    pass
+
+
+class Format(object):
+    attrib = format_attrib
+
+    def __init__(self):
+        self._flow_style = None
+
+    def set_flow_style(self):
+        self._flow_style = True
+
+    def set_block_style(self):
+        self._flow_style = False
+
+    def flow_style(self, default=None):
+        """if default (the flow_style) is None, the flow style tacked on to
+        the object explicitly will be taken. If that is None as well the
+        default flow style rules the format down the line, or the type
+        of the constituent values (simple -> flow, map/list -> block)"""
+        if self._flow_style is None:
+            return default
+        return self._flow_style
+
+
+class LineCol(object):
+    attrib = line_col_attrib
+
+    def __init__(self):
+        self.line = None
+        self.col = None
+        self.data = None
+
+    def add_kv_line_col(self, key, data):
+        if self.data is None:
+            self.data = {}
+        self.data[key] = data
+
+    def key(self, k):
+        return self._kv(k, 0, 1)
+
+    def value(self, k):
+        return self._kv(k, 2, 3)
+
+    def _kv(self, k, x0, x1):
+        if self.data is None:
+            return None
+        data = self.data[k]
+        return data[x0], data[x1]
+
+    def item(self, idx):
+        if self.data is None:
+            return None
+        return self.data[idx][0], self.data[idx][1]
+
+    def add_idx_line_col(self, key, data):
+        if self.data is None:
+            self.data = {}
+        self.data[key] = data
+
+
+class Anchor(object):
+    attrib = anchor_attrib
+
+    def __init__(self):
+        self.value = None
+        self.always_dump = False
+
+
+class Tag(object):
+    """store tag information for roundtripping"""
+    attrib = tag_attrib
+
+    def __init__(self):
+        self.value = None
+
+
+class CommentedBase(object):
+    @property
+    def ca(self):
+        if not hasattr(self, Comment.attrib):
+            setattr(self, Comment.attrib, Comment())
+        return getattr(self, Comment.attrib)
+
+    def yaml_end_comment_extend(self, comment, clear=False):
+        if clear:
+            self.ca.end = []
+        self.ca.end.extend(comment)
+
+    def yaml_key_comment_extend(self, key, comment, clear=False):
+        l = self.ca._items.setdefault(key, [None, None, None, None])
+        if clear or l[1] is None:
+            if comment[1] is not None:
+                assert isinstance(comment[1], list)
+            l[1] = comment[1]
+        else:
+            l[1].extend(comment[0])
+        l[0] = comment[0]
+
+    def yaml_value_comment_extend(self, key, comment, clear=False):
+        l = self.ca._items.setdefault(key, [None, None, None, None])
+        if clear or l[3] is None:
+            if comment[1] is not None:
+                assert isinstance(comment[1], list)
+            l[3] = comment[1]
+        else:
+            l[3].extend(comment[0])
+        l[2] = comment[0]
+
+    def yaml_set_start_comment(self, comment, indent=0):
+        """overwrites any preceding comment lines on an object
+        expects comment to be without `#` and possibly have multiple lines
+        """
+        from .error import Mark
+        from .tokens import CommentToken
+        pre_comments = self._yaml_get_pre_comment()
+        if comment[-1] == '\n':
+            comment = comment[:-1]  # strip final newline if there
+        start_mark = Mark(None, None, None, indent, None, None)
+        for com in comment.split('\n'):
+            pre_comments.append(CommentToken('# ' + com + '\n', start_mark, None))
+
+    @property
+    def fa(self):
+        """format attribute
+
+        set_flow_style()/set_block_style()"""
+        if not hasattr(self, Format.attrib):
+            setattr(self, Format.attrib, Format())
+        return getattr(self, Format.attrib)
+
+    def yaml_add_eol_comment(self, comment, key=NoComment, column=None):
+        """
+        there is a problem as eol comments should start with ' #'
+        (but at the beginning of the line the space doesn't have to be
+        before the #). The column index is for the # mark
+        """
+        from .tokens import CommentToken
+        from .error import Mark
+        if column is None:
+            column = self._yaml_get_column(key)
+        if comment[0] != '#':
+            comment = '# ' + comment
+        if column is None:
+            if comment[0] == '#':
+                comment = ' ' + comment
+                column = 0
+        start_mark = Mark(None, None, None, column, None, None)
+        ct = [CommentToken(comment, start_mark, None), None]
+        self._yaml_add_eol_comment(ct, key=key)
+
+    @property
+    def lc(self):
+        if not hasattr(self, LineCol.attrib):
+            setattr(self, LineCol.attrib, LineCol())
+        return getattr(self, LineCol.attrib)
+
+    def _yaml_set_line_col(self, line, col):
+        self.lc.line = line
+        self.lc.col = col
+
+    def _yaml_set_kv_line_col(self, key, data):
+        self.lc.add_kv_line_col(key, data)
+
+    def _yaml_set_idx_line_col(self, key, data):
+        self.lc.add_idx_line_col(key, data)
+
+    @property
+    def anchor(self):
+        if not hasattr(self, Anchor.attrib):
+            setattr(self, Anchor.attrib, Anchor())
+        return getattr(self, Anchor.attrib)
+
+    def yaml_anchor(self):
+        if not hasattr(self, Anchor.attrib):
+            return None
+        return self.anchor
+
+    def yaml_set_anchor(self, value, always_dump=False):
+        self.anchor.value = value
+        self.anchor.always_dump = always_dump
+
+    @property
+    def tag(self):
+        if not hasattr(self, Tag.attrib):
+            setattr(self, Tag.attrib, Tag())
+        return getattr(self, Tag.attrib)
+
+    def yaml_set_tag(self, value):
+        self.tag.value = value
+
+
+class CommentedSeq(list, CommentedBase):
+    __slots__ = [Comment.attrib, ]
+
+    def _yaml_add_comment(self, comment, key=NoComment):
+        if key is not NoComment:
+            self.yaml_key_comment_extend(key, comment)
+        else:
+            self.ca.comment = comment
+
+    def _yaml_add_eol_comment(self, comment, key):
+        self._yaml_add_comment(comment, key=key)
+
+    def _yaml_get_columnX(self, key):
+        return self.ca.items[key][0].start_mark.column
+
+    def insert(self, idx, val):
+        """the comments after the insertion have to move forward"""
+        list.insert(self, idx, val)
+        for list_index in sorted(self.ca.items, reverse=True):
+            if list_index < idx:
+                break
+            self.ca.items[list_index+1] = self.ca.items.pop(list_index)
+
+    def pop(self, idx):
+        res = list.pop(self, idx)
+        self.ca.items.pop(idx, None)  # might not be there -> default value
+        for list_index in sorted(self.ca.items):
+            if list_index < idx:
+                continue
+            self.ca.items[list_index-1] = self.ca.items.pop(list_index)
+        return res
+
+    def _yaml_get_column(self, key):
+        column = None
+        sel_idx = None
+        pre, post = key-1, key+1
+        if pre in self.ca.items:
+            sel_idx = pre
+        elif post in self.ca.items:
+            sel_idx = post
+        else:
+            # self.ca.items is not ordered
+            for row_idx, k1 in enumerate(self):
+                if row_idx >= key:
+                    break
+                if row_idx not in self.ca.items:
+                    continue
+                sel_idx = row_idx
+        if sel_idx is not None:
+            column = self._yaml_get_columnX(sel_idx)
+        return column
+
+    def _yaml_get_pre_comment(self):
+        if self.ca.comment is None:
+            pre_comments = []
+            self.ca.comment = [None, pre_comments]
+        else:
+            pre_comments = self.ca.comment[1] = []
+        return pre_comments
+
+
+class CommentedMap(ordereddict, CommentedBase):
+    __slots__ = [Comment.attrib, ]
+
+    def _yaml_add_comment(self, comment, key=NoComment, value=NoComment):
+        """values is set to key to indicate a value attachment of comment"""
+        if key is not NoComment:
+            self.yaml_key_comment_extend(key, comment)
+            return
+        if value is not NoComment:
+            self.yaml_value_comment_extend(value, comment)
+        else:
+            self.ca.comment = comment
+
+    def _yaml_add_eol_comment(self, comment, key):
+        """add on the value line, with value specified by the key"""
+        self._yaml_add_comment(comment, value=key)
+
+    def _yaml_get_columnX(self, key):
+        return self.ca.items[key][2].start_mark.column
+
+    def _yaml_get_column(self, key):
+        column = None
+        sel_idx = None
+        pre, post, last = None, None, None
+        for x in self:
+            if pre is not None and x != key:
+                post = x
+                break
+            if x == key:
+                pre = last
+            last = x
+        if pre in self.ca.items:
+            sel_idx = pre
+        elif post in self.ca.items:
+            sel_idx = post
+        else:
+            # self.ca.items is not ordered
+            for row_idx, k1 in enumerate(self):
+                if k1 >= key:
+                    break
+                if k1 not in self.ca.items:
+                    continue
+                sel_idx = k1
+        if sel_idx is not None:
+            column = self._yaml_get_columnX(sel_idx)
+        return column
+
+    def _yaml_get_pre_comment(self):
+        if self.ca.comment is None:
+            pre_comments = []
+            self.ca.comment = [None, pre_comments]
+        else:
+            pre_comments = self.ca.comment[1] = []
+        return pre_comments
+
+    def update(self, vals):
+        try:
+            ordereddict.update(self, vals)
+        except TypeError:
+            # probably a dict that is used
+            for x in vals:
+                self[x] = vals[x]
+
+    def insert(self, pos, key, value, comment=None):
+        """insert key value into given position
+        attach comment if provided
+        """
+        ordereddict.insert(self, pos, key, value)
+        if comment is not None:
+            self.yaml_add_eol_comment(comment, key=key)
+
+    def mlget(self, key, default=None, list_ok=False):
+        """multi-level get that expects dicts within dicts"""
+        if not isinstance(key, list):
+            return self.get(key, default)
+        # assume that the key is a list of recursively accessible dicts
+
+        def get_one_level(key_list, level, d):
+            if not list_ok:
+                assert isinstance(d, dict)
+            if level >= len(key_list):
+                if level > len(key_list):
+                    raise IndexError
+                return d[key_list[level-1]]
+            return get_one_level(key_list, level+1, d[key_list[level-1]])
+
+        try:
+            return get_one_level(key, 1, self)
+        except KeyError:
+            return default
+        except (TypeError, IndexError):
+            if not list_ok:
+                raise
+            return default
+
+    def __getitem__(self, key):
+        try:
+            return ordereddict.__getitem__(self, key)
+        except KeyError:
+            for merged in getattr(self, merge_attrib, []):
+                if key in merged[1]:
+                    return merged[1][key]
+            raise
+
+    def get(self, key, default=None):
+        try:
+            return self.__getitem__(key)
+        except:
+            return default
+
+    @property
+    def merge(self):
+        if not hasattr(self, merge_attrib):
+            setattr(self, merge_attrib, [])
+        return getattr(self, merge_attrib)
+
+    def add_yaml_merge(self, value):
+        self.merge.extend(value)
+
+
+class CommentedOrderedMap(CommentedMap):
+    __slots__ = [Comment.attrib, ]
+
+
+class CommentedSet(MutableSet, CommentedMap):
+    __slots__ = [Comment.attrib, 'odict']
+
+    def __init__(self, values=None):
+        self.odict = ordereddict()
+        MutableSet.__init__(self)
+        if values is not None:
+            self |= values
+
+    def add(self, value):
+        """Add an element."""
+        self.odict[value] = None
+
+    def discard(self, value):
+        """Remove an element.  Do not raise an exception if absent."""
+        del self.odict[value]
+
+    def __contains__(self, x):
+        return x in self.odict
+
+    def __iter__(self):
+        for x in self.odict:
+            yield x
+
+    def __len__(self):
+        return len(self.odict)
+
+    def __repr__(self):
+        return 'set({0!r})'.format(self.odict.keys())
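
The Commented* classes above are what the round-trip loader hands back in
place of plain dicts and lists. A hypothetical usage sketch (method names
as defined above; exact comment column in the output may differ):

    import ruamel.yaml

    data = ruamel.yaml.round_trip_load("a: 1\nb: 2\n")
    print(type(data).__name__)   # CommentedMap
    data.yaml_add_eol_comment('tuned by spack', key='b')
    print(ruamel.yaml.round_trip_dump(data))
    # a: 1
    # b: 2  # tuned by spack
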
diff --git a/lib/spack/external/ruamel/yaml/compat.py b/lib/spack/external/ruamel/yaml/compat.py
new file mode 100644
index 0000000000..6eee151c51
--- /dev/null
+++ b/lib/spack/external/ruamel/yaml/compat.py
@@ -0,0 +1,120 @@
+# coding: utf-8
+
+from __future__ import print_function
+
+# partially from package six by Benjamin Peterson
+
+import sys
+import os
+import types
+
+try:
+    from ruamel.ordereddict import ordereddict
+except:
+    try:
+        from collections import OrderedDict  # nopyqver
+    except ImportError:
+        from ordereddict import OrderedDict
+    # to get the right name import ... as ordereddict doesn't do that
+
+    class ordereddict(OrderedDict):
+        if not hasattr(OrderedDict, 'insert'):
+            def insert(self, pos, key, value):
+                if pos >= len(self):
+                    self[key] = value
+                    return
+                od = ordereddict()
+                od.update(self)
+                for k in od:
+                    del self[k]
+                for index, old_key in enumerate(od):
+                    if pos == index:
+                        self[key] = value
+                    self[old_key] = od[old_key]
+
+
+PY2 = sys.version_info[0] == 2
+PY3 = sys.version_info[0] == 3
+
+if PY3:
+    def utf8(s):
+        return s
+
+    def to_str(s):
+        return s
+
+    def to_unicode(s):
+        return s
+
+else:
+    def utf8(s):
+        return s.encode('utf-8')
+
+    def to_str(s):
+        return str(s)
+
+    def to_unicode(s):
+        return unicode(s)
+
+if PY3:
+    string_types = str,
+    integer_types = int,
+    class_types = type,
+    text_type = str
+    binary_type = bytes
+
+    MAXSIZE = sys.maxsize
+    unichr = chr
+    import io
+    StringIO = io.StringIO
+    BytesIO = io.BytesIO
+
+else:
+    string_types = basestring,
+    integer_types = (int, long)
+    class_types = (type, types.ClassType)
+    text_type = unicode
+    binary_type = str
+
+    unichr = unichr  # to allow importing
+    import StringIO
+    StringIO = StringIO.StringIO
+    import cStringIO
+    BytesIO = cStringIO.StringIO
+
+if PY3:
+    builtins_module = 'builtins'
+else:
+    builtins_module = '__builtin__'
+
+
+def with_metaclass(meta, *bases):
+    """Create a base class with a metaclass."""
+    return meta("NewBase", bases, {})
+
+DBG_TOKEN = 1
+DBG_EVENT = 2
+DBG_NODE = 4
+
+
+_debug = None
+
+
+# used from yaml util when testing
+def dbg(val=None):
+    global _debug
+    if _debug is None:
+        # set to true or false
+        _debug = os.environ.get('YAMLDEBUG')
+        if _debug is None:
+            _debug = 0
+        else:
+            _debug = int(_debug)
+    if val is None:
+        return _debug
+    return _debug & val
+
+
+def nprint(*args, **kw):
+    if dbg:
+        print(*args, **kw)
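
The one method the ordereddict shim adds over the stdlib OrderedDict is
insert(), which CommentedMap.insert() relies on. A quick sketch of its
behavior (assuming the pure-Python fallback defined above):

    from ruamel.yaml.compat import ordereddict

    od = ordereddict()
    od['a'] = 1
    od['c'] = 3
    od.insert(1, 'b', 2)      # place 'b' at position 1
    print(list(od.items()))   # [('a', 1), ('b', 2), ('c', 3)]
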
diff --git a/lib/spack/external/yaml/lib/yaml/composer.py b/lib/spack/external/ruamel/yaml/composer.py
similarity index 53%
rename from lib/spack/external/yaml/lib/yaml/composer.py
rename to lib/spack/external/ruamel/yaml/composer.py
index 06e5ac782f..fb0a55c759 100644
--- a/lib/spack/external/yaml/lib/yaml/composer.py
+++ b/lib/spack/external/ruamel/yaml/composer.py
@@ -1,15 +1,32 @@
+# coding: utf-8
+
+from __future__ import absolute_import
+from __future__ import print_function
+
+
+try:
+    from .error import MarkedYAMLError
+    from .compat import utf8
+except (ImportError, ValueError):  # for Jython
+    from ruamel.yaml.error import MarkedYAMLError
+    from ruamel.yaml.compat import utf8
+
+from ruamel.yaml.events import (
+    StreamStartEvent, StreamEndEvent, MappingStartEvent, MappingEndEvent,
+    SequenceStartEvent, SequenceEndEvent, AliasEvent, ScalarEvent,
+)
+from ruamel.yaml.nodes import (
+    MappingNode, ScalarNode, SequenceNode,
+)
 
 __all__ = ['Composer', 'ComposerError']
 
-from error import MarkedYAMLError
-from events import *
-from nodes import *
 
 class ComposerError(MarkedYAMLError):
     pass
 
-class Composer(object):
 
+class Composer(object):
     def __init__(self):
         self.anchors = {}
 
@@ -38,9 +55,10 @@ def get_single_node(self):
         # Ensure that the stream contains no more documents.
         if not self.check_event(StreamEndEvent):
             event = self.get_event()
-            raise ComposerError("expected a single document in the stream",
-                    document.start_mark, "but found another document",
-                    event.start_mark)
+            raise ComposerError(
+                "expected a single document in the stream",
+                document.start_mark, "but found another document",
+                event.start_mark)
 
         # Drop the STREAM-END event.
         self.get_event()
@@ -63,18 +81,20 @@ def compose_document(self):
     def compose_node(self, parent, index):
         if self.check_event(AliasEvent):
             event = self.get_event()
-            anchor = event.anchor
-            if anchor not in self.anchors:
-                raise ComposerError(None, None, "found undefined alias %r"
-                        % anchor.encode('utf-8'), event.start_mark)
-            return self.anchors[anchor]
+            alias = event.anchor
+            if alias not in self.anchors:
+                raise ComposerError(
+                    None, None, "found undefined alias %r"
+                    % utf8(alias), event.start_mark)
+            return self.anchors[alias]
         event = self.peek_event()
         anchor = event.anchor
-        if anchor is not None:
+        if anchor is not None:  # have an anchor
             if anchor in self.anchors:
-                raise ComposerError("found duplicate anchor %r; first occurence"
-                        % anchor.encode('utf-8'), self.anchors[anchor].start_mark,
-                        "second occurence", event.start_mark)
+                raise ComposerError(
+                    "found duplicate anchor %r; first occurence"
+                    % utf8(anchor), self.anchors[anchor].start_mark,
+                    "second occurence", event.start_mark)
         self.descend_resolver(parent, index)
         if self.check_event(ScalarEvent):
             node = self.compose_scalar_node(anchor)
@@ -91,7 +111,8 @@ def compose_scalar_node(self, anchor):
         if tag is None or tag == u'!':
             tag = self.resolve(ScalarNode, event.value, event.implicit)
         node = ScalarNode(tag, event.value,
-                event.start_mark, event.end_mark, style=event.style)
+                          event.start_mark, event.end_mark, style=event.style,
+                          comment=event.comment)
         if anchor is not None:
             self.anchors[anchor] = node
         return node
@@ -102,8 +123,9 @@ def compose_sequence_node(self, anchor):
         if tag is None or tag == u'!':
             tag = self.resolve(SequenceNode, None, start_event.implicit)
         node = SequenceNode(tag, [],
-                start_event.start_mark, None,
-                flow_style=start_event.flow_style)
+                            start_event.start_mark, None,
+                            flow_style=start_event.flow_style,
+                            comment=start_event.comment, anchor=anchor)
         if anchor is not None:
             self.anchors[anchor] = node
         index = 0
@@ -111,7 +133,13 @@ def compose_sequence_node(self, anchor):
             node.value.append(self.compose_node(node, index))
             index += 1
         end_event = self.get_event()
+        if node.flow_style is True and end_event.comment is not None:
+            if node.comment is not None:
+                print('Warning: unexpected end_event comment in sequence '
+                      'node {0}'.format(node.flow_style))
+            node.comment = end_event.comment
         node.end_mark = end_event.end_mark
+        self.check_end_doc_comment(end_event, node)
         return node
 
     def compose_mapping_node(self, anchor):
@@ -120,20 +148,35 @@ def compose_mapping_node(self, anchor):
         if tag is None or tag == u'!':
             tag = self.resolve(MappingNode, None, start_event.implicit)
         node = MappingNode(tag, [],
-                start_event.start_mark, None,
-                flow_style=start_event.flow_style)
+                           start_event.start_mark, None,
+                           flow_style=start_event.flow_style,
+                           comment=start_event.comment, anchor=anchor)
         if anchor is not None:
             self.anchors[anchor] = node
         while not self.check_event(MappingEndEvent):
-            #key_event = self.peek_event()
+            # key_event = self.peek_event()
             item_key = self.compose_node(node, None)
-            #if item_key in node.value:
-            #    raise ComposerError("while composing a mapping", start_event.start_mark,
-            #            "found duplicate key", key_event.start_mark)
+            # if item_key in node.value:
+            #     raise ComposerError("while composing a mapping",
+            #             start_event.start_mark,
+            #             "found duplicate key", key_event.start_mark)
             item_value = self.compose_node(node, item_key)
-            #node.value[item_key] = item_value
+            # node.value[item_key] = item_value
             node.value.append((item_key, item_value))
         end_event = self.get_event()
+        if node.flow_style is True and end_event.comment is not None:
+            node.comment = end_event.comment
         node.end_mark = end_event.end_mark
+        self.check_end_doc_comment(end_event, node)
         return node
 
+    def check_end_doc_comment(self, end_event, node):
+        if end_event.comment and end_event.comment[1]:
+            # pre comments on an end_event, no following to move to
+            if node.comment is None:
+                node.comment = [None, None]
+            assert not isinstance(node, ScalarEvent)
+            # this is a post comment on a mapping node, add as third element
+            # in the list
+            node.comment.append(end_event.comment[1])
+            end_event.comment[1] = None
diff --git a/lib/spack/external/ruamel/yaml/configobjwalker.py b/lib/spack/external/ruamel/yaml/configobjwalker.py
new file mode 100644
index 0000000000..bab910cb11
--- /dev/null
+++ b/lib/spack/external/ruamel/yaml/configobjwalker.py
@@ -0,0 +1,9 @@
+# coding: utf-8
+
+import warnings
+from ruamel.yaml.util import configobj_walker as new_configobj_walker
+
+
+def configobj_walker(cfg):
+    warnings.warn("configobj_walker has move to ruamel.yaml.util, please update your code")
+    return new_configobj_walker(cfg)
diff --git a/lib/spack/external/ruamel/yaml/constructor.py b/lib/spack/external/ruamel/yaml/constructor.py
new file mode 100644
index 0000000000..f809df4bf9
--- /dev/null
+++ b/lib/spack/external/ruamel/yaml/constructor.py
@@ -0,0 +1,1167 @@
+# coding: utf-8
+
+from __future__ import absolute_import
+from __future__ import print_function
+
+import collections
+import datetime
+import base64
+import binascii
+import re
+import sys
+import types
+
+try:
+    from .error import *                               # NOQA
+    from .nodes import *                               # NOQA
+    from .compat import utf8, builtins_module, to_str, PY2, PY3, ordereddict, text_type
+    from .comments import *                               # NOQA
+    from .scalarstring import *                           # NOQA
+except (ImportError, ValueError):  # for Jython
+    from ruamel.yaml.error import *                               # NOQA
+    from ruamel.yaml.nodes import *                               # NOQA
+    from ruamel.yaml.compat import (utf8, builtins_module, to_str, PY2, PY3,
+                                    ordereddict, text_type)
+    from ruamel.yaml.comments import *                               # NOQA
+    from ruamel.yaml.scalarstring import *                           # NOQA
+
+
+__all__ = ['BaseConstructor', 'SafeConstructor', 'Constructor',
+           'ConstructorError', 'RoundTripConstructor']
+
+
+class ConstructorError(MarkedYAMLError):
+    pass
+
+
+class BaseConstructor(object):
+
+    yaml_constructors = {}
+    yaml_multi_constructors = {}
+
+    def __init__(self, preserve_quotes=None):
+        self.constructed_objects = {}
+        self.recursive_objects = {}
+        self.state_generators = []
+        self.deep_construct = False
+        self._preserve_quotes = preserve_quotes
+
+    def check_data(self):
+        # Check whether there are more documents available.
+        return self.check_node()
+
+    def get_data(self):
+        # Construct and return the next document.
+        if self.check_node():
+            return self.construct_document(self.get_node())
+
+    def get_single_data(self):
+        # Ensure that the stream contains a single document and construct it.
+        node = self.get_single_node()
+        if node is not None:
+            return self.construct_document(node)
+        return None
+
+    def construct_document(self, node):
+        data = self.construct_object(node)
+        while self.state_generators:
+            state_generators = self.state_generators
+            self.state_generators = []
+            for generator in state_generators:
+                for dummy in generator:
+                    pass
+        self.constructed_objects = {}
+        self.recursive_objects = {}
+        self.deep_construct = False
+        return data
+
+    def construct_object(self, node, deep=False):
+        """deep is True when creating an object/mapping recursively,
+        in that case we want the underlying elements available during construction
+        """
+        if node in self.constructed_objects:
+            return self.constructed_objects[node]
+        if deep:
+            old_deep = self.deep_construct
+            self.deep_construct = True
+        if node in self.recursive_objects:
+            raise ConstructorError(
+                None, None,
+                "found unconstructable recursive node", node.start_mark)
+        self.recursive_objects[node] = None
+        constructor = None
+        tag_suffix = None
+        if node.tag in self.yaml_constructors:
+            constructor = self.yaml_constructors[node.tag]
+        else:
+            for tag_prefix in self.yaml_multi_constructors:
+                if node.tag.startswith(tag_prefix):
+                    tag_suffix = node.tag[len(tag_prefix):]
+                    constructor = self.yaml_multi_constructors[tag_prefix]
+                    break
+            else:
+                if None in self.yaml_multi_constructors:
+                    tag_suffix = node.tag
+                    constructor = self.yaml_multi_constructors[None]
+                elif None in self.yaml_constructors:
+                    constructor = self.yaml_constructors[None]
+                elif isinstance(node, ScalarNode):
+                    constructor = self.__class__.construct_scalar
+                elif isinstance(node, SequenceNode):
+                    constructor = self.__class__.construct_sequence
+                elif isinstance(node, MappingNode):
+                    constructor = self.__class__.construct_mapping
+        if tag_suffix is None:
+            data = constructor(self, node)
+        else:
+            data = constructor(self, tag_suffix, node)
+        if isinstance(data, types.GeneratorType):
+            generator = data
+            data = next(generator)
+            if self.deep_construct:
+                for dummy in generator:
+                    pass
+            else:
+                self.state_generators.append(generator)
+        self.constructed_objects[node] = data
+        del self.recursive_objects[node]
+        if deep:
+            self.deep_construct = old_deep
+        return data
+
+    def construct_scalar(self, node):
+        if not isinstance(node, ScalarNode):
+            raise ConstructorError(
+                None, None,
+                "expected a scalar node, but found %s" % node.id,
+                node.start_mark)
+        return node.value
+
+    def construct_sequence(self, node, deep=False):
+        """deep is True when creating an object/mapping recursively,
+        in that case we want the underlying elements available during construction
+        """
+        if not isinstance(node, SequenceNode):
+            raise ConstructorError(
+                None, None,
+                "expected a sequence node, but found %s" % node.id,
+                node.start_mark)
+        return [self.construct_object(child, deep=deep)
+                for child in node.value]
+
+    def construct_mapping(self, node, deep=False):
+        """deep is True when creating an object/mapping recursively,
+        in that case we want the underlying elements available during construction
+        """
+        if not isinstance(node, MappingNode):
+            raise ConstructorError(
+                None, None,
+                "expected a mapping node, but found %s" % node.id,
+                node.start_mark)
+        mapping = {}
+        for key_node, value_node in node.value:
+            # keys can be list -> deep
+            key = self.construct_object(key_node, deep=True)
+            # lists are not hashable, but tuples are
+            if not isinstance(key, collections.Hashable):
+                if isinstance(key, list):
+                    key = tuple(key)
+            if PY2:
+                try:
+                    hash(key)
+                except TypeError as exc:
+                    raise ConstructorError(
+                        "while constructing a mapping", node.start_mark,
+                        "found unacceptable key (%s)" %
+                        exc, key_node.start_mark)
+            else:
+                if not isinstance(key, collections.Hashable):
+                    raise ConstructorError(
+                        "while constructing a mapping", node.start_mark,
+                        "found unhashable key", key_node.start_mark)
+
+            value = self.construct_object(value_node, deep=deep)
+            mapping[key] = value
+        return mapping
+
+    def construct_pairs(self, node, deep=False):
+        if not isinstance(node, MappingNode):
+            raise ConstructorError(
+                None, None,
+                "expected a mapping node, but found %s" % node.id,
+                node.start_mark)
+        pairs = []
+        for key_node, value_node in node.value:
+            key = self.construct_object(key_node, deep=deep)
+            value = self.construct_object(value_node, deep=deep)
+            pairs.append((key, value))
+        return pairs
+
+    @classmethod
+    def add_constructor(cls, tag, constructor):
+        if 'yaml_constructors' not in cls.__dict__:
+            cls.yaml_constructors = cls.yaml_constructors.copy()
+        cls.yaml_constructors[tag] = constructor
+
+    @classmethod
+    def add_multi_constructor(cls, tag_prefix, multi_constructor):
+        if 'yaml_multi_constructors' not in cls.__dict__:
+            cls.yaml_multi_constructors = cls.yaml_multi_constructors.copy()
+        cls.yaml_multi_constructors[tag_prefix] = multi_constructor
+
+
+class SafeConstructor(BaseConstructor):
+    def construct_scalar(self, node):
+        if isinstance(node, MappingNode):
+            for key_node, value_node in node.value:
+                if key_node.tag == u'tag:yaml.org,2002:value':
+                    return self.construct_scalar(value_node)
+        return BaseConstructor.construct_scalar(self, node)
+
+    def flatten_mapping(self, node):
+        """
+        This implements the merge key feature http://yaml.org/type/merge.html
+        by inserting keys from the merge dict/list of dicts if not yet
+        available in this node
+        """
+        merge = []
+        index = 0
+        while index < len(node.value):
+            key_node, value_node = node.value[index]
+            if key_node.tag == u'tag:yaml.org,2002:merge':
+                del node.value[index]
+                if isinstance(value_node, MappingNode):
+                    self.flatten_mapping(value_node)
+                    merge.extend(value_node.value)
+                elif isinstance(value_node, SequenceNode):
+                    submerge = []
+                    for subnode in value_node.value:
+                        if not isinstance(subnode, MappingNode):
+                            raise ConstructorError(
+                                "while constructing a mapping",
+                                node.start_mark,
+                                "expected a mapping for merging, but found %s"
+                                % subnode.id, subnode.start_mark)
+                        self.flatten_mapping(subnode)
+                        submerge.append(subnode.value)
+                    submerge.reverse()
+                    for value in submerge:
+                        merge.extend(value)
+                else:
+                    raise ConstructorError(
+                        "while constructing a mapping", node.start_mark,
+                        "expected a mapping or list of mappings for merging, "
+                        "but found %s"
+                        % value_node.id, value_node.start_mark)
+            elif key_node.tag == u'tag:yaml.org,2002:value':
+                key_node.tag = u'tag:yaml.org,2002:str'
+                index += 1
+            else:
+                index += 1
+        if merge:
+            node.value = merge + node.value
+
+    def construct_mapping(self, node, deep=False):
+        """deep is True when creating an object/mapping recursively,
+        in that case we want the underlying elements available during construction
+        """
+        if isinstance(node, MappingNode):
+            self.flatten_mapping(node)
+        return BaseConstructor.construct_mapping(self, node, deep=deep)
+
+    def construct_yaml_null(self, node):
+        self.construct_scalar(node)
+        return None
+
+    # YAML 1.2 spec doesn't mention yes/no etc any more, 1.1 does
+    bool_values = {
+        u'yes':     True,
+        u'no':      False,
+        u'true':    True,
+        u'false':   False,
+        u'on':      True,
+        u'off':     False,
+    }
+
+    def construct_yaml_bool(self, node):
+        value = self.construct_scalar(node)
+        return self.bool_values[value.lower()]
+
+    def construct_yaml_int(self, node):
+        value = to_str(self.construct_scalar(node))
+        value = value.replace('_', '')
+        sign = +1
+        if value[0] == '-':
+            sign = -1
+        if value[0] in '+-':
+            value = value[1:]
+        if value == '0':
+            return 0
+        elif value.startswith('0b'):
+            return sign*int(value[2:], 2)
+        elif value.startswith('0x'):
+            return sign*int(value[2:], 16)
+        elif value.startswith('0o'):
+            return sign*int(value[2:], 8)
+        elif self.processing_version != (1, 2) and value[0] == '0':
+            return sign*int(value, 8)
+        elif self.processing_version != (1, 2) and ':' in value:
+            digits = [int(part) for part in value.split(':')]
+            digits.reverse()
+            base = 1
+            value = 0
+            for digit in digits:
+                value += digit*base
+                base *= 60
+            return sign*value
+        else:
+            return sign*int(value)
+
+    inf_value = 1e300
+    while inf_value != inf_value*inf_value:
+        inf_value *= inf_value
+    nan_value = -inf_value/inf_value   # Trying to make a quiet NaN (like C99).
+
+    def construct_yaml_float(self, node):
+        value = to_str(self.construct_scalar(node))
+        value = value.replace('_', '').lower()
+        sign = +1
+        if value[0] == '-':
+            sign = -1
+        if value[0] in '+-':
+            value = value[1:]
+        if value == '.inf':
+            return sign*self.inf_value
+        elif value == '.nan':
+            return self.nan_value
+        elif ':' in value:
+            digits = [float(part) for part in value.split(':')]
+            digits.reverse()
+            base = 1
+            value = 0.0
+            for digit in digits:
+                value += digit*base
+                base *= 60
+            return sign*value
+        else:
+            return sign*float(value)
+
+    if PY3:
+        def construct_yaml_binary(self, node):
+            try:
+                value = self.construct_scalar(node).encode('ascii')
+            except UnicodeEncodeError as exc:
+                raise ConstructorError(
+                    None, None,
+                    "failed to convert base64 data into ascii: %s" % exc,
+                    node.start_mark)
+            try:
+                if hasattr(base64, 'decodebytes'):
+                    return base64.decodebytes(value)
+                else:
+                    return base64.decodestring(value)
+            except binascii.Error as exc:
+                raise ConstructorError(
+                    None, None,
+                    "failed to decode base64 data: %s" % exc, node.start_mark)
+    else:
+        def construct_yaml_binary(self, node):
+            value = self.construct_scalar(node)
+            try:
+                return to_str(value).decode('base64')
+            except (binascii.Error, UnicodeEncodeError) as exc:
+                raise ConstructorError(
+                    None, None,
+                    "failed to decode base64 data: %s" % exc, node.start_mark)
+
+    timestamp_regexp = re.compile(
+        u'''^(?P<year>[0-9][0-9][0-9][0-9])
+          -(?P<month>[0-9][0-9]?)
+          -(?P<day>[0-9][0-9]?)
+          (?:(?:[Tt]|[ \\t]+)
+          (?P<hour>[0-9][0-9]?)
+          :(?P<minute>[0-9][0-9])
+          :(?P<second>[0-9][0-9])
+          (?:\\.(?P<fraction>[0-9]*))?
+          (?:[ \\t]*(?P<tz>Z|(?P<tz_sign>[-+])(?P<tz_hour>[0-9][0-9]?)
+          (?::(?P<tz_minute>[0-9][0-9]))?))?)?$''', re.X)
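+
+    # Examples the pattern above accepts:
+    #   2001-12-14                  -> datetime.date(2001, 12, 14)
+    #   2001-12-14 21:59:43.10 -5   -> datetime.datetime, shifted to UTC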
+
+    def construct_yaml_timestamp(self, node):
+        value = self.construct_scalar(node)  # NOQA
+        match = self.timestamp_regexp.match(node.value)
+        values = match.groupdict()
+        year = int(values['year'])
+        month = int(values['month'])
+        day = int(values['day'])
+        if not values['hour']:
+            return datetime.date(year, month, day)
+        hour = int(values['hour'])
+        minute = int(values['minute'])
+        second = int(values['second'])
+        fraction = 0
+        if values['fraction']:
+            fraction = values['fraction'][:6]
+            while len(fraction) < 6:
+                fraction += '0'
+            fraction = int(fraction)
+        delta = None
+        if values['tz_sign']:
+            tz_hour = int(values['tz_hour'])
+            tz_minute = int(values['tz_minute'] or 0)
+            delta = datetime.timedelta(hours=tz_hour, minutes=tz_minute)
+            if values['tz_sign'] == '-':
+                delta = -delta
+        data = datetime.datetime(year, month, day, hour, minute, second,
+                                 fraction)
+        if delta:
+            data -= delta
+        return data
+
+    def construct_yaml_omap(self, node):
+        # Note: we now check for duplicate keys (via the assert below)
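+        # A sketch: ``!!omap [one: 1, two: 2]`` yields
+        # ordereddict([('one', 1), ('two', 2)])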
+        omap = ordereddict()
+        yield omap
+        if not isinstance(node, SequenceNode):
+            raise ConstructorError(
+                "while constructing an ordered map", node.start_mark,
+                "expected a sequence, but found %s" % node.id, node.start_mark)
+        for subnode in node.value:
+            if not isinstance(subnode, MappingNode):
+                raise ConstructorError(
+                    "while constructing an ordered map", node.start_mark,
+                    "expected a mapping of length 1, but found %s" %
+                    subnode.id,
+                    subnode.start_mark)
+            if len(subnode.value) != 1:
+                raise ConstructorError(
+                    "while constructing an ordered map", node.start_mark,
+                    "expected a single mapping item, but found %d items" %
+                    len(subnode.value),
+                    subnode.start_mark)
+            key_node, value_node = subnode.value[0]
+            key = self.construct_object(key_node)
+            assert key not in omap
+            value = self.construct_object(value_node)
+            omap[key] = value
+
+    def construct_yaml_pairs(self, node):
+        # Note: like `construct_yaml_omap`, but duplicate keys are allowed.
+        pairs = []
+        yield pairs
+        if not isinstance(node, SequenceNode):
+            raise ConstructorError(
+                "while constructing pairs", node.start_mark,
+                "expected a sequence, but found %s" % node.id, node.start_mark)
+        for subnode in node.value:
+            if not isinstance(subnode, MappingNode):
+                raise ConstructorError(
+                    "while constructing pairs", node.start_mark,
+                    "expected a mapping of length 1, but found %s" %
+                    subnode.id,
+                    subnode.start_mark)
+            if len(subnode.value) != 1:
+                raise ConstructorError(
+                    "while constructing pairs", node.start_mark,
+                    "expected a single mapping item, but found %d items" %
+                    len(subnode.value),
+                    subnode.start_mark)
+            key_node, value_node = subnode.value[0]
+            key = self.construct_object(key_node)
+            value = self.construct_object(value_node)
+            pairs.append((key, value))
+
+    def construct_yaml_set(self, node):
+        data = set()
+        yield data
+        value = self.construct_mapping(node)
+        data.update(value)
+
+    def construct_yaml_str(self, node):
+        value = self.construct_scalar(node)
+        if PY3:
+            return value
+        try:
+            return value.encode('ascii')
+        except UnicodeEncodeError:
+            return value
+
+    def construct_yaml_seq(self, node):
+        data = []
+        yield data
+        data.extend(self.construct_sequence(node))
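+
+    # Constructors that ``yield`` their container first (seq, map, set, omap)
+    # support self-referential anchors: the empty container is registered
+    # before its children are constructed, so an alias inside the node can
+    # resolve to the container itself.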
+
+    def construct_yaml_map(self, node):
+        data = {}
+        yield data
+        value = self.construct_mapping(node)
+        data.update(value)
+
+    def construct_yaml_object(self, node, cls):
+        data = cls.__new__(cls)
+        yield data
+        if hasattr(data, '__setstate__'):
+            state = self.construct_mapping(node, deep=True)
+            data.__setstate__(state)
+        else:
+            state = self.construct_mapping(node)
+            data.__dict__.update(state)
+
+    def construct_undefined(self, node):
+        raise ConstructorError(
+            None, None,
+            "could not determine a constructor for the tag %r" %
+            utf8(node.tag),
+            node.start_mark)
+
+SafeConstructor.add_constructor(
+    u'tag:yaml.org,2002:null',
+    SafeConstructor.construct_yaml_null)
+
+SafeConstructor.add_constructor(
+    u'tag:yaml.org,2002:bool',
+    SafeConstructor.construct_yaml_bool)
+
+SafeConstructor.add_constructor(
+    u'tag:yaml.org,2002:int',
+    SafeConstructor.construct_yaml_int)
+
+SafeConstructor.add_constructor(
+    u'tag:yaml.org,2002:float',
+    SafeConstructor.construct_yaml_float)
+
+SafeConstructor.add_constructor(
+    u'tag:yaml.org,2002:binary',
+    SafeConstructor.construct_yaml_binary)
+
+SafeConstructor.add_constructor(
+    u'tag:yaml.org,2002:timestamp',
+    SafeConstructor.construct_yaml_timestamp)
+
+SafeConstructor.add_constructor(
+    u'tag:yaml.org,2002:omap',
+    SafeConstructor.construct_yaml_omap)
+
+SafeConstructor.add_constructor(
+    u'tag:yaml.org,2002:pairs',
+    SafeConstructor.construct_yaml_pairs)
+
+SafeConstructor.add_constructor(
+    u'tag:yaml.org,2002:set',
+    SafeConstructor.construct_yaml_set)
+
+SafeConstructor.add_constructor(
+    u'tag:yaml.org,2002:str',
+    SafeConstructor.construct_yaml_str)
+
+SafeConstructor.add_constructor(
+    u'tag:yaml.org,2002:seq',
+    SafeConstructor.construct_yaml_seq)
+
+SafeConstructor.add_constructor(
+    u'tag:yaml.org,2002:map',
+    SafeConstructor.construct_yaml_map)
+
+SafeConstructor.add_constructor(
+    None, SafeConstructor.construct_undefined)
+
+
+class Constructor(SafeConstructor):
+
+    def construct_python_str(self, node):
+        return utf8(self.construct_scalar(node))
+
+    def construct_python_unicode(self, node):
+        return self.construct_scalar(node)
+
+    if PY3:
+        def construct_python_bytes(self, node):
+            try:
+                value = self.construct_scalar(node).encode('ascii')
+            except UnicodeEncodeError as exc:
+                raise ConstructorError(
+                    None, None,
+                    "failed to convert base64 data into ascii: %s" % exc,
+                    node.start_mark)
+            try:
+                if hasattr(base64, 'decodebytes'):
+                    return base64.decodebytes(value)
+                else:
+                    return base64.decodestring(value)
+            except binascii.Error as exc:
+                raise ConstructorError(
+                    None, None,
+                    "failed to decode base64 data: %s" % exc, node.start_mark)
+
+    def construct_python_long(self, node):
+        val = self.construct_yaml_int(node)
+        if PY3:
+            return val
+        return int(val)
+
+    def construct_python_complex(self, node):
+        return complex(self.construct_scalar(node))
+
+    def construct_python_tuple(self, node):
+        return tuple(self.construct_sequence(node))
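+
+    # e.g. ``!!python/tuple [1, 2]`` -> (1, 2)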
+
+    def find_python_module(self, name, mark):
+        if not name:
+            raise ConstructorError(
+                "while constructing a Python module", mark,
+                "expected non-empty name appended to the tag", mark)
+        try:
+            __import__(name)
+        except ImportError as exc:
+            raise ConstructorError(
+                "while constructing a Python module", mark,
+                "cannot find module %r (%s)" % (utf8(name), exc), mark)
+        return sys.modules[name]
+
+    def find_python_name(self, name, mark):
+        if not name:
+            raise ConstructorError(
+                "while constructing a Python object", mark,
+                "expected non-empty name appended to the tag", mark)
+        if u'.' in name:
+            module_name, object_name = name.rsplit('.', 1)
+        else:
+            module_name = builtins_module
+            object_name = name
+        try:
+            __import__(module_name)
+        except ImportError as exc:
+            raise ConstructorError(
+                "while constructing a Python object", mark,
+                "cannot find module %r (%s)" % (utf8(module_name), exc), mark)
+        module = sys.modules[module_name]
+        if not hasattr(module, object_name):
+            raise ConstructorError(
+                "while constructing a Python object", mark,
+                "cannot find %r in the module %r" % (utf8(object_name),
+                                                     module.__name__), mark)
+        return getattr(module, object_name)
+
+    def construct_python_name(self, suffix, node):
+        value = self.construct_scalar(node)
+        if value:
+            raise ConstructorError(
+                "while constructing a Python name", node.start_mark,
+                "expected the empty value, but found %r" % utf8(value),
+                node.start_mark)
+        return self.find_python_name(suffix, node.start_mark)
+
+    def construct_python_module(self, suffix, node):
+        value = self.construct_scalar(node)
+        if value:
+            raise ConstructorError(
+                "while constructing a Python module", node.start_mark,
+                "expected the empty value, but found %r" % utf8(value),
+                node.start_mark)
+        return self.find_python_module(suffix, node.start_mark)
+
+    if PY2:
+        class classobj:
+            pass
+
+    def make_python_instance(self, suffix, node,
+                             args=None, kwds=None, newobj=False):
+        if not args:
+            args = []
+        if not kwds:
+            kwds = {}
+        cls = self.find_python_name(suffix, node.start_mark)
+        if PY3:
+            if newobj and isinstance(cls, type):
+                return cls.__new__(cls, *args, **kwds)
+            else:
+                return cls(*args, **kwds)
+        else:
+            if newobj and isinstance(cls, type(self.classobj))  \
+                    and not args and not kwds:
+                instance = self.classobj()
+                instance.__class__ = cls
+                return instance
+            elif newobj and isinstance(cls, type):
+                return cls.__new__(cls, *args, **kwds)
+            else:
+                return cls(*args, **kwds)
+
+    def set_python_instance_state(self, instance, state):
+        if hasattr(instance, '__setstate__'):
+            instance.__setstate__(state)
+        else:
+            slotstate = {}
+            if isinstance(state, tuple) and len(state) == 2:
+                state, slotstate = state
+            if hasattr(instance, '__dict__'):
+                instance.__dict__.update(state)
+            elif state:
+                slotstate.update(state)
+            for key, value in slotstate.items():
+                setattr(instance, key, value)
+
+    def construct_python_object(self, suffix, node):
+        # Format:
+        #   !!python/object:module.name { ... state ... }
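+        # A hypothetical example: ``!!python/object:mymod.Point {x: 1, y: 2}``
+        # creates ``mymod.Point`` via __new__ and restores its state dict.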
+        instance = self.make_python_instance(suffix, node, newobj=True)
+        yield instance
+        deep = hasattr(instance, '__setstate__')
+        state = self.construct_mapping(node, deep=deep)
+        self.set_python_instance_state(instance, state)
+
+    def construct_python_object_apply(self, suffix, node, newobj=False):
+        # Format:
+        #   !!python/object/apply       # (or !!python/object/new)
+        #   args: [ ... arguments ... ]
+        #   kwds: { ... keywords ... }
+        #   state: ... state ...
+        #   listitems: [ ... listitems ... ]
+        #   dictitems: { ... dictitems ... }
+        # or short format:
+        #   !!python/object/apply [ ... arguments ... ]
+        # The difference between !!python/object/apply and !!python/object/new
+        # is how the object is created; see make_python_instance for details.
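+        # e.g. ``!!python/object/apply:complex [1, 2]`` -> complex(1, 2)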
+        if isinstance(node, SequenceNode):
+            args = self.construct_sequence(node, deep=True)
+            kwds = {}
+            state = {}
+            listitems = []
+            dictitems = {}
+        else:
+            value = self.construct_mapping(node, deep=True)
+            args = value.get('args', [])
+            kwds = value.get('kwds', {})
+            state = value.get('state', {})
+            listitems = value.get('listitems', [])
+            dictitems = value.get('dictitems', {})
+        instance = self.make_python_instance(suffix, node, args, kwds, newobj)
+        if state:
+            self.set_python_instance_state(instance, state)
+        if listitems:
+            instance.extend(listitems)
+        if dictitems:
+            for key in dictitems:
+                instance[key] = dictitems[key]
+        return instance
+
+    def construct_python_object_new(self, suffix, node):
+        return self.construct_python_object_apply(suffix, node, newobj=True)
+
+Constructor.add_constructor(
+    u'tag:yaml.org,2002:python/none',
+    Constructor.construct_yaml_null)
+
+Constructor.add_constructor(
+    u'tag:yaml.org,2002:python/bool',
+    Constructor.construct_yaml_bool)
+
+Constructor.add_constructor(
+    u'tag:yaml.org,2002:python/str',
+    Constructor.construct_python_str)
+
+Constructor.add_constructor(
+    u'tag:yaml.org,2002:python/unicode',
+    Constructor.construct_python_unicode)
+
+if PY3:
+    Constructor.add_constructor(
+        u'tag:yaml.org,2002:python/bytes',
+        Constructor.construct_python_bytes)
+
+Constructor.add_constructor(
+    u'tag:yaml.org,2002:python/int',
+    Constructor.construct_yaml_int)
+
+Constructor.add_constructor(
+    u'tag:yaml.org,2002:python/long',
+    Constructor.construct_python_long)
+
+Constructor.add_constructor(
+    u'tag:yaml.org,2002:python/float',
+    Constructor.construct_yaml_float)
+
+Constructor.add_constructor(
+    u'tag:yaml.org,2002:python/complex',
+    Constructor.construct_python_complex)
+
+Constructor.add_constructor(
+    u'tag:yaml.org,2002:python/list',
+    Constructor.construct_yaml_seq)
+
+Constructor.add_constructor(
+    u'tag:yaml.org,2002:python/tuple',
+    Constructor.construct_python_tuple)
+
+Constructor.add_constructor(
+    u'tag:yaml.org,2002:python/dict',
+    Constructor.construct_yaml_map)
+
+Constructor.add_multi_constructor(
+    u'tag:yaml.org,2002:python/name:',
+    Constructor.construct_python_name)
+
+Constructor.add_multi_constructor(
+    u'tag:yaml.org,2002:python/module:',
+    Constructor.construct_python_module)
+
+Constructor.add_multi_constructor(
+    u'tag:yaml.org,2002:python/object:',
+    Constructor.construct_python_object)
+
+Constructor.add_multi_constructor(
+    u'tag:yaml.org,2002:python/object/apply:',
+    Constructor.construct_python_object_apply)
+
+Constructor.add_multi_constructor(
+    u'tag:yaml.org,2002:python/object/new:',
+    Constructor.construct_python_object_new)
+
+
+class RoundTripConstructor(SafeConstructor):
+    """Constructor that stores comments on the node itself, as well as on
+    the items, so they can be reattached when dumping.
+    """
+
+    def construct_scalar(self, node):
+        if not isinstance(node, ScalarNode):
+            raise ConstructorError(
+                None, None,
+                "expected a scalar node, but found %s" % node.id,
+                node.start_mark)
+
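+        # Style-preserving wrappers returned below:
+        #   block literal ``|``  -> PreservedScalarString
+        #   single-quoted ``'``  -> SingleQuotedScalarString (preserve_quotes)
+        #   double-quoted ``"``  -> DoubleQuotedScalarString (preserve_quotes)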
+        if node.style == '|' and isinstance(node.value, text_type):
+            return PreservedScalarString(node.value)
+        elif self._preserve_quotes and isinstance(node.value, text_type):
+            if node.style == "'":
+                return SingleQuotedScalarString(node.value)
+            if node.style == '"':
+                return DoubleQuotedScalarString(node.value)
+        return node.value
+
+    def construct_yaml_str(self, node):
+        value = self.construct_scalar(node)
+        if isinstance(value, ScalarString):
+            return value
+        if PY3:
+            return value
+        try:
+            return value.encode('ascii')
+        except AttributeError:
+            # in case you replace the node dynamically e.g. with a dict
+            return value
+        except UnicodeEncodeError:
+            return value
+
+    def construct_sequence(self, node, seqtyp, deep=False):
+        if not isinstance(node, SequenceNode):
+            raise ConstructorError(
+                None, None,
+                "expected a sequence node, but found %s" % node.id,
+                node.start_mark)
+        ret_val = []
+        if node.comment:
+            seqtyp._yaml_add_comment(node.comment[:2])
+            if len(node.comment) > 2:
+                seqtyp.yaml_end_comment_extend(node.comment[2], clear=True)
+        if node.anchor:
+            from ruamel.yaml.serializer import templated_id
+            if not templated_id(node.anchor):
+                seqtyp.yaml_set_anchor(node.anchor)
+        for idx, child in enumerate(node.value):
+            ret_val.append(self.construct_object(child, deep=deep))
+            if child.comment:
+                seqtyp._yaml_add_comment(child.comment, key=idx)
+            seqtyp._yaml_set_idx_line_col(
+                idx, [child.start_mark.line, child.start_mark.column])
+        return ret_val
+
+    def flatten_mapping(self, node):
+        """
+        This implements the merge key feature (http://yaml.org/type/merge.html)
+        by inserting keys from the merged dict, or list of dicts, that are
+        not yet present in this node.
+        """
+
+        def constructed(value_node):
+            # If the contents of a merge are defined within the
+            # merge marker, then they won't have been constructed
+            # yet. But if they were already constructed, we need to use
+            # the existing object.
+            if value_node in self.constructed_objects:
+                value = self.constructed_objects[value_node]
+            else:
+                value = self.construct_object(value_node, deep=False)
+            return value
+
+        # merge = []
+        merge_map_list = []
+        index = 0
+        while index < len(node.value):
+            key_node, value_node = node.value[index]
+            if key_node.tag == u'tag:yaml.org,2002:merge':
+                del node.value[index]
+                if isinstance(value_node, MappingNode):
+                    merge_map_list.append(
+                        (index, constructed(value_node)))
+                    # self.flatten_mapping(value_node)
+                    # merge.extend(value_node.value)
+                elif isinstance(value_node, SequenceNode):
+                    # submerge = []
+                    for subnode in value_node.value:
+                        if not isinstance(subnode, MappingNode):
+                            raise ConstructorError(
+                                "while constructing a mapping",
+                                node.start_mark,
+                                "expected a mapping for merging, but found %s"
+                                % subnode.id, subnode.start_mark)
+                        merge_map_list.append(
+                            (index, constructed(subnode)))
+                    #     self.flatten_mapping(subnode)
+                    #     submerge.append(subnode.value)
+                    # submerge.reverse()
+                    # for value in submerge:
+                    #     merge.extend(value)
+                else:
+                    raise ConstructorError(
+                        "while constructing a mapping", node.start_mark,
+                        "expected a mapping or list of mappings for merging, "
+                        "but found %s"
+                        % value_node.id, value_node.start_mark)
+            elif key_node.tag == u'tag:yaml.org,2002:value':
+                key_node.tag = u'tag:yaml.org,2002:str'
+                index += 1
+            else:
+                index += 1
+        # print ('merge_map_list', merge_map_list)
+        return merge_map_list
+        # if merge:
+        #     node.value = merge + node.value
+
+    def construct_mapping(self, node, maptyp, deep=False):
+        if not isinstance(node, MappingNode):
+            raise ConstructorError(
+                None, None,
+                "expected a mapping node, but found %s" % node.id,
+                node.start_mark)
+        merge_map = self.flatten_mapping(node)
+        if merge_map:
+            maptyp.add_yaml_merge(merge_map)
+        # mapping = {}
+        if node.comment:
+            maptyp._yaml_add_comment(node.comment[:2])
+            if len(node.comment) > 2:
+                maptyp.yaml_end_comment_extend(node.comment[2], clear=True)
+        if node.anchor:
+            from ruamel.yaml.serializer import templated_id
+            if not templated_id(node.anchor):
+                maptyp.yaml_set_anchor(node.anchor)
+        for key_node, value_node in node.value:
+            # keys can be list -> deep
+            key = self.construct_object(key_node, deep=True)
+            # lists are not hashable, but tuples are
+            if not isinstance(key, collections.Hashable):
+                if isinstance(key, list):
+                    key = tuple(key)
+            if PY2:
+                try:
+                    hash(key)
+                except TypeError as exc:
+                    raise ConstructorError(
+                        "while constructing a mapping", node.start_mark,
+                        "found unacceptable key (%s)" %
+                        exc, key_node.start_mark)
+            else:
+                if not isinstance(key, collections.Hashable):
+                    raise ConstructorError(
+                        "while constructing a mapping", node.start_mark,
+                        "found unhashable key", key_node.start_mark)
+            value = self.construct_object(value_node, deep=deep)
+            if key_node.comment:
+                maptyp._yaml_add_comment(key_node.comment, key=key)
+            if value_node.comment:
+                maptyp._yaml_add_comment(value_node.comment, value=key)
+            maptyp._yaml_set_kv_line_col(
+                key, [key_node.start_mark.line, key_node.start_mark.column,
+                      value_node.start_mark.line, value_node.start_mark.column])
+            maptyp[key] = value
+
+    def construct_setting(self, node, typ, deep=False):
+        if not isinstance(node, MappingNode):
+            raise ConstructorError(
+                None, None,
+                "expected a mapping node, but found %s" % node.id,
+                node.start_mark)
+        if node.comment:
+            typ._yaml_add_comment(node.comment[:2])
+            if len(node.comment) > 2:
+                typ.yaml_end_comment_extend(node.comment[2], clear=True)
+        if node.anchor:
+            from ruamel.yaml.serializer import templated_id
+            if not templated_id(node.anchor):
+                typ.yaml_set_anchor(node.anchor)
+        for key_node, value_node in node.value:
+            # keys can be list -> deep
+            key = self.construct_object(key_node, deep=True)
+            # lists are not hashable, but tuples are
+            if not isinstance(key, collections.Hashable):
+                if isinstance(key, list):
+                    key = tuple(key)
+            if PY2:
+                try:
+                    hash(key)
+                except TypeError as exc:
+                    raise ConstructorError(
+                        "while constructing a mapping", node.start_mark,
+                        "found unacceptable key (%s)" %
+                        exc, key_node.start_mark)
+            else:
+                if not isinstance(key, collections.Hashable):
+                    raise ConstructorError(
+                        "while constructing a mapping", node.start_mark,
+                        "found unhashable key", key_node.start_mark)
+            value = self.construct_object(value_node, deep=deep)  # NOQA
+            if key_node.comment:
+                typ._yaml_add_comment(key_node.comment, key=key)
+            if value_node.comment:
+                typ._yaml_add_comment(value_node.comment, value=key)
+            typ.add(key)
+
+    def construct_yaml_seq(self, node):
+        data = CommentedSeq()
+        data._yaml_set_line_col(node.start_mark.line, node.start_mark.column)
+        if node.flow_style is True:
+            data.fa.set_flow_style()
+        elif node.flow_style is False:
+            data.fa.set_block_style()
+        if node.comment:
+            data._yaml_add_comment(node.comment)
+        yield data
+        data.extend(self.construct_sequence(node, data))
+
+    def construct_yaml_map(self, node):
+        data = CommentedMap()
+        data._yaml_set_line_col(node.start_mark.line, node.start_mark.column)
+        if node.flow_style is True:
+            data.fa.set_flow_style()
+        elif node.flow_style is False:
+            data.fa.set_block_style()
+        yield data
+        self.construct_mapping(node, data)
+
+    def construct_yaml_omap(self, node):
+        # Note: we now check for duplicate keys (via the assert below)
+        omap = CommentedOrderedMap()
+        omap._yaml_set_line_col(node.start_mark.line, node.start_mark.column)
+        if node.flow_style is True:
+            omap.fa.set_flow_style()
+        elif node.flow_style is False:
+            omap.fa.set_block_style()
+        yield omap
+        if node.comment:
+            omap._yaml_add_comment(node.comment[:2])
+            if len(node.comment) > 2:
+                omap.yaml_end_comment_extend(node.comment[2], clear=True)
+        if not isinstance(node, SequenceNode):
+            raise ConstructorError(
+                "while constructing an ordered map", node.start_mark,
+                "expected a sequence, but found %s" % node.id, node.start_mark)
+        for subnode in node.value:
+            if not isinstance(subnode, MappingNode):
+                raise ConstructorError(
+                    "while constructing an ordered map", node.start_mark,
+                    "expected a mapping of length 1, but found %s" %
+                    subnode.id,
+                    subnode.start_mark)
+            if len(subnode.value) != 1:
+                raise ConstructorError(
+                    "while constructing an ordered map", node.start_mark,
+                    "expected a single mapping item, but found %d items" %
+                    len(subnode.value),
+                    subnode.start_mark)
+            key_node, value_node = subnode.value[0]
+            key = self.construct_object(key_node)
+            assert key not in omap
+            value = self.construct_object(value_node)
+            if key_node.comment:
+                omap._yaml_add_comment(key_node.comment, key=key)
+            if subnode.comment:
+                omap._yaml_add_comment(subnode.comment, key=key)
+            if value_node.comment:
+                omap._yaml_add_comment(value_node.comment, value=key)
+            omap[key] = value
+
+    def construct_yaml_set(self, node):
+        data = CommentedSet()
+        data._yaml_set_line_col(node.start_mark.line, node.start_mark.column)
+        yield data
+        self.construct_setting(node, data)
+
+    def construct_undefined(self, node):
+        try:
+            data = CommentedMap()
+            data._yaml_set_line_col(node.start_mark.line, node.start_mark.column)
+            if node.flow_style is True:
+                data.fa.set_flow_style()
+            elif node.flow_style is False:
+                data.fa.set_block_style()
+            data.yaml_set_tag(node.tag)
+            yield data
+            self.construct_mapping(node, data)
+        except Exception:
+            raise ConstructorError(
+                None, None,
+                "could not determine a constructor for the tag %r" %
+                utf8(node.tag),
+                node.start_mark)
+
+
+RoundTripConstructor.add_constructor(
+    u'tag:yaml.org,2002:null',
+    RoundTripConstructor.construct_yaml_null)
+
+RoundTripConstructor.add_constructor(
+    u'tag:yaml.org,2002:bool',
+    RoundTripConstructor.construct_yaml_bool)
+
+RoundTripConstructor.add_constructor(
+    u'tag:yaml.org,2002:int',
+    RoundTripConstructor.construct_yaml_int)
+
+RoundTripConstructor.add_constructor(
+    u'tag:yaml.org,2002:float',
+    RoundTripConstructor.construct_yaml_float)
+
+RoundTripConstructor.add_constructor(
+    u'tag:yaml.org,2002:binary',
+    RoundTripConstructor.construct_yaml_binary)
+
+RoundTripConstructor.add_constructor(
+    u'tag:yaml.org,2002:timestamp',
+    RoundTripConstructor.construct_yaml_timestamp)
+
+RoundTripConstructor.add_constructor(
+    u'tag:yaml.org,2002:omap',
+    RoundTripConstructor.construct_yaml_omap)
+
+RoundTripConstructor.add_constructor(
+    u'tag:yaml.org,2002:pairs',
+    RoundTripConstructor.construct_yaml_pairs)
+
+RoundTripConstructor.add_constructor(
+    u'tag:yaml.org,2002:set',
+    RoundTripConstructor.construct_yaml_set)
+
+RoundTripConstructor.add_constructor(
+    u'tag:yaml.org,2002:str',
+    RoundTripConstructor.construct_yaml_str)
+
+RoundTripConstructor.add_constructor(
+    u'tag:yaml.org,2002:seq',
+    RoundTripConstructor.construct_yaml_seq)
+
+RoundTripConstructor.add_constructor(
+    u'tag:yaml.org,2002:map',
+    RoundTripConstructor.construct_yaml_map)
+
+RoundTripConstructor.add_constructor(
+    None, RoundTripConstructor.construct_undefined)
diff --git a/lib/spack/external/ruamel/yaml/dumper.py b/lib/spack/external/ruamel/yaml/dumper.py
new file mode 100644
index 0000000000..90e2ca9d1b
--- /dev/null
+++ b/lib/spack/external/ruamel/yaml/dumper.py
@@ -0,0 +1,102 @@
+# coding: utf-8
+
+from __future__ import absolute_import
+
+__all__ = ['BaseDumper', 'SafeDumper', 'Dumper', 'RoundTripDumper']
+
+try:
+    from .emitter import *                               # NOQA
+    from .serializer import *                               # NOQA
+    from .representer import *                               # NOQA
+    from .resolver import *                               # NOQA
+except (ImportError, ValueError):  # for Jython
+    from ruamel.yaml.emitter import *                               # NOQA
+    from ruamel.yaml.serializer import *                               # NOQA
+    from ruamel.yaml.representer import *                               # NOQA
+    from ruamel.yaml.resolver import *                               # NOQA
+
+
+class BaseDumper(Emitter, Serializer, BaseRepresenter, BaseResolver):
+    def __init__(self, stream,
+                 default_style=None, default_flow_style=None,
+                 canonical=None, indent=None, width=None,
+                 allow_unicode=None, line_break=None,
+                 encoding=None, explicit_start=None, explicit_end=None,
+                 version=None, tags=None, block_seq_indent=None,
+                 top_level_colon_align=None, prefix_colon=None):
+        Emitter.__init__(self, stream, canonical=canonical,
+                         indent=indent, width=width,
+                         allow_unicode=allow_unicode, line_break=line_break,
+                         block_seq_indent=block_seq_indent)
+        Serializer.__init__(self, encoding=encoding,
+                            explicit_start=explicit_start,
+                            explicit_end=explicit_end,
+                            version=version, tags=tags)
+        Representer.__init__(self, default_style=default_style,
+                             default_flow_style=default_flow_style)
+        Resolver.__init__(self)
+
+
+class SafeDumper(Emitter, Serializer, SafeRepresenter, Resolver):
+    def __init__(self, stream,
+                 default_style=None, default_flow_style=None,
+                 canonical=None, indent=None, width=None,
+                 allow_unicode=None, line_break=None,
+                 encoding=None, explicit_start=None, explicit_end=None,
+                 version=None, tags=None, block_seq_indent=None,
+                 top_level_colon_align=None, prefix_colon=None):
+        Emitter.__init__(self, stream, canonical=canonical,
+                         indent=indent, width=width,
+                         allow_unicode=allow_unicode, line_break=line_break,
+                         block_seq_indent=block_seq_indent)
+        Serializer.__init__(self, encoding=encoding,
+                            explicit_start=explicit_start,
+                            explicit_end=explicit_end,
+                            version=version, tags=tags)
+        SafeRepresenter.__init__(self, default_style=default_style,
+                                 default_flow_style=default_flow_style)
+        Resolver.__init__(self)
+
+
+class Dumper(Emitter, Serializer, Representer, Resolver):
+    def __init__(self, stream,
+                 default_style=None, default_flow_style=None,
+                 canonical=None, indent=None, width=None,
+                 allow_unicode=None, line_break=None,
+                 encoding=None, explicit_start=None, explicit_end=None,
+                 version=None, tags=None, block_seq_indent=None,
+                 top_level_colon_align=None, prefix_colon=None):
+        Emitter.__init__(self, stream, canonical=canonical,
+                         indent=indent, width=width,
+                         allow_unicode=allow_unicode, line_break=line_break,
+                         block_seq_indent=block_seq_indent)
+        Serializer.__init__(self, encoding=encoding,
+                            explicit_start=explicit_start,
+                            explicit_end=explicit_end,
+                            version=version, tags=tags)
+        Representer.__init__(self, default_style=default_style,
+                             default_flow_style=default_flow_style)
+        Resolver.__init__(self)
+
+
+class RoundTripDumper(Emitter, Serializer, RoundTripRepresenter, VersionedResolver):
+    def __init__(self, stream,
+                 default_style=None, default_flow_style=None,
+                 canonical=None, indent=None, width=None,
+                 allow_unicode=None, line_break=None,
+                 encoding=None, explicit_start=None, explicit_end=None,
+                 version=None, tags=None, block_seq_indent=None,
+                 top_level_colon_align=None, prefix_colon=None):
+        Emitter.__init__(self, stream, canonical=canonical,
+                         indent=indent, width=width,
+                         allow_unicode=allow_unicode, line_break=line_break,
+                         block_seq_indent=block_seq_indent,
+                         top_level_colon_align=top_level_colon_align,
+                         prefix_colon=prefix_colon)
+        Serializer.__init__(self, encoding=encoding,
+                            explicit_start=explicit_start,
+                            explicit_end=explicit_end,
+                            version=version, tags=tags)
+        RoundTripRepresenter.__init__(self, default_style=default_style,
+                                      default_flow_style=default_flow_style)
+        VersionedResolver.__init__(self)
diff --git a/lib/spack/external/yaml/lib/yaml/emitter.py b/lib/spack/external/ruamel/yaml/emitter.py
similarity index 77%
rename from lib/spack/external/yaml/lib/yaml/emitter.py
rename to lib/spack/external/ruamel/yaml/emitter.py
index e5bcdcccbb..b754bc04e1 100644
--- a/lib/spack/external/yaml/lib/yaml/emitter.py
+++ b/lib/spack/external/ruamel/yaml/emitter.py
@@ -1,3 +1,7 @@
+# coding: utf-8
+
+from __future__ import absolute_import
+from __future__ import print_function
 
 # Emitter expects events obeying the following grammar:
 # stream ::= STREAM-START document* STREAM-END
@@ -8,17 +12,25 @@
 
 __all__ = ['Emitter', 'EmitterError']
 
-from error import YAMLError
-from events import *
+try:
+    from .error import YAMLError
+    from .events import *                                                # NOQA
+    from .compat import utf8, text_type, PY2, nprint, dbg, DBG_EVENT
+except (ImportError, ValueError):  # for Jython
+    from ruamel.yaml.error import YAMLError
+    from ruamel.yaml.events import *                                     # NOQA
+    from ruamel.yaml.compat import utf8, text_type, PY2, nprint, dbg, DBG_EVENT
+
 
 class EmitterError(YAMLError):
     pass
 
+
 class ScalarAnalysis(object):
     def __init__(self, scalar, empty, multiline,
-            allow_flow_plain, allow_block_plain,
-            allow_single_quoted, allow_double_quoted,
-            allow_block):
+                 allow_flow_plain, allow_block_plain,
+                 allow_single_quoted, allow_double_quoted,
+                 allow_block):
         self.scalar = scalar
         self.empty = empty
         self.multiline = multiline
@@ -28,15 +40,18 @@ def __init__(self, scalar, empty, multiline,
         self.allow_double_quoted = allow_double_quoted
         self.allow_block = allow_block
 
-class Emitter(object):
 
+class Emitter(object):
     DEFAULT_TAG_PREFIXES = {
-        u'!' : u'!',
-        u'tag:yaml.org,2002:' : u'!!',
+        u'!': u'!',
+        u'tag:yaml.org,2002:': u'!!',
     }
 
+    MAX_SIMPLE_KEY_LENGTH = 128
+
     def __init__(self, stream, canonical=None, indent=None, width=None,
-            allow_unicode=None, line_break=None):
+                 allow_unicode=None, line_break=None, block_seq_indent=None,
+                 top_level_colon_align=None, prefix_colon=None):
 
         # The stream should have the methods `write` and possibly `flush`.
         self.stream = stream
@@ -75,16 +90,25 @@ def __init__(self, stream, canonical=None, indent=None, width=None,
         self.column = 0
         self.whitespace = True
         self.indention = True
+        self.no_newline = None  # set if directly after `- `
 
         # Whether the document requires an explicit document indicator
         self.open_ended = False
 
+        # colon handling
+        self.colon = u':'
+        self.prefixed_colon = self.colon if prefix_colon is None else prefix_colon + self.colon
+
         # Formatting details.
         self.canonical = canonical
         self.allow_unicode = allow_unicode
+        self.block_seq_indent = block_seq_indent if block_seq_indent else 0
+        self.top_level_colon_align = top_level_colon_align
         self.best_indent = 2
         if indent and 1 < indent < 10:
             self.best_indent = indent
+        # if self.best_indent < self.block_seq_indent + 1:
+        #     self.best_indent = self.block_seq_indent + 1
         self.best_width = 80
         if width and width > self.best_indent*2:
             self.best_width = width
@@ -109,6 +133,8 @@ def dispose(self):
         self.state = None
 
     def emit(self, event):
+        if dbg(DBG_EVENT):
+            nprint(event)
         self.events.append(event)
         while not self.need_more_events():
             self.event = self.events.pop(0)
@@ -143,7 +169,7 @@ def need_events(self, count):
                 return False
         return (len(self.events) < count+1)
 
-    def increase_indent(self, flow=False, indentless=False):
+    def increase_indent(self, flow=False, sequence=None, indentless=False):
         self.indents.append(self.indent)
         if self.indent is None:
             if flow:
@@ -152,6 +178,8 @@ def increase_indent(self, flow=False, indentless=False):
                 self.indent = 0
         elif not indentless:
             self.indent += self.best_indent
+            # if self.sequence_context and (self.block_seq_indent + 2) > self.best_indent:
+            #    self.indent = self.block_seq_indent + 2
 
     # States.
 
@@ -159,13 +187,19 @@ def increase_indent(self, flow=False, indentless=False):
 
     def expect_stream_start(self):
         if isinstance(self.event, StreamStartEvent):
-            if self.event.encoding and not getattr(self.stream, 'encoding', None):
-                self.encoding = self.event.encoding
+            if PY2:
+                if self.event.encoding \
+                   and not getattr(self.stream, 'encoding', None):
+                    self.encoding = self.event.encoding
+            else:
+                if self.event.encoding \
+                   and not hasattr(self.stream, 'encoding'):
+                    self.encoding = self.event.encoding
             self.write_stream_start()
             self.state = self.expect_first_document_start
         else:
-            raise EmitterError("expected StreamStartEvent, but got %s"
-                    % self.event)
+            raise EmitterError("expected StreamStartEvent, but got %s" %
+                               self.event)
 
     def expect_nothing(self):
         raise EmitterError("expected nothing, but got %s" % self.event)
@@ -185,17 +219,19 @@ def expect_document_start(self, first=False):
                 self.write_version_directive(version_text)
             self.tag_prefixes = self.DEFAULT_TAG_PREFIXES.copy()
             if self.event.tags:
-                handles = self.event.tags.keys()
-                handles.sort()
+                handles = sorted(self.event.tags.keys())
                 for handle in handles:
                     prefix = self.event.tags[handle]
                     self.tag_prefixes[prefix] = handle
                     handle_text = self.prepare_tag_handle(handle)
                     prefix_text = self.prepare_tag_prefix(prefix)
                     self.write_tag_directive(handle_text, prefix_text)
-            implicit = (first and not self.event.explicit and not self.canonical
-                    and not self.event.version and not self.event.tags
-                    and not self.check_empty_document())
+            implicit = (first and
+                        not self.event.explicit and
+                        not self.canonical and
+                        not self.event.version and
+                        not self.event.tags and
+                        not self.check_empty_document())
             if not implicit:
                 self.write_indent()
                 self.write_indicator(u'---', True)
@@ -209,8 +245,8 @@ def expect_document_start(self, first=False):
             self.write_stream_end()
             self.state = self.expect_nothing
         else:
-            raise EmitterError("expected DocumentStartEvent, but got %s"
-                    % self.event)
+            raise EmitterError("expected DocumentStartEvent, but got %s" %
+                               self.event)
 
     def expect_document_end(self):
         if isinstance(self.event, DocumentEndEvent):
@@ -221,8 +257,8 @@ def expect_document_end(self):
             self.flush_stream()
             self.state = self.expect_document_start
         else:
-            raise EmitterError("expected DocumentEndEvent, but got %s"
-                    % self.event)
+            raise EmitterError("expected DocumentEndEvent, but got %s" %
+                               self.event)
 
     def expect_document_root(self):
         self.states.append(self.expect_document_end)
@@ -231,9 +267,9 @@ def expect_document_root(self):
     # Node handlers.
 
     def expect_node(self, root=False, sequence=False, mapping=False,
-            simple_key=False):
+                    simple_key=False):
         self.root_context = root
-        self.sequence_context = sequence
+        self.sequence_context = sequence   # not used in PyYAML
         self.mapping_context = mapping
         self.simple_key_context = simple_key
         if isinstance(self.event, AliasEvent):
@@ -244,13 +280,22 @@ def expect_node(self, root=False, sequence=False, mapping=False,
             if isinstance(self.event, ScalarEvent):
                 self.expect_scalar()
             elif isinstance(self.event, SequenceStartEvent):
-                if self.flow_level or self.canonical or self.event.flow_style   \
-                        or self.check_empty_sequence():
+                if self.event.comment:
+                    self.write_pre_comment(self.event)
+                    if self.event.flow_style is False and self.event.comment:
+                        self.write_post_comment(self.event)
+                # print('seq event', self.event)
+                if self.flow_level or self.canonical or self.event.flow_style or \
+                        self.check_empty_sequence():
                     self.expect_flow_sequence()
                 else:
                     self.expect_block_sequence()
             elif isinstance(self.event, MappingStartEvent):
-                if self.flow_level or self.canonical or self.event.flow_style   \
+                if self.event.flow_style is False and self.event.comment:
+                    self.write_post_comment(self.event)
+                if self.event.comment and self.event.comment[1]:
+                    self.write_pre_comment(self.event)
+                if self.flow_level or self.canonical or self.event.flow_style \
                         or self.check_empty_mapping():
                     self.expect_flow_mapping()
                 else:
@@ -275,7 +320,7 @@ def expect_scalar(self):
     def expect_flow_sequence(self):
         self.write_indicator(u'[', True, whitespace=True)
         self.flow_level += 1
-        self.increase_indent(flow=True)
+        self.increase_indent(flow=True, sequence=True)
         self.state = self.expect_first_flow_sequence_item
 
     def expect_first_flow_sequence_item(self):
@@ -298,6 +343,9 @@ def expect_flow_sequence_item(self):
                 self.write_indicator(u',', False)
                 self.write_indent()
             self.write_indicator(u']', False)
+            if self.event.comment and self.event.comment[0]:
+                # eol comment on flow sequence
+                self.write_post_comment(self.event)
             self.state = self.states.pop()
         else:
             self.write_indicator(u',', False)
@@ -311,7 +359,7 @@ def expect_flow_sequence_item(self):
     def expect_flow_mapping(self):
         self.write_indicator(u'{', True, whitespace=True)
         self.flow_level += 1
-        self.increase_indent(flow=True)
+        self.increase_indent(flow=True, sequence=False)
         self.state = self.expect_first_flow_mapping_key
 
     def expect_first_flow_mapping_key(self):
@@ -319,6 +367,9 @@ def expect_first_flow_mapping_key(self):
             self.indent = self.indents.pop()
             self.flow_level -= 1
             self.write_indicator(u'}', False)
+            # if self.event.comment and self.event.comment[0]:
+            #     # eol comment on flow sequence
+            #     self.write_post_comment(self.event)
             self.state = self.states.pop()
         else:
             if self.canonical or self.column > self.best_width:
@@ -333,12 +384,17 @@ def expect_first_flow_mapping_key(self):
 
     def expect_flow_mapping_key(self):
         if isinstance(self.event, MappingEndEvent):
+            # if self.event.comment and self.event.comment[1]:
+            #     self.write_pre_comment(self.event)
             self.indent = self.indents.pop()
             self.flow_level -= 1
             if self.canonical:
                 self.write_indicator(u',', False)
                 self.write_indent()
             self.write_indicator(u'}', False)
+            if self.event.comment and self.event.comment[0]:
+                # eol comment on flow mapping
+                self.write_post_comment(self.event)
             self.state = self.states.pop()
         else:
             self.write_indicator(u',', False)
@@ -353,14 +409,14 @@ def expect_flow_mapping_key(self):
                 self.expect_node(mapping=True)
 
     def expect_flow_mapping_simple_value(self):
-        self.write_indicator(u':', False)
+        self.write_indicator(self.prefixed_colon, False)
         self.states.append(self.expect_flow_mapping_key)
         self.expect_node(mapping=True)
 
     def expect_flow_mapping_value(self):
         if self.canonical or self.column > self.best_width:
             self.write_indent()
-        self.write_indicator(u':', True)
+        self.write_indicator(self.prefixed_colon, True)
         self.states.append(self.expect_flow_mapping_key)
         self.expect_node(mapping=True)
 
@@ -368,7 +424,7 @@ def expect_flow_mapping_value(self):
 
     def expect_block_sequence(self):
         indentless = (self.mapping_context and not self.indention)
-        self.increase_indent(flow=False, indentless=indentless)
+        self.increase_indent(flow=False, sequence=True, indentless=indentless)
         self.state = self.expect_first_block_sequence_item
 
     def expect_first_block_sequence_item(self):
@@ -376,18 +432,26 @@ def expect_first_block_sequence_item(self):
 
     def expect_block_sequence_item(self, first=False):
         if not first and isinstance(self.event, SequenceEndEvent):
+            if self.event.comment and self.event.comment[1]:
+                # final comments from a doc
+                self.write_pre_comment(self.event)
             self.indent = self.indents.pop()
             self.state = self.states.pop()
         else:
             self.write_indent()
-            self.write_indicator(u'-', True, indention=True)
+            if self.event.comment and self.event.comment[1]:
+                self.write_pre_comment(self.event)
+            self.write_indent()
+            self.write_indicator((u' ' * self.block_seq_indent) + u'-', True, indention=True)
+            if self.block_seq_indent + 2 > self.best_indent:
+                self.no_newline = True
             self.states.append(self.expect_block_sequence_item)
             self.expect_node(sequence=True)
 
     # Block mapping handlers.
 
     def expect_block_mapping(self):
-        self.increase_indent(flow=False)
+        self.increase_indent(flow=False, sequence=False)
         self.state = self.expect_first_block_mapping_key
 
     def expect_first_block_mapping_key(self):
@@ -395,11 +459,19 @@ def expect_first_block_mapping_key(self):
 
     def expect_block_mapping_key(self, first=False):
         if not first and isinstance(self.event, MappingEndEvent):
+            if self.event.comment and self.event.comment[1]:
+                # final comments from a doc
+                self.write_pre_comment(self.event)
             self.indent = self.indents.pop()
             self.state = self.states.pop()
         else:
+            if self.event.comment and self.event.comment[1]:
+                # final comments from a doc
+                self.write_pre_comment(self.event)
             self.write_indent()
             if self.check_simple_key():
+                if self.event.style == '?':
+                    self.write_indicator(u'?', True, indention=True)
                 self.states.append(self.expect_block_mapping_simple_value)
                 self.expect_node(mapping=True, simple_key=True)
             else:
@@ -408,32 +480,39 @@ def expect_block_mapping_key(self, first=False):
                 self.expect_node(mapping=True)
 
     def expect_block_mapping_simple_value(self):
-        self.write_indicator(u':', False)
+        if getattr(self.event, 'style', None) != '?':
+            # prefix = u''
+            if self.indent == 0 and self.top_level_colon_align is not None:
+                # write non-prefixed colon
+                c = u' ' * (self.top_level_colon_align - self.column) + self.colon
+            else:
+                c = self.prefixed_colon
+            self.write_indicator(c, False)
         self.states.append(self.expect_block_mapping_key)
         self.expect_node(mapping=True)
 
     def expect_block_mapping_value(self):
         self.write_indent()
-        self.write_indicator(u':', True, indention=True)
+        self.write_indicator(self.prefixed_colon, True, indention=True)
         self.states.append(self.expect_block_mapping_key)
         self.expect_node(mapping=True)
 
     # Checkers.
 
     def check_empty_sequence(self):
-        return (isinstance(self.event, SequenceStartEvent) and self.events
-                and isinstance(self.events[0], SequenceEndEvent))
+        return (isinstance(self.event, SequenceStartEvent) and self.events and
+                isinstance(self.events[0], SequenceEndEvent))
 
     def check_empty_mapping(self):
-        return (isinstance(self.event, MappingStartEvent) and self.events
-                and isinstance(self.events[0], MappingEndEvent))
+        return (isinstance(self.event, MappingStartEvent) and self.events and
+                isinstance(self.events[0], MappingEndEvent))
 
     def check_empty_document(self):
         if not isinstance(self.event, DocumentStartEvent) or not self.events:
             return False
         event = self.events[0]
-        return (isinstance(event, ScalarEvent) and event.anchor is None
-                and event.tag is None and event.implicit and event.value == u'')
+        return (isinstance(event, ScalarEvent) and event.anchor is None and
+                event.tag is None and event.implicit and event.value == u'')
 
     def check_simple_key(self):
         length = 0
@@ -450,10 +529,11 @@ def check_simple_key(self):
             if self.analysis is None:
                 self.analysis = self.analyze_scalar(self.event.value)
             length += len(self.analysis.scalar)
-        return (length < 128 and (isinstance(self.event, AliasEvent)
-            or (isinstance(self.event, ScalarEvent)
-                    and not self.analysis.empty and not self.analysis.multiline)
-            or self.check_empty_sequence() or self.check_empty_mapping()))
+        return (length < self.MAX_SIMPLE_KEY_LENGTH and (
+            isinstance(self.event, AliasEvent) or
+            (isinstance(self.event, ScalarEvent) and
+             not self.analysis.empty and not self.analysis.multiline) or
+            self.check_empty_sequence() or self.check_empty_mapping()))
 
     # Anchor, Tag, and Scalar processors.
 
@@ -473,8 +553,8 @@ def process_tag(self):
             if self.style is None:
                 self.style = self.choose_scalar_style()
             if ((not self.canonical or tag is None) and
-                ((self.style == '' and self.event.implicit[0])
-                        or (self.style != '' and self.event.implicit[1]))):
+                ((self.style == '' and self.event.implicit[0]) or
+                 (self.style != '' and self.event.implicit[1]))):
                 self.prepared_tag = None
                 return
             if self.event.implicit[0] and tag is None:
@@ -497,15 +577,16 @@ def choose_scalar_style(self):
             self.analysis = self.analyze_scalar(self.event.value)
         if self.event.style == '"' or self.canonical:
             return '"'
-        if not self.event.style and self.event.implicit[0]:
+        if (not self.event.style or self.event.style == '?') and \
+           self.event.implicit[0]:
             if (not (self.simple_key_context and
-                    (self.analysis.empty or self.analysis.multiline))
-                and (self.flow_level and self.analysis.allow_flow_plain
-                    or (not self.flow_level and self.analysis.allow_block_plain))):
+                     (self.analysis.empty or self.analysis.multiline)) and
+                (self.flow_level and self.analysis.allow_flow_plain or
+                    (not self.flow_level and self.analysis.allow_block_plain))):
                 return ''
         if self.event.style and self.event.style in '|>':
-            if (not self.flow_level and not self.simple_key_context
-                    and self.analysis.allow_block):
+            if (not self.flow_level and not self.simple_key_context and
+                    self.analysis.allow_block):
                 return self.event.style
         if not self.event.style or self.event.style == '\'':
             if (self.analysis.allow_single_quoted and
@@ -519,9 +600,11 @@ def process_scalar(self):
         if self.style is None:
             self.style = self.choose_scalar_style()
         split = (not self.simple_key_context)
-        #if self.analysis.multiline and split    \
-        #        and (not self.style or self.style in '\'\"'):
-        #    self.write_indent()
+        # if self.analysis.multiline and split    \
+        #         and (not self.style or self.style in '\'\"'):
+        #     self.write_indent()
+        if self.sequence_context and not self.flow_level:
+            self.write_indent()
         if self.style == '"':
             self.write_double_quoted(self.analysis.scalar, split)
         elif self.style == '\'':
@@ -534,13 +617,16 @@ def process_scalar(self):
             self.write_plain(self.analysis.scalar, split)
         self.analysis = None
         self.style = None
+        if self.event.comment:
+            self.write_post_comment(self.event)
 
     # Analyzers.
 
     def prepare_version(self, version):
         major, minor = version
         if major != 1:
-            raise EmitterError("unsupported YAML version: %d.%d" % (major, minor))
+            raise EmitterError("unsupported YAML version: %d.%d" %
+                               (major, minor))
         return u'%d.%d' % (major, minor)
 
     def prepare_tag_handle(self, handle):
@@ -548,12 +634,12 @@ def prepare_tag_handle(self, handle):
             raise EmitterError("tag handle must not be empty")
         if handle[0] != u'!' or handle[-1] != u'!':
             raise EmitterError("tag handle must start and end with '!': %r"
-                    % (handle.encode('utf-8')))
+                               % (utf8(handle)))
         for ch in handle[1:-1]:
-            if not (u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z'  \
-                    or ch in u'-_'):
+            if not (u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or
+                    u'a' <= ch <= u'z' or ch in u'-_'):
                 raise EmitterError("invalid character %r in the tag handle: %r"
-                        % (ch.encode('utf-8'), handle.encode('utf-8')))
+                                   % (utf8(ch), utf8(handle)))
         return handle
 
     def prepare_tag_prefix(self, prefix):
@@ -572,7 +658,7 @@ def prepare_tag_prefix(self, prefix):
                 if start < end:
                     chunks.append(prefix[start:end])
                 start = end = end+1
-                data = ch.encode('utf-8')
+                data = utf8(ch)
                 for ch in data:
                     chunks.append(u'%%%02X' % ord(ch))
         if start < end:
@@ -586,8 +672,7 @@ def prepare_tag(self, tag):
             return tag
         handle = None
         suffix = tag
-        prefixes = self.tag_prefixes.keys()
-        prefixes.sort()
+        prefixes = sorted(self.tag_prefixes.keys())
         for prefix in prefixes:
             if tag.startswith(prefix)   \
                     and (prefix == u'!' or len(prefix) < len(tag)):
@@ -605,7 +690,7 @@ def prepare_tag(self, tag):
                 if start < end:
                     chunks.append(suffix[start:end])
                 start = end = end+1
-                data = ch.encode('utf-8')
+                data = utf8(ch)
                 for ch in data:
                     chunks.append(u'%%%02X' % ord(ch))
         if start < end:
@@ -620,20 +705,21 @@ def prepare_anchor(self, anchor):
         if not anchor:
             raise EmitterError("anchor must not be empty")
         for ch in anchor:
-            if not (u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z'  \
-                    or ch in u'-_'):
+            if not (u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or
+                    u'a' <= ch <= u'z' or ch in u'-_'):
                 raise EmitterError("invalid character %r in the anchor: %r"
-                        % (ch.encode('utf-8'), anchor.encode('utf-8')))
+                                   % (utf8(ch), utf8(anchor)))
         return anchor
 
     def analyze_scalar(self, scalar):
 
         # Empty scalar is a special case.
         if not scalar:
-            return ScalarAnalysis(scalar=scalar, empty=True, multiline=False,
-                    allow_flow_plain=False, allow_block_plain=True,
-                    allow_single_quoted=True, allow_double_quoted=True,
-                    allow_block=False)
+            return ScalarAnalysis(
+                scalar=scalar, empty=True, multiline=False,
+                allow_flow_plain=False, allow_block_plain=True,
+                allow_single_quoted=True, allow_double_quoted=True,
+                allow_block=False)
 
         # Indicators and special characters.
         block_indicators = False
@@ -659,7 +745,7 @@ def analyze_scalar(self, scalar):
 
         # Last character or followed by a whitespace.
         followed_by_whitespace = (len(scalar) == 1 or
-                scalar[1] in u'\0 \t\r\n\x85\u2028\u2029')
+                                  scalar[1] in u'\0 \t\r\n\x85\u2028\u2029')
 
         # The previous character is a space.
         previous_space = False
@@ -674,7 +760,7 @@ def analyze_scalar(self, scalar):
             # Check for indicators.
             if index == 0:
                 # Leading indicators are special characters.
-                if ch in u'#,[]{}&*!|>\'\"%@`': 
+                if ch in u'#,[]{}&*!|>\'\"%@`':
                     flow_indicators = True
                     block_indicators = True
                 if ch in u'?:':
@@ -700,9 +786,9 @@ def analyze_scalar(self, scalar):
             if ch in u'\n\x85\u2028\u2029':
                 line_breaks = True
             if not (ch == u'\n' or u'\x20' <= ch <= u'\x7E'):
-                if (ch == u'\x85' or u'\xA0' <= ch <= u'\uD7FF'
-                        or u'\uE000' <= ch <= u'\uFFFD') and ch != u'\uFEFF':
-                    unicode_characters = True
+                if (ch == u'\x85' or u'\xA0' <= ch <= u'\uD7FF' or
+                        u'\uE000' <= ch <= u'\uFFFD') and ch != u'\uFEFF':
+                    # unicode_characters = True
                     if not self.allow_unicode:
                         special_characters = True
                 else:
@@ -734,8 +820,9 @@ def analyze_scalar(self, scalar):
             # Prepare for the next character.
             index += 1
             preceeded_by_whitespace = (ch in u'\0 \t\r\n\x85\u2028\u2029')
-            followed_by_whitespace = (index+1 >= len(scalar) or
-                    scalar[index+1] in u'\0 \t\r\n\x85\u2028\u2029')
+            followed_by_whitespace = (
+                index+1 >= len(scalar) or
+                scalar[index+1] in u'\0 \t\r\n\x85\u2028\u2029')
 
         # Let's decide what styles are allowed.
         allow_flow_plain = True
@@ -745,8 +832,7 @@ def analyze_scalar(self, scalar):
         allow_block = True
 
         # Leading and trailing whitespaces are bad for plain scalars.
-        if (leading_space or leading_break
-                or trailing_space or trailing_break):
+        if (leading_space or leading_break or trailing_space or trailing_break):
             allow_flow_plain = allow_block_plain = False
 
         # We do not permit trailing spaces for block scalars.
@@ -761,8 +847,8 @@ def analyze_scalar(self, scalar):
         # Spaces followed by breaks, as well as special character are only
         # allowed for double quoted scalars.
         if space_break or special_characters:
-            allow_flow_plain = allow_block_plain =  \
-            allow_single_quoted = allow_block = False
+            allow_flow_plain = allow_block_plain = \
+                allow_single_quoted = allow_block = False
 
         # Although the plain scalar writer supports breaks, we never emit
         # multiline plain scalars.
@@ -778,12 +864,12 @@ def analyze_scalar(self, scalar):
             allow_block_plain = False
 
         return ScalarAnalysis(scalar=scalar,
-                empty=False, multiline=line_breaks,
-                allow_flow_plain=allow_flow_plain,
-                allow_block_plain=allow_block_plain,
-                allow_single_quoted=allow_single_quoted,
-                allow_double_quoted=allow_double_quoted,
-                allow_block=allow_block)
+                              empty=False, multiline=line_breaks,
+                              allow_flow_plain=allow_flow_plain,
+                              allow_block_plain=allow_block_plain,
+                              allow_single_quoted=allow_single_quoted,
+                              allow_double_quoted=allow_double_quoted,
+                              allow_block=allow_block)
 
     # Writers.
 
@@ -800,7 +886,7 @@ def write_stream_end(self):
         self.flush_stream()
 
     def write_indicator(self, indicator, need_whitespace,
-            whitespace=False, indention=False):
+                        whitespace=False, indention=False):
         if self.whitespace or not need_whitespace:
             data = indicator
         else:
@@ -817,7 +903,10 @@ def write_indent(self):
         indent = self.indent or 0
         if not self.indention or self.column > indent   \
                 or (self.column == indent and not self.whitespace):
-            self.write_line_break()
+            if self.no_newline:
+                self.no_newline = False
+            else:
+                self.write_line_break()
         if self.column < indent:
             self.whitespace = True
             data = u' '*(indent-self.column)
@@ -933,10 +1022,9 @@ def write_double_quoted(self, text, split=True):
             if end < len(text):
                 ch = text[end]
             if ch is None or ch in u'"\\\x85\u2028\u2029\uFEFF' \
-                    or not (u'\x20' <= ch <= u'\x7E'
-                        or (self.allow_unicode
-                            and (u'\xA0' <= ch <= u'\uD7FF'
-                                or u'\uE000' <= ch <= u'\uFFFD'))):
+               or not (u'\x20' <= ch <= u'\x7E' or
+                       (self.allow_unicode and
+                        (u'\xA0' <= ch <= u'\uD7FF' or u'\uE000' <= ch <= u'\uFFFD'))):
                 if start < end:
                     data = text[start:end]
                     self.column += len(data)
@@ -983,7 +1071,7 @@ def determine_block_hints(self, text):
         hints = u''
         if text:
             if text[0] in u' \n\x85\u2028\u2029':
-                hints += unicode(self.best_indent)
+                hints += text_type(self.best_indent)
             if text[-1] not in u'\n\x85\u2028\u2029':
                 hints += u'-'
             elif len(text) == 1 or text[-2] in u'\n\x85\u2028\u2029':
@@ -1101,7 +1189,8 @@ def write_plain(self, text, split=True):
                 ch = text[end]
             if spaces:
                 if ch != u' ':
-                    if start+1 == end and self.column > self.best_width and split:
+                    if start+1 == end and self.column > self.best_width \
+                       and split:
                         self.write_indent()
                         self.whitespace = False
                         self.indention = False
@@ -1138,3 +1227,56 @@ def write_plain(self, text, split=True):
                 breaks = (ch in u'\n\x85\u2028\u2029')
             end += 1
 
+    def write_comment(self, comment):
+        value = comment.value
+        # print('{:02d} {:02d} {}'.format(self.column, comment.start_mark.column, value))
+        if value[-1] == '\n':
+            value = value[:-1]
+        try:
+            # get original column position
+            col = comment.start_mark.column
+            if col < self.column + 1:
+                raise ValueError
+        except ValueError:
+            col = self.column + 1
+        # print('post_comment', self.line, self.column, value)
+        try:
+            # at least one space if the current column >= the start column
+            # of the comment, but not at the start of a line
+            nr_spaces = col - self.column
+            if self.column and value.strip() and nr_spaces < 1:
+                nr_spaces = 1
+            value = ' ' * nr_spaces + value
+            try:
+                if self.encoding:
+                    value = value.encode(self.encoding)
+            except UnicodeDecodeError:
+                pass
+            self.stream.write(value)
+        except TypeError:
+            raise
+        self.write_line_break()
+
+    def write_pre_comment(self, event):
+        comments = event.comment[1]
+        if comments is None:
+            return
+        try:
+            for comment in comments:
+                if isinstance(event, MappingStartEvent) and \
+                   getattr(comment, 'pre_done', None):
+                    continue
+                if self.column != 0:
+                    self.write_line_break()
+                self.write_comment(comment)
+                if isinstance(event, MappingStartEvent):
+                    comment.pre_done = True
+        except TypeError:
+            print('eventtt', type(event), event)
+            raise
+
+    def write_post_comment(self, event):
+        if event.comment[0] is None:
+            return
+        comment = event.comment[0]
+        self.write_comment(comment)
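
The three writers above are the emitter half of comment round-tripping:
events carry their comments as a (post, pre) pair, write_pre_comment()
flushes any comments that belong before an event, and write_post_comment()
emits the end-of-line comment after a scalar. A minimal sketch of the
behavior this enables, assuming the vendored __init__.py re-exports main.py's
helpers (as upstream ruamel.yaml does):

    import ruamel.yaml

    src = (u'packages:\n'
           u'  gcc:  # prefer the system compiler\n'
           u'    version: [4.9.3]\n')
    data = ruamel.yaml.round_trip_load(src)
    out = ruamel.yaml.round_trip_dump(data)
    assert u'# prefer the system compiler' in out
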
diff --git a/lib/spack/external/yaml/lib/yaml/error.py b/lib/spack/external/ruamel/yaml/error.py
similarity index 67%
rename from lib/spack/external/yaml/lib/yaml/error.py
rename to lib/spack/external/ruamel/yaml/error.py
index 577686db5f..1ec77e60ec 100644
--- a/lib/spack/external/yaml/lib/yaml/error.py
+++ b/lib/spack/external/ruamel/yaml/error.py
@@ -1,8 +1,16 @@
+# coding: utf-8
+
+from __future__ import absolute_import
 
 __all__ = ['Mark', 'YAMLError', 'MarkedYAMLError']
 
-class Mark(object):
+try:
+    from .compat import utf8
+except (ImportError, ValueError):  # for Jython
+    from ruamel.yaml.compat import utf8
 
+
+class Mark(object):
     def __init__(self, name, index, line, column, buffer, pointer):
         self.name = name
         self.index = index
@@ -16,7 +24,8 @@ def get_snippet(self, indent=4, max_length=75):
             return None
         head = ''
         start = self.pointer
-        while start > 0 and self.buffer[start-1] not in u'\0\r\n\x85\u2028\u2029':
+        while (start > 0 and
+               self.buffer[start-1] not in u'\0\r\n\x85\u2028\u2029'):
             start -= 1
             if self.pointer-start > max_length/2-1:
                 head = ' ... '
@@ -24,15 +33,16 @@ def get_snippet(self, indent=4, max_length=75):
                 break
         tail = ''
         end = self.pointer
-        while end < len(self.buffer) and self.buffer[end] not in u'\0\r\n\x85\u2028\u2029':
+        while (end < len(self.buffer) and
+               self.buffer[end] not in u'\0\r\n\x85\u2028\u2029'):
             end += 1
             if end-self.pointer > max_length/2-1:
                 tail = ' ... '
                 end -= 5
                 break
-        snippet = self.buffer[start:end].encode('utf-8')
-        return ' '*indent + head + snippet + tail + '\n'  \
-                + ' '*(indent+self.pointer-start+len(head)) + '^'
+        snippet = utf8(self.buffer[start:end])
+        return ' '*indent + head + snippet + tail + '\n' \
+               + ' '*(indent+self.pointer-start+len(head)) + '^'
 
     def __str__(self):
         snippet = self.get_snippet()
@@ -42,13 +52,14 @@ def __str__(self):
             where += ":\n"+snippet
         return where
 
+
 class YAMLError(Exception):
     pass
 
-class MarkedYAMLError(YAMLError):
 
+class MarkedYAMLError(YAMLError):
     def __init__(self, context=None, context_mark=None,
-            problem=None, problem_mark=None, note=None):
+                 problem=None, problem_mark=None, note=None):
         self.context = context
         self.context_mark = context_mark
         self.problem = problem
@@ -60,10 +71,10 @@ def __str__(self):
         if self.context is not None:
             lines.append(self.context)
         if self.context_mark is not None  \
-            and (self.problem is None or self.problem_mark is None
-                    or self.context_mark.name != self.problem_mark.name
-                    or self.context_mark.line != self.problem_mark.line
-                    or self.context_mark.column != self.problem_mark.column):
+           and (self.problem is None or self.problem_mark is None or
+                self.context_mark.name != self.problem_mark.name or
+                self.context_mark.line != self.problem_mark.line or
+                self.context_mark.column != self.problem_mark.column):
             lines.append(str(self.context_mark))
         if self.problem is not None:
             lines.append(self.problem)
@@ -72,4 +83,3 @@ def __str__(self):
         if self.note is not None:
             lines.append(self.note)
         return '\n'.join(lines)
-
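
The Mark/MarkedYAMLError rework above only changes formatting and py2/py3
string handling (utf8() instead of .encode('utf-8')); the reporting behavior
is unchanged. A hypothetical illustration, with the buffer and positions made
up for the example:

    from ruamel.yaml.error import Mark, MarkedYAMLError

    buf = u'name: [unclosed\n'
    mark = Mark('example.yaml', 6, 0, 6, buf, 6)
    err = MarkedYAMLError(context='while parsing a flow sequence',
                          context_mark=mark,
                          problem="expected ',' or ']'",
                          problem_mark=mark)
    print(str(err))  # context, problem, then the mark with a caret snippet
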
diff --git a/lib/spack/external/yaml/lib/yaml/events.py b/lib/spack/external/ruamel/yaml/events.py
similarity index 55%
rename from lib/spack/external/yaml/lib/yaml/events.py
rename to lib/spack/external/ruamel/yaml/events.py
index f79ad389cb..7667c016be 100644
--- a/lib/spack/external/yaml/lib/yaml/events.py
+++ b/lib/spack/external/ruamel/yaml/events.py
@@ -1,86 +1,106 @@
+# coding: utf-8
 
 # Abstract classes.
 
+
+def CommentCheck():
+    pass
+
+
 class Event(object):
-    def __init__(self, start_mark=None, end_mark=None):
+    def __init__(self, start_mark=None, end_mark=None, comment=CommentCheck):
         self.start_mark = start_mark
         self.end_mark = end_mark
+        # assert comment is not CommentCheck
+        if comment is CommentCheck:
+            comment = None
+        self.comment = comment
+
     def __repr__(self):
-        attributes = [key for key in ['anchor', 'tag', 'implicit', 'value']
-                if hasattr(self, key)]
+        attributes = [key for key in ['anchor', 'tag', 'implicit', 'value',
+                                      'flow_style', 'style']
+                      if hasattr(self, key)]
         arguments = ', '.join(['%s=%r' % (key, getattr(self, key))
-                for key in attributes])
+                               for key in attributes])
+        if self.comment not in [None, CommentCheck]:
+            arguments += ', comment={!r}'.format(self.comment)
         return '%s(%s)' % (self.__class__.__name__, arguments)
 
+
 class NodeEvent(Event):
-    def __init__(self, anchor, start_mark=None, end_mark=None):
+    def __init__(self, anchor, start_mark=None, end_mark=None, comment=None):
+        Event.__init__(self, start_mark, end_mark, comment)
         self.anchor = anchor
-        self.start_mark = start_mark
-        self.end_mark = end_mark
+
 
 class CollectionStartEvent(NodeEvent):
     def __init__(self, anchor, tag, implicit, start_mark=None, end_mark=None,
-            flow_style=None):
+                 flow_style=None, comment=None):
+        Event.__init__(self, start_mark, end_mark, comment)
         self.anchor = anchor
         self.tag = tag
         self.implicit = implicit
-        self.start_mark = start_mark
-        self.end_mark = end_mark
         self.flow_style = flow_style
 
+
 class CollectionEndEvent(Event):
     pass
 
 # Implementations.
 
+
 class StreamStartEvent(Event):
-    def __init__(self, start_mark=None, end_mark=None, encoding=None):
-        self.start_mark = start_mark
-        self.end_mark = end_mark
+    def __init__(self, start_mark=None, end_mark=None, encoding=None,
+                 comment=None):
+        Event.__init__(self, start_mark, end_mark, comment)
         self.encoding = encoding
 
+
 class StreamEndEvent(Event):
     pass
 
+
 class DocumentStartEvent(Event):
     def __init__(self, start_mark=None, end_mark=None,
-            explicit=None, version=None, tags=None):
-        self.start_mark = start_mark
-        self.end_mark = end_mark
+                 explicit=None, version=None, tags=None, comment=None):
+        Event.__init__(self, start_mark, end_mark, comment)
         self.explicit = explicit
         self.version = version
         self.tags = tags
 
+
 class DocumentEndEvent(Event):
     def __init__(self, start_mark=None, end_mark=None,
-            explicit=None):
-        self.start_mark = start_mark
-        self.end_mark = end_mark
+                 explicit=None, comment=None):
+        Event.__init__(self, start_mark, end_mark, comment)
         self.explicit = explicit
 
+
 class AliasEvent(NodeEvent):
     pass
 
+
 class ScalarEvent(NodeEvent):
     def __init__(self, anchor, tag, implicit, value,
-            start_mark=None, end_mark=None, style=None):
-        self.anchor = anchor
+                 start_mark=None, end_mark=None, style=None, comment=None):
+        NodeEvent.__init__(self, anchor, start_mark, end_mark, comment)
         self.tag = tag
         self.implicit = implicit
         self.value = value
-        self.start_mark = start_mark
-        self.end_mark = end_mark
         self.style = style
 
+
 class SequenceStartEvent(CollectionStartEvent):
     pass
 
+
 class SequenceEndEvent(CollectionEndEvent):
     pass
 
+
 class MappingStartEvent(CollectionStartEvent):
     pass
 
+
 class MappingEndEvent(CollectionEndEvent):
     pass
-
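
Every event class now chains up to Event.__init__, so the new comment slot
(filled by the round-trip scanner as a [post, pre] pair) exists uniformly and
shows up in repr() when set. A small sketch against the constructors above:

    from ruamel.yaml.events import ScalarEvent

    ev = ScalarEvent(anchor=None, tag=None, implicit=(True, False),
                     value=u'x', comment=[None, None])
    print(repr(ev))  # includes comment=[None, None] alongside value/style
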
diff --git a/lib/spack/external/ruamel/yaml/loader.py b/lib/spack/external/ruamel/yaml/loader.py
new file mode 100644
index 0000000000..b5ba20a0a1
--- /dev/null
+++ b/lib/spack/external/ruamel/yaml/loader.py
@@ -0,0 +1,61 @@
+# coding: utf-8
+
+from __future__ import absolute_import
+
+__all__ = ['BaseLoader', 'SafeLoader', 'Loader', 'RoundTripLoader']
+
+try:
+    from .reader import *                                # NOQA
+    from .scanner import *                               # NOQA
+    from .parser import *                                # NOQA
+    from .composer import *                              # NOQA
+    from .constructor import *                           # NOQA
+    from .resolver import *                              # NOQA
+except (ImportError, ValueError):  # for Jython
+    from ruamel.yaml.reader import *                                # NOQA
+    from ruamel.yaml.scanner import *                               # NOQA
+    from ruamel.yaml.parser import *                                # NOQA
+    from ruamel.yaml.composer import *                              # NOQA
+    from ruamel.yaml.constructor import *                           # NOQA
+    from ruamel.yaml.resolver import *                              # NOQA
+
+
+class BaseLoader(Reader, Scanner, Parser, Composer, BaseConstructor, BaseResolver):
+    def __init__(self, stream, version=None, preserve_quotes=None):
+        Reader.__init__(self, stream)
+        Scanner.__init__(self)
+        Parser.__init__(self)
+        Composer.__init__(self)
+        BaseConstructor.__init__(self)
+        BaseResolver.__init__(self)
+
+
+class SafeLoader(Reader, Scanner, Parser, Composer, SafeConstructor, Resolver):
+    def __init__(self, stream, version=None, preserve_quotes=None):
+        Reader.__init__(self, stream)
+        Scanner.__init__(self)
+        Parser.__init__(self)
+        Composer.__init__(self)
+        SafeConstructor.__init__(self)
+        Resolver.__init__(self)
+
+
+class Loader(Reader, Scanner, Parser, Composer, Constructor, Resolver):
+    def __init__(self, stream, version=None, preserve_quotes=None):
+        Reader.__init__(self, stream)
+        Scanner.__init__(self)
+        Parser.__init__(self)
+        Composer.__init__(self)
+        Constructor.__init__(self)
+        Resolver.__init__(self)
+
+
+class RoundTripLoader(Reader, RoundTripScanner, RoundTripParser, Composer,
+                      RoundTripConstructor, VersionedResolver):
+    def __init__(self, stream, version=None, preserve_quotes=None):
+        Reader.__init__(self, stream)
+        RoundTripScanner.__init__(self)
+        RoundTripParser.__init__(self)
+        Composer.__init__(self)
+        RoundTripConstructor.__init__(self, preserve_quotes=preserve_quotes)
+        VersionedResolver.__init__(self, version)
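
Each loader is just a different stack of mixins; note that only
RoundTripLoader actually uses the version and preserve_quotes arguments the
others accept. A sketch of selecting it through load(), under the same
top-level re-export assumption as above:

    import ruamel.yaml
    from ruamel.yaml.loader import RoundTripLoader

    data = ruamel.yaml.load(u'a: 1  # keep me\n',
                            Loader=RoundTripLoader, preserve_quotes=True)
    print(type(data).__name__)  # the comment-preserving mapping type
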
diff --git a/lib/spack/external/yaml/lib3/yaml/__init__.py b/lib/spack/external/ruamel/yaml/main.py
similarity index 57%
rename from lib/spack/external/yaml/lib3/yaml/__init__.py
rename to lib/spack/external/ruamel/yaml/main.py
index d7d27fe63b..797bdcde65 100644
--- a/lib/spack/external/yaml/lib3/yaml/__init__.py
+++ b/lib/spack/external/ruamel/yaml/main.py
@@ -1,21 +1,20 @@
+# coding: utf-8
 
-from .error import *
+from __future__ import absolute_import
 
-from .tokens import *
-from .events import *
-from .nodes import *
 
-from .loader import *
-from .dumper import *
+from ruamel.yaml.error import *                                # NOQA
 
-__version__ = '3.12'
-try:
-    from .cyaml import *
-    __with_libyaml__ = True
-except ImportError:
-    __with_libyaml__ = False
+from ruamel.yaml.tokens import *                               # NOQA
+from ruamel.yaml.events import *                               # NOQA
+from ruamel.yaml.nodes import *                                # NOQA
+
+from ruamel.yaml.loader import *                               # NOQA
+from ruamel.yaml.dumper import *                               # NOQA
+from ruamel.yaml.compat import StringIO, BytesIO, with_metaclass, PY3
+
+# import io
 
-import io
 
 def scan(stream, Loader=Loader):
     """
@@ -28,6 +27,7 @@ def scan(stream, Loader=Loader):
     finally:
         loader.dispose()
 
+
 def parse(stream, Loader=Loader):
     """
     Parse a YAML stream and produce parsing events.
@@ -39,6 +39,7 @@ def parse(stream, Loader=Loader):
     finally:
         loader.dispose()
 
+
 def compose(stream, Loader=Loader):
     """
     Parse the first YAML document in a stream
@@ -50,6 +51,7 @@ def compose(stream, Loader=Loader):
     finally:
         loader.dispose()
 
+
 def compose_all(stream, Loader=Loader):
     """
     Parse all YAML documents in a stream
@@ -62,58 +64,81 @@ def compose_all(stream, Loader=Loader):
     finally:
         loader.dispose()
 
-def load(stream, Loader=Loader):
+
+def load(stream, Loader=Loader, version=None, preserve_quotes=None):
     """
     Parse the first YAML document in a stream
     and produce the corresponding Python object.
     """
-    loader = Loader(stream)
+    loader = Loader(stream, version, preserve_quotes=preserve_quotes)
     try:
         return loader.get_single_data()
     finally:
         loader.dispose()
 
-def load_all(stream, Loader=Loader):
+
+def load_all(stream, Loader=Loader, version=None):
     """
     Parse all YAML documents in a stream
     and produce corresponding Python objects.
     """
-    loader = Loader(stream)
+    loader = Loader(stream, version)
     try:
         while loader.check_data():
             yield loader.get_data()
     finally:
         loader.dispose()
 
-def safe_load(stream):
+
+def safe_load(stream, version=None):
     """
     Parse the first YAML document in a stream
     and produce the corresponding Python object.
     Resolve only basic YAML tags.
     """
-    return load(stream, SafeLoader)
+    return load(stream, SafeLoader, version)
+
 
-def safe_load_all(stream):
+def safe_load_all(stream, version=None):
     """
     Parse all YAML documents in a stream
     and produce corresponding Python objects.
     Resolve only basic YAML tags.
     """
-    return load_all(stream, SafeLoader)
+    return load_all(stream, SafeLoader, version)
+
+
+def round_trip_load(stream, version=None, preserve_quotes=None):
+    """
+    Parse the first YAML document in a stream
+    and produce the corresponding Python object.
+    Resolve only basic YAML tags.
+    """
+    return load(stream, RoundTripLoader, version, preserve_quotes=preserve_quotes)
+
+
+def round_trip_load_all(stream, version=None, preserve_quotes=None):
+    """
+    Parse all YAML documents in a stream
+    and produce corresponding Python objects.
+    Resolve only basic YAML tags.
+    """
+    return load_all(stream, RoundTripLoader, version, preserve_quotes=preserve_quotes)
+
 
 def emit(events, stream=None, Dumper=Dumper,
-        canonical=None, indent=None, width=None,
-        allow_unicode=None, line_break=None):
+         canonical=None, indent=None, width=None,
+         allow_unicode=None, line_break=None):
     """
     Emit YAML parsing events into a stream.
     If stream is None, return the produced string instead.
     """
     getvalue = None
     if stream is None:
-        stream = io.StringIO()
+        stream = StringIO()
         getvalue = stream.getvalue
     dumper = Dumper(stream, canonical=canonical, indent=indent, width=width,
-            allow_unicode=allow_unicode, line_break=line_break)
+                    allow_unicode=allow_unicode, line_break=line_break)
     try:
         for event in events:
             dumper.emit(event)
@@ -122,11 +147,14 @@ def emit(events, stream=None, Dumper=Dumper,
     if getvalue:
         return getvalue()
 
+enc = None if PY3 else 'utf-8'
+
+
 def serialize_all(nodes, stream=None, Dumper=Dumper,
-        canonical=None, indent=None, width=None,
-        allow_unicode=None, line_break=None,
-        encoding=None, explicit_start=None, explicit_end=None,
-        version=None, tags=None):
+                  canonical=None, indent=None, width=None,
+                  allow_unicode=None, line_break=None,
+                  encoding=enc, explicit_start=None, explicit_end=None,
+                  version=None, tags=None):
     """
     Serialize a sequence of representation trees into a YAML stream.
     If stream is None, return the produced string instead.
@@ -134,14 +162,14 @@ def serialize_all(nodes, stream=None, Dumper=Dumper,
     getvalue = None
     if stream is None:
         if encoding is None:
-            stream = io.StringIO()
+            stream = StringIO()
         else:
-            stream = io.BytesIO()
+            stream = BytesIO()
         getvalue = stream.getvalue
     dumper = Dumper(stream, canonical=canonical, indent=indent, width=width,
-            allow_unicode=allow_unicode, line_break=line_break,
-            encoding=encoding, version=version, tags=tags,
-            explicit_start=explicit_start, explicit_end=explicit_end)
+                    allow_unicode=allow_unicode, line_break=line_break,
+                    encoding=encoding, version=version, tags=tags,
+                    explicit_start=explicit_start, explicit_end=explicit_end)
     try:
         dumper.open()
         for node in nodes:
@@ -152,6 +180,7 @@ def serialize_all(nodes, stream=None, Dumper=Dumper,
     if getvalue:
         return getvalue()
 
+
 def serialize(node, stream=None, Dumper=Dumper, **kwds):
     """
     Serialize a representation tree into a YAML stream.
@@ -159,29 +188,36 @@ def serialize(node, stream=None, Dumper=Dumper, **kwds):
     """
     return serialize_all([node], stream, Dumper=Dumper, **kwds)
 
+
 def dump_all(documents, stream=None, Dumper=Dumper,
-        default_style=None, default_flow_style=None,
-        canonical=None, indent=None, width=None,
-        allow_unicode=None, line_break=None,
-        encoding=None, explicit_start=None, explicit_end=None,
-        version=None, tags=None):
+             default_style=None, default_flow_style=None,
+             canonical=None, indent=None, width=None,
+             allow_unicode=None, line_break=None,
+             encoding=enc, explicit_start=None, explicit_end=None,
+             version=None, tags=None, block_seq_indent=None,
+             top_level_colon_align=None, prefix_colon=None):
     """
     Serialize a sequence of Python objects into a YAML stream.
     If stream is None, return the produced string instead.
     """
     getvalue = None
+    if top_level_colon_align is True:
+        top_level_colon_align = max([len(str(x)) for x in documents[0]])
     if stream is None:
         if encoding is None:
-            stream = io.StringIO()
+            stream = StringIO()
         else:
-            stream = io.BytesIO()
+            stream = BytesIO()
         getvalue = stream.getvalue
     dumper = Dumper(stream, default_style=default_style,
-            default_flow_style=default_flow_style,
-            canonical=canonical, indent=indent, width=width,
-            allow_unicode=allow_unicode, line_break=line_break,
-            encoding=encoding, version=version, tags=tags,
-            explicit_start=explicit_start, explicit_end=explicit_end)
+                    default_flow_style=default_flow_style,
+                    canonical=canonical, indent=indent, width=width,
+                    allow_unicode=allow_unicode, line_break=line_break,
+                    encoding=encoding, explicit_start=explicit_start,
+                    explicit_end=explicit_end, version=version,
+                    tags=tags, block_seq_indent=block_seq_indent,
+                    top_level_colon_align=top_level_colon_align, prefix_colon=prefix_colon,
+                    )
     try:
         dumper.open()
         for data in documents:
@@ -192,12 +228,31 @@ def dump_all(documents, stream=None, Dumper=Dumper,
     if getvalue:
         return getvalue()
 
-def dump(data, stream=None, Dumper=Dumper, **kwds):
+
+def dump(data, stream=None, Dumper=Dumper,
+         default_style=None, default_flow_style=None,
+         canonical=None, indent=None, width=None,
+         allow_unicode=None, line_break=None,
+         encoding=enc, explicit_start=None, explicit_end=None,
+         version=None, tags=None, block_seq_indent=None):
     """
     Serialize a Python object into a YAML stream.
     If stream is None, return the produced string instead.
+
+    default_style ∈ None, '', '"', "'", '|', '>'
+
     """
-    return dump_all([data], stream, Dumper=Dumper, **kwds)
+    return dump_all([data], stream, Dumper=Dumper,
+                    default_style=default_style,
+                    default_flow_style=default_flow_style,
+                    canonical=canonical,
+                    indent=indent, width=width,
+                    allow_unicode=allow_unicode,
+                    line_break=line_break,
+                    encoding=encoding, explicit_start=explicit_start,
+                    explicit_end=explicit_end,
+                    version=version, tags=tags, block_seq_indent=block_seq_indent)
+
 
 def safe_dump_all(documents, stream=None, **kwds):
     """
@@ -207,6 +262,7 @@ def safe_dump_all(documents, stream=None, **kwds):
     """
     return dump_all(documents, stream, Dumper=SafeDumper, **kwds)
 
+
 def safe_dump(data, stream=None, **kwds):
     """
     Serialize a Python object into a YAML stream.
@@ -215,8 +271,30 @@ def safe_dump(data, stream=None, **kwds):
     """
     return dump_all([data], stream, Dumper=SafeDumper, **kwds)
 
+
+def round_trip_dump(data, stream=None, Dumper=RoundTripDumper,
+                    default_style=None, default_flow_style=None,
+                    canonical=None, indent=None, width=None,
+                    allow_unicode=None, line_break=None,
+                    encoding=enc, explicit_start=None, explicit_end=None,
+                    version=None, tags=None, block_seq_indent=None,
+                    top_level_colon_align=None, prefix_colon=None):
+    allow_unicode = True if allow_unicode is None else allow_unicode
+    return dump_all([data], stream, Dumper=Dumper,
+                    default_style=default_style,
+                    default_flow_style=default_flow_style,
+                    canonical=canonical,
+                    indent=indent, width=width,
+                    allow_unicode=allow_unicode,
+                    line_break=line_break,
+                    encoding=encoding, explicit_start=explicit_start,
+                    explicit_end=explicit_end,
+                    version=version, tags=tags, block_seq_indent=block_seq_indent,
+                    top_level_colon_align=top_level_colon_align, prefix_colon=prefix_colon)
+
+
 def add_implicit_resolver(tag, regexp, first=None,
-        Loader=Loader, Dumper=Dumper):
+                          Loader=Loader, Dumper=Dumper):
     """
     Add an implicit scalar detector.
     If an implicit scalar value matches the given regexp,
@@ -226,6 +304,7 @@ def add_implicit_resolver(tag, regexp, first=None,
     Loader.add_implicit_resolver(tag, regexp, first)
     Dumper.add_implicit_resolver(tag, regexp, first)
 
+
 def add_path_resolver(tag, path, kind=None, Loader=Loader, Dumper=Dumper):
     """
     Add a path based resolver for the given tag.
@@ -236,6 +315,7 @@ def add_path_resolver(tag, path, kind=None, Loader=Loader, Dumper=Dumper):
     Loader.add_path_resolver(tag, path, kind)
     Dumper.add_path_resolver(tag, path, kind)
 
+
 def add_constructor(tag, constructor, Loader=Loader):
     """
     Add a constructor for the given tag.
@@ -244,6 +324,7 @@ def add_constructor(tag, constructor, Loader=Loader):
     """
     Loader.add_constructor(tag, constructor)
 
+
 def add_multi_constructor(tag_prefix, multi_constructor, Loader=Loader):
     """
     Add a multi-constructor for the given tag prefix.
@@ -253,6 +334,7 @@ def add_multi_constructor(tag_prefix, multi_constructor, Loader=Loader):
     """
     Loader.add_multi_constructor(tag_prefix, multi_constructor)
 
+
 def add_representer(data_type, representer, Dumper=Dumper):
     """
     Add a representer for the given type.
@@ -262,6 +344,7 @@ def add_representer(data_type, representer, Dumper=Dumper):
     """
     Dumper.add_representer(data_type, representer)
 
+
 def add_multi_representer(data_type, multi_representer, Dumper=Dumper):
     """
     Add a representer for the given type.
@@ -271,6 +354,7 @@ def add_multi_representer(data_type, multi_representer, Dumper=Dumper):
     """
     Dumper.add_multi_representer(data_type, multi_representer)
 
+
 class YAMLObjectMetaclass(type):
     """
     The metaclass for YAMLObject.
@@ -281,12 +365,12 @@ def __init__(cls, name, bases, kwds):
             cls.yaml_loader.add_constructor(cls.yaml_tag, cls.from_yaml)
             cls.yaml_dumper.add_representer(cls, cls.to_yaml)
 
-class YAMLObject(metaclass=YAMLObjectMetaclass):
+
+class YAMLObject(with_metaclass(YAMLObjectMetaclass)):
     """
     An object that can dump itself to a YAML stream
     and load itself from a YAML stream.
     """
-
     __slots__ = ()  # no direct instantiation, so allow immutable subclasses
 
     yaml_loader = Loader
@@ -308,5 +392,4 @@ def to_yaml(cls, dumper, data):
         Convert a Python object to a representation node.
         """
         return dumper.represent_yaml_object(cls.yaml_tag, data, cls,
-                flow_style=cls.yaml_flow_style)
-
+                                            flow_style=cls.yaml_flow_style)
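
Beyond the reflowed signatures, the notable API additions in main.py are the
round_trip_* entry points and the dumper knobs threaded through dump_all()
(block_seq_indent, top_level_colon_align, prefix_colon). A sketch of the
sequence-indent knob; the shown output assumes the emitter honors
block_seq_indent for the default Dumper, as the plumbing above suggests:

    import ruamel.yaml

    print(ruamel.yaml.dump({'deps': ['mpich', 'hwloc']},
                           default_flow_style=False, block_seq_indent=2))
    # deps:
    #   - mpich
    #   - hwloc
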
diff --git a/lib/spack/external/ruamel/yaml/nodes.py b/lib/spack/external/ruamel/yaml/nodes.py
new file mode 100644
index 0000000000..26c6d77ae6
--- /dev/null
+++ b/lib/spack/external/ruamel/yaml/nodes.py
@@ -0,0 +1,91 @@
+# coding: utf-8
+
+from __future__ import print_function
+
+try:  # basestring exists only on Python 2; fall back to str on Python 3
+    string_types = basestring
+except NameError:
+    string_types = str
+
+
+class Node(object):
+    def __init__(self, tag, value, start_mark, end_mark, comment=None):
+        self.tag = tag
+        self.value = value
+        self.start_mark = start_mark
+        self.end_mark = end_mark
+        self.comment = comment
+        self.anchor = None
+
+    def __repr__(self):
+        value = self.value
+        # if isinstance(value, list):
+        #     if len(value) == 0:
+        #         value = '<empty>'
+        #     elif len(value) == 1:
+        #         value = '<1 item>'
+        #     else:
+        #         value = '<%d items>' % len(value)
+        # else:
+        #     if len(value) > 75:
+        #         value = repr(value[:70]+u' ... ')
+        #     else:
+        #         value = repr(value)
+        value = repr(value)
+        return '%s(tag=%r, value=%s)' % (self.__class__.__name__,
+                                         self.tag, value)
+
+    def dump(self, indent=0):
+        if isinstance(self.value, string_types):
+            print('{0}{1}(tag={2!r}, value={3!r})'.format(
+                '  ' * indent, self.__class__.__name__, self.tag, self.value))
+            if self.comment:
+                print('    {0}comment: {1})'.format(
+                    '  ' * indent, self.comment))
+            return
+        print('{0}{1}(tag={2!r})'.format(
+            '  ' * indent, self.__class__.__name__, self.tag))
+        if self.comment:
+            print('    {0}comment: {1})'.format(
+                '  ' * indent, self.comment))
+        for v in self.value:
+            if isinstance(v, tuple):
+                for v1 in v:
+                    v1.dump(indent+1)
+            elif isinstance(v, Node):
+                v.dump(indent+1)
+            else:
+                print('Node value type?', type(v))
+
+
+class ScalarNode(Node):
+    """
+    styles:
+      ? -> set() ? key, no value
+      " -> double quoted
+      ' -> single quoted
+      | -> literal style
+      > -> folded style
+    """
+    id = 'scalar'
+
+    def __init__(self, tag, value, start_mark=None, end_mark=None, style=None,
+                 comment=None):
+        Node.__init__(self, tag, value, start_mark, end_mark, comment=comment)
+        self.style = style
+
+
+class CollectionNode(Node):
+    def __init__(self, tag, value, start_mark=None, end_mark=None,
+                 flow_style=None, comment=None, anchor=None):
+        Node.__init__(self, tag, value, start_mark, end_mark, comment=comment)
+        self.flow_style = flow_style
+        self.anchor = anchor
+
+
+class SequenceNode(CollectionNode):
+    id = 'sequence'
+
+
+class MappingNode(CollectionNode):
+    id = 'mapping'
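
These Node classes add a comment slot and a dump() debug helper on top of the
PyYAML originals. compose() returns exactly this node tree, so a quick sketch:

    import ruamel.yaml

    node = ruamel.yaml.compose(u'a: [1, 2]\n')
    print(node.id)  # 'mapping'
    node.dump()     # prints the tag/value tree, one node per line
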
diff --git a/lib/spack/external/yaml/lib/yaml/parser.py b/lib/spack/external/ruamel/yaml/parser.py
similarity index 72%
rename from lib/spack/external/yaml/lib/yaml/parser.py
rename to lib/spack/external/ruamel/yaml/parser.py
index f9e3057f33..543cca9b43 100644
--- a/lib/spack/external/yaml/lib/yaml/parser.py
+++ b/lib/spack/external/ruamel/yaml/parser.py
@@ -1,13 +1,18 @@
+# coding: utf-8
+
+from __future__ import absolute_import
 
 # The following YAML grammar is LL(1) and is parsed by a recursive descent
 # parser.
 #
-# stream            ::= STREAM-START implicit_document? explicit_document* STREAM-END
+# stream            ::= STREAM-START implicit_document? explicit_document*
+#                                                                   STREAM-END
 # implicit_document ::= block_node DOCUMENT-END*
 # explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
 # block_node_or_indentless_sequence ::=
 #                       ALIAS
-#                       | properties (block_content | indentless_block_sequence)?
+#                       | properties (block_content |
+#                                                   indentless_block_sequence)?
 #                       | block_content
 #                       | indentless_block_sequence
 # block_node        ::= ALIAS
@@ -21,7 +26,8 @@
 # flow_content      ::= flow_collection | SCALAR
 # block_collection  ::= block_sequence | block_mapping
 # flow_collection   ::= flow_sequence | flow_mapping
-# block_sequence    ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END
+# block_sequence    ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)*
+#                                                                   BLOCK-END
 # indentless_sequence   ::= (BLOCK-ENTRY block_node?)+
 # block_mapping     ::= BLOCK-MAPPING_START
 #                       ((KEY block_node_or_indentless_sequence?)?
@@ -43,32 +49,44 @@
 # stream: { STREAM-START }
 # explicit_document: { DIRECTIVE DOCUMENT-START }
 # implicit_document: FIRST(block_node)
-# block_node: { ALIAS TAG ANCHOR SCALAR BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START }
+# block_node: { ALIAS TAG ANCHOR SCALAR BLOCK-SEQUENCE-START
+#                  BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START }
 # flow_node: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START }
-# block_content: { BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START SCALAR }
+# block_content: { BLOCK-SEQUENCE-START BLOCK-MAPPING-START
+#                               FLOW-SEQUENCE-START FLOW-MAPPING-START SCALAR }
 # flow_content: { FLOW-SEQUENCE-START FLOW-MAPPING-START SCALAR }
 # block_collection: { BLOCK-SEQUENCE-START BLOCK-MAPPING-START }
 # flow_collection: { FLOW-SEQUENCE-START FLOW-MAPPING-START }
 # block_sequence: { BLOCK-SEQUENCE-START }
 # block_mapping: { BLOCK-MAPPING-START }
-# block_node_or_indentless_sequence: { ALIAS ANCHOR TAG SCALAR BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START BLOCK-ENTRY }
+# block_node_or_indentless_sequence: { ALIAS ANCHOR TAG SCALAR
+#               BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START
+#               FLOW-MAPPING-START BLOCK-ENTRY }
 # indentless_sequence: { ENTRY }
 # flow_collection: { FLOW-SEQUENCE-START FLOW-MAPPING-START }
 # flow_sequence: { FLOW-SEQUENCE-START }
 # flow_mapping: { FLOW-MAPPING-START }
-# flow_sequence_entry: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START KEY }
-# flow_mapping_entry: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START KEY }
+# flow_sequence_entry: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START
+#                                                    FLOW-MAPPING-START KEY }
+# flow_mapping_entry: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START
+#                                                    FLOW-MAPPING-START KEY }
+
+__all__ = ['Parser', 'RoundTripParser', 'ParserError']
 
-__all__ = ['Parser', 'ParserError']
+# full import paths are needed: pkg_resources tries to load parser.py via
+# __init__.py and then does nothing with the package afterwards;
+# Jython also requires the full path
+from ruamel.yaml.error import MarkedYAMLError                  # NOQA
+from ruamel.yaml.tokens import *                               # NOQA
+from ruamel.yaml.events import *                               # NOQA
+from ruamel.yaml.scanner import *                              # NOQA
+from ruamel.yaml.compat import utf8                            # NOQA
 
-from error import MarkedYAMLError
-from tokens import *
-from events import *
-from scanner import *
 
 class ParserError(MarkedYAMLError):
     pass
 
+
 class Parser(object):
     # Since writing a recursive-descendant parser is a straightforward task, we
     # do not give many comments here.
@@ -120,7 +138,8 @@ def get_event(self):
         self.current_event = None
         return value
 
-    # stream    ::= STREAM-START implicit_document? explicit_document* STREAM-END
+    # stream    ::= STREAM-START implicit_document? explicit_document*
+    #                                                               STREAM-END
     # implicit_document ::= block_node DOCUMENT-END*
     # explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
 
@@ -128,8 +147,9 @@ def parse_stream_start(self):
 
         # Parse the stream start.
         token = self.get_token()
+        token.move_comment(self.peek_token())
         event = StreamStartEvent(token.start_mark, token.end_mark,
-                encoding=token.encoding)
+                                 encoding=token.encoding)
 
         # Prepare the next state.
         self.state = self.parse_implicit_document_start
@@ -140,12 +160,12 @@ def parse_implicit_document_start(self):
 
         # Parse an implicit document.
         if not self.check_token(DirectiveToken, DocumentStartToken,
-                StreamEndToken):
+                                StreamEndToken):
             self.tag_handles = self.DEFAULT_TAGS
             token = self.peek_token()
             start_mark = end_mark = token.start_mark
             event = DocumentStartEvent(start_mark, end_mark,
-                    explicit=False)
+                                       explicit=False)
 
             # Prepare the next state.
             self.states.append(self.parse_document_end)
@@ -169,19 +189,21 @@ def parse_document_start(self):
             version, tags = self.process_directives()
             if not self.check_token(DocumentStartToken):
                 raise ParserError(None, None,
-                        "expected '<document start>', but found %r"
-                        % self.peek_token().id,
-                        self.peek_token().start_mark)
+                                  "expected '<document start>', but found %r"
+                                  % self.peek_token().id,
+                                  self.peek_token().start_mark)
             token = self.get_token()
             end_mark = token.end_mark
-            event = DocumentStartEvent(start_mark, end_mark,
-                    explicit=True, version=version, tags=tags)
+            event = DocumentStartEvent(
+                start_mark, end_mark,
+                explicit=True, version=version, tags=tags)
             self.states.append(self.parse_document_end)
             self.state = self.parse_document_content
         else:
             # Parse the end of the stream.
             token = self.get_token()
-            event = StreamEndEvent(token.start_mark, token.end_mark)
+            event = StreamEndEvent(token.start_mark, token.end_mark,
+                                   comment=token.comment)
             assert not self.states
             assert not self.marks
             self.state = None
@@ -197,8 +219,7 @@ def parse_document_end(self):
             token = self.get_token()
             end_mark = token.end_mark
             explicit = True
-        event = DocumentEndEvent(start_mark, end_mark,
-                explicit=explicit)
+        event = DocumentEndEvent(start_mark, end_mark, explicit=explicit)
 
         # Prepare the next state.
         self.state = self.parse_document_start
@@ -206,8 +227,9 @@ def parse_document_end(self):
         return event
 
     def parse_document_content(self):
-        if self.check_token(DirectiveToken,
-                DocumentStartToken, DocumentEndToken, StreamEndToken):
+        if self.check_token(
+           DirectiveToken,
+           DocumentStartToken, DocumentEndToken, StreamEndToken):
             event = self.process_empty_scalar(self.peek_token().start_mark)
             self.state = self.states.pop()
             return event
@@ -221,20 +243,23 @@ def process_directives(self):
             token = self.get_token()
             if token.name == u'YAML':
                 if self.yaml_version is not None:
-                    raise ParserError(None, None,
-                            "found duplicate YAML directive", token.start_mark)
+                    raise ParserError(
+                        None, None,
+                        "found duplicate YAML directive", token.start_mark)
                 major, minor = token.value
                 if major != 1:
-                    raise ParserError(None, None,
-                            "found incompatible YAML document (version 1.* is required)",
-                            token.start_mark)
+                    raise ParserError(
+                        None, None,
+                        "found incompatible YAML document (version 1.* is "
+                        "required)",
+                        token.start_mark)
                 self.yaml_version = token.value
             elif token.name == u'TAG':
                 handle, prefix = token.value
                 if handle in self.tag_handles:
                     raise ParserError(None, None,
-                            "duplicate tag handle %r" % handle.encode('utf-8'),
-                            token.start_mark)
+                                      "duplicate tag handle %r" % utf8(handle),
+                                      token.start_mark)
                 self.tag_handles[handle] = prefix
         if self.tag_handles:
             value = self.yaml_version, self.tag_handles.copy()
@@ -270,6 +295,9 @@ def parse_flow_node(self):
     def parse_block_node_or_indentless_sequence(self):
         return self.parse_node(block=True, indentless_sequence=True)
 
+    def transform_tag(self, handle, suffix):
+        return self.tag_handles[handle] + suffix
+
     def parse_node(self, block=False, indentless_sequence=False):
         if self.check_token(AliasToken):
             token = self.get_token()
@@ -302,16 +330,18 @@ def parse_node(self, block=False, indentless_sequence=False):
                 handle, suffix = tag
                 if handle is not None:
                     if handle not in self.tag_handles:
-                        raise ParserError("while parsing a node", start_mark,
-                                "found undefined tag handle %r" % handle.encode('utf-8'),
-                                tag_mark)
-                    tag = self.tag_handles[handle]+suffix
+                        raise ParserError(
+                            "while parsing a node", start_mark,
+                            "found undefined tag handle %r" % utf8(handle),
+                            tag_mark)
+                    tag = self.transform_tag(handle, suffix)
                 else:
                     tag = suffix
-            #if tag == u'!':
-            #    raise ParserError("while parsing a node", start_mark,
-            #            "found non-specific tag '!'", tag_mark,
-            #            "Please check 'http://pyyaml.org/wiki/YAMLNonSpecificTag' and share your opinion.")
+            # if tag == u'!':
+            #     raise ParserError("while parsing a node", start_mark,
+            #             "found non-specific tag '!'", tag_mark,
+            #      "Please check 'http://pyyaml.org/wiki/YAMLNonSpecificTag'
+            #     and share your opinion.")
             if start_mark is None:
                 start_mark = end_mark = self.peek_token().start_mark
             event = None
@@ -319,7 +349,7 @@ def parse_node(self, block=False, indentless_sequence=False):
             if indentless_sequence and self.check_token(BlockEntryToken):
                 end_mark = self.peek_token().end_mark
                 event = SequenceStartEvent(anchor, tag, implicit,
-                        start_mark, end_mark)
+                                           start_mark, end_mark)
                 self.state = self.parse_indentless_sequence_entry
             else:
                 if self.check_token(ScalarToken):
@@ -331,34 +361,52 @@ def parse_node(self, block=False, indentless_sequence=False):
                         implicit = (False, True)
                     else:
                         implicit = (False, False)
-                    event = ScalarEvent(anchor, tag, implicit, token.value,
-                            start_mark, end_mark, style=token.style)
+                    event = ScalarEvent(
+                        anchor, tag, implicit, token.value,
+                        start_mark, end_mark, style=token.style,
+                        comment=token.comment
+                    )
                     self.state = self.states.pop()
                 elif self.check_token(FlowSequenceStartToken):
                     end_mark = self.peek_token().end_mark
-                    event = SequenceStartEvent(anchor, tag, implicit,
-                            start_mark, end_mark, flow_style=True)
+                    event = SequenceStartEvent(
+                        anchor, tag, implicit,
+                        start_mark, end_mark, flow_style=True)
                     self.state = self.parse_flow_sequence_first_entry
                 elif self.check_token(FlowMappingStartToken):
                     end_mark = self.peek_token().end_mark
-                    event = MappingStartEvent(anchor, tag, implicit,
-                            start_mark, end_mark, flow_style=True)
+                    event = MappingStartEvent(
+                        anchor, tag, implicit,
+                        start_mark, end_mark, flow_style=True)
                     self.state = self.parse_flow_mapping_first_key
                 elif block and self.check_token(BlockSequenceStartToken):
                     end_mark = self.peek_token().start_mark
-                    event = SequenceStartEvent(anchor, tag, implicit,
-                            start_mark, end_mark, flow_style=False)
+                    # should inserting the comment be dependent on the
+                    # indentation?
+                    pt = self.peek_token()
+                    comment = pt.comment
+                    # print('pt0', type(pt))
+                    if comment is None or comment[1] is None:
+                        comment = pt.split_comment()
+                    # print('pt1', comment)
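+                    # hedged reading: if the peeked token carries nothing in
+                    # comment slot [1], split_comment() (tokens.py) peels off
+                    # the part that should travel with this sequence start
+                    # event instead of staying on the token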
+                    event = SequenceStartEvent(
+                        anchor, tag, implicit, start_mark, end_mark,
+                        flow_style=False,
+                        comment=comment,
+                    )
                     self.state = self.parse_block_sequence_first_entry
                 elif block and self.check_token(BlockMappingStartToken):
                     end_mark = self.peek_token().start_mark
-                    event = MappingStartEvent(anchor, tag, implicit,
-                            start_mark, end_mark, flow_style=False)
+                    comment = self.peek_token().comment
+                    event = MappingStartEvent(
+                        anchor, tag, implicit, start_mark, end_mark,
+                        flow_style=False, comment=comment)
                     self.state = self.parse_block_mapping_first_key
                 elif anchor is not None or tag is not None:
                     # Empty scalars are allowed even if a tag or an anchor is
                     # specified.
                     event = ScalarEvent(anchor, tag, (implicit, False), u'',
-                            start_mark, end_mark)
+                                        start_mark, end_mark)
                     self.state = self.states.pop()
                 else:
                     if block:
@@ -366,21 +414,26 @@ def parse_node(self, block=False, indentless_sequence=False):
                     else:
                         node = 'flow'
                     token = self.peek_token()
-                    raise ParserError("while parsing a %s node" % node, start_mark,
-                            "expected the node content, but found %r" % token.id,
-                            token.start_mark)
+                    raise ParserError(
+                        "while parsing a %s node" % node, start_mark,
+                        "expected the node content, but found %r" % token.id,
+                        token.start_mark)
         return event
 
-    # block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END
+    # block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)*
+    #                                                               BLOCK-END
 
     def parse_block_sequence_first_entry(self):
         token = self.get_token()
+        # move any comment from start token
+        # token.move_comment(self.peek_token())
         self.marks.append(token.start_mark)
         return self.parse_block_sequence_entry()
 
     def parse_block_sequence_entry(self):
         if self.check_token(BlockEntryToken):
             token = self.get_token()
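+            # hedged reading of move_comment (tokens.py): re-attach any
+            # comment on this BlockEntryToken ('-') to the following token,
+            # so it stays with the entry's node during round-trip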
+            token.move_comment(self.peek_token())
             if not self.check_token(BlockEntryToken, BlockEndToken):
                 self.states.append(self.parse_block_sequence_entry)
                 return self.parse_block_node()
@@ -389,28 +442,38 @@ def parse_block_sequence_entry(self):
                 return self.process_empty_scalar(token.end_mark)
         if not self.check_token(BlockEndToken):
             token = self.peek_token()
-            raise ParserError("while parsing a block collection", self.marks[-1],
-                    "expected <block end>, but found %r" % token.id, token.start_mark)
-        token = self.get_token()
-        event = SequenceEndEvent(token.start_mark, token.end_mark)
+            raise ParserError(
+                "while parsing a block collection", self.marks[-1],
+                "expected <block end>, but found %r" %
+                token.id, token.start_mark)
+        token = self.get_token()  # BlockEndToken
+        event = SequenceEndEvent(token.start_mark, token.end_mark,
+                                 comment=token.comment)
         self.state = self.states.pop()
         self.marks.pop()
         return event
 
     # indentless_sequence ::= (BLOCK-ENTRY block_node?)+
 
+    # indentless_sequence?
+    # sequence:
+    # - entry
+    #  - nested
+
     def parse_indentless_sequence_entry(self):
         if self.check_token(BlockEntryToken):
             token = self.get_token()
+            token.move_comment(self.peek_token())
             if not self.check_token(BlockEntryToken,
-                    KeyToken, ValueToken, BlockEndToken):
+                                    KeyToken, ValueToken, BlockEndToken):
                 self.states.append(self.parse_indentless_sequence_entry)
                 return self.parse_block_node()
             else:
                 self.state = self.parse_indentless_sequence_entry
                 return self.process_empty_scalar(token.end_mark)
         token = self.peek_token()
-        event = SequenceEndEvent(token.start_mark, token.start_mark)
+        event = SequenceEndEvent(token.start_mark, token.start_mark,
+                                 comment=token.comment)
         self.state = self.states.pop()
         return event
 
@@ -427,6 +490,7 @@ def parse_block_mapping_first_key(self):
     def parse_block_mapping_key(self):
         if self.check_token(KeyToken):
             token = self.get_token()
+            token.move_comment(self.peek_token())
             if not self.check_token(KeyToken, ValueToken, BlockEndToken):
                 self.states.append(self.parse_block_mapping_value)
                 return self.parse_block_node_or_indentless_sequence()
@@ -435,10 +499,14 @@ def parse_block_mapping_key(self):
                 return self.process_empty_scalar(token.end_mark)
         if not self.check_token(BlockEndToken):
             token = self.peek_token()
-            raise ParserError("while parsing a block mapping", self.marks[-1],
-                    "expected <block end>, but found %r" % token.id, token.start_mark)
+            raise ParserError(
+                "while parsing a block mapping", self.marks[-1],
+                "expected <block end>, but found %r" % token.id,
+                token.start_mark)
         token = self.get_token()
-        event = MappingEndEvent(token.start_mark, token.end_mark)
+        token.move_comment(self.peek_token())
+        event = MappingEndEvent(token.start_mark, token.end_mark,
+                                comment=token.comment)
         self.state = self.states.pop()
         self.marks.pop()
         return event
@@ -446,6 +514,8 @@ def parse_block_mapping_key(self):
     def parse_block_mapping_value(self):
         if self.check_token(ValueToken):
             token = self.get_token()
+            # the value token might carry a post comment; move it to e.g. the block
+            token.move_comment(self.peek_token())
             if not self.check_token(KeyToken, ValueToken, BlockEndToken):
                 self.states.append(self.parse_block_mapping_key)
                 return self.parse_block_node_or_indentless_sequence()
@@ -480,21 +550,24 @@ def parse_flow_sequence_entry(self, first=False):
                     self.get_token()
                 else:
                     token = self.peek_token()
-                    raise ParserError("while parsing a flow sequence", self.marks[-1],
-                            "expected ',' or ']', but got %r" % token.id, token.start_mark)
-            
+                    raise ParserError(
+                        "while parsing a flow sequence", self.marks[-1],
+                        "expected ',' or ']', but got %r" % token.id,
+                        token.start_mark)
+
             if self.check_token(KeyToken):
                 token = self.peek_token()
                 event = MappingStartEvent(None, None, True,
-                        token.start_mark, token.end_mark,
-                        flow_style=True)
+                                          token.start_mark, token.end_mark,
+                                          flow_style=True)
                 self.state = self.parse_flow_sequence_entry_mapping_key
                 return event
             elif not self.check_token(FlowSequenceEndToken):
                 self.states.append(self.parse_flow_sequence_entry)
                 return self.parse_flow_node()
         token = self.get_token()
-        event = SequenceEndEvent(token.start_mark, token.end_mark)
+        event = SequenceEndEvent(token.start_mark, token.end_mark,
+                                 comment=token.comment)
         self.state = self.states.pop()
         self.marks.pop()
         return event
@@ -502,7 +575,7 @@ def parse_flow_sequence_entry(self, first=False):
     def parse_flow_sequence_entry_mapping_key(self):
         token = self.get_token()
         if not self.check_token(ValueToken,
-                FlowEntryToken, FlowSequenceEndToken):
+                                FlowEntryToken, FlowSequenceEndToken):
             self.states.append(self.parse_flow_sequence_entry_mapping_value)
             return self.parse_flow_node()
         else:
@@ -546,12 +619,14 @@ def parse_flow_mapping_key(self, first=False):
                     self.get_token()
                 else:
                     token = self.peek_token()
-                    raise ParserError("while parsing a flow mapping", self.marks[-1],
-                            "expected ',' or '}', but got %r" % token.id, token.start_mark)
+                    raise ParserError(
+                        "while parsing a flow mapping", self.marks[-1],
+                        "expected ',' or '}', but got %r" % token.id,
+                        token.start_mark)
             if self.check_token(KeyToken):
                 token = self.get_token()
                 if not self.check_token(ValueToken,
-                        FlowEntryToken, FlowMappingEndToken):
+                                        FlowEntryToken, FlowMappingEndToken):
                     self.states.append(self.parse_flow_mapping_value)
                     return self.parse_flow_node()
                 else:
@@ -561,7 +636,8 @@ def parse_flow_mapping_key(self, first=False):
                 self.states.append(self.parse_flow_mapping_empty_value)
                 return self.parse_flow_node()
         token = self.get_token()
-        event = MappingEndEvent(token.start_mark, token.end_mark)
+        event = MappingEndEvent(token.start_mark, token.end_mark,
+                                comment=token.comment)
         self.state = self.states.pop()
         self.marks.pop()
         return event
@@ -587,3 +663,13 @@ def parse_flow_mapping_empty_value(self):
     def process_empty_scalar(self, mark):
         return ScalarEvent(None, None, (True, False), u'', mark, mark)
 
+
+class RoundTripParser(Parser):
+    """roundtrip is a safe loader, that wants to see the unmangled tag"""
+    def transform_tag(self, handle, suffix):
+        # return self.tag_handles[handle]+suffix
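+        # illustrative sketch: with the default tag handles,
+        # ('!!', 'str') resolves via Parser.transform_tag to
+        # 'tag:yaml.org,2002:str', while a non-core suffix such as
+        # ('!!', 'mytag') falls through below and comes back literally
+        # as '!!mytag', so a round-trip dump re-emits the tag as read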
+        if handle == '!!' and suffix in (u'null', u'bool', u'int', u'float', u'binary',
+                                         u'timestamp', u'omap', u'pairs', u'set', u'str',
+                                         u'seq', u'map'):
+            return Parser.transform_tag(self, handle, suffix)
+        return handle+suffix
diff --git a/lib/spack/external/yaml/lib3/yaml/reader.py b/lib/spack/external/ruamel/yaml/reader.py
similarity index 69%
rename from lib/spack/external/yaml/lib3/yaml/reader.py
rename to lib/spack/external/ruamel/yaml/reader.py
index f70e920f44..376c6de8c6 100644
--- a/lib/spack/external/yaml/lib3/yaml/reader.py
+++ b/lib/spack/external/ruamel/yaml/reader.py
@@ -1,3 +1,6 @@
+# coding: utf-8
+
+from __future__ import absolute_import
 # This module contains abstractions for the input stream. You don't have to
 # look further; there is no pretty code.
 #
@@ -11,15 +14,24 @@
 # Reader determines the encoding of `data` and converts it to unicode.
 # Reader provides the following methods and attributes:
 #   reader.peek(length=1) - return the next `length` characters
-#   reader.forward(length=1) - move the current position to `length` characters.
+#   reader.forward(length=1) - move the current position `length`
+#      characters forward.
 #   reader.index - the number of the current character.
-#   reader.line, stream.column - the line and the column of the current character.
+#   reader.line, reader.column - the line and the column of the current
+#      character.
 
-__all__ = ['Reader', 'ReaderError']
+import codecs
+import re
 
-from .error import YAMLError, Mark
+try:
+    from .error import YAMLError, Mark
+    from .compat import text_type, binary_type, PY3
+except (ImportError, ValueError):  # for Jython
+    from ruamel.yaml.error import YAMLError, Mark
+    from ruamel.yaml.compat import text_type, binary_type, PY3
+
+__all__ = ['Reader', 'ReaderError']
 
-import codecs, re
 
 class ReaderError(YAMLError):
 
@@ -31,16 +43,17 @@ def __init__(self, name, position, character, encoding, reason):
         self.reason = reason
 
     def __str__(self):
-        if isinstance(self.character, bytes):
+        if isinstance(self.character, binary_type):
             return "'%s' codec can't decode byte #x%02x: %s\n"  \
-                    "  in \"%s\", position %d"    \
-                    % (self.encoding, ord(self.character), self.reason,
-                            self.name, self.position)
+                   "  in \"%s\", position %d"    \
+                   % (self.encoding, ord(self.character), self.reason,
+                      self.name, self.position)
         else:
             return "unacceptable character #x%04x: %s\n"    \
-                    "  in \"%s\", position %d"    \
-                    % (self.character, self.reason,
-                            self.name, self.position)
+                   "  in \"%s\", position %d"    \
+                   % (self.character, self.reason,
+                      self.name, self.position)
+
 
 class Reader(object):
     # Reader:
@@ -49,8 +62,8 @@ class Reader(object):
     # - adds '\0' to the end.
 
     # Reader accepts
-    #  - a `bytes` object,
-    #  - a `str` object,
+    #  - a `str` object (PY2) / a `bytes` object (PY3),
+    #  - a `unicode` object (PY2) / a `str` object (PY3),
     #  - a file-like object with its `read` method returning `str`,
     #  - a file-like object with its `read` method returning `unicode`.
 
@@ -61,7 +74,7 @@ def __init__(self, stream):
         self.stream = None
         self.stream_pointer = 0
         self.eof = True
-        self.buffer = ''
+        self.buffer = u''
         self.pointer = 0
         self.raw_buffer = None
         self.raw_decode = None
@@ -69,11 +82,11 @@ def __init__(self, stream):
         self.index = 0
         self.line = 0
         self.column = 0
-        if isinstance(stream, str):
+        if isinstance(stream, text_type):
             self.name = "<unicode string>"
             self.check_printable(stream)
-            self.buffer = stream+'\0'
-        elif isinstance(stream, bytes):
+            self.buffer = stream+u'\0'
+        elif isinstance(stream, binary_type):
             self.name = "<byte string>"
             self.raw_buffer = stream
             self.determine_encoding()
@@ -103,26 +116,27 @@ def forward(self, length=1):
             ch = self.buffer[self.pointer]
             self.pointer += 1
             self.index += 1
-            if ch in '\n\x85\u2028\u2029'  \
-                    or (ch == '\r' and self.buffer[self.pointer] != '\n'):
+            if ch in u'\n\x85\u2028\u2029'  \
+                    or (ch == u'\r' and self.buffer[self.pointer] != u'\n'):
                 self.line += 1
                 self.column = 0
-            elif ch != '\uFEFF':
+            elif ch != u'\uFEFF':
                 self.column += 1
             length -= 1
 
     def get_mark(self):
         if self.stream is None:
             return Mark(self.name, self.index, self.line, self.column,
-                    self.buffer, self.pointer)
+                        self.buffer, self.pointer)
         else:
             return Mark(self.name, self.index, self.line, self.column,
-                    None, None)
+                        None, None)
 
     def determine_encoding(self):
-        while not self.eof and (self.raw_buffer is None or len(self.raw_buffer) < 2):
+        while not self.eof and (self.raw_buffer is None or
+                                len(self.raw_buffer) < 2):
             self.update_raw()
-        if isinstance(self.raw_buffer, bytes):
+        if isinstance(self.raw_buffer, binary_type):
             if self.raw_buffer.startswith(codecs.BOM_UTF16_LE):
                 self.raw_decode = codecs.utf_16_le_decode
                 self.encoding = 'utf-16-le'
@@ -134,14 +148,16 @@ def determine_encoding(self):
                 self.encoding = 'utf-8'
         self.update(1)
 
-    NON_PRINTABLE = re.compile('[^\x09\x0A\x0D\x20-\x7E\x85\xA0-\uD7FF\uE000-\uFFFD]')
+    NON_PRINTABLE = re.compile(
+        u'[^\x09\x0A\x0D\x20-\x7E\x85\xA0-\uD7FF\uE000-\uFFFD]')
+
     def check_printable(self, data):
         match = self.NON_PRINTABLE.search(data)
         if match:
             character = match.group()
             position = self.index+(len(self.buffer)-self.pointer)+match.start()
             raise ReaderError(self.name, position, ord(character),
-                    'unicode', "special characters are not allowed")
+                              'unicode', "special characters are not allowed")
 
     def update(self, length):
         if self.raw_buffer is None:
@@ -154,15 +170,19 @@ def update(self, length):
             if self.raw_decode is not None:
                 try:
                     data, converted = self.raw_decode(self.raw_buffer,
-                            'strict', self.eof)
+                                                      'strict', self.eof)
                 except UnicodeDecodeError as exc:
-                    character = self.raw_buffer[exc.start]
+                    if PY3:
+                        character = self.raw_buffer[exc.start]
+                    else:
+                        character = exc.object[exc.start]
                     if self.stream is not None:
-                        position = self.stream_pointer-len(self.raw_buffer)+exc.start
+                        position = self.stream_pointer - \
+                            len(self.raw_buffer) + exc.start
                     else:
                         position = exc.start
                     raise ReaderError(self.name, position, character,
-                            exc.encoding, exc.reason)
+                                      exc.encoding, exc.reason)
             else:
                 data = self.raw_buffer
                 converted = len(data)
@@ -170,11 +190,13 @@ def update(self, length):
             self.buffer += data
             self.raw_buffer = self.raw_buffer[converted:]
             if self.eof:
-                self.buffer += '\0'
+                self.buffer += u'\0'
                 self.raw_buffer = None
                 break
 
-    def update_raw(self, size=4096):
+    def update_raw(self, size=None):
+        if size is None:
+            size = 4096 if PY3 else 1024
         data = self.stream.read(size)
         if self.raw_buffer is None:
             self.raw_buffer = data
@@ -184,9 +206,8 @@ def update_raw(self, size=4096):
         if not data:
             self.eof = True
 
-#try:
-#    import psyco
-#    psyco.bind(Reader)
-#except ImportError:
-#    pass
-
+# try:
+#     import psyco
+#     psyco.bind(Reader)
+# except ImportError:
+#     pass
diff --git a/lib/spack/external/ruamel/yaml/representer.py b/lib/spack/external/ruamel/yaml/representer.py
new file mode 100644
index 0000000000..b4625bfae9
--- /dev/null
+++ b/lib/spack/external/ruamel/yaml/representer.py
@@ -0,0 +1,888 @@
+# coding: utf-8
+
+from __future__ import absolute_import
+from __future__ import print_function
+
+try:
+    from .error import *                                  # NOQA
+    from .nodes import *                                  # NOQA
+    from .compat import text_type, binary_type, to_unicode, PY2, PY3, ordereddict
+    from .scalarstring import *                           # NOQA
+except (ImportError, ValueError):  # for Jython
+    from ruamel.yaml.error import *                       # NOQA
+    from ruamel.yaml.nodes import *                       # NOQA
+    from ruamel.yaml.compat import text_type, binary_type, to_unicode, PY2, PY3, ordereddict
+    from ruamel.yaml.scalarstring import *                # NOQA
+
+
+import datetime
+import sys
+import types
+if PY3:
+    import copyreg
+    import base64
+else:
+    import copy_reg as copyreg
+
+
+__all__ = ['BaseRepresenter', 'SafeRepresenter', 'Representer',
+           'RepresenterError', 'RoundTripRepresenter']
+
+
+class RepresenterError(YAMLError):
+    pass
+
+
+class BaseRepresenter(object):
+
+    yaml_representers = {}
+    yaml_multi_representers = {}
+
+    def __init__(self, default_style=None, default_flow_style=None):
+        self.default_style = default_style
+        self.default_flow_style = default_flow_style
+        self.represented_objects = {}
+        self.object_keeper = []
+        self.alias_key = None
+
+    def represent(self, data):
+        node = self.represent_data(data)
+        self.serialize(node)
+        self.represented_objects = {}
+        self.object_keeper = []
+        self.alias_key = None
+
+    if PY2:
+        def get_classobj_bases(self, cls):
+            bases = [cls]
+            for base in cls.__bases__:
+                bases.extend(self.get_classobj_bases(base))
+            return bases
+
+    def represent_data(self, data):
+        if self.ignore_aliases(data):
+            self.alias_key = None
+        else:
+            self.alias_key = id(data)
+        if self.alias_key is not None:
+            if self.alias_key in self.represented_objects:
+                node = self.represented_objects[self.alias_key]
+                # if node is None:
+                #     raise RepresenterError(
+                #          "recursive objects are not allowed: %r" % data)
+                return node
+            # self.represented_objects[alias_key] = None
+            self.object_keeper.append(data)
+        data_types = type(data).__mro__
+        if PY2:
+            # if type(data) is types.InstanceType:
+            if isinstance(data, types.InstanceType):
+                data_types = self.get_classobj_bases(data.__class__) + \
+                    list(data_types)
+        if data_types[0] in self.yaml_representers:
+            node = self.yaml_representers[data_types[0]](self, data)
+        else:
+            for data_type in data_types:
+                if data_type in self.yaml_multi_representers:
+                    node = self.yaml_multi_representers[data_type](self, data)
+                    break
+            else:
+                if None in self.yaml_multi_representers:
+                    node = self.yaml_multi_representers[None](self, data)
+                elif None in self.yaml_representers:
+                    node = self.yaml_representers[None](self, data)
+                else:
+                    node = ScalarNode(None, text_type(data))
+        # if alias_key is not None:
+        #     self.represented_objects[alias_key] = node
+        return node
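+    # dispatch note: an exact type match in yaml_representers wins;
+    # otherwise the first entry of the type's MRO found in
+    # yaml_multi_representers is used, with the None registrations
+    # serving as the final fallback before a bare ScalarNode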
+
+    def represent_key(self, data):
+        """
+        David Fraser: Extract a method to represent keys in mappings, so that
+        a subclass can choose not to quote them (for example)
+        used in represent_mapping
+        https://bitbucket.org/davidfraser/pyyaml/commits/d81df6eb95f20cac4a79eed95ae553b5c6f77b8c
+        """
+        return self.represent_data(data)
+
+    @classmethod
+    def add_representer(cls, data_type, representer):
+        if 'yaml_representers' not in cls.__dict__:
+            cls.yaml_representers = cls.yaml_representers.copy()
+        cls.yaml_representers[data_type] = representer
+
+    @classmethod
+    def add_multi_representer(cls, data_type, representer):
+        if 'yaml_multi_representers' not in cls.__dict__:
+            cls.yaml_multi_representers = cls.yaml_multi_representers.copy()
+        cls.yaml_multi_representers[data_type] = representer
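+    # usage sketch (illustrative names): registering on a subclass copies
+    # the registry first, so e.g.
+    #     MyRepresenter.add_representer(MyType, MyRepresenter.represent_my)
+    # never mutates BaseRepresenter.yaml_representers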
+
+    def represent_scalar(self, tag, value, style=None):
+        if style is None:
+            style = self.default_style
+        node = ScalarNode(tag, value, style=style)
+        if self.alias_key is not None:
+            self.represented_objects[self.alias_key] = node
+        return node
+
+    def represent_sequence(self, tag, sequence, flow_style=None):
+        value = []
+        node = SequenceNode(tag, value, flow_style=flow_style)
+        if self.alias_key is not None:
+            self.represented_objects[self.alias_key] = node
+        best_style = True
+        for item in sequence:
+            node_item = self.represent_data(item)
+            if not (isinstance(node_item, ScalarNode) and not node_item.style):
+                best_style = False
+            value.append(node_item)
+        if flow_style is None:
+            if self.default_flow_style is not None:
+                node.flow_style = self.default_flow_style
+            else:
+                node.flow_style = best_style
+        return node
+
+    def represent_omap(self, tag, omap, flow_style=None):
+        value = []
+        node = SequenceNode(tag, value, flow_style=flow_style)
+        if self.alias_key is not None:
+            self.represented_objects[self.alias_key] = node
+        best_style = True
+        for item_key in omap:
+            item_val = omap[item_key]
+            node_item = self.represent_data({item_key: item_val})
+            # if not (isinstance(node_item, ScalarNode) \
+            #    and not node_item.style):
+            #     best_style = False
+            value.append(node_item)
+        if flow_style is None:
+            if self.default_flow_style is not None:
+                node.flow_style = self.default_flow_style
+            else:
+                node.flow_style = best_style
+        return node
+
+    def represent_mapping(self, tag, mapping, flow_style=None):
+        value = []
+        node = MappingNode(tag, value, flow_style=flow_style)
+        if self.alias_key is not None:
+            self.represented_objects[self.alias_key] = node
+        best_style = True
+        if hasattr(mapping, 'items'):
+            mapping = list(mapping.items())
+            try:
+                mapping = sorted(mapping)
+            except TypeError:
+                pass
+        for item_key, item_value in mapping:
+            node_key = self.represent_key(item_key)
+            node_value = self.represent_data(item_value)
+            if not (isinstance(node_key, ScalarNode) and not node_key.style):
+                best_style = False
+            if not (isinstance(node_value, ScalarNode) and not
+                    node_value.style):
+                best_style = False
+            value.append((node_key, node_value))
+        if flow_style is None:
+            if self.default_flow_style is not None:
+                node.flow_style = self.default_flow_style
+            else:
+                node.flow_style = best_style
+        return node
+
+    def ignore_aliases(self, data):
+        return False
+
+
+class SafeRepresenter(BaseRepresenter):
+
+    def ignore_aliases(self, data):
+        # https://docs.python.org/3/reference/expressions.html#parenthesized-forms :
+        # "i.e. two occurrences of the empty tuple may or may not yield the same object"
+        # so "data is ()" should not be used
+        if data is None or data == ():
+            return True
+        if isinstance(data, (binary_type, text_type, bool, int, float)):
+            return True
+
+    def represent_none(self, data):
+        return self.represent_scalar(u'tag:yaml.org,2002:null',
+                                     u'null')
+
+    if PY3:
+        def represent_str(self, data):
+            return self.represent_scalar(u'tag:yaml.org,2002:str', data)
+
+        def represent_binary(self, data):
+            if hasattr(base64, 'encodebytes'):
+                data = base64.encodebytes(data).decode('ascii')
+            else:
+                data = base64.encodestring(data).decode('ascii')
+            return self.represent_scalar(u'tag:yaml.org,2002:binary', data,
+                                         style='|')
+    else:
+        def represent_str(self, data):
+            tag = None
+            style = None
+            try:
+                data = unicode(data, 'ascii')
+                tag = u'tag:yaml.org,2002:str'
+            except UnicodeDecodeError:
+                try:
+                    data = unicode(data, 'utf-8')
+                    tag = u'tag:yaml.org,2002:str'
+                except UnicodeDecodeError:
+                    data = data.encode('base64')
+                    tag = u'tag:yaml.org,2002:binary'
+                    style = '|'
+            return self.represent_scalar(tag, data, style=style)
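+        # e.g. a byte string that is neither ASCII- nor UTF-8-decodable
+        # falls through to base64 above and is emitted as !!binary in
+        # literal ('|') style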
+
+        def represent_unicode(self, data):
+            return self.represent_scalar(u'tag:yaml.org,2002:str', data)
+
+    def represent_bool(self, data):
+        if data:
+            value = u'true'
+        else:
+            value = u'false'
+        return self.represent_scalar(u'tag:yaml.org,2002:bool', value)
+
+    def represent_int(self, data):
+        return self.represent_scalar(u'tag:yaml.org,2002:int', text_type(data))
+
+    if PY2:
+        def represent_long(self, data):
+            return self.represent_scalar(u'tag:yaml.org,2002:int',
+                                         text_type(data))
+
+    inf_value = 1e300
+    while repr(inf_value) != repr(inf_value*inf_value):
+        inf_value *= inf_value
+
+    def represent_float(self, data):
+        if data != data or (data == 0.0 and data == 1.0):
+            value = u'.nan'
+        elif data == self.inf_value:
+            value = u'.inf'
+        elif data == -self.inf_value:
+            value = u'-.inf'
+        else:
+            value = to_unicode(repr(data)).lower()
+            # Note that in some cases `repr(data)` represents a float number
+            # without the decimal parts.  For instance:
+            #   >>> repr(1e17)
+            #   '1e17'
+            # Unfortunately, this is not a valid float representation according
+            # to the definition of the `!!float` tag.  We fix this by adding
+            # '.0' before the 'e' symbol.
+            if u'.' not in value and u'e' in value:
+                value = value.replace(u'e', u'.0e', 1)
+        return self.represent_scalar(u'tag:yaml.org,2002:float', value)
+
+    def represent_list(self, data):
+        # pairs = (len(data) > 0 and isinstance(data, list))
+        # if pairs:
+        #     for item in data:
+        #         if not isinstance(item, tuple) or len(item) != 2:
+        #             pairs = False
+        #             break
+        # if not pairs:
+            return self.represent_sequence(u'tag:yaml.org,2002:seq', data)
+        # value = []
+        # for item_key, item_value in data:
+        #     value.append(self.represent_mapping(u'tag:yaml.org,2002:map',
+        #         [(item_key, item_value)]))
+        # return SequenceNode(u'tag:yaml.org,2002:pairs', value)
+
+    def represent_dict(self, data):
+        return self.represent_mapping(u'tag:yaml.org,2002:map', data)
+
+    def represent_ordereddict(self, data):
+        return self.represent_omap(u'tag:yaml.org,2002:omap', data)
+
+    def represent_set(self, data):
+        value = {}
+        for key in data:
+            value[key] = None
+        return self.represent_mapping(u'tag:yaml.org,2002:set', value)
+
+    def represent_date(self, data):
+        value = to_unicode(data.isoformat())
+        return self.represent_scalar(u'tag:yaml.org,2002:timestamp', value)
+
+    def represent_datetime(self, data):
+        value = to_unicode(data.isoformat(' '))
+        return self.represent_scalar(u'tag:yaml.org,2002:timestamp', value)
+
+    def represent_yaml_object(self, tag, data, cls, flow_style=None):
+        if hasattr(data, '__getstate__'):
+            state = data.__getstate__()
+        else:
+            state = data.__dict__.copy()
+        return self.represent_mapping(tag, state, flow_style=flow_style)
+
+    def represent_undefined(self, data):
+        raise RepresenterError("cannot represent an object: %s" % data)
+
+SafeRepresenter.add_representer(type(None),
+                                SafeRepresenter.represent_none)
+
+SafeRepresenter.add_representer(str,
+                                SafeRepresenter.represent_str)
+
+if PY2:
+    SafeRepresenter.add_representer(unicode,
+                                    SafeRepresenter.represent_unicode)
+else:
+    SafeRepresenter.add_representer(bytes,
+                                    SafeRepresenter.represent_binary)
+
+SafeRepresenter.add_representer(bool,
+                                SafeRepresenter.represent_bool)
+
+SafeRepresenter.add_representer(int,
+                                SafeRepresenter.represent_int)
+
+if PY2:
+    SafeRepresenter.add_representer(long,
+                                    SafeRepresenter.represent_long)
+
+SafeRepresenter.add_representer(float,
+                                SafeRepresenter.represent_float)
+
+SafeRepresenter.add_representer(list,
+                                SafeRepresenter.represent_list)
+
+SafeRepresenter.add_representer(tuple,
+                                SafeRepresenter.represent_list)
+
+SafeRepresenter.add_representer(dict,
+                                SafeRepresenter.represent_dict)
+
+SafeRepresenter.add_representer(set,
+                                SafeRepresenter.represent_set)
+
+SafeRepresenter.add_representer(ordereddict,
+                                SafeRepresenter.represent_ordereddict)
+
+SafeRepresenter.add_representer(datetime.date,
+                                SafeRepresenter.represent_date)
+
+SafeRepresenter.add_representer(datetime.datetime,
+                                SafeRepresenter.represent_datetime)
+
+SafeRepresenter.add_representer(None,
+                                SafeRepresenter.represent_undefined)
+
+
+class Representer(SafeRepresenter):
+    if PY2:
+        def represent_str(self, data):
+            tag = None
+            style = None
+            try:
+                data = unicode(data, 'ascii')
+                tag = u'tag:yaml.org,2002:str'
+            except UnicodeDecodeError:
+                try:
+                    data = unicode(data, 'utf-8')
+                    tag = u'tag:yaml.org,2002:python/str'
+                except UnicodeDecodeError:
+                    data = data.encode('base64')
+                    tag = u'tag:yaml.org,2002:binary'
+                    style = '|'
+            return self.represent_scalar(tag, data, style=style)
+
+        def represent_unicode(self, data):
+            tag = None
+            try:
+                data.encode('ascii')
+                tag = u'tag:yaml.org,2002:python/unicode'
+            except UnicodeEncodeError:
+                tag = u'tag:yaml.org,2002:str'
+            return self.represent_scalar(tag, data)
+
+        def represent_long(self, data):
+            tag = u'tag:yaml.org,2002:int'
+            if int(data) is not data:
+                tag = u'tag:yaml.org,2002:python/long'
+            return self.represent_scalar(tag, to_unicode(data))
+
+    def represent_complex(self, data):
+        if data.imag == 0.0:
+            data = u'%r' % data.real
+        elif data.real == 0.0:
+            data = u'%rj' % data.imag
+        elif data.imag > 0:
+            data = u'%r+%rj' % (data.real, data.imag)
+        else:
+            data = u'%r%rj' % (data.real, data.imag)
+        return self.represent_scalar(u'tag:yaml.org,2002:python/complex', data)
+
+    def represent_tuple(self, data):
+        return self.represent_sequence(u'tag:yaml.org,2002:python/tuple', data)
+
+    def represent_name(self, data):
+        name = u'%s.%s' % (data.__module__, data.__name__)
+        return self.represent_scalar(u'tag:yaml.org,2002:python/name:' +
+                                     name, u'')
+
+    def represent_module(self, data):
+        return self.represent_scalar(
+            u'tag:yaml.org,2002:python/module:'+data.__name__, u'')
+
+    if PY2:
+        def represent_instance(self, data):
+            # For instances of classic classes, we use __getinitargs__ and
+            # __getstate__ to serialize the data.
+
+            # If data.__getinitargs__ exists, the object must be reconstructed
+            # by calling cls(**args), where args is a tuple returned by
+            # __getinitargs__. Otherwise, the cls.__init__ method should never
+            # be called and the class instance is created by instantiating a
+            # trivial class and assigning to the instance's __class__ variable.
+
+            # If data.__getstate__ exists, it returns the state of the object.
+            # Otherwise, the state of the object is data.__dict__.
+
+            # We produce either a !!python/object or !!python/object/new node.
+            # If data.__getinitargs__ does not exist and state is a dictionary,
+            # we produce a !!python/object node. Otherwise we produce a
+            # !!python/object/new node.
+
+            cls = data.__class__
+            class_name = u'%s.%s' % (cls.__module__, cls.__name__)
+            args = None
+            state = None
+            if hasattr(data, '__getinitargs__'):
+                args = list(data.__getinitargs__())
+            if hasattr(data, '__getstate__'):
+                state = data.__getstate__()
+            else:
+                state = data.__dict__
+            if args is None and isinstance(state, dict):
+                return self.represent_mapping(
+                    u'tag:yaml.org,2002:python/object:'+class_name, state)
+            if isinstance(state, dict) and not state:
+                return self.represent_sequence(
+                    u'tag:yaml.org,2002:python/object/new:' +
+                    class_name, args)
+            value = {}
+            if args:
+                value['args'] = args
+            value['state'] = state
+            return self.represent_mapping(
+                u'tag:yaml.org,2002:python/object/new:'+class_name, value)
+
+    def represent_object(self, data):
+        # We use __reduce__ API to save the data. data.__reduce__ returns
+        # a tuple of length 2-5:
+        #   (function, args, state, listitems, dictitems)
+
+        # For reconstructing, we call function(*args), then set its state,
+        # listitems, and dictitems if they are not None.
+
+        # A special case is when function.__name__ == '__newobj__'. In this
+        # case we create the object with args[0].__new__(*args).
+
+        # Another special case is when __reduce__ returns a string - we don't
+        # support it.
+
+        # We produce a !!python/object, !!python/object/new or
+        # !!python/object/apply node.
+
+        cls = type(data)
+        if cls in copyreg.dispatch_table:
+            reduce = copyreg.dispatch_table[cls](data)
+        elif hasattr(data, '__reduce_ex__'):
+            reduce = data.__reduce_ex__(2)
+        elif hasattr(data, '__reduce__'):
+            reduce = data.__reduce__()
+        else:
+            raise RepresenterError("cannot represent object: %r" % data)
+        reduce = (list(reduce)+[None]*5)[:5]
+        function, args, state, listitems, dictitems = reduce
+        args = list(args)
+        if state is None:
+            state = {}
+        if listitems is not None:
+            listitems = list(listitems)
+        if dictitems is not None:
+            dictitems = dict(dictitems)
+        if function.__name__ == '__newobj__':
+            function = args[0]
+            args = args[1:]
+            tag = u'tag:yaml.org,2002:python/object/new:'
+            newobj = True
+        else:
+            tag = u'tag:yaml.org,2002:python/object/apply:'
+            newobj = False
+        function_name = u'%s.%s' % (function.__module__, function.__name__)
+        if not args and not listitems and not dictitems \
+                and isinstance(state, dict) and newobj:
+            return self.represent_mapping(
+                u'tag:yaml.org,2002:python/object:'+function_name, state)
+        if not listitems and not dictitems  \
+                and isinstance(state, dict) and not state:
+            return self.represent_sequence(tag+function_name, args)
+        value = {}
+        if args:
+            value['args'] = args
+        if state or not isinstance(state, dict):
+            value['state'] = state
+        if listitems:
+            value['listitems'] = listitems
+        if dictitems:
+            value['dictitems'] = dictitems
+        return self.represent_mapping(tag+function_name, value)
+
+if PY2:
+    Representer.add_representer(str,
+                                Representer.represent_str)
+
+    Representer.add_representer(unicode,
+                                Representer.represent_unicode)
+
+    Representer.add_representer(long,
+                                Representer.represent_long)
+
+Representer.add_representer(complex,
+                            Representer.represent_complex)
+
+Representer.add_representer(tuple,
+                            Representer.represent_tuple)
+
+Representer.add_representer(type,
+                            Representer.represent_name)
+
+if PY2:
+    Representer.add_representer(types.ClassType,
+                                Representer.represent_name)
+
+Representer.add_representer(types.FunctionType,
+                            Representer.represent_name)
+
+Representer.add_representer(types.BuiltinFunctionType,
+                            Representer.represent_name)
+
+Representer.add_representer(types.ModuleType,
+                            Representer.represent_module)
+
+if PY2:
+    Representer.add_multi_representer(types.InstanceType,
+                                      Representer.represent_instance)
+
+Representer.add_multi_representer(object,
+                                  Representer.represent_object)
+
+
+try:
+    from .comments import CommentedMap, CommentedOrderedMap, CommentedSeq, \
+        CommentedSet, comment_attrib, merge_attrib
+except ImportError:  # for Jython
+    from ruamel.yaml.comments import CommentedMap, CommentedOrderedMap, \
+        CommentedSeq, CommentedSet, comment_attrib, merge_attrib
+
+
+class RoundTripRepresenter(SafeRepresenter):
+    # need to add type here and write out the .comment
+    # in serializer and emitter
+
+    def __init__(self, default_style=None, default_flow_style=None):
+        if default_flow_style is None:
+            default_flow_style = False
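+            # round-trip output therefore defaults to block style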
+        SafeRepresenter.__init__(self, default_style=default_style,
+                                 default_flow_style=default_flow_style)
+
+    def represent_none(self, data):
+        return self.represent_scalar(u'tag:yaml.org,2002:null',
+                                     u'')
+
+    def represent_preserved_scalarstring(self, data):
+        tag = None
+        style = '|'
+        if PY2 and not isinstance(data, unicode):
+            data = unicode(data, 'ascii')
+        tag = u'tag:yaml.org,2002:str'
+        return self.represent_scalar(tag, data, style=style)
+
+    def represent_single_quoted_scalarstring(self, data):
+        tag = None
+        style = "'"
+        if PY2 and not isinstance(data, unicode):
+            data = unicode(data, 'ascii')
+        tag = u'tag:yaml.org,2002:str'
+        return self.represent_scalar(tag, data, style=style)
+
+    def represent_double_quoted_scalarstring(self, data):
+        tag = None
+        style = '"'
+        if PY2 and not isinstance(data, unicode):
+            data = unicode(data, 'ascii')
+        tag = u'tag:yaml.org,2002:str'
+        return self.represent_scalar(tag, data, style=style)
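+    # these three preserve the scalar style seen at load time, e.g. a
+    # value loaded as a literal block ('|') scalar round-trips as a
+    # literal block instead of being re-quoted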
+
+    def represent_sequence(self, tag, sequence, flow_style=None):
+        value = []
+        # if flow_style is None, the flow style explicitly attached to the
+        # object is used; if that is also None, the default flow style
+        # applies
+        try:
+            flow_style = sequence.fa.flow_style(flow_style)
+        except AttributeError:
+            flow_style = flow_style
+        try:
+            anchor = sequence.yaml_anchor()
+        except AttributeError:
+            anchor = None
+        node = SequenceNode(tag, value, flow_style=flow_style, anchor=anchor)
+        if self.alias_key is not None:
+            self.represented_objects[self.alias_key] = node
+        best_style = True
+        try:
+            comment = getattr(sequence, comment_attrib)
+            item_comments = comment.items
+            node.comment = comment.comment
+            try:
+                node.comment.append(comment.end)
+            except AttributeError:
+                pass
+        except AttributeError:
+            item_comments = {}
+        for idx, item in enumerate(sequence):
+            node_item = self.represent_data(item)
+            node_item.comment = item_comments.get(idx)
+            if not (isinstance(node_item, ScalarNode) and not node_item.style):
+                best_style = False
+            value.append(node_item)
+        if flow_style is None:
+            if self.default_flow_style is not None:
+                node.flow_style = self.default_flow_style
+            else:
+                node.flow_style = best_style
+        return node
+
+    def represent_mapping(self, tag, mapping, flow_style=None):
+        value = []
+        try:
+            flow_style = mapping.fa.flow_style(flow_style)
+        except AttributeError:
+            flow_style = flow_style
+        try:
+            anchor = mapping.yaml_anchor()
+        except AttributeError:
+            anchor = None
+        node = MappingNode(tag, value, flow_style=flow_style, anchor=anchor)
+        if self.alias_key is not None:
+            self.represented_objects[self.alias_key] = node
+        best_style = True
+        # no sorting! !!
+        try:
+            comment = getattr(mapping, comment_attrib)
+            node.comment = comment.comment
+            if node.comment and node.comment[1]:
+                for ct in node.comment[1]:
+                    ct.reset()
+            item_comments = comment.items
+            for v in item_comments.values():
+                if v and v[1]:
+                    for ct in v[1]:
+                        ct.reset()
+            try:
+                node.comment.append(comment.end)
+            except AttributeError:
+                pass
+        except AttributeError:
+            item_comments = {}
+        for item_key, item_value in mapping.items():
+            node_key = self.represent_key(item_key)
+            node_value = self.represent_data(item_value)
+            item_comment = item_comments.get(item_key)
+            if item_comment:
+                assert getattr(node_key, 'comment', None) is None
+                node_key.comment = item_comment[:2]
+                nvc = getattr(node_value, 'comment', None)
+                if nvc is not None:  # end comment already there
+                    nvc[0] = item_comment[2]
+                    nvc[1] = item_comment[3]
+                else:
+                    node_value.comment = item_comment[2:]
+            if not (isinstance(node_key, ScalarNode) and not node_key.style):
+                best_style = False
+            if not (isinstance(node_value, ScalarNode) and not
+                    node_value.style):
+                best_style = False
+            value.append((node_key, node_value))
+        if flow_style is None:
+            if self.default_flow_style is not None:
+                node.flow_style = self.default_flow_style
+            else:
+                node.flow_style = best_style
+        merge_list = [m[1] for m in getattr(mapping, merge_attrib, [])]
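+        # hedged: a single merge key re-emits as '<<: *anchor', while
+        # multiple merges re-emit as a flow sequence of aliases,
+        # e.g. '<<: [*a, *b]'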
+        if merge_list:
+            # the call to represent_data here marks the anchors as used,
+            # which ensures they are actually created in the output
+            if len(merge_list) == 1:
+                arg = self.represent_data(merge_list[0])
+            else:
+                arg = self.represent_data(merge_list)
+                arg.flow_style = True
+            value.insert(0,
+                         (ScalarNode(u'tag:yaml.org,2002:merge', '<<'), arg))
+        return node
+
+    def represent_omap(self, tag, omap, flow_style=None):
+        value = []
+        try:
+            flow_style = omap.fa.flow_style(flow_style)
+        except AttributeError:
+            flow_style = flow_style
+        try:
+            anchor = omap.yaml_anchor()
+        except AttributeError:
+            anchor = None
+        node = SequenceNode(tag, value, flow_style=flow_style, anchor=anchor)
+        if self.alias_key is not None:
+            self.represented_objects[self.alias_key] = node
+        best_style = True
+        try:
+            comment = getattr(omap, comment_attrib)
+            node.comment = comment.comment
+            if node.comment and node.comment[1]:
+                for ct in node.comment[1]:
+                    ct.reset()
+            item_comments = comment.items
+            for v in item_comments.values():
+                if v and v[1]:
+                    for ct in v[1]:
+                        ct.reset()
+            try:
+                node.comment.append(comment.end)
+            except AttributeError:
+                pass
+        except AttributeError:
+            item_comments = {}
+        for item_key in omap:
+            item_val = omap[item_key]
+            node_item = self.represent_data({item_key: item_val})
+            # node item has two scalars in value: node_key and node_value
+            item_comment = item_comments.get(item_key)
+            if item_comment:
+                if item_comment[1]:
+                    node_item.comment = [None, item_comment[1]]
+                assert getattr(node_item.value[0][0], 'comment', None) is None
+                node_item.value[0][0].comment = [item_comment[0], None]
+                nvc = getattr(node_item.value[0][1], 'comment', None)
+                if nvc is not None:  # end comment already there
+                    nvc[0] = item_comment[2]
+                    nvc[1] = item_comment[3]
+                else:
+                    node_item.value[0][1].comment = item_comment[2:]
+            # if not (isinstance(node_item, ScalarNode) \
+            #    and not node_item.style):
+            #     best_style = False
+            value.append(node_item)
+        if flow_style is None:
+            if self.default_flow_style is not None:
+                node.flow_style = self.default_flow_style
+            else:
+                node.flow_style = best_style
+        return node
+
+    def represent_set(self, setting):
+        flow_style = False
+        tag = u'tag:yaml.org,2002:set'
+        # return self.represent_mapping(tag, value)
+        value = []
+        flow_style = setting.fa.flow_style(flow_style)
+        try:
+            anchor = setting.yaml_anchor()
+        except AttributeError:
+            anchor = None
+        node = MappingNode(tag, value, flow_style=flow_style, anchor=anchor)
+        if self.alias_key is not None:
+            self.represented_objects[self.alias_key] = node
+        best_style = True
+        # no sorting! !!
+        try:
+            comment = getattr(setting, comment_attrib)
+            node.comment = comment.comment
+            if node.comment and node.comment[1]:
+                for ct in node.comment[1]:
+                    ct.reset()
+            item_comments = comment.items
+            for v in item_comments.values():
+                if v and v[1]:
+                    for ct in v[1]:
+                        ct.reset()
+            try:
+                node.comment.append(comment.end)
+            except AttributeError:
+                pass
+        except AttributeError:
+            item_comments = {}
+        for item_key in setting.odict:
+            node_key = self.represent_key(item_key)
+            node_value = self.represent_data(None)
+            item_comment = item_comments.get(item_key)
+            if item_comment:
+                assert getattr(node_key, 'comment', None) is None
+                node_key.comment = item_comment[:2]
+            node_key.style = node_value.style = "?"
+            if not (isinstance(node_key, ScalarNode) and not node_key.style):
+                best_style = False
+            if not (isinstance(node_value, ScalarNode) and not
+                    node_value.style):
+                best_style = False
+            value.append((node_key, node_value))
+        best_style = best_style
+        return node
+
+    def represent_dict(self, data):
+        """write out tag if saved on loading"""
+        try:
+            t = data.tag.value
+        except AttributeError:
+            t = None
+        if t:
+            while t and t[0] == '!':
+                t = t[1:]
+            tag = 'tag:yaml.org,2002:' + t
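+            # e.g. a tag saved on load as '!!set' is written back out
+            # as 'tag:yaml.org,2002:set'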
+        else:
+            tag = u'tag:yaml.org,2002:map'
+        return self.represent_mapping(tag, data)
+
+
+RoundTripRepresenter.add_representer(type(None),
+                                     RoundTripRepresenter.represent_none)
+
+RoundTripRepresenter.add_representer(
+    PreservedScalarString,
+    RoundTripRepresenter.represent_preserved_scalarstring)
+
+RoundTripRepresenter.add_representer(
+    SingleQuotedScalarString,
+    RoundTripRepresenter.represent_single_quoted_scalarstring)
+
+RoundTripRepresenter.add_representer(
+    DoubleQuotedScalarString,
+    RoundTripRepresenter.represent_double_quoted_scalarstring)
+
+RoundTripRepresenter.add_representer(CommentedSeq,
+                                     RoundTripRepresenter.represent_list)
+
+RoundTripRepresenter.add_representer(CommentedMap,
+                                     RoundTripRepresenter.represent_dict)
+
+RoundTripRepresenter.add_representer(CommentedOrderedMap,
+                                     RoundTripRepresenter.represent_ordereddict)
+
+if sys.version_info >= (2, 7):
+    import collections
+    RoundTripRepresenter.add_representer(collections.OrderedDict,
+                                         RoundTripRepresenter.represent_ordereddict)
+
+RoundTripRepresenter.add_representer(CommentedSet,
+                                     RoundTripRepresenter.represent_set)
diff --git a/lib/spack/external/ruamel/yaml/resolver.py b/lib/spack/external/ruamel/yaml/resolver.py
new file mode 100644
index 0000000000..84227072e0
--- /dev/null
+++ b/lib/spack/external/ruamel/yaml/resolver.py
@@ -0,0 +1,397 @@
+# coding: utf-8
+
+from __future__ import absolute_import
+
+import re
+
+try:
+    from .error import *                               # NOQA
+    from .nodes import *                               # NOQA
+    from .compat import string_types
+except (ImportError, ValueError):  # for Jython
+    from ruamel.yaml.error import *                               # NOQA
+    from ruamel.yaml.nodes import *                               # NOQA
+    from ruamel.yaml.compat import string_types
+
+__all__ = ['BaseResolver', 'Resolver', 'VersionedResolver']
+
+
+_DEFAULT_VERSION = (1, 2)
+
+
+class ResolverError(YAMLError):
+    pass
+
+
+class BaseResolver(object):
+
+    DEFAULT_SCALAR_TAG = u'tag:yaml.org,2002:str'
+    DEFAULT_SEQUENCE_TAG = u'tag:yaml.org,2002:seq'
+    DEFAULT_MAPPING_TAG = u'tag:yaml.org,2002:map'
+
+    yaml_implicit_resolvers = {}
+    yaml_path_resolvers = {}
+
+    def __init__(self):
+        self._loader_version = None
+        self.resolver_exact_paths = []
+        self.resolver_prefix_paths = []
+
+    @classmethod
+    def add_implicit_resolver(cls, tag, regexp, first):
+        if 'yaml_implicit_resolvers' not in cls.__dict__:
+            cls.yaml_implicit_resolvers = cls.yaml_implicit_resolvers.copy()
+        if first is None:
+            first = [None]
+        for ch in first:
+            cls.yaml_implicit_resolvers.setdefault(ch, []).append(
+                (tag, regexp))
+
+    @classmethod
+    def add_path_resolver(cls, tag, path, kind=None):
+        # Note: `add_path_resolver` is experimental.  The API could be changed.
+        # `new_path` is a pattern that is matched against the path from the
+        # root to the node that is being considered.  `new_path` elements are
+        # tuples `(node_check, index_check)`.  `node_check` is a node class:
+        # `ScalarNode`, `SequenceNode`, `MappingNode` or `None`.  `None`
+        # matches any kind of a node.  `index_check` could be `None`, a boolean
+        # value, a string value, or a number.  `None` and `False` match against
+        # any _value_ of sequence and mapping nodes.  `True` matches against
+        # any _key_ of a mapping node.  A string `index_check` matches against
+        # a mapping value that corresponds to a scalar key whose content is
+        # equal to the `index_check` value.  An integer `index_check` matches
+        # against a sequence value with the index equal to `index_check`.
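+        #
+        # A hypothetical usage sketch (names are illustrative only):
+        #
+        #     MyResolver.add_path_resolver(
+        #         u'!package', [u'packages'], kind=MappingNode)
+        #
+        # would resolve the mapping stored under the top-level 'packages'
+        # key to the tag '!package'.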
+        if 'yaml_path_resolvers' not in cls.__dict__:
+            cls.yaml_path_resolvers = cls.yaml_path_resolvers.copy()
+        new_path = []
+        for element in path:
+            if isinstance(element, (list, tuple)):
+                if len(element) == 2:
+                    node_check, index_check = element
+                elif len(element) == 1:
+                    node_check = element[0]
+                    index_check = True
+                else:
+                    raise ResolverError("Invalid path element: %s" % element)
+            else:
+                node_check = None
+                index_check = element
+            if node_check is str:
+                node_check = ScalarNode
+            elif node_check is list:
+                node_check = SequenceNode
+            elif node_check is dict:
+                node_check = MappingNode
+            elif node_check not in [ScalarNode, SequenceNode, MappingNode]  \
+                    and not isinstance(node_check, string_types)  \
+                    and node_check is not None:
+                raise ResolverError("Invalid node checker: %s" % node_check)
+            if not isinstance(index_check, (string_types, int))   \
+                    and index_check is not None:
+                raise ResolverError("Invalid index checker: %s" % index_check)
+            new_path.append((node_check, index_check))
+        if kind is str:
+            kind = ScalarNode
+        elif kind is list:
+            kind = SequenceNode
+        elif kind is dict:
+            kind = MappingNode
+        elif kind not in [ScalarNode, SequenceNode, MappingNode]    \
+                and kind is not None:
+            raise ResolverError("Invalid node kind: %s" % kind)
+        cls.yaml_path_resolvers[tuple(new_path), kind] = tag
+
+    def descend_resolver(self, current_node, current_index):
+        if not self.yaml_path_resolvers:
+            return
+        exact_paths = {}
+        prefix_paths = []
+        if current_node:
+            depth = len(self.resolver_prefix_paths)
+            for path, kind in self.resolver_prefix_paths[-1]:
+                if self.check_resolver_prefix(depth, path, kind,
+                                              current_node, current_index):
+                    if len(path) > depth:
+                        prefix_paths.append((path, kind))
+                    else:
+                        exact_paths[kind] = self.yaml_path_resolvers[path,
+                                                                     kind]
+        else:
+            for path, kind in self.yaml_path_resolvers:
+                if not path:
+                    exact_paths[kind] = self.yaml_path_resolvers[path, kind]
+                else:
+                    prefix_paths.append((path, kind))
+        self.resolver_exact_paths.append(exact_paths)
+        self.resolver_prefix_paths.append(prefix_paths)
+
+    def ascend_resolver(self):
+        if not self.yaml_path_resolvers:
+            return
+        self.resolver_exact_paths.pop()
+        self.resolver_prefix_paths.pop()
+
+    def check_resolver_prefix(self, depth, path, kind,
+                              current_node, current_index):
+        node_check, index_check = path[depth-1]
+        if isinstance(node_check, string_types):
+            if current_node.tag != node_check:
+                return
+        elif node_check is not None:
+            if not isinstance(current_node, node_check):
+                return
+        if index_check is True and current_index is not None:
+            return
+        if (index_check is False or index_check is None)    \
+                and current_index is None:
+            return
+        if isinstance(index_check, string_types):
+            if not (isinstance(current_index, ScalarNode) and
+                    index_check == current_index.value):
+                return
+        elif isinstance(index_check, int) and not isinstance(index_check,
+                                                             bool):
+            if index_check != current_index:
+                return
+        return True
+
+    def resolve(self, kind, value, implicit):
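+        # implicit resolvers are keyed on the first character of the value,
+        # so only a small subset of the regexps has to be tried per scalar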
+        if kind is ScalarNode and implicit[0]:
+            if value == u'':
+                resolvers = self.yaml_implicit_resolvers.get(u'', [])
+            else:
+                resolvers = self.yaml_implicit_resolvers.get(value[0], [])
+            resolvers += self.yaml_implicit_resolvers.get(None, [])
+            for tag, regexp in resolvers:
+                if regexp.match(value):
+                    return tag
+            implicit = implicit[1]
+        if self.yaml_path_resolvers:
+            exact_paths = self.resolver_exact_paths[-1]
+            if kind in exact_paths:
+                return exact_paths[kind]
+            if None in exact_paths:
+                return exact_paths[None]
+        if kind is ScalarNode:
+            return self.DEFAULT_SCALAR_TAG
+        elif kind is SequenceNode:
+            return self.DEFAULT_SEQUENCE_TAG
+        elif kind is MappingNode:
+            return self.DEFAULT_MAPPING_TAG
+
+    @property
+    def processing_version(self):
+        return None
+
+
+class Resolver(BaseResolver):
+    pass
+
+Resolver.add_implicit_resolver(
+    u'tag:yaml.org,2002:bool',
+    re.compile(u'''^(?:yes|Yes|YES|no|No|NO
+    |true|True|TRUE|false|False|FALSE
+    |on|On|ON|off|Off|OFF)$''', re.X),
+    list(u'yYnNtTfFoO'))
+
+Resolver.add_implicit_resolver(
+    u'tag:yaml.org,2002:float',
+    re.compile(u'''^(?:
+     [-+]?(?:[0-9][0-9_]*)\\.[0-9_]*(?:[eE][-+]?[0-9]+)?
+    |[-+]?(?:[0-9][0-9_]*)(?:[eE][-+]?[0-9]+)
+    |\\.[0-9_]+(?:[eE][-+][0-9]+)?
+    |[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+\\.[0-9_]*
+    |[-+]?\\.(?:inf|Inf|INF)
+    |\\.(?:nan|NaN|NAN))$''', re.X),
+    list(u'-+0123456789.'))
+
+Resolver.add_implicit_resolver(
+    u'tag:yaml.org,2002:int',
+    re.compile(u'''^(?:[-+]?0b[0-1_]+
+    |[-+]?0o?[0-7_]+
+    |[-+]?(?:0|[1-9][0-9_]*)
+    |[-+]?0x[0-9a-fA-F_]+
+    |[-+]?[1-9][0-9_]*(?::[0-5]?[0-9])+)$''', re.X),
+    list(u'-+0123456789'))
+
+Resolver.add_implicit_resolver(
+    u'tag:yaml.org,2002:merge',
+    re.compile(u'^(?:<<)$'),
+    [u'<'])
+
+Resolver.add_implicit_resolver(
+    u'tag:yaml.org,2002:null',
+    re.compile(u'''^(?: ~
+    |null|Null|NULL
+    | )$''', re.X),
+    [u'~', u'n', u'N', u''])
+
+Resolver.add_implicit_resolver(
+    u'tag:yaml.org,2002:timestamp',
+    re.compile(u'''^(?:[0-9][0-9][0-9][0-9]-[0-9][0-9]-[0-9][0-9]
+    |[0-9][0-9][0-9][0-9] -[0-9][0-9]? -[0-9][0-9]?
+    (?:[Tt]|[ \\t]+)[0-9][0-9]?
+    :[0-9][0-9] :[0-9][0-9] (?:\\.[0-9]*)?
+    (?:[ \\t]*(?:Z|[-+][0-9][0-9]?(?::[0-9][0-9])?))?)$''', re.X),
+    list(u'0123456789'))
+
+Resolver.add_implicit_resolver(
+    u'tag:yaml.org,2002:value',
+    re.compile(u'^(?:=)$'),
+    [u'='])
+
+# The following resolver is only for documentation purposes. It cannot work
+# because plain scalars cannot start with '!', '&', or '*'.
+Resolver.add_implicit_resolver(
+    u'tag:yaml.org,2002:yaml',
+    re.compile(u'^(?:!|&|\\*)$'),
+    list(u'!&*'))
+
+# resolvers consist of
+# - a list of applicable versions
+# - a tag
+# - a regexp
+# - a list of first characters to match
+implicit_resolvers = [
+    ([(1, 2)],
+        u'tag:yaml.org,2002:bool',
+        re.compile(u'''^(?:true|True|TRUE|false|False|FALSE)$''', re.X),
+        list(u'tTfF')),
+    ([(1, 1)],
+        u'tag:yaml.org,2002:bool',
+        re.compile(u'''^(?:yes|Yes|YES|no|No|NO
+        |true|True|TRUE|false|False|FALSE
+        |on|On|ON|off|Off|OFF)$''', re.X),
+        list(u'yYnNtTfFoO')),
+    ([(1, 2), (1, 1)],
+        u'tag:yaml.org,2002:float',
+        re.compile(u'''^(?:
+         [-+]?(?:[0-9][0-9_]*)\\.[0-9_]*(?:[eE][-+]?[0-9]+)?
+        |[-+]?(?:[0-9][0-9_]*)(?:[eE][-+]?[0-9]+)
+        |\\.[0-9_]+(?:[eE][-+][0-9]+)?
+        |[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+\\.[0-9_]*
+        |[-+]?\\.(?:inf|Inf|INF)
+        |\\.(?:nan|NaN|NAN))$''', re.X),
+        list(u'-+0123456789.')),
+    ([(1, 2)],
+        u'tag:yaml.org,2002:int',
+        re.compile(u'''^(?:[-+]?0b[0-1_]+
+        |[-+]?0o?[0-7_]+
+        |[-+]?(?:0|[1-9][0-9_]*)
+        |[-+]?0x[0-9a-fA-F_]+)$''', re.X),
+        list(u'-+0123456789')),
+    ([(1, 1)],
+        u'tag:yaml.org,2002:int',
+        re.compile(u'''^(?:[-+]?0b[0-1_]+
+        |[-+]?0o?[0-7_]+
+        |[-+]?(?:0|[1-9][0-9_]*)
+        |[-+]?0x[0-9a-fA-F_]+
+        |[-+]?[1-9][0-9_]*(?::[0-5]?[0-9])+)$''', re.X),
+        list(u'-+0123456789')),
+    ([(1, 2), (1, 1)],
+        u'tag:yaml.org,2002:merge',
+        re.compile(u'^(?:<<)$'),
+        [u'<']),
+    ([(1, 2), (1, 1)],
+        u'tag:yaml.org,2002:null',
+        re.compile(u'''^(?: ~
+        |null|Null|NULL
+        | )$''', re.X),
+        [u'~', u'n', u'N', u'']),
+    ([(1, 2), (1, 1)],
+        u'tag:yaml.org,2002:timestamp',
+        re.compile(u'''^(?:[0-9][0-9][0-9][0-9]-[0-9][0-9]-[0-9][0-9]
+        |[0-9][0-9][0-9][0-9] -[0-9][0-9]? -[0-9][0-9]?
+        (?:[Tt]|[ \\t]+)[0-9][0-9]?
+        :[0-9][0-9] :[0-9][0-9] (?:\\.[0-9]*)?
+        (?:[ \\t]*(?:Z|[-+][0-9][0-9]?(?::[0-9][0-9])?))?)$''', re.X),
+        list(u'0123456789')),
+    ([(1, 2), (1, 1)],
+        u'tag:yaml.org,2002:value',
+        re.compile(u'^(?:=)$'),
+        [u'=']),
+    # The following resolver is only for documentation purposes. It cannot work
+    # because plain scalars cannot start with '!', '&', or '*'.
+    ([(1, 2), (1, 1)],
+        u'tag:yaml.org,2002:yaml',
+        re.compile(u'^(?:!|&|\\*)$'),
+        list(u'!&*')),
+]
+
+
+class VersionedResolver(BaseResolver):
+    """
+    contrary to the "normal" resolver, this resolver delays loading the
+    pattern matching rules until the version of the document being
+    processed is known. That way it can decide whether to load the 1.1
+    rules or the (default) 1.2 rules, which no longer support octals
+    without the 0o prefix, sexagesimals, or Yes/No/On/Off booleans.
+    """
+
+    def __init__(self, version=None):
+        BaseResolver.__init__(self)
+        self._loader_version = self.get_loader_version(version)
+        self._version_implicit_resolver = {}
+
+    def add_version_implicit_resolver(self, version, tag, regexp, first):
+        if first is None:
+            first = [None]
+        impl_resolver = self._version_implicit_resolver.setdefault(version, {})
+        for ch in first:
+            impl_resolver.setdefault(ch, []).append((tag, regexp))
+
+    def get_loader_version(self, version):
+        if version is None or isinstance(version, tuple):
+            return version
+        if isinstance(version, list):
+            return tuple(version)
+        # assume string
+        return tuple(map(int, version.split(u'.')))
+
+    @property
+    def resolver(self):
+        """
+        select the resolver based on the version we are parsing
+        """
+        version = self.processing_version
+        if version not in self._version_implicit_resolver:
+            for x in implicit_resolvers:
+                if version in x[0]:
+                    self.add_version_implicit_resolver(version, x[1], x[2], x[3])
+        return self._version_implicit_resolver[version]
+
+    def resolve(self, kind, value, implicit):
+        if kind is ScalarNode and implicit[0]:
+            if value == u'':
+                resolvers = self.resolver.get(u'', [])
+            else:
+                resolvers = self.resolver.get(value[0], [])
+            resolvers += self.resolver.get(None, [])
+            for tag, regexp in resolvers:
+                if regexp.match(value):
+                    return tag
+            implicit = implicit[1]
+        if self.yaml_path_resolvers:
+            exact_paths = self.resolver_exact_paths[-1]
+            if kind in exact_paths:
+                return exact_paths[kind]
+            if None in exact_paths:
+                return exact_paths[None]
+        if kind is ScalarNode:
+            return self.DEFAULT_SCALAR_TAG
+        elif kind is SequenceNode:
+            return self.DEFAULT_SEQUENCE_TAG
+        elif kind is MappingNode:
+            return self.DEFAULT_MAPPING_TAG
+
+    @property
+    def processing_version(self):
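+        # resolution order: the version from a %YAML directive seen while
+        # parsing (yaml_version), the version handed to the dumper
+        # (use_version), the loader's version argument, then the 1.2 default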
+        try:
+            version = self.yaml_version
+        except AttributeError:
+            # dumping
+            version = self.use_version
+        if version is None:
+            version = self._loader_version
+            if version is None:
+                version = _DEFAULT_VERSION
+        return version
diff --git a/lib/spack/external/ruamel/yaml/scalarstring.py b/lib/spack/external/ruamel/yaml/scalarstring.py
new file mode 100644
index 0000000000..d3abaff4db
--- /dev/null
+++ b/lib/spack/external/ruamel/yaml/scalarstring.py
@@ -0,0 +1,60 @@
+# coding: utf-8
+
+from __future__ import absolute_import
+from __future__ import print_function
+
+__all__ = ["ScalarString", "PreservedScalarString", "SingleQuotedScalarString",
+           "DoubleQuotedScalarString"]
+
+try:
+    from .compat import text_type
+except (ImportError, ValueError):  # for Jython
+    from ruamel.yaml.compat import text_type
+
+
+class ScalarString(text_type):
+    def __new__(cls, *args, **kw):
+        return text_type.__new__(cls, *args, **kw)
+
+
+class PreservedScalarString(ScalarString):
+    def __new__(cls, value):
+        return ScalarString.__new__(cls, value)
+
+
+class SingleQuotedScalarString(ScalarString):
+    def __new__(cls, value):
+        return ScalarString.__new__(cls, value)
+
+
+class DoubleQuotedScalarString(ScalarString):
+    def __new__(cls, value):
+        return ScalarString.__new__(cls, value)
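+
+# these subclasses add no behaviour of their own: the subtype alone records
+# which style (literal block, single quoted, or double quoted) the
+# round-trip representer should emit for the string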
+
+
+def preserve_literal(s):
+    return PreservedScalarString(s.replace('\r\n', '\n').replace('\r', '\n'))
+
+
+def walk_tree(base):
+    """
+    the routine here walks over a simple yaml tree (recursing in
+    dict values and list items) and converts strings that
+    have multiple lines to literal scalars
+    """
+    from ruamel.yaml.compat import string_types
+
+    if isinstance(base, dict):
+        for k in base:
+            v = base[k]
+            if isinstance(v, string_types) and '\n' in v:
+                base[k] = preserve_literal(v)
+            else:
+                walk_tree(v)
+    elif isinstance(base, list):
+        for idx, elem in enumerate(base):
+            if isinstance(elem, string_types) and '\n' in elem:
+                base[idx] = preserve_literal(elem)
+            else:
+                walk_tree(elem)
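+
+# A minimal usage sketch (hypothetical data):
+#
+#     d = {'a': 'line one\nline two\n'}
+#     walk_tree(d)
+#
+# afterwards d['a'] is a PreservedScalarString, which the round-trip
+# dumper writes as a literal block scalar (|)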
diff --git a/lib/spack/external/yaml/lib/yaml/scanner.py b/lib/spack/external/ruamel/yaml/scanner.py
similarity index 73%
rename from lib/spack/external/yaml/lib/yaml/scanner.py
rename to lib/spack/external/ruamel/yaml/scanner.py
index 834f662a4c..61feb34043 100644
--- a/lib/spack/external/yaml/lib/yaml/scanner.py
+++ b/lib/spack/external/ruamel/yaml/scanner.py
@@ -1,3 +1,7 @@
+# coding: utf-8
+
+from __future__ import absolute_import
+from __future__ import print_function
 
 # Scanner produces tokens of the following types:
 # STREAM-START
@@ -21,17 +25,28 @@
 # TAG(value)
 # SCALAR(value, plain, style)
 #
+# RoundTripScanner
+# COMMENT(value)
+#
 # Read comments in the Scanner code for more details.
 #
 
-__all__ = ['Scanner', 'ScannerError']
+__all__ = ['Scanner', 'RoundTripScanner', 'ScannerError']
+
+try:
+    from .error import MarkedYAMLError
+    from .tokens import *                           # NOQA
+    from .compat import utf8, unichr, PY3
+except (ImportError, ValueError):  # for Jython
+    from ruamel.yaml.error import MarkedYAMLError
+    from ruamel.yaml.tokens import *                # NOQA
+    from ruamel.yaml.compat import utf8, unichr, PY3
 
-from error import MarkedYAMLError
-from tokens import *
 
 class ScannerError(MarkedYAMLError):
     pass
 
+
 class SimpleKey(object):
     # See below simple keys treatment.
 
@@ -43,6 +58,7 @@ def __init__(self, token_number, required, index, line, column, mark):
         self.column = column
         self.mark = mark
 
+
 class Scanner(object):
 
     def __init__(self):
@@ -52,9 +68,9 @@ def __init__(self):
         # input data to Unicode. It also adds NUL to the end.
         #
         # Reader supports the following methods
-        #   self.peek(i=0)       # peek the next i-th character
-        #   self.prefix(l=1)     # peek the next l characters
-        #   self.forward(l=1)    # read the next l characters and move the pointer.
+        #   self.peek(i=0)    # peek the next i-th character
+        #   self.prefix(l=1)  # peek the next l characters
+        #   self.forward(l=1) # read the next l characters and move the pointer
 
         # Had we reached the end of the stream?
         self.done = False
@@ -153,7 +169,10 @@ def need_more_tokens(self):
     def fetch_more_tokens(self):
 
         # Eat whitespaces and comments until we reach the next token.
-        self.scan_to_next_token()
+        comment = self.scan_to_next_token()
+
+        if comment is not None:  # never happens for base scanner
+            return self.fetch_comment(comment)
 
         # Remove obsolete possible simple keys.
         self.stale_possible_simple_keys()
@@ -182,8 +201,8 @@ def fetch_more_tokens(self):
             return self.fetch_document_end()
 
         # TODO: support for BOM within a stream.
-        #if ch == u'\uFEFF':
-        #    return self.fetch_bom()    <-- issue BOMToken
+        # if ch == u'\uFEFF':
+        #     return self.fetch_bom()    <-- issue BOMToken
 
         # Note: the order of the following checks is NOT significant.
 
@@ -253,8 +272,8 @@ def fetch_more_tokens(self):
 
         # No? It's an error. Let's produce a nice error message.
         raise ScannerError("while scanning for the next token", None,
-                "found character %r that cannot start any token"
-                % ch.encode('utf-8'), self.get_mark())
+                           "found character %r that cannot start any token"
+                           % utf8(ch), self.get_mark())
 
     # Simple keys treatment.
 
@@ -280,13 +299,14 @@ def stale_possible_simple_keys(self):
         # - should be no longer than 1024 characters.
         # Disabling this procedure will allow simple keys of any length and
         # height (may cause problems if indentation is broken though).
-        for level in self.possible_simple_keys.keys():
+        for level in list(self.possible_simple_keys):
             key = self.possible_simple_keys[level]
             if key.line != self.line  \
                     or self.index-key.index > 1024:
                 if key.required:
-                    raise ScannerError("while scanning a simple key", key.mark,
-                            "could not find expected ':'", self.get_mark())
+                    raise ScannerError(
+                        "while scanning a simple key", key.mark,
+                        "could not find expected ':'", self.get_mark())
                 del self.possible_simple_keys[level]
 
     def save_possible_simple_key(self):
@@ -302,18 +322,20 @@ def save_possible_simple_key(self):
         if self.allow_simple_key:
             self.remove_possible_simple_key()
             token_number = self.tokens_taken+len(self.tokens)
-            key = SimpleKey(token_number, required,
-                    self.index, self.line, self.column, self.get_mark())
+            key = SimpleKey(
+                token_number, required,
+                self.index, self.line, self.column, self.get_mark())
             self.possible_simple_keys[self.flow_level] = key
 
     def remove_possible_simple_key(self):
         # Remove the saved possible key position at the current flow level.
         if self.flow_level in self.possible_simple_keys:
             key = self.possible_simple_keys[self.flow_level]
-            
+
             if key.required:
-                raise ScannerError("while scanning a simple key", key.mark,
-                        "could not find expected ':'", self.get_mark())
+                raise ScannerError(
+                    "while scanning a simple key", key.mark,
+                    "could not find expected ':'", self.get_mark())
 
             del self.possible_simple_keys[self.flow_level]
 
@@ -321,16 +343,17 @@ def remove_possible_simple_key(self):
 
     def unwind_indent(self, column):
 
-        ## In flow context, tokens should respect indentation.
-        ## Actually the condition should be `self.indent >= column` according to
-        ## the spec. But this condition will prohibit intuitively correct
-        ## constructions such as
-        ## key : {
-        ## }
-        #if self.flow_level and self.indent > column:
-        #    raise ScannerError(None, None,
-        #            "invalid intendation or unclosed '[' or '{'",
-        #            self.get_mark())
+        # In flow context, tokens should respect indentation.
+        # Actually the condition should be `self.indent >= column` according to
+        # the spec. But this condition will prohibit intuitively correct
+        # constructions such as
+        # key : {
+        # }
+        # ####
+        # if self.flow_level and self.indent > column:
+        #     raise ScannerError(None, None,
+        #             "invalid intendation or unclosed '[' or '{'",
+        #             self.get_mark())
 
         # In the flow context, indentation is ignored. We make the scanner less
         # restrictive then specification requires.
@@ -359,11 +382,10 @@ def fetch_stream_start(self):
 
         # Read the token.
         mark = self.get_mark()
-        
+
         # Add STREAM-START.
         self.tokens.append(StreamStartToken(mark, mark,
-            encoding=self.encoding))
-        
+                                            encoding=self.encoding))
 
     def fetch_stream_end(self):
 
@@ -377,7 +399,7 @@ def fetch_stream_end(self):
 
         # Read the token.
         mark = self.get_mark()
-        
+
         # Add STREAM-END.
         self.tokens.append(StreamEndToken(mark, mark))
 
@@ -385,7 +407,7 @@ def fetch_stream_end(self):
         self.done = True
 
     def fetch_directive(self):
-        
+
         # Set the current indentation to -1.
         self.unwind_indent(-1)
 
@@ -486,8 +508,8 @@ def fetch_block_entry(self):
             # Are we allowed to start a new entry?
             if not self.allow_simple_key:
                 raise ScannerError(None, None,
-                        "sequence entries are not allowed here",
-                        self.get_mark())
+                                   "sequence entries are not allowed here",
+                                   self.get_mark())
 
             # We may need to add BLOCK-SEQUENCE-START.
             if self.add_indent(self.column):
@@ -512,15 +534,15 @@ def fetch_block_entry(self):
         self.tokens.append(BlockEntryToken(start_mark, end_mark))
 
     def fetch_key(self):
-        
+
         # Block context needs additional checks.
         if not self.flow_level:
 
             # Are we allowed to start a key (not necessarily a simple one)?
             if not self.allow_simple_key:
                 raise ScannerError(None, None,
-                        "mapping keys are not allowed here",
-                        self.get_mark())
+                                   "mapping keys are not allowed here",
+                                   self.get_mark())
 
             # We may need to add BLOCK-MAPPING-START.
             if self.add_indent(self.column):
@@ -543,26 +565,26 @@ def fetch_value(self):
 
         # Do we determine a simple key?
         if self.flow_level in self.possible_simple_keys:
-
             # Add KEY.
             key = self.possible_simple_keys[self.flow_level]
             del self.possible_simple_keys[self.flow_level]
             self.tokens.insert(key.token_number-self.tokens_taken,
-                    KeyToken(key.mark, key.mark))
+                               KeyToken(key.mark, key.mark))
 
             # If this key starts a new block mapping, we need to add
             # BLOCK-MAPPING-START.
             if not self.flow_level:
                 if self.add_indent(key.column):
-                    self.tokens.insert(key.token_number-self.tokens_taken,
-                            BlockMappingStartToken(key.mark, key.mark))
+                    self.tokens.insert(
+                        key.token_number-self.tokens_taken,
+                        BlockMappingStartToken(key.mark, key.mark))
 
             # There cannot be two simple keys one after another.
             self.allow_simple_key = False
 
         # It must be a part of a complex key.
         else:
-            
+
             # Block context needs additional checks.
             # (Do we really need them? They will be caught by the parser
             # anyway.)
@@ -572,8 +594,8 @@ def fetch_value(self):
                 # we can start a simple key.
                 if not self.allow_simple_key:
                     raise ScannerError(None, None,
-                            "mapping values are not allowed here",
-                            self.get_mark())
+                                       "mapping values are not allowed here",
+                                       self.get_mark())
 
             # If this value starts a new block mapping, we need to add
             # BLOCK-MAPPING-START.  It will be detected as an error later by
@@ -726,7 +748,6 @@ def check_value(self):
             return self.peek(1) in u'\0 \t\r\n\x85\u2028\u2029'
 
     def check_plain(self):
-
         # A plain scalar may start with any non-space character except:
         #   '-', '?', ':', ',', '[', ']', '{', '}',
         #   '#', '&', '*', '!', '|', '>', '\'', '\"',
@@ -740,9 +761,9 @@ def check_plain(self):
         # '-' character) because we want the flow context to be space
         # independent.
         ch = self.peek()
-        return ch not in u'\0 \t\r\n\x85\u2028\u2029-?:,[]{}#&*!|>\'\"%@`'  \
-                or (self.peek(1) not in u'\0 \t\r\n\x85\u2028\u2029'
-                        and (ch == u'-' or (not self.flow_level and ch in u'?:')))
+        return ch not in u'\0 \t\r\n\x85\u2028\u2029-?:,[]{}#&*!|>\'\"%@`' or \
+            (self.peek(1) not in u'\0 \t\r\n\x85\u2028\u2029' and
+             (ch == u'-' or (not self.flow_level and ch in u'?:')))
 
     # Scanners.
 
@@ -804,21 +825,23 @@ def scan_directive_name(self, start_mark):
         # See the specification for details.
         length = 0
         ch = self.peek(length)
-        while u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z'    \
-                or ch in u'-_':
+        while u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \
+                or ch in u'-_:.':
             length += 1
             ch = self.peek(length)
         if not length:
-            raise ScannerError("while scanning a directive", start_mark,
-                    "expected alphabetic or numeric character, but found %r"
-                    % ch.encode('utf-8'), self.get_mark())
+            raise ScannerError(
+                "while scanning a directive", start_mark,
+                "expected alphabetic or numeric character, but found %r"
+                % utf8(ch), self.get_mark())
         value = self.prefix(length)
         self.forward(length)
         ch = self.peek()
         if ch not in u'\0 \r\n\x85\u2028\u2029':
-            raise ScannerError("while scanning a directive", start_mark,
-                    "expected alphabetic or numeric character, but found %r"
-                    % ch.encode('utf-8'), self.get_mark())
+            raise ScannerError(
+                "while scanning a directive", start_mark,
+                "expected alphabetic or numeric character, but found %r"
+                % utf8(ch), self.get_mark())
         return value
 
     def scan_yaml_directive_value(self, start_mark):
@@ -827,26 +850,29 @@ def scan_yaml_directive_value(self, start_mark):
             self.forward()
         major = self.scan_yaml_directive_number(start_mark)
         if self.peek() != '.':
-            raise ScannerError("while scanning a directive", start_mark,
-                    "expected a digit or '.', but found %r"
-                    % self.peek().encode('utf-8'),
-                    self.get_mark())
+            raise ScannerError(
+                "while scanning a directive", start_mark,
+                "expected a digit or '.', but found %r"
+                % utf8(self.peek()),
+                self.get_mark())
         self.forward()
         minor = self.scan_yaml_directive_number(start_mark)
         if self.peek() not in u'\0 \r\n\x85\u2028\u2029':
-            raise ScannerError("while scanning a directive", start_mark,
-                    "expected a digit or ' ', but found %r"
-                    % self.peek().encode('utf-8'),
-                    self.get_mark())
+            raise ScannerError(
+                "while scanning a directive", start_mark,
+                "expected a digit or ' ', but found %r"
+                % utf8(self.peek()),
+                self.get_mark())
         return (major, minor)
 
     def scan_yaml_directive_number(self, start_mark):
         # See the specification for details.
         ch = self.peek()
         if not (u'0' <= ch <= u'9'):
-            raise ScannerError("while scanning a directive", start_mark,
-                    "expected a digit, but found %r" % ch.encode('utf-8'),
-                    self.get_mark())
+            raise ScannerError(
+                "while scanning a directive", start_mark,
+                "expected a digit, but found %r" % utf8(ch),
+                self.get_mark())
         length = 0
         while u'0' <= self.peek(length) <= u'9':
             length += 1
@@ -870,8 +896,8 @@ def scan_tag_directive_handle(self, start_mark):
         ch = self.peek()
         if ch != u' ':
             raise ScannerError("while scanning a directive", start_mark,
-                    "expected ' ', but found %r" % ch.encode('utf-8'),
-                    self.get_mark())
+                               "expected ' ', but found %r" % utf8(ch),
+                               self.get_mark())
         return value
 
     def scan_tag_directive_prefix(self, start_mark):
@@ -880,8 +906,8 @@ def scan_tag_directive_prefix(self, start_mark):
         ch = self.peek()
         if ch not in u'\0 \r\n\x85\u2028\u2029':
             raise ScannerError("while scanning a directive", start_mark,
-                    "expected ' ', but found %r" % ch.encode('utf-8'),
-                    self.get_mark())
+                               "expected ' ', but found %r" % utf8(ch),
+                               self.get_mark())
         return value
 
     def scan_directive_ignored_line(self, start_mark):
@@ -893,9 +919,10 @@ def scan_directive_ignored_line(self, start_mark):
                 self.forward()
         ch = self.peek()
         if ch not in u'\0\r\n\x85\u2028\u2029':
-            raise ScannerError("while scanning a directive", start_mark,
-                    "expected a comment or a line break, but found %r"
-                        % ch.encode('utf-8'), self.get_mark())
+            raise ScannerError(
+                "while scanning a directive", start_mark,
+                "expected a comment or a line break, but found %r"
+                % utf8(ch), self.get_mark())
         self.scan_line_break()
 
     def scan_anchor(self, TokenClass):
@@ -916,21 +943,23 @@ def scan_anchor(self, TokenClass):
         self.forward()
         length = 0
         ch = self.peek(length)
-        while u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z'    \
+        while u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \
                 or ch in u'-_':
             length += 1
             ch = self.peek(length)
         if not length:
-            raise ScannerError("while scanning an %s" % name, start_mark,
-                    "expected alphabetic or numeric character, but found %r"
-                    % ch.encode('utf-8'), self.get_mark())
+            raise ScannerError(
+                "while scanning an %s" % name, start_mark,
+                "expected alphabetic or numeric character, but found %r"
+                % utf8(ch), self.get_mark())
         value = self.prefix(length)
         self.forward(length)
         ch = self.peek()
         if ch not in u'\0 \t\r\n\x85\u2028\u2029?:,]}%@`':
-            raise ScannerError("while scanning an %s" % name, start_mark,
-                    "expected alphabetic or numeric character, but found %r"
-                    % ch.encode('utf-8'), self.get_mark())
+            raise ScannerError(
+                "while scanning an %s" % name, start_mark,
+                "expected alphabetic or numeric character, but found %r"
+                % utf8(ch), self.get_mark())
         end_mark = self.get_mark()
         return TokenClass(value, start_mark, end_mark)
 
@@ -943,9 +972,10 @@ def scan_tag(self):
             self.forward(2)
             suffix = self.scan_tag_uri('tag', start_mark)
             if self.peek() != u'>':
-                raise ScannerError("while parsing a tag", start_mark,
-                        "expected '>', but found %r" % self.peek().encode('utf-8'),
-                        self.get_mark())
+                raise ScannerError(
+                    "while parsing a tag", start_mark,
+                    "expected '>', but found %r" % utf8(self.peek()),
+                    self.get_mark())
             self.forward()
         elif ch in u'\0 \t\r\n\x85\u2028\u2029':
             handle = None
@@ -970,8 +1000,8 @@ def scan_tag(self):
         ch = self.peek()
         if ch not in u'\0 \r\n\x85\u2028\u2029':
             raise ScannerError("while scanning a tag", start_mark,
-                    "expected ' ', but found %r" % ch.encode('utf-8'),
-                    self.get_mark())
+                               "expected ' ', but found %r" % utf8(ch),
+                               self.get_mark())
         value = (handle, suffix)
         end_mark = self.get_mark()
         return TagToken(value, start_mark, end_mark)
@@ -1020,37 +1050,49 @@ def scan_block_scalar(self, style):
                 # Unfortunately, folding rules are ambiguous.
                 #
                 # This is the folding according to the specification:
-                
+
                 if folded and line_break == u'\n'   \
                         and leading_non_space and self.peek() not in u' \t':
                     if not breaks:
                         chunks.append(u' ')
                 else:
                     chunks.append(line_break)
-                
+
                 # This is Clark Evans's interpretation (also in the spec
                 # examples):
                 #
-                #if folded and line_break == u'\n':
-                #    if not breaks:
-                #        if self.peek() not in ' \t':
-                #            chunks.append(u' ')
-                #        else:
-                #            chunks.append(line_break)
-                #else:
-                #    chunks.append(line_break)
+                # if folded and line_break == u'\n':
+                #     if not breaks:
+                #         if self.peek() not in ' \t':
+                #             chunks.append(u' ')
+                #         else:
+                #             chunks.append(line_break)
+                # else:
+                #     chunks.append(line_break)
             else:
                 break
 
-        # Chomp the tail.
-        if chomping is not False:
+        # Process trailing line breaks. The 'chomping' setting determines
+        # whether they are included in the value.
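+        # (chomping None means "clip": keep a single trailing newline;
+        # True means "keep", the '+' indicator: keep all trailing breaks;
+        # False means "strip", the '-' indicator: keep none. Breaks that
+        # are not kept in the value are stashed as a comment token below
+        # so a round trip does not lose them.)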
+        comment = []
+        if chomping in [None, True]:
             chunks.append(line_break)
         if chomping is True:
             chunks.extend(breaks)
+        elif chomping in [None, False]:
+            comment.extend(breaks)
 
         # We are done.
-        return ScalarToken(u''.join(chunks), False, start_mark, end_mark,
-                style)
+        token = ScalarToken(u''.join(chunks), False, start_mark, end_mark,
+                            style)
+        if len(comment) > 0:
+            # Keep track of the trailing whitespace as a comment token, if
+            # it isn't all included in the actual value.
+            comment_end_mark = self.get_mark()
+            comment = CommentToken(''.join(comment), end_mark,
+                                   comment_end_mark)
+            token.add_post_comment(comment)
+        return token
 
     def scan_block_scalar_indicators(self, start_mark):
         # See the specification for details.
@@ -1067,16 +1109,19 @@ def scan_block_scalar_indicators(self, start_mark):
             if ch in u'0123456789':
                 increment = int(ch)
                 if increment == 0:
-                    raise ScannerError("while scanning a block scalar", start_mark,
-                            "expected indentation indicator in the range 1-9, but found 0",
-                            self.get_mark())
+                    raise ScannerError(
+                        "while scanning a block scalar", start_mark,
+                        "expected indentation indicator in the range 1-9, "
+                        "but found 0", self.get_mark())
                 self.forward()
         elif ch in u'0123456789':
             increment = int(ch)
             if increment == 0:
-                raise ScannerError("while scanning a block scalar", start_mark,
-                        "expected indentation indicator in the range 1-9, but found 0",
-                        self.get_mark())
+                raise ScannerError(
+                    "while scanning a block scalar", start_mark,
+                    "expected indentation indicator in the range 1-9, "
+                    "but found 0",
+                    self.get_mark())
             self.forward()
             ch = self.peek()
             if ch in u'+-':
@@ -1087,9 +1132,10 @@ def scan_block_scalar_indicators(self, start_mark):
                 self.forward()
         ch = self.peek()
         if ch not in u'\0 \r\n\x85\u2028\u2029':
-            raise ScannerError("while scanning a block scalar", start_mark,
-                    "expected chomping or indentation indicators, but found %r"
-                        % ch.encode('utf-8'), self.get_mark())
+            raise ScannerError(
+                "while scanning a block scalar", start_mark,
+                "expected chomping or indentation indicators, but found %r"
+                % utf8(ch), self.get_mark())
         return chomping, increment
 
     def scan_block_scalar_ignored_line(self, start_mark):
@@ -1101,9 +1147,10 @@ def scan_block_scalar_ignored_line(self, start_mark):
                 self.forward()
         ch = self.peek()
         if ch not in u'\0\r\n\x85\u2028\u2029':
-            raise ScannerError("while scanning a block scalar", start_mark,
-                    "expected a comment or a line break, but found %r"
-                        % ch.encode('utf-8'), self.get_mark())
+            raise ScannerError(
+                "while scanning a block scalar", start_mark,
+                "expected a comment or a line break, but found %r"
+                % utf8(ch), self.get_mark())
         self.scan_line_break()
 
     def scan_block_scalar_indentation(self):
@@ -1156,7 +1203,7 @@ def scan_flow_scalar(self, style):
         self.forward()
         end_mark = self.get_mark()
         return ScalarToken(u''.join(chunks), False, start_mark, end_mark,
-                style)
+                           style)
 
     ESCAPE_REPLACEMENTS = {
         u'0':   u'\0',
@@ -1171,6 +1218,7 @@ def scan_flow_scalar(self, style):
         u'e':   u'\x1B',
         u' ':   u'\x20',
         u'\"':  u'\"',
+        u'/':   u'/',  # as per http://www.json.org/
         u'\\':  u'\\',
         u'N':   u'\x85',
         u'_':   u'\xA0',
@@ -1212,18 +1260,24 @@ def scan_flow_scalar_non_spaces(self, double, start_mark):
                     self.forward()
                     for k in range(length):
                         if self.peek(k) not in u'0123456789ABCDEFabcdef':
-                            raise ScannerError("while scanning a double-quoted scalar", start_mark,
-                                    "expected escape sequence of %d hexdecimal numbers, but found %r" %
-                                        (length, self.peek(k).encode('utf-8')), self.get_mark())
+                            raise ScannerError(
+                                "while scanning a double-quoted scalar",
+                                start_mark,
+                                "expected escape sequence of %d hexdecimal "
+                                "numbers, but found %r" %
+                                (length, utf8(self.peek(k))), self.get_mark())
                     code = int(self.prefix(length), 16)
                     chunks.append(unichr(code))
                     self.forward(length)
                 elif ch in u'\r\n\x85\u2028\u2029':
                     self.scan_line_break()
-                    chunks.extend(self.scan_flow_scalar_breaks(double, start_mark))
+                    chunks.extend(self.scan_flow_scalar_breaks(
+                        double, start_mark))
                 else:
-                    raise ScannerError("while scanning a double-quoted scalar", start_mark,
-                            "found unknown escape character %r" % ch.encode('utf-8'), self.get_mark())
+                    raise ScannerError(
+                        "while scanning a double-quoted scalar", start_mark,
+                        "found unknown escape character %r" % utf8(ch),
+                        self.get_mark())
             else:
                 return chunks
 
@@ -1237,8 +1291,9 @@ def scan_flow_scalar_spaces(self, double, start_mark):
         self.forward(length)
         ch = self.peek()
         if ch == u'\0':
-            raise ScannerError("while scanning a quoted scalar", start_mark,
-                    "found unexpected end of stream", self.get_mark())
+            raise ScannerError(
+                "while scanning a quoted scalar", start_mark,
+                "found unexpected end of stream", self.get_mark())
         elif ch in u'\r\n\x85\u2028\u2029':
             line_break = self.scan_line_break()
             breaks = self.scan_flow_scalar_breaks(double, start_mark)
@@ -1260,8 +1315,10 @@ def scan_flow_scalar_breaks(self, double, start_mark):
             prefix = self.prefix(3)
             if (prefix == u'---' or prefix == u'...')   \
                     and self.peek(3) in u'\0 \t\r\n\x85\u2028\u2029':
-                raise ScannerError("while scanning a quoted scalar", start_mark,
-                        "found unexpected document separator", self.get_mark())
+                raise ScannerError("while scanning a quoted scalar",
+                                   start_mark,
+                                   "found unexpected document separator",
+                                   self.get_mark())
             while self.peek() in u' \t':
                 self.forward()
             if self.peek() in u'\r\n\x85\u2028\u2029':
@@ -1281,8 +1338,8 @@ def scan_plain(self):
         indent = self.indent+1
         # We allow zero indentation for scalars, but then we need to check for
         # document separators at the beginning of the line.
-        #if indent == 0:
-        #    indent = 1
+        # if indent == 0:
+        #     indent = 1
         spaces = []
         while True:
             length = 0
@@ -1290,19 +1347,22 @@ def scan_plain(self):
                 break
             while True:
                 ch = self.peek(length)
-                if ch in u'\0 \t\r\n\x85\u2028\u2029'   \
-                        or (not self.flow_level and ch == u':' and
-                                self.peek(length+1) in u'\0 \t\r\n\x85\u2028\u2029') \
-                        or (self.flow_level and ch in u',:?[]{}'):
+                if ch in u'\0 \t\r\n\x85\u2028\u2029' \
+                   or (not self.flow_level and ch == u':' and
+                       self.peek(length+1) in u'\0 \t\r\n\x85\u2028\u2029') \
+                   or (self.flow_level and ch in u',:?[]{}'):
                     break
                 length += 1
             # It's not clear what we should do with ':' in the flow context.
-            if (self.flow_level and ch == u':'
-                    and self.peek(length+1) not in u'\0 \t\r\n\x85\u2028\u2029,[]{}'):
+            if (self.flow_level and ch == u':' and
+               self.peek(length+1) not in u'\0 \t\r\n\x85\u2028\u2029,[]{}'):
                 self.forward(length)
-                raise ScannerError("while scanning a plain scalar", start_mark,
+                raise ScannerError(
+                    "while scanning a plain scalar", start_mark,
                     "found unexpected ':'", self.get_mark(),
-                    "Please check http://pyyaml.org/wiki/YAMLColonInFlowContext for details.")
+                    "Please check "
+                    "http://pyyaml.org/wiki/YAMLColonInFlowContext "
+                    "for details.")
             if length == 0:
                 break
             self.allow_simple_key = False
@@ -1314,7 +1374,13 @@ def scan_plain(self):
             if not spaces or self.peek() == u'#' \
                     or (not self.flow_level and self.column < indent):
                 break
-        return ScalarToken(u''.join(chunks), True, start_mark, end_mark)
+
+        token = ScalarToken(u''.join(chunks), True, start_mark, end_mark)
+        if spaces and spaces[0] == '\n':
+            # Create a comment token to preserve the trailing line breaks.
+            comment = CommentToken(''.join(spaces) + '\n', start_mark, end_mark)
+            token.add_post_comment(comment)
+        return token
 
     def scan_plain_spaces(self, indent, start_mark):
         # See the specification for details.
@@ -1342,7 +1408,7 @@ def scan_plain_spaces(self, indent, start_mark):
                     breaks.append(self.scan_line_break())
                     prefix = self.prefix(3)
                     if (prefix == u'---' or prefix == u'...')   \
-                            and self.peek(3) in u'\0 \t\r\n\x85\u2028\u2029':
+                       and self.peek(3) in u'\0 \t\r\n\x85\u2028\u2029':
                         return
             if line_break != u'\n':
                 chunks.append(line_break)
@@ -1360,20 +1426,21 @@ def scan_tag_handle(self, name, start_mark):
         ch = self.peek()
         if ch != u'!':
             raise ScannerError("while scanning a %s" % name, start_mark,
-                    "expected '!', but found %r" % ch.encode('utf-8'),
-                    self.get_mark())
+                               "expected '!', but found %r" % utf8(ch),
+                               self.get_mark())
         length = 1
         ch = self.peek(length)
         if ch != u' ':
-            while u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z'    \
-                    or ch in u'-_':
+            while u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' \
+                  or u'a' <= ch <= u'z' \
+                  or ch in u'-_':
                 length += 1
                 ch = self.peek(length)
             if ch != u'!':
                 self.forward(length)
                 raise ScannerError("while scanning a %s" % name, start_mark,
-                        "expected '!', but found %r" % ch.encode('utf-8'),
-                        self.get_mark())
+                                   "expected '!', but found %r" % utf8(ch),
+                                   self.get_mark())
             length += 1
         value = self.prefix(length)
         self.forward(length)
@@ -1401,27 +1468,36 @@ def scan_tag_uri(self, name, start_mark):
             length = 0
         if not chunks:
             raise ScannerError("while parsing a %s" % name, start_mark,
-                    "expected URI, but found %r" % ch.encode('utf-8'),
-                    self.get_mark())
+                               "expected URI, but found %r" % utf8(ch),
+                               self.get_mark())
         return u''.join(chunks)
 
     def scan_uri_escapes(self, name, start_mark):
         # See the specification for details.
-        bytes = []
+        code_bytes = []
         mark = self.get_mark()
         while self.peek() == u'%':
             self.forward()
             for k in range(2):
                 if self.peek(k) not in u'0123456789ABCDEFabcdef':
-                    raise ScannerError("while scanning a %s" % name, start_mark,
-                            "expected URI escape sequence of 2 hexdecimal numbers, but found %r" %
-                                (self.peek(k).encode('utf-8')), self.get_mark())
-            bytes.append(chr(int(self.prefix(2), 16)))
+                    raise ScannerError(
+                        "while scanning a %s" % name, start_mark,
+                        "expected URI escape sequence of 2 hexdecimal numbers,"
+                        " but found %r"
+                        % utf8(self.peek(k)), self.get_mark())
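+            # on Python 3 collect integer byte values (bytes() accepts a
+            # list of ints); on Python 2 build a byte string via chr()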
+            if PY3:
+                code_bytes.append(int(self.prefix(2), 16))
+            else:
+                code_bytes.append(chr(int(self.prefix(2), 16)))
             self.forward(2)
         try:
-            value = unicode(''.join(bytes), 'utf-8')
-        except UnicodeDecodeError, exc:
-            raise ScannerError("while scanning a %s" % name, start_mark, str(exc), mark)
+            if PY3:
+                value = bytes(code_bytes).decode('utf-8')
+            else:
+                value = unicode(''.join(code_bytes), 'utf-8')
+        except UnicodeDecodeError as exc:
+            raise ScannerError("while scanning a %s" % name, start_mark,
+                               str(exc), mark)
         return value
 
     def scan_line_break(self):
@@ -1445,9 +1521,141 @@ def scan_line_break(self):
             return ch
         return u''
 
-#try:
-#    import psyco
-#    psyco.bind(Scanner)
-#except ImportError:
-#    pass
 
+class RoundTripScanner(Scanner):
+    def check_token(self, *choices):
+        # Check if the next token is one of the given types.
+        while self.need_more_tokens():
+            self.fetch_more_tokens()
+        self._gather_comments()
+        if self.tokens:
+            if not choices:
+                return True
+            for choice in choices:
+                if isinstance(self.tokens[0], choice):
+                    return True
+        return False
+
+    def peek_token(self):
+        # Return the next token, but do not delete it from the queue.
+        while self.need_more_tokens():
+            self.fetch_more_tokens()
+        self._gather_comments()
+        if self.tokens:
+            return self.tokens[0]
+
+    def _gather_comments(self):
+        """combine multiple comment lines"""
+        comments = []
+        if not self.tokens:
+            return comments
+        if isinstance(self.tokens[0], CommentToken):
+            comment = self.tokens.pop(0)
+            self.tokens_taken += 1
+            # print('################ dropping', comment)
+            comments.append(comment)
+        while self.need_more_tokens():
+            self.fetch_more_tokens()
+            if not self.tokens:
+                return comments
+            if isinstance(self.tokens[0], CommentToken):
+                self.tokens_taken += 1
+                comment = self.tokens.pop(0)
+                # print 'dropping2', comment
+                comments.append(comment)
+        if len(comments) >= 1:
+            # print('  len', len(comments), comments)
+            # print('  com', comments[0], comments[0].start_mark.line)
+            # print('  tok', self.tokens[0].end_mark.line)
+            self.tokens[0].add_pre_comments(comments)
+        # pull in post comment on e.g. ':'
+        if not self.done and len(self.tokens) < 2:
+            self.fetch_more_tokens()
+
+    def get_token(self):
+        # Return the next token.
+        while self.need_more_tokens():
+            self.fetch_more_tokens()
+        self._gather_comments()
+        if self.tokens:
+            # only add a post comment to single-line tokens (scalar, value,
+            # flow-sequence-end, flow-mapping-end); otherwise hidden stream
+            # tokens could pick the comment up (left alone, it becomes a
+            # pre comment for the next map/seq)
+            if len(self.tokens) > 1 and \
+               isinstance(self.tokens[0], (
+                   ScalarToken,
+                   ValueToken,
+                   FlowSequenceEndToken,
+                   FlowMappingEndToken,
+                   )) and \
+               isinstance(self.tokens[1], CommentToken) and \
+               self.tokens[0].end_mark.line == self.tokens[1].start_mark.line:
+                self.tokens_taken += 1
+                self.tokens[0].add_post_comment(self.tokens.pop(1))
+            self.tokens_taken += 1
+            return self.tokens.pop(0)
+
+    def fetch_comment(self, comment):  # XXXX
+        value, start_mark, end_mark = comment
+        self.tokens.append(CommentToken(value, start_mark, end_mark))
+
+    # scanner
+
+    def scan_to_next_token(self):
+        # We ignore spaces, line breaks and comments.
+        # If we find a line break in the block context, we set the flag
+        # `allow_simple_key` on.
+        # The byte order mark is stripped if it's the first character in the
+        # stream. We do not yet support BOM inside the stream as the
+        # specification requires. Any such mark will be considered as a part
+        # of the document.
+        #
+        # TODO: We need to make tab handling rules more sane. A good rule is
+        #   Tabs cannot precede tokens
+        #   BLOCK-SEQUENCE-START, BLOCK-MAPPING-START, BLOCK-END,
+        #   KEY(block), VALUE(block), BLOCK-ENTRY
+        # So the checking code is
+        #   if <TAB>:
+        #       self.allow_simple_keys = False
+        # We also need to add the check for `allow_simple_keys == True` to
+        # `unwind_indent` before issuing BLOCK-END.
+        # Scanners for block, flow, and plain scalars need to be modified.
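+        #
+        # Unlike the base scanner, which returns None, this round-trip
+        # variant returns a (comment_string, start_mark, end_mark) tuple
+        # when it runs into a comment; fetch_more_tokens() then turns the
+        # tuple into a CommentToken via fetch_comment().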
+
+        if self.index == 0 and self.peek() == u'\uFEFF':
+            self.forward()
+        found = False
+        while not found:
+            while self.peek() == u' ':
+                self.forward()
+            ch = self.peek()
+            if ch == u'#':
+                start_mark = self.get_mark()
+                comment = ch
+                self.forward()
+                while ch not in u'\0\r\n\x85\u2028\u2029':
+                    ch = self.peek()
+                    if ch == u'\0':  # don't gobble the end-of-stream character
+                        break
+                    comment += ch
+                    self.forward()
+                # gather any blank lines following the comment too
+                ch = self.scan_line_break()
+                while len(ch) > 0:
+                    comment += ch
+                    ch = self.scan_line_break()
+                end_mark = self.get_mark()
+                if not self.flow_level:
+                    self.allow_simple_key = True
+                return comment, start_mark, end_mark
+            if self.scan_line_break():
+                if not self.flow_level:
+                    self.allow_simple_key = True
+            else:
+                found = True
+
+# try:
+#     import psyco
+#     psyco.bind(Scanner)
+# except ImportError:
+#     pass
diff --git a/lib/spack/external/ruamel/yaml/serializer.py b/lib/spack/external/ruamel/yaml/serializer.py
new file mode 100644
index 0000000000..60dd5170ca
--- /dev/null
+++ b/lib/spack/external/ruamel/yaml/serializer.py
@@ -0,0 +1,178 @@
+# coding: utf-8
+
+from __future__ import absolute_import
+
+import re
+
+try:
+    from .error import YAMLError
+    from .compat import nprint, DBG_NODE, dbg, string_types
+except (ImportError, ValueError):  # for Jython
+    from ruamel.yaml.error import YAMLError
+    from ruamel.yaml.compat import nprint, DBG_NODE, dbg, string_types
+
+from ruamel.yaml.events import (
+    StreamStartEvent, StreamEndEvent, MappingStartEvent, MappingEndEvent,
+    SequenceStartEvent, SequenceEndEvent, AliasEvent, ScalarEvent,
+    DocumentStartEvent, DocumentEndEvent,
+)
+from ruamel.yaml.nodes import (
+    MappingNode, ScalarNode, SequenceNode,
+)
+
+__all__ = ['Serializer', 'SerializerError']
+
+
+class SerializerError(YAMLError):
+    pass
+
+
+class Serializer(object):
+
+    # generated anchors are 'id' plus three or more digits, but not 'id000'
+    ANCHOR_TEMPLATE = u'id%03d'
+    ANCHOR_RE = re.compile(u'id(?!000$)\\d{3,}')
+
+    def __init__(self, encoding=None, explicit_start=None, explicit_end=None,
+                 version=None, tags=None):
+        self.use_encoding = encoding
+        self.use_explicit_start = explicit_start
+        self.use_explicit_end = explicit_end
+        if isinstance(version, string_types):
+            self.use_version = tuple(map(int, version.split('.')))
+        else:
+            self.use_version = version
+        self.use_tags = tags
+        self.serialized_nodes = {}
+        self.anchors = {}
+        self.last_anchor_id = 0
+        self.closed = None
+        self._templated_id = None
+
+    def open(self):
+        if self.closed is None:
+            self.emit(StreamStartEvent(encoding=self.use_encoding))
+            self.closed = False
+        elif self.closed:
+            raise SerializerError("serializer is closed")
+        else:
+            raise SerializerError("serializer is already opened")
+
+    def close(self):
+        if self.closed is None:
+            raise SerializerError("serializer is not opened")
+        elif not self.closed:
+            self.emit(StreamEndEvent())
+            self.closed = True
+
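+    # typical lifecycle (illustrative): open() once, then serialize(node)
+    # for each document, then close(); serialize() raises SerializerError
+    # if called before open() or after close()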
+    # def __del__(self):
+    #     self.close()
+
+    def serialize(self, node):
+        if dbg(DBG_NODE):
+            nprint('Serializing nodes')
+            node.dump()
+        if self.closed is None:
+            raise SerializerError("serializer is not opened")
+        elif self.closed:
+            raise SerializerError("serializer is closed")
+        self.emit(DocumentStartEvent(explicit=self.use_explicit_start,
+                                     version=self.use_version,
+                                     tags=self.use_tags))
+        self.anchor_node(node)
+        self.serialize_node(node, None, None)
+        self.emit(DocumentEndEvent(explicit=self.use_explicit_end))
+        self.serialized_nodes = {}
+        self.anchors = {}
+        self.last_anchor_id = 0
+
+    def anchor_node(self, node):
+        if node in self.anchors:
+            if self.anchors[node] is None:
+                self.anchors[node] = self.generate_anchor(node)
+        else:
+            anchor = None
+            try:
+                if node.anchor.always_dump:
+                    anchor = node.anchor.value
+            except AttributeError:
+                pass
+            self.anchors[node] = anchor
+            if isinstance(node, SequenceNode):
+                for item in node.value:
+                    self.anchor_node(item)
+            elif isinstance(node, MappingNode):
+                for key, value in node.value:
+                    self.anchor_node(key)
+                    self.anchor_node(value)
+
+    def generate_anchor(self, node):
+        try:
+            anchor = node.anchor.value
+        except AttributeError:
+            anchor = None
+        if anchor is None:
+            self.last_anchor_id += 1
+            return self.ANCHOR_TEMPLATE % self.last_anchor_id
+        return anchor
+
+    def serialize_node(self, node, parent, index):
+        alias = self.anchors[node]
+        if node in self.serialized_nodes:
+            self.emit(AliasEvent(alias))
+        else:
+            self.serialized_nodes[node] = True
+            self.descend_resolver(parent, index)
+            if isinstance(node, ScalarNode):
+                # check whether node.tag equals the tag that would be
+                # resolved when parsing; if not, quoting is necessary for
+                # strings to round-trip with the same type
+                detected_tag = self.resolve(ScalarNode, node.value, (True, False))
+                default_tag = self.resolve(ScalarNode, node.value, (False, True))
+                implicit = (node.tag == detected_tag), (node.tag == default_tag)
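+                # e.g. a ScalarNode with value u'123' but tag
+                # 'tag:yaml.org,2002:str' resolves plain to the int tag,
+                # so implicit == (False, True) and the emitter quotes the
+                # scalar to keep it a string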
+                self.emit(ScalarEvent(alias, node.tag, implicit, node.value,
+                                      style=node.style, comment=node.comment))
+            elif isinstance(node, SequenceNode):
+                implicit = (node.tag == self.resolve(SequenceNode, node.value, True))
+                comment = node.comment
+                # print('comment >>>>>>>>>>>>>.', comment, node.flow_style)
+                end_comment = None
+                seq_comment = None
+                if node.flow_style is True:
+                    if comment:  # eol comment on flow style sequence
+                        seq_comment = comment[0]
+                        # comment[0] = None
+                if comment and len(comment) > 2:
+                    end_comment = comment[2]
+                else:
+                    end_comment = None
+                self.emit(SequenceStartEvent(alias, node.tag, implicit,
+                                             flow_style=node.flow_style,
+                                             comment=node.comment))
+                index = 0
+                for item in node.value:
+                    self.serialize_node(item, node, index)
+                    index += 1
+                self.emit(SequenceEndEvent(comment=[seq_comment, end_comment]))
+            elif isinstance(node, MappingNode):
+                implicit = (node.tag == self.resolve(MappingNode, node.value, True))
+                comment = node.comment
+                end_comment = None
+                map_comment = None
+                if node.flow_style is True:
+                    if comment:  # eol comment on flow style mapping
+                        map_comment = comment[0]
+                        # comment[0] = None
+                if comment and len(comment) > 2:
+                    end_comment = comment[2]
+                self.emit(MappingStartEvent(alias, node.tag, implicit,
+                                            flow_style=node.flow_style,
+                                            comment=node.comment))
+                for key, value in node.value:
+                    self.serialize_node(key, node, None)
+                    self.serialize_node(value, node, key)
+                self.emit(MappingEndEvent(comment=[map_comment, end_comment]))
+            self.ascend_resolver()
+
+
+def templated_id(s):
+    return Serializer.ANCHOR_RE.match(s)
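+# e.g. templated_id(u'id001') matches, while templated_id(u'id000') and
+# templated_id(u'foo') do not, so generated anchors can be told apart
+# from anchors that were set by hand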
diff --git a/lib/spack/external/ruamel/yaml/setup.cfg b/lib/spack/external/ruamel/yaml/setup.cfg
new file mode 100644
index 0000000000..861a9f5542
--- /dev/null
+++ b/lib/spack/external/ruamel/yaml/setup.cfg
@@ -0,0 +1,5 @@
+[egg_info]
+tag_build = 
+tag_date = 0
+tag_svn_revision = 0
+
diff --git a/lib/spack/external/ruamel/yaml/tokens.py b/lib/spack/external/ruamel/yaml/tokens.py
new file mode 100644
index 0000000000..bd97785b82
--- /dev/null
+++ b/lib/spack/external/ruamel/yaml/tokens.py
@@ -0,0 +1,195 @@
+# # header
+# coding: utf-8
+
+
+class Token(object):
+    def __init__(self, start_mark, end_mark):
+        self.start_mark = start_mark
+        self.end_mark = end_mark
+
+    def __repr__(self):
+        attributes = [key for key in self.__dict__
+                      if not key.endswith('_mark')]
+        attributes.sort()
+        arguments = ', '.join(['%s=%r' % (key, getattr(self, key))
+                               for key in attributes])
+        return '%s(%s)' % (self.__class__.__name__, arguments)
+
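+    # comment bookkeeping: self._comment is a two-slot list
+    # [post_comment, pre_comments]; slot 0 holds the eol CommentToken
+    # that follows this token, slot 1 holds a list of comment tokens
+    # that precede it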
+    def add_post_comment(self, comment):
+        if not hasattr(self, '_comment'):
+            self._comment = [None, None]
+        self._comment[0] = comment
+
+    def add_pre_comments(self, comments):
+        if not hasattr(self, '_comment'):
+            self._comment = [None, None]
+        assert self._comment[1] is None
+        self._comment[1] = comments
+
+    def get_comment(self):
+        return getattr(self, '_comment', None)
+
+    @property
+    def comment(self):
+        return getattr(self, '_comment', None)
+
+    def move_comment(self, target):
+        """move a comment from this token to target (normally next token)
+        used to combine e.g. comments before a BlockEntryToken to the
+        ScalarToken that follows it
+        """
+        c = self.comment
+        if c is None:
+            return
+        # don't push beyond last element
+        if isinstance(target, StreamEndToken):
+            return
+        delattr(self, '_comment')
+        tc = target.comment
+        if not tc:  # target comment, just insert
+            target._comment = c
+            return self
+        if c[0] and tc[0] or c[1] and tc[1]:
+            raise NotImplementedError('overlap in comment %r %r' % (c, tc))
+        if c[0]:
+            tc[0] = c[0]
+        if c[1]:
+            tc[1] = c[1]
+        return self
+
+    def split_comment(self):
+        """ split the post part of a comment, and return it
+        as comment to be added. Delete second part if [None, None]
+         abc:  # this goes to sequence
+           # this goes to first element
+           - first element
+        """
+        comment = self.comment
+        if comment is None or comment[0] is None:
+            return None  # nothing to do
+        ret_val = [comment[0], None]
+        if comment[1] is None:
+            delattr(self, '_comment')
+        return ret_val
+
+
+# class BOMToken(Token):
+#     id = '<byte order mark>'
+
+class DirectiveToken(Token):
+    id = '<directive>'
+
+    def __init__(self, name, value, start_mark, end_mark):
+        Token.__init__(self, start_mark, end_mark)
+        self.name = name
+        self.value = value
+
+
+class DocumentStartToken(Token):
+    id = '<document start>'
+
+
+class DocumentEndToken(Token):
+    id = '<document end>'
+
+
+class StreamStartToken(Token):
+    id = '<stream start>'
+
+    def __init__(self, start_mark=None, end_mark=None, encoding=None):
+        Token.__init__(self, start_mark, end_mark)
+        self.encoding = encoding
+
+
+class StreamEndToken(Token):
+    id = '<stream end>'
+
+
+class BlockSequenceStartToken(Token):
+    id = '<block sequence start>'
+
+
+class BlockMappingStartToken(Token):
+    id = '<block mapping start>'
+
+
+class BlockEndToken(Token):
+    id = '<block end>'
+
+
+class FlowSequenceStartToken(Token):
+    id = '['
+
+
+class FlowMappingStartToken(Token):
+    id = '{'
+
+
+class FlowSequenceEndToken(Token):
+    id = ']'
+
+
+class FlowMappingEndToken(Token):
+    id = '}'
+
+
+class KeyToken(Token):
+    id = '?'
+
+
+class ValueToken(Token):
+    id = ':'
+
+
+class BlockEntryToken(Token):
+    id = '-'
+
+
+class FlowEntryToken(Token):
+    id = ','
+
+
+class AliasToken(Token):
+    id = '<alias>'
+
+    def __init__(self, value, start_mark, end_mark):
+        Token.__init__(self, start_mark, end_mark)
+        self.value = value
+
+
+class AnchorToken(Token):
+    id = '<anchor>'
+
+    def __init__(self, value, start_mark, end_mark):
+        Token.__init__(self, start_mark, end_mark)
+        self.value = value
+
+
+class TagToken(Token):
+    id = '<tag>'
+
+    def __init__(self, value, start_mark, end_mark):
+        Token.__init__(self, start_mark, end_mark)
+        self.value = value
+
+
+class ScalarToken(Token):
+    id = '<scalar>'
+
+    def __init__(self, value, plain, start_mark, end_mark, style=None):
+        Token.__init__(self, start_mark, end_mark)
+        self.value = value
+        self.plain = plain
+        self.style = style
+
+
+class CommentToken(Token):
+    id = '<comment>'
+
+    def __init__(self, value, start_mark, end_mark):
+        Token.__init__(self, start_mark, end_mark)
+        self.value = value
+
+    def reset(self):
+        if hasattr(self, 'pre_done'):
+            delattr(self, 'pre_done')
diff --git a/lib/spack/external/ruamel/yaml/util.py b/lib/spack/external/ruamel/yaml/util.py
new file mode 100644
index 0000000000..afc46fb12a
--- /dev/null
+++ b/lib/spack/external/ruamel/yaml/util.py
@@ -0,0 +1,139 @@
+# coding: utf-8
+
+"""
+some helper functions that might be generally useful
+"""
+
+from __future__ import print_function
+from __future__ import absolute_import
+
+from .compat import text_type, binary_type
+from .main import round_trip_load
+
+
+# originally posted as a comment on
+# https://github.com/pre-commit/pre-commit/pull/211#issuecomment-186466605
+# if you use this in your code, I suggest adding a test to your test suite
+# that checks this routine's output against a known piece of your YAML,
+# so that upgrades to this code don't break your round-tripped YAML
+def load_yaml_guess_indent(stream, **kw):
+    """guess the indent and block sequence indent of yaml stream/string
+
+    returns round_trip_loaded stream, indent level, block sequence indent
+    - block sequence indent is the number of spaces before a dash relative to previous indent
+    - if there are no block sequences, indent is taken from nested mappings, block sequence
+      indent is unset (None) in that case
+    """
+    # load a yaml file and guess the indentation (if you use TABs ...)
+    def leading_spaces(l):
+        idx = 0
+        while idx < len(l) and l[idx] == ' ':
+            idx += 1
+        return idx
+
+    if isinstance(stream, text_type):
+        yaml_str = stream
+    elif isinstance(stream, binary_type):
+        yaml_str = stream.decode('utf-8')  # most likely, but the Reader checks BOM for this
+    else:
+        yaml_str = stream.read()
+    map_indent = None
+    indent = None  # default if not found for some reason
+    block_seq_indent = None
+    prev_line_key_only = None
+    key_indent = 0
+    for line in yaml_str.splitlines():
+        rline = line.rstrip()
+        lline = rline.lstrip()
+        if lline.startswith('- '):
+            l_s = leading_spaces(line)
+            block_seq_indent = l_s - key_indent
+            idx = l_s + 1
+            while line[idx] == ' ':  # terminates, since the line was rstripped
+                idx += 1
+            if line[idx] == '#':     # comment after -
+                continue
+            indent = idx - key_indent
+            break
+        if map_indent is None and prev_line_key_only is not None and rline:
+            idx = 0
+            while line[idx] in ' -':
+                idx += 1
+            if idx > prev_line_key_only:
+                map_indent = idx - prev_line_key_only
+        if rline.endswith(':'):
+            key_indent = leading_spaces(line)
+            idx = 0
+            while line[idx] == ' ':  # this will end on ':'
+                idx += 1
+            prev_line_key_only = idx
+            continue
+        prev_line_key_only = None
+    if indent is None and map_indent is not None:
+        indent = map_indent
+    return round_trip_load(yaml_str, **kw), indent, block_seq_indent
+
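+# illustrative usage (hypothetical file name):
+#     data, indent, block_seq_indent = \
+#         load_yaml_guess_indent(open('config.yaml'))
+#     # ... modify data ..., then dump it back with the same layout,
+#     # e.g. via round_trip_dump(data, out, indent=indent,
+#     #                          block_seq_indent=block_seq_indent)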
+
+def configobj_walker(cfg):
+    """
+    walks over a ConfigObj (INI file with comments), generating
+    corresponding YAML output (including comments)
+    """
+    from configobj import ConfigObj
+    assert isinstance(cfg, ConfigObj)
+    for c in cfg.initial_comment:
+        if c.strip():
+            yield c
+    for s in _walk_section(cfg):
+        if s.strip():
+            yield s
+    for c in cfg.final_comment:
+        if c.strip():
+            yield c
+
+
+def _walk_section(s, level=0):
+    from configobj import Section
+    assert isinstance(s, Section)
+    indent = u'  ' * level
+    for name in s.scalars:
+        for c in s.comments[name]:
+            yield indent + c.strip()
+        x = s[name]
+        if u'\n' in x:
+            i = indent + u'  '
+            x = u'|\n' + i + x.strip().replace(u'\n', u'\n' + i)
+        elif ':' in x:
+            x = u"'" + x.replace(u"'", u"''") + u"'"
+        line = u'{0}{1}: {2}'.format(indent, name, x)
+        c = s.inline_comments[name]
+        if c:
+            line += u' ' + c
+        yield line
+    for name in s.sections:
+        for c in s.comments[name]:
+            yield indent + c.strip()
+        line = u'{0}{1}:'.format(indent, name)
+        c = s.inline_comments[name]
+        if c:
+            line += u' ' + c
+        yield line
+        for val in _walk_section(s[name], level=level+1):
+            yield val
+
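+# e.g. (illustrative) an INI fragment like
+#     # initial comment
+#     [server]
+#     host = example.com  # inline comment
+# walks to YAML lines roughly like
+#     # initial comment
+#     server:
+#       host: example.com # inline comment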
+# def config_obj_2_rt_yaml(cfg):
+#     from .comments import CommentedMap, CommentedSeq
+#     from configobj import ConfigObj
+#     assert isinstance(cfg, ConfigObj)
+#     #for c in cfg.initial_comment:
+#     #    if c.strip():
+#     #        pass
+#     cm = CommentedMap()
+#     for name in s.sections:
+#         cm[name] = d = CommentedMap()
+#
+#
+#     #for c in cfg.final_comment:
+#     #    if c.strip():
+#     #        yield c
+#     return cm
diff --git a/lib/spack/external/yaml/LICENSE b/lib/spack/external/yaml/LICENSE
deleted file mode 100644
index 050ced23f6..0000000000
--- a/lib/spack/external/yaml/LICENSE
+++ /dev/null
@@ -1,19 +0,0 @@
-Copyright (c) 2006 Kirill Simonov
-
-Permission is hereby granted, free of charge, to any person obtaining a copy of
-this software and associated documentation files (the "Software"), to deal in
-the Software without restriction, including without limitation the rights to
-use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
-of the Software, and to permit persons to whom the Software is furnished to do
-so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
diff --git a/lib/spack/external/yaml/README b/lib/spack/external/yaml/README
deleted file mode 100644
index d186328eeb..0000000000
--- a/lib/spack/external/yaml/README
+++ /dev/null
@@ -1,35 +0,0 @@
-PyYAML - The next generation YAML parser and emitter for Python.
-
-To install, type 'python setup.py install'.
-
-By default, the setup.py script checks whether LibYAML is installed
-and if so, builds and installs LibYAML bindings.  To skip the check
-and force installation of LibYAML bindings, use the option '--with-libyaml':
-'python setup.py --with-libyaml install'.  To disable the check and
-skip building and installing LibYAML bindings, use '--without-libyaml':
-'python setup.py --without-libyaml install'.
-
-When LibYAML bindings are installed, you may use fast LibYAML-based
-parser and emitter as follows:
-
-    >>> yaml.load(stream, Loader=yaml.CLoader)
-    >>> yaml.dump(data, Dumper=yaml.CDumper)
-
-PyYAML includes a comprehensive test suite.  To run the tests,
-type 'python setup.py test'.
-
-For more information, check the PyYAML homepage:
-'http://pyyaml.org/wiki/PyYAML'.
-
-For PyYAML tutorial and reference, see:
-'http://pyyaml.org/wiki/PyYAMLDocumentation'.
-
-Post your questions and opinions to the YAML-Core mailing list:
-'http://lists.sourceforge.net/lists/listinfo/yaml-core'.
-
-Submit bug reports and feature requests to the PyYAML bug tracker:
-'https://bitbucket.org/xi/pyyaml/issues/new'.
-
-PyYAML is written by Kirill Simonov <xi@resolvent.net>.  It is released
-under the MIT license. See the file LICENSE for more details.
-
diff --git a/lib/spack/external/yaml/lib/yaml/__init__.py b/lib/spack/external/yaml/lib/yaml/__init__.py
deleted file mode 100644
index 87c15d38aa..0000000000
--- a/lib/spack/external/yaml/lib/yaml/__init__.py
+++ /dev/null
@@ -1,315 +0,0 @@
-
-from error import *
-
-from tokens import *
-from events import *
-from nodes import *
-
-from loader import *
-from dumper import *
-
-__version__ = '3.12'
-
-try:
-    from cyaml import *
-    __with_libyaml__ = True
-except ImportError:
-    __with_libyaml__ = False
-
-def scan(stream, Loader=Loader):
-    """
-    Scan a YAML stream and produce scanning tokens.
-    """
-    loader = Loader(stream)
-    try:
-        while loader.check_token():
-            yield loader.get_token()
-    finally:
-        loader.dispose()
-
-def parse(stream, Loader=Loader):
-    """
-    Parse a YAML stream and produce parsing events.
-    """
-    loader = Loader(stream)
-    try:
-        while loader.check_event():
-            yield loader.get_event()
-    finally:
-        loader.dispose()
-
-def compose(stream, Loader=Loader):
-    """
-    Parse the first YAML document in a stream
-    and produce the corresponding representation tree.
-    """
-    loader = Loader(stream)
-    try:
-        return loader.get_single_node()
-    finally:
-        loader.dispose()
-
-def compose_all(stream, Loader=Loader):
-    """
-    Parse all YAML documents in a stream
-    and produce corresponding representation trees.
-    """
-    loader = Loader(stream)
-    try:
-        while loader.check_node():
-            yield loader.get_node()
-    finally:
-        loader.dispose()
-
-def load(stream, Loader=Loader):
-    """
-    Parse the first YAML document in a stream
-    and produce the corresponding Python object.
-    """
-    loader = Loader(stream)
-    try:
-        return loader.get_single_data()
-    finally:
-        loader.dispose()
-
-def load_all(stream, Loader=Loader):
-    """
-    Parse all YAML documents in a stream
-    and produce corresponding Python objects.
-    """
-    loader = Loader(stream)
-    try:
-        while loader.check_data():
-            yield loader.get_data()
-    finally:
-        loader.dispose()
-
-def safe_load(stream):
-    """
-    Parse the first YAML document in a stream
-    and produce the corresponding Python object.
-    Resolve only basic YAML tags.
-    """
-    return load(stream, SafeLoader)
-
-def safe_load_all(stream):
-    """
-    Parse all YAML documents in a stream
-    and produce corresponding Python objects.
-    Resolve only basic YAML tags.
-    """
-    return load_all(stream, SafeLoader)
-
-def emit(events, stream=None, Dumper=Dumper,
-        canonical=None, indent=None, width=None,
-        allow_unicode=None, line_break=None):
-    """
-    Emit YAML parsing events into a stream.
-    If stream is None, return the produced string instead.
-    """
-    getvalue = None
-    if stream is None:
-        from StringIO import StringIO
-        stream = StringIO()
-        getvalue = stream.getvalue
-    dumper = Dumper(stream, canonical=canonical, indent=indent, width=width,
-            allow_unicode=allow_unicode, line_break=line_break)
-    try:
-        for event in events:
-            dumper.emit(event)
-    finally:
-        dumper.dispose()
-    if getvalue:
-        return getvalue()
-
-def serialize_all(nodes, stream=None, Dumper=Dumper,
-        canonical=None, indent=None, width=None,
-        allow_unicode=None, line_break=None,
-        encoding='utf-8', explicit_start=None, explicit_end=None,
-        version=None, tags=None):
-    """
-    Serialize a sequence of representation trees into a YAML stream.
-    If stream is None, return the produced string instead.
-    """
-    getvalue = None
-    if stream is None:
-        if encoding is None:
-            from StringIO import StringIO
-        else:
-            from cStringIO import StringIO
-        stream = StringIO()
-        getvalue = stream.getvalue
-    dumper = Dumper(stream, canonical=canonical, indent=indent, width=width,
-            allow_unicode=allow_unicode, line_break=line_break,
-            encoding=encoding, version=version, tags=tags,
-            explicit_start=explicit_start, explicit_end=explicit_end)
-    try:
-        dumper.open()
-        for node in nodes:
-            dumper.serialize(node)
-        dumper.close()
-    finally:
-        dumper.dispose()
-    if getvalue:
-        return getvalue()
-
-def serialize(node, stream=None, Dumper=Dumper, **kwds):
-    """
-    Serialize a representation tree into a YAML stream.
-    If stream is None, return the produced string instead.
-    """
-    return serialize_all([node], stream, Dumper=Dumper, **kwds)
-
-def dump_all(documents, stream=None, Dumper=Dumper,
-        default_style=None, default_flow_style=None,
-        canonical=None, indent=None, width=None,
-        allow_unicode=None, line_break=None,
-        encoding='utf-8', explicit_start=None, explicit_end=None,
-        version=None, tags=None):
-    """
-    Serialize a sequence of Python objects into a YAML stream.
-    If stream is None, return the produced string instead.
-    """
-    getvalue = None
-    if stream is None:
-        if encoding is None:
-            from StringIO import StringIO
-        else:
-            from cStringIO import StringIO
-        stream = StringIO()
-        getvalue = stream.getvalue
-    dumper = Dumper(stream, default_style=default_style,
-            default_flow_style=default_flow_style,
-            canonical=canonical, indent=indent, width=width,
-            allow_unicode=allow_unicode, line_break=line_break,
-            encoding=encoding, version=version, tags=tags,
-            explicit_start=explicit_start, explicit_end=explicit_end)
-    try:
-        dumper.open()
-        for data in documents:
-            dumper.represent(data)
-        dumper.close()
-    finally:
-        dumper.dispose()
-    if getvalue:
-        return getvalue()
-
-def dump(data, stream=None, Dumper=Dumper, **kwds):
-    """
-    Serialize a Python object into a YAML stream.
-    If stream is None, return the produced string instead.
-    """
-    return dump_all([data], stream, Dumper=Dumper, **kwds)
-
-def safe_dump_all(documents, stream=None, **kwds):
-    """
-    Serialize a sequence of Python objects into a YAML stream.
-    Produce only basic YAML tags.
-    If stream is None, return the produced string instead.
-    """
-    return dump_all(documents, stream, Dumper=SafeDumper, **kwds)
-
-def safe_dump(data, stream=None, **kwds):
-    """
-    Serialize a Python object into a YAML stream.
-    Produce only basic YAML tags.
-    If stream is None, return the produced string instead.
-    """
-    return dump_all([data], stream, Dumper=SafeDumper, **kwds)
-
-def add_implicit_resolver(tag, regexp, first=None,
-        Loader=Loader, Dumper=Dumper):
-    """
-    Add an implicit scalar detector.
-    If an implicit scalar value matches the given regexp,
-    the corresponding tag is assigned to the scalar.
-    first is a sequence of possible initial characters or None.
-    """
-    Loader.add_implicit_resolver(tag, regexp, first)
-    Dumper.add_implicit_resolver(tag, regexp, first)
-
-def add_path_resolver(tag, path, kind=None, Loader=Loader, Dumper=Dumper):
-    """
-    Add a path based resolver for the given tag.
-    A path is a list of keys that forms a path
-    to a node in the representation tree.
-    Keys can be string values, integers, or None.
-    """
-    Loader.add_path_resolver(tag, path, kind)
-    Dumper.add_path_resolver(tag, path, kind)
-
-def add_constructor(tag, constructor, Loader=Loader):
-    """
-    Add a constructor for the given tag.
-    Constructor is a function that accepts a Loader instance
-    and a node object and produces the corresponding Python object.
-    """
-    Loader.add_constructor(tag, constructor)
-
-def add_multi_constructor(tag_prefix, multi_constructor, Loader=Loader):
-    """
-    Add a multi-constructor for the given tag prefix.
-    Multi-constructor is called for a node if its tag starts with tag_prefix.
-    Multi-constructor accepts a Loader instance, a tag suffix,
-    and a node object and produces the corresponding Python object.
-    """
-    Loader.add_multi_constructor(tag_prefix, multi_constructor)
-
-def add_representer(data_type, representer, Dumper=Dumper):
-    """
-    Add a representer for the given type.
-    Representer is a function accepting a Dumper instance
-    and an instance of the given data type
-    and producing the corresponding representation node.
-    """
-    Dumper.add_representer(data_type, representer)
-
-def add_multi_representer(data_type, multi_representer, Dumper=Dumper):
-    """
-    Add a representer for the given type.
-    Multi-representer is a function accepting a Dumper instance
-    and an instance of the given data type or subtype
-    and producing the corresponding representation node.
-    """
-    Dumper.add_multi_representer(data_type, multi_representer)
-
-class YAMLObjectMetaclass(type):
-    """
-    The metaclass for YAMLObject.
-    """
-    def __init__(cls, name, bases, kwds):
-        super(YAMLObjectMetaclass, cls).__init__(name, bases, kwds)
-        if 'yaml_tag' in kwds and kwds['yaml_tag'] is not None:
-            cls.yaml_loader.add_constructor(cls.yaml_tag, cls.from_yaml)
-            cls.yaml_dumper.add_representer(cls, cls.to_yaml)
-
-class YAMLObject(object):
-    """
-    An object that can dump itself to a YAML stream
-    and load itself from a YAML stream.
-    """
-
-    __metaclass__ = YAMLObjectMetaclass
-    __slots__ = ()  # no direct instantiation, so allow immutable subclasses
-
-    yaml_loader = Loader
-    yaml_dumper = Dumper
-
-    yaml_tag = None
-    yaml_flow_style = None
-
-    def from_yaml(cls, loader, node):
-        """
-        Convert a representation node to a Python object.
-        """
-        return loader.construct_yaml_object(node, cls)
-    from_yaml = classmethod(from_yaml)
-
-    def to_yaml(cls, dumper, data):
-        """
-        Convert a Python object to a representation node.
-        """
-        return dumper.represent_yaml_object(cls.yaml_tag, data, cls,
-                flow_style=cls.yaml_flow_style)
-    to_yaml = classmethod(to_yaml)
-
diff --git a/lib/spack/external/yaml/lib/yaml/constructor.py b/lib/spack/external/yaml/lib/yaml/constructor.py
deleted file mode 100644
index 635faac3e6..0000000000
--- a/lib/spack/external/yaml/lib/yaml/constructor.py
+++ /dev/null
@@ -1,675 +0,0 @@
-
-__all__ = ['BaseConstructor', 'SafeConstructor', 'Constructor',
-    'ConstructorError']
-
-from error import *
-from nodes import *
-
-import datetime
-
-import binascii, re, sys, types
-
-class ConstructorError(MarkedYAMLError):
-    pass
-
-class BaseConstructor(object):
-
-    yaml_constructors = {}
-    yaml_multi_constructors = {}
-
-    def __init__(self):
-        self.constructed_objects = {}
-        self.recursive_objects = {}
-        self.state_generators = []
-        self.deep_construct = False
-
-    def check_data(self):
-        # If there are more documents available?
-        return self.check_node()
-
-    def get_data(self):
-        # Construct and return the next document.
-        if self.check_node():
-            return self.construct_document(self.get_node())
-
-    def get_single_data(self):
-        # Ensure that the stream contains a single document and construct it.
-        node = self.get_single_node()
-        if node is not None:
-            return self.construct_document(node)
-        return None
-
-    def construct_document(self, node):
-        data = self.construct_object(node)
-        while self.state_generators:
-            state_generators = self.state_generators
-            self.state_generators = []
-            for generator in state_generators:
-                for dummy in generator:
-                    pass
-        self.constructed_objects = {}
-        self.recursive_objects = {}
-        self.deep_construct = False
-        return data
-
-    def construct_object(self, node, deep=False):
-        if node in self.constructed_objects:
-            return self.constructed_objects[node]
-        if deep:
-            old_deep = self.deep_construct
-            self.deep_construct = True
-        if node in self.recursive_objects:
-            raise ConstructorError(None, None,
-                    "found unconstructable recursive node", node.start_mark)
-        self.recursive_objects[node] = None
-        constructor = None
-        tag_suffix = None
-        if node.tag in self.yaml_constructors:
-            constructor = self.yaml_constructors[node.tag]
-        else:
-            for tag_prefix in self.yaml_multi_constructors:
-                if node.tag.startswith(tag_prefix):
-                    tag_suffix = node.tag[len(tag_prefix):]
-                    constructor = self.yaml_multi_constructors[tag_prefix]
-                    break
-            else:
-                if None in self.yaml_multi_constructors:
-                    tag_suffix = node.tag
-                    constructor = self.yaml_multi_constructors[None]
-                elif None in self.yaml_constructors:
-                    constructor = self.yaml_constructors[None]
-                elif isinstance(node, ScalarNode):
-                    constructor = self.__class__.construct_scalar
-                elif isinstance(node, SequenceNode):
-                    constructor = self.__class__.construct_sequence
-                elif isinstance(node, MappingNode):
-                    constructor = self.__class__.construct_mapping
-        if tag_suffix is None:
-            data = constructor(self, node)
-        else:
-            data = constructor(self, tag_suffix, node)
-        if isinstance(data, types.GeneratorType):
-            generator = data
-            data = generator.next()
-            if self.deep_construct:
-                for dummy in generator:
-                    pass
-            else:
-                self.state_generators.append(generator)
-        self.constructed_objects[node] = data
-        del self.recursive_objects[node]
-        if deep:
-            self.deep_construct = old_deep
-        return data
-
-    def construct_scalar(self, node):
-        if not isinstance(node, ScalarNode):
-            raise ConstructorError(None, None,
-                    "expected a scalar node, but found %s" % node.id,
-                    node.start_mark)
-        return node.value
-
-    def construct_sequence(self, node, deep=False):
-        if not isinstance(node, SequenceNode):
-            raise ConstructorError(None, None,
-                    "expected a sequence node, but found %s" % node.id,
-                    node.start_mark)
-        return [self.construct_object(child, deep=deep)
-                for child in node.value]
-
-    def construct_mapping(self, node, deep=False):
-        if not isinstance(node, MappingNode):
-            raise ConstructorError(None, None,
-                    "expected a mapping node, but found %s" % node.id,
-                    node.start_mark)
-        mapping = {}
-        for key_node, value_node in node.value:
-            key = self.construct_object(key_node, deep=deep)
-            try:
-                hash(key)
-            except TypeError, exc:
-                raise ConstructorError("while constructing a mapping", node.start_mark,
-                        "found unacceptable key (%s)" % exc, key_node.start_mark)
-            value = self.construct_object(value_node, deep=deep)
-            mapping[key] = value
-        return mapping
-
-    def construct_pairs(self, node, deep=False):
-        if not isinstance(node, MappingNode):
-            raise ConstructorError(None, None,
-                    "expected a mapping node, but found %s" % node.id,
-                    node.start_mark)
-        pairs = []
-        for key_node, value_node in node.value:
-            key = self.construct_object(key_node, deep=deep)
-            value = self.construct_object(value_node, deep=deep)
-            pairs.append((key, value))
-        return pairs
-
-    def add_constructor(cls, tag, constructor):
-        if not 'yaml_constructors' in cls.__dict__:
-            cls.yaml_constructors = cls.yaml_constructors.copy()
-        cls.yaml_constructors[tag] = constructor
-    add_constructor = classmethod(add_constructor)
-
-    def add_multi_constructor(cls, tag_prefix, multi_constructor):
-        if not 'yaml_multi_constructors' in cls.__dict__:
-            cls.yaml_multi_constructors = cls.yaml_multi_constructors.copy()
-        cls.yaml_multi_constructors[tag_prefix] = multi_constructor
-    add_multi_constructor = classmethod(add_multi_constructor)
-
-class SafeConstructor(BaseConstructor):
-
-    def construct_scalar(self, node):
-        if isinstance(node, MappingNode):
-            for key_node, value_node in node.value:
-                if key_node.tag == u'tag:yaml.org,2002:value':
-                    return self.construct_scalar(value_node)
-        return BaseConstructor.construct_scalar(self, node)
-
-    def flatten_mapping(self, node):
-        merge = []
-        index = 0
-        while index < len(node.value):
-            key_node, value_node = node.value[index]
-            if key_node.tag == u'tag:yaml.org,2002:merge':
-                del node.value[index]
-                if isinstance(value_node, MappingNode):
-                    self.flatten_mapping(value_node)
-                    merge.extend(value_node.value)
-                elif isinstance(value_node, SequenceNode):
-                    submerge = []
-                    for subnode in value_node.value:
-                        if not isinstance(subnode, MappingNode):
-                            raise ConstructorError("while constructing a mapping",
-                                    node.start_mark,
-                                    "expected a mapping for merging, but found %s"
-                                    % subnode.id, subnode.start_mark)
-                        self.flatten_mapping(subnode)
-                        submerge.append(subnode.value)
-                    submerge.reverse()
-                    for value in submerge:
-                        merge.extend(value)
-                else:
-                    raise ConstructorError("while constructing a mapping", node.start_mark,
-                            "expected a mapping or list of mappings for merging, but found %s"
-                            % value_node.id, value_node.start_mark)
-            elif key_node.tag == u'tag:yaml.org,2002:value':
-                key_node.tag = u'tag:yaml.org,2002:str'
-                index += 1
-            else:
-                index += 1
-        if merge:
-            node.value = merge + node.value
-
-    def construct_mapping(self, node, deep=False):
-        if isinstance(node, MappingNode):
-            self.flatten_mapping(node)
-        return BaseConstructor.construct_mapping(self, node, deep=deep)
-
-    def construct_yaml_null(self, node):
-        self.construct_scalar(node)
-        return None
-
-    bool_values = {
-        u'yes':     True,
-        u'no':      False,
-        u'true':    True,
-        u'false':   False,
-        u'on':      True,
-        u'off':     False,
-    }
-
-    def construct_yaml_bool(self, node):
-        value = self.construct_scalar(node)
-        return self.bool_values[value.lower()]
-
-    def construct_yaml_int(self, node):
-        value = str(self.construct_scalar(node))
-        value = value.replace('_', '')
-        sign = +1
-        if value[0] == '-':
-            sign = -1
-        if value[0] in '+-':
-            value = value[1:]
-        if value == '0':
-            return 0
-        elif value.startswith('0b'):
-            return sign*int(value[2:], 2)
-        elif value.startswith('0x'):
-            return sign*int(value[2:], 16)
-        elif value[0] == '0':
-            return sign*int(value, 8)
-        elif ':' in value:
-            digits = [int(part) for part in value.split(':')]
-            digits.reverse()
-            base = 1
-            value = 0
-            for digit in digits:
-                value += digit*base
-                base *= 60
-            return sign*value
-        else:
-            return sign*int(value)
-
-    inf_value = 1e300
-    while inf_value != inf_value*inf_value:
-        inf_value *= inf_value
-    nan_value = -inf_value/inf_value   # Trying to make a quiet NaN (like C99).
-
-    def construct_yaml_float(self, node):
-        value = str(self.construct_scalar(node))
-        value = value.replace('_', '').lower()
-        sign = +1
-        if value[0] == '-':
-            sign = -1
-        if value[0] in '+-':
-            value = value[1:]
-        if value == '.inf':
-            return sign*self.inf_value
-        elif value == '.nan':
-            return self.nan_value
-        elif ':' in value:
-            digits = [float(part) for part in value.split(':')]
-            digits.reverse()
-            base = 1
-            value = 0.0
-            for digit in digits:
-                value += digit*base
-                base *= 60
-            return sign*value
-        else:
-            return sign*float(value)
-
-    def construct_yaml_binary(self, node):
-        value = self.construct_scalar(node)
-        try:
-            return str(value).decode('base64')
-        except (binascii.Error, UnicodeEncodeError), exc:
-            raise ConstructorError(None, None,
-                    "failed to decode base64 data: %s" % exc, node.start_mark) 
-
-    timestamp_regexp = re.compile(
-            ur'''^(?P<year>[0-9][0-9][0-9][0-9])
-                -(?P<month>[0-9][0-9]?)
-                -(?P<day>[0-9][0-9]?)
-                (?:(?:[Tt]|[ \t]+)
-                (?P<hour>[0-9][0-9]?)
-                :(?P<minute>[0-9][0-9])
-                :(?P<second>[0-9][0-9])
-                (?:\.(?P<fraction>[0-9]*))?
-                (?:[ \t]*(?P<tz>Z|(?P<tz_sign>[-+])(?P<tz_hour>[0-9][0-9]?)
-                (?::(?P<tz_minute>[0-9][0-9]))?))?)?$''', re.X)
-
-    def construct_yaml_timestamp(self, node):
-        value = self.construct_scalar(node)
-        match = self.timestamp_regexp.match(node.value)
-        values = match.groupdict()
-        year = int(values['year'])
-        month = int(values['month'])
-        day = int(values['day'])
-        if not values['hour']:
-            return datetime.date(year, month, day)
-        hour = int(values['hour'])
-        minute = int(values['minute'])
-        second = int(values['second'])
-        fraction = 0
-        if values['fraction']:
-            fraction = values['fraction'][:6]
-            while len(fraction) < 6:
-                fraction += '0'
-            fraction = int(fraction)
-        delta = None
-        if values['tz_sign']:
-            tz_hour = int(values['tz_hour'])
-            tz_minute = int(values['tz_minute'] or 0)
-            delta = datetime.timedelta(hours=tz_hour, minutes=tz_minute)
-            if values['tz_sign'] == '-':
-                delta = -delta
-        data = datetime.datetime(year, month, day, hour, minute, second, fraction)
-        if delta:
-            data -= delta
-        return data
-
-    def construct_yaml_omap(self, node):
-        # Note: we do not check for duplicate keys, because it's too
-        # CPU-expensive.
-        omap = []
-        yield omap
-        if not isinstance(node, SequenceNode):
-            raise ConstructorError("while constructing an ordered map", node.start_mark,
-                    "expected a sequence, but found %s" % node.id, node.start_mark)
-        for subnode in node.value:
-            if not isinstance(subnode, MappingNode):
-                raise ConstructorError("while constructing an ordered map", node.start_mark,
-                        "expected a mapping of length 1, but found %s" % subnode.id,
-                        subnode.start_mark)
-            if len(subnode.value) != 1:
-                raise ConstructorError("while constructing an ordered map", node.start_mark,
-                        "expected a single mapping item, but found %d items" % len(subnode.value),
-                        subnode.start_mark)
-            key_node, value_node = subnode.value[0]
-            key = self.construct_object(key_node)
-            value = self.construct_object(value_node)
-            omap.append((key, value))
-
-    def construct_yaml_pairs(self, node):
-        # Note: the same code as `construct_yaml_omap`.
-        pairs = []
-        yield pairs
-        if not isinstance(node, SequenceNode):
-            raise ConstructorError("while constructing pairs", node.start_mark,
-                    "expected a sequence, but found %s" % node.id, node.start_mark)
-        for subnode in node.value:
-            if not isinstance(subnode, MappingNode):
-                raise ConstructorError("while constructing pairs", node.start_mark,
-                        "expected a mapping of length 1, but found %s" % subnode.id,
-                        subnode.start_mark)
-            if len(subnode.value) != 1:
-                raise ConstructorError("while constructing pairs", node.start_mark,
-                        "expected a single mapping item, but found %d items" % len(subnode.value),
-                        subnode.start_mark)
-            key_node, value_node = subnode.value[0]
-            key = self.construct_object(key_node)
-            value = self.construct_object(value_node)
-            pairs.append((key, value))
-
-    def construct_yaml_set(self, node):
-        data = set()
-        yield data
-        value = self.construct_mapping(node)
-        data.update(value)
-
-    def construct_yaml_str(self, node):
-        value = self.construct_scalar(node)
-        try:
-            return value.encode('ascii')
-        except UnicodeEncodeError:
-            return value
-
-    def construct_yaml_seq(self, node):
-        data = []
-        yield data
-        data.extend(self.construct_sequence(node))
-
-    def construct_yaml_map(self, node):
-        data = {}
-        yield data
-        value = self.construct_mapping(node)
-        data.update(value)
-
-    def construct_yaml_object(self, node, cls):
-        data = cls.__new__(cls)
-        yield data
-        if hasattr(data, '__setstate__'):
-            state = self.construct_mapping(node, deep=True)
-            data.__setstate__(state)
-        else:
-            state = self.construct_mapping(node)
-            data.__dict__.update(state)
-
-    def construct_undefined(self, node):
-        raise ConstructorError(None, None,
-                "could not determine a constructor for the tag %r" % node.tag.encode('utf-8'),
-                node.start_mark)
-
-SafeConstructor.add_constructor(
-        u'tag:yaml.org,2002:null',
-        SafeConstructor.construct_yaml_null)
-
-SafeConstructor.add_constructor(
-        u'tag:yaml.org,2002:bool',
-        SafeConstructor.construct_yaml_bool)
-
-SafeConstructor.add_constructor(
-        u'tag:yaml.org,2002:int',
-        SafeConstructor.construct_yaml_int)
-
-SafeConstructor.add_constructor(
-        u'tag:yaml.org,2002:float',
-        SafeConstructor.construct_yaml_float)
-
-SafeConstructor.add_constructor(
-        u'tag:yaml.org,2002:binary',
-        SafeConstructor.construct_yaml_binary)
-
-SafeConstructor.add_constructor(
-        u'tag:yaml.org,2002:timestamp',
-        SafeConstructor.construct_yaml_timestamp)
-
-SafeConstructor.add_constructor(
-        u'tag:yaml.org,2002:omap',
-        SafeConstructor.construct_yaml_omap)
-
-SafeConstructor.add_constructor(
-        u'tag:yaml.org,2002:pairs',
-        SafeConstructor.construct_yaml_pairs)
-
-SafeConstructor.add_constructor(
-        u'tag:yaml.org,2002:set',
-        SafeConstructor.construct_yaml_set)
-
-SafeConstructor.add_constructor(
-        u'tag:yaml.org,2002:str',
-        SafeConstructor.construct_yaml_str)
-
-SafeConstructor.add_constructor(
-        u'tag:yaml.org,2002:seq',
-        SafeConstructor.construct_yaml_seq)
-
-SafeConstructor.add_constructor(
-        u'tag:yaml.org,2002:map',
-        SafeConstructor.construct_yaml_map)
-
-SafeConstructor.add_constructor(None,
-        SafeConstructor.construct_undefined)
-
-class Constructor(SafeConstructor):
-
-    def construct_python_str(self, node):
-        return self.construct_scalar(node).encode('utf-8')
-
-    def construct_python_unicode(self, node):
-        return self.construct_scalar(node)
-
-    def construct_python_long(self, node):
-        return long(self.construct_yaml_int(node))
-
-    def construct_python_complex(self, node):
-       return complex(self.construct_scalar(node))
-
-    def construct_python_tuple(self, node):
-        return tuple(self.construct_sequence(node))
-
-    def find_python_module(self, name, mark):
-        if not name:
-            raise ConstructorError("while constructing a Python module", mark,
-                    "expected non-empty name appended to the tag", mark)
-        try:
-            __import__(name)
-        except ImportError, exc:
-            raise ConstructorError("while constructing a Python module", mark,
-                    "cannot find module %r (%s)" % (name.encode('utf-8'), exc), mark)
-        return sys.modules[name]
-
-    def find_python_name(self, name, mark):
-        if not name:
-            raise ConstructorError("while constructing a Python object", mark,
-                    "expected non-empty name appended to the tag", mark)
-        if u'.' in name:
-            module_name, object_name = name.rsplit('.', 1)
-        else:
-            module_name = '__builtin__'
-            object_name = name
-        try:
-            __import__(module_name)
-        except ImportError, exc:
-            raise ConstructorError("while constructing a Python object", mark,
-                    "cannot find module %r (%s)" % (module_name.encode('utf-8'), exc), mark)
-        module = sys.modules[module_name]
-        if not hasattr(module, object_name):
-            raise ConstructorError("while constructing a Python object", mark,
-                    "cannot find %r in the module %r" % (object_name.encode('utf-8'),
-                        module.__name__), mark)
-        return getattr(module, object_name)
-
-    def construct_python_name(self, suffix, node):
-        value = self.construct_scalar(node)
-        if value:
-            raise ConstructorError("while constructing a Python name", node.start_mark,
-                    "expected the empty value, but found %r" % value.encode('utf-8'),
-                    node.start_mark)
-        return self.find_python_name(suffix, node.start_mark)
-
-    def construct_python_module(self, suffix, node):
-        value = self.construct_scalar(node)
-        if value:
-            raise ConstructorError("while constructing a Python module", node.start_mark,
-                    "expected the empty value, but found %r" % value.encode('utf-8'),
-                    node.start_mark)
-        return self.find_python_module(suffix, node.start_mark)
-
-    class classobj: pass
-
-    def make_python_instance(self, suffix, node,
-            args=None, kwds=None, newobj=False):
-        if not args:
-            args = []
-        if not kwds:
-            kwds = {}
-        cls = self.find_python_name(suffix, node.start_mark)
-        if newobj and isinstance(cls, type(self.classobj))  \
-                and not args and not kwds:
-            instance = self.classobj()
-            instance.__class__ = cls
-            return instance
-        elif newobj and isinstance(cls, type):
-            return cls.__new__(cls, *args, **kwds)
-        else:
-            return cls(*args, **kwds)
-
-    def set_python_instance_state(self, instance, state):
-        if hasattr(instance, '__setstate__'):
-            instance.__setstate__(state)
-        else:
-            slotstate = {}
-            if isinstance(state, tuple) and len(state) == 2:
-                state, slotstate = state
-            if hasattr(instance, '__dict__'):
-                instance.__dict__.update(state)
-            elif state:
-                slotstate.update(state)
-            for key, value in slotstate.items():
-                setattr(object, key, value)
-
-    def construct_python_object(self, suffix, node):
-        # Format:
-        #   !!python/object:module.name { ... state ... }
-        instance = self.make_python_instance(suffix, node, newobj=True)
-        yield instance
-        deep = hasattr(instance, '__setstate__')
-        state = self.construct_mapping(node, deep=deep)
-        self.set_python_instance_state(instance, state)
-
-    def construct_python_object_apply(self, suffix, node, newobj=False):
-        # Format:
-        #   !!python/object/apply       # (or !!python/object/new)
-        #   args: [ ... arguments ... ]
-        #   kwds: { ... keywords ... }
-        #   state: ... state ...
-        #   listitems: [ ... listitems ... ]
-        #   dictitems: { ... dictitems ... }
-        # or short format:
-        #   !!python/object/apply [ ... arguments ... ]
-        # The difference between !!python/object/apply and !!python/object/new
-        # is how an object is created, check make_python_instance for details.
-        if isinstance(node, SequenceNode):
-            args = self.construct_sequence(node, deep=True)
-            kwds = {}
-            state = {}
-            listitems = []
-            dictitems = {}
-        else:
-            value = self.construct_mapping(node, deep=True)
-            args = value.get('args', [])
-            kwds = value.get('kwds', {})
-            state = value.get('state', {})
-            listitems = value.get('listitems', [])
-            dictitems = value.get('dictitems', {})
-        instance = self.make_python_instance(suffix, node, args, kwds, newobj)
-        if state:
-            self.set_python_instance_state(instance, state)
-        if listitems:
-            instance.extend(listitems)
-        if dictitems:
-            for key in dictitems:
-                instance[key] = dictitems[key]
-        return instance
-
-    def construct_python_object_new(self, suffix, node):
-        return self.construct_python_object_apply(suffix, node, newobj=True)
-
-Constructor.add_constructor(
-    u'tag:yaml.org,2002:python/none',
-    Constructor.construct_yaml_null)
-
-Constructor.add_constructor(
-    u'tag:yaml.org,2002:python/bool',
-    Constructor.construct_yaml_bool)
-
-Constructor.add_constructor(
-    u'tag:yaml.org,2002:python/str',
-    Constructor.construct_python_str)
-
-Constructor.add_constructor(
-    u'tag:yaml.org,2002:python/unicode',
-    Constructor.construct_python_unicode)
-
-Constructor.add_constructor(
-    u'tag:yaml.org,2002:python/int',
-    Constructor.construct_yaml_int)
-
-Constructor.add_constructor(
-    u'tag:yaml.org,2002:python/long',
-    Constructor.construct_python_long)
-
-Constructor.add_constructor(
-    u'tag:yaml.org,2002:python/float',
-    Constructor.construct_yaml_float)
-
-Constructor.add_constructor(
-    u'tag:yaml.org,2002:python/complex',
-    Constructor.construct_python_complex)
-
-Constructor.add_constructor(
-    u'tag:yaml.org,2002:python/list',
-    Constructor.construct_yaml_seq)
-
-Constructor.add_constructor(
-    u'tag:yaml.org,2002:python/tuple',
-    Constructor.construct_python_tuple)
-
-Constructor.add_constructor(
-    u'tag:yaml.org,2002:python/dict',
-    Constructor.construct_yaml_map)
-
-Constructor.add_multi_constructor(
-    u'tag:yaml.org,2002:python/name:',
-    Constructor.construct_python_name)
-
-Constructor.add_multi_constructor(
-    u'tag:yaml.org,2002:python/module:',
-    Constructor.construct_python_module)
-
-Constructor.add_multi_constructor(
-    u'tag:yaml.org,2002:python/object:',
-    Constructor.construct_python_object)
-
-Constructor.add_multi_constructor(
-    u'tag:yaml.org,2002:python/object/apply:',
-    Constructor.construct_python_object_apply)
-
-Constructor.add_multi_constructor(
-    u'tag:yaml.org,2002:python/object/new:',
-    Constructor.construct_python_object_new)
-
diff --git a/lib/spack/external/yaml/lib/yaml/cyaml.py b/lib/spack/external/yaml/lib/yaml/cyaml.py
deleted file mode 100644
index 68dcd75192..0000000000
--- a/lib/spack/external/yaml/lib/yaml/cyaml.py
+++ /dev/null
@@ -1,85 +0,0 @@
-
-__all__ = ['CBaseLoader', 'CSafeLoader', 'CLoader',
-        'CBaseDumper', 'CSafeDumper', 'CDumper']
-
-from _yaml import CParser, CEmitter
-
-from constructor import *
-
-from serializer import *
-from representer import *
-
-from resolver import *
-
-class CBaseLoader(CParser, BaseConstructor, BaseResolver):
-
-    def __init__(self, stream):
-        CParser.__init__(self, stream)
-        BaseConstructor.__init__(self)
-        BaseResolver.__init__(self)
-
-class CSafeLoader(CParser, SafeConstructor, Resolver):
-
-    def __init__(self, stream):
-        CParser.__init__(self, stream)
-        SafeConstructor.__init__(self)
-        Resolver.__init__(self)
-
-class CLoader(CParser, Constructor, Resolver):
-
-    def __init__(self, stream):
-        CParser.__init__(self, stream)
-        Constructor.__init__(self)
-        Resolver.__init__(self)
-
-class CBaseDumper(CEmitter, BaseRepresenter, BaseResolver):
-
-    def __init__(self, stream,
-            default_style=None, default_flow_style=None,
-            canonical=None, indent=None, width=None,
-            allow_unicode=None, line_break=None,
-            encoding=None, explicit_start=None, explicit_end=None,
-            version=None, tags=None):
-        CEmitter.__init__(self, stream, canonical=canonical,
-                indent=indent, width=width, encoding=encoding,
-                allow_unicode=allow_unicode, line_break=line_break,
-                explicit_start=explicit_start, explicit_end=explicit_end,
-                version=version, tags=tags)
-        Representer.__init__(self, default_style=default_style,
-                default_flow_style=default_flow_style)
-        Resolver.__init__(self)
-
-class CSafeDumper(CEmitter, SafeRepresenter, Resolver):
-
-    def __init__(self, stream,
-            default_style=None, default_flow_style=None,
-            canonical=None, indent=None, width=None,
-            allow_unicode=None, line_break=None,
-            encoding=None, explicit_start=None, explicit_end=None,
-            version=None, tags=None):
-        CEmitter.__init__(self, stream, canonical=canonical,
-                indent=indent, width=width, encoding=encoding,
-                allow_unicode=allow_unicode, line_break=line_break,
-                explicit_start=explicit_start, explicit_end=explicit_end,
-                version=version, tags=tags)
-        SafeRepresenter.__init__(self, default_style=default_style,
-                default_flow_style=default_flow_style)
-        Resolver.__init__(self)
-
-class CDumper(CEmitter, Serializer, Representer, Resolver):
-
-    def __init__(self, stream,
-            default_style=None, default_flow_style=None,
-            canonical=None, indent=None, width=None,
-            allow_unicode=None, line_break=None,
-            encoding=None, explicit_start=None, explicit_end=None,
-            version=None, tags=None):
-        CEmitter.__init__(self, stream, canonical=canonical,
-                indent=indent, width=width, encoding=encoding,
-                allow_unicode=allow_unicode, line_break=line_break,
-                explicit_start=explicit_start, explicit_end=explicit_end,
-                version=version, tags=tags)
-        Representer.__init__(self, default_style=default_style,
-                default_flow_style=default_flow_style)
-        Resolver.__init__(self)
-
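
Since the C-accelerated classes deleted here mirror the pure-Python stack one-for-one, the usual consumer pattern is a guarded import; a hedged sketch against the public PyYAML API:

    import yaml

    try:
        # libyaml-backed parser, only available when PyYAML was built with it.
        from yaml import CSafeLoader as FastSafeLoader
    except ImportError:
        from yaml import SafeLoader as FastSafeLoader

    print(yaml.load("answer: 42", Loader=FastSafeLoader))  # {'answer': 42}
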
diff --git a/lib/spack/external/yaml/lib/yaml/dumper.py b/lib/spack/external/yaml/lib/yaml/dumper.py
deleted file mode 100644
index f811d2c919..0000000000
--- a/lib/spack/external/yaml/lib/yaml/dumper.py
+++ /dev/null
@@ -1,62 +0,0 @@
-
-__all__ = ['BaseDumper', 'SafeDumper', 'Dumper']
-
-from emitter import *
-from serializer import *
-from representer import *
-from resolver import *
-
-class BaseDumper(Emitter, Serializer, BaseRepresenter, BaseResolver):
-
-    def __init__(self, stream,
-            default_style=None, default_flow_style=None,
-            canonical=None, indent=None, width=None,
-            allow_unicode=None, line_break=None,
-            encoding=None, explicit_start=None, explicit_end=None,
-            version=None, tags=None):
-        Emitter.__init__(self, stream, canonical=canonical,
-                indent=indent, width=width,
-                allow_unicode=allow_unicode, line_break=line_break)
-        Serializer.__init__(self, encoding=encoding,
-                explicit_start=explicit_start, explicit_end=explicit_end,
-                version=version, tags=tags)
-        Representer.__init__(self, default_style=default_style,
-                default_flow_style=default_flow_style)
-        Resolver.__init__(self)
-
-class SafeDumper(Emitter, Serializer, SafeRepresenter, Resolver):
-
-    def __init__(self, stream,
-            default_style=None, default_flow_style=None,
-            canonical=None, indent=None, width=None,
-            allow_unicode=None, line_break=None,
-            encoding=None, explicit_start=None, explicit_end=None,
-            version=None, tags=None):
-        Emitter.__init__(self, stream, canonical=canonical,
-                indent=indent, width=width,
-                allow_unicode=allow_unicode, line_break=line_break)
-        Serializer.__init__(self, encoding=encoding,
-                explicit_start=explicit_start, explicit_end=explicit_end,
-                version=version, tags=tags)
-        SafeRepresenter.__init__(self, default_style=default_style,
-                default_flow_style=default_flow_style)
-        Resolver.__init__(self)
-
-class Dumper(Emitter, Serializer, Representer, Resolver):
-
-    def __init__(self, stream,
-            default_style=None, default_flow_style=None,
-            canonical=None, indent=None, width=None,
-            allow_unicode=None, line_break=None,
-            encoding=None, explicit_start=None, explicit_end=None,
-            version=None, tags=None):
-        Emitter.__init__(self, stream, canonical=canonical,
-                indent=indent, width=width,
-                allow_unicode=allow_unicode, line_break=line_break)
-        Serializer.__init__(self, encoding=encoding,
-                explicit_start=explicit_start, explicit_end=explicit_end,
-                version=version, tags=tags)
-        Representer.__init__(self, default_style=default_style,
-                default_flow_style=default_flow_style)
-        Resolver.__init__(self)
-
diff --git a/lib/spack/external/yaml/lib/yaml/loader.py b/lib/spack/external/yaml/lib/yaml/loader.py
deleted file mode 100644
index 293ff467b1..0000000000
--- a/lib/spack/external/yaml/lib/yaml/loader.py
+++ /dev/null
@@ -1,40 +0,0 @@
-
-__all__ = ['BaseLoader', 'SafeLoader', 'Loader']
-
-from reader import *
-from scanner import *
-from parser import *
-from composer import *
-from constructor import *
-from resolver import *
-
-class BaseLoader(Reader, Scanner, Parser, Composer, BaseConstructor, BaseResolver):
-
-    def __init__(self, stream):
-        Reader.__init__(self, stream)
-        Scanner.__init__(self)
-        Parser.__init__(self)
-        Composer.__init__(self)
-        BaseConstructor.__init__(self)
-        BaseResolver.__init__(self)
-
-class SafeLoader(Reader, Scanner, Parser, Composer, SafeConstructor, Resolver):
-
-    def __init__(self, stream):
-        Reader.__init__(self, stream)
-        Scanner.__init__(self)
-        Parser.__init__(self)
-        Composer.__init__(self)
-        SafeConstructor.__init__(self)
-        Resolver.__init__(self)
-
-class Loader(Reader, Scanner, Parser, Composer, Constructor, Resolver):
-
-    def __init__(self, stream):
-        Reader.__init__(self, stream)
-        Scanner.__init__(self)
-        Parser.__init__(self)
-        Composer.__init__(self)
-        Constructor.__init__(self)
-        Resolver.__init__(self)
-
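
Because each loader is just this stack of mixins, extending one is a matter of subclassing and registering a constructor; a minimal sketch (`PointLoader` and the `!point` tag are illustrative names, not part of the library):

    import yaml

    class PointLoader(yaml.SafeLoader):
        """SafeLoader plus one application-specific tag."""

    def construct_point(loader, node):
        # construct_sequence comes from the mixed-in SafeConstructor.
        x, y = loader.construct_sequence(node)
        return (x, y)

    PointLoader.add_constructor('!point', construct_point)

    print(yaml.load('corner: !point [1, 2]', Loader=PointLoader))
    # {'corner': (1, 2)}
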
diff --git a/lib/spack/external/yaml/lib/yaml/nodes.py b/lib/spack/external/yaml/lib/yaml/nodes.py
deleted file mode 100644
index c4f070c41e..0000000000
--- a/lib/spack/external/yaml/lib/yaml/nodes.py
+++ /dev/null
@@ -1,49 +0,0 @@
-
-class Node(object):
-    def __init__(self, tag, value, start_mark, end_mark):
-        self.tag = tag
-        self.value = value
-        self.start_mark = start_mark
-        self.end_mark = end_mark
-    def __repr__(self):
-        value = self.value
-        #if isinstance(value, list):
-        #    if len(value) == 0:
-        #        value = '<empty>'
-        #    elif len(value) == 1:
-        #        value = '<1 item>'
-        #    else:
-        #        value = '<%d items>' % len(value)
-        #else:
-        #    if len(value) > 75:
-        #        value = repr(value[:70]+u' ... ')
-        #    else:
-        #        value = repr(value)
-        value = repr(value)
-        return '%s(tag=%r, value=%s)' % (self.__class__.__name__, self.tag, value)
-
-class ScalarNode(Node):
-    id = 'scalar'
-    def __init__(self, tag, value,
-            start_mark=None, end_mark=None, style=None):
-        self.tag = tag
-        self.value = value
-        self.start_mark = start_mark
-        self.end_mark = end_mark
-        self.style = style
-
-class CollectionNode(Node):
-    def __init__(self, tag, value,
-            start_mark=None, end_mark=None, flow_style=None):
-        self.tag = tag
-        self.value = value
-        self.start_mark = start_mark
-        self.end_mark = end_mark
-        self.flow_style = flow_style
-
-class SequenceNode(CollectionNode):
-    id = 'sequence'
-
-class MappingNode(CollectionNode):
-    id = 'mapping'
-
diff --git a/lib/spack/external/yaml/lib/yaml/reader.py b/lib/spack/external/yaml/lib/yaml/reader.py
deleted file mode 100644
index 3249e6b9f5..0000000000
--- a/lib/spack/external/yaml/lib/yaml/reader.py
+++ /dev/null
@@ -1,190 +0,0 @@
-# This module contains abstractions for the input stream. You don't have to
-# look further; there is no pretty code here.
-#
-# We define two classes here.
-#
-#   Mark(source, line, column)
-# It's just a record, and its only use is producing nice error messages.
-# The parser does not use it for any other purpose.
-#
-#   Reader(source, data)
-# Reader determines the encoding of `data` and converts it to unicode.
-# Reader provides the following methods and attributes:
-#   reader.peek(length=1) - return the next `length` characters.
-#   reader.forward(length=1) - move the current position `length` characters forward.
-#   reader.index - the number of the current character.
-#   reader.line, reader.column - the line and the column of the current character.
-
-__all__ = ['Reader', 'ReaderError']
-
-from error import YAMLError, Mark
-
-import codecs, re
-
-class ReaderError(YAMLError):
-
-    def __init__(self, name, position, character, encoding, reason):
-        self.name = name
-        self.character = character
-        self.position = position
-        self.encoding = encoding
-        self.reason = reason
-
-    def __str__(self):
-        if isinstance(self.character, str):
-            return "'%s' codec can't decode byte #x%02x: %s\n"  \
-                    "  in \"%s\", position %d"    \
-                    % (self.encoding, ord(self.character), self.reason,
-                            self.name, self.position)
-        else:
-            return "unacceptable character #x%04x: %s\n"    \
-                    "  in \"%s\", position %d"    \
-                    % (self.character, self.reason,
-                            self.name, self.position)
-
-class Reader(object):
-    # Reader:
-    # - determines the data encoding and converts it to unicode,
-    # - checks if characters are in allowed range,
-    # - adds '\0' to the end.
-
-    # Reader accepts
-    #  - a `str` object,
-    #  - a `unicode` object,
-    #  - a file-like object with its `read` method returning `str`,
-    #  - a file-like object with its `read` method returning `unicode`.
-
-    # Yeah, it's ugly and slow.
-
-    def __init__(self, stream):
-        self.name = None
-        self.stream = None
-        self.stream_pointer = 0
-        self.eof = True
-        self.buffer = u''
-        self.pointer = 0
-        self.raw_buffer = None
-        self.raw_decode = None
-        self.encoding = None
-        self.index = 0
-        self.line = 0
-        self.column = 0
-        if isinstance(stream, unicode):
-            self.name = "<unicode string>"
-            self.check_printable(stream)
-            self.buffer = stream+u'\0'
-        elif isinstance(stream, str):
-            self.name = "<string>"
-            self.raw_buffer = stream
-            self.determine_encoding()
-        else:
-            self.stream = stream
-            self.name = getattr(stream, 'name', "<file>")
-            self.eof = False
-            self.raw_buffer = ''
-            self.determine_encoding()
-
-    def peek(self, index=0):
-        try:
-            return self.buffer[self.pointer+index]
-        except IndexError:
-            self.update(index+1)
-            return self.buffer[self.pointer+index]
-
-    def prefix(self, length=1):
-        if self.pointer+length >= len(self.buffer):
-            self.update(length)
-        return self.buffer[self.pointer:self.pointer+length]
-
-    def forward(self, length=1):
-        if self.pointer+length+1 >= len(self.buffer):
-            self.update(length+1)
-        while length:
-            ch = self.buffer[self.pointer]
-            self.pointer += 1
-            self.index += 1
-            if ch in u'\n\x85\u2028\u2029'  \
-                    or (ch == u'\r' and self.buffer[self.pointer] != u'\n'):
-                self.line += 1
-                self.column = 0
-            elif ch != u'\uFEFF':
-                self.column += 1
-            length -= 1
-
-    def get_mark(self):
-        if self.stream is None:
-            return Mark(self.name, self.index, self.line, self.column,
-                    self.buffer, self.pointer)
-        else:
-            return Mark(self.name, self.index, self.line, self.column,
-                    None, None)
-
-    def determine_encoding(self):
-        while not self.eof and len(self.raw_buffer) < 2:
-            self.update_raw()
-        if not isinstance(self.raw_buffer, unicode):
-            if self.raw_buffer.startswith(codecs.BOM_UTF16_LE):
-                self.raw_decode = codecs.utf_16_le_decode
-                self.encoding = 'utf-16-le'
-            elif self.raw_buffer.startswith(codecs.BOM_UTF16_BE):
-                self.raw_decode = codecs.utf_16_be_decode
-                self.encoding = 'utf-16-be'
-            else:
-                self.raw_decode = codecs.utf_8_decode
-                self.encoding = 'utf-8'
-        self.update(1)
-
-    NON_PRINTABLE = re.compile(u'[^\x09\x0A\x0D\x20-\x7E\x85\xA0-\uD7FF\uE000-\uFFFD]')
-    def check_printable(self, data):
-        match = self.NON_PRINTABLE.search(data)
-        if match:
-            character = match.group()
-            position = self.index+(len(self.buffer)-self.pointer)+match.start()
-            raise ReaderError(self.name, position, ord(character),
-                    'unicode', "special characters are not allowed")
-
-    def update(self, length):
-        if self.raw_buffer is None:
-            return
-        self.buffer = self.buffer[self.pointer:]
-        self.pointer = 0
-        while len(self.buffer) < length:
-            if not self.eof:
-                self.update_raw()
-            if self.raw_decode is not None:
-                try:
-                    data, converted = self.raw_decode(self.raw_buffer,
-                            'strict', self.eof)
-                except UnicodeDecodeError, exc:
-                    character = exc.object[exc.start]
-                    if self.stream is not None:
-                        position = self.stream_pointer-len(self.raw_buffer)+exc.start
-                    else:
-                        position = exc.start
-                    raise ReaderError(self.name, position, character,
-                            exc.encoding, exc.reason)
-            else:
-                data = self.raw_buffer
-                converted = len(data)
-            self.check_printable(data)
-            self.buffer += data
-            self.raw_buffer = self.raw_buffer[converted:]
-            if self.eof:
-                self.buffer += u'\0'
-                self.raw_buffer = None
-                break
-
-    def update_raw(self, size=1024):
-        data = self.stream.read(size)
-        if data:
-            self.raw_buffer += data
-            self.stream_pointer += len(data)
-        else:
-            self.eof = True
-
-#try:
-#    import psyco
-#    psyco.bind(Reader)
-#except ImportError:
-#    pass
-
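
The same Reader contract described in the header comment survives in the Python 3 tree, so it can be checked directly against an installed PyYAML; a small sketch:

    from yaml.reader import Reader

    r = Reader("key: value\n")
    print(r.peek())       # 'k'  (look ahead without consuming)
    print(r.prefix(3))    # 'key'
    r.forward(5)          # consume 'key: '
    mark = r.get_mark()
    print(mark.line, mark.column)  # 0 5
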
diff --git a/lib/spack/external/yaml/lib/yaml/representer.py b/lib/spack/external/yaml/lib/yaml/representer.py
deleted file mode 100644
index 4ea8cb1fe1..0000000000
--- a/lib/spack/external/yaml/lib/yaml/representer.py
+++ /dev/null
@@ -1,486 +0,0 @@
-
-__all__ = ['BaseRepresenter', 'SafeRepresenter', 'Representer',
-    'RepresenterError']
-
-from error import *
-from nodes import *
-
-import datetime
-
-import sys, copy_reg, types
-
-class RepresenterError(YAMLError):
-    pass
-
-class BaseRepresenter(object):
-
-    yaml_representers = {}
-    yaml_multi_representers = {}
-
-    def __init__(self, default_style=None, default_flow_style=None):
-        self.default_style = default_style
-        self.default_flow_style = default_flow_style
-        self.represented_objects = {}
-        self.object_keeper = []
-        self.alias_key = None
-
-    def represent(self, data):
-        node = self.represent_data(data)
-        self.serialize(node)
-        self.represented_objects = {}
-        self.object_keeper = []
-        self.alias_key = None
-
-    def get_classobj_bases(self, cls):
-        bases = [cls]
-        for base in cls.__bases__:
-            bases.extend(self.get_classobj_bases(base))
-        return bases
-
-    def represent_data(self, data):
-        if self.ignore_aliases(data):
-            self.alias_key = None
-        else:
-            self.alias_key = id(data)
-        if self.alias_key is not None:
-            if self.alias_key in self.represented_objects:
-                node = self.represented_objects[self.alias_key]
-                #if node is None:
-                #    raise RepresenterError("recursive objects are not allowed: %r" % data)
-                return node
-            #self.represented_objects[alias_key] = None
-            self.object_keeper.append(data)
-        data_types = type(data).__mro__
-        if type(data) is types.InstanceType:
-            data_types = self.get_classobj_bases(data.__class__)+list(data_types)
-        if data_types[0] in self.yaml_representers:
-            node = self.yaml_representers[data_types[0]](self, data)
-        else:
-            for data_type in data_types:
-                if data_type in self.yaml_multi_representers:
-                    node = self.yaml_multi_representers[data_type](self, data)
-                    break
-            else:
-                if None in self.yaml_multi_representers:
-                    node = self.yaml_multi_representers[None](self, data)
-                elif None in self.yaml_representers:
-                    node = self.yaml_representers[None](self, data)
-                else:
-                    node = ScalarNode(None, unicode(data))
-        #if alias_key is not None:
-        #    self.represented_objects[alias_key] = node
-        return node
-
-    def add_representer(cls, data_type, representer):
-        if not 'yaml_representers' in cls.__dict__:
-            cls.yaml_representers = cls.yaml_representers.copy()
-        cls.yaml_representers[data_type] = representer
-    add_representer = classmethod(add_representer)
-
-    def add_multi_representer(cls, data_type, representer):
-        if not 'yaml_multi_representers' in cls.__dict__:
-            cls.yaml_multi_representers = cls.yaml_multi_representers.copy()
-        cls.yaml_multi_representers[data_type] = representer
-    add_multi_representer = classmethod(add_multi_representer)
-
-    def represent_scalar(self, tag, value, style=None):
-        if style is None:
-            style = self.default_style
-        node = ScalarNode(tag, value, style=style)
-        if self.alias_key is not None:
-            self.represented_objects[self.alias_key] = node
-        return node
-
-    def represent_sequence(self, tag, sequence, flow_style=None):
-        value = []
-        node = SequenceNode(tag, value, flow_style=flow_style)
-        if self.alias_key is not None:
-            self.represented_objects[self.alias_key] = node
-        best_style = True
-        for item in sequence:
-            node_item = self.represent_data(item)
-            if not (isinstance(node_item, ScalarNode) and not node_item.style):
-                best_style = False
-            value.append(node_item)
-        if flow_style is None:
-            if self.default_flow_style is not None:
-                node.flow_style = self.default_flow_style
-            else:
-                node.flow_style = best_style
-        return node
-
-    def represent_mapping(self, tag, mapping, flow_style=None):
-        value = []
-        node = MappingNode(tag, value, flow_style=flow_style)
-        if self.alias_key is not None:
-            self.represented_objects[self.alias_key] = node
-        best_style = True
-        if hasattr(mapping, 'items'):
-            mapping = mapping.items()
-            mapping.sort()
-        for item_key, item_value in mapping:
-            node_key = self.represent_data(item_key)
-            node_value = self.represent_data(item_value)
-            if not (isinstance(node_key, ScalarNode) and not node_key.style):
-                best_style = False
-            if not (isinstance(node_value, ScalarNode) and not node_value.style):
-                best_style = False
-            value.append((node_key, node_value))
-        if flow_style is None:
-            if self.default_flow_style is not None:
-                node.flow_style = self.default_flow_style
-            else:
-                node.flow_style = best_style
-        return node
-
-    def ignore_aliases(self, data):
-        return False
-
-class SafeRepresenter(BaseRepresenter):
-
-    def ignore_aliases(self, data):
-        if data is None:
-            return True
-        if isinstance(data, tuple) and data == ():
-            return True
-        if isinstance(data, (str, unicode, bool, int, float)):
-            return True
-
-    def represent_none(self, data):
-        return self.represent_scalar(u'tag:yaml.org,2002:null',
-                u'null')
-
-    def represent_str(self, data):
-        tag = None
-        style = None
-        try:
-            data = unicode(data, 'ascii')
-            tag = u'tag:yaml.org,2002:str'
-        except UnicodeDecodeError:
-            try:
-                data = unicode(data, 'utf-8')
-                tag = u'tag:yaml.org,2002:str'
-            except UnicodeDecodeError:
-                data = data.encode('base64')
-                tag = u'tag:yaml.org,2002:binary'
-                style = '|'
-        return self.represent_scalar(tag, data, style=style)
-
-    def represent_unicode(self, data):
-        return self.represent_scalar(u'tag:yaml.org,2002:str', data)
-
-    def represent_bool(self, data):
-        if data:
-            value = u'true'
-        else:
-            value = u'false'
-        return self.represent_scalar(u'tag:yaml.org,2002:bool', value)
-
-    def represent_int(self, data):
-        return self.represent_scalar(u'tag:yaml.org,2002:int', unicode(data))
-
-    def represent_long(self, data):
-        return self.represent_scalar(u'tag:yaml.org,2002:int', unicode(data))
-
-    inf_value = 1e300
-    while repr(inf_value) != repr(inf_value*inf_value):
-        inf_value *= inf_value
-
-    def represent_float(self, data):
-        if data != data or (data == 0.0 and data == 1.0):
-            value = u'.nan'
-        elif data == self.inf_value:
-            value = u'.inf'
-        elif data == -self.inf_value:
-            value = u'-.inf'
-        else:
-            value = unicode(repr(data)).lower()
-            # Note that in some cases `repr(data)` represents a float number
-            # without the decimal parts.  For instance:
-            #   >>> repr(1e17)
-            #   '1e17'
-            # Unfortunately, this is not a valid float representation according
-            # to the definition of the `!!float` tag.  We fix this by adding
-            # '.0' before the 'e' symbol.
-            if u'.' not in value and u'e' in value:
-                value = value.replace(u'e', u'.0e', 1)
-        return self.represent_scalar(u'tag:yaml.org,2002:float', value)
-
-    def represent_list(self, data):
-        #pairs = (len(data) > 0 and isinstance(data, list))
-        #if pairs:
-        #    for item in data:
-        #        if not isinstance(item, tuple) or len(item) != 2:
-        #            pairs = False
-        #            break
-        #if not pairs:
-            return self.represent_sequence(u'tag:yaml.org,2002:seq', data)
-        #value = []
-        #for item_key, item_value in data:
-        #    value.append(self.represent_mapping(u'tag:yaml.org,2002:map',
-        #        [(item_key, item_value)]))
-        #return SequenceNode(u'tag:yaml.org,2002:pairs', value)
-
-    def represent_dict(self, data):
-        return self.represent_mapping(u'tag:yaml.org,2002:map', data)
-
-    def represent_set(self, data):
-        value = {}
-        for key in data:
-            value[key] = None
-        return self.represent_mapping(u'tag:yaml.org,2002:set', value)
-
-    def represent_date(self, data):
-        value = unicode(data.isoformat())
-        return self.represent_scalar(u'tag:yaml.org,2002:timestamp', value)
-
-    def represent_datetime(self, data):
-        value = unicode(data.isoformat(' '))
-        return self.represent_scalar(u'tag:yaml.org,2002:timestamp', value)
-
-    def represent_yaml_object(self, tag, data, cls, flow_style=None):
-        if hasattr(data, '__getstate__'):
-            state = data.__getstate__()
-        else:
-            state = data.__dict__.copy()
-        return self.represent_mapping(tag, state, flow_style=flow_style)
-
-    def represent_undefined(self, data):
-        raise RepresenterError("cannot represent an object: %s" % data)
-
-SafeRepresenter.add_representer(type(None),
-        SafeRepresenter.represent_none)
-
-SafeRepresenter.add_representer(str,
-        SafeRepresenter.represent_str)
-
-SafeRepresenter.add_representer(unicode,
-        SafeRepresenter.represent_unicode)
-
-SafeRepresenter.add_representer(bool,
-        SafeRepresenter.represent_bool)
-
-SafeRepresenter.add_representer(int,
-        SafeRepresenter.represent_int)
-
-SafeRepresenter.add_representer(long,
-        SafeRepresenter.represent_long)
-
-SafeRepresenter.add_representer(float,
-        SafeRepresenter.represent_float)
-
-SafeRepresenter.add_representer(list,
-        SafeRepresenter.represent_list)
-
-SafeRepresenter.add_representer(tuple,
-        SafeRepresenter.represent_list)
-
-SafeRepresenter.add_representer(dict,
-        SafeRepresenter.represent_dict)
-
-SafeRepresenter.add_representer(set,
-        SafeRepresenter.represent_set)
-
-SafeRepresenter.add_representer(datetime.date,
-        SafeRepresenter.represent_date)
-
-SafeRepresenter.add_representer(datetime.datetime,
-        SafeRepresenter.represent_datetime)
-
-SafeRepresenter.add_representer(None,
-        SafeRepresenter.represent_undefined)
-
-class Representer(SafeRepresenter):
-
-    def represent_str(self, data):
-        tag = None
-        style = None
-        try:
-            data = unicode(data, 'ascii')
-            tag = u'tag:yaml.org,2002:str'
-        except UnicodeDecodeError:
-            try:
-                data = unicode(data, 'utf-8')
-                tag = u'tag:yaml.org,2002:python/str'
-            except UnicodeDecodeError:
-                data = data.encode('base64')
-                tag = u'tag:yaml.org,2002:binary'
-                style = '|'
-        return self.represent_scalar(tag, data, style=style)
-
-    def represent_unicode(self, data):
-        tag = None
-        try:
-            data.encode('ascii')
-            tag = u'tag:yaml.org,2002:python/unicode'
-        except UnicodeEncodeError:
-            tag = u'tag:yaml.org,2002:str'
-        return self.represent_scalar(tag, data)
-
-    def represent_long(self, data):
-        tag = u'tag:yaml.org,2002:int'
-        if int(data) is not data:
-            tag = u'tag:yaml.org,2002:python/long'
-        return self.represent_scalar(tag, unicode(data))
-
-    def represent_complex(self, data):
-        if data.imag == 0.0:
-            data = u'%r' % data.real
-        elif data.real == 0.0:
-            data = u'%rj' % data.imag
-        elif data.imag > 0:
-            data = u'%r+%rj' % (data.real, data.imag)
-        else:
-            data = u'%r%rj' % (data.real, data.imag)
-        return self.represent_scalar(u'tag:yaml.org,2002:python/complex', data)
-
-    def represent_tuple(self, data):
-        return self.represent_sequence(u'tag:yaml.org,2002:python/tuple', data)
-
-    def represent_name(self, data):
-        name = u'%s.%s' % (data.__module__, data.__name__)
-        return self.represent_scalar(u'tag:yaml.org,2002:python/name:'+name, u'')
-
-    def represent_module(self, data):
-        return self.represent_scalar(
-                u'tag:yaml.org,2002:python/module:'+data.__name__, u'')
-
-    def represent_instance(self, data):
-        # For instances of classic classes, we use __getinitargs__ and
-        # __getstate__ to serialize the data.
-
-        # If data.__getinitargs__ exists, the object must be reconstructed by
-        # calling cls(*args), where args is the tuple returned by
-        # __getinitargs__. Otherwise, the cls.__init__ method should never be
-        # called and the class instance is created by instantiating a trivial
-        # class and assigning to the instance's __class__ variable.
-
-        # If data.__getstate__ exists, it returns the state of the object.
-        # Otherwise, the state of the object is data.__dict__.
-
-        # We produce either a !!python/object or !!python/object/new node.
-        # If data.__getinitargs__ does not exist and state is a dictionary, we
-        # produce a !!python/object node. Otherwise, we produce a
-        # !!python/object/new node.
-
-        cls = data.__class__
-        class_name = u'%s.%s' % (cls.__module__, cls.__name__)
-        args = None
-        state = None
-        if hasattr(data, '__getinitargs__'):
-            args = list(data.__getinitargs__())
-        if hasattr(data, '__getstate__'):
-            state = data.__getstate__()
-        else:
-            state = data.__dict__
-        if args is None and isinstance(state, dict):
-            return self.represent_mapping(
-                    u'tag:yaml.org,2002:python/object:'+class_name, state)
-        if isinstance(state, dict) and not state:
-            return self.represent_sequence(
-                    u'tag:yaml.org,2002:python/object/new:'+class_name, args)
-        value = {}
-        if args:
-            value['args'] = args
-        value['state'] = state
-        return self.represent_mapping(
-                u'tag:yaml.org,2002:python/object/new:'+class_name, value)
-
-    def represent_object(self, data):
-        # We use __reduce__ API to save the data. data.__reduce__ returns
-        # a tuple of length 2-5:
-        #   (function, args, state, listitems, dictitems)
-
-        # For reconstructing, we call function(*args), then set its state,
-        # listitems, and dictitems if they are not None.
-
-        # A special case is when function.__name__ == '__newobj__'. In this
-        # case we create the object with args[0].__new__(*args).
-
-        # Another special case is when __reduce__ returns a string; we don't
-        # support that.
-
-        # We produce a !!python/object, !!python/object/new or
-        # !!python/object/apply node.
-
-        cls = type(data)
-        if cls in copy_reg.dispatch_table:
-            reduce = copy_reg.dispatch_table[cls](data)
-        elif hasattr(data, '__reduce_ex__'):
-            reduce = data.__reduce_ex__(2)
-        elif hasattr(data, '__reduce__'):
-            reduce = data.__reduce__()
-        else:
-            raise RepresenterError("cannot represent object: %r" % data)
-        reduce = (list(reduce)+[None]*5)[:5]
-        function, args, state, listitems, dictitems = reduce
-        args = list(args)
-        if state is None:
-            state = {}
-        if listitems is not None:
-            listitems = list(listitems)
-        if dictitems is not None:
-            dictitems = dict(dictitems)
-        if function.__name__ == '__newobj__':
-            function = args[0]
-            args = args[1:]
-            tag = u'tag:yaml.org,2002:python/object/new:'
-            newobj = True
-        else:
-            tag = u'tag:yaml.org,2002:python/object/apply:'
-            newobj = False
-        function_name = u'%s.%s' % (function.__module__, function.__name__)
-        if not args and not listitems and not dictitems \
-                and isinstance(state, dict) and newobj:
-            return self.represent_mapping(
-                    u'tag:yaml.org,2002:python/object:'+function_name, state)
-        if not listitems and not dictitems  \
-                and isinstance(state, dict) and not state:
-            return self.represent_sequence(tag+function_name, args)
-        value = {}
-        if args:
-            value['args'] = args
-        if state or not isinstance(state, dict):
-            value['state'] = state
-        if listitems:
-            value['listitems'] = listitems
-        if dictitems:
-            value['dictitems'] = dictitems
-        return self.represent_mapping(tag+function_name, value)
-
-Representer.add_representer(str,
-        Representer.represent_str)
-
-Representer.add_representer(unicode,
-        Representer.represent_unicode)
-
-Representer.add_representer(long,
-        Representer.represent_long)
-
-Representer.add_representer(complex,
-        Representer.represent_complex)
-
-Representer.add_representer(tuple,
-        Representer.represent_tuple)
-
-Representer.add_representer(type,
-        Representer.represent_name)
-
-Representer.add_representer(types.ClassType,
-        Representer.represent_name)
-
-Representer.add_representer(types.FunctionType,
-        Representer.represent_name)
-
-Representer.add_representer(types.BuiltinFunctionType,
-        Representer.represent_name)
-
-Representer.add_representer(types.ModuleType,
-        Representer.represent_module)
-
-Representer.add_multi_representer(types.InstanceType,
-        Representer.represent_instance)
-
-Representer.add_multi_representer(object,
-        Representer.represent_object)
-
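
The `__reduce__`-driven fallback above is what makes ordinary instances dumpable with the full `Dumper`; a minimal sketch (the `Point` class is illustrative):

    import yaml

    class Point(object):
        def __init__(self, x, y):
            self.x = x
            self.y = y

    # represent_object reduces Point via __reduce_ex__(2); with no extra
    # args or items, it comes out as a plain !!python/object mapping.
    print(yaml.dump(Point(1, 2)))
    # !!python/object:__main__.Point {x: 1, y: 2}   (modulo flow style)
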
diff --git a/lib/spack/external/yaml/lib/yaml/resolver.py b/lib/spack/external/yaml/lib/yaml/resolver.py
deleted file mode 100644
index 528fbc0ead..0000000000
--- a/lib/spack/external/yaml/lib/yaml/resolver.py
+++ /dev/null
@@ -1,227 +0,0 @@
-
-__all__ = ['BaseResolver', 'Resolver']
-
-from error import *
-from nodes import *
-
-import re
-
-class ResolverError(YAMLError):
-    pass
-
-class BaseResolver(object):
-
-    DEFAULT_SCALAR_TAG = u'tag:yaml.org,2002:str'
-    DEFAULT_SEQUENCE_TAG = u'tag:yaml.org,2002:seq'
-    DEFAULT_MAPPING_TAG = u'tag:yaml.org,2002:map'
-
-    yaml_implicit_resolvers = {}
-    yaml_path_resolvers = {}
-
-    def __init__(self):
-        self.resolver_exact_paths = []
-        self.resolver_prefix_paths = []
-
-    def add_implicit_resolver(cls, tag, regexp, first):
-        if not 'yaml_implicit_resolvers' in cls.__dict__:
-            implicit_resolvers = {}
-            for key in cls.yaml_implicit_resolvers:
-                implicit_resolvers[key] = cls.yaml_implicit_resolvers[key][:]
-            cls.yaml_implicit_resolvers = implicit_resolvers
-        if first is None:
-            first = [None]
-        for ch in first:
-            cls.yaml_implicit_resolvers.setdefault(ch, []).append((tag, regexp))
-    add_implicit_resolver = classmethod(add_implicit_resolver)
-
-    def add_path_resolver(cls, tag, path, kind=None):
-        # Note: `add_path_resolver` is experimental.  The API could change.
-        # `path` is a pattern that is matched against the path from the
-        # root to the node that is being considered.  Path elements are
-        # tuples `(node_check, index_check)`.  `node_check` is a node class:
-        # `ScalarNode`, `SequenceNode`, `MappingNode` or `None`.  `None`
-        # matches any kind of node.  `index_check` could be `None`, a boolean
-        # value, a string value, or a number.  `None` and `False` match against
-        # any _value_ of sequence and mapping nodes.  `True` matches against
-        # any _key_ of a mapping node.  A string `index_check` matches against
-        # a mapping value whose scalar key has content equal to the
-        # `index_check` value.  An integer `index_check` matches against
-        # a sequence value with the index equal to `index_check`.
-        if not 'yaml_path_resolvers' in cls.__dict__:
-            cls.yaml_path_resolvers = cls.yaml_path_resolvers.copy()
-        new_path = []
-        for element in path:
-            if isinstance(element, (list, tuple)):
-                if len(element) == 2:
-                    node_check, index_check = element
-                elif len(element) == 1:
-                    node_check = element[0]
-                    index_check = True
-                else:
-                    raise ResolverError("Invalid path element: %s" % element)
-            else:
-                node_check = None
-                index_check = element
-            if node_check is str:
-                node_check = ScalarNode
-            elif node_check is list:
-                node_check = SequenceNode
-            elif node_check is dict:
-                node_check = MappingNode
-            elif node_check not in [ScalarNode, SequenceNode, MappingNode]  \
-                    and not isinstance(node_check, basestring)  \
-                    and node_check is not None:
-                raise ResolverError("Invalid node checker: %s" % node_check)
-            if not isinstance(index_check, (basestring, int))   \
-                    and index_check is not None:
-                raise ResolverError("Invalid index checker: %s" % index_check)
-            new_path.append((node_check, index_check))
-        if kind is str:
-            kind = ScalarNode
-        elif kind is list:
-            kind = SequenceNode
-        elif kind is dict:
-            kind = MappingNode
-        elif kind not in [ScalarNode, SequenceNode, MappingNode]    \
-                and kind is not None:
-            raise ResolverError("Invalid node kind: %s" % kind)
-        cls.yaml_path_resolvers[tuple(new_path), kind] = tag
-    add_path_resolver = classmethod(add_path_resolver)
-
-    def descend_resolver(self, current_node, current_index):
-        if not self.yaml_path_resolvers:
-            return
-        exact_paths = {}
-        prefix_paths = []
-        if current_node:
-            depth = len(self.resolver_prefix_paths)
-            for path, kind in self.resolver_prefix_paths[-1]:
-                if self.check_resolver_prefix(depth, path, kind,
-                        current_node, current_index):
-                    if len(path) > depth:
-                        prefix_paths.append((path, kind))
-                    else:
-                        exact_paths[kind] = self.yaml_path_resolvers[path, kind]
-        else:
-            for path, kind in self.yaml_path_resolvers:
-                if not path:
-                    exact_paths[kind] = self.yaml_path_resolvers[path, kind]
-                else:
-                    prefix_paths.append((path, kind))
-        self.resolver_exact_paths.append(exact_paths)
-        self.resolver_prefix_paths.append(prefix_paths)
-
-    def ascend_resolver(self):
-        if not self.yaml_path_resolvers:
-            return
-        self.resolver_exact_paths.pop()
-        self.resolver_prefix_paths.pop()
-
-    def check_resolver_prefix(self, depth, path, kind,
-            current_node, current_index):
-        node_check, index_check = path[depth-1]
-        if isinstance(node_check, basestring):
-            if current_node.tag != node_check:
-                return
-        elif node_check is not None:
-            if not isinstance(current_node, node_check):
-                return
-        if index_check is True and current_index is not None:
-            return
-        if (index_check is False or index_check is None)    \
-                and current_index is None:
-            return
-        if isinstance(index_check, basestring):
-            if not (isinstance(current_index, ScalarNode)
-                    and index_check == current_index.value):
-                return
-        elif isinstance(index_check, int) and not isinstance(index_check, bool):
-            if index_check != current_index:
-                return
-        return True
-
-    def resolve(self, kind, value, implicit):
-        if kind is ScalarNode and implicit[0]:
-            if value == u'':
-                resolvers = self.yaml_implicit_resolvers.get(u'', [])
-            else:
-                resolvers = self.yaml_implicit_resolvers.get(value[0], [])
-            resolvers += self.yaml_implicit_resolvers.get(None, [])
-            for tag, regexp in resolvers:
-                if regexp.match(value):
-                    return tag
-            implicit = implicit[1]
-        if self.yaml_path_resolvers:
-            exact_paths = self.resolver_exact_paths[-1]
-            if kind in exact_paths:
-                return exact_paths[kind]
-            if None in exact_paths:
-                return exact_paths[None]
-        if kind is ScalarNode:
-            return self.DEFAULT_SCALAR_TAG
-        elif kind is SequenceNode:
-            return self.DEFAULT_SEQUENCE_TAG
-        elif kind is MappingNode:
-            return self.DEFAULT_MAPPING_TAG
-
-class Resolver(BaseResolver):
-    pass
-
-Resolver.add_implicit_resolver(
-        u'tag:yaml.org,2002:bool',
-        re.compile(ur'''^(?:yes|Yes|YES|no|No|NO
-                    |true|True|TRUE|false|False|FALSE
-                    |on|On|ON|off|Off|OFF)$''', re.X),
-        list(u'yYnNtTfFoO'))
-
-Resolver.add_implicit_resolver(
-        u'tag:yaml.org,2002:float',
-        re.compile(ur'''^(?:[-+]?(?:[0-9][0-9_]*)\.[0-9_]*(?:[eE][-+][0-9]+)?
-                    |\.[0-9_]+(?:[eE][-+][0-9]+)?
-                    |[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+\.[0-9_]*
-                    |[-+]?\.(?:inf|Inf|INF)
-                    |\.(?:nan|NaN|NAN))$''', re.X),
-        list(u'-+0123456789.'))
-
-Resolver.add_implicit_resolver(
-        u'tag:yaml.org,2002:int',
-        re.compile(ur'''^(?:[-+]?0b[0-1_]+
-                    |[-+]?0[0-7_]+
-                    |[-+]?(?:0|[1-9][0-9_]*)
-                    |[-+]?0x[0-9a-fA-F_]+
-                    |[-+]?[1-9][0-9_]*(?::[0-5]?[0-9])+)$''', re.X),
-        list(u'-+0123456789'))
-
-Resolver.add_implicit_resolver(
-        u'tag:yaml.org,2002:merge',
-        re.compile(ur'^(?:<<)$'),
-        [u'<'])
-
-Resolver.add_implicit_resolver(
-        u'tag:yaml.org,2002:null',
-        re.compile(ur'''^(?: ~
-                    |null|Null|NULL
-                    | )$''', re.X),
-        [u'~', u'n', u'N', u''])
-
-Resolver.add_implicit_resolver(
-        u'tag:yaml.org,2002:timestamp',
-        re.compile(ur'''^(?:[0-9][0-9][0-9][0-9]-[0-9][0-9]-[0-9][0-9]
-                    |[0-9][0-9][0-9][0-9] -[0-9][0-9]? -[0-9][0-9]?
-                     (?:[Tt]|[ \t]+)[0-9][0-9]?
-                     :[0-9][0-9] :[0-9][0-9] (?:\.[0-9]*)?
-                     (?:[ \t]*(?:Z|[-+][0-9][0-9]?(?::[0-9][0-9])?))?)$''', re.X),
-        list(u'0123456789'))
-
-Resolver.add_implicit_resolver(
-        u'tag:yaml.org,2002:value',
-        re.compile(ur'^(?:=)$'),
-        [u'='])
-
-# The following resolver is only for documentation purposes. It cannot work
-# because plain scalars cannot start with '!', '&', or '*'.
-Resolver.add_implicit_resolver(
-        u'tag:yaml.org,2002:yaml',
-        re.compile(ur'^(?:!|&|\*)$'),
-        list(u'!&*'))
-
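
The experimental `add_path_resolver` API documented above can be exercised like this; a hedged sketch where `RegexLoader` and the `!regex` tag are illustrative names (note that implicit resolvers run first, so this only fires for scalars no implicit regexp claims):

    import re
    import yaml

    class RegexLoader(yaml.SafeLoader):
        pass

    # Tag any scalar that sits under the mapping key 'pattern' as !regex ...
    RegexLoader.add_path_resolver('!regex', ['pattern'], str)

    # ... and construct it by compiling the scalar's text.
    RegexLoader.add_constructor(
        '!regex', lambda loader, node: re.compile(node.value))

    data = yaml.load('pattern: a+b', Loader=RegexLoader)
    print(data['pattern'].match('aaab'))  # <re.Match object; span=(0, 4), ...>
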
diff --git a/lib/spack/external/yaml/lib/yaml/serializer.py b/lib/spack/external/yaml/lib/yaml/serializer.py
deleted file mode 100644
index 0bf1e96dc1..0000000000
--- a/lib/spack/external/yaml/lib/yaml/serializer.py
+++ /dev/null
@@ -1,111 +0,0 @@
-
-__all__ = ['Serializer', 'SerializerError']
-
-from error import YAMLError
-from events import *
-from nodes import *
-
-class SerializerError(YAMLError):
-    pass
-
-class Serializer(object):
-
-    ANCHOR_TEMPLATE = u'id%03d'
-
-    def __init__(self, encoding=None,
-            explicit_start=None, explicit_end=None, version=None, tags=None):
-        self.use_encoding = encoding
-        self.use_explicit_start = explicit_start
-        self.use_explicit_end = explicit_end
-        self.use_version = version
-        self.use_tags = tags
-        self.serialized_nodes = {}
-        self.anchors = {}
-        self.last_anchor_id = 0
-        self.closed = None
-
-    def open(self):
-        if self.closed is None:
-            self.emit(StreamStartEvent(encoding=self.use_encoding))
-            self.closed = False
-        elif self.closed:
-            raise SerializerError("serializer is closed")
-        else:
-            raise SerializerError("serializer is already opened")
-
-    def close(self):
-        if self.closed is None:
-            raise SerializerError("serializer is not opened")
-        elif not self.closed:
-            self.emit(StreamEndEvent())
-            self.closed = True
-
-    #def __del__(self):
-    #    self.close()
-
-    def serialize(self, node):
-        if self.closed is None:
-            raise SerializerError("serializer is not opened")
-        elif self.closed:
-            raise SerializerError("serializer is closed")
-        self.emit(DocumentStartEvent(explicit=self.use_explicit_start,
-            version=self.use_version, tags=self.use_tags))
-        self.anchor_node(node)
-        self.serialize_node(node, None, None)
-        self.emit(DocumentEndEvent(explicit=self.use_explicit_end))
-        self.serialized_nodes = {}
-        self.anchors = {}
-        self.last_anchor_id = 0
-
-    def anchor_node(self, node):
-        if node in self.anchors:
-            if self.anchors[node] is None:
-                self.anchors[node] = self.generate_anchor(node)
-        else:
-            self.anchors[node] = None
-            if isinstance(node, SequenceNode):
-                for item in node.value:
-                    self.anchor_node(item)
-            elif isinstance(node, MappingNode):
-                for key, value in node.value:
-                    self.anchor_node(key)
-                    self.anchor_node(value)
-
-    def generate_anchor(self, node):
-        self.last_anchor_id += 1
-        return self.ANCHOR_TEMPLATE % self.last_anchor_id
-
-    def serialize_node(self, node, parent, index):
-        alias = self.anchors[node]
-        if node in self.serialized_nodes:
-            self.emit(AliasEvent(alias))
-        else:
-            self.serialized_nodes[node] = True
-            self.descend_resolver(parent, index)
-            if isinstance(node, ScalarNode):
-                detected_tag = self.resolve(ScalarNode, node.value, (True, False))
-                default_tag = self.resolve(ScalarNode, node.value, (False, True))
-                implicit = (node.tag == detected_tag), (node.tag == default_tag)
-                self.emit(ScalarEvent(alias, node.tag, implicit, node.value,
-                    style=node.style))
-            elif isinstance(node, SequenceNode):
-                implicit = (node.tag
-                            == self.resolve(SequenceNode, node.value, True))
-                self.emit(SequenceStartEvent(alias, node.tag, implicit,
-                    flow_style=node.flow_style))
-                index = 0
-                for item in node.value:
-                    self.serialize_node(item, node, index)
-                    index += 1
-                self.emit(SequenceEndEvent())
-            elif isinstance(node, MappingNode):
-                implicit = (node.tag
-                            == self.resolve(MappingNode, node.value, True))
-                self.emit(MappingStartEvent(alias, node.tag, implicit,
-                    flow_style=node.flow_style))
-                for key, value in node.value:
-                    self.serialize_node(key, node, None)
-                    self.serialize_node(value, node, key)
-                self.emit(MappingEndEvent())
-            self.ascend_resolver()
-
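
The anchor bookkeeping in `anchor_node`/`generate_anchor` is visible from the outside whenever one node is reachable twice; a quick sketch with `safe_dump`:

    import yaml

    shared = {'retries': 3}
    print(yaml.safe_dump({'a': shared, 'b': shared}))
    # anchor_node sees the dict twice, so generate_anchor names it once:
    #   a: &id001
    #     retries: 3
    #   b: *id001
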
diff --git a/lib/spack/external/yaml/lib/yaml/tokens.py b/lib/spack/external/yaml/lib/yaml/tokens.py
deleted file mode 100644
index 4d0b48a394..0000000000
--- a/lib/spack/external/yaml/lib/yaml/tokens.py
+++ /dev/null
@@ -1,104 +0,0 @@
-
-class Token(object):
-    def __init__(self, start_mark, end_mark):
-        self.start_mark = start_mark
-        self.end_mark = end_mark
-    def __repr__(self):
-        attributes = [key for key in self.__dict__
-                if not key.endswith('_mark')]
-        attributes.sort()
-        arguments = ', '.join(['%s=%r' % (key, getattr(self, key))
-                for key in attributes])
-        return '%s(%s)' % (self.__class__.__name__, arguments)
-
-#class BOMToken(Token):
-#    id = '<byte order mark>'
-
-class DirectiveToken(Token):
-    id = '<directive>'
-    def __init__(self, name, value, start_mark, end_mark):
-        self.name = name
-        self.value = value
-        self.start_mark = start_mark
-        self.end_mark = end_mark
-
-class DocumentStartToken(Token):
-    id = '<document start>'
-
-class DocumentEndToken(Token):
-    id = '<document end>'
-
-class StreamStartToken(Token):
-    id = '<stream start>'
-    def __init__(self, start_mark=None, end_mark=None,
-            encoding=None):
-        self.start_mark = start_mark
-        self.end_mark = end_mark
-        self.encoding = encoding
-
-class StreamEndToken(Token):
-    id = '<stream end>'
-
-class BlockSequenceStartToken(Token):
-    id = '<block sequence start>'
-
-class BlockMappingStartToken(Token):
-    id = '<block mapping start>'
-
-class BlockEndToken(Token):
-    id = '<block end>'
-
-class FlowSequenceStartToken(Token):
-    id = '['
-
-class FlowMappingStartToken(Token):
-    id = '{'
-
-class FlowSequenceEndToken(Token):
-    id = ']'
-
-class FlowMappingEndToken(Token):
-    id = '}'
-
-class KeyToken(Token):
-    id = '?'
-
-class ValueToken(Token):
-    id = ':'
-
-class BlockEntryToken(Token):
-    id = '-'
-
-class FlowEntryToken(Token):
-    id = ','
-
-class AliasToken(Token):
-    id = '<alias>'
-    def __init__(self, value, start_mark, end_mark):
-        self.value = value
-        self.start_mark = start_mark
-        self.end_mark = end_mark
-
-class AnchorToken(Token):
-    id = '<anchor>'
-    def __init__(self, value, start_mark, end_mark):
-        self.value = value
-        self.start_mark = start_mark
-        self.end_mark = end_mark
-
-class TagToken(Token):
-    id = '<tag>'
-    def __init__(self, value, start_mark, end_mark):
-        self.value = value
-        self.start_mark = start_mark
-        self.end_mark = end_mark
-
-class ScalarToken(Token):
-    id = '<scalar>'
-    def __init__(self, value, plain, start_mark, end_mark, style=None):
-        self.value = value
-        self.plain = plain
-        self.start_mark = start_mark
-        self.end_mark = end_mark
-        self.style = style
-
diff --git a/lib/spack/external/yaml/lib3/yaml/composer.py b/lib/spack/external/yaml/lib3/yaml/composer.py
deleted file mode 100644
index d5c6a7acd9..0000000000
--- a/lib/spack/external/yaml/lib3/yaml/composer.py
+++ /dev/null
@@ -1,139 +0,0 @@
-
-__all__ = ['Composer', 'ComposerError']
-
-from .error import MarkedYAMLError
-from .events import *
-from .nodes import *
-
-class ComposerError(MarkedYAMLError):
-    pass
-
-class Composer:
-
-    def __init__(self):
-        self.anchors = {}
-
-    def check_node(self):
-        # Drop the STREAM-START event.
-        if self.check_event(StreamStartEvent):
-            self.get_event()
-
-        # Check whether there are more documents available.
-        return not self.check_event(StreamEndEvent)
-
-    def get_node(self):
-        # Get the root node of the next document.
-        if not self.check_event(StreamEndEvent):
-            return self.compose_document()
-
-    def get_single_node(self):
-        # Drop the STREAM-START event.
-        self.get_event()
-
-        # Compose a document if the stream is not empty.
-        document = None
-        if not self.check_event(StreamEndEvent):
-            document = self.compose_document()
-
-        # Ensure that the stream contains no more documents.
-        if not self.check_event(StreamEndEvent):
-            event = self.get_event()
-            raise ComposerError("expected a single document in the stream",
-                    document.start_mark, "but found another document",
-                    event.start_mark)
-
-        # Drop the STREAM-END event.
-        self.get_event()
-
-        return document
-
-    def compose_document(self):
-        # Drop the DOCUMENT-START event.
-        self.get_event()
-
-        # Compose the root node.
-        node = self.compose_node(None, None)
-
-        # Drop the DOCUMENT-END event.
-        self.get_event()
-
-        self.anchors = {}
-        return node
-
-    def compose_node(self, parent, index):
-        if self.check_event(AliasEvent):
-            event = self.get_event()
-            anchor = event.anchor
-            if anchor not in self.anchors:
-                raise ComposerError(None, None, "found undefined alias %r"
-                        % anchor, event.start_mark)
-            return self.anchors[anchor]
-        event = self.peek_event()
-        anchor = event.anchor
-        if anchor is not None:
-            if anchor in self.anchors:
-                raise ComposerError("found duplicate anchor %r; first occurence"
-                        % anchor, self.anchors[anchor].start_mark,
-                        "second occurence", event.start_mark)
-        self.descend_resolver(parent, index)
-        if self.check_event(ScalarEvent):
-            node = self.compose_scalar_node(anchor)
-        elif self.check_event(SequenceStartEvent):
-            node = self.compose_sequence_node(anchor)
-        elif self.check_event(MappingStartEvent):
-            node = self.compose_mapping_node(anchor)
-        self.ascend_resolver()
-        return node
-
-    def compose_scalar_node(self, anchor):
-        event = self.get_event()
-        tag = event.tag
-        if tag is None or tag == '!':
-            tag = self.resolve(ScalarNode, event.value, event.implicit)
-        node = ScalarNode(tag, event.value,
-                event.start_mark, event.end_mark, style=event.style)
-        if anchor is not None:
-            self.anchors[anchor] = node
-        return node
-
-    def compose_sequence_node(self, anchor):
-        start_event = self.get_event()
-        tag = start_event.tag
-        if tag is None or tag == '!':
-            tag = self.resolve(SequenceNode, None, start_event.implicit)
-        node = SequenceNode(tag, [],
-                start_event.start_mark, None,
-                flow_style=start_event.flow_style)
-        if anchor is not None:
-            self.anchors[anchor] = node
-        index = 0
-        while not self.check_event(SequenceEndEvent):
-            node.value.append(self.compose_node(node, index))
-            index += 1
-        end_event = self.get_event()
-        node.end_mark = end_event.end_mark
-        return node
-
-    def compose_mapping_node(self, anchor):
-        start_event = self.get_event()
-        tag = start_event.tag
-        if tag is None or tag == '!':
-            tag = self.resolve(MappingNode, None, start_event.implicit)
-        node = MappingNode(tag, [],
-                start_event.start_mark, None,
-                flow_style=start_event.flow_style)
-        if anchor is not None:
-            self.anchors[anchor] = node
-        while not self.check_event(MappingEndEvent):
-            #key_event = self.peek_event()
-            item_key = self.compose_node(node, None)
-            #if item_key in node.value:
-            #    raise ComposerError("while composing a mapping", start_event.start_mark,
-            #            "found duplicate key", key_event.start_mark)
-            item_value = self.compose_node(node, item_key)
-            #node.value[item_key] = item_value
-            node.value.append((item_key, item_value))
-        end_event = self.get_event()
-        node.end_mark = end_event.end_mark
-        return node
-
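The composer deleted above resolves every alias by returning the node object
stored in self.anchors, and rejects undefined or duplicate anchors. A minimal
sketch of that behavior through the public PyYAML API (assumed importable as
`yaml`; this vendored lib3 copy mirrors it, and ruamel.yaml ships an
equivalent composer):

    import yaml

    doc = """
    defaults: &base {retries: 3}
    job_a: *base
    job_b: *base
    """
    data = yaml.safe_load(doc)
    # compose_node() returns the same node for every alias, and construction
    # is memoized per node, so both jobs share one dict.
    assert data["job_a"] is data["job_b"]

    # An alias to an unknown anchor raises ComposerError (a YAMLError).
    try:
        yaml.safe_load("a: *missing")
    except yaml.YAMLError as exc:
        print(exc)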
diff --git a/lib/spack/external/yaml/lib3/yaml/constructor.py b/lib/spack/external/yaml/lib3/yaml/constructor.py
deleted file mode 100644
index 981543aebb..0000000000
--- a/lib/spack/external/yaml/lib3/yaml/constructor.py
+++ /dev/null
@@ -1,686 +0,0 @@
-
-__all__ = ['BaseConstructor', 'SafeConstructor', 'Constructor',
-    'ConstructorError']
-
-from .error import *
-from .nodes import *
-
-import collections.abc, datetime, base64, binascii, re, sys, types
-
-class ConstructorError(MarkedYAMLError):
-    pass
-
-class BaseConstructor:
-
-    yaml_constructors = {}
-    yaml_multi_constructors = {}
-
-    def __init__(self):
-        self.constructed_objects = {}
-        self.recursive_objects = {}
-        self.state_generators = []
-        self.deep_construct = False
-
-    def check_data(self):
-        # Are there more documents available?
-        return self.check_node()
-
-    def get_data(self):
-        # Construct and return the next document.
-        if self.check_node():
-            return self.construct_document(self.get_node())
-
-    def get_single_data(self):
-        # Ensure that the stream contains a single document and construct it.
-        node = self.get_single_node()
-        if node is not None:
-            return self.construct_document(node)
-        return None
-
-    def construct_document(self, node):
-        data = self.construct_object(node)
-        while self.state_generators:
-            state_generators = self.state_generators
-            self.state_generators = []
-            for generator in state_generators:
-                for dummy in generator:
-                    pass
-        self.constructed_objects = {}
-        self.recursive_objects = {}
-        self.deep_construct = False
-        return data
-
-    def construct_object(self, node, deep=False):
-        if node in self.constructed_objects:
-            return self.constructed_objects[node]
-        if deep:
-            old_deep = self.deep_construct
-            self.deep_construct = True
-        if node in self.recursive_objects:
-            raise ConstructorError(None, None,
-                    "found unconstructable recursive node", node.start_mark)
-        self.recursive_objects[node] = None
-        constructor = None
-        tag_suffix = None
-        if node.tag in self.yaml_constructors:
-            constructor = self.yaml_constructors[node.tag]
-        else:
-            for tag_prefix in self.yaml_multi_constructors:
-                if node.tag.startswith(tag_prefix):
-                    tag_suffix = node.tag[len(tag_prefix):]
-                    constructor = self.yaml_multi_constructors[tag_prefix]
-                    break
-            else:
-                if None in self.yaml_multi_constructors:
-                    tag_suffix = node.tag
-                    constructor = self.yaml_multi_constructors[None]
-                elif None in self.yaml_constructors:
-                    constructor = self.yaml_constructors[None]
-                elif isinstance(node, ScalarNode):
-                    constructor = self.__class__.construct_scalar
-                elif isinstance(node, SequenceNode):
-                    constructor = self.__class__.construct_sequence
-                elif isinstance(node, MappingNode):
-                    constructor = self.__class__.construct_mapping
-        if tag_suffix is None:
-            data = constructor(self, node)
-        else:
-            data = constructor(self, tag_suffix, node)
-        if isinstance(data, types.GeneratorType):
-            generator = data
-            data = next(generator)
-            if self.deep_construct:
-                for dummy in generator:
-                    pass
-            else:
-                self.state_generators.append(generator)
-        self.constructed_objects[node] = data
-        del self.recursive_objects[node]
-        if deep:
-            self.deep_construct = old_deep
-        return data
-
-    def construct_scalar(self, node):
-        if not isinstance(node, ScalarNode):
-            raise ConstructorError(None, None,
-                    "expected a scalar node, but found %s" % node.id,
-                    node.start_mark)
-        return node.value
-
-    def construct_sequence(self, node, deep=False):
-        if not isinstance(node, SequenceNode):
-            raise ConstructorError(None, None,
-                    "expected a sequence node, but found %s" % node.id,
-                    node.start_mark)
-        return [self.construct_object(child, deep=deep)
-                for child in node.value]
-
-    def construct_mapping(self, node, deep=False):
-        if not isinstance(node, MappingNode):
-            raise ConstructorError(None, None,
-                    "expected a mapping node, but found %s" % node.id,
-                    node.start_mark)
-        mapping = {}
-        for key_node, value_node in node.value:
-            key = self.construct_object(key_node, deep=deep)
-            if not isinstance(key, collections.abc.Hashable):
-                raise ConstructorError("while constructing a mapping", node.start_mark,
-                        "found unhashable key", key_node.start_mark)
-            value = self.construct_object(value_node, deep=deep)
-            mapping[key] = value
-        return mapping
-
-    def construct_pairs(self, node, deep=False):
-        if not isinstance(node, MappingNode):
-            raise ConstructorError(None, None,
-                    "expected a mapping node, but found %s" % node.id,
-                    node.start_mark)
-        pairs = []
-        for key_node, value_node in node.value:
-            key = self.construct_object(key_node, deep=deep)
-            value = self.construct_object(value_node, deep=deep)
-            pairs.append((key, value))
-        return pairs
-
-    @classmethod
-    def add_constructor(cls, tag, constructor):
-        if 'yaml_constructors' not in cls.__dict__:
-            cls.yaml_constructors = cls.yaml_constructors.copy()
-        cls.yaml_constructors[tag] = constructor
-
-    @classmethod
-    def add_multi_constructor(cls, tag_prefix, multi_constructor):
-        if 'yaml_multi_constructors' not in cls.__dict__:
-            cls.yaml_multi_constructors = cls.yaml_multi_constructors.copy()
-        cls.yaml_multi_constructors[tag_prefix] = multi_constructor
-
-class SafeConstructor(BaseConstructor):
-
-    def construct_scalar(self, node):
-        if isinstance(node, MappingNode):
-            for key_node, value_node in node.value:
-                if key_node.tag == 'tag:yaml.org,2002:value':
-                    return self.construct_scalar(value_node)
-        return super().construct_scalar(node)
-
-    def flatten_mapping(self, node):
-        merge = []
-        index = 0
-        while index < len(node.value):
-            key_node, value_node = node.value[index]
-            if key_node.tag == 'tag:yaml.org,2002:merge':
-                del node.value[index]
-                if isinstance(value_node, MappingNode):
-                    self.flatten_mapping(value_node)
-                    merge.extend(value_node.value)
-                elif isinstance(value_node, SequenceNode):
-                    submerge = []
-                    for subnode in value_node.value:
-                        if not isinstance(subnode, MappingNode):
-                            raise ConstructorError("while constructing a mapping",
-                                    node.start_mark,
-                                    "expected a mapping for merging, but found %s"
-                                    % subnode.id, subnode.start_mark)
-                        self.flatten_mapping(subnode)
-                        submerge.append(subnode.value)
-                    submerge.reverse()
-                    for value in submerge:
-                        merge.extend(value)
-                else:
-                    raise ConstructorError("while constructing a mapping", node.start_mark,
-                            "expected a mapping or list of mappings for merging, but found %s"
-                            % value_node.id, value_node.start_mark)
-            elif key_node.tag == 'tag:yaml.org,2002:value':
-                key_node.tag = 'tag:yaml.org,2002:str'
-                index += 1
-            else:
-                index += 1
-        if merge:
-            node.value = merge + node.value
-
-    def construct_mapping(self, node, deep=False):
-        if isinstance(node, MappingNode):
-            self.flatten_mapping(node)
-        return super().construct_mapping(node, deep=deep)
-
-    def construct_yaml_null(self, node):
-        self.construct_scalar(node)
-        return None
-
-    bool_values = {
-        'yes':      True,
-        'no':       False,
-        'true':     True,
-        'false':    False,
-        'on':       True,
-        'off':      False,
-    }
-
-    def construct_yaml_bool(self, node):
-        value = self.construct_scalar(node)
-        return self.bool_values[value.lower()]
-
-    def construct_yaml_int(self, node):
-        value = self.construct_scalar(node)
-        value = value.replace('_', '')
-        sign = +1
-        if value[0] == '-':
-            sign = -1
-        if value[0] in '+-':
-            value = value[1:]
-        if value == '0':
-            return 0
-        elif value.startswith('0b'):
-            return sign*int(value[2:], 2)
-        elif value.startswith('0x'):
-            return sign*int(value[2:], 16)
-        elif value[0] == '0':
-            return sign*int(value, 8)
-        elif ':' in value:
-            digits = [int(part) for part in value.split(':')]
-            digits.reverse()
-            base = 1
-            value = 0
-            for digit in digits:
-                value += digit*base
-                base *= 60
-            return sign*value
-        else:
-            return sign*int(value)
-
-    inf_value = 1e300
-    while inf_value != inf_value*inf_value:
-        inf_value *= inf_value
-    nan_value = -inf_value/inf_value   # Trying to make a quiet NaN (like C99).
-
-    def construct_yaml_float(self, node):
-        value = self.construct_scalar(node)
-        value = value.replace('_', '').lower()
-        sign = +1
-        if value[0] == '-':
-            sign = -1
-        if value[0] in '+-':
-            value = value[1:]
-        if value == '.inf':
-            return sign*self.inf_value
-        elif value == '.nan':
-            return self.nan_value
-        elif ':' in value:
-            digits = [float(part) for part in value.split(':')]
-            digits.reverse()
-            base = 1
-            value = 0.0
-            for digit in digits:
-                value += digit*base
-                base *= 60
-            return sign*value
-        else:
-            return sign*float(value)
-
-    def construct_yaml_binary(self, node):
-        try:
-            value = self.construct_scalar(node).encode('ascii')
-        except UnicodeEncodeError as exc:
-            raise ConstructorError(None, None,
-                    "failed to convert base64 data into ascii: %s" % exc,
-                    node.start_mark)
-        try:
-            if hasattr(base64, 'decodebytes'):
-                return base64.decodebytes(value)
-            else:
-                return base64.decodestring(value)
-        except binascii.Error as exc:
-            raise ConstructorError(None, None,
-                    "failed to decode base64 data: %s" % exc, node.start_mark)
-
-    timestamp_regexp = re.compile(
-            r'''^(?P<year>[0-9][0-9][0-9][0-9])
-                -(?P<month>[0-9][0-9]?)
-                -(?P<day>[0-9][0-9]?)
-                (?:(?:[Tt]|[ \t]+)
-                (?P<hour>[0-9][0-9]?)
-                :(?P<minute>[0-9][0-9])
-                :(?P<second>[0-9][0-9])
-                (?:\.(?P<fraction>[0-9]*))?
-                (?:[ \t]*(?P<tz>Z|(?P<tz_sign>[-+])(?P<tz_hour>[0-9][0-9]?)
-                (?::(?P<tz_minute>[0-9][0-9]))?))?)?$''', re.X)
-
-    def construct_yaml_timestamp(self, node):
-        value = self.construct_scalar(node)
-        match = self.timestamp_regexp.match(node.value)
-        values = match.groupdict()
-        year = int(values['year'])
-        month = int(values['month'])
-        day = int(values['day'])
-        if not values['hour']:
-            return datetime.date(year, month, day)
-        hour = int(values['hour'])
-        minute = int(values['minute'])
-        second = int(values['second'])
-        fraction = 0
-        if values['fraction']:
-            fraction = values['fraction'][:6]
-            while len(fraction) < 6:
-                fraction += '0'
-            fraction = int(fraction)
-        delta = None
-        if values['tz_sign']:
-            tz_hour = int(values['tz_hour'])
-            tz_minute = int(values['tz_minute'] or 0)
-            delta = datetime.timedelta(hours=tz_hour, minutes=tz_minute)
-            if values['tz_sign'] == '-':
-                delta = -delta
-        data = datetime.datetime(year, month, day, hour, minute, second, fraction)
-        if delta:
-            data -= delta
-        return data
-
-    def construct_yaml_omap(self, node):
-        # Note: we do not check for duplicate keys, because it's too
-        # CPU-expensive.
-        omap = []
-        yield omap
-        if not isinstance(node, SequenceNode):
-            raise ConstructorError("while constructing an ordered map", node.start_mark,
-                    "expected a sequence, but found %s" % node.id, node.start_mark)
-        for subnode in node.value:
-            if not isinstance(subnode, MappingNode):
-                raise ConstructorError("while constructing an ordered map", node.start_mark,
-                        "expected a mapping of length 1, but found %s" % subnode.id,
-                        subnode.start_mark)
-            if len(subnode.value) != 1:
-                raise ConstructorError("while constructing an ordered map", node.start_mark,
-                        "expected a single mapping item, but found %d items" % len(subnode.value),
-                        subnode.start_mark)
-            key_node, value_node = subnode.value[0]
-            key = self.construct_object(key_node)
-            value = self.construct_object(value_node)
-            omap.append((key, value))
-
-    def construct_yaml_pairs(self, node):
-        # Note: the same code as `construct_yaml_omap`.
-        pairs = []
-        yield pairs
-        if not isinstance(node, SequenceNode):
-            raise ConstructorError("while constructing pairs", node.start_mark,
-                    "expected a sequence, but found %s" % node.id, node.start_mark)
-        for subnode in node.value:
-            if not isinstance(subnode, MappingNode):
-                raise ConstructorError("while constructing pairs", node.start_mark,
-                        "expected a mapping of length 1, but found %s" % subnode.id,
-                        subnode.start_mark)
-            if len(subnode.value) != 1:
-                raise ConstructorError("while constructing pairs", node.start_mark,
-                        "expected a single mapping item, but found %d items" % len(subnode.value),
-                        subnode.start_mark)
-            key_node, value_node = subnode.value[0]
-            key = self.construct_object(key_node)
-            value = self.construct_object(value_node)
-            pairs.append((key, value))
-
-    def construct_yaml_set(self, node):
-        data = set()
-        yield data
-        value = self.construct_mapping(node)
-        data.update(value)
-
-    def construct_yaml_str(self, node):
-        return self.construct_scalar(node)
-
-    def construct_yaml_seq(self, node):
-        data = []
-        yield data
-        data.extend(self.construct_sequence(node))
-
-    def construct_yaml_map(self, node):
-        data = {}
-        yield data
-        value = self.construct_mapping(node)
-        data.update(value)
-
-    def construct_yaml_object(self, node, cls):
-        data = cls.__new__(cls)
-        yield data
-        if hasattr(data, '__setstate__'):
-            state = self.construct_mapping(node, deep=True)
-            data.__setstate__(state)
-        else:
-            state = self.construct_mapping(node)
-            data.__dict__.update(state)
-
-    def construct_undefined(self, node):
-        raise ConstructorError(None, None,
-                "could not determine a constructor for the tag %r" % node.tag,
-                node.start_mark)
-
-SafeConstructor.add_constructor(
-        'tag:yaml.org,2002:null',
-        SafeConstructor.construct_yaml_null)
-
-SafeConstructor.add_constructor(
-        'tag:yaml.org,2002:bool',
-        SafeConstructor.construct_yaml_bool)
-
-SafeConstructor.add_constructor(
-        'tag:yaml.org,2002:int',
-        SafeConstructor.construct_yaml_int)
-
-SafeConstructor.add_constructor(
-        'tag:yaml.org,2002:float',
-        SafeConstructor.construct_yaml_float)
-
-SafeConstructor.add_constructor(
-        'tag:yaml.org,2002:binary',
-        SafeConstructor.construct_yaml_binary)
-
-SafeConstructor.add_constructor(
-        'tag:yaml.org,2002:timestamp',
-        SafeConstructor.construct_yaml_timestamp)
-
-SafeConstructor.add_constructor(
-        'tag:yaml.org,2002:omap',
-        SafeConstructor.construct_yaml_omap)
-
-SafeConstructor.add_constructor(
-        'tag:yaml.org,2002:pairs',
-        SafeConstructor.construct_yaml_pairs)
-
-SafeConstructor.add_constructor(
-        'tag:yaml.org,2002:set',
-        SafeConstructor.construct_yaml_set)
-
-SafeConstructor.add_constructor(
-        'tag:yaml.org,2002:str',
-        SafeConstructor.construct_yaml_str)
-
-SafeConstructor.add_constructor(
-        'tag:yaml.org,2002:seq',
-        SafeConstructor.construct_yaml_seq)
-
-SafeConstructor.add_constructor(
-        'tag:yaml.org,2002:map',
-        SafeConstructor.construct_yaml_map)
-
-SafeConstructor.add_constructor(None,
-        SafeConstructor.construct_undefined)
-
-class Constructor(SafeConstructor):
-
-    def construct_python_str(self, node):
-        return self.construct_scalar(node)
-
-    def construct_python_unicode(self, node):
-        return self.construct_scalar(node)
-
-    def construct_python_bytes(self, node):
-        try:
-            value = self.construct_scalar(node).encode('ascii')
-        except UnicodeEncodeError as exc:
-            raise ConstructorError(None, None,
-                    "failed to convert base64 data into ascii: %s" % exc,
-                    node.start_mark)
-        try:
-            if hasattr(base64, 'decodebytes'):
-                return base64.decodebytes(value)
-            else:
-                return base64.decodestring(value)
-        except binascii.Error as exc:
-            raise ConstructorError(None, None,
-                    "failed to decode base64 data: %s" % exc, node.start_mark)
-
-    def construct_python_long(self, node):
-        return self.construct_yaml_int(node)
-
-    def construct_python_complex(self, node):
-        return complex(self.construct_scalar(node))
-
-    def construct_python_tuple(self, node):
-        return tuple(self.construct_sequence(node))
-
-    def find_python_module(self, name, mark):
-        if not name:
-            raise ConstructorError("while constructing a Python module", mark,
-                    "expected non-empty name appended to the tag", mark)
-        try:
-            __import__(name)
-        except ImportError as exc:
-            raise ConstructorError("while constructing a Python module", mark,
-                    "cannot find module %r (%s)" % (name, exc), mark)
-        return sys.modules[name]
-
-    def find_python_name(self, name, mark):
-        if not name:
-            raise ConstructorError("while constructing a Python object", mark,
-                    "expected non-empty name appended to the tag", mark)
-        if '.' in name:
-            module_name, object_name = name.rsplit('.', 1)
-        else:
-            module_name = 'builtins'
-            object_name = name
-        try:
-            __import__(module_name)
-        except ImportError as exc:
-            raise ConstructorError("while constructing a Python object", mark,
-                    "cannot find module %r (%s)" % (module_name, exc), mark)
-        module = sys.modules[module_name]
-        if not hasattr(module, object_name):
-            raise ConstructorError("while constructing a Python object", mark,
-                    "cannot find %r in the module %r"
-                    % (object_name, module.__name__), mark)
-        return getattr(module, object_name)
-
-    def construct_python_name(self, suffix, node):
-        value = self.construct_scalar(node)
-        if value:
-            raise ConstructorError("while constructing a Python name", node.start_mark,
-                    "expected the empty value, but found %r" % value, node.start_mark)
-        return self.find_python_name(suffix, node.start_mark)
-
-    def construct_python_module(self, suffix, node):
-        value = self.construct_scalar(node)
-        if value:
-            raise ConstructorError("while constructing a Python module", node.start_mark,
-                    "expected the empty value, but found %r" % value, node.start_mark)
-        return self.find_python_module(suffix, node.start_mark)
-
-    def make_python_instance(self, suffix, node,
-            args=None, kwds=None, newobj=False):
-        if not args:
-            args = []
-        if not kwds:
-            kwds = {}
-        cls = self.find_python_name(suffix, node.start_mark)
-        if newobj and isinstance(cls, type):
-            return cls.__new__(cls, *args, **kwds)
-        else:
-            return cls(*args, **kwds)
-
-    def set_python_instance_state(self, instance, state):
-        if hasattr(instance, '__setstate__'):
-            instance.__setstate__(state)
-        else:
-            slotstate = {}
-            if isinstance(state, tuple) and len(state) == 2:
-                state, slotstate = state
-            if hasattr(instance, '__dict__'):
-                instance.__dict__.update(state)
-            elif state:
-                slotstate.update(state)
-            for key, value in slotstate.items():
-                setattr(instance, key, value)
-
-    def construct_python_object(self, suffix, node):
-        # Format:
-        #   !!python/object:module.name { ... state ... }
-        instance = self.make_python_instance(suffix, node, newobj=True)
-        yield instance
-        deep = hasattr(instance, '__setstate__')
-        state = self.construct_mapping(node, deep=deep)
-        self.set_python_instance_state(instance, state)
-
-    def construct_python_object_apply(self, suffix, node, newobj=False):
-        # Format:
-        #   !!python/object/apply       # (or !!python/object/new)
-        #   args: [ ... arguments ... ]
-        #   kwds: { ... keywords ... }
-        #   state: ... state ...
-        #   listitems: [ ... listitems ... ]
-        #   dictitems: { ... dictitems ... }
-        # or short format:
-        #   !!python/object/apply [ ... arguments ... ]
-        # The difference between !!python/object/apply and !!python/object/new
-        # is how an object is created, check make_python_instance for details.
-        if isinstance(node, SequenceNode):
-            args = self.construct_sequence(node, deep=True)
-            kwds = {}
-            state = {}
-            listitems = []
-            dictitems = {}
-        else:
-            value = self.construct_mapping(node, deep=True)
-            args = value.get('args', [])
-            kwds = value.get('kwds', {})
-            state = value.get('state', {})
-            listitems = value.get('listitems', [])
-            dictitems = value.get('dictitems', {})
-        instance = self.make_python_instance(suffix, node, args, kwds, newobj)
-        if state:
-            self.set_python_instance_state(instance, state)
-        if listitems:
-            instance.extend(listitems)
-        if dictitems:
-            for key in dictitems:
-                instance[key] = dictitems[key]
-        return instance
-
-    def construct_python_object_new(self, suffix, node):
-        return self.construct_python_object_apply(suffix, node, newobj=True)
-
-Constructor.add_constructor(
-    'tag:yaml.org,2002:python/none',
-    Constructor.construct_yaml_null)
-
-Constructor.add_constructor(
-    'tag:yaml.org,2002:python/bool',
-    Constructor.construct_yaml_bool)
-
-Constructor.add_constructor(
-    'tag:yaml.org,2002:python/str',
-    Constructor.construct_python_str)
-
-Constructor.add_constructor(
-    'tag:yaml.org,2002:python/unicode',
-    Constructor.construct_python_unicode)
-
-Constructor.add_constructor(
-    'tag:yaml.org,2002:python/bytes',
-    Constructor.construct_python_bytes)
-
-Constructor.add_constructor(
-    'tag:yaml.org,2002:python/int',
-    Constructor.construct_yaml_int)
-
-Constructor.add_constructor(
-    'tag:yaml.org,2002:python/long',
-    Constructor.construct_python_long)
-
-Constructor.add_constructor(
-    'tag:yaml.org,2002:python/float',
-    Constructor.construct_yaml_float)
-
-Constructor.add_constructor(
-    'tag:yaml.org,2002:python/complex',
-    Constructor.construct_python_complex)
-
-Constructor.add_constructor(
-    'tag:yaml.org,2002:python/list',
-    Constructor.construct_yaml_seq)
-
-Constructor.add_constructor(
-    'tag:yaml.org,2002:python/tuple',
-    Constructor.construct_python_tuple)
-
-Constructor.add_constructor(
-    'tag:yaml.org,2002:python/dict',
-    Constructor.construct_yaml_map)
-
-Constructor.add_multi_constructor(
-    'tag:yaml.org,2002:python/name:',
-    Constructor.construct_python_name)
-
-Constructor.add_multi_constructor(
-    'tag:yaml.org,2002:python/module:',
-    Constructor.construct_python_module)
-
-Constructor.add_multi_constructor(
-    'tag:yaml.org,2002:python/object:',
-    Constructor.construct_python_object)
-
-Constructor.add_multi_constructor(
-    'tag:yaml.org,2002:python/object/apply:',
-    Constructor.construct_python_object_apply)
-
-Constructor.add_multi_constructor(
-    'tag:yaml.org,2002:python/object/new:',
-    Constructor.construct_python_object_new)
-
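The SafeConstructor deleted above implements YAML 1.1 scalar semantics (the
yes/no booleans, sexagesimal integers, timestamps) plus a per-tag registry
filled by the add_constructor() calls at module scope. A short sketch against
the public PyYAML API that this copy mirrors; the !version tag and
construct_version helper are hypothetical names used only for illustration:

    import yaml

    print(yaml.safe_load("on"))          # True   (bool_values table)
    print(yaml.safe_load("0x1f"))        # 31     (construct_yaml_int, hex branch)
    print(yaml.safe_load("1:30:00"))     # 5400   (base-60 branch: 1*3600 + 30*60)
    print(yaml.safe_load("2018-08-19"))  # datetime.date(2018, 8, 19)

    # add_constructor() is the same hook used above for tag:yaml.org,2002:*.
    def construct_version(loader, node):
        return tuple(int(p) for p in loader.construct_scalar(node).split("."))

    yaml.SafeLoader.add_constructor("!version", construct_version)
    print(yaml.safe_load("!version 1.2.3"))  # (1, 2, 3)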
diff --git a/lib/spack/external/yaml/lib3/yaml/cyaml.py b/lib/spack/external/yaml/lib3/yaml/cyaml.py
deleted file mode 100644
index d5cb87e994..0000000000
--- a/lib/spack/external/yaml/lib3/yaml/cyaml.py
+++ /dev/null
@@ -1,85 +0,0 @@
-
-__all__ = ['CBaseLoader', 'CSafeLoader', 'CLoader',
-        'CBaseDumper', 'CSafeDumper', 'CDumper']
-
-from _yaml import CParser, CEmitter
-
-from .constructor import *
-
-from .serializer import *
-from .representer import *
-
-from .resolver import *
-
-class CBaseLoader(CParser, BaseConstructor, BaseResolver):
-
-    def __init__(self, stream):
-        CParser.__init__(self, stream)
-        BaseConstructor.__init__(self)
-        BaseResolver.__init__(self)
-
-class CSafeLoader(CParser, SafeConstructor, Resolver):
-
-    def __init__(self, stream):
-        CParser.__init__(self, stream)
-        SafeConstructor.__init__(self)
-        Resolver.__init__(self)
-
-class CLoader(CParser, Constructor, Resolver):
-
-    def __init__(self, stream):
-        CParser.__init__(self, stream)
-        Constructor.__init__(self)
-        Resolver.__init__(self)
-
-class CBaseDumper(CEmitter, BaseRepresenter, BaseResolver):
-
-    def __init__(self, stream,
-            default_style=None, default_flow_style=None,
-            canonical=None, indent=None, width=None,
-            allow_unicode=None, line_break=None,
-            encoding=None, explicit_start=None, explicit_end=None,
-            version=None, tags=None):
-        CEmitter.__init__(self, stream, canonical=canonical,
-                indent=indent, width=width, encoding=encoding,
-                allow_unicode=allow_unicode, line_break=line_break,
-                explicit_start=explicit_start, explicit_end=explicit_end,
-                version=version, tags=tags)
-        Representer.__init__(self, default_style=default_style,
-                default_flow_style=default_flow_style)
-        Resolver.__init__(self)
-
-class CSafeDumper(CEmitter, SafeRepresenter, Resolver):
-
-    def __init__(self, stream,
-            default_style=None, default_flow_style=None,
-            canonical=None, indent=None, width=None,
-            allow_unicode=None, line_break=None,
-            encoding=None, explicit_start=None, explicit_end=None,
-            version=None, tags=None):
-        CEmitter.__init__(self, stream, canonical=canonical,
-                indent=indent, width=width, encoding=encoding,
-                allow_unicode=allow_unicode, line_break=line_break,
-                explicit_start=explicit_start, explicit_end=explicit_end,
-                version=version, tags=tags)
-        SafeRepresenter.__init__(self, default_style=default_style,
-                default_flow_style=default_flow_style)
-        Resolver.__init__(self)
-
-class CDumper(CEmitter, Serializer, Representer, Resolver):
-
-    def __init__(self, stream,
-            default_style=None, default_flow_style=None,
-            canonical=None, indent=None, width=None,
-            allow_unicode=None, line_break=None,
-            encoding=None, explicit_start=None, explicit_end=None,
-            version=None, tags=None):
-        CEmitter.__init__(self, stream, canonical=canonical,
-                indent=indent, width=width, encoding=encoding,
-                allow_unicode=allow_unicode, line_break=line_break,
-                explicit_start=explicit_start, explicit_end=explicit_end,
-                version=version, tags=tags)
-        Representer.__init__(self, default_style=default_style,
-                default_flow_style=default_flow_style)
-        Resolver.__init__(self)
-
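cyaml.py, deleted above, contains no parsing logic of its own: each class just
pairs LibYAML's CParser or CEmitter with the pure-Python constructor,
representer, and resolver mixins. The usual guarded import, assuming the
standard PyYAML package (the C classes exist only when the _yaml extension
module was built against LibYAML):

    import yaml

    try:
        from yaml import CSafeLoader as Loader   # LibYAML-backed, much faster
    except ImportError:
        from yaml import SafeLoader as Loader    # pure-Python fallback

    print(yaml.load("answer: 42", Loader=Loader))  # {'answer': 42}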
diff --git a/lib/spack/external/yaml/lib3/yaml/dumper.py b/lib/spack/external/yaml/lib3/yaml/dumper.py
deleted file mode 100644
index 0b69128771..0000000000
--- a/lib/spack/external/yaml/lib3/yaml/dumper.py
+++ /dev/null
@@ -1,62 +0,0 @@
-
-__all__ = ['BaseDumper', 'SafeDumper', 'Dumper']
-
-from .emitter import *
-from .serializer import *
-from .representer import *
-from .resolver import *
-
-class BaseDumper(Emitter, Serializer, BaseRepresenter, BaseResolver):
-
-    def __init__(self, stream,
-            default_style=None, default_flow_style=None,
-            canonical=None, indent=None, width=None,
-            allow_unicode=None, line_break=None,
-            encoding=None, explicit_start=None, explicit_end=None,
-            version=None, tags=None):
-        Emitter.__init__(self, stream, canonical=canonical,
-                indent=indent, width=width,
-                allow_unicode=allow_unicode, line_break=line_break)
-        Serializer.__init__(self, encoding=encoding,
-                explicit_start=explicit_start, explicit_end=explicit_end,
-                version=version, tags=tags)
-        Representer.__init__(self, default_style=default_style,
-                default_flow_style=default_flow_style)
-        Resolver.__init__(self)
-
-class SafeDumper(Emitter, Serializer, SafeRepresenter, Resolver):
-
-    def __init__(self, stream,
-            default_style=None, default_flow_style=None,
-            canonical=None, indent=None, width=None,
-            allow_unicode=None, line_break=None,
-            encoding=None, explicit_start=None, explicit_end=None,
-            version=None, tags=None):
-        Emitter.__init__(self, stream, canonical=canonical,
-                indent=indent, width=width,
-                allow_unicode=allow_unicode, line_break=line_break)
-        Serializer.__init__(self, encoding=encoding,
-                explicit_start=explicit_start, explicit_end=explicit_end,
-                version=version, tags=tags)
-        SafeRepresenter.__init__(self, default_style=default_style,
-                default_flow_style=default_flow_style)
-        Resolver.__init__(self)
-
-class Dumper(Emitter, Serializer, Representer, Resolver):
-
-    def __init__(self, stream,
-            default_style=None, default_flow_style=None,
-            canonical=None, indent=None, width=None,
-            allow_unicode=None, line_break=None,
-            encoding=None, explicit_start=None, explicit_end=None,
-            version=None, tags=None):
-        Emitter.__init__(self, stream, canonical=canonical,
-                indent=indent, width=width,
-                allow_unicode=allow_unicode, line_break=line_break)
-        Serializer.__init__(self, encoding=encoding,
-                explicit_start=explicit_start, explicit_end=explicit_end,
-                version=version, tags=tags)
-        Representer.__init__(self, default_style=default_style,
-                default_flow_style=default_flow_style)
-        Resolver.__init__(self)
-
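The dumpers deleted above are pure mixin composition (Emitter + Serializer +
Representer + Resolver), which is why customizing output amounts to
subclassing plus an add_representer() registration. A sketch using the public
PyYAML API this copy mirrors; FlowDumper and repr_point are illustrative
names, not part of the library:

    import yaml

    class FlowDumper(yaml.SafeDumper):
        """Subclass so the registration below stays local to this class."""

    def repr_point(dumper, pt):
        # Emit a 2-tuple as an inline (flow-style) mapping.
        return dumper.represent_mapping(
            "tag:yaml.org,2002:map", {"x": pt[0], "y": pt[1]}, flow_style=True)

    FlowDumper.add_representer(tuple, repr_point)
    print(yaml.dump({"origin": (0, 0)}, Dumper=FlowDumper,
                    default_flow_style=False))
    # origin: {x: 0, y: 0}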
diff --git a/lib/spack/external/yaml/lib3/yaml/emitter.py b/lib/spack/external/yaml/lib3/yaml/emitter.py
deleted file mode 100644
index 34cb145a5f..0000000000
--- a/lib/spack/external/yaml/lib3/yaml/emitter.py
+++ /dev/null
@@ -1,1137 +0,0 @@
-
-# Emitter expects events obeying the following grammar:
-# stream ::= STREAM-START document* STREAM-END
-# document ::= DOCUMENT-START node DOCUMENT-END
-# node ::= SCALAR | sequence | mapping
-# sequence ::= SEQUENCE-START node* SEQUENCE-END
-# mapping ::= MAPPING-START (node node)* MAPPING-END
-
-__all__ = ['Emitter', 'EmitterError']
-
-from .error import YAMLError
-from .events import *
-
-class EmitterError(YAMLError):
-    pass
-
-class ScalarAnalysis:
-    def __init__(self, scalar, empty, multiline,
-            allow_flow_plain, allow_block_plain,
-            allow_single_quoted, allow_double_quoted,
-            allow_block):
-        self.scalar = scalar
-        self.empty = empty
-        self.multiline = multiline
-        self.allow_flow_plain = allow_flow_plain
-        self.allow_block_plain = allow_block_plain
-        self.allow_single_quoted = allow_single_quoted
-        self.allow_double_quoted = allow_double_quoted
-        self.allow_block = allow_block
-
-class Emitter:
-
-    DEFAULT_TAG_PREFIXES = {
-        '!' : '!',
-        'tag:yaml.org,2002:' : '!!',
-    }
-
-    def __init__(self, stream, canonical=None, indent=None, width=None,
-            allow_unicode=None, line_break=None):
-
-        # The stream should have the methods `write` and possibly `flush`.
-        self.stream = stream
-
-        # Encoding can be overridden by STREAM-START.
-        self.encoding = None
-
-        # Emitter is a state machine with a stack of states to handle nested
-        # structures.
-        self.states = []
-        self.state = self.expect_stream_start
-
-        # Current event and the event queue.
-        self.events = []
-        self.event = None
-
-        # The current indentation level and the stack of previous indents.
-        self.indents = []
-        self.indent = None
-
-        # Flow level.
-        self.flow_level = 0
-
-        # Contexts.
-        self.root_context = False
-        self.sequence_context = False
-        self.mapping_context = False
-        self.simple_key_context = False
-
-        # Characteristics of the last emitted character:
-        #  - current position.
-        #  - is it a whitespace?
-        #  - is it an indentation character
-        #    (indentation space, '-', '?', or ':')?
-        self.line = 0
-        self.column = 0
-        self.whitespace = True
-        self.indention = True
-
-        # Whether the document requires an explicit document indicator
-        self.open_ended = False
-
-        # Formatting details.
-        self.canonical = canonical
-        self.allow_unicode = allow_unicode
-        self.best_indent = 2
-        if indent and 1 < indent < 10:
-            self.best_indent = indent
-        self.best_width = 80
-        if width and width > self.best_indent*2:
-            self.best_width = width
-        self.best_line_break = '\n'
-        if line_break in ['\r', '\n', '\r\n']:
-            self.best_line_break = line_break
-
-        # Tag prefixes.
-        self.tag_prefixes = None
-
-        # Prepared anchor and tag.
-        self.prepared_anchor = None
-        self.prepared_tag = None
-
-        # Scalar analysis and style.
-        self.analysis = None
-        self.style = None
-
-    def dispose(self):
-        # Reset the state attributes (to clear self-references)
-        self.states = []
-        self.state = None
-
-    def emit(self, event):
-        self.events.append(event)
-        while not self.need_more_events():
-            self.event = self.events.pop(0)
-            self.state()
-            self.event = None
-
-    # In some cases, we wait for the next few events before emitting.
-
-    def need_more_events(self):
-        if not self.events:
-            return True
-        event = self.events[0]
-        if isinstance(event, DocumentStartEvent):
-            return self.need_events(1)
-        elif isinstance(event, SequenceStartEvent):
-            return self.need_events(2)
-        elif isinstance(event, MappingStartEvent):
-            return self.need_events(3)
-        else:
-            return False
-
-    def need_events(self, count):
-        level = 0
-        for event in self.events[1:]:
-            if isinstance(event, (DocumentStartEvent, CollectionStartEvent)):
-                level += 1
-            elif isinstance(event, (DocumentEndEvent, CollectionEndEvent)):
-                level -= 1
-            elif isinstance(event, StreamEndEvent):
-                level = -1
-            if level < 0:
-                return False
-        return (len(self.events) < count+1)
-
-    def increase_indent(self, flow=False, indentless=False):
-        self.indents.append(self.indent)
-        if self.indent is None:
-            if flow:
-                self.indent = self.best_indent
-            else:
-                self.indent = 0
-        elif not indentless:
-            self.indent += self.best_indent
-
-    # States.
-
-    # Stream handlers.
-
-    def expect_stream_start(self):
-        if isinstance(self.event, StreamStartEvent):
-            if self.event.encoding and not hasattr(self.stream, 'encoding'):
-                self.encoding = self.event.encoding
-            self.write_stream_start()
-            self.state = self.expect_first_document_start
-        else:
-            raise EmitterError("expected StreamStartEvent, but got %s"
-                    % self.event)
-
-    def expect_nothing(self):
-        raise EmitterError("expected nothing, but got %s" % self.event)
-
-    # Document handlers.
-
-    def expect_first_document_start(self):
-        return self.expect_document_start(first=True)
-
-    def expect_document_start(self, first=False):
-        if isinstance(self.event, DocumentStartEvent):
-            if (self.event.version or self.event.tags) and self.open_ended:
-                self.write_indicator('...', True)
-                self.write_indent()
-            if self.event.version:
-                version_text = self.prepare_version(self.event.version)
-                self.write_version_directive(version_text)
-            self.tag_prefixes = self.DEFAULT_TAG_PREFIXES.copy()
-            if self.event.tags:
-                handles = sorted(self.event.tags.keys())
-                for handle in handles:
-                    prefix = self.event.tags[handle]
-                    self.tag_prefixes[prefix] = handle
-                    handle_text = self.prepare_tag_handle(handle)
-                    prefix_text = self.prepare_tag_prefix(prefix)
-                    self.write_tag_directive(handle_text, prefix_text)
-            implicit = (first and not self.event.explicit and not self.canonical
-                    and not self.event.version and not self.event.tags
-                    and not self.check_empty_document())
-            if not implicit:
-                self.write_indent()
-                self.write_indicator('---', True)
-                if self.canonical:
-                    self.write_indent()
-            self.state = self.expect_document_root
-        elif isinstance(self.event, StreamEndEvent):
-            if self.open_ended:
-                self.write_indicator('...', True)
-                self.write_indent()
-            self.write_stream_end()
-            self.state = self.expect_nothing
-        else:
-            raise EmitterError("expected DocumentStartEvent, but got %s"
-                    % self.event)
-
-    def expect_document_end(self):
-        if isinstance(self.event, DocumentEndEvent):
-            self.write_indent()
-            if self.event.explicit:
-                self.write_indicator('...', True)
-                self.write_indent()
-            self.flush_stream()
-            self.state = self.expect_document_start
-        else:
-            raise EmitterError("expected DocumentEndEvent, but got %s"
-                    % self.event)
-
-    def expect_document_root(self):
-        self.states.append(self.expect_document_end)
-        self.expect_node(root=True)
-
-    # Node handlers.
-
-    def expect_node(self, root=False, sequence=False, mapping=False,
-            simple_key=False):
-        self.root_context = root
-        self.sequence_context = sequence
-        self.mapping_context = mapping
-        self.simple_key_context = simple_key
-        if isinstance(self.event, AliasEvent):
-            self.expect_alias()
-        elif isinstance(self.event, (ScalarEvent, CollectionStartEvent)):
-            self.process_anchor('&')
-            self.process_tag()
-            if isinstance(self.event, ScalarEvent):
-                self.expect_scalar()
-            elif isinstance(self.event, SequenceStartEvent):
-                if self.flow_level or self.canonical or self.event.flow_style   \
-                        or self.check_empty_sequence():
-                    self.expect_flow_sequence()
-                else:
-                    self.expect_block_sequence()
-            elif isinstance(self.event, MappingStartEvent):
-                if self.flow_level or self.canonical or self.event.flow_style   \
-                        or self.check_empty_mapping():
-                    self.expect_flow_mapping()
-                else:
-                    self.expect_block_mapping()
-        else:
-            raise EmitterError("expected NodeEvent, but got %s" % self.event)
-
-    def expect_alias(self):
-        if self.event.anchor is None:
-            raise EmitterError("anchor is not specified for alias")
-        self.process_anchor('*')
-        self.state = self.states.pop()
-
-    def expect_scalar(self):
-        self.increase_indent(flow=True)
-        self.process_scalar()
-        self.indent = self.indents.pop()
-        self.state = self.states.pop()
-
-    # Flow sequence handlers.
-
-    def expect_flow_sequence(self):
-        self.write_indicator('[', True, whitespace=True)
-        self.flow_level += 1
-        self.increase_indent(flow=True)
-        self.state = self.expect_first_flow_sequence_item
-
-    def expect_first_flow_sequence_item(self):
-        if isinstance(self.event, SequenceEndEvent):
-            self.indent = self.indents.pop()
-            self.flow_level -= 1
-            self.write_indicator(']', False)
-            self.state = self.states.pop()
-        else:
-            if self.canonical or self.column > self.best_width:
-                self.write_indent()
-            self.states.append(self.expect_flow_sequence_item)
-            self.expect_node(sequence=True)
-
-    def expect_flow_sequence_item(self):
-        if isinstance(self.event, SequenceEndEvent):
-            self.indent = self.indents.pop()
-            self.flow_level -= 1
-            if self.canonical:
-                self.write_indicator(',', False)
-                self.write_indent()
-            self.write_indicator(']', False)
-            self.state = self.states.pop()
-        else:
-            self.write_indicator(',', False)
-            if self.canonical or self.column > self.best_width:
-                self.write_indent()
-            self.states.append(self.expect_flow_sequence_item)
-            self.expect_node(sequence=True)
-
-    # Flow mapping handlers.
-
-    def expect_flow_mapping(self):
-        self.write_indicator('{', True, whitespace=True)
-        self.flow_level += 1
-        self.increase_indent(flow=True)
-        self.state = self.expect_first_flow_mapping_key
-
-    def expect_first_flow_mapping_key(self):
-        if isinstance(self.event, MappingEndEvent):
-            self.indent = self.indents.pop()
-            self.flow_level -= 1
-            self.write_indicator('}', False)
-            self.state = self.states.pop()
-        else:
-            if self.canonical or self.column > self.best_width:
-                self.write_indent()
-            if not self.canonical and self.check_simple_key():
-                self.states.append(self.expect_flow_mapping_simple_value)
-                self.expect_node(mapping=True, simple_key=True)
-            else:
-                self.write_indicator('?', True)
-                self.states.append(self.expect_flow_mapping_value)
-                self.expect_node(mapping=True)
-
-    def expect_flow_mapping_key(self):
-        if isinstance(self.event, MappingEndEvent):
-            self.indent = self.indents.pop()
-            self.flow_level -= 1
-            if self.canonical:
-                self.write_indicator(',', False)
-                self.write_indent()
-            self.write_indicator('}', False)
-            self.state = self.states.pop()
-        else:
-            self.write_indicator(',', False)
-            if self.canonical or self.column > self.best_width:
-                self.write_indent()
-            if not self.canonical and self.check_simple_key():
-                self.states.append(self.expect_flow_mapping_simple_value)
-                self.expect_node(mapping=True, simple_key=True)
-            else:
-                self.write_indicator('?', True)
-                self.states.append(self.expect_flow_mapping_value)
-                self.expect_node(mapping=True)
-
-    def expect_flow_mapping_simple_value(self):
-        self.write_indicator(':', False)
-        self.states.append(self.expect_flow_mapping_key)
-        self.expect_node(mapping=True)
-
-    def expect_flow_mapping_value(self):
-        if self.canonical or self.column > self.best_width:
-            self.write_indent()
-        self.write_indicator(':', True)
-        self.states.append(self.expect_flow_mapping_key)
-        self.expect_node(mapping=True)
-
-    # Block sequence handlers.
-
-    def expect_block_sequence(self):
-        indentless = (self.mapping_context and not self.indention)
-        self.increase_indent(flow=False, indentless=indentless)
-        self.state = self.expect_first_block_sequence_item
-
-    def expect_first_block_sequence_item(self):
-        return self.expect_block_sequence_item(first=True)
-
-    def expect_block_sequence_item(self, first=False):
-        if not first and isinstance(self.event, SequenceEndEvent):
-            self.indent = self.indents.pop()
-            self.state = self.states.pop()
-        else:
-            self.write_indent()
-            self.write_indicator('-', True, indention=True)
-            self.states.append(self.expect_block_sequence_item)
-            self.expect_node(sequence=True)
-
-    # Block mapping handlers.
-
-    def expect_block_mapping(self):
-        self.increase_indent(flow=False)
-        self.state = self.expect_first_block_mapping_key
-
-    def expect_first_block_mapping_key(self):
-        return self.expect_block_mapping_key(first=True)
-
-    def expect_block_mapping_key(self, first=False):
-        if not first and isinstance(self.event, MappingEndEvent):
-            self.indent = self.indents.pop()
-            self.state = self.states.pop()
-        else:
-            self.write_indent()
-            if self.check_simple_key():
-                self.states.append(self.expect_block_mapping_simple_value)
-                self.expect_node(mapping=True, simple_key=True)
-            else:
-                self.write_indicator('?', True, indention=True)
-                self.states.append(self.expect_block_mapping_value)
-                self.expect_node(mapping=True)
-
-    def expect_block_mapping_simple_value(self):
-        self.write_indicator(':', False)
-        self.states.append(self.expect_block_mapping_key)
-        self.expect_node(mapping=True)
-
-    def expect_block_mapping_value(self):
-        self.write_indent()
-        self.write_indicator(':', True, indention=True)
-        self.states.append(self.expect_block_mapping_key)
-        self.expect_node(mapping=True)
-
-    # Checkers.
-
-    def check_empty_sequence(self):
-        return (isinstance(self.event, SequenceStartEvent) and self.events
-                and isinstance(self.events[0], SequenceEndEvent))
-
-    def check_empty_mapping(self):
-        return (isinstance(self.event, MappingStartEvent) and self.events
-                and isinstance(self.events[0], MappingEndEvent))
-
-    def check_empty_document(self):
-        if not isinstance(self.event, DocumentStartEvent) or not self.events:
-            return False
-        event = self.events[0]
-        return (isinstance(event, ScalarEvent) and event.anchor is None
-                and event.tag is None and event.implicit and event.value == '')
-
-    def check_simple_key(self):
-        length = 0
-        if isinstance(self.event, NodeEvent) and self.event.anchor is not None:
-            if self.prepared_anchor is None:
-                self.prepared_anchor = self.prepare_anchor(self.event.anchor)
-            length += len(self.prepared_anchor)
-        if isinstance(self.event, (ScalarEvent, CollectionStartEvent))  \
-                and self.event.tag is not None:
-            if self.prepared_tag is None:
-                self.prepared_tag = self.prepare_tag(self.event.tag)
-            length += len(self.prepared_tag)
-        if isinstance(self.event, ScalarEvent):
-            if self.analysis is None:
-                self.analysis = self.analyze_scalar(self.event.value)
-            length += len(self.analysis.scalar)
-        return (length < 128 and (isinstance(self.event, AliasEvent)
-            or (isinstance(self.event, ScalarEvent)
-                    and not self.analysis.empty and not self.analysis.multiline)
-            or self.check_empty_sequence() or self.check_empty_mapping()))
-
-    # Anchor, Tag, and Scalar processors.
-
-    def process_anchor(self, indicator):
-        if self.event.anchor is None:
-            self.prepared_anchor = None
-            return
-        if self.prepared_anchor is None:
-            self.prepared_anchor = self.prepare_anchor(self.event.anchor)
-        if self.prepared_anchor:
-            self.write_indicator(indicator+self.prepared_anchor, True)
-        self.prepared_anchor = None
-
-    def process_tag(self):
-        tag = self.event.tag
-        if isinstance(self.event, ScalarEvent):
-            if self.style is None:
-                self.style = self.choose_scalar_style()
-            if ((not self.canonical or tag is None) and
-                ((self.style == '' and self.event.implicit[0])
-                        or (self.style != '' and self.event.implicit[1]))):
-                self.prepared_tag = None
-                return
-            if self.event.implicit[0] and tag is None:
-                tag = '!'
-                self.prepared_tag = None
-        else:
-            if (not self.canonical or tag is None) and self.event.implicit:
-                self.prepared_tag = None
-                return
-        if tag is None:
-            raise EmitterError("tag is not specified")
-        if self.prepared_tag is None:
-            self.prepared_tag = self.prepare_tag(tag)
-        if self.prepared_tag:
-            self.write_indicator(self.prepared_tag, True)
-        self.prepared_tag = None
-
-    def choose_scalar_style(self):
-        if self.analysis is None:
-            self.analysis = self.analyze_scalar(self.event.value)
-        if self.event.style == '"' or self.canonical:
-            return '"'
-        if not self.event.style and self.event.implicit[0]:
-            if (not (self.simple_key_context and
-                    (self.analysis.empty or self.analysis.multiline))
-                and (self.flow_level and self.analysis.allow_flow_plain
-                    or (not self.flow_level and self.analysis.allow_block_plain))):
-                return ''
-        if self.event.style and self.event.style in '|>':
-            if (not self.flow_level and not self.simple_key_context
-                    and self.analysis.allow_block):
-                return self.event.style
-        if not self.event.style or self.event.style == '\'':
-            if (self.analysis.allow_single_quoted and
-                    not (self.simple_key_context and self.analysis.multiline)):
-                return '\''
-        return '"'
-
-    def process_scalar(self):
-        if self.analysis is None:
-            self.analysis = self.analyze_scalar(self.event.value)
-        if self.style is None:
-            self.style = self.choose_scalar_style()
-        split = (not self.simple_key_context)
-        #if self.analysis.multiline and split    \
-        #        and (not self.style or self.style in '\'\"'):
-        #    self.write_indent()
-        if self.style == '"':
-            self.write_double_quoted(self.analysis.scalar, split)
-        elif self.style == '\'':
-            self.write_single_quoted(self.analysis.scalar, split)
-        elif self.style == '>':
-            self.write_folded(self.analysis.scalar)
-        elif self.style == '|':
-            self.write_literal(self.analysis.scalar)
-        else:
-            self.write_plain(self.analysis.scalar, split)
-        self.analysis = None
-        self.style = None
-
-    # Analyzers.
-
-    def prepare_version(self, version):
-        major, minor = version
-        if major != 1:
-            raise EmitterError("unsupported YAML version: %d.%d" % (major, minor))
-        return '%d.%d' % (major, minor)
-
-    def prepare_tag_handle(self, handle):
-        if not handle:
-            raise EmitterError("tag handle must not be empty")
-        if handle[0] != '!' or handle[-1] != '!':
-            raise EmitterError("tag handle must start and end with '!': %r" % handle)
-        for ch in handle[1:-1]:
-            if not ('0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z'    \
-                    or ch in '-_'):
-                raise EmitterError("invalid character %r in the tag handle: %r"
-                        % (ch, handle))
-        return handle
-
-    def prepare_tag_prefix(self, prefix):
-        if not prefix:
-            raise EmitterError("tag prefix must not be empty")
-        chunks = []
-        start = end = 0
-        if prefix[0] == '!':
-            end = 1
-        while end < len(prefix):
-            ch = prefix[end]
-            if '0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' \
-                    or ch in '-;/?!:@&=+$,_.~*\'()[]':
-                end += 1
-            else:
-                if start < end:
-                    chunks.append(prefix[start:end])
-                start = end = end+1
-                data = ch.encode('utf-8')
-                for ch in data:
-                    chunks.append('%%%02X' % ord(ch))
-        if start < end:
-            chunks.append(prefix[start:end])
-        return ''.join(chunks)
-
-    def prepare_tag(self, tag):
-        if not tag:
-            raise EmitterError("tag must not be empty")
-        if tag == '!':
-            return tag
-        handle = None
-        suffix = tag
-        prefixes = sorted(self.tag_prefixes.keys())
-        for prefix in prefixes:
-            if tag.startswith(prefix)   \
-                    and (prefix == '!' or len(prefix) < len(tag)):
-                handle = self.tag_prefixes[prefix]
-                suffix = tag[len(prefix):]
-        chunks = []
-        start = end = 0
-        while end < len(suffix):
-            ch = suffix[end]
-            if '0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' \
-                    or ch in '-;/?:@&=+$,_.~*\'()[]'   \
-                    or (ch == '!' and handle != '!'):
-                end += 1
-            else:
-                if start < end:
-                    chunks.append(suffix[start:end])
-                start = end = end+1
-                data = ch.encode('utf-8')
-                for ch in data:
-                    chunks.append('%%%02X' % ch)
-        if start < end:
-            chunks.append(suffix[start:end])
-        suffix_text = ''.join(chunks)
-        if handle:
-            return '%s%s' % (handle, suffix_text)
-        else:
-            return '!<%s>' % suffix_text
-
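
Condensed, the prefix search above emits standard tags in handle form and
unknown ones verbatim; a self-contained sketch using the usual default
prefixes (assumed here, they live in the emitter's tag_prefixes):

    prefixes = {'!': '!', 'tag:yaml.org,2002:': '!!'}
    tag = 'tag:yaml.org,2002:str'
    handle, suffix = None, tag
    for p in sorted(prefixes):
        # longest matching prefix wins, as in the loop above
        if tag.startswith(p) and (p == '!' or len(p) < len(tag)):
            handle, suffix = prefixes[p], tag[len(p):]
    print('%s%s' % (handle, suffix) if handle else '!<%s>' % suffix)  # !!str
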
-    def prepare_anchor(self, anchor):
-        if not anchor:
-            raise EmitterError("anchor must not be empty")
-        for ch in anchor:
-            if not ('0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z'    \
-                    or ch in '-_'):
-                raise EmitterError("invalid character %r in the anchor: %r"
-                        % (ch, anchor))
-        return anchor
-
-    def analyze_scalar(self, scalar):
-
-        # Empty scalar is a special case.
-        if not scalar:
-            return ScalarAnalysis(scalar=scalar, empty=True, multiline=False,
-                    allow_flow_plain=False, allow_block_plain=True,
-                    allow_single_quoted=True, allow_double_quoted=True,
-                    allow_block=False)
-
-        # Indicators and special characters.
-        block_indicators = False
-        flow_indicators = False
-        line_breaks = False
-        special_characters = False
-
-        # Important whitespace combinations.
-        leading_space = False
-        leading_break = False
-        trailing_space = False
-        trailing_break = False
-        break_space = False
-        space_break = False
-
-        # Check document indicators.
-        if scalar.startswith('---') or scalar.startswith('...'):
-            block_indicators = True
-            flow_indicators = True
-
-        # First character or preceded by a whitespace.
-        preceded_by_whitespace = True
-
-        # Last character or followed by a whitespace.
-        followed_by_whitespace = (len(scalar) == 1 or
-                scalar[1] in '\0 \t\r\n\x85\u2028\u2029')
-
-        # The previous character is a space.
-        previous_space = False
-
-        # The previous character is a break.
-        previous_break = False
-
-        index = 0
-        while index < len(scalar):
-            ch = scalar[index]
-
-            # Check for indicators.
-            if index == 0:
-                # Leading indicators are special characters.
-                if ch in '#,[]{}&*!|>\'\"%@`': 
-                    flow_indicators = True
-                    block_indicators = True
-                if ch in '?:':
-                    flow_indicators = True
-                    if followed_by_whitespace:
-                        block_indicators = True
-                if ch == '-' and followed_by_whitespace:
-                    flow_indicators = True
-                    block_indicators = True
-            else:
-                # Some indicators cannot appear within a scalar either.
-                if ch in ',?[]{}':
-                    flow_indicators = True
-                if ch == ':':
-                    flow_indicators = True
-                    if followed_by_whitespace:
-                        block_indicators = True
-                if ch == '#' and preceded_by_whitespace:
-                    flow_indicators = True
-                    block_indicators = True
-
-            # Check for line breaks, special, and unicode characters.
-            if ch in '\n\x85\u2028\u2029':
-                line_breaks = True
-            if not (ch == '\n' or '\x20' <= ch <= '\x7E'):
-                if (ch == '\x85' or '\xA0' <= ch <= '\uD7FF'
-                        or '\uE000' <= ch <= '\uFFFD') and ch != '\uFEFF':
-                    unicode_characters = True
-                    if not self.allow_unicode:
-                        special_characters = True
-                else:
-                    special_characters = True
-
-            # Detect important whitespace combinations.
-            if ch == ' ':
-                if index == 0:
-                    leading_space = True
-                if index == len(scalar)-1:
-                    trailing_space = True
-                if previous_break:
-                    break_space = True
-                previous_space = True
-                previous_break = False
-            elif ch in '\n\x85\u2028\u2029':
-                if index == 0:
-                    leading_break = True
-                if index == len(scalar)-1:
-                    trailing_break = True
-                if previous_space:
-                    space_break = True
-                previous_space = False
-                previous_break = True
-            else:
-                previous_space = False
-                previous_break = False
-
-            # Prepare for the next character.
-            index += 1
-            preceded_by_whitespace = (ch in '\0 \t\r\n\x85\u2028\u2029')
-            followed_by_whitespace = (index+1 >= len(scalar) or
-                    scalar[index+1] in '\0 \t\r\n\x85\u2028\u2029')
-
-        # Let's decide what styles are allowed.
-        allow_flow_plain = True
-        allow_block_plain = True
-        allow_single_quoted = True
-        allow_double_quoted = True
-        allow_block = True
-
-        # Leading and trailing whitespaces are bad for plain scalars.
-        if (leading_space or leading_break
-                or trailing_space or trailing_break):
-            allow_flow_plain = allow_block_plain = False
-
-        # We do not permit trailing spaces for block scalars.
-        if trailing_space:
-            allow_block = False
-
-        # Spaces at the beginning of a new line are only acceptable for block
-        # scalars.
-        if break_space:
-            allow_flow_plain = allow_block_plain = allow_single_quoted = False
-
-        # Spaces followed by breaks, as well as special characters, are only
-        # allowed for double quoted scalars.
-        if space_break or special_characters:
-            allow_flow_plain = allow_block_plain =  \
-            allow_single_quoted = allow_block = False
-
-        # Although the plain scalar writer supports breaks, we never emit
-        # multiline plain scalars.
-        if line_breaks:
-            allow_flow_plain = allow_block_plain = False
-
-        # Flow indicators are forbidden for flow plain scalars.
-        if flow_indicators:
-            allow_flow_plain = False
-
-        # Block indicators are forbidden for block plain scalars.
-        if block_indicators:
-            allow_block_plain = False
-
-        return ScalarAnalysis(scalar=scalar,
-                empty=False, multiline=line_breaks,
-                allow_flow_plain=allow_flow_plain,
-                allow_block_plain=allow_block_plain,
-                allow_single_quoted=allow_single_quoted,
-                allow_double_quoted=allow_double_quoted,
-                allow_block=allow_block)
-
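
Seen from the public API, these rules decide how much quoting a scalar
needs; for example (PyYAML dumper, sample data is illustrative):

    import yaml

    yaml.dump(['word'])       # '- word\n'          plain style allowed
    yaml.dump([' padded'])    # "- ' padded'\n"     leading space forbids plain
    yaml.dump(['a: b'])       # "- 'a: b'\n"        ': ' is a block indicator
    yaml.dump(['tab\there'])  # '- "tab\\there"\n'  special char: double quotes
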
-    # Writers.
-
-    def flush_stream(self):
-        if hasattr(self.stream, 'flush'):
-            self.stream.flush()
-
-    def write_stream_start(self):
-        # Write BOM if needed.
-        if self.encoding and self.encoding.startswith('utf-16'):
-            self.stream.write('\uFEFF'.encode(self.encoding))
-
-    def write_stream_end(self):
-        self.flush_stream()
-
-    def write_indicator(self, indicator, need_whitespace,
-            whitespace=False, indention=False):
-        if self.whitespace or not need_whitespace:
-            data = indicator
-        else:
-            data = ' '+indicator
-        self.whitespace = whitespace
-        self.indention = self.indention and indention
-        self.column += len(data)
-        self.open_ended = False
-        if self.encoding:
-            data = data.encode(self.encoding)
-        self.stream.write(data)
-
-    def write_indent(self):
-        indent = self.indent or 0
-        if not self.indention or self.column > indent   \
-                or (self.column == indent and not self.whitespace):
-            self.write_line_break()
-        if self.column < indent:
-            self.whitespace = True
-            data = ' '*(indent-self.column)
-            self.column = indent
-            if self.encoding:
-                data = data.encode(self.encoding)
-            self.stream.write(data)
-
-    def write_line_break(self, data=None):
-        if data is None:
-            data = self.best_line_break
-        self.whitespace = True
-        self.indention = True
-        self.line += 1
-        self.column = 0
-        if self.encoding:
-            data = data.encode(self.encoding)
-        self.stream.write(data)
-
-    def write_version_directive(self, version_text):
-        data = '%%YAML %s' % version_text
-        if self.encoding:
-            data = data.encode(self.encoding)
-        self.stream.write(data)
-        self.write_line_break()
-
-    def write_tag_directive(self, handle_text, prefix_text):
-        data = '%%TAG %s %s' % (handle_text, prefix_text)
-        if self.encoding:
-            data = data.encode(self.encoding)
-        self.stream.write(data)
-        self.write_line_break()
-
-    # Scalar streams.
-
-    def write_single_quoted(self, text, split=True):
-        self.write_indicator('\'', True)
-        spaces = False
-        breaks = False
-        start = end = 0
-        while end <= len(text):
-            ch = None
-            if end < len(text):
-                ch = text[end]
-            if spaces:
-                if ch is None or ch != ' ':
-                    if start+1 == end and self.column > self.best_width and split   \
-                            and start != 0 and end != len(text):
-                        self.write_indent()
-                    else:
-                        data = text[start:end]
-                        self.column += len(data)
-                        if self.encoding:
-                            data = data.encode(self.encoding)
-                        self.stream.write(data)
-                    start = end
-            elif breaks:
-                if ch is None or ch not in '\n\x85\u2028\u2029':
-                    if text[start] == '\n':
-                        self.write_line_break()
-                    for br in text[start:end]:
-                        if br == '\n':
-                            self.write_line_break()
-                        else:
-                            self.write_line_break(br)
-                    self.write_indent()
-                    start = end
-            else:
-                if ch is None or ch in ' \n\x85\u2028\u2029' or ch == '\'':
-                    if start < end:
-                        data = text[start:end]
-                        self.column += len(data)
-                        if self.encoding:
-                            data = data.encode(self.encoding)
-                        self.stream.write(data)
-                        start = end
-            if ch == '\'':
-                data = '\'\''
-                self.column += 2
-                if self.encoding:
-                    data = data.encode(self.encoding)
-                self.stream.write(data)
-                start = end + 1
-            if ch is not None:
-                spaces = (ch == ' ')
-                breaks = (ch in '\n\x85\u2028\u2029')
-            end += 1
-        self.write_indicator('\'', False)
-
-    ESCAPE_REPLACEMENTS = {
-        '\0':       '0',
-        '\x07':     'a',
-        '\x08':     'b',
-        '\x09':     't',
-        '\x0A':     'n',
-        '\x0B':     'v',
-        '\x0C':     'f',
-        '\x0D':     'r',
-        '\x1B':     'e',
-        '\"':       '\"',
-        '\\':       '\\',
-        '\x85':     'N',
-        '\xA0':     '_',
-        '\u2028':   'L',
-        '\u2029':   'P',
-    }
-
-    def write_double_quoted(self, text, split=True):
-        self.write_indicator('"', True)
-        start = end = 0
-        while end <= len(text):
-            ch = None
-            if end < len(text):
-                ch = text[end]
-            if ch is None or ch in '"\\\x85\u2028\u2029\uFEFF' \
-                    or not ('\x20' <= ch <= '\x7E'
-                        or (self.allow_unicode
-                            and ('\xA0' <= ch <= '\uD7FF'
-                                or '\uE000' <= ch <= '\uFFFD'))):
-                if start < end:
-                    data = text[start:end]
-                    self.column += len(data)
-                    if self.encoding:
-                        data = data.encode(self.encoding)
-                    self.stream.write(data)
-                    start = end
-                if ch is not None:
-                    if ch in self.ESCAPE_REPLACEMENTS:
-                        data = '\\'+self.ESCAPE_REPLACEMENTS[ch]
-                    elif ch <= '\xFF':
-                        data = '\\x%02X' % ord(ch)
-                    elif ch <= '\uFFFF':
-                        data = '\\u%04X' % ord(ch)
-                    else:
-                        data = '\\U%08X' % ord(ch)
-                    self.column += len(data)
-                    if self.encoding:
-                        data = data.encode(self.encoding)
-                    self.stream.write(data)
-                    start = end+1
-            if 0 < end < len(text)-1 and (ch == ' ' or start >= end)    \
-                    and self.column+(end-start) > self.best_width and split:
-                data = text[start:end]+'\\'
-                if start < end:
-                    start = end
-                self.column += len(data)
-                if self.encoding:
-                    data = data.encode(self.encoding)
-                self.stream.write(data)
-                self.write_indent()
-                self.whitespace = False
-                self.indention = False
-                if text[start] == ' ':
-                    data = '\\'
-                    self.column += len(data)
-                    if self.encoding:
-                        data = data.encode(self.encoding)
-                    self.stream.write(data)
-            end += 1
-        self.write_indicator('"', False)
-
-    def determine_block_hints(self, text):
-        hints = ''
-        if text:
-            if text[0] in ' \n\x85\u2028\u2029':
-                hints += str(self.best_indent)
-            if text[-1] not in '\n\x85\u2028\u2029':
-                hints += '-'
-            elif len(text) == 1 or text[-2] in '\n\x85\u2028\u2029':
-                hints += '+'
-        return hints
-
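
The returned hints become the block scalar header: an explicit indentation
indicator when the content starts with whitespace, '-' (strip) when there is
no final break, '+' (keep) when there are trailing breaks.  With the default
best_indent of 2:

    # determine_block_hints('text')      -> '-'  (header '|-', no final newline)
    # determine_block_hints('text\n')    -> ''   (header '|')
    # determine_block_hints('text\n\n')  -> '+'  (header '|+', keep trailing breaks)
    # determine_block_hints('  text\n')  -> '2'  (header '|2', indent indicator)
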
-    def write_folded(self, text):
-        hints = self.determine_block_hints(text)
-        self.write_indicator('>'+hints, True)
-        if hints[-1:] == '+':
-            self.open_ended = True
-        self.write_line_break()
-        leading_space = True
-        spaces = False
-        breaks = True
-        start = end = 0
-        while end <= len(text):
-            ch = None
-            if end < len(text):
-                ch = text[end]
-            if breaks:
-                if ch is None or ch not in '\n\x85\u2028\u2029':
-                    if not leading_space and ch is not None and ch != ' '   \
-                            and text[start] == '\n':
-                        self.write_line_break()
-                    leading_space = (ch == ' ')
-                    for br in text[start:end]:
-                        if br == '\n':
-                            self.write_line_break()
-                        else:
-                            self.write_line_break(br)
-                    if ch is not None:
-                        self.write_indent()
-                    start = end
-            elif spaces:
-                if ch != ' ':
-                    if start+1 == end and self.column > self.best_width:
-                        self.write_indent()
-                    else:
-                        data = text[start:end]
-                        self.column += len(data)
-                        if self.encoding:
-                            data = data.encode(self.encoding)
-                        self.stream.write(data)
-                    start = end
-            else:
-                if ch is None or ch in ' \n\x85\u2028\u2029':
-                    data = text[start:end]
-                    self.column += len(data)
-                    if self.encoding:
-                        data = data.encode(self.encoding)
-                    self.stream.write(data)
-                    if ch is None:
-                        self.write_line_break()
-                    start = end
-            if ch is not None:
-                breaks = (ch in '\n\x85\u2028\u2029')
-                spaces = (ch == ' ')
-            end += 1
-
-    def write_literal(self, text):
-        hints = self.determine_block_hints(text)
-        self.write_indicator('|'+hints, True)
-        if hints[-1:] == '+':
-            self.open_ended = True
-        self.write_line_break()
-        breaks = True
-        start = end = 0
-        while end <= len(text):
-            ch = None
-            if end < len(text):
-                ch = text[end]
-            if breaks:
-                if ch is None or ch not in '\n\x85\u2028\u2029':
-                    for br in text[start:end]:
-                        if br == '\n':
-                            self.write_line_break()
-                        else:
-                            self.write_line_break(br)
-                    if ch is not None:
-                        self.write_indent()
-                    start = end
-            else:
-                if ch is None or ch in '\n\x85\u2028\u2029':
-                    data = text[start:end]
-                    if self.encoding:
-                        data = data.encode(self.encoding)
-                    self.stream.write(data)
-                    if ch is None:
-                        self.write_line_break()
-                    start = end
-            if ch is not None:
-                breaks = (ch in '\n\x85\u2028\u2029')
-            end += 1
-
-    def write_plain(self, text, split=True):
-        if self.root_context:
-            self.open_ended = True
-        if not text:
-            return
-        if not self.whitespace:
-            data = ' '
-            self.column += len(data)
-            if self.encoding:
-                data = data.encode(self.encoding)
-            self.stream.write(data)
-        self.whitespace = False
-        self.indention = False
-        spaces = False
-        breaks = False
-        start = end = 0
-        while end <= len(text):
-            ch = None
-            if end < len(text):
-                ch = text[end]
-            if spaces:
-                if ch != ' ':
-                    if start+1 == end and self.column > self.best_width and split:
-                        self.write_indent()
-                        self.whitespace = False
-                        self.indention = False
-                    else:
-                        data = text[start:end]
-                        self.column += len(data)
-                        if self.encoding:
-                            data = data.encode(self.encoding)
-                        self.stream.write(data)
-                    start = end
-            elif breaks:
-                if ch not in '\n\x85\u2028\u2029':
-                    if text[start] == '\n':
-                        self.write_line_break()
-                    for br in text[start:end]:
-                        if br == '\n':
-                            self.write_line_break()
-                        else:
-                            self.write_line_break(br)
-                    self.write_indent()
-                    self.whitespace = False
-                    self.indention = False
-                    start = end
-            else:
-                if ch is None or ch in ' \n\x85\u2028\u2029':
-                    data = text[start:end]
-                    self.column += len(data)
-                    if self.encoding:
-                        data = data.encode(self.encoding)
-                    self.stream.write(data)
-                    start = end
-            if ch is not None:
-                spaces = (ch == ' ')
-                breaks = (ch in '\n\x85\u2028\u2029')
-            end += 1
-
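
write_plain folds a run of spaces into a line break once the column passes
best_width, which is what the public width= option tunes; roughly:

    import yaml

    print(yaml.dump('lorem ipsum dolor sit amet', width=10))
    # lorem ipsum
    #   dolor sit
    #   amet
    # ...
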
diff --git a/lib/spack/external/yaml/lib3/yaml/error.py b/lib/spack/external/yaml/lib3/yaml/error.py
deleted file mode 100644
index b796b4dc51..0000000000
--- a/lib/spack/external/yaml/lib3/yaml/error.py
+++ /dev/null
@@ -1,75 +0,0 @@
-
-__all__ = ['Mark', 'YAMLError', 'MarkedYAMLError']
-
-class Mark:
-
-    def __init__(self, name, index, line, column, buffer, pointer):
-        self.name = name
-        self.index = index
-        self.line = line
-        self.column = column
-        self.buffer = buffer
-        self.pointer = pointer
-
-    def get_snippet(self, indent=4, max_length=75):
-        if self.buffer is None:
-            return None
-        head = ''
-        start = self.pointer
-        while start > 0 and self.buffer[start-1] not in '\0\r\n\x85\u2028\u2029':
-            start -= 1
-            if self.pointer-start > max_length/2-1:
-                head = ' ... '
-                start += 5
-                break
-        tail = ''
-        end = self.pointer
-        while end < len(self.buffer) and self.buffer[end] not in '\0\r\n\x85\u2028\u2029':
-            end += 1
-            if end-self.pointer > max_length/2-1:
-                tail = ' ... '
-                end -= 5
-                break
-        snippet = self.buffer[start:end]
-        return ' '*indent + head + snippet + tail + '\n'  \
-                + ' '*(indent+self.pointer-start+len(head)) + '^'
-
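
The snippet-plus-caret output is what MarkedYAMLError messages are built
from; for instance (output abridged and approximate):

    import yaml

    try:
        yaml.safe_load('key: [1, 2\n')
    except yaml.YAMLError as err:
        print(err)
    # while parsing a flow sequence
    #   in "<unicode string>", line 1, column 6:
    #     key: [1, 2
    #          ^
    # expected ',' or ']', but got '<stream end>'
    #   in "<unicode string>", line 2, column 1
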
-    def __str__(self):
-        snippet = self.get_snippet()
-        where = "  in \"%s\", line %d, column %d"   \
-                % (self.name, self.line+1, self.column+1)
-        if snippet is not None:
-            where += ":\n"+snippet
-        return where
-
-class YAMLError(Exception):
-    pass
-
-class MarkedYAMLError(YAMLError):
-
-    def __init__(self, context=None, context_mark=None,
-            problem=None, problem_mark=None, note=None):
-        self.context = context
-        self.context_mark = context_mark
-        self.problem = problem
-        self.problem_mark = problem_mark
-        self.note = note
-
-    def __str__(self):
-        lines = []
-        if self.context is not None:
-            lines.append(self.context)
-        if self.context_mark is not None  \
-            and (self.problem is None or self.problem_mark is None
-                    or self.context_mark.name != self.problem_mark.name
-                    or self.context_mark.line != self.problem_mark.line
-                    or self.context_mark.column != self.problem_mark.column):
-            lines.append(str(self.context_mark))
-        if self.problem is not None:
-            lines.append(self.problem)
-        if self.problem_mark is not None:
-            lines.append(str(self.problem_mark))
-        if self.note is not None:
-            lines.append(self.note)
-        return '\n'.join(lines)
-
diff --git a/lib/spack/external/yaml/lib3/yaml/events.py b/lib/spack/external/yaml/lib3/yaml/events.py
deleted file mode 100644
index f79ad389cb..0000000000
--- a/lib/spack/external/yaml/lib3/yaml/events.py
+++ /dev/null
@@ -1,86 +0,0 @@
-
-# Abstract classes.
-
-class Event(object):
-    def __init__(self, start_mark=None, end_mark=None):
-        self.start_mark = start_mark
-        self.end_mark = end_mark
-    def __repr__(self):
-        attributes = [key for key in ['anchor', 'tag', 'implicit', 'value']
-                if hasattr(self, key)]
-        arguments = ', '.join(['%s=%r' % (key, getattr(self, key))
-                for key in attributes])
-        return '%s(%s)' % (self.__class__.__name__, arguments)
-
-class NodeEvent(Event):
-    def __init__(self, anchor, start_mark=None, end_mark=None):
-        self.anchor = anchor
-        self.start_mark = start_mark
-        self.end_mark = end_mark
-
-class CollectionStartEvent(NodeEvent):
-    def __init__(self, anchor, tag, implicit, start_mark=None, end_mark=None,
-            flow_style=None):
-        self.anchor = anchor
-        self.tag = tag
-        self.implicit = implicit
-        self.start_mark = start_mark
-        self.end_mark = end_mark
-        self.flow_style = flow_style
-
-class CollectionEndEvent(Event):
-    pass
-
-# Implementations.
-
-class StreamStartEvent(Event):
-    def __init__(self, start_mark=None, end_mark=None, encoding=None):
-        self.start_mark = start_mark
-        self.end_mark = end_mark
-        self.encoding = encoding
-
-class StreamEndEvent(Event):
-    pass
-
-class DocumentStartEvent(Event):
-    def __init__(self, start_mark=None, end_mark=None,
-            explicit=None, version=None, tags=None):
-        self.start_mark = start_mark
-        self.end_mark = end_mark
-        self.explicit = explicit
-        self.version = version
-        self.tags = tags
-
-class DocumentEndEvent(Event):
-    def __init__(self, start_mark=None, end_mark=None,
-            explicit=None):
-        self.start_mark = start_mark
-        self.end_mark = end_mark
-        self.explicit = explicit
-
-class AliasEvent(NodeEvent):
-    pass
-
-class ScalarEvent(NodeEvent):
-    def __init__(self, anchor, tag, implicit, value,
-            start_mark=None, end_mark=None, style=None):
-        self.anchor = anchor
-        self.tag = tag
-        self.implicit = implicit
-        self.value = value
-        self.start_mark = start_mark
-        self.end_mark = end_mark
-        self.style = style
-
-class SequenceStartEvent(CollectionStartEvent):
-    pass
-
-class SequenceEndEvent(CollectionEndEvent):
-    pass
-
-class MappingStartEvent(CollectionStartEvent):
-    pass
-
-class MappingEndEvent(CollectionEndEvent):
-    pass
-
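
yaml.parse exposes this event stream directly; for a one-key mapping the
events arrive roughly as:

    import yaml

    for event in yaml.parse('answer: 42'):
        print(event)
    # StreamStartEvent()
    # DocumentStartEvent()
    # MappingStartEvent(anchor=None, tag=None, implicit=True)
    # ScalarEvent(anchor=None, tag=None, implicit=(True, False), value='answer')
    # ScalarEvent(anchor=None, tag=None, implicit=(True, False), value='42')
    # MappingEndEvent()
    # DocumentEndEvent()
    # StreamEndEvent()
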
diff --git a/lib/spack/external/yaml/lib3/yaml/loader.py b/lib/spack/external/yaml/lib3/yaml/loader.py
deleted file mode 100644
index 08c8f01b34..0000000000
--- a/lib/spack/external/yaml/lib3/yaml/loader.py
+++ /dev/null
@@ -1,40 +0,0 @@
-
-__all__ = ['BaseLoader', 'SafeLoader', 'Loader']
-
-from .reader import *
-from .scanner import *
-from .parser import *
-from .composer import *
-from .constructor import *
-from .resolver import *
-
-class BaseLoader(Reader, Scanner, Parser, Composer, BaseConstructor, BaseResolver):
-
-    def __init__(self, stream):
-        Reader.__init__(self, stream)
-        Scanner.__init__(self)
-        Parser.__init__(self)
-        Composer.__init__(self)
-        BaseConstructor.__init__(self)
-        BaseResolver.__init__(self)
-
-class SafeLoader(Reader, Scanner, Parser, Composer, SafeConstructor, Resolver):
-
-    def __init__(self, stream):
-        Reader.__init__(self, stream)
-        Scanner.__init__(self)
-        Parser.__init__(self)
-        Composer.__init__(self)
-        SafeConstructor.__init__(self)
-        Resolver.__init__(self)
-
-class Loader(Reader, Scanner, Parser, Composer, Constructor, Resolver):
-
-    def __init__(self, stream):
-        Reader.__init__(self, stream)
-        Scanner.__init__(self)
-        Parser.__init__(self)
-        Composer.__init__(self)
-        Constructor.__init__(self)
-        Resolver.__init__(self)
-
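
Each loader is the same pipeline (read, scan, parse, compose, construct,
resolve) with only the constructor/resolver pair swapped, which is all that
separates SafeLoader from Loader:

    import yaml

    yaml.load('x: 1', Loader=yaml.SafeLoader)          # {'x': 1}
    yaml.load("!!python/none ''", Loader=yaml.Loader)  # None
    # SafeLoader rejects the second document with a ConstructorError,
    # since SafeConstructor knows no python/* tags.
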
diff --git a/lib/spack/external/yaml/lib3/yaml/nodes.py b/lib/spack/external/yaml/lib3/yaml/nodes.py
deleted file mode 100644
index c4f070c41e..0000000000
--- a/lib/spack/external/yaml/lib3/yaml/nodes.py
+++ /dev/null
@@ -1,49 +0,0 @@
-
-class Node(object):
-    def __init__(self, tag, value, start_mark, end_mark):
-        self.tag = tag
-        self.value = value
-        self.start_mark = start_mark
-        self.end_mark = end_mark
-    def __repr__(self):
-        value = self.value
-        #if isinstance(value, list):
-        #    if len(value) == 0:
-        #        value = '<empty>'
-        #    elif len(value) == 1:
-        #        value = '<1 item>'
-        #    else:
-        #        value = '<%d items>' % len(value)
-        #else:
-        #    if len(value) > 75:
-        #        value = repr(value[:70]+u' ... ')
-        #    else:
-        #        value = repr(value)
-        value = repr(value)
-        return '%s(tag=%r, value=%s)' % (self.__class__.__name__, self.tag, value)
-
-class ScalarNode(Node):
-    id = 'scalar'
-    def __init__(self, tag, value,
-            start_mark=None, end_mark=None, style=None):
-        self.tag = tag
-        self.value = value
-        self.start_mark = start_mark
-        self.end_mark = end_mark
-        self.style = style
-
-class CollectionNode(Node):
-    def __init__(self, tag, value,
-            start_mark=None, end_mark=None, flow_style=None):
-        self.tag = tag
-        self.value = value
-        self.start_mark = start_mark
-        self.end_mark = end_mark
-        self.flow_style = flow_style
-
-class SequenceNode(CollectionNode):
-    id = 'sequence'
-
-class MappingNode(CollectionNode):
-    id = 'mapping'
-
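
yaml.compose stops the pipeline at this node graph, before any native
objects are constructed (repr wrapped here for readability):

    import yaml

    print(yaml.compose('[a, 1]'))
    # SequenceNode(tag='tag:yaml.org,2002:seq', value=[
    #     ScalarNode(tag='tag:yaml.org,2002:str', value='a'),
    #     ScalarNode(tag='tag:yaml.org,2002:int', value='1')])
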
diff --git a/lib/spack/external/yaml/lib3/yaml/parser.py b/lib/spack/external/yaml/lib3/yaml/parser.py
deleted file mode 100644
index 13a5995d29..0000000000
--- a/lib/spack/external/yaml/lib3/yaml/parser.py
+++ /dev/null
@@ -1,589 +0,0 @@
-
-# The following YAML grammar is LL(1) and is parsed by a recursive descent
-# parser.
-#
-# stream            ::= STREAM-START implicit_document? explicit_document* STREAM-END
-# implicit_document ::= block_node DOCUMENT-END*
-# explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
-# block_node_or_indentless_sequence ::=
-#                       ALIAS
-#                       | properties (block_content | indentless_block_sequence)?
-#                       | block_content
-#                       | indentless_block_sequence
-# block_node        ::= ALIAS
-#                       | properties block_content?
-#                       | block_content
-# flow_node         ::= ALIAS
-#                       | properties flow_content?
-#                       | flow_content
-# properties        ::= TAG ANCHOR? | ANCHOR TAG?
-# block_content     ::= block_collection | flow_collection | SCALAR
-# flow_content      ::= flow_collection | SCALAR
-# block_collection  ::= block_sequence | block_mapping
-# flow_collection   ::= flow_sequence | flow_mapping
-# block_sequence    ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END
-# indentless_sequence   ::= (BLOCK-ENTRY block_node?)+
-# block_mapping     ::= BLOCK-MAPPING_START
-#                       ((KEY block_node_or_indentless_sequence?)?
-#                       (VALUE block_node_or_indentless_sequence?)?)*
-#                       BLOCK-END
-# flow_sequence     ::= FLOW-SEQUENCE-START
-#                       (flow_sequence_entry FLOW-ENTRY)*
-#                       flow_sequence_entry?
-#                       FLOW-SEQUENCE-END
-# flow_sequence_entry   ::= flow_node | KEY flow_node? (VALUE flow_node?)?
-# flow_mapping      ::= FLOW-MAPPING-START
-#                       (flow_mapping_entry FLOW-ENTRY)*
-#                       flow_mapping_entry?
-#                       FLOW-MAPPING-END
-# flow_mapping_entry    ::= flow_node | KEY flow_node? (VALUE flow_node?)?
-#
-# FIRST sets:
-#
-# stream: { STREAM-START }
-# explicit_document: { DIRECTIVE DOCUMENT-START }
-# implicit_document: FIRST(block_node)
-# block_node: { ALIAS TAG ANCHOR SCALAR BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START }
-# flow_node: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START }
-# block_content: { BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START SCALAR }
-# flow_content: { FLOW-SEQUENCE-START FLOW-MAPPING-START SCALAR }
-# block_collection: { BLOCK-SEQUENCE-START BLOCK-MAPPING-START }
-# flow_collection: { FLOW-SEQUENCE-START FLOW-MAPPING-START }
-# block_sequence: { BLOCK-SEQUENCE-START }
-# block_mapping: { BLOCK-MAPPING-START }
-# block_node_or_indentless_sequence: { ALIAS ANCHOR TAG SCALAR BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START BLOCK-ENTRY }
-# indentless_sequence: { ENTRY }
-# flow_collection: { FLOW-SEQUENCE-START FLOW-MAPPING-START }
-# flow_sequence: { FLOW-SEQUENCE-START }
-# flow_mapping: { FLOW-MAPPING-START }
-# flow_sequence_entry: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START KEY }
-# flow_mapping_entry: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START KEY }
-
-__all__ = ['Parser', 'ParserError']
-
-from .error import MarkedYAMLError
-from .tokens import *
-from .events import *
-from .scanner import *
-
-class ParserError(MarkedYAMLError):
-    pass
-
-class Parser:
-    # Since writing a recursive descent parser is a straightforward task, we
-    # do not give many comments here.
-
-    DEFAULT_TAGS = {
-        '!':   '!',
-        '!!':  'tag:yaml.org,2002:',
-    }
-
-    def __init__(self):
-        self.current_event = None
-        self.yaml_version = None
-        self.tag_handles = {}
-        self.states = []
-        self.marks = []
-        self.state = self.parse_stream_start
-
-    def dispose(self):
-        # Reset the state attributes (to clear self-references)
-        self.states = []
-        self.state = None
-
-    def check_event(self, *choices):
-        # Check the type of the next event.
-        if self.current_event is None:
-            if self.state:
-                self.current_event = self.state()
-        if self.current_event is not None:
-            if not choices:
-                return True
-            for choice in choices:
-                if isinstance(self.current_event, choice):
-                    return True
-        return False
-
-    def peek_event(self):
-        # Get the next event.
-        if self.current_event is None:
-            if self.state:
-                self.current_event = self.state()
-        return self.current_event
-
-    def get_event(self):
-        # Get the next event and proceed further.
-        if self.current_event is None:
-            if self.state:
-                self.current_event = self.state()
-        value = self.current_event
-        self.current_event = None
-        return value
-
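
check_event/peek_event/get_event give consumers the same one-event lookahead
the LL(1) grammar needs; a typical driver loop over a loader (which mixes
this Parser in) looks like:

    import yaml

    loader = yaml.Loader('a: 1')
    while not loader.check_event(yaml.StreamEndEvent):
        print(loader.get_event())
    print(loader.get_event())  # StreamEndEvent()
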
-    # stream    ::= STREAM-START implicit_document? explicit_document* STREAM-END
-    # implicit_document ::= block_node DOCUMENT-END*
-    # explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
-
-    def parse_stream_start(self):
-
-        # Parse the stream start.
-        token = self.get_token()
-        event = StreamStartEvent(token.start_mark, token.end_mark,
-                encoding=token.encoding)
-
-        # Prepare the next state.
-        self.state = self.parse_implicit_document_start
-
-        return event
-
-    def parse_implicit_document_start(self):
-
-        # Parse an implicit document.
-        if not self.check_token(DirectiveToken, DocumentStartToken,
-                StreamEndToken):
-            self.tag_handles = self.DEFAULT_TAGS
-            token = self.peek_token()
-            start_mark = end_mark = token.start_mark
-            event = DocumentStartEvent(start_mark, end_mark,
-                    explicit=False)
-
-            # Prepare the next state.
-            self.states.append(self.parse_document_end)
-            self.state = self.parse_block_node
-
-            return event
-
-        else:
-            return self.parse_document_start()
-
-    def parse_document_start(self):
-
-        # Parse any extra document end indicators.
-        while self.check_token(DocumentEndToken):
-            self.get_token()
-
-        # Parse an explicit document.
-        if not self.check_token(StreamEndToken):
-            token = self.peek_token()
-            start_mark = token.start_mark
-            version, tags = self.process_directives()
-            if not self.check_token(DocumentStartToken):
-                raise ParserError(None, None,
-                        "expected '<document start>', but found %r"
-                        % self.peek_token().id,
-                        self.peek_token().start_mark)
-            token = self.get_token()
-            end_mark = token.end_mark
-            event = DocumentStartEvent(start_mark, end_mark,
-                    explicit=True, version=version, tags=tags)
-            self.states.append(self.parse_document_end)
-            self.state = self.parse_document_content
-        else:
-            # Parse the end of the stream.
-            token = self.get_token()
-            event = StreamEndEvent(token.start_mark, token.end_mark)
-            assert not self.states
-            assert not self.marks
-            self.state = None
-        return event
-
-    def parse_document_end(self):
-
-        # Parse the document end.
-        token = self.peek_token()
-        start_mark = end_mark = token.start_mark
-        explicit = False
-        if self.check_token(DocumentEndToken):
-            token = self.get_token()
-            end_mark = token.end_mark
-            explicit = True
-        event = DocumentEndEvent(start_mark, end_mark,
-                explicit=explicit)
-
-        # Prepare the next state.
-        self.state = self.parse_document_start
-
-        return event
-
-    def parse_document_content(self):
-        if self.check_token(DirectiveToken,
-                DocumentStartToken, DocumentEndToken, StreamEndToken):
-            event = self.process_empty_scalar(self.peek_token().start_mark)
-            self.state = self.states.pop()
-            return event
-        else:
-            return self.parse_block_node()
-
-    def process_directives(self):
-        self.yaml_version = None
-        self.tag_handles = {}
-        while self.check_token(DirectiveToken):
-            token = self.get_token()
-            if token.name == 'YAML':
-                if self.yaml_version is not None:
-                    raise ParserError(None, None,
-                            "found duplicate YAML directive", token.start_mark)
-                major, minor = token.value
-                if major != 1:
-                    raise ParserError(None, None,
-                            "found incompatible YAML document (version 1.* is required)",
-                            token.start_mark)
-                self.yaml_version = token.value
-            elif token.name == 'TAG':
-                handle, prefix = token.value
-                if handle in self.tag_handles:
-                    raise ParserError(None, None,
-                            "duplicate tag handle %r" % handle,
-                            token.start_mark)
-                self.tag_handles[handle] = prefix
-        if self.tag_handles:
-            value = self.yaml_version, self.tag_handles.copy()
-        else:
-            value = self.yaml_version, None
-        for key in self.DEFAULT_TAGS:
-            if key not in self.tag_handles:
-                self.tag_handles[key] = self.DEFAULT_TAGS[key]
-        return value
-
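
The handles collected here are what parse_node consults to expand short
tags; e.g. a %TAG directive maps a custom handle onto a full prefix:

    import yaml

    doc = '%TAG !e! tag:example.com,2014:\n--- !e!point {x: 1}'
    print(yaml.compose(doc).tag)  # tag:example.com,2014:point
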
-    # block_node_or_indentless_sequence ::= ALIAS
-    #               | properties (block_content | indentless_block_sequence)?
-    #               | block_content
-    #               | indentless_block_sequence
-    # block_node    ::= ALIAS
-    #                   | properties block_content?
-    #                   | block_content
-    # flow_node     ::= ALIAS
-    #                   | properties flow_content?
-    #                   | flow_content
-    # properties    ::= TAG ANCHOR? | ANCHOR TAG?
-    # block_content     ::= block_collection | flow_collection | SCALAR
-    # flow_content      ::= flow_collection | SCALAR
-    # block_collection  ::= block_sequence | block_mapping
-    # flow_collection   ::= flow_sequence | flow_mapping
-
-    def parse_block_node(self):
-        return self.parse_node(block=True)
-
-    def parse_flow_node(self):
-        return self.parse_node()
-
-    def parse_block_node_or_indentless_sequence(self):
-        return self.parse_node(block=True, indentless_sequence=True)
-
-    def parse_node(self, block=False, indentless_sequence=False):
-        if self.check_token(AliasToken):
-            token = self.get_token()
-            event = AliasEvent(token.value, token.start_mark, token.end_mark)
-            self.state = self.states.pop()
-        else:
-            anchor = None
-            tag = None
-            start_mark = end_mark = tag_mark = None
-            if self.check_token(AnchorToken):
-                token = self.get_token()
-                start_mark = token.start_mark
-                end_mark = token.end_mark
-                anchor = token.value
-                if self.check_token(TagToken):
-                    token = self.get_token()
-                    tag_mark = token.start_mark
-                    end_mark = token.end_mark
-                    tag = token.value
-            elif self.check_token(TagToken):
-                token = self.get_token()
-                start_mark = tag_mark = token.start_mark
-                end_mark = token.end_mark
-                tag = token.value
-                if self.check_token(AnchorToken):
-                    token = self.get_token()
-                    end_mark = token.end_mark
-                    anchor = token.value
-            if tag is not None:
-                handle, suffix = tag
-                if handle is not None:
-                    if handle not in self.tag_handles:
-                        raise ParserError("while parsing a node", start_mark,
-                                "found undefined tag handle %r" % handle,
-                                tag_mark)
-                    tag = self.tag_handles[handle]+suffix
-                else:
-                    tag = suffix
-            #if tag == '!':
-            #    raise ParserError("while parsing a node", start_mark,
-            #            "found non-specific tag '!'", tag_mark,
-            #            "Please check 'http://pyyaml.org/wiki/YAMLNonSpecificTag' and share your opinion.")
-            if start_mark is None:
-                start_mark = end_mark = self.peek_token().start_mark
-            event = None
-            implicit = (tag is None or tag == '!')
-            if indentless_sequence and self.check_token(BlockEntryToken):
-                end_mark = self.peek_token().end_mark
-                event = SequenceStartEvent(anchor, tag, implicit,
-                        start_mark, end_mark)
-                self.state = self.parse_indentless_sequence_entry
-            else:
-                if self.check_token(ScalarToken):
-                    token = self.get_token()
-                    end_mark = token.end_mark
-                    if (token.plain and tag is None) or tag == '!':
-                        implicit = (True, False)
-                    elif tag is None:
-                        implicit = (False, True)
-                    else:
-                        implicit = (False, False)
-                    event = ScalarEvent(anchor, tag, implicit, token.value,
-                            start_mark, end_mark, style=token.style)
-                    self.state = self.states.pop()
-                elif self.check_token(FlowSequenceStartToken):
-                    end_mark = self.peek_token().end_mark
-                    event = SequenceStartEvent(anchor, tag, implicit,
-                            start_mark, end_mark, flow_style=True)
-                    self.state = self.parse_flow_sequence_first_entry
-                elif self.check_token(FlowMappingStartToken):
-                    end_mark = self.peek_token().end_mark
-                    event = MappingStartEvent(anchor, tag, implicit,
-                            start_mark, end_mark, flow_style=True)
-                    self.state = self.parse_flow_mapping_first_key
-                elif block and self.check_token(BlockSequenceStartToken):
-                    end_mark = self.peek_token().start_mark
-                    event = SequenceStartEvent(anchor, tag, implicit,
-                            start_mark, end_mark, flow_style=False)
-                    self.state = self.parse_block_sequence_first_entry
-                elif block and self.check_token(BlockMappingStartToken):
-                    end_mark = self.peek_token().start_mark
-                    event = MappingStartEvent(anchor, tag, implicit,
-                            start_mark, end_mark, flow_style=False)
-                    self.state = self.parse_block_mapping_first_key
-                elif anchor is not None or tag is not None:
-                    # Empty scalars are allowed even if a tag or an anchor is
-                    # specified.
-                    event = ScalarEvent(anchor, tag, (implicit, False), '',
-                            start_mark, end_mark)
-                    self.state = self.states.pop()
-                else:
-                    if block:
-                        node = 'block'
-                    else:
-                        node = 'flow'
-                    token = self.peek_token()
-                    raise ParserError("while parsing a %s node" % node, start_mark,
-                            "expected the node content, but found %r" % token.id,
-                            token.start_mark)
-        return event
-
-    # block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END
-
-    def parse_block_sequence_first_entry(self):
-        token = self.get_token()
-        self.marks.append(token.start_mark)
-        return self.parse_block_sequence_entry()
-
-    def parse_block_sequence_entry(self):
-        if self.check_token(BlockEntryToken):
-            token = self.get_token()
-            if not self.check_token(BlockEntryToken, BlockEndToken):
-                self.states.append(self.parse_block_sequence_entry)
-                return self.parse_block_node()
-            else:
-                self.state = self.parse_block_sequence_entry
-                return self.process_empty_scalar(token.end_mark)
-        if not self.check_token(BlockEndToken):
-            token = self.peek_token()
-            raise ParserError("while parsing a block collection", self.marks[-1],
-                    "expected <block end>, but found %r" % token.id, token.start_mark)
-        token = self.get_token()
-        event = SequenceEndEvent(token.start_mark, token.end_mark)
-        self.state = self.states.pop()
-        self.marks.pop()
-        return event
-
-    # indentless_sequence ::= (BLOCK-ENTRY block_node?)+
-
-    def parse_indentless_sequence_entry(self):
-        if self.check_token(BlockEntryToken):
-            token = self.get_token()
-            if not self.check_token(BlockEntryToken,
-                    KeyToken, ValueToken, BlockEndToken):
-                self.states.append(self.parse_indentless_sequence_entry)
-                return self.parse_block_node()
-            else:
-                self.state = self.parse_indentless_sequence_entry
-                return self.process_empty_scalar(token.end_mark)
-        token = self.peek_token()
-        event = SequenceEndEvent(token.start_mark, token.start_mark)
-        self.state = self.states.pop()
-        return event
-
-    # block_mapping     ::= BLOCK-MAPPING_START
-    #                       ((KEY block_node_or_indentless_sequence?)?
-    #                       (VALUE block_node_or_indentless_sequence?)?)*
-    #                       BLOCK-END
-
-    def parse_block_mapping_first_key(self):
-        token = self.get_token()
-        self.marks.append(token.start_mark)
-        return self.parse_block_mapping_key()
-
-    def parse_block_mapping_key(self):
-        if self.check_token(KeyToken):
-            token = self.get_token()
-            if not self.check_token(KeyToken, ValueToken, BlockEndToken):
-                self.states.append(self.parse_block_mapping_value)
-                return self.parse_block_node_or_indentless_sequence()
-            else:
-                self.state = self.parse_block_mapping_value
-                return self.process_empty_scalar(token.end_mark)
-        if not self.check_token(BlockEndToken):
-            token = self.peek_token()
-            raise ParserError("while parsing a block mapping", self.marks[-1],
-                    "expected <block end>, but found %r" % token.id, token.start_mark)
-        token = self.get_token()
-        event = MappingEndEvent(token.start_mark, token.end_mark)
-        self.state = self.states.pop()
-        self.marks.pop()
-        return event
-
-    def parse_block_mapping_value(self):
-        if self.check_token(ValueToken):
-            token = self.get_token()
-            if not self.check_token(KeyToken, ValueToken, BlockEndToken):
-                self.states.append(self.parse_block_mapping_key)
-                return self.parse_block_node_or_indentless_sequence()
-            else:
-                self.state = self.parse_block_mapping_key
-                return self.process_empty_scalar(token.end_mark)
-        else:
-            self.state = self.parse_block_mapping_key
-            token = self.peek_token()
-            return self.process_empty_scalar(token.start_mark)
-
-    # flow_sequence     ::= FLOW-SEQUENCE-START
-    #                       (flow_sequence_entry FLOW-ENTRY)*
-    #                       flow_sequence_entry?
-    #                       FLOW-SEQUENCE-END
-    # flow_sequence_entry   ::= flow_node | KEY flow_node? (VALUE flow_node?)?
-    #
-    # Note that while production rules for both flow_sequence_entry and
-    # flow_mapping_entry are equal, their interpretations are different.
-    # For `flow_sequence_entry`, the part `KEY flow_node? (VALUE flow_node?)?`
-    # generates an inline mapping (set syntax).
-
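
This is why a 'key: value' pair inside a flow sequence loads as a
single-entry mapping item:

    import yaml

    print(yaml.safe_load('[a: 1, b]'))  # [{'a': 1}, 'b']
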
-    def parse_flow_sequence_first_entry(self):
-        token = self.get_token()
-        self.marks.append(token.start_mark)
-        return self.parse_flow_sequence_entry(first=True)
-
-    def parse_flow_sequence_entry(self, first=False):
-        if not self.check_token(FlowSequenceEndToken):
-            if not first:
-                if self.check_token(FlowEntryToken):
-                    self.get_token()
-                else:
-                    token = self.peek_token()
-                    raise ParserError("while parsing a flow sequence", self.marks[-1],
-                            "expected ',' or ']', but got %r" % token.id, token.start_mark)
-            
-            if self.check_token(KeyToken):
-                token = self.peek_token()
-                event = MappingStartEvent(None, None, True,
-                        token.start_mark, token.end_mark,
-                        flow_style=True)
-                self.state = self.parse_flow_sequence_entry_mapping_key
-                return event
-            elif not self.check_token(FlowSequenceEndToken):
-                self.states.append(self.parse_flow_sequence_entry)
-                return self.parse_flow_node()
-        token = self.get_token()
-        event = SequenceEndEvent(token.start_mark, token.end_mark)
-        self.state = self.states.pop()
-        self.marks.pop()
-        return event
-
-    def parse_flow_sequence_entry_mapping_key(self):
-        token = self.get_token()
-        if not self.check_token(ValueToken,
-                FlowEntryToken, FlowSequenceEndToken):
-            self.states.append(self.parse_flow_sequence_entry_mapping_value)
-            return self.parse_flow_node()
-        else:
-            self.state = self.parse_flow_sequence_entry_mapping_value
-            return self.process_empty_scalar(token.end_mark)
-
-    def parse_flow_sequence_entry_mapping_value(self):
-        if self.check_token(ValueToken):
-            token = self.get_token()
-            if not self.check_token(FlowEntryToken, FlowSequenceEndToken):
-                self.states.append(self.parse_flow_sequence_entry_mapping_end)
-                return self.parse_flow_node()
-            else:
-                self.state = self.parse_flow_sequence_entry_mapping_end
-                return self.process_empty_scalar(token.end_mark)
-        else:
-            self.state = self.parse_flow_sequence_entry_mapping_end
-            token = self.peek_token()
-            return self.process_empty_scalar(token.start_mark)
-
-    def parse_flow_sequence_entry_mapping_end(self):
-        self.state = self.parse_flow_sequence_entry
-        token = self.peek_token()
-        return MappingEndEvent(token.start_mark, token.start_mark)
-
-    # flow_mapping  ::= FLOW-MAPPING-START
-    #                   (flow_mapping_entry FLOW-ENTRY)*
-    #                   flow_mapping_entry?
-    #                   FLOW-MAPPING-END
-    # flow_mapping_entry    ::= flow_node | KEY flow_node? (VALUE flow_node?)?
-
-    def parse_flow_mapping_first_key(self):
-        token = self.get_token()
-        self.marks.append(token.start_mark)
-        return self.parse_flow_mapping_key(first=True)
-
-    def parse_flow_mapping_key(self, first=False):
-        if not self.check_token(FlowMappingEndToken):
-            if not first:
-                if self.check_token(FlowEntryToken):
-                    self.get_token()
-                else:
-                    token = self.peek_token()
-                    raise ParserError("while parsing a flow mapping", self.marks[-1],
-                            "expected ',' or '}', but got %r" % token.id, token.start_mark)
-            if self.check_token(KeyToken):
-                token = self.get_token()
-                if not self.check_token(ValueToken,
-                        FlowEntryToken, FlowMappingEndToken):
-                    self.states.append(self.parse_flow_mapping_value)
-                    return self.parse_flow_node()
-                else:
-                    self.state = self.parse_flow_mapping_value
-                    return self.process_empty_scalar(token.end_mark)
-            elif not self.check_token(FlowMappingEndToken):
-                self.states.append(self.parse_flow_mapping_empty_value)
-                return self.parse_flow_node()
-        token = self.get_token()
-        event = MappingEndEvent(token.start_mark, token.end_mark)
-        self.state = self.states.pop()
-        self.marks.pop()
-        return event
-
-    def parse_flow_mapping_value(self):
-        if self.check_token(ValueToken):
-            token = self.get_token()
-            if not self.check_token(FlowEntryToken, FlowMappingEndToken):
-                self.states.append(self.parse_flow_mapping_key)
-                return self.parse_flow_node()
-            else:
-                self.state = self.parse_flow_mapping_key
-                return self.process_empty_scalar(token.end_mark)
-        else:
-            self.state = self.parse_flow_mapping_key
-            token = self.peek_token()
-            return self.process_empty_scalar(token.start_mark)
-
-    def parse_flow_mapping_empty_value(self):
-        self.state = self.parse_flow_mapping_key
-        return self.process_empty_scalar(self.peek_token().start_mark)
-
-    def process_empty_scalar(self, mark):
-        return ScalarEvent(None, None, (True, False), '', mark, mark)
-
diff --git a/lib/spack/external/yaml/lib3/yaml/representer.py b/lib/spack/external/yaml/lib3/yaml/representer.py
deleted file mode 100644
index b9e65c5109..0000000000
--- a/lib/spack/external/yaml/lib3/yaml/representer.py
+++ /dev/null
@@ -1,387 +0,0 @@
-
-__all__ = ['BaseRepresenter', 'SafeRepresenter', 'Representer',
-    'RepresenterError']
-
-from .error import *
-from .nodes import *
-
-import datetime, sys, copyreg, types, base64, collections
-
-class RepresenterError(YAMLError):
-    pass
-
-class BaseRepresenter:
-
-    yaml_representers = {}
-    yaml_multi_representers = {}
-
-    def __init__(self, default_style=None, default_flow_style=None):
-        self.default_style = default_style
-        self.default_flow_style = default_flow_style
-        self.represented_objects = {}
-        self.object_keeper = []
-        self.alias_key = None
-
-    def represent(self, data):
-        node = self.represent_data(data)
-        self.serialize(node)
-        self.represented_objects = {}
-        self.object_keeper = []
-        self.alias_key = None
-
-    def represent_data(self, data):
-        if self.ignore_aliases(data):
-            self.alias_key = None
-        else:
-            self.alias_key = id(data)
-        if self.alias_key is not None:
-            if self.alias_key in self.represented_objects:
-                node = self.represented_objects[self.alias_key]
-                #if node is None:
-                #    raise RepresenterError("recursive objects are not allowed: %r" % data)
-                return node
-            #self.represented_objects[alias_key] = None
-            self.object_keeper.append(data)
-        data_types = type(data).__mro__
-        if data_types[0] in self.yaml_representers:
-            node = self.yaml_representers[data_types[0]](self, data)
-        else:
-            for data_type in data_types:
-                if data_type in self.yaml_multi_representers:
-                    node = self.yaml_multi_representers[data_type](self, data)
-                    break
-            else:
-                if None in self.yaml_multi_representers:
-                    node = self.yaml_multi_representers[None](self, data)
-                elif None in self.yaml_representers:
-                    node = self.yaml_representers[None](self, data)
-                else:
-                    node = ScalarNode(None, str(data))
-        #if alias_key is not None:
-        #    self.represented_objects[alias_key] = node
-        return node
-
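
represent_data() keys represented_objects on id(data), which is how a shared
object becomes an anchor on first use and an alias afterwards. A minimal
sketch with PyYAML's safe_dump (anchor names and exact formatting may vary
by version):

    import yaml

    shared = {'x': 1}
    # The second occurrence is found in represented_objects and emitted
    # as an alias of the first occurrence's anchor.
    print(yaml.safe_dump([shared, shared]))
    # - &id001 {x: 1}
    # - *id001
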
-    @classmethod
-    def add_representer(cls, data_type, representer):
-        if 'yaml_representers' not in cls.__dict__:
-            cls.yaml_representers = cls.yaml_representers.copy()
-        cls.yaml_representers[data_type] = representer
-
-    @classmethod
-    def add_multi_representer(cls, data_type, representer):
-        if 'yaml_multi_representers' not in cls.__dict__:
-            cls.yaml_multi_representers = cls.yaml_multi_representers.copy()
-        cls.yaml_multi_representers[data_type] = representer
-
-    def represent_scalar(self, tag, value, style=None):
-        if style is None:
-            style = self.default_style
-        node = ScalarNode(tag, value, style=style)
-        if self.alias_key is not None:
-            self.represented_objects[self.alias_key] = node
-        return node
-
-    def represent_sequence(self, tag, sequence, flow_style=None):
-        value = []
-        node = SequenceNode(tag, value, flow_style=flow_style)
-        if self.alias_key is not None:
-            self.represented_objects[self.alias_key] = node
-        best_style = True
-        for item in sequence:
-            node_item = self.represent_data(item)
-            if not (isinstance(node_item, ScalarNode) and not node_item.style):
-                best_style = False
-            value.append(node_item)
-        if flow_style is None:
-            if self.default_flow_style is not None:
-                node.flow_style = self.default_flow_style
-            else:
-                node.flow_style = best_style
-        return node
-
-    def represent_mapping(self, tag, mapping, flow_style=None):
-        value = []
-        node = MappingNode(tag, value, flow_style=flow_style)
-        if self.alias_key is not None:
-            self.represented_objects[self.alias_key] = node
-        best_style = True
-        if hasattr(mapping, 'items'):
-            mapping = list(mapping.items())
-            try:
-                mapping = sorted(mapping)
-            except TypeError:
-                pass
-        for item_key, item_value in mapping:
-            node_key = self.represent_data(item_key)
-            node_value = self.represent_data(item_value)
-            if not (isinstance(node_key, ScalarNode) and not node_key.style):
-                best_style = False
-            if not (isinstance(node_value, ScalarNode) and not node_value.style):
-                best_style = False
-            value.append((node_key, node_value))
-        if flow_style is None:
-            if self.default_flow_style is not None:
-                node.flow_style = self.default_flow_style
-            else:
-                node.flow_style = best_style
-        return node
-
-    def ignore_aliases(self, data):
-        return False
-
-class SafeRepresenter(BaseRepresenter):
-
-    def ignore_aliases(self, data):
-        if data is None:
-            return True
-        if isinstance(data, tuple) and data == ():
-            return True
-        if isinstance(data, (str, bytes, bool, int, float)):
-            return True
-
-    def represent_none(self, data):
-        return self.represent_scalar('tag:yaml.org,2002:null', 'null')
-
-    def represent_str(self, data):
-        return self.represent_scalar('tag:yaml.org,2002:str', data)
-
-    def represent_binary(self, data):
-        if hasattr(base64, 'encodebytes'):
-            data = base64.encodebytes(data).decode('ascii')
-        else:
-            data = base64.encodestring(data).decode('ascii')
-        return self.represent_scalar('tag:yaml.org,2002:binary', data, style='|')
-
-    def represent_bool(self, data):
-        if data:
-            value = 'true'
-        else:
-            value = 'false'
-        return self.represent_scalar('tag:yaml.org,2002:bool', value)
-
-    def represent_int(self, data):
-        return self.represent_scalar('tag:yaml.org,2002:int', str(data))
-
-    inf_value = 1e300
-    while repr(inf_value) != repr(inf_value*inf_value):
-        inf_value *= inf_value
-
-    def represent_float(self, data):
-        if data != data or (data == 0.0 and data == 1.0):
-            value = '.nan'
-        elif data == self.inf_value:
-            value = '.inf'
-        elif data == -self.inf_value:
-            value = '-.inf'
-        else:
-            value = repr(data).lower()
-            # Note that in some cases `repr(data)` represents a float number
-            # without the decimal parts.  For instance:
-            #   >>> repr(1e17)
-            #   '1e+17'
-            # Unfortunately, this is not a valid float representation according
-            # to the definition of the `!!float` tag.  We fix this by adding
-            # '.0' before the 'e' symbol.
-            if '.' not in value and 'e' in value:
-                value = value.replace('e', '.0e', 1)
-        return self.represent_scalar('tag:yaml.org,2002:float', value)
-
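
A quick illustration of the '.0' fix above, assuming a Python 3 PyYAML
(exact exponent formatting can differ across versions):

    import yaml

    # repr(1e17) == '1e+17' has no decimal part, so represent_float
    # rewrites it to the spec-valid '1.0e+17' before emitting.
    print(yaml.safe_dump(1e17))   # roughly: 1.0e+17
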
-    def represent_list(self, data):
-        #pairs = (len(data) > 0 and isinstance(data, list))
-        #if pairs:
-        #    for item in data:
-        #        if not isinstance(item, tuple) or len(item) != 2:
-        #            pairs = False
-        #            break
-        #if not pairs:
-            return self.represent_sequence('tag:yaml.org,2002:seq', data)
-        #value = []
-        #for item_key, item_value in data:
-        #    value.append(self.represent_mapping(u'tag:yaml.org,2002:map',
-        #        [(item_key, item_value)]))
-        #return SequenceNode(u'tag:yaml.org,2002:pairs', value)
-
-    def represent_dict(self, data):
-        return self.represent_mapping('tag:yaml.org,2002:map', data)
-
-    def represent_set(self, data):
-        value = {}
-        for key in data:
-            value[key] = None
-        return self.represent_mapping('tag:yaml.org,2002:set', value)
-
-    def represent_date(self, data):
-        value = data.isoformat()
-        return self.represent_scalar('tag:yaml.org,2002:timestamp', value)
-
-    def represent_datetime(self, data):
-        value = data.isoformat(' ')
-        return self.represent_scalar('tag:yaml.org,2002:timestamp', value)
-
-    def represent_yaml_object(self, tag, data, cls, flow_style=None):
-        if hasattr(data, '__getstate__'):
-            state = data.__getstate__()
-        else:
-            state = data.__dict__.copy()
-        return self.represent_mapping(tag, state, flow_style=flow_style)
-
-    def represent_undefined(self, data):
-        raise RepresenterError("cannot represent an object: %s" % data)
-
-SafeRepresenter.add_representer(type(None),
-        SafeRepresenter.represent_none)
-
-SafeRepresenter.add_representer(str,
-        SafeRepresenter.represent_str)
-
-SafeRepresenter.add_representer(bytes,
-        SafeRepresenter.represent_binary)
-
-SafeRepresenter.add_representer(bool,
-        SafeRepresenter.represent_bool)
-
-SafeRepresenter.add_representer(int,
-        SafeRepresenter.represent_int)
-
-SafeRepresenter.add_representer(float,
-        SafeRepresenter.represent_float)
-
-SafeRepresenter.add_representer(list,
-        SafeRepresenter.represent_list)
-
-SafeRepresenter.add_representer(tuple,
-        SafeRepresenter.represent_list)
-
-SafeRepresenter.add_representer(dict,
-        SafeRepresenter.represent_dict)
-
-SafeRepresenter.add_representer(set,
-        SafeRepresenter.represent_set)
-
-SafeRepresenter.add_representer(datetime.date,
-        SafeRepresenter.represent_date)
-
-SafeRepresenter.add_representer(datetime.datetime,
-        SafeRepresenter.represent_datetime)
-
-SafeRepresenter.add_representer(None,
-        SafeRepresenter.represent_undefined)
-
-class Representer(SafeRepresenter):
-
-    def represent_complex(self, data):
-        if data.imag == 0.0:
-            data = '%r' % data.real
-        elif data.real == 0.0:
-            data = '%rj' % data.imag
-        elif data.imag > 0:
-            data = '%r+%rj' % (data.real, data.imag)
-        else:
-            data = '%r%rj' % (data.real, data.imag)
-        return self.represent_scalar('tag:yaml.org,2002:python/complex', data)
-
-    def represent_tuple(self, data):
-        return self.represent_sequence('tag:yaml.org,2002:python/tuple', data)
-
-    def represent_name(self, data):
-        name = '%s.%s' % (data.__module__, data.__name__)
-        return self.represent_scalar('tag:yaml.org,2002:python/name:'+name, '')
-
-    def represent_module(self, data):
-        return self.represent_scalar(
-                'tag:yaml.org,2002:python/module:'+data.__name__, '')
-
-    def represent_object(self, data):
-        # We use __reduce__ API to save the data. data.__reduce__ returns
-        # a tuple of length 2-5:
-        #   (function, args, state, listitems, dictitems)
-
-        # For reconstructing, we call function(*args), then set its state,
-        # listitems, and dictitems if they are not None.
-
-        # A special case is when function.__name__ == '__newobj__'. In this
-        # case we create the object with args[0].__new__(*args).
-
-        # Another special case is when __reduce__ returns a string - we don't
-        # support it.
-
-        # We produce a !!python/object, !!python/object/new or
-        # !!python/object/apply node.
-
-        cls = type(data)
-        if cls in copyreg.dispatch_table:
-            reduce = copyreg.dispatch_table[cls](data)
-        elif hasattr(data, '__reduce_ex__'):
-            reduce = data.__reduce_ex__(2)
-        elif hasattr(data, '__reduce__'):
-            reduce = data.__reduce__()
-        else:
-            raise RepresenterError("cannot represent object: %r" % data)
-        reduce = (list(reduce)+[None]*5)[:5]
-        function, args, state, listitems, dictitems = reduce
-        args = list(args)
-        if state is None:
-            state = {}
-        if listitems is not None:
-            listitems = list(listitems)
-        if dictitems is not None:
-            dictitems = dict(dictitems)
-        if function.__name__ == '__newobj__':
-            function = args[0]
-            args = args[1:]
-            tag = 'tag:yaml.org,2002:python/object/new:'
-            newobj = True
-        else:
-            tag = 'tag:yaml.org,2002:python/object/apply:'
-            newobj = False
-        function_name = '%s.%s' % (function.__module__, function.__name__)
-        if not args and not listitems and not dictitems \
-                and isinstance(state, dict) and newobj:
-            return self.represent_mapping(
-                    'tag:yaml.org,2002:python/object:'+function_name, state)
-        if not listitems and not dictitems  \
-                and isinstance(state, dict) and not state:
-            return self.represent_sequence(tag+function_name, args)
-        value = {}
-        if args:
-            value['args'] = args
-        if state or not isinstance(state, dict):
-            value['state'] = state
-        if listitems:
-            value['listitems'] = listitems
-        if dictitems:
-            value['dictitems'] = dictitems
-        return self.represent_mapping(tag+function_name, value)
-
-    def represent_ordered_dict(self, data):
-        # Provide uniform representation across different Python versions.
-        data_type = type(data)
-        tag = 'tag:yaml.org,2002:python/object/apply:%s.%s' \
-                % (data_type.__module__, data_type.__name__)
-        items = [[key, value] for key, value in data.items()]
-        return self.represent_sequence(tag, [items])
-
-Representer.add_representer(complex,
-        Representer.represent_complex)
-
-Representer.add_representer(tuple,
-        Representer.represent_tuple)
-
-Representer.add_representer(type,
-        Representer.represent_name)
-
-Representer.add_representer(collections.OrderedDict,
-        Representer.represent_ordered_dict)
-
-Representer.add_representer(types.FunctionType,
-        Representer.represent_name)
-
-Representer.add_representer(types.BuiltinFunctionType,
-        Representer.represent_name)
-
-Representer.add_representer(types.ModuleType,
-        Representer.represent_module)
-
-Representer.add_multi_representer(object,
-        Representer.represent_object)
-
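
The add_representer() registrations above are the same hook PyYAML exposes
publicly. A minimal sketch of wiring a custom type into the dispatch that
represent_data() walks via type(data).__mro__ (the Point class and !Point
tag are illustrative only):

    import yaml

    class Point:
        def __init__(self, x, y):
            self.x, self.y = x, y

    def represent_point(dumper, data):
        # Reusing represent_mapping() means the node participates in
        # aliasing and style selection like any built-in type.
        return dumper.represent_mapping('!Point', {'x': data.x, 'y': data.y})

    yaml.add_representer(Point, represent_point)
    print(yaml.dump(Point(1, 2)))   # roughly: !Point {x: 1, y: 2}
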
diff --git a/lib/spack/external/yaml/lib3/yaml/resolver.py b/lib/spack/external/yaml/lib3/yaml/resolver.py
deleted file mode 100644
index 02b82e73ee..0000000000
--- a/lib/spack/external/yaml/lib3/yaml/resolver.py
+++ /dev/null
@@ -1,227 +0,0 @@
-
-__all__ = ['BaseResolver', 'Resolver']
-
-from .error import *
-from .nodes import *
-
-import re
-
-class ResolverError(YAMLError):
-    pass
-
-class BaseResolver:
-
-    DEFAULT_SCALAR_TAG = 'tag:yaml.org,2002:str'
-    DEFAULT_SEQUENCE_TAG = 'tag:yaml.org,2002:seq'
-    DEFAULT_MAPPING_TAG = 'tag:yaml.org,2002:map'
-
-    yaml_implicit_resolvers = {}
-    yaml_path_resolvers = {}
-
-    def __init__(self):
-        self.resolver_exact_paths = []
-        self.resolver_prefix_paths = []
-
-    @classmethod
-    def add_implicit_resolver(cls, tag, regexp, first):
-        if 'yaml_implicit_resolvers' not in cls.__dict__:
-            implicit_resolvers = {}
-            for key in cls.yaml_implicit_resolvers:
-                implicit_resolvers[key] = cls.yaml_implicit_resolvers[key][:]
-            cls.yaml_implicit_resolvers = implicit_resolvers
-        if first is None:
-            first = [None]
-        for ch in first:
-            cls.yaml_implicit_resolvers.setdefault(ch, []).append((tag, regexp))
-
-    @classmethod
-    def add_path_resolver(cls, tag, path, kind=None):
-        # Note: `add_path_resolver` is experimental.  The API could be changed.
-        # `path` is a pattern that is matched against the path from the
-        # root to the node that is being considered.  `path` elements are
-        # tuples `(node_check, index_check)`.  `node_check` is a node class:
-        # `ScalarNode`, `SequenceNode`, `MappingNode` or `None`.  `None`
-        # matches any kind of node.  `index_check` could be `None`, a boolean
-        # value, a string value, or a number.  `None` and `False` match against
-        # any _value_ of sequence and mapping nodes.  `True` matches against
-        # any _key_ of a mapping node.  A string `index_check` matches against
-        # a mapping value that corresponds to a scalar key whose content is
-        # equal to the `index_check` value.  An integer `index_check` matches
-        # against a sequence value with the index equal to `index_check`.
-        if 'yaml_path_resolvers' not in cls.__dict__:
-            cls.yaml_path_resolvers = cls.yaml_path_resolvers.copy()
-        new_path = []
-        for element in path:
-            if isinstance(element, (list, tuple)):
-                if len(element) == 2:
-                    node_check, index_check = element
-                elif len(element) == 1:
-                    node_check = element[0]
-                    index_check = True
-                else:
-                    raise ResolverError("Invalid path element: %s" % element)
-            else:
-                node_check = None
-                index_check = element
-            if node_check is str:
-                node_check = ScalarNode
-            elif node_check is list:
-                node_check = SequenceNode
-            elif node_check is dict:
-                node_check = MappingNode
-            elif node_check not in [ScalarNode, SequenceNode, MappingNode]  \
-                    and not isinstance(node_check, str) \
-                    and node_check is not None:
-                raise ResolverError("Invalid node checker: %s" % node_check)
-            if not isinstance(index_check, (str, int))  \
-                    and index_check is not None:
-                raise ResolverError("Invalid index checker: %s" % index_check)
-            new_path.append((node_check, index_check))
-        if kind is str:
-            kind = ScalarNode
-        elif kind is list:
-            kind = SequenceNode
-        elif kind is dict:
-            kind = MappingNode
-        elif kind not in [ScalarNode, SequenceNode, MappingNode]    \
-                and kind is not None:
-            raise ResolverError("Invalid node kind: %s" % kind)
-        cls.yaml_path_resolvers[tuple(new_path), kind] = tag
-
-    def descend_resolver(self, current_node, current_index):
-        if not self.yaml_path_resolvers:
-            return
-        exact_paths = {}
-        prefix_paths = []
-        if current_node:
-            depth = len(self.resolver_prefix_paths)
-            for path, kind in self.resolver_prefix_paths[-1]:
-                if self.check_resolver_prefix(depth, path, kind,
-                        current_node, current_index):
-                    if len(path) > depth:
-                        prefix_paths.append((path, kind))
-                    else:
-                        exact_paths[kind] = self.yaml_path_resolvers[path, kind]
-        else:
-            for path, kind in self.yaml_path_resolvers:
-                if not path:
-                    exact_paths[kind] = self.yaml_path_resolvers[path, kind]
-                else:
-                    prefix_paths.append((path, kind))
-        self.resolver_exact_paths.append(exact_paths)
-        self.resolver_prefix_paths.append(prefix_paths)
-
-    def ascend_resolver(self):
-        if not self.yaml_path_resolvers:
-            return
-        self.resolver_exact_paths.pop()
-        self.resolver_prefix_paths.pop()
-
-    def check_resolver_prefix(self, depth, path, kind,
-            current_node, current_index):
-        node_check, index_check = path[depth-1]
-        if isinstance(node_check, str):
-            if current_node.tag != node_check:
-                return
-        elif node_check is not None:
-            if not isinstance(current_node, node_check):
-                return
-        if index_check is True and current_index is not None:
-            return
-        if (index_check is False or index_check is None)    \
-                and current_index is None:
-            return
-        if isinstance(index_check, str):
-            if not (isinstance(current_index, ScalarNode)
-                    and index_check == current_index.value):
-                return
-        elif isinstance(index_check, int) and not isinstance(index_check, bool):
-            if index_check != current_index:
-                return
-        return True
-
-    def resolve(self, kind, value, implicit):
-        if kind is ScalarNode and implicit[0]:
-            if value == '':
-                resolvers = self.yaml_implicit_resolvers.get('', [])
-            else:
-                resolvers = self.yaml_implicit_resolvers.get(value[0], [])
-            # Build a new list here; augmented assignment would mutate the
-            # registry lists stored in yaml_implicit_resolvers in place.
-            resolvers = resolvers + self.yaml_implicit_resolvers.get(None, [])
-            for tag, regexp in resolvers:
-                if regexp.match(value):
-                    return tag
-            implicit = implicit[1]
-        if self.yaml_path_resolvers:
-            exact_paths = self.resolver_exact_paths[-1]
-            if kind in exact_paths:
-                return exact_paths[kind]
-            if None in exact_paths:
-                return exact_paths[None]
-        if kind is ScalarNode:
-            return self.DEFAULT_SCALAR_TAG
-        elif kind is SequenceNode:
-            return self.DEFAULT_SEQUENCE_TAG
-        elif kind is MappingNode:
-            return self.DEFAULT_MAPPING_TAG
-
-class Resolver(BaseResolver):
-    pass
-
-Resolver.add_implicit_resolver(
-        'tag:yaml.org,2002:bool',
-        re.compile(r'''^(?:yes|Yes|YES|no|No|NO
-                    |true|True|TRUE|false|False|FALSE
-                    |on|On|ON|off|Off|OFF)$''', re.X),
-        list('yYnNtTfFoO'))
-
-Resolver.add_implicit_resolver(
-        'tag:yaml.org,2002:float',
-        re.compile(r'''^(?:[-+]?(?:[0-9][0-9_]*)\.[0-9_]*(?:[eE][-+][0-9]+)?
-                    |\.[0-9_]+(?:[eE][-+][0-9]+)?
-                    |[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+\.[0-9_]*
-                    |[-+]?\.(?:inf|Inf|INF)
-                    |\.(?:nan|NaN|NAN))$''', re.X),
-        list('-+0123456789.'))
-
-Resolver.add_implicit_resolver(
-        'tag:yaml.org,2002:int',
-        re.compile(r'''^(?:[-+]?0b[0-1_]+
-                    |[-+]?0[0-7_]+
-                    |[-+]?(?:0|[1-9][0-9_]*)
-                    |[-+]?0x[0-9a-fA-F_]+
-                    |[-+]?[1-9][0-9_]*(?::[0-5]?[0-9])+)$''', re.X),
-        list('-+0123456789'))
-
-Resolver.add_implicit_resolver(
-        'tag:yaml.org,2002:merge',
-        re.compile(r'^(?:<<)$'),
-        ['<'])
-
-Resolver.add_implicit_resolver(
-        'tag:yaml.org,2002:null',
-        re.compile(r'''^(?: ~
-                    |null|Null|NULL
-                    | )$''', re.X),
-        ['~', 'n', 'N', ''])
-
-Resolver.add_implicit_resolver(
-        'tag:yaml.org,2002:timestamp',
-        re.compile(r'''^(?:[0-9][0-9][0-9][0-9]-[0-9][0-9]-[0-9][0-9]
-                    |[0-9][0-9][0-9][0-9] -[0-9][0-9]? -[0-9][0-9]?
-                     (?:[Tt]|[ \t]+)[0-9][0-9]?
-                     :[0-9][0-9] :[0-9][0-9] (?:\.[0-9]*)?
-                     (?:[ \t]*(?:Z|[-+][0-9][0-9]?(?::[0-9][0-9])?))?)$''', re.X),
-        list('0123456789'))
-
-Resolver.add_implicit_resolver(
-        'tag:yaml.org,2002:value',
-        re.compile(r'^(?:=)$'),
-        ['='])
-
-# The following resolver is only for documentation purposes. It cannot work
-# because plain scalars cannot start with '!', '&', or '*'.
-Resolver.add_implicit_resolver(
-        'tag:yaml.org,2002:yaml',
-        re.compile(r'^(?:!|&|\*)$'),
-        list('!&*'))
-
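
The implicit resolvers above are why untagged plain scalars acquire YAML 1.1
types. A few illustrative round-trips through PyYAML's safe_load:

    import yaml

    # Matched by the bool, int, and null regexps registered above.
    print(yaml.safe_load("on"))       # True
    print(yaml.safe_load("1_000"))    # 1000
    print(yaml.safe_load("~"))        # None
    # Nothing matches, so DEFAULT_SCALAR_TAG (!!str) applies.
    print(yaml.safe_load("hello"))    # 'hello'
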
diff --git a/lib/spack/external/yaml/lib3/yaml/scanner.py b/lib/spack/external/yaml/lib3/yaml/scanner.py
deleted file mode 100644
index c8d127b8ec..0000000000
--- a/lib/spack/external/yaml/lib3/yaml/scanner.py
+++ /dev/null
@@ -1,1444 +0,0 @@
-
-# Scanner produces tokens of the following types:
-# STREAM-START
-# STREAM-END
-# DIRECTIVE(name, value)
-# DOCUMENT-START
-# DOCUMENT-END
-# BLOCK-SEQUENCE-START
-# BLOCK-MAPPING-START
-# BLOCK-END
-# FLOW-SEQUENCE-START
-# FLOW-MAPPING-START
-# FLOW-SEQUENCE-END
-# FLOW-MAPPING-END
-# BLOCK-ENTRY
-# FLOW-ENTRY
-# KEY
-# VALUE
-# ALIAS(value)
-# ANCHOR(value)
-# TAG(value)
-# SCALAR(value, plain, style)
-#
-# Read comments in the Scanner code for more details.
-#
-
-__all__ = ['Scanner', 'ScannerError']
-
-from .error import MarkedYAMLError
-from .tokens import *
-
-class ScannerError(MarkedYAMLError):
-    pass
-
-class SimpleKey:
-    # See the simple keys treatment below.
-
-    def __init__(self, token_number, required, index, line, column, mark):
-        self.token_number = token_number
-        self.required = required
-        self.index = index
-        self.line = line
-        self.column = column
-        self.mark = mark
-
-class Scanner:
-
-    def __init__(self):
-        """Initialize the scanner."""
-        # It is assumed that Scanner and Reader will have a common descendant.
-        # Reader does the dirty work of checking for BOM and converting the
-        # input data to Unicode. It also adds NUL to the end.
-        #
-        # Reader supports the following methods
-        #   self.peek(i=0)       # peek the next i-th character
-        #   self.prefix(l=1)     # peek the next l characters
-        #   self.forward(l=1)    # read the next l characters and move the pointer.
-
-        # Had we reached the end of the stream?
-        self.done = False
-
-        # The number of unclosed '{' and '['. `flow_level == 0` means block
-        # context.
-        self.flow_level = 0
-
-        # List of processed tokens that are not yet emitted.
-        self.tokens = []
-
-        # Add the STREAM-START token.
-        self.fetch_stream_start()
-
-        # Number of tokens that were emitted through the `get_token` method.
-        self.tokens_taken = 0
-
-        # The current indentation level.
-        self.indent = -1
-
-        # Past indentation levels.
-        self.indents = []
-
-        # Variables related to simple keys treatment.
-
-        # A simple key is a key that is not denoted by the '?' indicator.
-        # Example of simple keys:
-        #   ---
-        #   block simple key: value
-        #   ? not a simple key:
-        #   : { flow simple key: value }
-        # We emit the KEY token before all keys, so when we find a potential
-        # simple key, we try to locate the corresponding ':' indicator.
-        # Simple keys should be limited to a single line and 1024 characters.
-
-        # Can a simple key start at the current position? A simple key may
-        # start:
-        # - at the beginning of the line, not counting indentation spaces
-        #       (in block context),
-        # - after '{', '[', ',' (in the flow context),
-        # - after '?', ':', '-' (in the block context).
-        # In the block context, this flag also signifies if a block collection
-        # may start at the current position.
-        self.allow_simple_key = True
-
-        # Keep track of possible simple keys. This is a dictionary. The key
-        # is `flow_level`; there can be no more than one possible simple key
-        # for each level. The value is a SimpleKey record:
-        #   (token_number, required, index, line, column, mark)
-        # A simple key may start with ALIAS, ANCHOR, TAG, SCALAR(flow),
-        # '[', or '{' tokens.
-        self.possible_simple_keys = {}
-
-    # Public methods.
-
-    def check_token(self, *choices):
-        # Check if the next token is one of the given types.
-        while self.need_more_tokens():
-            self.fetch_more_tokens()
-        if self.tokens:
-            if not choices:
-                return True
-            for choice in choices:
-                if isinstance(self.tokens[0], choice):
-                    return True
-        return False
-
-    def peek_token(self):
-        # Return the next token, but do not delete it from the queue.
-        while self.need_more_tokens():
-            self.fetch_more_tokens()
-        if self.tokens:
-            return self.tokens[0]
-
-    def get_token(self):
-        # Return the next token.
-        while self.need_more_tokens():
-            self.fetch_more_tokens()
-        if self.tokens:
-            self.tokens_taken += 1
-            return self.tokens.pop(0)
-
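
check_token(), peek_token(), and get_token() above are the scanner's whole
public surface; PyYAML's yaml.scan() simply drains them. A minimal sketch
showing the retroactively inserted KEY token for the simple key 'a' (see
fetch_value() below):

    import yaml

    # The scanner only learns that 'a' was a key on reaching ':', at
    # which point fetch_value() inserts a KeyToken before the saved
    # scalar's position.
    for token in yaml.scan("a: 1"):
        print(type(token).__name__)
    # StreamStartToken, BlockMappingStartToken, KeyToken, ScalarToken,
    # ValueToken, ScalarToken, BlockEndToken, StreamEndToken
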
-    # Private methods.
-
-    def need_more_tokens(self):
-        if self.done:
-            return False
-        if not self.tokens:
-            return True
-        # The current token may be a potential simple key, so we
-        # need to look further.
-        self.stale_possible_simple_keys()
-        if self.next_possible_simple_key() == self.tokens_taken:
-            return True
-
-    def fetch_more_tokens(self):
-
-        # Eat whitespaces and comments until we reach the next token.
-        self.scan_to_next_token()
-
-        # Remove obsolete possible simple keys.
-        self.stale_possible_simple_keys()
-
-        # Compare the current indentation and column. It may add some tokens
-        # and decrease the current indentation level.
-        self.unwind_indent(self.column)
-
-        # Peek the next character.
-        ch = self.peek()
-
-        # Is it the end of stream?
-        if ch == '\0':
-            return self.fetch_stream_end()
-
-        # Is it a directive?
-        if ch == '%' and self.check_directive():
-            return self.fetch_directive()
-
-        # Is it the document start?
-        if ch == '-' and self.check_document_start():
-            return self.fetch_document_start()
-
-        # Is it the document end?
-        if ch == '.' and self.check_document_end():
-            return self.fetch_document_end()
-
-        # TODO: support for BOM within a stream.
-        #if ch == '\uFEFF':
-        #    return self.fetch_bom()    <-- issue BOMToken
-
-        # Note: the order of the following checks is NOT significant.
-
-        # Is it the flow sequence start indicator?
-        if ch == '[':
-            return self.fetch_flow_sequence_start()
-
-        # Is it the flow mapping start indicator?
-        if ch == '{':
-            return self.fetch_flow_mapping_start()
-
-        # Is it the flow sequence end indicator?
-        if ch == ']':
-            return self.fetch_flow_sequence_end()
-
-        # Is it the flow mapping end indicator?
-        if ch == '}':
-            return self.fetch_flow_mapping_end()
-
-        # Is it the flow entry indicator?
-        if ch == ',':
-            return self.fetch_flow_entry()
-
-        # Is it the block entry indicator?
-        if ch == '-' and self.check_block_entry():
-            return self.fetch_block_entry()
-
-        # Is it the key indicator?
-        if ch == '?' and self.check_key():
-            return self.fetch_key()
-
-        # Is it the value indicator?
-        if ch == ':' and self.check_value():
-            return self.fetch_value()
-
-        # Is it an alias?
-        if ch == '*':
-            return self.fetch_alias()
-
-        # Is it an anchor?
-        if ch == '&':
-            return self.fetch_anchor()
-
-        # Is it a tag?
-        if ch == '!':
-            return self.fetch_tag()
-
-        # Is it a literal scalar?
-        if ch == '|' and not self.flow_level:
-            return self.fetch_literal()
-
-        # Is it a folded scalar?
-        if ch == '>' and not self.flow_level:
-            return self.fetch_folded()
-
-        # Is it a single quoted scalar?
-        if ch == '\'':
-            return self.fetch_single()
-
-        # Is it a double quoted scalar?
-        if ch == '\"':
-            return self.fetch_double()
-
-        # It must be a plain scalar then.
-        if self.check_plain():
-            return self.fetch_plain()
-
-        # No? It's an error. Let's produce a nice error message.
-        raise ScannerError("while scanning for the next token", None,
-                "found character %r that cannot start any token" % ch,
-                self.get_mark())
-
-    # Simple keys treatment.
-
-    def next_possible_simple_key(self):
-        # Return the number of the nearest possible simple key. Actually we
-        # don't need to loop through the whole dictionary. We may replace it
-        # with the following code:
-        #   if not self.possible_simple_keys:
-        #       return None
-        #   return self.possible_simple_keys[
-        #           min(self.possible_simple_keys.keys())].token_number
-        min_token_number = None
-        for level in self.possible_simple_keys:
-            key = self.possible_simple_keys[level]
-            if min_token_number is None or key.token_number < min_token_number:
-                min_token_number = key.token_number
-        return min_token_number
-
-    def stale_possible_simple_keys(self):
-        # Remove entries that are no longer possible simple keys. According to
-        # the YAML specification, simple keys
-        # - should be limited to a single line,
-        # - should be no longer than 1024 characters.
-        # Disabling this procedure will allow simple keys of any length and
-        # height (may cause problems if indentation is broken though).
-        for level in list(self.possible_simple_keys):
-            key = self.possible_simple_keys[level]
-            if key.line != self.line  \
-                    or self.index-key.index > 1024:
-                if key.required:
-                    raise ScannerError("while scanning a simple key", key.mark,
-                            "could not find expected ':'", self.get_mark())
-                del self.possible_simple_keys[level]
-
-    def save_possible_simple_key(self):
-        # The next token may start a simple key. We check if it's possible
-        # and save its position. This function is called for
-        #   ALIAS, ANCHOR, TAG, SCALAR(flow), '[', and '{'.
-
-        # Check if a simple key is required at the current position.
-        required = not self.flow_level and self.indent == self.column
-
-        # The next token might be a simple key. Let's save its number and
-        # position.
-        if self.allow_simple_key:
-            self.remove_possible_simple_key()
-            token_number = self.tokens_taken+len(self.tokens)
-            key = SimpleKey(token_number, required,
-                    self.index, self.line, self.column, self.get_mark())
-            self.possible_simple_keys[self.flow_level] = key
-
-    def remove_possible_simple_key(self):
-        # Remove the saved possible key position at the current flow level.
-        if self.flow_level in self.possible_simple_keys:
-            key = self.possible_simple_keys[self.flow_level]
-            
-            if key.required:
-                raise ScannerError("while scanning a simple key", key.mark,
-                        "could not find expected ':'", self.get_mark())
-
-            del self.possible_simple_keys[self.flow_level]
-
-    # Indentation functions.
-
-    def unwind_indent(self, column):
-
-        ## In flow context, tokens should respect indentation.
-        ## Actually the condition should be `self.indent >= column` according to
-        ## the spec. But this condition will prohibit intuitively correct
-        ## constructions such as
-        ## key : {
-        ## }
-        #if self.flow_level and self.indent > column:
-        #    raise ScannerError(None, None,
-        #            "invalid intendation or unclosed '[' or '{'",
-        #            self.get_mark())
-
-        # In the flow context, indentation is ignored. We make the scanner
-        # less restrictive than the specification requires.
-        if self.flow_level:
-            return
-
-        # In block context, we may need to issue the BLOCK-END tokens.
-        while self.indent > column:
-            mark = self.get_mark()
-            self.indent = self.indents.pop()
-            self.tokens.append(BlockEndToken(mark, mark))
-
-    def add_indent(self, column):
-        # Check if we need to increase indentation.
-        if self.indent < column:
-            self.indents.append(self.indent)
-            self.indent = column
-            return True
-        return False
-
-    # Fetchers.
-
-    def fetch_stream_start(self):
-        # We always add STREAM-START as the first token and STREAM-END as the
-        # last token.
-
-        # Read the token.
-        mark = self.get_mark()
-        
-        # Add STREAM-START.
-        self.tokens.append(StreamStartToken(mark, mark,
-            encoding=self.encoding))
-        
-
-    def fetch_stream_end(self):
-
-        # Set the current indentation to -1.
-        self.unwind_indent(-1)
-
-        # Reset simple keys.
-        self.remove_possible_simple_key()
-        self.allow_simple_key = False
-        self.possible_simple_keys = {}
-
-        # Read the token.
-        mark = self.get_mark()
-        
-        # Add STREAM-END.
-        self.tokens.append(StreamEndToken(mark, mark))
-
-        # The stream is finished.
-        self.done = True
-
-    def fetch_directive(self):
-        
-        # Set the current indentation to -1.
-        self.unwind_indent(-1)
-
-        # Reset simple keys.
-        self.remove_possible_simple_key()
-        self.allow_simple_key = False
-
-        # Scan and add DIRECTIVE.
-        self.tokens.append(self.scan_directive())
-
-    def fetch_document_start(self):
-        self.fetch_document_indicator(DocumentStartToken)
-
-    def fetch_document_end(self):
-        self.fetch_document_indicator(DocumentEndToken)
-
-    def fetch_document_indicator(self, TokenClass):
-
-        # Set the current indentation to -1.
-        self.unwind_indent(-1)
-
-        # Reset simple keys. Note that there could not be a block collection
-        # after '---'.
-        self.remove_possible_simple_key()
-        self.allow_simple_key = False
-
-        # Add DOCUMENT-START or DOCUMENT-END.
-        start_mark = self.get_mark()
-        self.forward(3)
-        end_mark = self.get_mark()
-        self.tokens.append(TokenClass(start_mark, end_mark))
-
-    def fetch_flow_sequence_start(self):
-        self.fetch_flow_collection_start(FlowSequenceStartToken)
-
-    def fetch_flow_mapping_start(self):
-        self.fetch_flow_collection_start(FlowMappingStartToken)
-
-    def fetch_flow_collection_start(self, TokenClass):
-
-        # '[' and '{' may start a simple key.
-        self.save_possible_simple_key()
-
-        # Increase the flow level.
-        self.flow_level += 1
-
-        # Simple keys are allowed after '[' and '{'.
-        self.allow_simple_key = True
-
-        # Add FLOW-SEQUENCE-START or FLOW-MAPPING-START.
-        start_mark = self.get_mark()
-        self.forward()
-        end_mark = self.get_mark()
-        self.tokens.append(TokenClass(start_mark, end_mark))
-
-    def fetch_flow_sequence_end(self):
-        self.fetch_flow_collection_end(FlowSequenceEndToken)
-
-    def fetch_flow_mapping_end(self):
-        self.fetch_flow_collection_end(FlowMappingEndToken)
-
-    def fetch_flow_collection_end(self, TokenClass):
-
-        # Reset possible simple key on the current level.
-        self.remove_possible_simple_key()
-
-        # Decrease the flow level.
-        self.flow_level -= 1
-
-        # No simple keys after ']' or '}'.
-        self.allow_simple_key = False
-
-        # Add FLOW-SEQUENCE-END or FLOW-MAPPING-END.
-        start_mark = self.get_mark()
-        self.forward()
-        end_mark = self.get_mark()
-        self.tokens.append(TokenClass(start_mark, end_mark))
-
-    def fetch_flow_entry(self):
-
-        # Simple keys are allowed after ','.
-        self.allow_simple_key = True
-
-        # Reset possible simple key on the current level.
-        self.remove_possible_simple_key()
-
-        # Add FLOW-ENTRY.
-        start_mark = self.get_mark()
-        self.forward()
-        end_mark = self.get_mark()
-        self.tokens.append(FlowEntryToken(start_mark, end_mark))
-
-    def fetch_block_entry(self):
-
-        # Block context needs additional checks.
-        if not self.flow_level:
-
-            # Are we allowed to start a new entry?
-            if not self.allow_simple_key:
-                raise ScannerError(None, None,
-                        "sequence entries are not allowed here",
-                        self.get_mark())
-
-            # We may need to add BLOCK-SEQUENCE-START.
-            if self.add_indent(self.column):
-                mark = self.get_mark()
-                self.tokens.append(BlockSequenceStartToken(mark, mark))
-
-        # It's an error for the block entry to occur in the flow context,
-        # but we let the parser detect this.
-        else:
-            pass
-
-        # Simple keys are allowed after '-'.
-        self.allow_simple_key = True
-
-        # Reset possible simple key on the current level.
-        self.remove_possible_simple_key()
-
-        # Add BLOCK-ENTRY.
-        start_mark = self.get_mark()
-        self.forward()
-        end_mark = self.get_mark()
-        self.tokens.append(BlockEntryToken(start_mark, end_mark))
-
-    def fetch_key(self):
-        
-        # Block context needs additional checks.
-        if not self.flow_level:
-
-            # Are we allowed to start a key (not necessarily a simple one)?
-            if not self.allow_simple_key:
-                raise ScannerError(None, None,
-                        "mapping keys are not allowed here",
-                        self.get_mark())
-
-            # We may need to add BLOCK-MAPPING-START.
-            if self.add_indent(self.column):
-                mark = self.get_mark()
-                self.tokens.append(BlockMappingStartToken(mark, mark))
-
-        # Simple keys are allowed after '?' in the block context.
-        self.allow_simple_key = not self.flow_level
-
-        # Reset possible simple key on the current level.
-        self.remove_possible_simple_key()
-
-        # Add KEY.
-        start_mark = self.get_mark()
-        self.forward()
-        end_mark = self.get_mark()
-        self.tokens.append(KeyToken(start_mark, end_mark))
-
-    def fetch_value(self):
-
-        # Does this value terminate a pending simple key?
-        if self.flow_level in self.possible_simple_keys:
-
-            # Add KEY.
-            key = self.possible_simple_keys[self.flow_level]
-            del self.possible_simple_keys[self.flow_level]
-            self.tokens.insert(key.token_number-self.tokens_taken,
-                    KeyToken(key.mark, key.mark))
-
-            # If this key starts a new block mapping, we need to add
-            # BLOCK-MAPPING-START.
-            if not self.flow_level:
-                if self.add_indent(key.column):
-                    self.tokens.insert(key.token_number-self.tokens_taken,
-                            BlockMappingStartToken(key.mark, key.mark))
-
-            # There cannot be two simple keys one after another.
-            self.allow_simple_key = False
-
-        # It must be a part of a complex key.
-        else:
-            
-            # Block context needs additional checks.
-            # (Do we really need them? They will be caught by the parser
-            # anyway.)
-            if not self.flow_level:
-
-                # We are allowed to start a complex value if and only if
-                # we can start a simple key.
-                if not self.allow_simple_key:
-                    raise ScannerError(None, None,
-                            "mapping values are not allowed here",
-                            self.get_mark())
-
-            # If this value starts a new block mapping, we need to add
-            # BLOCK-MAPPING-START.  It will be detected as an error later by
-            # the parser.
-            if not self.flow_level:
-                if self.add_indent(self.column):
-                    mark = self.get_mark()
-                    self.tokens.append(BlockMappingStartToken(mark, mark))
-
-            # Simple keys are allowed after ':' in the block context.
-            self.allow_simple_key = not self.flow_level
-
-            # Reset possible simple key on the current level.
-            self.remove_possible_simple_key()
-
-        # Add VALUE.
-        start_mark = self.get_mark()
-        self.forward()
-        end_mark = self.get_mark()
-        self.tokens.append(ValueToken(start_mark, end_mark))
-
-    def fetch_alias(self):
-
-        # ALIAS could be a simple key.
-        self.save_possible_simple_key()
-
-        # No simple keys after ALIAS.
-        self.allow_simple_key = False
-
-        # Scan and add ALIAS.
-        self.tokens.append(self.scan_anchor(AliasToken))
-
-    def fetch_anchor(self):
-
-        # ANCHOR could start a simple key.
-        self.save_possible_simple_key()
-
-        # No simple keys after ANCHOR.
-        self.allow_simple_key = False
-
-        # Scan and add ANCHOR.
-        self.tokens.append(self.scan_anchor(AnchorToken))
-
-    def fetch_tag(self):
-
-        # TAG could start a simple key.
-        self.save_possible_simple_key()
-
-        # No simple keys after TAG.
-        self.allow_simple_key = False
-
-        # Scan and add TAG.
-        self.tokens.append(self.scan_tag())
-
-    def fetch_literal(self):
-        self.fetch_block_scalar(style='|')
-
-    def fetch_folded(self):
-        self.fetch_block_scalar(style='>')
-
-    def fetch_block_scalar(self, style):
-
-        # A simple key may follow a block scalar.
-        self.allow_simple_key = True
-
-        # Reset possible simple key on the current level.
-        self.remove_possible_simple_key()
-
-        # Scan and add SCALAR.
-        self.tokens.append(self.scan_block_scalar(style))
-
-    def fetch_single(self):
-        self.fetch_flow_scalar(style='\'')
-
-    def fetch_double(self):
-        self.fetch_flow_scalar(style='"')
-
-    def fetch_flow_scalar(self, style):
-
-        # A flow scalar could be a simple key.
-        self.save_possible_simple_key()
-
-        # No simple keys after flow scalars.
-        self.allow_simple_key = False
-
-        # Scan and add SCALAR.
-        self.tokens.append(self.scan_flow_scalar(style))
-
-    def fetch_plain(self):
-
-        # A plain scalar could be a simple key.
-        self.save_possible_simple_key()
-
-        # No simple keys after plain scalars. But note that `scan_plain` will
-        # change this flag if the scan is finished at the beginning of the
-        # line.
-        self.allow_simple_key = False
-
-        # Scan and add SCALAR. May change `allow_simple_key`.
-        self.tokens.append(self.scan_plain())
-
-    # Checkers.
-
-    def check_directive(self):
-
-        # DIRECTIVE:        ^ '%' ...
-        # The '%' indicator is already checked.
-        if self.column == 0:
-            return True
-
-    def check_document_start(self):
-
-        # DOCUMENT-START:   ^ '---' (' '|'\n')
-        if self.column == 0:
-            if self.prefix(3) == '---'  \
-                    and self.peek(3) in '\0 \t\r\n\x85\u2028\u2029':
-                return True
-
-    def check_document_end(self):
-
-        # DOCUMENT-END:     ^ '...' (' '|'\n')
-        if self.column == 0:
-            if self.prefix(3) == '...'  \
-                    and self.peek(3) in '\0 \t\r\n\x85\u2028\u2029':
-                return True
-
-    def check_block_entry(self):
-
-        # BLOCK-ENTRY:      '-' (' '|'\n')
-        return self.peek(1) in '\0 \t\r\n\x85\u2028\u2029'
-
-    def check_key(self):
-
-        # KEY(flow context):    '?'
-        if self.flow_level:
-            return True
-
-        # KEY(block context):   '?' (' '|'\n')
-        else:
-            return self.peek(1) in '\0 \t\r\n\x85\u2028\u2029'
-
-    def check_value(self):
-
-        # VALUE(flow context):  ':'
-        if self.flow_level:
-            return True
-
-        # VALUE(block context): ':' (' '|'\n')
-        else:
-            return self.peek(1) in '\0 \t\r\n\x85\u2028\u2029'
-
-    def check_plain(self):
-
-        # A plain scalar may start with any non-space character except:
-        #   '-', '?', ':', ',', '[', ']', '{', '}',
-        #   '#', '&', '*', '!', '|', '>', '\'', '\"',
-        #   '%', '@', '`'.
-        #
-        # It may also start with
-        #   '-', '?', ':'
-        # if it is followed by a non-space character.
-        #
-        # Note that we limit the last rule to the block context (except the
-        # '-' character) because we want the flow context to be space
-        # independent.
-        ch = self.peek()
-        return ch not in '\0 \t\r\n\x85\u2028\u2029-?:,[]{}#&*!|>\'\"%@`'  \
-                or (self.peek(1) not in '\0 \t\r\n\x85\u2028\u2029'
-                        and (ch == '-' or (not self.flow_level and ch in '?:')))
-
-    # Scanners.
-
-    def scan_to_next_token(self):
-        # We ignore spaces, line breaks and comments.
-        # If we find a line break in the block context, we set the flag
-        # `allow_simple_key` on.
-        # The byte order mark is stripped if it's the first character in the
-        # stream. We do not yet support BOM inside the stream as the
-        # specification requires. Any such mark will be considered as a part
-        # of the document.
-        #
-        # TODO: We need to make tab handling rules more sane. A good rule is
-        #   Tabs cannot precede tokens
-        #   BLOCK-SEQUENCE-START, BLOCK-MAPPING-START, BLOCK-END,
-        #   KEY(block), VALUE(block), BLOCK-ENTRY
-        # So the checking code is
-        #   if <TAB>:
-        #       self.allow_simple_keys = False
-        # We also need to add the check for `allow_simple_keys == True` to
-        # `unwind_indent` before issuing BLOCK-END.
-        # Scanners for block, flow, and plain scalars need to be modified.
-
-        if self.index == 0 and self.peek() == '\uFEFF':
-            self.forward()
-        found = False
-        while not found:
-            while self.peek() == ' ':
-                self.forward()
-            if self.peek() == '#':
-                while self.peek() not in '\0\r\n\x85\u2028\u2029':
-                    self.forward()
-            if self.scan_line_break():
-                if not self.flow_level:
-                    self.allow_simple_key = True
-            else:
-                found = True
-
-    def scan_directive(self):
-        # See the specification for details.
-        start_mark = self.get_mark()
-        self.forward()
-        name = self.scan_directive_name(start_mark)
-        value = None
-        if name == 'YAML':
-            value = self.scan_yaml_directive_value(start_mark)
-            end_mark = self.get_mark()
-        elif name == 'TAG':
-            value = self.scan_tag_directive_value(start_mark)
-            end_mark = self.get_mark()
-        else:
-            end_mark = self.get_mark()
-            while self.peek() not in '\0\r\n\x85\u2028\u2029':
-                self.forward()
-        self.scan_directive_ignored_line(start_mark)
-        return DirectiveToken(name, value, start_mark, end_mark)
-
-    def scan_directive_name(self, start_mark):
-        # See the specification for details.
-        length = 0
-        ch = self.peek(length)
-        while '0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z'  \
-                or ch in '-_':
-            length += 1
-            ch = self.peek(length)
-        if not length:
-            raise ScannerError("while scanning a directive", start_mark,
-                    "expected alphabetic or numeric character, but found %r"
-                    % ch, self.get_mark())
-        value = self.prefix(length)
-        self.forward(length)
-        ch = self.peek()
-        if ch not in '\0 \r\n\x85\u2028\u2029':
-            raise ScannerError("while scanning a directive", start_mark,
-                    "expected alphabetic or numeric character, but found %r"
-                    % ch, self.get_mark())
-        return value
-
-    def scan_yaml_directive_value(self, start_mark):
-        # See the specification for details.
-        while self.peek() == ' ':
-            self.forward()
-        major = self.scan_yaml_directive_number(start_mark)
-        if self.peek() != '.':
-            raise ScannerError("while scanning a directive", start_mark,
-                    "expected a digit or '.', but found %r" % self.peek(),
-                    self.get_mark())
-        self.forward()
-        minor = self.scan_yaml_directive_number(start_mark)
-        if self.peek() not in '\0 \r\n\x85\u2028\u2029':
-            raise ScannerError("while scanning a directive", start_mark,
-                    "expected a digit or ' ', but found %r" % self.peek(),
-                    self.get_mark())
-        return (major, minor)
-
-    def scan_yaml_directive_number(self, start_mark):
-        # See the specification for details.
-        ch = self.peek()
-        if not ('0' <= ch <= '9'):
-            raise ScannerError("while scanning a directive", start_mark,
-                    "expected a digit, but found %r" % ch, self.get_mark())
-        length = 0
-        while '0' <= self.peek(length) <= '9':
-            length += 1
-        value = int(self.prefix(length))
-        self.forward(length)
-        return value
-
-    def scan_tag_directive_value(self, start_mark):
-        # See the specification for details.
-        while self.peek() == ' ':
-            self.forward()
-        handle = self.scan_tag_directive_handle(start_mark)
-        while self.peek() == ' ':
-            self.forward()
-        prefix = self.scan_tag_directive_prefix(start_mark)
-        return (handle, prefix)
-
-    def scan_tag_directive_handle(self, start_mark):
-        # See the specification for details.
-        value = self.scan_tag_handle('directive', start_mark)
-        ch = self.peek()
-        if ch != ' ':
-            raise ScannerError("while scanning a directive", start_mark,
-                    "expected ' ', but found %r" % ch, self.get_mark())
-        return value
-
-    def scan_tag_directive_prefix(self, start_mark):
-        # See the specification for details.
-        value = self.scan_tag_uri('directive', start_mark)
-        ch = self.peek()
-        if ch not in '\0 \r\n\x85\u2028\u2029':
-            raise ScannerError("while scanning a directive", start_mark,
-                    "expected ' ', but found %r" % ch, self.get_mark())
-        return value
-
-    def scan_directive_ignored_line(self, start_mark):
-        # See the specification for details.
-        while self.peek() == ' ':
-            self.forward()
-        if self.peek() == '#':
-            while self.peek() not in '\0\r\n\x85\u2028\u2029':
-                self.forward()
-        ch = self.peek()
-        if ch not in '\0\r\n\x85\u2028\u2029':
-            raise ScannerError("while scanning a directive", start_mark,
-                    "expected a comment or a line break, but found %r"
-                        % ch, self.get_mark())
-        self.scan_line_break()
-
-    def scan_anchor(self, TokenClass):
-        # The specification does not restrict characters for anchors and
-        # aliases. This may lead to problems, for instance, the document:
-        #   [ *alias, value ]
-        # can be interpreted in two ways, as
-        #   [ "value" ]
-        # and
-        #   [ *alias , "value" ]
-        # Therefore we restrict aliases to numbers and ASCII letters.
-        start_mark = self.get_mark()
-        indicator = self.peek()
-        if indicator == '*':
-            name = 'alias'
-        else:
-            name = 'anchor'
-        self.forward()
-        length = 0
-        ch = self.peek(length)
-        while '0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z'  \
-                or ch in '-_':
-            length += 1
-            ch = self.peek(length)
-        if not length:
-            raise ScannerError("while scanning an %s" % name, start_mark,
-                    "expected alphabetic or numeric character, but found %r"
-                    % ch, self.get_mark())
-        value = self.prefix(length)
-        self.forward(length)
-        ch = self.peek()
-        if ch not in '\0 \t\r\n\x85\u2028\u2029?:,]}%@`':
-            raise ScannerError("while scanning an %s" % name, start_mark,
-                    "expected alphabetic or numeric character, but found %r"
-                    % ch, self.get_mark())
-        end_mark = self.get_mark()
-        return TokenClass(value, start_mark, end_mark)
-
-    def scan_tag(self):
-        # See the specification for details.
-        start_mark = self.get_mark()
-        ch = self.peek(1)
-        if ch == '<':
-            handle = None
-            self.forward(2)
-            suffix = self.scan_tag_uri('tag', start_mark)
-            if self.peek() != '>':
-                raise ScannerError("while parsing a tag", start_mark,
-                        "expected '>', but found %r" % self.peek(),
-                        self.get_mark())
-            self.forward()
-        elif ch in '\0 \t\r\n\x85\u2028\u2029':
-            handle = None
-            suffix = '!'
-            self.forward()
-        else:
-            length = 1
-            use_handle = False
-            while ch not in '\0 \r\n\x85\u2028\u2029':
-                if ch == '!':
-                    use_handle = True
-                    break
-                length += 1
-                ch = self.peek(length)
-            handle = '!'
-            if use_handle:
-                handle = self.scan_tag_handle('tag', start_mark)
-            else:
-                handle = '!'
-                self.forward()
-            suffix = self.scan_tag_uri('tag', start_mark)
-        ch = self.peek()
-        if ch not in '\0 \r\n\x85\u2028\u2029':
-            raise ScannerError("while scanning a tag", start_mark,
-                    "expected ' ', but found %r" % ch, self.get_mark())
-        value = (handle, suffix)
-        end_mark = self.get_mark()
-        return TagToken(value, start_mark, end_mark)
-
-    def scan_block_scalar(self, style):
-        # See the specification for details.
-
-        if style == '>':
-            folded = True
-        else:
-            folded = False
-
-        chunks = []
-        start_mark = self.get_mark()
-
-        # Scan the header.
-        self.forward()
-        chomping, increment = self.scan_block_scalar_indicators(start_mark)
-        self.scan_block_scalar_ignored_line(start_mark)
-
-        # Determine the indentation level and go to the first non-empty line.
-        min_indent = self.indent+1
-        if min_indent < 1:
-            min_indent = 1
-        if increment is None:
-            breaks, max_indent, end_mark = self.scan_block_scalar_indentation()
-            indent = max(min_indent, max_indent)
-        else:
-            indent = min_indent+increment-1
-            breaks, end_mark = self.scan_block_scalar_breaks(indent)
-        line_break = ''
-
-        # Scan the inner part of the block scalar.
-        while self.column == indent and self.peek() != '\0':
-            chunks.extend(breaks)
-            leading_non_space = self.peek() not in ' \t'
-            length = 0
-            while self.peek(length) not in '\0\r\n\x85\u2028\u2029':
-                length += 1
-            chunks.append(self.prefix(length))
-            self.forward(length)
-            line_break = self.scan_line_break()
-            breaks, end_mark = self.scan_block_scalar_breaks(indent)
-            if self.column == indent and self.peek() != '\0':
-
-                # Unfortunately, folding rules are ambiguous.
-                #
-                # This is the folding according to the specification:
-                
-                if folded and line_break == '\n'    \
-                        and leading_non_space and self.peek() not in ' \t':
-                    if not breaks:
-                        chunks.append(' ')
-                else:
-                    chunks.append(line_break)
-                
-                # This is Clark Evans's interpretation (also in the spec
-                # examples):
-                #
-                #if folded and line_break == '\n':
-                #    if not breaks:
-                #        if self.peek() not in ' \t':
-                #            chunks.append(' ')
-                #        else:
-                #            chunks.append(line_break)
-                #else:
-                #    chunks.append(line_break)
-            else:
-                break
-
-        # Chomp the tail.
-        if chomping is not False:
-            chunks.append(line_break)
-        if chomping is True:
-            chunks.extend(breaks)
-
-        # We are done.
-        return ScalarToken(''.join(chunks), False, start_mark, end_mark,
-                style)
-
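The folding rule implemented above, in one hedged example (again assuming the PyYAML-style load() entry point): single line breaks between non-empty lines of equal indentation become spaces, while an empty line survives as '\n'.

    import ruamel.yaml as yaml

    # 'a' and 'b' are joined with a space; the blank line before 'c'
    # becomes a literal newline; clipping leaves one trailing '\n'.
    assert yaml.load('>\n  a\n  b\n\n  c\n') == 'a b\nc\n'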
-    def scan_block_scalar_indicators(self, start_mark):
-        # See the specification for details.
-        chomping = None
-        increment = None
-        ch = self.peek()
-        if ch in '+-':
-            if ch == '+':
-                chomping = True
-            else:
-                chomping = False
-            self.forward()
-            ch = self.peek()
-            if ch in '0123456789':
-                increment = int(ch)
-                if increment == 0:
-                    raise ScannerError("while scanning a block scalar", start_mark,
-                            "expected indentation indicator in the range 1-9, but found 0",
-                            self.get_mark())
-                self.forward()
-        elif ch in '0123456789':
-            increment = int(ch)
-            if increment == 0:
-                raise ScannerError("while scanning a block scalar", start_mark,
-                        "expected indentation indicator in the range 1-9, but found 0",
-                        self.get_mark())
-            self.forward()
-            ch = self.peek()
-            if ch in '+-':
-                if ch == '+':
-                    chomping = True
-                else:
-                    chomping = False
-                self.forward()
-        ch = self.peek()
-        if ch not in '\0 \r\n\x85\u2028\u2029':
-            raise ScannerError("while scanning a block scalar", start_mark,
-                    "expected chomping or indentation indicators, but found %r"
-                    % ch, self.get_mark())
-        return chomping, increment
-
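What the chomping indicator parsed above means for the scalar's trailing line breaks; a sketch under the same PyYAML-compatible load() assumption:

    import ruamel.yaml as yaml

    assert yaml.load('|\n  text\n\n') == 'text\n'     # clip (default): one '\n'
    assert yaml.load('|-\n  text\n\n') == 'text'      # strip: no trailing breaks
    assert yaml.load('|+\n  text\n\n') == 'text\n\n'  # keep: all trailing breaks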
-    def scan_block_scalar_ignored_line(self, start_mark):
-        # See the specification for details.
-        while self.peek() == ' ':
-            self.forward()
-        if self.peek() == '#':
-            while self.peek() not in '\0\r\n\x85\u2028\u2029':
-                self.forward()
-        ch = self.peek()
-        if ch not in '\0\r\n\x85\u2028\u2029':
-            raise ScannerError("while scanning a block scalar", start_mark,
-                    "expected a comment or a line break, but found %r" % ch,
-                    self.get_mark())
-        self.scan_line_break()
-
-    def scan_block_scalar_indentation(self):
-        # See the specification for details.
-        chunks = []
-        max_indent = 0
-        end_mark = self.get_mark()
-        while self.peek() in ' \r\n\x85\u2028\u2029':
-            if self.peek() != ' ':
-                chunks.append(self.scan_line_break())
-                end_mark = self.get_mark()
-            else:
-                self.forward()
-                if self.column > max_indent:
-                    max_indent = self.column
-        return chunks, max_indent, end_mark
-
-    def scan_block_scalar_breaks(self, indent):
-        # See the specification for details.
-        chunks = []
-        end_mark = self.get_mark()
-        while self.column < indent and self.peek() == ' ':
-            self.forward()
-        while self.peek() in '\r\n\x85\u2028\u2029':
-            chunks.append(self.scan_line_break())
-            end_mark = self.get_mark()
-            while self.column < indent and self.peek() == ' ':
-                self.forward()
-        return chunks, end_mark
-
-    def scan_flow_scalar(self, style):
-        # See the specification for details.
-        # Note that we lose the indentation rules for quoted scalars. Quoted
-        # scalars don't need to adhere to indentation because " and ' clearly
-        # mark their beginning and end. Therefore we are less restrictive
-        # than the specification requires. We only need to check that
-        # document separators are not included in scalars.
-        if style == '"':
-            double = True
-        else:
-            double = False
-        chunks = []
-        start_mark = self.get_mark()
-        quote = self.peek()
-        self.forward()
-        chunks.extend(self.scan_flow_scalar_non_spaces(double, start_mark))
-        while self.peek() != quote:
-            chunks.extend(self.scan_flow_scalar_spaces(double, start_mark))
-            chunks.extend(self.scan_flow_scalar_non_spaces(double, start_mark))
-        self.forward()
-        end_mark = self.get_mark()
-        return ScalarToken(''.join(chunks), False, start_mark, end_mark,
-                style)
-
-    ESCAPE_REPLACEMENTS = {
-        '0':    '\0',
-        'a':    '\x07',
-        'b':    '\x08',
-        't':    '\x09',
-        '\t':   '\x09',
-        'n':    '\x0A',
-        'v':    '\x0B',
-        'f':    '\x0C',
-        'r':    '\x0D',
-        'e':    '\x1B',
-        ' ':    '\x20',
-        '\"':   '\"',
-        '\\':   '\\',
-        'N':    '\x85',
-        '_':    '\xA0',
-        'L':    '\u2028',
-        'P':    '\u2029',
-    }
-
-    ESCAPE_CODES = {
-        'x':    2,
-        'u':    4,
-        'U':    8,
-    }
-
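ESCAPE_REPLACEMENTS handles the single-character escapes; ESCAPE_CODES maps an escape letter to the number of hex digits that must follow it. A small sanity check, assuming the PyYAML-compatible load():

    import ruamel.yaml as yaml

    assert yaml.load('"\\x41"') == 'A'                 # 'x': 2 hex digits
    assert yaml.load('"\\u00e9"') == '\xe9'            # 'u': 4 hex digits
    assert yaml.load('"\\U0001F600"') == '\U0001F600'  # 'U': 8 hex digits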
-    def scan_flow_scalar_non_spaces(self, double, start_mark):
-        # See the specification for details.
-        chunks = []
-        while True:
-            length = 0
-            while self.peek(length) not in '\'\"\\\0 \t\r\n\x85\u2028\u2029':
-                length += 1
-            if length:
-                chunks.append(self.prefix(length))
-                self.forward(length)
-            ch = self.peek()
-            if not double and ch == '\'' and self.peek(1) == '\'':
-                chunks.append('\'')
-                self.forward(2)
-            elif (double and ch == '\'') or (not double and ch in '\"\\'):
-                chunks.append(ch)
-                self.forward()
-            elif double and ch == '\\':
-                self.forward()
-                ch = self.peek()
-                if ch in self.ESCAPE_REPLACEMENTS:
-                    chunks.append(self.ESCAPE_REPLACEMENTS[ch])
-                    self.forward()
-                elif ch in self.ESCAPE_CODES:
-                    length = self.ESCAPE_CODES[ch]
-                    self.forward()
-                    for k in range(length):
-                        if self.peek(k) not in '0123456789ABCDEFabcdef':
-                            raise ScannerError("while scanning a double-quoted scalar", start_mark,
-                                    "expected escape sequence of %d hexdecimal numbers, but found %r" %
-                                        (length, self.peek(k)), self.get_mark())
-                    code = int(self.prefix(length), 16)
-                    chunks.append(chr(code))
-                    self.forward(length)
-                elif ch in '\r\n\x85\u2028\u2029':
-                    self.scan_line_break()
-                    chunks.extend(self.scan_flow_scalar_breaks(double, start_mark))
-                else:
-                    raise ScannerError("while scanning a double-quoted scalar", start_mark,
-                            "found unknown escape character %r" % ch, self.get_mark())
-            else:
-                return chunks
-
-    def scan_flow_scalar_spaces(self, double, start_mark):
-        # See the specification for details.
-        chunks = []
-        length = 0
-        while self.peek(length) in ' \t':
-            length += 1
-        whitespaces = self.prefix(length)
-        self.forward(length)
-        ch = self.peek()
-        if ch == '\0':
-            raise ScannerError("while scanning a quoted scalar", start_mark,
-                    "found unexpected end of stream", self.get_mark())
-        elif ch in '\r\n\x85\u2028\u2029':
-            line_break = self.scan_line_break()
-            breaks = self.scan_flow_scalar_breaks(double, start_mark)
-            if line_break != '\n':
-                chunks.append(line_break)
-            elif not breaks:
-                chunks.append(' ')
-            chunks.extend(breaks)
-        else:
-            chunks.append(whitespaces)
-        return chunks
-
-    def scan_flow_scalar_breaks(self, double, start_mark):
-        # See the specification for details.
-        chunks = []
-        while True:
-            # Instead of checking indentation, we check for document
-            # separators.
-            prefix = self.prefix(3)
-            if (prefix == '---' or prefix == '...')   \
-                    and self.peek(3) in '\0 \t\r\n\x85\u2028\u2029':
-                raise ScannerError("while scanning a quoted scalar", start_mark,
-                        "found unexpected document separator", self.get_mark())
-            while self.peek() in ' \t':
-                self.forward()
-            if self.peek() in '\r\n\x85\u2028\u2029':
-                chunks.append(self.scan_line_break())
-            else:
-                return chunks
-
-    def scan_plain(self):
-        # See the specification for details.
-        # We add an additional restriction for the flow context:
-        #   plain scalars in the flow context cannot contain ',', ':' and '?'.
-        # We also keep track of the `allow_simple_key` flag here.
-        # Indentation rules are loosened for the flow context.
-        chunks = []
-        start_mark = self.get_mark()
-        end_mark = start_mark
-        indent = self.indent+1
-        # We allow zero indentation for scalars, but then we need to check for
-        # document separators at the beginning of the line.
-        #if indent == 0:
-        #    indent = 1
-        spaces = []
-        while True:
-            length = 0
-            if self.peek() == '#':
-                break
-            while True:
-                ch = self.peek(length)
-                if ch in '\0 \t\r\n\x85\u2028\u2029'    \
-                        or (not self.flow_level and ch == ':' and
-                                self.peek(length+1) in '\0 \t\r\n\x85\u2028\u2029') \
-                        or (self.flow_level and ch in ',:?[]{}'):
-                    break
-                length += 1
-            # It's not clear what we should do with ':' in the flow context.
-            if (self.flow_level and ch == ':'
-                    and self.peek(length+1) not in '\0 \t\r\n\x85\u2028\u2029,[]{}'):
-                self.forward(length)
-                raise ScannerError("while scanning a plain scalar", start_mark,
-                    "found unexpected ':'", self.get_mark(),
-                    "Please check http://pyyaml.org/wiki/YAMLColonInFlowContext for details.")
-            if length == 0:
-                break
-            self.allow_simple_key = False
-            chunks.extend(spaces)
-            chunks.append(self.prefix(length))
-            self.forward(length)
-            end_mark = self.get_mark()
-            spaces = self.scan_plain_spaces(indent, start_mark)
-            if not spaces or self.peek() == '#' \
-                    or (not self.flow_level and self.column < indent):
-                break
-        return ScalarToken(''.join(chunks), True, start_mark, end_mark)
-
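The flow-context restriction above, observable from the outside: in block context a ':' only ends a plain scalar when followed by whitespace, while a bare ':' inside a flow-context scalar is rejected. A sketch, assuming the PyYAML-compatible load() and the exported YAMLError:

    import ruamel.yaml as yaml

    assert yaml.load('key: a:b') == {'key': 'a:b'}  # block context: allowed
    try:
        yaml.load('{key: a:b}')  # flow context: ScannerError
    except yaml.YAMLError:
        pass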
-    def scan_plain_spaces(self, indent, start_mark):
-        # See the specification for details.
-        # The specification is really confusing about tabs in plain scalars.
-        # We just forbid them completely. Do not use tabs in YAML!
-        chunks = []
-        length = 0
-        while self.peek(length) in ' ':
-            length += 1
-        whitespaces = self.prefix(length)
-        self.forward(length)
-        ch = self.peek()
-        if ch in '\r\n\x85\u2028\u2029':
-            line_break = self.scan_line_break()
-            self.allow_simple_key = True
-            prefix = self.prefix(3)
-            if (prefix == '---' or prefix == '...')   \
-                    and self.peek(3) in '\0 \t\r\n\x85\u2028\u2029':
-                return
-            breaks = []
-            while self.peek() in ' \r\n\x85\u2028\u2029':
-                if self.peek() == ' ':
-                    self.forward()
-                else:
-                    breaks.append(self.scan_line_break())
-                    prefix = self.prefix(3)
-                    if (prefix == '---' or prefix == '...')   \
-                            and self.peek(3) in '\0 \t\r\n\x85\u2028\u2029':
-                        return
-            if line_break != '\n':
-                chunks.append(line_break)
-            elif not breaks:
-                chunks.append(' ')
-            chunks.extend(breaks)
-        elif whitespaces:
-            chunks.append(whitespaces)
-        return chunks
-
-    def scan_tag_handle(self, name, start_mark):
-        # See the specification for details.
-        # For some strange reason, the specification does not allow '_' in
-        # tag handles. I have allowed it anyway.
-        ch = self.peek()
-        if ch != '!':
-            raise ScannerError("while scanning a %s" % name, start_mark,
-                    "expected '!', but found %r" % ch, self.get_mark())
-        length = 1
-        ch = self.peek(length)
-        if ch != ' ':
-            while '0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z'  \
-                    or ch in '-_':
-                length += 1
-                ch = self.peek(length)
-            if ch != '!':
-                self.forward(length)
-                raise ScannerError("while scanning a %s" % name, start_mark,
-                        "expected '!', but found %r" % ch, self.get_mark())
-            length += 1
-        value = self.prefix(length)
-        self.forward(length)
-        return value
-
-    def scan_tag_uri(self, name, start_mark):
-        # See the specification for details.
-        # Note: we do not check whether the URI is well-formed.
-        chunks = []
-        length = 0
-        ch = self.peek(length)
-        while '0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z'  \
-                or ch in '-;/?:@&=+$,_.!~*\'()[]%':
-            if ch == '%':
-                chunks.append(self.prefix(length))
-                self.forward(length)
-                length = 0
-                chunks.append(self.scan_uri_escapes(name, start_mark))
-            else:
-                length += 1
-            ch = self.peek(length)
-        if length:
-            chunks.append(self.prefix(length))
-            self.forward(length)
-            length = 0
-        if not chunks:
-            raise ScannerError("while parsing a %s" % name, start_mark,
-                    "expected URI, but found %r" % ch, self.get_mark())
-        return ''.join(chunks)
-
-    def scan_uri_escapes(self, name, start_mark):
-        # See the specification for details.
-        codes = []
-        mark = self.get_mark()
-        while self.peek() == '%':
-            self.forward()
-            for k in range(2):
-                if self.peek(k) not in '0123456789ABCDEFabcdef':
-                    raise ScannerError("while scanning a %s" % name, start_mark,
-                            "expected URI escape sequence of 2 hexdecimal numbers, but found %r"
-                            % self.peek(k), self.get_mark())
-            codes.append(int(self.prefix(2), 16))
-            self.forward(2)
-        try:
-            value = bytes(codes).decode('utf-8')
-        except UnicodeDecodeError as exc:
-            raise ScannerError("while scanning a %s" % name, start_mark, str(exc), mark)
-        return value
-
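scan_uri_escapes in miniature: each '%XX' contributes one byte, and the accumulated bytes are decoded as UTF-8 only after the run of escapes ends, so a multi-byte character split across several escapes decodes correctly. Plain Python, no YAML involved:

    # '%C3%A9' is the percent-escaped UTF-8 encoding of 'é'.
    codes = [int('C3', 16), int('A9', 16)]
    assert bytes(codes).decode('utf-8') == '\xe9'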
-    def scan_line_break(self):
-        # Transforms:
-        #   '\r\n'      :   '\n'
-        #   '\r'        :   '\n'
-        #   '\n'        :   '\n'
-        #   '\x85'      :   '\n'
-        #   '\u2028'    :   '\u2028'
-        #   '\u2029'    :   '\u2029'
-        #   default     :   ''
-        ch = self.peek()
-        if ch in '\r\n\x85':
-            if self.prefix(2) == '\r\n':
-                self.forward(2)
-            else:
-                self.forward()
-            return '\n'
-        elif ch in '\u2028\u2029':
-            self.forward()
-            return ch
-        return ''
-
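The normalization table above, checked end to end (assuming the PyYAML-style load()): CR, CRLF and NEL in the input all come out as '\n' in the scalar value.

    import ruamel.yaml as yaml

    # '\r\n' and a lone '\r' both normalize to '\n' inside the literal
    # block scalar; clipping leaves a single trailing newline.
    assert yaml.load('|\n  a\r\n  b\r') == 'a\nb\n'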
-#try:
-#    import psyco
-#    psyco.bind(Scanner)
-#except ImportError:
-#    pass
-
diff --git a/lib/spack/external/yaml/lib3/yaml/serializer.py b/lib/spack/external/yaml/lib3/yaml/serializer.py
deleted file mode 100644
index fe911e67ae..0000000000
--- a/lib/spack/external/yaml/lib3/yaml/serializer.py
+++ /dev/null
@@ -1,111 +0,0 @@
-
-__all__ = ['Serializer', 'SerializerError']
-
-from .error import YAMLError
-from .events import *
-from .nodes import *
-
-class SerializerError(YAMLError):
-    pass
-
-class Serializer:
-
-    ANCHOR_TEMPLATE = 'id%03d'
-
-    def __init__(self, encoding=None,
-            explicit_start=None, explicit_end=None, version=None, tags=None):
-        self.use_encoding = encoding
-        self.use_explicit_start = explicit_start
-        self.use_explicit_end = explicit_end
-        self.use_version = version
-        self.use_tags = tags
-        self.serialized_nodes = {}
-        self.anchors = {}
-        self.last_anchor_id = 0
-        self.closed = None
-
-    def open(self):
-        if self.closed is None:
-            self.emit(StreamStartEvent(encoding=self.use_encoding))
-            self.closed = False
-        elif self.closed:
-            raise SerializerError("serializer is closed")
-        else:
-            raise SerializerError("serializer is already opened")
-
-    def close(self):
-        if self.closed is None:
-            raise SerializerError("serializer is not opened")
-        elif not self.closed:
-            self.emit(StreamEndEvent())
-            self.closed = True
-
-    #def __del__(self):
-    #    self.close()
-
-    def serialize(self, node):
-        if self.closed is None:
-            raise SerializerError("serializer is not opened")
-        elif self.closed:
-            raise SerializerError("serializer is closed")
-        self.emit(DocumentStartEvent(explicit=self.use_explicit_start,
-            version=self.use_version, tags=self.use_tags))
-        self.anchor_node(node)
-        self.serialize_node(node, None, None)
-        self.emit(DocumentEndEvent(explicit=self.use_explicit_end))
-        self.serialized_nodes = {}
-        self.anchors = {}
-        self.last_anchor_id = 0
-
-    def anchor_node(self, node):
-        if node in self.anchors:
-            if self.anchors[node] is None:
-                self.anchors[node] = self.generate_anchor(node)
-        else:
-            self.anchors[node] = None
-            if isinstance(node, SequenceNode):
-                for item in node.value:
-                    self.anchor_node(item)
-            elif isinstance(node, MappingNode):
-                for key, value in node.value:
-                    self.anchor_node(key)
-                    self.anchor_node(value)
-
-    def generate_anchor(self, node):
-        self.last_anchor_id += 1
-        return self.ANCHOR_TEMPLATE % self.last_anchor_id
-
-    def serialize_node(self, node, parent, index):
-        alias = self.anchors[node]
-        if node in self.serialized_nodes:
-            self.emit(AliasEvent(alias))
-        else:
-            self.serialized_nodes[node] = True
-            self.descend_resolver(parent, index)
-            if isinstance(node, ScalarNode):
-                detected_tag = self.resolve(ScalarNode, node.value, (True, False))
-                default_tag = self.resolve(ScalarNode, node.value, (False, True))
-                implicit = (node.tag == detected_tag), (node.tag == default_tag)
-                self.emit(ScalarEvent(alias, node.tag, implicit, node.value,
-                    style=node.style))
-            elif isinstance(node, SequenceNode):
-                implicit = (node.tag
-                            == self.resolve(SequenceNode, node.value, True))
-                self.emit(SequenceStartEvent(alias, node.tag, implicit,
-                    flow_style=node.flow_style))
-                index = 0
-                for item in node.value:
-                    self.serialize_node(item, node, index)
-                    index += 1
-                self.emit(SequenceEndEvent())
-            elif isinstance(node, MappingNode):
-                implicit = (node.tag
-                            == self.resolve(MappingNode, node.value, True))
-                self.emit(MappingStartEvent(alias, node.tag, implicit,
-                    flow_style=node.flow_style))
-                for key, value in node.value:
-                    self.serialize_node(key, node, None)
-                    self.serialize_node(value, node, key)
-                self.emit(MappingEndEvent())
-            self.ascend_resolver()
-
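The anchor bookkeeping above is what turns shared nodes into '&id001'/'*id001' pairs on output, with names generated from ANCHOR_TEMPLATE; ruamel.yaml's serializer behaves the same way. A sketch assuming the PyYAML-compatible dump():

    import ruamel.yaml as yaml

    shared = {'x': [1, 2]}
    out = yaml.dump({'a': shared, 'b': shared})
    # anchor_node() visits the shared dict twice, so it gets an anchor
    # and the second occurrence is emitted as an alias:
    #   a: &id001 ...
    #   b: *id001
    assert '&id001' in out and '*id001' in out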
diff --git a/lib/spack/external/yaml/lib3/yaml/tokens.py b/lib/spack/external/yaml/lib3/yaml/tokens.py
deleted file mode 100644
index 4d0b48a394..0000000000
--- a/lib/spack/external/yaml/lib3/yaml/tokens.py
+++ /dev/null
@@ -1,104 +0,0 @@
-
-class Token(object):
-    def __init__(self, start_mark, end_mark):
-        self.start_mark = start_mark
-        self.end_mark = end_mark
-    def __repr__(self):
-        attributes = [key for key in self.__dict__
-                if not key.endswith('_mark')]
-        attributes.sort()
-        arguments = ', '.join(['%s=%r' % (key, getattr(self, key))
-                for key in attributes])
-        return '%s(%s)' % (self.__class__.__name__, arguments)
-
-#class BOMToken(Token):
-#    id = '<byte order mark>'
-
-class DirectiveToken(Token):
-    id = '<directive>'
-    def __init__(self, name, value, start_mark, end_mark):
-        self.name = name
-        self.value = value
-        self.start_mark = start_mark
-        self.end_mark = end_mark
-
-class DocumentStartToken(Token):
-    id = '<document start>'
-
-class DocumentEndToken(Token):
-    id = '<document end>'
-
-class StreamStartToken(Token):
-    id = '<stream start>'
-    def __init__(self, start_mark=None, end_mark=None,
-            encoding=None):
-        self.start_mark = start_mark
-        self.end_mark = end_mark
-        self.encoding = encoding
-
-class StreamEndToken(Token):
-    id = '<stream end>'
-
-class BlockSequenceStartToken(Token):
-    id = '<block sequence start>'
-
-class BlockMappingStartToken(Token):
-    id = '<block mapping start>'
-
-class BlockEndToken(Token):
-    id = '<block end>'
-
-class FlowSequenceStartToken(Token):
-    id = '['
-
-class FlowMappingStartToken(Token):
-    id = '{'
-
-class FlowSequenceEndToken(Token):
-    id = ']'
-
-class FlowMappingEndToken(Token):
-    id = '}'
-
-class KeyToken(Token):
-    id = '?'
-
-class ValueToken(Token):
-    id = ':'
-
-class BlockEntryToken(Token):
-    id = '-'
-
-class FlowEntryToken(Token):
-    id = ','
-
-class AliasToken(Token):
-    id = '<alias>'
-    def __init__(self, value, start_mark, end_mark):
-        self.value = value
-        self.start_mark = start_mark
-        self.end_mark = end_mark
-
-class AnchorToken(Token):
-    id = '<anchor>'
-    def __init__(self, value, start_mark, end_mark):
-        self.value = value
-        self.start_mark = start_mark
-        self.end_mark = end_mark
-
-class TagToken(Token):
-    id = '<tag>'
-    def __init__(self, value, start_mark, end_mark):
-        self.value = value
-        self.start_mark = start_mark
-        self.end_mark = end_mark
-
-class ScalarToken(Token):
-    id = '<scalar>'
-    def __init__(self, value, plain, start_mark, end_mark, style=None):
-        self.value = value
-        self.plain = plain
-        self.start_mark = start_mark
-        self.end_mark = end_mark
-        self.style = style
-
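These token classes are what the scanner emits; if this vendored ruamel.yaml keeps PyYAML's scan() entry point (its main.py appears to), the token stream can be inspected directly. A hedged sketch:

    import ruamel.yaml as yaml

    for token in yaml.scan('- a\n'):
        print(token)
    # StreamStartToken(encoding=None)
    # BlockSequenceStartToken()
    # BlockEntryToken()
    # ScalarToken(plain=True, style=None, value='a')
    # BlockEndToken()
    # StreamEndToken()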
diff --git a/lib/spack/spack/binary_distribution.py b/lib/spack/spack/binary_distribution.py
index c743be7261..669cdb78aa 100644
--- a/lib/spack/spack/binary_distribution.py
+++ b/lib/spack/spack/binary_distribution.py
@@ -31,7 +31,7 @@
 import hashlib
 from contextlib import closing
 
-import yaml
+import ruamel.yaml as yaml
 
 import llnl.util.tty as tty
 from llnl.util.filesystem import mkdirp, install_tree, get_filetype
diff --git a/lib/spack/spack/config.py b/lib/spack/spack/config.py
index eb00c2453c..c1216450b1 100644
--- a/lib/spack/spack/config.py
+++ b/lib/spack/spack/config.py
@@ -61,8 +61,8 @@
 from six import iteritems
 from ordereddict_backport import OrderedDict
 
-import yaml
-from yaml.error import MarkedYAMLError
+import ruamel.yaml as yaml
+from ruamel.yaml.error import MarkedYAMLError
 
 import llnl.util.lang
 import llnl.util.tty as tty
diff --git a/lib/spack/spack/database.py b/lib/spack/spack/database.py
index 4d82643b38..62bc61c647 100644
--- a/lib/spack/spack/database.py
+++ b/lib/spack/spack/database.py
@@ -48,7 +48,7 @@
 from six import string_types
 from six import iteritems
 
-from yaml.error import MarkedYAMLError, YAMLError
+from ruamel.yaml.error import MarkedYAMLError, YAMLError
 
 import llnl.util.tty as tty
 from llnl.util.filesystem import mkdirp
diff --git a/lib/spack/spack/directory_layout.py b/lib/spack/spack/directory_layout.py
index 6b4e9c7336..20e6f7d4ed 100644
--- a/lib/spack/spack/directory_layout.py
+++ b/lib/spack/spack/directory_layout.py
@@ -26,9 +26,10 @@
 import shutil
 import glob
 import tempfile
-import yaml
 import re
 
+import ruamel.yaml as yaml
+
 from llnl.util.filesystem import mkdirp
 
 import spack.config
diff --git a/lib/spack/spack/provider_index.py b/lib/spack/spack/provider_index.py
index 02e76182ac..c0265e3810 100644
--- a/lib/spack/spack/provider_index.py
+++ b/lib/spack/spack/provider_index.py
@@ -31,7 +31,7 @@
 
 import spack.error
 import spack.util.spack_yaml as syaml
-from yaml.error import MarkedYAMLError
+from ruamel.yaml.error import MarkedYAMLError
 
 
 class ProviderIndex(object):
diff --git a/lib/spack/spack/repo.py b/lib/spack/spack/repo.py
index 980fb0cbab..adde47da4c 100644
--- a/lib/spack/spack/repo.py
+++ b/lib/spack/spack/repo.py
@@ -42,7 +42,7 @@
 
 from types import ModuleType
 
-import yaml
+import ruamel.yaml as yaml
 
 import llnl.util.lang
 import llnl.util.tty as tty
diff --git a/lib/spack/spack/spec.py b/lib/spack/spack/spec.py
index 2d92d68d66..8bf230a576 100644
--- a/lib/spack/spack/spec.py
+++ b/lib/spack/spack/spec.py
@@ -139,7 +139,7 @@
 from spack.variant import DuplicateVariantError
 from spack.variant import UnsatisfiableVariantSpecError
 from spack.version import VersionList, VersionRange, Version, ver
-from yaml.error import MarkedYAMLError
+from ruamel.yaml.error import MarkedYAMLError
 
 __all__ = [
     'Spec',
@@ -1470,6 +1470,7 @@ def to_node_dict(self, hash_function=None):
                 v.yaml_entry() for _, v in self.variants.items()
             )
         )
+
         params.update(sorted(self.compiler_flags.items()))
         if params:
             d['parameters'] = params
@@ -1911,7 +1912,8 @@ def concretize(self, tests=False):
                 mvar.value = mvar.value + tuple(patches)
                 # FIXME: Monkey patches mvar to store patches order
                 p = getattr(mvar, '_patches_in_order_of_appearance', [])
-                mvar._patches_in_order_of_appearance = dedupe(p + patches)
+                mvar._patches_in_order_of_appearance = list(
+                    dedupe(p + patches))
 
         for s in self.traverse():
             if s.external_module:
diff --git a/lib/spack/spack/test/config.py b/lib/spack/spack/test/config.py
index e2aa3d7c66..796224853d 100644
--- a/lib/spack/spack/test/config.py
+++ b/lib/spack/spack/test/config.py
@@ -30,7 +30,7 @@
 from llnl.util.filesystem import touch, mkdirp
 
 import pytest
-import yaml
+import ruamel.yaml as yaml
 
 import spack.paths
 import spack.config
diff --git a/lib/spack/spack/test/conftest.py b/lib/spack/spack/test/conftest.py
index 5a2ee699e1..419cbec196 100644
--- a/lib/spack/spack/test/conftest.py
+++ b/lib/spack/spack/test/conftest.py
@@ -33,7 +33,7 @@
 import ordereddict_backport
 import py
 import pytest
-import yaml
+import ruamel.yaml as yaml
 
 from llnl.util.filesystem import remove_linked_tree
 
diff --git a/lib/spack/spack/test/modules/conftest.py b/lib/spack/spack/test/modules/conftest.py
index 5b52395270..1acf5d7e75 100644
--- a/lib/spack/spack/test/modules/conftest.py
+++ b/lib/spack/spack/test/modules/conftest.py
@@ -27,7 +27,7 @@
 import contextlib
 import inspect
 
-import yaml
+import ruamel.yaml as yaml
 import pytest
 from six import StringIO
 
diff --git a/lib/spack/spack/util/spack_yaml.py b/lib/spack/spack/util/spack_yaml.py
index c02f6c9d04..5492ebe26e 100644
--- a/lib/spack/spack/util/spack_yaml.py
+++ b/lib/spack/spack/util/spack_yaml.py
@@ -34,10 +34,10 @@
 from ordereddict_backport import OrderedDict
 from six import string_types, StringIO
 
-import yaml
-from yaml import Loader, Dumper
-from yaml.nodes import MappingNode, SequenceNode, ScalarNode
-from yaml.constructor import ConstructorError
+import ruamel.yaml as yaml
+from ruamel.yaml import Loader, Dumper
+from ruamel.yaml.nodes import MappingNode, SequenceNode, ScalarNode
+from ruamel.yaml.constructor import ConstructorError
 
 from llnl.util.tty.color import colorize, clen, cextra
 
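The payoff of the whole switch, in one example: the round-trip loader/dumper pair preserves comments across a load/modify/dump cycle, which PyYAML cannot do. A sketch assuming this vendored copy exposes the pre-0.15 RoundTripLoader/RoundTripDumper names, as ruamel.yaml of this vintage does; the config content here is made up:

    import ruamel.yaml as yaml

    src = ("packages:\n"
           "  # pinned for the security fix\n"
           "  openssl:\n"
           "    version: [1.0.2o]\n")
    data = yaml.load(src, Loader=yaml.RoundTripLoader)
    data['packages']['openssl']['version'] = ['1.0.2o', '1.1.0h']
    out = yaml.dump(data, Dumper=yaml.RoundTripDumper)
    assert '# pinned for the security fix' in out  # the comment survives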
-- 
GitLab