diff --git a/lib/spack/llnl/util/filesystem.py b/lib/spack/llnl/util/filesystem.py
index 029a7536dfb4a20b90c8b78a4fb55e2a45b2f3a2..03f25d3dff7e4dcb033908b603d077b296e90356 100644
--- a/lib/spack/llnl/util/filesystem.py
+++ b/lib/spack/llnl/util/filesystem.py
@@ -222,7 +222,7 @@ def working_dir(dirname, **kwargs):
 
 def touch(path):
     """Creates an empty file at the specified path."""
-    with closing(open(path, 'a')) as file:
+    with open(path, 'a') as file:
         os.utime(path, None)
 
 
diff --git a/lib/spack/llnl/util/lock.py b/lib/spack/llnl/util/lock.py
new file mode 100644
index 0000000000000000000000000000000000000000..ac3684bd5575451965fba37661b89af9738efadd
--- /dev/null
+++ b/lib/spack/llnl/util/lock.py
@@ -0,0 +1,175 @@
+##############################################################################
+# Copyright (c) 2013-2015, Lawrence Livermore National Security, LLC.
+# Produced at the Lawrence Livermore National Laboratory.
+#
+# This file is part of Spack.
+# Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
+# LLNL-CODE-647188
+#
+# For details, see https://scalability-llnl.github.io/spack
+# Please also see the LICENSE file for our notice and the LGPL.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License (as published by
+# the Free Software Foundation) version 2.1 dated February 1999.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
+# conditions of the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+##############################################################################
+import os
+import fcntl
+import errno
+import time
+import socket
+
+# Default timeout in seconds, after which locks will raise exceptions.
+_default_timeout = 60
+
+# Sleep time per iteration in spin loop (in seconds)
+_sleep_time = 1e-5
+
+
+class Lock(object):
+    def __init__(self, file_path):
+        self._file_path = file_path
+        self._fd = None
+        self._reads = 0
+        self._writes = 0
+
+
+    def _lock(self, op, timeout):
+        """This takes a lock using POSIX locks (``fnctl.lockf``).
+
+        The lock is implemented as a spin lock using a nonblocking
+        call to lockf().
+
+        On acquiring an exclusive lock, the lock writes this process's
+        pid and host to the lock file, in case the holding process
+        needs to be killed later.
+
+        If the lock times out, it raises a ``LockError``.
+        """
+        start_time = time.time()
+        while (time.time() - start_time) < timeout:
+            try:
+                if self._fd is None:
+                    self._fd = os.open(self._file_path, os.O_RDWR)
+
+                fcntl.lockf(self._fd, op | fcntl.LOCK_NB)
+                if op == fcntl.LOCK_EX:
+                    os.write(self._fd, "pid=%s,host=%s" % (os.getpid(), socket.getfqdn()))
+                return
+
+            except IOError as error:
+                if error.errno == errno.EAGAIN or error.errno == errno.EACCES:
+                    pass
+                else:
+                    raise
+            time.sleep(_sleep_time)
+
+        raise LockError("Timed out waiting for lock.")
+
+
+    def _unlock(self):
+        """Releases a lock using POSIX locks (``fcntl.lockf``)
+
+        Releases the lock regardless of mode. Note that read locks may
+        be masquerading as write locks, but this removes either.
+
+        """
+        fcntl.lockf(self._fd, fcntl.LOCK_UN)
+        os.close(self._fd)
+        self._fd = None
+
+
+    def acquire_read(self, timeout=_default_timeout):
+        """Acquires a recursive, shared lock for reading.
+
+        Read and write locks can be acquired and released in arbitrary
+        order, but the POSIX lock is held until all local read and
+        write locks are released.
+
+        Returns True if it is the first acquire and actually acquires
+        the POSIX lock, False if it is a nested transaction.
+
+        """
+        if self._reads == 0 and self._writes == 0:
+            self._lock(fcntl.LOCK_SH, timeout)   # can raise LockError.
+            self._reads += 1
+            return True
+        else:
+            self._reads += 1
+            return False
+
+
+    def acquire_write(self, timeout=_default_timeout):
+        """Acquires a recursive, exclusive lock for writing.
+
+        Read and write locks can be acquired and released in arbitrary
+        order, but the POSIX lock is held until all local read and
+        write locks are released.
+
+        Returns True if it is the first acquire and actually acquires
+        the POSIX lock, False if it is a nested transaction.
+
+        """
+        if self._writes == 0:
+            self._lock(fcntl.LOCK_EX, timeout)   # can raise LockError.
+            self._writes += 1
+            return True
+        else:
+            self._writes += 1
+            return False
+
+
+    def release_read(self):
+        """Releases a read lock.
+
+        Returns True if the last recursive lock was released, False if
+        there are still outstanding locks.
+
+        Does limited correctness checking: if a read lock is released
+        when none are held, this will raise an assertion error.
+
+        """
+        assert self._reads > 0
+
+        if self._reads == 1 and self._writes == 0:
+            self._unlock()      # can raise LockError.
+            self._reads -= 1
+            return True
+        else:
+            self._reads -= 1
+            return False
+
+
+    def release_write(self):
+        """Releases a write lock.
+
+        Returns True if the last recursive lock was released, False if
+        there are still outstanding locks.
+
+        Does limited correctness checking: if a write lock is released
+        when none are held, this will raise an assertion error.
+
+        """
+        assert self._writes > 0
+
+        if self._writes == 1 and self._reads == 0:
+            self._unlock()      # can raise LockError.
+            self._writes -= 1
+            return True
+        else:
+            self._writes -= 1
+            return False
+
+
+class LockError(Exception):
+    """Raised when an attempt to acquire a lock times out."""
+    pass
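
The Lock class above is reference-counted within a process: only the first
acquire takes the POSIX lock, and only the last release drops it. A minimal
usage sketch, assuming the lock file already exists (Lock opens it with
os.O_RDWR and never creates it; the path here is hypothetical):

    from llnl.util.lock import Lock, LockError

    lock = Lock('/tmp/example.lock')       # hypothetical, pre-created file

    assert lock.acquire_read() is True     # takes the shared POSIX lock
    assert lock.acquire_read() is False    # nested: just bumps a counter
    try:
        # Upgrades the fd to LOCK_EX; spins until free or until timeout.
        lock.acquire_write(timeout=1)
        lock.release_write()               # reads outstanding: no unlock yet
    except LockError:
        pass                               # another process held it too long
    assert lock.release_read() is False
    assert lock.release_read() is True     # POSIX lock actually released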
diff --git a/lib/spack/spack/__init__.py b/lib/spack/spack/__init__.py
index 6e8d41895fbc116d7da0bb132925fed427a972d8..1ecf662178cd13707d5a3e54ee9e46f21d2c0d76 100644
--- a/lib/spack/spack/__init__.py
+++ b/lib/spack/spack/__init__.py
@@ -55,6 +55,12 @@
 packages_path = join_path(var_path, "packages")
 db = PackageDB(packages_path)
 
+#
+# Set up the installed packages database
+#
+from spack.database import Database
+installed_db = Database(install_path)
+
 #
 # Paths to mock files for testing.
 #
diff --git a/lib/spack/spack/cmd/__init__.py b/lib/spack/spack/cmd/__init__.py
index b96ac5af510a1f46c1aa3bea5692e00fb6f110e1..6ce6fa0960c178ee59aca3a4eec4311dbf94d6bb 100644
--- a/lib/spack/spack/cmd/__init__.py
+++ b/lib/spack/spack/cmd/__init__.py
@@ -124,7 +124,7 @@ def elide_list(line_list, max_num=10):
 
 
 def disambiguate_spec(spec):
-    matching_specs = spack.db.get_installed(spec)
+    matching_specs = spack.installed_db.query(spec)
     if not matching_specs:
         tty.die("Spec '%s' matches no installed packages." % spec)
 
diff --git a/lib/spack/spack/cmd/deactivate.py b/lib/spack/spack/cmd/deactivate.py
index e44be41029d106182045725073d5e26f2e7bbd96..1f0e303cdf2813fda794ec06b64d40460b03c678 100644
--- a/lib/spack/spack/cmd/deactivate.py
+++ b/lib/spack/spack/cmd/deactivate.py
@@ -54,7 +54,7 @@ def deactivate(parser, args):
     if args.all:
         if pkg.extendable:
             tty.msg("Deactivating all extensions of %s" % pkg.spec.short_spec)
-            ext_pkgs = spack.db.installed_extensions_for(spec)
+            ext_pkgs = spack.installed_db.installed_extensions_for(spec)
 
             for ext_pkg in ext_pkgs:
                 ext_pkg.spec.normalize()
diff --git a/lib/spack/spack/cmd/diy.py b/lib/spack/spack/cmd/diy.py
index 6e7f10fba63ca03e420fe5b5c5701a8956146bcf..f7998720ac40354e40201f952a701aa26ba6c0c3 100644
--- a/lib/spack/spack/cmd/diy.py
+++ b/lib/spack/spack/cmd/diy.py
@@ -58,36 +58,38 @@ def diy(self, args):
     if len(specs) > 1:
         tty.die("spack diy only takes one spec.")
 
-    spec = specs[0]
-    if not spack.db.exists(spec.name):
-        tty.warn("No such package: %s" % spec.name)
-        create = tty.get_yes_or_no("Create this package?", default=False)
-        if not create:
-            tty.msg("Exiting without creating.")
-            sys.exit(1)
-        else:
-            tty.msg("Running 'spack edit -f %s'" % spec.name)
-            edit_package(spec.name, True)
-            return
+    # Take a write lock before checking for existence.
+    with spack.installed_db.write_transaction():
+        spec = specs[0]
+        if not spack.db.exists(spec.name):
+            tty.warn("No such package: %s" % spec.name)
+            create = tty.get_yes_or_no("Create this package?", default=False)
+            if not create:
+                tty.msg("Exiting without creating.")
+                sys.exit(1)
+            else:
+                tty.msg("Running 'spack edit -f %s'" % spec.name)
+                edit_package(spec.name, True)
+                return
 
-    if not spec.version.concrete:
-        tty.die("spack diy spec must have a single, concrete version.")
+        if not spec.version.concrete:
+            tty.die("spack diy spec must have a single, concrete version.")
 
-    spec.concretize()
-    package = spack.db.get(spec)
+        spec.concretize()
+        package = spack.db.get(spec)
 
-    if package.installed:
-        tty.error("Already installed in %s" % package.prefix)
-        tty.msg("Uninstall or try adding a version suffix for this DIY build.")
-        sys.exit(1)
+        if package.installed:
+            tty.error("Already installed in %s" % package.prefix)
+            tty.msg("Uninstall or try adding a version suffix for this DIY build.")
+            sys.exit(1)
 
-    # Forces the build to run out of the current directory.
-    package.stage = DIYStage(os.getcwd())
+        # Forces the build to run out of the current directory.
+        package.stage = DIYStage(os.getcwd())
 
-    # TODO: make this an argument, not a global.
-    spack.do_checksum = False
+        # TODO: make this an argument, not a global.
+        spack.do_checksum = False
 
-    package.do_install(
-        keep_prefix=args.keep_prefix,
-        ignore_deps=args.ignore_deps,
-        keep_stage=True)   # don't remove source dir for DIY.
+        package.do_install(
+            keep_prefix=args.keep_prefix,
+            ignore_deps=args.ignore_deps,
+            keep_stage=True)   # don't remove source dir for DIY.
diff --git a/lib/spack/spack/cmd/extensions.py b/lib/spack/spack/cmd/extensions.py
index fc8e6842c3bb2bc7199c82a231decd9ffde262fe..7cadc424b000827d6997c1d52c4825db7e55e8ff 100644
--- a/lib/spack/spack/cmd/extensions.py
+++ b/lib/spack/spack/cmd/extensions.py
@@ -80,7 +80,7 @@ def extensions(parser, args):
     colify(ext.name for ext in extensions)
 
     # List specs of installed extensions.
-    installed  = [s.spec for s in spack.db.installed_extensions_for(spec)]
+    installed = [s.spec for s in spack.installed_db.installed_extensions_for(spec)]
     print
     if not installed:
         tty.msg("None installed.")
diff --git a/lib/spack/spack/cmd/find.py b/lib/spack/spack/cmd/find.py
index 3c993990b14fff9db827ae1072c814b189594c55..0b0dd6ef6fc20cc029366f1cdb8f02fbe01acc2e 100644
--- a/lib/spack/spack/cmd/find.py
+++ b/lib/spack/spack/cmd/find.py
@@ -54,6 +54,16 @@ def setup_parser(subparser):
         '-L', '--very-long', action='store_true', dest='very_long',
         help='Show dependency hashes as well as versions.')
 
+    subparser.add_argument(
+        '-u', '--unknown', action='store_true', dest='unknown',
+        help='Show only specs Spack does not have a package for.')
+    subparser.add_argument(
+        '-m', '--missing', action='store_true', dest='missing',
+        help='Show missing dependencies as well as installed specs.')
+    subparser.add_argument(
+        '-M', '--only-missing', action='store_true', dest='only_missing',
+        help='Show only missing dependencies.')
+
     subparser.add_argument(
         'query_specs', nargs=argparse.REMAINDER,
         help='optional specs to filter results')
@@ -113,6 +123,7 @@ def fmt(s):
                 if hashes:
                     string += gray_hash(s, hlen) + ' '
                 string += s.format('$-_$@$+', color=True)
+
                 return string
             colify(fmt(s) for s in specs)
 
@@ -136,11 +147,21 @@ def find(parser, args):
         if not query_specs:
             return
 
+    # Set up query arguments.
+    installed, known = True, any
+    if args.only_missing:
+        installed = False
+    elif args.missing:
+        installed = any
+    if args.unknown:
+        known = False
+    q_args = { 'installed' : installed, 'known' : known }
+
     # Get all the specs the user asked for
     if not query_specs:
-        specs = set(spack.db.installed_package_specs())
+        specs = set(spack.installed_db.query(**q_args))
     else:
-        results = [set(spack.db.get_installed(qs)) for qs in query_specs]
+        results = [set(spack.installed_db.query(qs, **q_args)) for qs in query_specs]
         specs = set.union(*results)
 
     if not args.mode:
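
For reference, the new flags just relax the two constraints accepted by
Database.query(), added below: --missing sets installed=any, --only-missing
sets installed=False, and --unknown sets known=False. The equivalent direct
call, with an illustrative spec string:

    import spack

    # `spack find -m libelf`: also include records whose install prefix
    # is gone but which other installed specs still reference.
    specs = spack.installed_db.query('libelf', installed=any, known=any)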
diff --git a/lib/spack/spack/cmd/fsck.py b/lib/spack/spack/cmd/fsck.py
new file mode 100644
index 0000000000000000000000000000000000000000..9a3c450dcf9d4a43d1767616cf2c0faa332060af
--- /dev/null
+++ b/lib/spack/spack/cmd/fsck.py
@@ -0,0 +1,32 @@
+##############################################################################
+# Copyright (c) 2013-2015, Lawrence Livermore National Security, LLC.
+# Produced at the Lawrence Livermore National Laboratory.
+#
+# This file is part of Spack.
+# Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
+# LLNL-CODE-647188
+#
+# For details, see https://scalability-llnl.github.io/spack
+# Please also see the LICENSE file for our notice and the LGPL.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License (as published by
+# the Free Software Foundation) version 2.1 dated February 1999.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
+# conditions of the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+##############################################################################
+from external import argparse
+import spack
+
+description = "Correct database irregularities"
+
+# Very basic version of spack fsck
+def fsck(parser, args):
+    spack.installed_db.reindex(spack.install_layout)
diff --git a/lib/spack/spack/cmd/install.py b/lib/spack/spack/cmd/install.py
index acb688a0923486e47b8eb94ec0c21c515ff7dea4..ba824bd658398e21746b32c9bb9cdaf4ed8510a9 100644
--- a/lib/spack/spack/cmd/install.py
+++ b/lib/spack/spack/cmd/install.py
@@ -71,10 +71,11 @@ def install(parser, args):
     specs = spack.cmd.parse_specs(args.packages, concretize=True)
     for spec in specs:
         package = spack.db.get(spec)
-        package.do_install(
-            keep_prefix=args.keep_prefix,
-            keep_stage=args.keep_stage,
-            ignore_deps=args.ignore_deps,
-            make_jobs=args.jobs,
-            verbose=args.verbose,
-            fake=args.fake)
+        with spack.installed_db.write_transaction():
+            package.do_install(
+                keep_prefix=args.keep_prefix,
+                keep_stage=args.keep_stage,
+                ignore_deps=args.ignore_deps,
+                make_jobs=args.jobs,
+                verbose=args.verbose,
+                fake=args.fake)
diff --git a/lib/spack/spack/cmd/module.py b/lib/spack/spack/cmd/module.py
index 34f0855a50bff34ef12a5a8f45d0d7a993620354..654b0cb2fa77e91a547090a5ba87da67858ea3e7 100644
--- a/lib/spack/spack/cmd/module.py
+++ b/lib/spack/spack/cmd/module.py
@@ -65,7 +65,7 @@ def module_find(mtype, spec_array):
         tty.die("You can only pass one spec.")
     spec = specs[0]
 
-    specs = [s for s in spack.db.installed_package_specs() if s.satisfies(spec)]
+    specs = spack.installed_db.query(spec)
     if len(specs) == 0:
         tty.die("No installed packages match spec %s" % spec)
 
@@ -86,7 +86,7 @@ def module_find(mtype, spec_array):
 def module_refresh():
     """Regenerate all module files for installed packages known to
        spack (some packages may no longer exist)."""
-    specs = [s for s in spack.db.installed_known_package_specs()]
+    specs = [s for s in spack.installed_db.query(installed=True, known=True)]
 
     for name, cls in module_types.items():
         tty.msg("Regenerating %s module files." % name)
diff --git a/lib/spack/spack/cmd/uninstall.py b/lib/spack/spack/cmd/uninstall.py
index aa62510fede4e50d1d480284e1e6c323be417e24..1dae84444ab6dd284ac263c83a9e2ffba8a81d81 100644
--- a/lib/spack/spack/cmd/uninstall.py
+++ b/lib/spack/spack/cmd/uninstall.py
@@ -53,51 +53,52 @@ def uninstall(parser, args):
     if not args.packages:
         tty.die("uninstall requires at least one package argument.")
 
-    specs = spack.cmd.parse_specs(args.packages)
+    with spack.installed_db.write_transaction():
+        specs = spack.cmd.parse_specs(args.packages)
 
-    # For each spec provided, make sure it refers to only one package.
-    # Fail and ask user to be unambiguous if it doesn't
-    pkgs = []
-    for spec in specs:
-        matching_specs = spack.db.get_installed(spec)
-        if not args.all and len(matching_specs) > 1:
-            tty.error("%s matches multiple packages:" % spec)
-            print
-            display_specs(matching_specs, long=True)
-            print
-            print "You can either:"
-            print "  a) Use a more specific spec, or"
-            print "  b) use spack uninstall -a to uninstall ALL matching specs."
-            sys.exit(1)
+        # For each spec provided, make sure it refers to only one package.
+        # Fail and ask user to be unambiguous if it doesn't
+        pkgs = []
+        for spec in specs:
+            matching_specs = spack.installed_db.query(spec)
+            if not args.all and len(matching_specs) > 1:
+                tty.error("%s matches multiple packages:" % spec)
+                print
+                display_specs(matching_specs, long=True)
+                print
+                print "You can either:"
+                print "  a) Use a more specific spec, or"
+                print "  b) use spack uninstall -a to uninstall ALL matching specs."
+                sys.exit(1)
 
-        if len(matching_specs) == 0:
-            if args.force: continue
-            tty.die("%s does not match any installed packages." % spec)
+            if len(matching_specs) == 0:
+                if args.force: continue
+                tty.die("%s does not match any installed packages." % spec)
 
-        for s in matching_specs:
-            try:
-                # should work if package is known to spack
-                pkgs.append(s.package)
+            for s in matching_specs:
+                try:
+                    # should work if package is known to spack
+                    pkgs.append(s.package)
 
-            except spack.packages.UnknownPackageError, e:
-                # The package.py file has gone away -- but still want to uninstall.
-                spack.Package(s).do_uninstall(force=True)
+                except spack.packages.UnknownPackageError, e:
+                    # The package.py file has gone away -- but still want to uninstall.
+                    spack.Package(s).do_uninstall(force=True)
 
-    # Sort packages to be uninstalled by the number of installed dependents
-    # This ensures we do things in the right order
-    def num_installed_deps(pkg):
-        return len(pkg.installed_dependents)
-    pkgs.sort(key=num_installed_deps)
+        # Sort packages to be uninstalled by the number of installed dependents
+        # This ensures we do things in the right order
+        def num_installed_deps(pkg):
+            return len(pkg.installed_dependents)
+        pkgs.sort(key=num_installed_deps)
 
-    # Uninstall packages in order now.
-    for pkg in pkgs:
-        try:
-            pkg.do_uninstall(force=args.force)
-        except PackageStillNeededError, e:
-            tty.error("Will not uninstall %s" % e.spec.format("$_$@$%@$#", color=True))
-            print
-            print "The following packages depend on it:"
-            display_specs(e.dependents, long=True)
-            print
-            print "You can use spack uninstall -f to force this action."
-            sys.exit(1)
+        # Uninstall packages in order now.
+        for pkg in pkgs:
+            try:
+                pkg.do_uninstall(force=args.force)
+            except PackageStillNeededError, e:
+                tty.error("Will not uninstall %s" % e.spec.format("$_$@$%@$#", color=True))
+                print
+                print "The following packages depend on it:"
+                display_specs(e.dependents, long=True)
+                print
+                print "You can use spack uninstall -f to force this action."
+                sys.exit(1)
diff --git a/lib/spack/spack/database.py b/lib/spack/spack/database.py
new file mode 100644
index 0000000000000000000000000000000000000000..9ce00a45e96ed078cab4d07ae8a6740d11c39063
--- /dev/null
+++ b/lib/spack/spack/database.py
@@ -0,0 +1,637 @@
+##############################################################################
+# Copyright (c) 2013-2015, Lawrence Livermore National Security, LLC.
+# Produced at the Lawrence Livermore National Laboratory.
+#
+# This file is part of Spack.
+# Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
+# LLNL-CODE-647188
+#
+# For details, see https://scalability-llnl.github.io/spack
+# Please also see the LICENSE file for our notice and the LGPL.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License (as published by
+# the Free Software Foundation) version 2.1 dated February 1999.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
+# conditions of the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+##############################################################################
+"""Spack's installation tracking database.
+
+The database serves two purposes:
+
+  1. It implements a cache on top of a potentially very large Spack
+     directory hierarchy, speeding up many operations that would
+     otherwise require filesystem access.
+
+  2. It will allow us to track external installations as well as lost
+     packages and their dependencies.
+
+Prior to the implementation of this store, a directory layout served
+as the authoritative database of packages in Spack.  This module
+provides a cache and a sanity checking mechanism for what is in the
+filesystem.
+
+"""
+import os
+import time
+import socket
+
+from external import yaml
+from external.yaml.error import MarkedYAMLError, YAMLError
+
+import llnl.util.tty as tty
+from llnl.util.filesystem import *
+from llnl.util.lock import *
+
+import spack.spec
+from spack.version import Version
+from spack.spec import Spec
+from spack.error import SpackError
+
+# DB goes in this directory underneath the root
+_db_dirname = '.spack-db'
+
+# DB version.  This is stuck in the DB file to track changes in format.
+_db_version = Version('0.9')
+
+# Default timeout for spack database locks, in seconds.
+_db_lock_timeout = 60
+
+
+def _autospec(function):
+    """Decorator that automatically converts the argument of a single-arg
+       function to a Spec."""
+    def converter(self, spec_like, *args, **kwargs):
+        if not isinstance(spec_like, spack.spec.Spec):
+            spec_like = spack.spec.Spec(spec_like)
+        return function(self, spec_like, *args, **kwargs)
+    return converter
+
+
+class InstallRecord(object):
+    """A record represents one installation in the DB.
+
+    The record keeps track of the spec for the installation, its
+    install path, AND whether or not it is installed.  We need the
+    installed flag in case a user either:
+
+        a) blew away a directory, or
+        b) used spack uninstall -f to get rid of it
+
+    If, in either case, the package was removed but others still
+    depend on it, we still need to track its spec, so we don't
+    actually remove from the database until a spec has no installed
+    dependents left.
+
+    """
+    def __init__(self, spec, path, installed, ref_count=0):
+        self.spec = spec
+        self.path = path
+        self.installed = installed
+        self.ref_count = ref_count
+
+    def to_dict(self):
+        return { 'spec'      : self.spec.to_node_dict(),
+                 'path'      : self.path,
+                 'installed' : self.installed,
+                 'ref_count' : self.ref_count }
+
+    @classmethod
+    def from_dict(cls, spec, dictionary):
+        d = dictionary
+        return InstallRecord(spec, d['path'], d['installed'], d['ref_count'])
+
+
+class Database(object):
+    def __init__(self, root, db_dir=None):
+        """Create a Database for Spack installations under ``root``.
+
+        A Database is a cache of Specs data from ``$prefix/spec.yaml``
+        files in Spack installation directories.
+
+        By default, Database files (data and lock files) are stored
+        under ``root/.spack-db``, which is created if it does not
+        exist.  This is the ``db_dir``.
+
+        The Database will attempt to read an ``index.yaml`` file in
+        ``db_dir``.  If it does not find one, it will be created when
+        needed by scanning the entire Database root for ``spec.yaml``
+        files according to Spack's ``DirectoryLayout``.
+
+        Caller may optionally provide a custom ``db_dir`` parameter
+        where data will be stored.  This is intended to be used for
+        testing the Database class.
+
+        """
+        self.root = root
+
+        if db_dir is None:
+            # If the db_dir is not provided, default to within the db root.
+            self._db_dir = join_path(self.root, _db_dirname)
+        else:
+            # Allow customizing the database directory location for testing.
+            self._db_dir = db_dir
+
+        # Set up layout of database files within the db dir
+        self._index_path = join_path(self._db_dir, 'index.yaml')
+        self._lock_path  = join_path(self._db_dir, 'lock')
+
+        # Create needed directories and files
+        if not os.path.exists(self._db_dir):
+            mkdirp(self._db_dir)
+
+        if not os.path.exists(self._lock_path):
+            touch(self._lock_path)
+
+        # initialize rest of state.
+        self.lock = Lock(self._lock_path)
+        self._data = {}
+
+
+    def write_transaction(self, timeout=_db_lock_timeout):
+        """Get a write lock context manager for use in a `with` block."""
+        return WriteTransaction(self, self._read, self._write, timeout)
+
+
+    def read_transaction(self, timeout=_db_lock_timeout):
+        """Get a read lock context manager for use in a `with` block."""
+        return ReadTransaction(self, self._read, None, timeout)
+
+
+    def _write_to_yaml(self, stream):
+        """Write out the databsae to a YAML file.
+
+        This function does not do any locking or transactions.
+        """
+        # map from per-spec hash code to installation record.
+        installs = dict((k, v.to_dict()) for k, v in self._data.items())
+
+        # database includes installation list and version.
+
+        # NOTE: this DB version does not handle multiple installs of
+        # the same spec well.  If there are 2 identical specs with
+        # different paths, it can't differentiate.
+        # TODO: fix this before we support multiple install locations.
+        database = {
+            'database' : {
+                'installs' : installs,
+                'version' : str(_db_version)
+            }
+        }
+
+        try:
+            return yaml.dump(database, stream=stream, default_flow_style=False)
+        except YAMLError as e:
+            raise SpackYAMLError("error writing YAML database:", str(e))
+
+
+    def _read_spec_from_yaml(self, hash_key, installs, parent_key=None):
+        """Recursively construct a spec from a hash in a YAML database.
+
+        Does not do any locking.
+        """
+        if hash_key not in installs:
+            raise CorruptDatabaseError(self._index_path, "missing spec %s (needed by %s)" % (hash_key, parent_key))
+
+        spec_dict = installs[hash_key]['spec']
+
+        # Build spec from dict first.
+        spec = Spec.from_node_dict(spec_dict)
+
+        # Add dependencies from other records in the install DB to
+        # form a full spec.
+        for dep_hash in spec_dict[spec.name]['dependencies'].values():
+            child = self._read_spec_from_yaml(dep_hash, installs, hash_key)
+            spec._add_dependency(child)
+
+        return spec
+
+
+    def _read_from_yaml(self, stream):
+        """
+        Fill database from YAML, do not maintain old data
+        Translate the spec portions from node-dict form to spec form
+
+        Does not do any locking.
+        """
+        try:
+            if isinstance(stream, basestring):
+                with open(stream, 'r') as f:
+                    yfile = yaml.load(f)
+            else:
+                yfile = yaml.load(stream)
+
+        except MarkedYAMLError as e:
+            raise SpackYAMLError("error parsing YAML database:", str(e))
+
+        if yfile is None:
+            return
+
+        def check(cond, msg):
+            if not cond: raise CorruptDatabaseError(self._index_path, msg)
+
+        check('database' in yfile, "No 'database' attribute in YAML.")
+
+        # High-level file checks
+        db = yfile['database']
+        check('installs' in db, "No 'installs' in YAML DB.")
+        check('version'  in db, "No 'version' in YAML DB.")
+
+        # TODO: better version checking semantics.
+        version = Version(db['version'])
+        if version != _db_version:
+            raise InvalidDatabaseVersionError(_db_version, version)
+
+        # Iterate through database and check each record.
+        installs = db['installs']
+        data = {}
+        for hash_key, rec in installs.items():
+            try:
+                # This constructs a spec DAG from the list of all installs
+                spec = self._read_spec_from_yaml(hash_key, installs)
+
+                # Validate the spec by ensuring the stored and actual
+                # hashes are the same.
+                spec_hash = spec.dag_hash()
+                if not spec_hash == hash_key:
+                    tty.warn("Hash mismatch in database: %s -> spec with hash %s"
+                             % (hash_key, spec_hash))
+                    continue    # TODO: is skipping the right thing to do?
+
+                # Insert the brand new spec in the database.  Each
+                # spec has its own copies of its dependency specs.
+                # TODO: would a more immutable spec implementation simplify this?
+                data[hash_key] = InstallRecord.from_dict(spec, rec)
+
+            except Exception as e:
+                tty.warn("Invalid database reecord:",
+                         "file:  %s" % self._index_path,
+                         "hash:  %s" % hash_key,
+                         "cause: %s" % str(e))
+                raise
+
+        self._data = data
+
+
+    def reindex(self, directory_layout):
+        """Build database index from scratch based from a directory layout.
+
+        Locks the DB if it isn't locked already.
+
+        """
+        with self.write_transaction():
+            old_data = self._data
+            try:
+                self._data = {}
+
+                # Ask the directory layout to traverse the filesystem.
+                for spec in directory_layout.all_specs():
+                    # Create a spec for each known package and add it.
+                    path = directory_layout.path_for_spec(spec)
+                    self._add(spec, path, directory_layout)
+
+                self._check_ref_counts()
+
+            except:
+                # If anything explodes, restore old data, skip write.
+                self._data = old_data
+                raise
+
+
+    def _check_ref_counts(self):
+        """Ensure consistency of reference counts in the DB.
+
+        Raise an AssertionError if something is amiss.
+
+        Does no locking.
+        """
+        counts = {}
+        for key, rec in self._data.items():
+            counts.setdefault(key, 0)
+            for dep in rec.spec.dependencies.values():
+                dep_key = dep.dag_hash()
+                counts.setdefault(dep_key, 0)
+                counts[dep_key] += 1
+
+        for rec in self._data.values():
+            key = rec.spec.dag_hash()
+            expected = counts[key]
+            found = rec.ref_count
+            if not expected == found:
+                raise AssertionError(
+                    "Invalid ref_count: %s: %d (expected %d), in DB %s."
+                    % (key, found, expected, self._index_path))
+
+
+    def _write(self):
+        """Write the in-memory database index to its file path.
+
+        Does no locking.
+
+        """
+        temp_name = '%s.%s.temp' % (socket.getfqdn(), os.getpid())
+        temp_file = join_path(self._db_dir, temp_name)
+
+        # Write a temporary database file, then move it into place.
+        try:
+            with open(temp_file, 'w') as f:
+                self._write_to_yaml(f)
+            os.rename(temp_file, self._index_path)
+
+        except:
+            # Clean up temp file if something goes wrong.
+            if os.path.exists(temp_file):
+                os.remove(temp_file)
+            raise
+
+
+    def _read(self):
+        """Re-read Database from the data in the set location.
+
+        This does no locking.
+        """
+        if os.path.isfile(self._index_path):
+            # Read from YAML file if a database exists
+            self._read_from_yaml(self._index_path)
+
+        else:
+            # The file doesn't exist, try to traverse the directory.
+            # reindex() takes its own write lock, so no lock here.
+            self.reindex(spack.install_layout)
+
+
+    def read(self):
+        with self.read_transaction(): pass
+
+
+    def write(self):
+        with self.write_transaction(): pass
+
+
+    def _add(self, spec, path, directory_layout=None):
+        """Add an install record for spec at path to the database.
+
+        This assumes that the spec is not already installed. It
+        updates the ref counts on dependencies of the spec in the DB.
+
+        This operation is in-memory, and does not lock the DB.
+
+        """
+        key = spec.dag_hash()
+        if key in self._data:
+            rec = self._data[key]
+            rec.installed = True
+
+            # TODO: this overwrites a previous install path (when path !=
+            # self._data[key].path), and the old path still has a
+            # dependent in the DB. We could consider re-RPATH-ing the
+            # dependents.  This case is probably infrequent and may not be
+            # worth fixing, but this is where we can discover it.
+            rec.path = path
+
+        else:
+            self._data[key] = InstallRecord(spec, path, True)
+            for dep in spec.dependencies.values():
+                self._increment_ref_count(dep, directory_layout)
+
+
+    def _increment_ref_count(self, spec, directory_layout=None):
+        """Recursively examine dependencies and update their DB entries."""
+        key = spec.dag_hash()
+        if key not in self._data:
+            installed = False
+            path = None
+            if directory_layout:
+                path = directory_layout.path_for_spec(spec)
+                installed = os.path.isdir(path)
+
+            self._data[key] = InstallRecord(spec.copy(), path, installed)
+
+            for dep in spec.dependencies.values():
+                self._increment_ref_count(dep)
+
+        self._data[key].ref_count += 1
+
+    @_autospec
+    def add(self, spec, path):
+        """Add spec at path to database, locking and reading DB to sync.
+
+        ``add()`` will lock and read from the DB on disk.
+
+        """
+        # TODO: ensure that spec is concrete?
+        # Entire add is transactional.
+        with self.write_transaction():
+            self._add(spec, path)
+
+
+    def _get_matching_spec_key(self, spec, **kwargs):
+        """Get the exact spec OR get a single spec that matches."""
+        key = spec.dag_hash()
+        if key not in self._data:
+            match = self.query_one(spec, **kwargs)
+            if match:
+                return match.dag_hash()
+            raise KeyError("No such spec in database! %s" % spec)
+        return key
+
+
+    @_autospec
+    def get_record(self, spec, **kwargs):
+        key = self._get_matching_spec_key(spec, **kwargs)
+        return self._data[key]
+
+
+    def _decrement_ref_count(self, spec):
+        key = spec.dag_hash()
+
+        if key not in self._data:
+            # TODO: print something here?  DB is corrupt, but
+            # not much we can do.
+            return
+
+        rec = self._data[key]
+        rec.ref_count -= 1
+
+        if rec.ref_count == 0 and not rec.installed:
+            del self._data[key]
+            for dep in spec.dependencies.values():
+                self._decrement_ref_count(dep)
+
+
+    def _remove(self, spec):
+        """Non-locking version of remove(); does real work.
+        """
+        key = self._get_matching_spec_key(spec)
+        rec = self._data[key]
+
+        if rec.ref_count > 0:
+            rec.installed = False
+            return rec.spec
+
+        del self._data[key]
+        for dep in rec.spec.dependencies.values():
+            self._decrement_ref_count(dep)
+
+        # Returns the concrete spec so we know it in the case where a
+        # query spec was passed in.
+        return rec.spec
+
+
+    @_autospec
+    def remove(self, spec):
+        """Removes a spec from the database.  To be called on uninstall.
+
+        Reads the database, then:
+
+          1. Marks the spec as not installed.
+          2. Removes the spec if it has no more dependents.
+          3. If removed, recursively updates dependencies' ref counts
+             and removes them if they are no longer needed.
+
+        """
+        # Take a lock around the entire removal.
+        with self.write_transaction():
+            return self._remove(spec)
+
+
+    @_autospec
+    def installed_extensions_for(self, extendee_spec):
+        """
+        Return the specs of all packages that extend
+        the given spec
+        """
+        for s in self.query():
+            try:
+                if s.package.extends(extendee_spec):
+                    yield s.package
+            except spack.packages.UnknownPackageError:
+                # Skip specs whose package.py has gone away.
+                continue
+            # TODO: conditional way to do this instead of catching exceptions
+
+
+    def query(self, query_spec=any, known=any, installed=True):
+        """Run a query on the database.
+
+        ``query_spec``
+            Queries iterate through specs in the database and return
+            those that satisfy the supplied ``query_spec``.  If
+            query_spec is ``any``, this will match all specs in the
+            database.  If it is a spec, we'll evaluate
+            ``spec.satisfies(query_spec)``.
+
+        The query can be constrained by two additional attributes:
+
+        ``known``
+            Possible values: True, False, any
+
+            Specs that are "known" are those for which Spack can
+            locate a ``package.py`` file -- i.e., Spack "knows" how to
+            install them.  Specs that are unknown may represent
+            packages that existed in a previous version of Spack, but
+            have since either changed their name or been removed.
+
+        ``installed``
+            Possible values: True, False, any
+
+            Specs for which a prefix exists are "installed". A spec
+            that is NOT installed will be in the database if some
+            other spec depends on it but its installation has gone
+            away since Spack installed it.
+
+        TODO: Specs are a lot like queries.  Should there be a
+              wildcard spec object, and should specs have attributes
+              like installed and known that can be queried?  Or are
+              these really special cases that only belong here?
+
+        """
+        with self.read_transaction():
+            results = []
+            for key, rec in self._data.items():
+                if installed is not any and rec.installed != installed:
+                    continue
+                if known is not any and spack.db.exists(rec.spec.name) != known:
+                    continue
+                if query_spec is any or rec.spec.satisfies(query_spec):
+                    results.append(rec.spec)
+
+            return sorted(results)
+
+
+    def query_one(self, query_spec, known=any, installed=True):
+        """Query for exactly one spec that matches the query spec.
+
+        Raises an assertion error if more than one spec matches the
+        query. Returns None if no installed package matches.
+
+        """
+        concrete_specs = self.query(query_spec, known, installed)
+        assert len(concrete_specs) <= 1
+        return concrete_specs[0] if concrete_specs else None
+
+
+    def missing(self, spec):
+        with self.read_transaction():
+            key = spec.dag_hash()
+            return key in self._data and not self._data[key].installed
+
+
+class _Transaction(object):
+    """Simple nested transaction context manager that uses a file lock.
+
+    This class can trigger actions when the lock is acquired for the
+    first time and released for the last.
+
+    Timeout for lock is customizable.
+    """
+    def __init__(self, db, acquire_fn=None, release_fn=None,
+                 timeout=_db_lock_timeout):
+        self._db = db
+        self._timeout = timeout
+        self._acquire_fn = acquire_fn
+        self._release_fn = release_fn
+
+    def __enter__(self):
+        if self._enter() and self._acquire_fn:
+            self._acquire_fn()
+
+    def __exit__(self, type, value, traceback):
+        if self._exit() and self._release_fn:
+            self._release_fn()
+
+
+class ReadTransaction(_Transaction):
+    def _enter(self):
+        return self._db.lock.acquire_read(self._timeout)
+
+    def _exit(self):
+        return self._db.lock.release_read()
+
+
+class WriteTransaction(_Transaction):
+    def _enter(self):
+        return self._db.lock.acquire_write(self._timeout)
+
+    def _exit(self):
+        return self._db.lock.release_write()
+
+
+class CorruptDatabaseError(SpackError):
+    def __init__(self, path, msg=''):
+        super(CorruptDatabaseError, self).__init__(
+            "Spack database is corrupt: %s.  %s" %(path, msg))
+
+
+class InvalidDatabaseVersionError(SpackError):
+    def __init__(self, expected, found):
+        super(InvalidDatabaseVersionError, self).__init__(
+            "Expected database version %s but found version %s"
+            % (expected, found))
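
Two reference sketches for the new module. First, the index that
_write_to_yaml() emits is a two-level mapping; its shape as a Python
literal, with illustrative values (the per-spec payload produced by
Spec.to_node_dict() is elided):

    index = {
        'database': {
            'version': '0.9',
            'installs': {
                '<dag_hash>': {                      # key: spec.dag_hash()
                    'path': '/opt/spack/...',        # install prefix
                    'installed': True,
                    'ref_count': 2,
                    'spec': {'libelf': {}},          # node dict, elided
                },
            },
        },
    }

Second, how a client is expected to use transactions. Nesting is cheap:
only the outermost acquire re-reads index.yaml and only the outermost
write release rewrites it, even though add() and remove() open their own
nested transactions internally:

    import spack

    with spack.installed_db.read_transaction():
        installed = spack.installed_db.query('mpileaks')  # illustrative

    with spack.installed_db.write_transaction():          # one read, one write
        for spec in installed:
            spack.installed_db.remove(spec)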
diff --git a/lib/spack/spack/directory_layout.py b/lib/spack/spack/directory_layout.py
index 85ecc1ce2b169989048f72c557d83bfee36c762b..da8f4187cc5e917d55b1dc968143377c9bfe28a7 100644
--- a/lib/spack/spack/directory_layout.py
+++ b/lib/spack/spack/directory_layout.py
@@ -32,7 +32,6 @@
 from external import yaml
 
 import llnl.util.tty as tty
-from llnl.util.lang import memoized
 from llnl.util.filesystem import join_path, mkdirp
 
 from spack.spec import Spec
@@ -258,7 +257,6 @@ def create_install_directory(self, spec):
         self.write_spec(spec, spec_file_path)
 
 
-    @memoized
     def all_specs(self):
         if not os.path.isdir(self.root):
             return []
@@ -269,7 +267,6 @@ def all_specs(self):
         return [self.read_spec(s) for s in spec_files]
 
 
-    @memoized
     def specs_by_hash(self):
         by_hash = {}
         for spec in self.all_specs():
diff --git a/lib/spack/spack/error.py b/lib/spack/spack/error.py
index bfa7951a473ebddc1d5e98d25546cf14f45793a1..b3b24e6105b5a5fd88d7d2131253cf9d98931029 100644
--- a/lib/spack/spack/error.py
+++ b/lib/spack/spack/error.py
@@ -55,8 +55,8 @@ def die(self):
 
     def __str__(self):
         msg = self.message
-        if self.long_message:
-            msg += "\n    %s" % self.long_message
+        if self._long_message:
+            msg += "\n    %s" % self._long_message
         return msg
 
 class UnsupportedPlatformError(SpackError):
diff --git a/lib/spack/spack/package.py b/lib/spack/spack/package.py
index b1257a092fb70c1531fca5a5bc3e3d4c8dcb122f..b15d4b2040cfb7a285f0adbcd80a18ecb4274af6 100644
--- a/lib/spack/spack/package.py
+++ b/lib/spack/spack/package.py
@@ -570,9 +570,12 @@ def installed(self):
     @property
     def installed_dependents(self):
         """Return a list of the specs of all installed packages that depend
-           on this one."""
+           on this one.
+
+        TODO: move this method to database.py?
+        """
         dependents = []
-        for spec in spack.db.installed_package_specs():
+        for spec in spack.installed_db.query():
             if self.name == spec.name:
                 continue
             for dep in spec.traverse():
@@ -608,6 +611,7 @@ def url_version(self, version):
     def remove_prefix(self):
         """Removes the prefix for a package along with any empty parent directories."""
         spack.install_layout.remove_install_directory(self.spec)
+        spack.installed_db.remove(self.spec)
 
 
     def do_fetch(self):
@@ -786,6 +790,7 @@ def cleanup():
                          "Manually remove this directory to fix:",
                          self.prefix)
 
+
         def real_work():
             try:
                 tty.msg("Building %s." % self.name)
@@ -845,6 +850,10 @@ def real_work():
         # Do the build.
         spack.build_environment.fork(self, real_work)
 
+        # note: PARENT of the build process adds the new package to
+        # the database, so that we don't need to re-read from file.
+        spack.installed_db.add(self.spec, self.prefix)
+
         # Once everything else is done, run post install hooks
         spack.hooks.post_install(self)
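
The PARENT-process comment above is the crux of this hunk:
build_environment.fork() runs the build in a forked child, so a database
update made there would mutate only the child's in-memory index and vanish
when the child exits. A minimal sketch of the pattern, assuming the usual
os.fork()/os._exit() idiom (fork_and_run is a hypothetical name):

    import os

    def fork_and_run(work):
        pid = os.fork()
        if pid == 0:        # child: do the build, exit without cleanup
            work()
            os._exit(0)
        _, status = os.waitpid(pid, 0)    # parent: wait for the child
        if status != 0:
            raise RuntimeError('build process failed')
        # Only here, back in the parent, is it safe to record the
        # install: the parent's in-memory database is the one that
        # persists and gets written out.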
 
diff --git a/lib/spack/spack/packages.py b/lib/spack/spack/packages.py
index adfbc26c1d431cd0baec332f7c4c5d8b4f0deb45..2e3e95ca4072bd6dc99865e1496cf499b53ea03a 100644
--- a/lib/spack/spack/packages.py
+++ b/lib/spack/spack/packages.py
@@ -95,12 +95,6 @@ def purge(self):
         self.instances.clear()
 
 
-    @_autospec
-    def get_installed(self, spec):
-        """Get all the installed specs that satisfy the provided spec constraint."""
-        return [s for s in self.installed_package_specs() if s.satisfies(spec)]
-
-
     @_autospec
     def providers_for(self, vpkg_spec):
         if self.provider_index is None:
@@ -117,19 +111,6 @@ def extensions_for(self, extendee_spec):
         return [p for p in self.all_packages() if p.extends(extendee_spec)]
 
 
-    @_autospec
-    def installed_extensions_for(self, extendee_spec):
-        for s in self.installed_package_specs():
-            try:
-                if s.package.extends(extendee_spec):
-                    yield s.package
-            except UnknownPackageError, e:
-                # Skip packages we know nothing about
-                continue
-                # TODO: add some conditional way to do this instead of
-                # catching exceptions.
-
-
     def dirname_for_package_name(self, pkg_name):
         """Get the directory name for a particular package.  This is the
            directory that contains its package.py file."""
@@ -150,29 +131,6 @@ def filename_for_package_name(self, pkg_name):
         return join_path(pkg_dir, _package_file_name)
 
 
-    def installed_package_specs(self):
-        """Read installed package names straight from the install directory
-           layout.
-        """
-        # Get specs from the directory layout but ensure that they're
-        # all normalized properly.
-        installed = []
-        for spec in spack.install_layout.all_specs():
-            spec.normalize()
-            installed.append(spec)
-        return installed
-
-
-    def installed_known_package_specs(self):
-        """Read installed package names straight from the install
-           directory layout, but return only specs for which the
-           package is known to this version of spack.
-        """
-        for spec in spack.install_layout.all_specs():
-            if self.exists(spec.name):
-                yield spec
-
-
     @memoized
     def all_package_names(self):
         """Generator function for all packages.  This looks for
diff --git a/lib/spack/spack/spec.py b/lib/spack/spack/spec.py
index e1fbb84423403788be2bda74ba606f6a389f1181..7b79feb311aa65402679398e694b8b5914b488d3 100644
--- a/lib/spack/spack/spec.py
+++ b/lib/spack/spack/spec.py
@@ -640,7 +640,9 @@ def prefix(self):
 
 
     def dag_hash(self, length=None):
-        """Return a hash of the entire spec DAG, including connectivity."""
+        """
+        Return a hash of the entire spec DAG, including connectivity.
+        """
         yaml_text = yaml.dump(
             self.to_node_dict(), default_flow_style=True, width=sys.maxint)
         sha = hashlib.sha1(yaml_text)
@@ -710,7 +712,7 @@ def from_yaml(stream):
         try:
             yfile = yaml.load(stream)
         except MarkedYAMLError, e:
-            raise SpackYAMLError("error parsing YMAL spec:", str(e))
+            raise SpackYAMLError("error parsing YAML spec:", str(e))
 
         for node in yfile['spec']:
             name = next(iter(node))
@@ -1998,4 +2000,4 @@ def __init__(self, provided, required):
 
 class SpackYAMLError(spack.error.SpackError):
     def __init__(self, msg, yaml_error):
-        super(SpackError, self).__init__(msg, str(yaml_error))
+        super(SpackYAMLError, self).__init__(msg, str(yaml_error))
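
The last hunk fixes a subtle super() bug: super(SpackError, self) starts
the MRO lookup *after* SpackError, so SpackError.__init__ was skipped and
Exception.__init__ ran instead, leaving the long-message attribute unset.
A toy reproduction with hypothetical class names:

    class Base(Exception):
        def __init__(self, msg, long_msg=None):
            super(Base, self).__init__(msg)
            self._long_message = long_msg

    class Child(Base):
        def __init__(self, msg, detail):
            # Buggy form: super(Base, self).__init__(msg, str(detail))
            # skips Base.__init__, so _long_message is never set.
            super(Child, self).__init__(msg, str(detail))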
diff --git a/lib/spack/spack/test/__init__.py b/lib/spack/spack/test/__init__.py
index 6fd80d10848693aa78e79809211cdb0c47728202..0f776bfea46ba245d1ebf051e47fdb612c203b36 100644
--- a/lib/spack/spack/test/__init__.py
+++ b/lib/spack/spack/test/__init__.py
@@ -57,7 +57,9 @@
               'optional_deps',
               'make_executable',
               'configure_guess',
-              'unit_install']
+              'unit_install',
+              'lock',
+              'database']
 
 
 def list_tests():
@@ -77,7 +79,7 @@ def run(names, verbose=False):
             if test not in test_names:
                 tty.error("%s is not a valid spack test name." % test,
                           "Valid names are:")
-                colify(test_names, indent=4)
+                colify(sorted(test_names), indent=4)
                 sys.exit(1)
 
     runner = unittest.TextTestRunner(verbosity=verbosity)
diff --git a/lib/spack/spack/test/database.py b/lib/spack/spack/test/database.py
new file mode 100644
index 0000000000000000000000000000000000000000..3c5926e84083e4b7ffb2ccc2b4e52fed9f603465
--- /dev/null
+++ b/lib/spack/spack/test/database.py
@@ -0,0 +1,345 @@
+##############################################################################
+# Copyright (c) 2013-2015, Lawrence Livermore National Security, LLC.
+# Produced at the Lawrence Livermore National Laboratory.
+#
+# This file is part of Spack.
+# Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
+# LLNL-CODE-647188
+#
+# For details, see https://scalability-llnl.github.io/spack
+# Please also see the LICENSE file for our notice and the LGPL.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License (as published by
+# the Free Software Foundation) version 2.1 dated February 1999.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
+# conditions of the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+##############################################################################
+"""
+These tests check that the database is functioning properly,
+both in memory and in its index file.
+"""
+import tempfile
+import shutil
+import multiprocessing
+
+from llnl.util.lock import *
+from llnl.util.filesystem import join_path
+
+import spack
+from spack.database import Database
+from spack.directory_layout import YamlDirectoryLayout
+from spack.test.mock_packages_test import *
+
+from llnl.util.tty.colify import colify
+
+def _print_ref_counts():
+    """Print out all ref counts for the graph used here, for debugging"""
+    recs = []
+
+    def add_rec(spec):
+        cspecs = spack.installed_db.query(spec, installed=any)
+
+        if not cspecs:
+            recs.append("[ %-7s ] %-20s-" % ('', spec))
+        else:
+            key = cspecs[0].dag_hash()
+            rec = spack.installed_db.get_record(cspecs[0])
+            recs.append("[ %-7s ] %-20s%d" % (key[:7], spec, rec.ref_count))
+
+    with spack.installed_db.read_transaction():
+        add_rec('mpileaks ^mpich')
+        add_rec('callpath ^mpich')
+        add_rec('mpich')
+
+        add_rec('mpileaks ^mpich2')
+        add_rec('callpath ^mpich2')
+        add_rec('mpich2')
+
+        add_rec('mpileaks ^zmpi')
+        add_rec('callpath ^zmpi')
+        add_rec('zmpi')
+        add_rec('fake')
+
+        add_rec('dyninst')
+        add_rec('libdwarf')
+        add_rec('libelf')
+
+    colify(recs, cols=3)
+
+
+class DatabaseTest(MockPackagesTest):
+
+    def _mock_install(self, spec):
+        s = Spec(spec)
+        pkg = spack.db.get(s.concretized())
+        pkg.do_install(fake=True)
+
+
+    def _mock_remove(self, spec):
+        specs = spack.installed_db.query(spec)
+        assert len(specs) == 1
+        spec = specs[0]
+        spec.package.do_uninstall(force=True)
+
+
+    def setUp(self):
+        super(DatabaseTest, self).setUp()
+        #
+        # TODO: make the mockup below easier.
+        #
+
+        # Make a fake install directory
+        self.install_path = tempfile.mkdtemp()
+        self.spack_install_path = spack.install_path
+        spack.install_path = self.install_path
+
+        self.install_layout = YamlDirectoryLayout(self.install_path)
+        self.spack_install_layout = spack.install_layout
+        spack.install_layout = self.install_layout
+
+        # Make fake database and fake install directory.
+        self.installed_db = Database(self.install_path)
+        self.spack_installed_db = spack.installed_db
+        spack.installed_db = self.installed_db
+
+        # make a mock database with some packages installed note that
+        # the ref count for dyninst here will be 3, as it's recycled
+        # across each install.
+        #
+        # Here is what the mock DB looks like:
+        #
+        # o  mpileaks     o  mpileaks'    o  mpileaks''
+        # |\              |\              |\
+        # | o  callpath   | o  callpath'  | o  callpath''
+        # |/|             |/|             |/|
+        # o |  mpich      o |  mpich2     o |  zmpi
+        #   |               |             o |  fake
+        #   |               |               |
+        #   |               |______________/
+        #   | .____________/
+        #   |/
+        #   o  dyninst
+        #   |\
+        #   | o  libdwarf
+        #   |/
+        #   o  libelf
+        #
+
+        # Transaction used to avoid repeated writes.
+        with spack.installed_db.write_transaction():
+            self._mock_install('mpileaks ^mpich')
+            self._mock_install('mpileaks ^mpich2')
+            self._mock_install('mpileaks ^zmpi')
+
+
+    def tearDown(self):
+        super(DatabaseTest, self).tearDown()
+        shutil.rmtree(self.install_path)
+        spack.install_path = self.spack_install_path
+        spack.install_layout = self.spack_install_layout
+        spack.installed_db = self.spack_installed_db
+
+
+    def test_010_all_install_sanity(self):
+        """Ensure that the install layout reflects what we think it does."""
+        all_specs = spack.install_layout.all_specs()
+        self.assertEqual(len(all_specs), 13)
+
+        # query specs with multiple configurations
+        mpileaks_specs = [s for s in all_specs if s.satisfies('mpileaks')]
+        callpath_specs = [s for s in all_specs if s.satisfies('callpath')]
+        mpi_specs =      [s for s in all_specs if s.satisfies('mpi')]
+
+        self.assertEqual(len(mpileaks_specs), 3)
+        self.assertEqual(len(callpath_specs), 3)
+        self.assertEqual(len(mpi_specs),      3)
+
+        # query specs with single configurations
+        dyninst_specs =  [s for s in all_specs if s.satisfies('dyninst')]
+        libdwarf_specs = [s for s in all_specs if s.satisfies('libdwarf')]
+        libelf_specs =   [s for s in all_specs if s.satisfies('libelf')]
+
+        self.assertEqual(len(dyninst_specs),  1)
+        self.assertEqual(len(libdwarf_specs), 1)
+        self.assertEqual(len(libelf_specs),   1)
+
+        # Query by dependency
+        self.assertEqual(len([s for s in all_specs if s.satisfies('mpileaks ^mpich')]),  1)
+        self.assertEqual(len([s for s in all_specs if s.satisfies('mpileaks ^mpich2')]), 1)
+        self.assertEqual(len([s for s in all_specs if s.satisfies('mpileaks ^zmpi')]),   1)
+
+
+    def test_015_write_and_read(self):
+        # write and read DB
+        with spack.installed_db.write_transaction():
+            specs = spack.installed_db.query()
+            recs = [spack.installed_db.get_record(s) for s in specs]
+            spack.installed_db.write()
+        spack.installed_db.read()
+
+        for spec, rec in zip(specs, recs):
+            new_rec = spack.installed_db.get_record(spec)
+            self.assertEqual(new_rec.ref_count, rec.ref_count)
+            self.assertEqual(new_rec.spec,      rec.spec)
+            self.assertEqual(new_rec.path,      rec.path)
+            self.assertEqual(new_rec.installed, rec.installed)
+
+
+    def _check_db_sanity(self):
+        """Utiilty function to check db against install layout."""
+        expected = sorted(spack.install_layout.all_specs())
+        actual = sorted(self.installed_db.query())
+
+        self.assertEqual(len(expected), len(actual))
+        for e, a in zip(expected, actual):
+            self.assertEqual(e, a)
+
+
+    def test_020_db_sanity(self):
+        """Make sure query() returns what's actually in the db."""
+        self._check_db_sanity()
+
+
+    def test_030_db_sanity_from_another_process(self):
+        def read_and_modify():
+            self._check_db_sanity()  # check that other process can read DB
+            with self.installed_db.write_transaction():
+                self._mock_remove('mpileaks ^zmpi')
+
+        p = multiprocessing.Process(target=read_and_modify, args=())
+        p.start()
+        p.join()
+
+        # ensure child process change is visible in parent process
+        with self.installed_db.read_transaction():
+            self.assertEqual(len(self.installed_db.query('mpileaks ^zmpi')), 0)
+
+
+    def test_040_ref_counts(self):
+        """Ensure that we got ref counts right when we read the DB."""
+        self.installed_db._check_ref_counts()
+
+
+    def test_050_basic_query(self):
+        """Ensure that querying the database is consistent with what is installed."""
+        # query everything
+        self.assertEqual(len(spack.installed_db.query()), 13)
+
+        # query specs with multiple configurations
+        mpileaks_specs = self.installed_db.query('mpileaks')
+        callpath_specs = self.installed_db.query('callpath')
+        mpi_specs =      self.installed_db.query('mpi')
+
+        self.assertEqual(len(mpileaks_specs), 3)
+        self.assertEqual(len(callpath_specs), 3)
+        self.assertEqual(len(mpi_specs),      3)
+
+        # query specs with single configurations
+        dyninst_specs =  self.installed_db.query('dyninst')
+        libdwarf_specs = self.installed_db.query('libdwarf')
+        libelf_specs =   self.installed_db.query('libelf')
+
+        self.assertEqual(len(dyninst_specs),  1)
+        self.assertEqual(len(libdwarf_specs), 1)
+        self.assertEqual(len(libelf_specs),   1)
+
+        # Query by dependency
+        self.assertEqual(len(self.installed_db.query('mpileaks ^mpich')),  1)
+        self.assertEqual(len(self.installed_db.query('mpileaks ^mpich2')), 1)
+        self.assertEqual(len(self.installed_db.query('mpileaks ^zmpi')),   1)
+
+
+    def _check_remove_and_add_package(self, spec):
+        """Remove a spec from the DB, then add it and make sure everything's
+           still ok once it is added.  This checks that it was
+           removed, that it's back when added again, and that ref
+           counts are consistent.
+        """
+        original = self.installed_db.query()
+        self.installed_db._check_ref_counts()
+
+        # Remove spec
+        concrete_spec = self.installed_db.remove(spec)
+        self.installed_db._check_ref_counts()
+        remaining = self.installed_db.query()
+
+        # ensure spec we removed is gone
+        self.assertEqual(len(original) - 1, len(remaining))
+        self.assertTrue(all(s in original for s in remaining))
+        self.assertTrue(concrete_spec not in remaining)
+
+        # add it back and make sure everything is ok.
+        self.installed_db.add(concrete_spec, "")
+        installed = self.installed_db.query()
+        self.assertEqual(len(installed), len(original))
+
+        # Sanity check against the directory layout and check ref counts.
+        self._check_db_sanity()
+        self.installed_db._check_ref_counts()
+
+
+    def test_060_remove_and_add_root_package(self):
+        self._check_remove_and_add_package('mpileaks ^mpich')
+
+
+    def test_070_remove_and_add_dependency_package(self):
+        self._check_remove_and_add_package('dyninst')
+
+
+    def test_080_root_ref_counts(self):
+        rec = self.installed_db.get_record('mpileaks ^mpich')
+
+        # Remove a top-level spec from the DB
+        self.installed_db.remove('mpileaks ^mpich')
+
+        # record no longer in DB
+        self.assertEqual(self.installed_db.query('mpileaks ^mpich', installed=any), [])
+
+        # record's deps have updated ref_counts
+        self.assertEqual(self.installed_db.get_record('callpath ^mpich').ref_count, 0)
+        self.assertEqual(self.installed_db.get_record('mpich').ref_count, 1)
+
+        # put the spec back
+        self.installed_db.add(rec.spec, rec.path)
+
+        # record is present again
+        self.assertEqual(len(self.installed_db.query('mpileaks ^mpich', installed=any)), 1)
+
+        # dependencies have ref counts updated
+        self.assertEqual(self.installed_db.get_record('callpath ^mpich').ref_count, 1)
+        self.assertEqual(self.installed_db.get_record('mpich').ref_count, 2)
+
+
+    def test_090_non_root_ref_counts(self):
+        mpileaks_mpich_rec = self.installed_db.get_record('mpileaks ^mpich')
+        callpath_mpich_rec = self.installed_db.get_record('callpath ^mpich')
+
+        # "force remove" a non-root spec from the DB
+        self.installed_db.remove('callpath ^mpich')
+
+        # record still in DB but marked uninstalled
+        self.assertEqual(self.installed_db.query('callpath ^mpich', installed=True), [])
+        self.assertEqual(len(self.installed_db.query('callpath ^mpich', installed=any)), 1)
+
+        # record and its deps still have the same ref_counts
+        self.assertEqual(self.installed_db.get_record('callpath ^mpich', installed=any).ref_count, 1)
+        self.assertEqual(self.installed_db.get_record('mpich').ref_count, 2)
+
+        # remove the only dependent of the uninstalled callpath record
+        self.installed_db.remove('mpileaks ^mpich')
+
+        # record and parent are completely gone.
+        self.assertEqual(self.installed_db.query('mpileaks ^mpich', installed=any), [])
+        self.assertEqual(self.installed_db.query('callpath ^mpich', installed=any), [])
+
+        # mpich ref count updated properly.
+        mpich_rec = self.installed_db.get_record('mpich')
+        self.assertEqual(mpich_rec.ref_count, 0)
diff --git a/lib/spack/spack/test/lock.py b/lib/spack/spack/test/lock.py
new file mode 100644
index 0000000000000000000000000000000000000000..5664e71b03767ab215f0dd3b1671c36638c1b32a
--- /dev/null
+++ b/lib/spack/spack/test/lock.py
@@ -0,0 +1,266 @@
+##############################################################################
+# Copyright (c) 2013-2015, Lawrence Livermore National Security, LLC.
+# Produced at the Lawrence Livermore National Laboratory.
+#
+# This file is part of Spack.
+# Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
+# LLNL-CODE-647188
+#
+# For details, see https://scalability-llnl.github.io/spack
+# Please also see the LICENSE file for our notice and the LGPL.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License (as published by
+# the Free Software Foundation) version 2.1 dated February 1999.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
+# conditions of the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+##############################################################################
+"""
+These tests ensure that our lock works correctly.
+"""
+import unittest
+import os
+import tempfile
+import shutil
+from multiprocessing import Process
+
+from llnl.util.lock import *
+from llnl.util.filesystem import join_path, touch
+
+from spack.util.multiproc import Barrier
+
+# This is the longest a failed test will take, as the barriers will
+# time out and raise an exception.
+barrier_timeout = 5
+
+
+class LockTest(unittest.TestCase):
+
+    def setUp(self):
+        self.tempdir = tempfile.mkdtemp()
+        self.lock_path = join_path(self.tempdir, 'lockfile')
+        touch(self.lock_path)
+
+
+    def tearDown(self):
+        shutil.rmtree(self.tempdir, ignore_errors=True)
+
+
+    def multiproc_test(self, *functions):
+        """Order some processes using simple barrier synchronization."""
+        b = Barrier(len(functions), timeout=barrier_timeout)
+        procs = [Process(target=f, args=(b,)) for f in functions]
+        for p in procs:
+            p.start()
+        for p in procs:
+            p.join()
+            self.assertEqual(p.exitcode, 0)
+
+
+    #
+    # Process snippets below can be composed into tests.
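+    #
+    # For example, multiproc_test(self.acquire_write, self.timeout_read)
+    # runs a writer that grabs and holds the lock while a reader times
+    # out, with the two processes ordered by their barrier waits.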
+    #
+    def acquire_write(self, barrier):
+        lock = Lock(self.lock_path)
+        lock.acquire_write()  # grab exclusive lock
+        barrier.wait()
+        barrier.wait() # hold the lock until exceptions are raised in other procs.
+
+    def acquire_read(self, barrier):
+        lock = Lock(self.lock_path)
+        lock.acquire_read()  # grab shared lock
+        barrier.wait()
+        barrier.wait() # hold the lock until exceptions are raised in other procs.
+
+    def timeout_write(self, barrier):
+        lock = Lock(self.lock_path)
+        barrier.wait() # wait for the first process to acquire the lock
+        self.assertRaises(LockError, lock.acquire_write, 0.1)
+        barrier.wait()
+
+    def timeout_read(self, barrier):
+        lock = Lock(self.lock_path)
+        barrier.wait() # wait for the first process to acquire the lock
+        self.assertRaises(LockError, lock.acquire_read, 0.1)
+        barrier.wait()
+
+
+    #
+    # Test that exclusive lock attempts in other processes time out
+    # when an exclusive lock is held.
+    #
+    def test_write_lock_timeout_on_write(self):
+        self.multiproc_test(self.acquire_write, self.timeout_write)
+
+    def test_write_lock_timeout_on_write_2(self):
+        self.multiproc_test(self.acquire_write, self.timeout_write, self.timeout_write)
+
+    def test_write_lock_timeout_on_write_3(self):
+        self.multiproc_test(self.acquire_write, self.timeout_write, self.timeout_write, self.timeout_write)
+
+
+    #
+    # Test that shared lock attempts in other processes time out
+    # when an exclusive lock is held.
+    #
+    def test_read_lock_timeout_on_write(self):
+        self.multiproc_test(self.acquire_write, self.timeout_read)
+
+    def test_read_lock_timeout_on_write_2(self):
+        self.multiproc_test(self.acquire_write, self.timeout_read, self.timeout_read)
+
+    def test_read_lock_timeout_on_write_3(self):
+        self.multiproc_test(self.acquire_write, self.timeout_read, self.timeout_read, self.timeout_read)
+
+
+    #
+    # Test that exclusive locks time out when shared locks are held.
+    #
+    def test_write_lock_timeout_on_read(self):
+        self.multiproc_test(self.acquire_read, self.timeout_write)
+
+    def test_write_lock_timeout_on_read_2(self):
+        self.multiproc_test(self.acquire_read, self.timeout_write, self.timeout_write)
+
+    def test_write_lock_timeout_on_read_3(self):
+        self.multiproc_test(self.acquire_read, self.timeout_write, self.timeout_write, self.timeout_write)
+
+
+    #
+    # Test that exclusive locks time out while many shared locks are held.
+    #
+    def test_write_lock_timeout_with_multiple_readers_2_1(self):
+        self.multiproc_test(self.acquire_read, self.acquire_read, self.timeout_write)
+
+    def test_write_lock_timeout_with_multiple_readers_2_2(self):
+        self.multiproc_test(self.acquire_read, self.acquire_read, self.timeout_write, self.timeout_write)
+
+    def test_write_lock_timeout_with_multiple_readers_3_1(self):
+        self.multiproc_test(self.acquire_read, self.acquire_read, self.acquire_read, self.timeout_write)
+
+    def test_write_lock_timeout_with_multiple_readers_3_2(self):
+        self.multiproc_test(self.acquire_read, self.acquire_read, self.acquire_read, self.timeout_write, self.timeout_write)
+
+
+    #
+    # Longer test case that ensures locks are reusable. Ordering is
+    # enforced by barriers throughout -- steps are shown with numbers.
+    #
+    def test_complex_acquire_and_release_chain(self):
+        def p1(barrier):
+            lock = Lock(self.lock_path)
+
+            lock.acquire_write()
+            barrier.wait() # ---------------------------------------- 1
+            # others test timeout
+            barrier.wait() # ---------------------------------------- 2
+            lock.release_write()   # release and others acquire read
+            barrier.wait() # ---------------------------------------- 3
+            self.assertRaises(LockError, lock.acquire_write, 0.1)
+            lock.acquire_read()
+            barrier.wait() # ---------------------------------------- 4
+            lock.release_read()
+            barrier.wait() # ---------------------------------------- 5
+
+            # p2 upgrades read to write
+            barrier.wait() # ---------------------------------------- 6
+            self.assertRaises(LockError, lock.acquire_write, 0.1)
+            self.assertRaises(LockError, lock.acquire_read, 0.1)
+            barrier.wait() # ---------------------------------------- 7
+            # p2 releases write and read
+            barrier.wait() # ---------------------------------------- 8
+
+            # p3 acquires read
+            barrier.wait() # ---------------------------------------- 9
+            # p3 upgrades read to write
+            barrier.wait() # ---------------------------------------- 10
+            self.assertRaises(LockError, lock.acquire_write, 0.1)
+            self.assertRaises(LockError, lock.acquire_read, 0.1)
+            barrier.wait() # ---------------------------------------- 11
+            # p3 releases locks
+            barrier.wait() # ---------------------------------------- 12
+            lock.acquire_read()
+            barrier.wait() # ---------------------------------------- 13
+            lock.release_read()
+
+
+        def p2(barrier):
+            lock = Lock(self.lock_path)
+
+            # p1 acquires write
+            barrier.wait() # ---------------------------------------- 1
+            self.assertRaises(LockError, lock.acquire_write, 0.1)
+            self.assertRaises(LockError, lock.acquire_read, 0.1)
+            barrier.wait() # ---------------------------------------- 2
+            lock.acquire_read()
+            barrier.wait() # ---------------------------------------- 3
+            # p1 tests shared read
+            barrier.wait() # ---------------------------------------- 4
+            # others release reads
+            barrier.wait() # ---------------------------------------- 5
+
+            lock.acquire_write() # upgrade read to write
+            barrier.wait() # ---------------------------------------- 6
+            # others test timeout
+            barrier.wait() # ---------------------------------------- 7
+            lock.release_write()  # release read AND write (need both)
+            lock.release_read()
+            barrier.wait() # ---------------------------------------- 8
+
+            # p3 acquires read
+            barrier.wait() # ---------------------------------------- 9
+            # p3 upgrades read to write
+            barrier.wait() # ---------------------------------------- 10
+            self.assertRaises(LockError, lock.acquire_write, 0.1)
+            self.assertRaises(LockError, lock.acquire_read, 0.1)
+            barrier.wait() # ---------------------------------------- 11
+            # p3 releases locks
+            barrier.wait() # ---------------------------------------- 12
+            lock.acquire_read()
+            barrier.wait() # ---------------------------------------- 13
+            lock.release_read()
+
+
+        def p3(barrier):
+            lock = Lock(self.lock_path)
+
+            # p1 acquires write
+            barrier.wait() # ---------------------------------------- 1
+            self.assertRaises(LockError, lock.acquire_write, 0.1)
+            self.assertRaises(LockError, lock.acquire_read, 0.1)
+            barrier.wait() # ---------------------------------------- 2
+            lock.acquire_read()
+            barrier.wait() # ---------------------------------------- 3
+            # p1 tests shared read
+            barrier.wait() # ---------------------------------------- 4
+            lock.release_read()
+            barrier.wait() # ---------------------------------------- 5
+
+            # p2 upgrades read to write
+            barrier.wait() # ---------------------------------------- 6
+            self.assertRaises(LockError, lock.acquire_write, 0.1)
+            self.assertRaises(LockError, lock.acquire_read, 0.1)
+            barrier.wait() # ---------------------------------------- 7
+            # p2 releases write & read
+            barrier.wait() # ---------------------------------------- 8
+
+            lock.acquire_read()
+            barrier.wait() # ---------------------------------------- 9
+            lock.acquire_write()
+            barrier.wait() # ---------------------------------------- 10
+            # others test timeout
+            barrier.wait() # ---------------------------------------- 11
+            lock.release_read()   # release read AND write in opposite
+            lock.release_write()  # order from before on p2
+            barrier.wait() # ---------------------------------------- 12
+            lock.acquire_read()
+            barrier.wait() # ---------------------------------------- 13
+            lock.release_read()
+
+        self.multiproc_test(p1, p2, p3)
diff --git a/lib/spack/spack/util/multiproc.py b/lib/spack/spack/util/multiproc.py
index 9e045a090f4866a80a4a7c328153f594d5271007..21cd6f543d0e4e5c4da721f95fe3660edcb14e5e 100644
--- a/lib/spack/spack/util/multiproc.py
+++ b/lib/spack/spack/util/multiproc.py
@@ -27,9 +27,11 @@
 than multiprocessing.Pool.apply() can.  For example, apply() will fail
 to pickle functions if they're passed indirectly as parameters.
 """
-from multiprocessing import Process, Pipe
+from multiprocessing import Process, Pipe, Semaphore, Value
 from itertools import izip
 
+__all__ = ['spawn', 'parmap', 'Barrier', 'BarrierTimeoutError']
+
 def spawn(f):
     def fun(pipe,x):
         pipe.send(f(x))
@@ -43,3 +45,49 @@ def parmap(f,X):
     [p.join() for p in proc]
     return [p.recv() for (p,c) in pipe]
 
+
+class Barrier(object):
+    """Simple reusable semaphore barrier.
+
+    Python 2.6 doesn't provide a multiprocessing barrier, so we
+    implement one here.
+
+    See http://greenteapress.com/semaphores/downey08semaphores.pdf, p. 41.
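+
+    A minimal usage sketch: each of the ``n`` participating processes
+    calls ``wait()``; none returns until all ``n`` have arrived, and
+    the same barrier can be reused for later phases::
+
+        b = Barrier(3, timeout=5)   # shared by three processes
+        b.wait()  # blocks until all three processes arrive
+        b.wait()  # the same barrier orders the next phase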
+    """
+    def __init__(self, n, timeout=None):
+        self.n = n
+        self.to = timeout
+        self.count = Value('i', 0)
+        self.mutex = Semaphore(1)
+        self.turnstile1 = Semaphore(0)
+        self.turnstile2 = Semaphore(1)
+
+
+    def wait(self):
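+        # Phase 1 (rendezvous): count arrivals; the last process to
+        # arrive locks turnstile2 and opens turnstile1.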
+        if not self.mutex.acquire(timeout=self.to):
+            raise BarrierTimeoutError()
+        self.count.value += 1
+        if self.count.value == self.n:
+            if not self.turnstile2.acquire(timeout=self.to):
+                raise BarrierTimeoutError()
+            self.turnstile1.release()
+        self.mutex.release()
+
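+        # Every process passes through turnstile1, re-releasing it for
+        # the next waiter.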
+        if not self.turnstile1.acquire(timeout=self.to):
+            raise BarrierTimeoutError()
+        self.turnstile1.release()
+
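+        # Phase 2 (reset): count departures; the last process to leave
+        # re-locks turnstile1 and opens turnstile2, so the barrier can
+        # be reused.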
+        if not self.mutex.acquire(timeout=self.to):
+            raise BarrierTimeoutError()
+        self.count.value -= 1
+        if self.count.value == 0:
+            if not self.turnstile1.acquire(timeout=self.to):
+                raise BarrierTimeoutError()
+            self.turnstile2.release()
+        self.mutex.release()
+
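+        # Every process passes through turnstile2, restoring the
+        # barrier's initial state.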
+        if not self.turnstile2.acquire(timeout=self.to):
+            raise BarrierTimeoutError()
+        self.turnstile2.release()
+
+
+class BarrierTimeoutError(Exception):
+    """Raised when a barrier wait() exceeds its timeout."""