Skip to content
Snippets Groups Projects
Commit a58ae0c5 authored by Todd Gamblin's avatar Todd Gamblin
Browse files

Build database working with simple transaction support; all tests passing.

parent bf8479be
Branches
Tags
No related merge requests found
......@@ -158,6 +158,11 @@ def clen(string):
return len(re.sub(r'\033[^m]*m', '', string))
def cextra(string):
    """Length of extra color characters in a string."""
    # Concatenate every ANSI color escape sequence and count the characters.
    return len(''.join(re.findall(r'\033[^m]*m', string)))
def cwrite(string, stream=sys.stdout, color=None):
"""Replace all color expressions in string with ANSI control
codes and write the result to the stream. If color is
......
......@@ -124,16 +124,15 @@ def elide_list(line_list, max_num=10):
def disambiguate_spec(spec):
    """Resolve a (possibly abstract) spec to the single installed spec it names.

    Dies with an error message if the spec matches zero or more than one
    installed package; otherwise returns the unique matching concrete spec.
    Locking is handled inside ``installed_db.query()`` itself.
    """
    matching_specs = spack.installed_db.query(spec)
    if not matching_specs:
        tty.die("Spec '%s' matches no installed packages." % spec)

    elif len(matching_specs) > 1:
        args = ["%s matches multiple packages." % spec,
                "Matching packages:"]
        args += [" " + str(s) for s in matching_specs]
        args += ["Use a more specific spec."]
        tty.die(*args)

    return matching_specs[0]
......@@ -54,13 +54,12 @@ def deactivate(parser, args):
if args.all:
if pkg.extendable:
tty.msg("Deactivating all extensions of %s" % pkg.spec.short_spec)
with spack.installed_db.read_lock():
ext_pkgs = spack.installed_db.installed_extensions_for(spec)
ext_pkgs = spack.installed_db.installed_extensions_for(spec)
for ext_pkg in ext_pkgs:
ext_pkg.spec.normalize()
if ext_pkg.activated:
ext_pkg.do_deactivate(force=True)
for ext_pkg in ext_pkgs:
ext_pkg.spec.normalize()
if ext_pkg.activated:
ext_pkg.do_deactivate(force=True)
elif pkg.is_extension:
if not args.force and not spec.package.activated:
......
......@@ -54,11 +54,12 @@ def diy(self, args):
if not args.spec:
tty.die("spack diy requires a package spec argument.")
with spack.installed_db.write_lock():
specs = spack.cmd.parse_specs(args.spec)
if len(specs) > 1:
tty.die("spack diy only takes one spec.")
specs = spack.cmd.parse_specs(args.spec)
if len(specs) > 1:
tty.die("spack diy only takes one spec.")
# Take a write lock before checking for existence.
with spack.installed_db.write_lock():
spec = specs[0]
if not spack.db.exists(spec.name):
tty.warn("No such package: %s" % spec.name)
......@@ -85,7 +86,7 @@ def diy(self, args):
# Forces the build to run out of the current directory.
package.stage = DIYStage(os.getcwd())
# TODO: make this an argument, not a global.
# TODO: make this an argument, not a global.
spack.do_checksum = False
package.do_install(
......
......@@ -80,8 +80,7 @@ def extensions(parser, args):
colify(ext.name for ext in extensions)
# List specs of installed extensions.
with spack.installed_db.read_lock():
installed = [s.spec for s in spack.installed_db.installed_extensions_for(spec)]
installed = [s.spec for s in spack.installed_db.installed_extensions_for(spec)]
print
if not installed:
tty.msg("None installed.")
......
......@@ -158,12 +158,11 @@ def find(parser, args):
q_args = { 'installed' : installed, 'known' : known }
# Get all the specs the user asked for
with spack.installed_db.read_lock():
if not query_specs:
specs = set(spack.installed_db.query(**q_args))
else:
results = [set(spack.installed_db.query(qs, **q_args)) for qs in query_specs]
specs = set.union(*results)
if not query_specs:
specs = set(spack.installed_db.query(**q_args))
else:
results = [set(spack.installed_db.query(qs, **q_args)) for qs in query_specs]
specs = set.union(*results)
if not args.mode:
args.mode = 'short'
......
......@@ -68,10 +68,10 @@ def install(parser, args):
if args.no_checksum:
spack.do_checksum = False # TODO: remove this global.
with spack.installed_db.write_lock():
specs = spack.cmd.parse_specs(args.packages, concretize=True)
for spec in specs:
package = spack.db.get(spec)
specs = spack.cmd.parse_specs(args.packages, concretize=True)
for spec in specs:
package = spack.db.get(spec)
with spack.installed_db.write_lock():
package.do_install(
keep_prefix=args.keep_prefix,
keep_stage=args.keep_stage,
......
......@@ -84,21 +84,21 @@ def uninstall(parser, args):
# The package.py file has gone away -- but still want to uninstall.
spack.Package(s).do_uninstall(force=True)
# Sort packages to be uninstalled by the number of installed dependents
# This ensures we do things in the right order
def num_installed_deps(pkg):
return len(pkg.installed_dependents)
pkgs.sort(key=num_installed_deps)
# Sort packages to be uninstalled by the number of installed dependents
# This ensures we do things in the right order
def num_installed_deps(pkg):
return len(pkg.installed_dependents)
pkgs.sort(key=num_installed_deps)
# Uninstall packages in order now.
for pkg in pkgs:
try:
pkg.do_uninstall(force=args.force)
except PackageStillNeededError, e:
tty.error("Will not uninstall %s" % e.spec.format("$_$@$%@$#", color=True))
print
print "The following packages depend on it:"
display_specs(e.dependents, long=True)
print
print "You can use spack uninstall -f to force this action."
sys.exit(1)
# Uninstall packages in order now.
for pkg in pkgs:
try:
pkg.do_uninstall(force=args.force)
except PackageStillNeededError, e:
tty.error("Will not uninstall %s" % e.spec.format("$_$@$%@$#", color=True))
print
print "The following packages depend on it:"
display_specs(e.dependents, long=True)
print
print "You can use spack uninstall -f to force this action."
sys.exit(1)
......@@ -48,7 +48,7 @@
import llnl.util.tty as tty
from llnl.util.filesystem import *
from llnl.util.lock import Lock
from llnl.util.lock import *
import spack.spec
from spack.version import Version
......@@ -62,7 +62,8 @@
_db_version = Version('0.9')
# Default timeout for spack database locks is 5 min.
_db_lock_timeout = 300
_db_lock_timeout = 60
def _autospec(function):
"""Decorator that automatically converts the argument of a single-arg
......@@ -90,11 +91,11 @@ class InstallRecord(object):
dependents left.
"""
def __init__(self, spec, path, installed):
def __init__(self, spec, path, installed, ref_count=0):
self.spec = spec
self.path = path
self.installed = installed
self.ref_count = 0
self.ref_count = ref_count
def to_dict(self):
return { 'spec' : self.spec.to_node_dict(),
......@@ -103,25 +104,42 @@ def to_dict(self):
'ref_count' : self.ref_count }
@classmethod
def from_dict(cls, d):
# TODO: check the dict more rigorously.
return InstallRecord(d['spec'], d['path'], d['installed'], d['ref_count'])
def from_dict(cls, spec, dictionary):
d = dictionary
return InstallRecord(spec, d['path'], d['installed'], d['ref_count'])
class Database(object):
def __init__(self, root):
"""Create an empty Database.
Location defaults to root/_index.yaml
The individual data are dicts containing
spec: the top level spec of a package
path: the path to the install of that package
dep_hash: a hash of the dependence DAG for that package
def __init__(self, root, db_dir=None):
"""Create a Database for Spack installations under ``root``.
A Database is a cache of Specs data from ``$prefix/spec.yaml``
files in Spack installation directories.
By default, Database files (data and lock files) are stored
under ``root/.spack-db``, which is created if it does not
exist. This is the ``db_dir``.
The Database will attempt to read an ``index.yaml`` file in
``db_dir``. If it does not find one, it will be created when
needed by scanning the entire Database root for ``spec.yaml``
files according to Spack's ``DirectoryLayout``.
Caller may optionally provide a custom ``db_dir`` parameter
where data will be stored. This is intended to be used for
testing the Database class.
"""
self._root = root
self.root = root
# Set up layout of database files.
self._db_dir = join_path(self._root, _db_dirname)
if db_dir is None:
# If the db_dir is not provided, default to within the db root.
self._db_dir = join_path(self.root, _db_dirname)
else:
# Allow customizing the database directory location for testing.
self._db_dir = db_dir
# Set up layout of database files within the db dir
self._index_path = join_path(self._db_dir, 'index.yaml')
self._lock_path = join_path(self._db_dir, 'lock')
......@@ -135,21 +153,23 @@ def __init__(self, root):
# initialize rest of state.
self.lock = Lock(self._lock_path)
self._data = {}
self._last_write_time = 0
def write_lock(self, timeout=_db_lock_timeout):
"""Get a write lock context for use in a `with` block."""
return self.lock.write_lock(timeout)
def write_transaction(self, timeout=_db_lock_timeout):
"""Get a write lock context manager for use in a `with` block."""
return WriteTransaction(self, self._read, self._write, timeout)
def read_lock(self, timeout=_db_lock_timeout):
"""Get a read lock context for use in a `with` block."""
return self.lock.read_lock(timeout)
def read_transaction(self, timeout=_db_lock_timeout):
"""Get a read lock context manager for use in a `with` block."""
return ReadTransaction(self, self._read, None, timeout)
def _write_to_yaml(self, stream):
"""Write out the databsae to a YAML file."""
"""Write out the databsae to a YAML file.
This function does not do any locking or transactions.
"""
# map from per-spec hash code to installation record.
installs = dict((k, v.to_dict()) for k, v in self._data.items())
......@@ -173,7 +193,10 @@ def _write_to_yaml(self, stream):
def _read_spec_from_yaml(self, hash_key, installs, parent_key=None):
"""Recursively construct a spec from a hash in a YAML database."""
"""Recursively construct a spec from a hash in a YAML database.
Does not do any locking.
"""
if hash_key not in installs:
parent = read_spec(installs[parent_key]['path'])
......@@ -195,6 +218,8 @@ def _read_from_yaml(self, stream):
"""
Fill database from YAML, do not maintain old data
Translate the spec portions from node-dict form to spec form
Does not do any locking.
"""
try:
if isinstance(stream, basestring):
......@@ -243,7 +268,7 @@ def check(cond, msg):
# Insert the brand new spec in the database. Each
# spec has its own copies of its dependency specs.
# TODO: would a more immmutable spec implementation simplify this?
data[hash_key] = InstallRecord(spec, rec['path'], rec['installed'])
data[hash_key] = InstallRecord.from_dict(spec, rec)
except Exception as e:
tty.warn("Invalid database reecord:",
......@@ -256,57 +281,60 @@ def check(cond, msg):
def reindex(self, directory_layout):
"""Build database index from scratch based from a directory layout."""
with self.write_lock():
data = {}
"""Build database index from scratch based from a directory layout.
# Ask the directory layout to traverse the filesystem.
for spec in directory_layout.all_specs():
# Create a spec for each known package and add it.
path = directory_layout.path_for_spec(spec)
hash_key = spec.dag_hash()
data[hash_key] = InstallRecord(spec, path, True)
Locks the DB if it isn't locked already.
# Recursively examine dependencies and add them, even
# if they are NOT installed. This ensures we know
# about missing dependencies.
for dep in spec.traverse(root=False):
dep_hash = dep.dag_hash()
if dep_hash not in data:
path = directory_layout.path_for_spec(dep)
installed = os.path.isdir(path)
data[dep_hash] = InstallRecord(dep.copy(), path, installed)
data[dep_hash].ref_count += 1
"""
with self.write_transaction():
old_data = self._data
try:
self._data = {}
# Assuming everything went ok, replace this object's data.
self._data = data
# Ask the directory layout to traverse the filesystem.
for spec in directory_layout.all_specs():
# Create a spec for each known package and add it.
path = directory_layout.path_for_spec(spec)
self._add(spec, path, directory_layout)
# write out, blowing away the old version if necessary
self.write()
self._check_ref_counts()
except:
# If anything explodes, restore old data, skip write.
self._data = old_data
raise
def read(self):
"""
Re-read Database from the data in the set location
If the cache is fresh, return immediately.
"""
if not self.is_dirty():
return
if os.path.isfile(self._index_path):
# Read from YAML file if a database exists
self._read_from_yaml(self._index_path)
else:
# The file doesn't exist, try to traverse the directory.
self.reindex(spack.install_layout)
def _check_ref_counts(self):
"""Ensure consistency of reference counts in the DB.
Raise an AssertionError if something is amiss.
def write(self):
Does no locking.
"""
Write the database to the standard location
Everywhere that the database is written it is read
within the same lock, so there is no need to refresh
the database within write()
counts = {}
for key, rec in self._data.items():
counts.setdefault(key, 0)
for dep in rec.spec.dependencies.values():
dep_key = dep.dag_hash()
counts.setdefault(dep_key, 0)
counts[dep_key] += 1
for rec in self._data.values():
key = rec.spec.dag_hash()
expected = counts[key]
found = rec.ref_count
if not expected == found:
raise AssertionError(
"Invalid ref_count: %s: %d (expected %d), in DB %s."
% (key, found, expected, self._index_path))
def _write(self):
"""Write the in-memory database index to its file path.
Does no locking.
"""
temp_name = '%s.%s.temp' % (socket.getfqdn(), os.getpid())
temp_file = join_path(self._db_dir, temp_name)
......@@ -314,7 +342,6 @@ def write(self):
# Write a temporary database file them move it into place
try:
with open(temp_file, 'w') as f:
self._last_write_time = int(time.time())
self._write_to_yaml(f)
os.rename(temp_file, self._index_path)
......@@ -325,36 +352,137 @@ def write(self):
raise
def is_dirty(self):
def _read(self):
"""Re-read Database from the data in the set location.
This does no locking.
"""
Returns true iff the database file does not exist
or was most recently written to by another spack instance.
if os.path.isfile(self._index_path):
# Read from YAML file if a database exists
self._read_from_yaml(self._index_path)
else:
# The file doesn't exist, try to traverse the directory.
# reindex() takes its own write lock, so no lock here.
self.reindex(spack.install_layout)
def read(self):
    """Sync the in-memory index from disk.

    Entering the read transaction runs the acquire hook, which
    re-reads the database file.
    """
    with self.read_transaction(): pass
def write(self):
    """Write the in-memory index out to disk.

    Exiting the write transaction runs the release hook, which
    writes the database file.
    """
    with self.write_transaction(): pass
def _add(self, spec, path, directory_layout=None):
    """Add an install record for spec at path to the database.

    This assumes that the spec is not already installed. It
    updates the ref counts on dependencies of the spec in the DB.

    This operation is in-memory, and does not lock the DB.
    """
    key = spec.dag_hash()
    if key in self._data:
        rec = self._data[key]
        rec.installed = True

        # TODO: this overwrites a previous install path (when path !=
        # self._data[key].path), and the old path still has a
        # dependent in the DB. We could consider re-RPATH-ing the
        # dependents. This case is probably infrequent and may not be
        # worth fixing, but this is where we can discover it.
        rec.path = path

    else:
        self._data[key] = InstallRecord(spec, path, True)
        for dep in spec.dependencies.values():
            self._increment_ref_count(dep, directory_layout)
def _increment_ref_count(self, spec, directory_layout=None):
    """Recursively examine dependencies and update their DB entries."""
    key = spec.dag_hash()

    if key not in self._data:
        # First reference to this spec: create a record for it.  Mark it
        # installed only if the layout shows a prefix on disk.
        installed = False
        path = None
        if directory_layout:
            path = directory_layout.path_for_spec(spec)
            installed = os.path.isdir(path)

        self._data[key] = InstallRecord(spec.copy(), path, installed)

        # NOTE(review): recursion does not forward directory_layout —
        # presumably intentional; confirm against callers.
        for dep in spec.dependencies.values():
            self._increment_ref_count(dep)

    self._data[key].ref_count += 1
@_autospec
def add(self, spec, path):
    """Add spec at path to database, locking and reading DB to sync.

    ``add()`` will lock and read from the DB on disk.
    """
    # TODO: ensure that spec is concrete?
    # Entire add is transactional.
    with self.write_transaction():
        self._add(spec, path)
def _get_matching_spec_key(self, spec, **kwargs):
    """Get the exact spec OR get a single spec that matches.

    Returns the DAG hash key for ``spec`` if the exact spec is in the
    database, otherwise the key of the single spec matching it.

    Raises:
        KeyError: if no spec in the database matches ``spec``.
    """
    key = spec.dag_hash()
    if key not in self._data:
        # The exact hash isn't present; fall back to a query in case
        # the caller passed a more abstract spec.
        match = self.query_one(spec, **kwargs)
        if match:
            return match.dag_hash()
        raise KeyError("No such spec in database! %s" % spec)
    return key
@_autospec
def get_record(self, spec, **kwargs):
    """Return the InstallRecord for the spec matching ``spec``."""
    matching_key = self._get_matching_spec_key(spec, **kwargs)
    return self._data[matching_key]
def _decrement_ref_count(self, spec):
    """Decrement the ref count on ``spec`` and, recursively, its deps.

    A record whose count reaches zero and that is not installed is
    removed from the database. In-memory only; does no locking.
    """
    key = spec.dag_hash()

    if key not in self._data:
        # TODO: print something here? DB is corrupt, but
        # not much we can do.
        return

    rec = self._data[key]
    rec.ref_count -= 1

    if rec.ref_count == 0 and not rec.installed:
        del self._data[key]
        for dep in spec.dependencies.values():
            self._decrement_ref_count(dep)
def _remove(self, spec):
    """Non-locking version of remove(); does real work.

    If the record still has dependents (ref_count > 0) it is only
    marked uninstalled; otherwise it is deleted and the ref counts
    of its dependencies are decremented.
    """
    key = self._get_matching_spec_key(spec)
    rec = self._data[key]

    # Keep the record around if something still depends on it.
    if rec.ref_count > 0:
        rec.installed = False
        return rec.spec

    del self._data[key]
    for dep in rec.spec.dependencies.values():
        self._decrement_ref_count(dep)

    # Returns the concrete spec so we know it in the case where a
    # query spec was passed in.
    return rec.spec
@_autospec
......@@ -369,13 +497,9 @@ def remove(self, spec):
and removes them if they are no longer needed.
"""
# Should always already be locked
with self.write_lock():
self.read()
hash_key = spec.dag_hash()
if hash_key in self._data:
del self._data[hash_key]
self.write()
# Take a lock around the entire removal.
with self.write_transaction():
return self._remove(spec)
@_autospec
......@@ -429,24 +553,75 @@ def query(self, query_spec=any, known=any, installed=True):
these really special cases that only belong here?
"""
with self.read_lock():
self.read()
with self.read_transaction():
results = []
for key, rec in self._data.items():
if installed is not any and rec.installed != installed:
continue
if known is not any and spack.db.exists(rec.spec.name) != known:
continue
if query_spec is any or rec.spec.satisfies(query_spec):
results.append(rec.spec)
results = []
for key, rec in self._data.items():
if installed is not any and rec.installed != installed:
continue
if known is not any and spack.db.exists(rec.spec.name) != known:
continue
if query_spec is any or rec.spec.satisfies(query_spec):
results.append(rec.spec)
return sorted(results)
return sorted(results)
def query_one(self, query_spec, known=any, installed=True):
    """Query for exactly one spec that matches the query spec.

    Raises an assertion error if more than one spec matches the
    query. Returns None if no installed package matches.
    """
    matches = self.query(query_spec, known, installed)
    assert len(matches) <= 1
    if not matches:
        return None
    return matches[0]
def missing(self, spec):
    """Whether ``spec`` is known to the DB but marked not installed.

    Takes a read transaction so the in-memory index is synced with
    the on-disk database before checking.
    """
    with self.read_transaction():
        key = spec.dag_hash()
        return key in self._data and not self._data[key].installed
class _Transaction(object):
    """Simple nested transaction context manager that uses a file lock.

    This class can trigger actions when the lock is acquired for the
    first time and released for the last.

    Timeout for lock is customizable.
    """

    def __init__(self, db, acquire_fn=None, release_fn=None,
                 timeout=_db_lock_timeout):
        self._db = db
        self._timeout = timeout
        self._acquire_fn = acquire_fn
        self._release_fn = release_fn

    def __enter__(self):
        # Subclasses' _enter() returns True only for the outermost
        # acquisition; only then run the acquire callback.
        outermost = self._enter()
        if outermost and self._acquire_fn:
            self._acquire_fn()

    def __exit__(self, type, value, traceback):
        # Symmetrically, run the release callback only when the last
        # nested hold of the lock is released.
        final = self._exit()
        if final and self._release_fn:
            self._release_fn()
class ReadTransaction(_Transaction):
    """Transaction that holds a shared (read) lock on the database."""

    def _enter(self):
        # True only when this call actually acquired the lock.
        return self._db.lock.acquire_read(self._timeout)

    def _exit(self):
        return self._db.lock.release_read()
class WriteTransaction(_Transaction):
    """Transaction that holds an exclusive (write) lock on the database."""

    def _enter(self):
        # True only when this call actually acquired the lock.
        return self._db.lock.acquire_write(self._timeout)

    def _exit(self):
        return self._db.lock.release_write()
class CorruptDatabaseError(SpackError):
......
......@@ -32,7 +32,6 @@
from external import yaml
import llnl.util.tty as tty
from llnl.util.lang import memoized
from llnl.util.filesystem import join_path, mkdirp
from spack.spec import Spec
......@@ -263,7 +262,6 @@ def create_install_directory(self, spec):
self.write_spec(spec, spec_file_path)
@memoized
def all_specs(self):
if not os.path.isdir(self.root):
return []
......@@ -274,7 +272,6 @@ def all_specs(self):
return [self.read_spec(s) for s in spec_files]
@memoized
def specs_by_hash(self):
by_hash = {}
for spec in self.all_specs():
......
......@@ -845,7 +845,7 @@ def real_work():
# note: PARENT of the build process adds the new package to
# the database, so that we don't need to re-read from file.
spack.installed_db.add(self.spec, spack.install_layout.path_for_spec(self.spec))
spack.installed_db.add(self.spec, self.prefix)
# Once everything else is done, run post install hooks
spack.hooks.post_install(self)
......
##############################################################################
# Copyright (c) 2013, Lawrence Livermore National Security, LLC.
# Copyright (c) 2013-2015, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
......@@ -26,79 +26,320 @@
These tests check the database is functioning properly,
both in memory and in its file
"""
import unittest
import tempfile
import shutil
import multiprocessing
from llnl.util.lock import *
from llnl.util.filesystem import join_path
import spack
from spack.database import Database
from spack.directory_layout import YamlDirectoryLayout
from spack.test.mock_packages_test import *
from llnl.util.tty.colify import colify
def _print_ref_counts():
    """Print out all ref counts for the graph used here, for debugging"""
    rows = []

    def add_rec(spec):
        # Look up every matching record, installed or not.
        matches = spack.installed_db.query(spec, installed=any)
        if not matches:
            rows.append("[ %-7s ] %-20s-" % ('', spec))
        else:
            key = matches[0].dag_hash()
            rec = spack.installed_db.get_record(matches[0])
            rows.append("[ %-7s ] %-20s%d" % (key[:7], spec, rec.ref_count))

    # One read transaction around all the queries.
    with spack.installed_db.read_transaction():
        for name in ('mpileaks ^mpich', 'callpath ^mpich', 'mpich',
                     'mpileaks ^mpich2', 'callpath ^mpich2', 'mpich2',
                     'mpileaks ^zmpi', 'callpath ^zmpi', 'zmpi',
                     'fake', 'dyninst', 'libdwarf', 'libelf'):
            add_rec(name)

    colify(rows, cols=3)
class DatabaseTest(MockPackagesTest):
def _mock_install(self, spec):
    """Concretize ``spec`` and fake-install the resulting package."""
    concrete = Spec(spec).concretized()
    package = spack.db.get(concrete)
    package.do_install(fake=True)
def _mock_remove(self, spec):
    """Uninstall the single installed spec matching ``spec``."""
    matches = spack.installed_db.query(spec)
    assert len(matches) == 1
    target = matches[0]
    target.package.do_uninstall(target)
class DatabaseTest(unittest.TestCase):
def setUp(self):
self.original_db = spack.installed_db
spack.installed_db = Database(self.original_db._root,"_test_index.yaml")
self.file_path = join_path(self.original_db._root,"_test_index.yaml")
if os.path.exists(self.file_path):
os.remove(self.file_path)
super(DatabaseTest, self).setUp()
#
# TODO: make the mockup below easier.
#
# Make a fake install directory
self.install_path = tempfile.mkdtemp()
self.spack_install_path = spack.install_path
spack.install_path = self.install_path
self.install_layout = YamlDirectoryLayout(self.install_path)
self.spack_install_layout = spack.install_layout
spack.install_layout = self.install_layout
# Make fake database and fake install directory.
self.installed_db = Database(self.install_path)
self.spack_installed_db = spack.installed_db
spack.installed_db = self.installed_db
# make a mock database with some packages installed note that
# the ref count for dyninst here will be 3, as it's recycled
# across each install.
#
# Here is what the mock DB looks like:
#
# o mpileaks o mpileaks' o mpileaks''
# |\ |\ |\
# | o callpath | o callpath' | o callpath''
# |/| |/| |/|
# o | mpich o | mpich2 o | zmpi
# | | o | fake
# | | |
# | |______________/
# | .____________/
# |/
# o dyninst
# |\
# | o libdwarf
# |/
# o libelf
#
# Transaction used to avoid repeated writes.
with spack.installed_db.write_transaction():
self._mock_install('mpileaks ^mpich')
self._mock_install('mpileaks ^mpich2')
self._mock_install('mpileaks ^zmpi')
def tearDown(self):
spack.installed_db = self.original_db
os.remove(self.file_path)
def _test_read_from_install_tree(self):
specs = spack.install_layout.all_specs()
spack.installed_db.read_database()
spack.installed_db.write()
for sph in spack.installed_db._data:
self.assertTrue(sph['spec'] in specs)
self.assertEqual(len(specs),len(spack.installed_db._data))
def _test_remove_and_add(self):
specs = spack.install_layout.all_specs()
spack.installed_db.remove(specs[len(specs)-1])
for sph in spack.installed_db._data:
self.assertTrue(sph['spec'] in specs[:len(specs)-1])
self.assertEqual(len(specs)-1,len(spack.installed_db._data))
spack.installed_db.add(specs[len(specs)-1],"")
for sph in spack.installed_db._data:
self.assertTrue(sph['spec'] in specs)
self.assertEqual(len(specs),len(spack.installed_db._data))
def _test_read_from_file(self):
spack.installed_db.read_database()
size = len(spack.installed_db._data)
spack.installed_db._data = spack.installed_db._data[1:]
os.utime(spack.installed_db._file_path,None)
spack.installed_db.read_database()
self.assertEqual(size,len(spack.installed_db._data))
specs = spack.install_layout.all_specs()
self.assertEqual(size,len(specs))
for sph in spack.installed_db._data:
self.assertTrue(sph['spec'] in specs)
def _test_write_to_file(self):
spack.installed_db.read_database()
size = len(spack.installed_db._data)
real_data = spack.installed_db._data
spack.installed_db._data = real_data[:size-1]
spack.installed_db.write()
spack.installed_db._data = real_data
os.utime(spack.installed_db._file_path,None)
spack.installed_db.read_database()
self.assertEqual(size-1,len(spack.installed_db._data))
specs = spack.install_layout.all_specs()
self.assertEqual(size,len(specs))
for sph in spack.installed_db._data:
self.assertTrue(sph['spec'] in specs[:size-1])
def test_ordered_test(self):
self._test_read_from_install_tree()
self._test_remove_and_add()
self._test_read_from_file()
self._test_write_to_file()
super(DatabaseTest, self).tearDown()
shutil.rmtree(self.install_path)
spack.install_path = self.spack_install_path
spack.install_layout = self.spack_install_layout
spack.installed_db = self.spack_installed_db
def test_010_all_install_sanity(self):
    """Ensure that the install layout reflects what we think it does."""
    all_specs = spack.install_layout.all_specs()
    self.assertEqual(len(all_specs), 13)

    def count(constraint):
        # Number of layout specs satisfying the given constraint.
        return len([s for s in all_specs if s.satisfies(constraint)])

    # query specs with multiple configurations
    self.assertEqual(count('mpileaks'), 3)
    self.assertEqual(count('callpath'), 3)
    self.assertEqual(count('mpi'), 3)

    # query specs with single configurations
    self.assertEqual(count('dyninst'), 1)
    self.assertEqual(count('libdwarf'), 1)
    self.assertEqual(count('libelf'), 1)

    # Query by dependency
    self.assertEqual(count('mpileaks ^mpich'), 1)
    self.assertEqual(count('mpileaks ^mpich2'), 1)
    self.assertEqual(count('mpileaks ^zmpi'), 1)
def test_015_write_and_read(self):
    """Round-trip the DB through write()/read(); records must survive."""
    # write and read DB
    with spack.installed_db.write_transaction():
        specs = spack.installed_db.query()
        recs = [spack.installed_db.get_record(s) for s in specs]
        spack.installed_db.write()
        spack.installed_db.read()

    # Every record read back must match the one captured before the
    # round trip, field by field.
    for spec, rec in zip(specs, recs):
        new_rec = spack.installed_db.get_record(spec)
        self.assertEqual(new_rec.ref_count, rec.ref_count)
        self.assertEqual(new_rec.spec, rec.spec)
        self.assertEqual(new_rec.path, rec.path)
        self.assertEqual(new_rec.installed, rec.installed)
def _check_db_sanity(self):
    """Utility function to check db against install layout."""
    layout_specs = sorted(spack.install_layout.all_specs())
    db_specs = sorted(self.installed_db.query())
    self.assertEqual(len(layout_specs), len(db_specs))
    for expected, actual in zip(layout_specs, db_specs):
        self.assertEqual(expected, actual)
def test_020_db_sanity(self):
    """Make sure query() returns what's actually in the db."""
    self._check_db_sanity()
def test_030_db_sanity_from_another_process(self):
    """DB reads and writes from a forked child process are visible here."""
    def read_and_modify():
        self._check_db_sanity()  # check that other process can read DB
        with self.installed_db.write_transaction():
            self._mock_remove('mpileaks ^zmpi')

    p = multiprocessing.Process(target=read_and_modify, args=())
    p.start()
    p.join()

    # ensure child process change is visible in parent process
    with self.installed_db.read_transaction():
        self.assertEqual(len(self.installed_db.query('mpileaks ^zmpi')), 0)
def test_040_ref_counts(self):
    """Ensure that we got ref counts right when we read the DB."""
    self.installed_db._check_ref_counts()
def test_050_basic_query(self):
    """Ensure that querying the database is consistent with what is installed."""
    # query everything
    self.assertEqual(len(spack.installed_db.query()), 13)

    def query_count(constraint):
        # Number of DB records matching the constraint.
        return len(self.installed_db.query(constraint))

    # query specs with multiple configurations
    self.assertEqual(query_count('mpileaks'), 3)
    self.assertEqual(query_count('callpath'), 3)
    self.assertEqual(query_count('mpi'), 3)

    # query specs with single configurations
    self.assertEqual(query_count('dyninst'), 1)
    self.assertEqual(query_count('libdwarf'), 1)
    self.assertEqual(query_count('libelf'), 1)

    # Query by dependency
    self.assertEqual(query_count('mpileaks ^mpich'), 1)
    self.assertEqual(query_count('mpileaks ^mpich2'), 1)
    self.assertEqual(query_count('mpileaks ^zmpi'), 1)
def _check_remove_and_add_package(self, spec):
    """Remove a spec from the DB, then add it and make sure everything's
    still ok once it is added.  This checks that it was
    removed, that it's back when added again, and that ref
    counts are consistent.

    ``spec`` is a query spec string that identifies exactly one
    installed package in the mock DB.
    """
    original = self.installed_db.query()
    self.installed_db._check_ref_counts()

    # Remove spec
    concrete_spec = self.installed_db.remove(spec)
    self.installed_db._check_ref_counts()
    remaining = self.installed_db.query()

    # ensure spec we removed is gone
    self.assertEqual(len(original) - 1, len(remaining))
    self.assertTrue(all(s in original for s in remaining))
    self.assertTrue(concrete_spec not in remaining)

    # add it back and make sure everything is ok.
    self.installed_db.add(concrete_spec, "")
    installed = self.installed_db.query()
    self.assertEqual(len(installed), len(original))

    # sanity check against directory layout and check ref counts.
    self._check_db_sanity()
    self.installed_db._check_ref_counts()
def test_060_remove_and_add_root_package(self):
    """Remove/re-add a root spec; DB and ref counts stay consistent."""
    self._check_remove_and_add_package('mpileaks ^mpich')
def test_070_remove_and_add_dependency_package(self):
    """Remove/re-add a dependency spec; DB and ref counts stay consistent."""
    self._check_remove_and_add_package('dyninst')
def test_080_root_ref_counts(self):
    """Removing/re-adding a root spec updates dependency ref counts."""
    rec = self.installed_db.get_record('mpileaks ^mpich')

    # Remove a top-level spec from the DB
    self.installed_db.remove('mpileaks ^mpich')

    # record no longer in DB
    self.assertEqual(self.installed_db.query('mpileaks ^mpich', installed=any), [])

    # record's deps have updated ref_counts
    self.assertEqual(self.installed_db.get_record('callpath ^mpich').ref_count, 0)
    self.assertEqual(self.installed_db.get_record('mpich').ref_count, 1)

    # put the spec back
    self.installed_db.add(rec.spec, rec.path)

    # record is present again
    self.assertEqual(len(self.installed_db.query('mpileaks ^mpich', installed=any)), 1)

    # dependencies have ref counts updated
    self.assertEqual(self.installed_db.get_record('callpath ^mpich').ref_count, 1)
    self.assertEqual(self.installed_db.get_record('mpich').ref_count, 2)
def test_090_non_root_ref_counts(self):
    """Force-removing a non-root spec keeps it until its dependents go."""
    # NOTE(review): these two records appear unused below — presumably
    # kept to exercise get_record before removal; confirm.
    mpileaks_mpich_rec = self.installed_db.get_record('mpileaks ^mpich')
    callpath_mpich_rec = self.installed_db.get_record('callpath ^mpich')

    # "force remove" a non-root spec from the DB
    self.installed_db.remove('callpath ^mpich')

    # record still in DB but marked uninstalled
    self.assertEqual(self.installed_db.query('callpath ^mpich', installed=True), [])
    self.assertEqual(len(self.installed_db.query('callpath ^mpich', installed=any)), 1)

    # record and its deps have same ref_counts
    self.assertEqual(self.installed_db.get_record('callpath ^mpich', installed=any).ref_count, 1)
    self.assertEqual(self.installed_db.get_record('mpich').ref_count, 2)

    # remove only dependent of uninstalled callpath record
    self.installed_db.remove('mpileaks ^mpich')

    # record and parent are completely gone.
    self.assertEqual(self.installed_db.query('mpileaks ^mpich', installed=any), [])
    self.assertEqual(self.installed_db.query('callpath ^mpich', installed=any), [])

    # mpich ref count updated properly.
    mpich_rec = self.installed_db.get_record('mpich')
    self.assertEqual(mpich_rec.ref_count, 0)
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Please register or to comment