diff --git a/util/build_detector.sh b/util/build_detector.sh
new file mode 100755
index 0000000000000000000000000000000000000000..bccf765b24b79d9aa00d5075a1c20f321abfb1fd
--- /dev/null
+++ b/util/build_detector.sh
@@ -0,0 +1,64 @@
+#!/bin/bash
+
+## =============================================================================
+## Build and install the JUGGLER_DETECTOR detector package into our local prefix
+## =============================================================================
+
+## make sure we launch this script from the project root directory
+PROJECT_ROOT="$( cd "$(dirname "$0")" >/dev/null 2>&1 ; pwd -P )"/..
+pushd ${PROJECT_ROOT}
+
+## =============================================================================
+## Load the environment variables. To build the detector we need the following
+## variables:
+##
+## - JUGGLER_DETECTOR: the detector package we want to use for this benchmark
+## - LOCAL_PREFIX:     location where local packages should be installed
+## - DETECTOR_PREFIX:  prefix for the detector definitions 
+## - DETECTOR_PATH:    full path for the detector definitions
+##                     this is the same as ${DETECTOR_PREFIX}/${JUGGLER_DETECTOR}
+##
+## You can read options/env.sh for more in-depth explanations of the variables
+## and how they can be controlled.
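+##
+## For illustration only (hypothetical value): with JUGGLER_DETECTOR=athena, the
+## detector definitions are expected under ${DETECTOR_PREFIX}/athena, which is
+## exactly what ${DETECTOR_PATH} will point to after sourcing options/env.sh.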
+source options/env.sh
+
+## =============================================================================
+## Step 1: download/update the detector definitions (if needed)
+pushd ${DETECTOR_PREFIX}
+
+## We need an up-to-date copy of the detector
+if [ ! -d ${JUGGLER_DETECTOR} ]; then
+  echo "Fetching ${JUGGLER_DETECTOR}"
+  git clone -b ${JUGGLER_DETECTOR_VERSION} https://eicweb.phy.anl.gov/EIC/detectors/${JUGGLER_DETECTOR}.git
+else
+  echo "Updating ${JUGGLER_DETECTOR}"
+  pushd ${JUGGLER_DETECTOR}
+  git pull --ff-only
+  popd
+fi
+## We also need an up-to-date copy of the accelerator. For now this is done
+## manually. Down the road we could automate this with CMake.
+if [ ! -d accelerator ]; then
+  echo "Fetching accelerator"
+  git clone https://eicweb.phy.anl.gov/EIC/detectors/accelerator.git
+else
+  echo "Updating accelerator"
+  pushd accelerator
+  git pull --ff-only
+  popd
+fi
+## Now symlink the accelerator definition into the detector definition
+echo "Linking accelerator definition into detector definition"
+ln -s -f -n ${DETECTOR_PREFIX}/accelerator/eic ${DETECTOR_PATH}/eic
+
+## =============================================================================
+## Step 2: Compile and install the detector definition
+echo "Building and installing the ${JUGGLER_DETECTOR} package"
+
+mkdir -p ${DETECTOR_PREFIX}/build
+pushd ${DETECTOR_PREFIX}/build
+cmake ${DETECTOR_PATH} -DCMAKE_INSTALL_PREFIX=${LOCAL_PREFIX} && make -j30 install
+
+## =============================================================================
+## Step 3: That's all!
+echo "Detector build/install complete!"
diff --git a/util/collect_benchmarks.py b/util/collect_benchmarks.py
new file mode 100755
index 0000000000000000000000000000000000000000..0af7e9a12b37eb7616be3400d4046f0088bcf223
--- /dev/null
+++ b/util/collect_benchmarks.py
@@ -0,0 +1,181 @@
+#!/usr/bin/env python3
+
+"""
+Combine the json files from the individual benchmark tests into
+a single master json file that summarizes all benchmarks.
+
+Benchmark results are expected to be all json files in the results
+directory.
+"""
+
+import json
+from pathlib import Path
+
+## Our master definition file, the benchmark project directory
+MASTER_FILE=r'benchmarks/benchmarks.json'
+
+## Our results directory
+RESULTS_PATH=r'results'
+
+## Output json file with all benchmark results
+OUTPUT_FILE=r'results/summary.json'
+
+## Exceptions for this module
+class Error(Exception):
+    '''Base class for exceptions in this module.'''
+    pass
+class FileNotFoundError(Error):
+    '''File does not exist.
+
+    Attributes:
+        file: the file name
+        message: error message
+    '''
+    def __init__(self, file):
+        self.file = file
+        self.message = 'No such file or directory: {}'.format(file)
+
+class InvalidDefinitionError(Error):
+    '''Raised for missing keys in the definitions.
+
+    Attributes:
+        key: the missing key
+        file: the definition file
+        message: error message
+    '''
+    def __init__(self, key, file):
+        self.key = key
+        self.file = file
+        self.message = "key '{}' not found in '{}'".format(key, file)
+
+class InvalidResultError(Error):
+    '''Raised for invalid benchmark result value.
+
+    Attributes:
+        key: the missing key
+        value: the invalid value
+        file: the benchmark definition file
+        message: error message
+    '''
+    def __init__(self, key, value, file):
+        self.key = key
+        self.value = value
+        self.file = file
+        self.message = "value '{}' for key '{}' invalid in benchmark file '{}'".format(
+                value, key, file)
+    
+def collect_benchmarks():
+    '''Collect all benchmark results and write results to a single file.'''
+    print("Collecting all benchmark results")
+
+    ## load the test definition for this benchmark
+    results = _load_master()
+
+    ## collect the test results
+    results['benchmarks'] = _load_benchmarks()
+    
+    ## calculate aggregate test statistics
+    results = _aggregate_results(results)
+
+    ## save results to output file
+    _save(results)
+
+    ## Summarize results
+    for bm in results['benchmarks']:
+        _print_benchmark(bm)
+    _print_summary(results)
+
+def _load_master():
+    '''Load master definition.'''
+    master_file = Path(MASTER_FILE)
+    if not master_file.exists():
+        raise FileNotFoundError(master_file)
+    print('  --> Loading master definition from:', master_file)
+    results = None
+    with master_file.open() as f:
+        results = json.load(f)
+    ## ensure this is a valid benchmark file
+    for key in ('name', 'title', 'description'):
+        if not key in results:
+            raise InvalidDefinitionError(key, master_file)
+    return results
+
+def _load_benchmarks():
+    '''Load all benchmark results from the results folder.'''
+    print('  --> Collecting all benchmarks')
+    rootdir = Path(RESULTS_PATH)
+    results = []
+    for file in rootdir.glob('*.json'):
+        print('    --> Loading file:', file, '... ', end='')
+        with open(file) as f:
+            bm = json.load(f)
+            ## skip files that don't include test results
+            if not 'tests' in bm:
+                print('skipped (does not contain benchmark results).')
+                continue
+            ## check if these are valid benchmark results,
+            ## raise exception otherwise
+            for key in ('name', 'title', 'description', 'target', 'n_tests',
+                    'n_pass', 'n_fail', 'n_error', 'maximum', 'sum', 'value',
+                    'result'):
+                if not key in bm:
+                    raise InvalidDefinitionError(key, file)
+            if bm['result'] not in ('pass', 'fail', 'error'):
+                raise InvalidResultError('result', bm['result'], file)
+            ## Append to our test results
+            results.append(bm)
+            print('done')
+    return results
+
+def _aggregate_results(results):
+    '''Aggregate benchmark results.'''
+    print('  --> Aggregating benchmark statistics')
+    results['n_benchmarks'] = len(results['benchmarks'])
+    results['n_pass'] = len([1 for t in results['benchmarks'] if t['result'] == 'pass'])
+    results['n_fail'] = len([1 for t in results['benchmarks'] if t['result'] == 'fail'])
+    results['n_error'] = len([1 for t in results['benchmarks'] if t['result'] == 'error'])
+    if results['n_error'] > 0:
+        results['result'] = 'error'
+    elif results['n_fail'] == 0:
+        results['result'] = 'pass'
+    else:
+        results['result'] = 'fail'
+    return results
+
+def _save(results):
+    '''Save aggregated benchmark results'''
+    ofile = Path(OUTPUT_FILE)
+    print('  --> Saving results to:', ofile)
+    with ofile.open('w') as f:
+        json.dump(results, f, indent=4)
+
+def _print_benchmark(bm):
+    '''Print benchmark summary to the terminal.'''
+    print('====================================================================')
+    print('  Summary for:', bm['title'])
+    print('  Pass: {}, Fail: {}, Error: {} out of {} total tests'.format(
+        bm['n_pass'], bm['n_fail'], bm['n_error'],
+        bm['n_tests']))
+    print('  Weighted sum: {} / {}'.format(bm['sum'], bm['maximum']))
+    print('  Benchmark value: {} (target: {})'.format(
+        bm['value'], bm['target']))
+    print('  ===> status:', bm['result'])
+
+def _print_summary(results):
+    '''Print master benchmark summary to the terminal.'''
+    print('====================================================================')
+    print('MASTER BENCHMARK SUMMARY FOR:', results['title'].upper())
+    print('Pass: {}, Fail: {}, Error: {} out of {} total benchmarks'.format(
+        results['n_pass'], results['n_fail'], results['n_error'],
+        results['n_benchmarks']))
+    print('===> status:', results['result'])
+    print('====================================================================')
+
+
+if __name__ == "__main__":
+    try:
+        collect_benchmarks()
+    except Error as e:
+        print()
+        print('ERROR', e.message)
diff --git a/util/collect_tests.py b/util/collect_tests.py
new file mode 100755
index 0000000000000000000000000000000000000000..4d860ca79d9f996204a5ca9dc447fa10ed8ec4f4
--- /dev/null
+++ b/util/collect_tests.py
@@ -0,0 +1,204 @@
+#!/usr/bin/env python3
+
+"""
+Collect the json files from individual benchmark tests into
+a larger json file that combines all benchmark information,
+and do additional accounting for the benchmark.
+
+Test results are expected to have the following file name and directory
+structure:
+   results/<BENCHMARK_NAME>/**/<SOME_NAME>.json
+where ** implies we recursively check all sub-directories of <BENCHMARK_NAME>.
+
+Internally, we will look for the "tests" keyword in each of these
+files to identify them as benchmark components.
+"""
+
+import argparse
+import json
+from pathlib import Path
+
+## Our benchmark definition file, stored in the benchmark root directory
+BENCHMARK_FILE=r'benchmarks/{}/benchmark.json'
+
+## Our benchmark results directory
+RESULTS_PATH=r'results/{}'
+
+## Output json file with benchmark results
+OUTPUT_FILE=r'results/{}.json'
+
+## Exceptions for this module
+class Error(Exception):
+    '''Base class for exceptions in this module.'''
+    pass
+class FileNotFoundError(Error):
+    '''File does not exist.
+
+    Attributes:
+        file: the file name
+        message: error message
+    '''
+    def __init__(self, file):
+        self.file = file
+        self.message = 'No such file or directory: {}'.format(file)
+
+class InvalidBenchmarkDefinitionError(Error):
+    '''Raised for missing keys in the benchmark definition.
+
+    Attributes:
+        key: the missing key
+        file: the benchmark definition file
+        message: error message
+    '''
+    def __init__(self, key, file):
+        self.key = key
+        self.file = file
+        self.message = "key '{}' not found in benchmark file '{}'".format(key, file)
+
+class InvalidTestDefinitionError(Error):
+    '''Raised for missing keys in the test result.
+
+    Attributes:
+        key: the missing key
+        file: the test result file
+        message: error message
+    '''
+    def __init__(self, key, file):
+        self.key = key
+        self.file = file
+        self.message = "key '{}' not found in test file '{}'".format(key, file)
+class InvalidTestResultError(Error):
+    '''Raised for invalid test result value.
+
+    Attributes:
+        key: the missing key
+        value: the invalid value
+        file: the benchmark definition file
+        message: error message
+    '''
+    def __init__(self, key, value, file):
+        self.key = key
+        self.value = value
+        self.file = file
+        self.message = "value '{}' for key '{}' invalid in test file '{}'".format(
+                value, key, file)
+    
+    
+parser = argparse.ArgumentParser()
+parser.add_argument(
+        'benchmark',
+        action='append',
+        help='One or more benchmarks for which to collect test results.')
+
+def collect_results(benchmark):
+    '''Collect benchmark tests and write results to file.'''
+    print("Collecting results for benchmark '{}'".format(benchmark))
+
+    ## load the test definition for this benchmark
+    results = _load_benchmark(benchmark)
+
+    ## collect the test results
+    results['tests'] = _load_tests(benchmark)
+    
+    ## calculate aggregate test statistics
+    results = _aggregate_results(results)
+
+    ## save results to output file
+    _save(benchmark, results)
+
+    ## Summarize results
+    _print_summary(results)
+
+def _load_benchmark(benchmark):
+    '''Load benchmark definition.'''
+    benchfile = Path(BENCHMARK_FILE.format(benchmark))
+    if not benchfile.exists():
+        raise FileNotFoundError(benchfile)
+    print('  --> Loading benchmark definition from:', benchfile)
+    results = None
+    with benchfile.open() as f:
+        results = json.load(f)
+    ## ensure this is a valid benchmark file
+    for key in ('name', 'title', 'description', 'target'):
+        if not key in results:
+            raise InvalidBenchmarkDefinitionError(key, benchfile)
+    return results
+
+def _load_tests(benchmark):
+    '''Loop over all test results in benchmark folder and return results.'''
+    print('  --> Collecting all test results')
+    rootdir = Path(RESULTS_PATH.format(benchmark))
+    results = []
+    for file in rootdir.glob('**/*.json'):
+        print('    --> Loading file:', file, '... ', end='')
+        with open(file) as f:
+            new_results = json.load(f)
+            ## skip files that don't include test results
+            if not 'tests' in new_results:
+                print('not a test result')
+                continue
+            ## check if these are valid test results,
+            ## raise exception otherwise
+            for test in new_results['tests']:
+                for key in ('name', 'title', 'description', 'quantity', 'target',
+                        'value', 'result'):
+                    if not key in test:
+                        raise InvalidTestDefinitionError(key, file)
+                if test['result'] not in ('pass', 'fail', 'error'):
+                    raise InvalidTestResultError('result', test['result'], file)
+                ## ensure 'weight' key is present, defaulting to 1 if needed
+                if not 'weight' in test:
+                    test['weight'] = 1.
+                ## Append to our test results
+                results.append(test)
+            print('done')
+    return results
+
+def _aggregate_results(results):
+    '''Aggregate test results for our benchmark.'''
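+    ## Worked example (hypothetical numbers): three tests with weights 1, 1 and 2,
+    ## of which the tests with weights 1 and 2 pass (and none error out), give
+    ## maximum = 4, sum = 3 and value = 0.75, so the benchmark passes as long as
+    ## its target is at most 0.75.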
+    print('  --> Aggregating benchmark statistics')
+    results['target'] = float(results['target'])
+    results['n_tests'] = len(results['tests'])
+    results['n_pass'] = len([1 for t in results['tests'] if t['result'] == 'pass'])
+    results['n_fail'] = len([1 for t in results['tests'] if t['result'] == 'fail'])
+    results['n_error'] = len([1 for t in results['tests'] if t['result'] == 'error'])
+    results['maximum'] = sum([t['weight'] for t in results['tests']])
+    results['sum'] = sum([t['weight'] for t in results['tests'] if t['result'] == 'pass'])
+    if (results['n_tests'] > 0):
+        results['value'] = results['sum'] / results['maximum']
+        if results['n_error'] > 0:
+            results['result'] = 'error'
+        elif results['value'] >= results['target']:
+            results['result'] = 'pass'
+        else:
+            results['result'] = 'fail'
+    else:
+        results['value'] = -1
+        results['result'] = 'error'
+    return results
+
+def _save(benchmark, results):
+    '''Save benchmark results'''
+    ofile = Path(OUTPUT_FILE.format(benchmark))
+    print('  --> Saving benchmark results to:', ofile)
+    with ofile.open('w') as f:
+        json.dump(results, f, indent=4)
+
+def _print_summary(results):
+    '''Print benchmark summary to the terminal.'''
+    print('====================================================================')
+    print('Summary for:', results['title'])
+    print('Pass: {}, Fail: {}, Error: {} out of {} total tests'.format(
+        results['n_pass'], results['n_fail'], results['n_error'],
+        results['n_tests']))
+    print('Weighted sum: {} / {}'.format(results['sum'], results['maximum']))
+    print('Benchmark value: {} (target: {})'.format(
+        results['value'], results['target']))
+    print('===> status:', results['result'])
+    print('====================================================================')
+
+
+if __name__ == "__main__":
+    args = parser.parse_args()
+    for benchmark in args.benchmark:
+        collect_results(benchmark)
diff --git a/util/compile_analyses.py b/util/compile_analyses.py
new file mode 100755
index 0000000000000000000000000000000000000000..153f2ea2f61429b6864a21b3ad625e6b53373ba3
--- /dev/null
+++ b/util/compile_analyses.py
@@ -0,0 +1,119 @@
+#!/usr/bin/env python3
+
+"""
+Compile all ROOT analysis scripts under
+benchmarks/<BENCHMARK>/analysis/*.cxx
+
+Doing this step here rather than during the main benchmark script has
+multiple advantages:
+    1. Get feedback on syntax errors early on, without wasting compute resources
+    2. Avoid race conditions for large benchmarks run in parallel
+    3. Make it easier to properly handle the root build directory, as
+       this has to exist prior to our attempt to compile, else all will
+       fail (this is probably an old bug in root...)
+
+Analysis scripts are expected to have extension 'cxx' and be located in the analysis
+subdirectory
+"""
+
+import argparse
+import os
+from pathlib import Path
+
+## Our analysis path and file extension for glob
+ANALYSIS_PATH=r'benchmarks/{}/analysis'
+ANALYSIS_EXT = r'cxx'
+
+## Exceptions for this module
+class Error(Exception):
+    '''Base class for exceptions in this module.'''
+    pass
+
+class PathNotFoundError(Error):
+    '''Path does not exist.
+
+    Attributes:
+        path: the path name
+        message: error message
+    '''
+    def __init__(self, path):
+        self.path = path
+        self.message = 'No such directory: {}'.format(path)
+class NoAnalysesFoundError(Error):
+    '''Did not find any analysis scripts to compile.
+
+    Attributes:
+        path: the analysis path
+        message: error message
+    '''
+    def __init__(self, path):
+        self.path = path
+        self.message = "No analyses found (extension '{}') in path: {}".format(
+                ANALYSIS_EXT, path)
+
+class CompilationError(Error):
+    '''Raised when we fail to compile an analysis script.
+
+    Attributes:
+        file: analysis file name
+        message: error message
+    '''
+    def __init__(self, file):
+        self.file = file
+        self.message = "Analysis '{}' failed to compile".format(file)
+
+parser = argparse.ArgumentParser()
+parser.add_argument(
+        'benchmark',
+        help='The benchmark for which to compile the analysis scripts.')
+
+def compile_analyses(benchmark):
+    '''Compile all analysis scripts for a benchmark.'''
+    print("Compiling all analyis scripts for '{}'".format(benchmark))
+
+    ## Ensure our build directory exists
+    _init_build_dir(benchmark)
+
+    ## Get a list of all analysis scripts
+    _compile_all(benchmark)
+
+    ## All done!
+    print('All analyses for', benchmark, 'compiled successfully')
+
+def _init_build_dir(benchmark):
+    '''Initialize our ROOT build directory (if using one).'''
+    print(' --> Initializing ROOT build directory ...')
+    build_prefix = os.getenv('ROOT_BUILD_DIR')
+    if build_prefix is None:
+        print('    --> ROOT_BUILD_DIR not set, no action needed.')
+        return
+    ## deduce the root build directory
+    pwd = os.getenv('PWD')
+    build_dir = '{}/{}/{}'.format(build_prefix, pwd, ANALYSIS_PATH.format(benchmark))
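+    ## For illustration (hypothetical paths): with ROOT_BUILD_DIR=/tmp/root_build and
+    ## PWD=/home/user/benchmarks, benchmark 'dis' would get the build directory
+    ## /tmp/root_build//home/user/benchmarks/benchmarks/dis/analysis
+    ## (the doubled slash is harmless on POSIX systems).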
+    print("    --> Ensuring directory '{}' exists".format(build_dir))
+    os.system('mkdir -p {}'.format(build_dir))
+
+def _compile_all(benchmark):
+    '''Compile all analysis for this benchmark.'''
+    print(' --> Compiling analysis scripts')
+    anadir = Path(ANALYSIS_PATH.format(benchmark))
+    if not anadir.exists():
+        raise PathNotFoundError(anadir)
+    ana_list = []
+    for file in anadir.glob('*.{}'.format(ANALYSIS_EXT)):
+        ana_list.append(file)
+        print('    --> Compiling:', file, flush=True)
+        err = os.system(_compile_cmd(file))
+        if err:
+            raise CompilationError(file)
+    if len(ana_list) == 0:
+        raise NoAnalysesFoundError(anadir)
+
+def _compile_cmd(file):
+    '''Return a one-line shell command to compile an analysis script.'''
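+    ## For a hypothetical script benchmarks/dis/analysis/tracking.cxx this expands to:
+    ##   bash -c "root -q -b -e \".L benchmarks/dis/analysis/tracking.cxx+\""
+    ## where the trailing '+' asks ROOT (ACLiC) to compile the macro instead of
+    ## interpreting it.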
+    return r'bash -c "root -q -b -e \".L {}+\""'.format(file)
+
+if __name__ == "__main__":
+    args = parser.parse_args()
+    compile_analyses(args.benchmark)
diff --git a/util/parse_cmd.sh b/util/parse_cmd.sh
new file mode 100755
index 0000000000000000000000000000000000000000..04028d8958d03df22f7b2d964a75acf2d9b47317
--- /dev/null
+++ b/util/parse_cmd.sh
@@ -0,0 +1,127 @@
+#!/bin/bash
+
+## =============================================================================
+## Generic utility script to parse command line arguments for the various
+## bash scripts that control the CI. This script should be source'd with
+## command line arguments from a bash-like (non-POSIX) shell such as
+## bash or zsh.
+##
+## To control some of the functionality of the script, you can set the following
+## environment variables prior to calling the script:
+##   - REQUIRE_DECAY:     require the --decay flag to be set
+##   - REQUIRE_LEADING:   require the --leading flag to be set
+## =============================================================================
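+
+## Example (hypothetical values), as it would be source'd by a calling script:
+##   source util/parse_cmd.sh --ebeam 10 --pbeam 100 --config barrel
+## which, on success, exports CONFIG, EBEAM and PBEAM back to the caller.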
+
+## Commented out because this should be handled by the calling script,
+## so as not to enforce a fixed directory structure.
+## make sure we launch this script from the project root directory
+#PROJECT_ROOT="$( cd "$(dirname "$0")" >/dev/null 2>&1 ; pwd -P )"/..
+#pushd ${PROJECT_ROOT}
+
+## =============================================================================
+## Step 1: Process the command line arguments
+
+function print_the_help {
+  echo "USAGE:    --ebeam E --pbeam E --config C1 --decay D2"
+  echo "          [--config C2 --decay D2 --decay D3 ...]"
+  echo "REQUIRED ARGUMENTS:"
+  echo "          --ebeam       Electron beam energy"
+  echo "          --pbeam       Ion beam energy"
+  echo "          --config      Generator configuration identifiers (at least one)"
+  if [ -n "${REQUIRE_DECAY}" ]; then
+    echo "          --decay       Specific decay particle (e.g. muon)."
+  fi
+  if [ -n "${REQUIRE_LEADING}" ]; then
+    echo "          --leading     Leading particle of interest (e.g. jpsi)."
+  fi
+  echo "          -h,--help     Print this message"
+  echo ""
+  echo "  Generate multiple monte carlo samples for a desired process." 
+  exit
+}
+
+## Required variables
+EBEAM=
+PBEAM=
+DECAY=
+LEADING=
+CONFIG=
+
+while [ $# -gt 0 ]
+do
+  key="$1"
+  case $key in
+    --config)
+      CONFIG="$2"
+      shift # past argument
+      shift # past value
+      ;;
+    --ebeam)
+      EBEAM="$2"
+      shift # past argument
+      shift # past value
+      ;;
+    --pbeam)
+      PBEAM="$2"
+      shift # past argument
+      shift # past value
+      ;;
+    --leading)
+      LEADING="$2"
+      shift # past argument
+      shift # past value
+      ;;
+    --decay)
+      DECAY="$2"
+      shift # past argument
+      shift # past value
+      ;;
+    -h|--help)
+      print_the_help
+      exit 0
+      ;;
+    *)    # unknown option
+      echo "unknown option"
+      exit 1
+      ;;
+  esac
+done
+
+if [ -z "$CONFIG" ]; then
+  echo "ERROR: CONFIG not defined: --config <config>"
+  print_the_help
+  exit 1
+elif [ -z "$EBEAM" ]; then
+  echo "ERROR: EBEAM not defined: --ebeam <energy>"
+  print_the_help
+  exit 1
+elif [ -z "$PBEAM" ]; then
+  echo "ERROR: PBEAM not defined: --pbeam <energy>"
+  print_the_help
+  exit 1
+elif [ -z "$LEADING" ] && [ -n "$REQUIRE_LEADING" ]; then
+  echo "ERROR: LEADING not defined: --leading <channel>"
+  print_the_help
+  exit 1
+elif [ -n "$LEADING" ] && [ -z "$REQUIRE_LEADING" ]; then
+  echo "ERROR: LEADING flag specified but not required"
+  print_the_help
+  exit 1
+elif [ -z "$DECAY" ] && [ -n "$REQUIRE_DECAY" ]; then
+  echo "ERROR: DECAY not defined: --decay <channel>"
+  print_the_help
+  exit 1
+elif [ -n "$DECAY" ] && [ -z "$REQUIRE_DECAY" ]; then
+  echo "ERROR: DECAY flag specified but not required"
+  print_the_help
+  exit 1
+fi
+
+## Export the configured variables
+export CONFIG
+export EBEAM
+export PBEAM
+if [ -n "$REQUIRE_LEADING" ]; then
+  export LEADING
+fi
+if [ -n "$REQUIRE_DECAY" ]; then
+  export DECAY
+fi
diff --git a/util/print_env.sh b/util/print_env.sh
new file mode 100755
index 0000000000000000000000000000000000000000..ce4010509e8763b3dba0fdc93bf0b6584f172e27
--- /dev/null
+++ b/util/print_env.sh
@@ -0,0 +1,12 @@
+#!/bin/bash
+
+echo "JUGGLER_TAG:                ${JUGGLER_TAG}"
+echo "JUGGLER_DETECTOR:           ${JUGGLER_DETECTOR}"
+echo "JUGGLER_DETECTOR_VERSION:   ${JUGGLER_DETECTOR_VERSION}"
+echo "JUGGLER_N_EVENTS:           ${JUGGLER_N_EVENTS}"
+echo "JUGGLER_N_THREADS:          ${JUGGLER_N_THREADS}"
+echo "JUGGLER_RNG_SEED:           ${JUGGLER_RNG_SEED}"
+echo "JUGGLER_INSTALL_PREFIX:     ${JUGGLER_INSTALL_PREFIX}"
+echo "LOCAL_PREFIX:               ${LOCAL_PREFIX}"
+echo "DETECTOR_PREFIX:            ${DETECTOR_PREFIX}"
+echo "DETECTOR_PATH:              ${DETECTOR_PATH}"
diff --git a/util/run_many.py b/util/run_many.py
new file mode 100755
index 0000000000000000000000000000000000000000..ccb7e83a7f81d1bc502fcddb32900e5a31eebdb1
--- /dev/null
+++ b/util/run_many.py
@@ -0,0 +1,124 @@
+#!/usr/bin/env python3
+
+""" 
+This script will run a CI generator or processing script for multiple configurations.
+
+Author: Sylvester Joosten <sjoosten@anl.gov>
+"""
+
+import os
+import argparse
+from multiprocessing import Pool, get_context
+from tempfile import NamedTemporaryFile
+
+class InvalidArgumentError(Exception):
+    pass
+
+parser = argparse.ArgumentParser()
+parser.add_argument(
+        'command',
+        help="Script to be launched in parallel")
+parser.add_argument(
+        '--energy', '-e',
+        dest='energies',
+        action='append',
+        help='One or more beam energy pairs (e.g. 10x100)',
+        required=True)
+parser.add_argument(
+        '--config', '-c',
+        dest='configs',
+        action='append',
+        help='One or more configurations',
+        required=True)
+parser.add_argument(
+        '--leading',
+        dest='leads',
+        action='append',
+        help='One or more leading particles(opt.)',
+        required=False)
+parser.add_argument(
+        '--decay',
+        dest='decays',
+        action='append',
+        help='One or more decay channels (opt.)',
+        required=False)
+parser.add_argument(
+        '--nproc',
+        dest='nproc',
+        default=5,
+        type=int,
+        help='Number of processes to launch in parallel',
+        required=False)
+
+def worker(command):
+    '''Execute the command in a system call, with the supplied argument string.'''
+    ## use a temporary file to capture the terminal output, and then
+    ## print the terminal output once the command finishes
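+    ## The composed command ends up looking like (hypothetical example):
+    ##   ./benchmarks/dis/gen.sh --ebeam 10 --pbeam 100 --config barrel > /tmp/tmpXXXX 2>&1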
+    with NamedTemporaryFile() as f:
+        cmd = [command, '>', f.name, '2>&1']
+        cmd = ' '.join(cmd)
+        print("Executing '{}'".format(cmd))
+        ret = os.system(cmd)
+        with open(f.name) as log:
+            print(log.read())
+        return ret
+
+if __name__ == '__main__':
+    args = parser.parse_args()
+    print('Launching CI script in parallel for multiple settings')
+    for e in args.energies:
+        beam_setting = e.split('x')
+        if len(beam_setting) != 2 or not all(s.isnumeric() for s in beam_setting):
+            print("Error: invalid beam energy setting:", e)
+            raise InvalidArgumentError
+
+    if not os.path.exists(args.command):
+        print("Error: Script not found:", args.command)
+        raise InvalidArgumentError
+
+    if args.nproc < 1 or args.nproc > 50:
+        print("Error: Invalid process limit (should be 1-50):", args.nproc)
+        raise InvalidArgumentError
+
+    print(' - command: {}'.format(args.command))
+    print(' - energies: {}'.format(args.energies))
+    print(' - config: {}'.format(args.configs))
+    print(' - nproc: {}'.format(args.nproc))
+    if (args.leads):
+        print(' - leading: {}'.format(args.leads))
+    if (args.decays):
+        print(' - decay: {}'.format(args.decays))
+
+    ## Expand our command and argument list for all combinatorics
+    cmds = []
+    decays = args.decays if args.decays else [None]
+    leads = args.leads if args.leads else [None]
+    for e in args.energies:
+        for c in args.configs:
+            for l in leads:
+                for d in decays:
+                    beam_setting = e.split('x')
+                    cmd = [args.command,
+                           '--ebeam', beam_setting[0],
+                           '--pbeam', beam_setting[1],
+                           '--config', c]
+                    if l is not None:
+                        cmd += ['--leading', l]
+                    if d is not None:
+                        cmd += ['--decay', d]
+                    cmds.append(' '.join(cmd))
+
+    ## create a process pool
+    ## note that we use the multiprocessing.get_context function to set up
+    ## a context where subprocesses are created with the "spawn" start method,
+    ## which avoids deadlocks that sometimes happen with the default method
+    with get_context('spawn').Pool(processes=args.nproc) as pool:
+        return_values = pool.map(worker, cmds)
+        ## check if we all exited nicely, else exit with status 1
+        if not all(ret == 0 for ret in return_values):
+            n_fail = sum([1 for ret in return_values if ret != 0])
+            print('ERROR, {} of {} jobs failed'.format(n_fail, len(cmds)))
+            print('Return values:', [ret for ret in return_values if ret != 0])
+            exit(1)
+
+    ## That's all!