Commit a0be437a authored by Sylvester Joosten

Add benchmark/exception headers as symlinks to the benchmarks for now; in the future these should be moved to the util library
parent adccf39a
Merge request !16: Resolve "Benchmark definition standard"
New symlink: benchmark.hh → ../../util/benchmark.hh
New symlink: exception.hh → ../../util/exception.hh
#include "benchmark.hh"
#include "mt.h"
#include "plot.h"
#include "util.h"
@@ -6,7 +7,9 @@
#include <cmath>
#include <fmt/color.h>
#include <fmt/core.h>
+#include <fstream>
#include <iostream>
+#include <nlohmann/json.hpp>
#include <string>
#include <vector>
@@ -16,9 +19,19 @@
// file prefix), and labeled with our detector name.
// TODO: I think it would be better to pass small json configuration file to
// the test, instead of this ever-expanding list of function arguments.
-int vm_mass(std::string_view rec_file, std::string_view vm_name,
-            std::string_view decay_name, std::string_view detector,
-            std::string output_prefix) {
+int vm_mass(const std::string& config_name) {
+  // read our configuration
+  std::ifstream config_file{config_name};
+  nlohmann::json config;
+  config_file >> config;
+  const std::string rec_file = config["rec_file"];
+  const std::string vm_name = config["vm_name"];
+  const std::string decay_name = config["decay"];
+  const std::string detector = config["detector"];
+  std::string output_prefix = config["output_prefix"];
+  const std::string test_tag = config["test_tag"];
fmt::print(fmt::emphasis::bold | fg(fmt::color::forest_green),
"Running VM invariant mass analysis...\n");
fmt::print(" - Vector meson: {}\n", vm_name);
@@ -26,6 +39,18 @@ int vm_mass(std::string_view rec_file, std::string_view vm_name,
fmt::print(" - Detector package: {}\n", detector);
fmt::print(" - output prefix: {}\n", output_prefix);
+  // create our test definition
+  // test_tag
+  juggler_util::test vm_mass_resolution_test{
+      {{"name",
+        fmt::format("{}_{}_{}_mass_resolution", test_tag, vm_name, decay_name)},
+       {"title",
+        fmt::format("{} -> {} Invariant Mass Resolution", vm_name, decay_name)},
+       {"description", "Invariant Mass Resolution calculated from raw "
+                       "tracking data using a Gaussian fit."},
+       {"quantity", "resolution"},
+       {"target", ".1"}}};
// Run this in multi-threaded mode if desired
ROOT::EnableImplicitMT(kNumThreads);
@@ -42,7 +67,8 @@ int vm_mass(std::string_view rec_file, std::string_view vm_name,
// Open our input file file as a dataframe
ROOT::RDataFrame d{"events", rec_file};
-  // utility lambda functions to bind the vector meson and decay particle types
+  // utility lambda functions to bind the vector meson and decay particle
+  // types
auto momenta_from_tracking =
[decay_mass](const std::vector<eic::TrackParametersData>& tracks) {
return util::momenta_from_tracking(tracks, decay_mass);
@@ -103,6 +129,15 @@ int vm_mass(std::string_view rec_file, std::string_view vm_name,
// Print canvas to output file
c.Print(fmt::format("{}vm_mass.png", output_prefix).c_str());
}
+  // TODO we're not actually doing an IM fit yet, so for now just return an
+  // error for the test result
+  vm_mass_resolution_test.error(-1);
+  // write out our test data
+  juggler_util::write_test(vm_mass_resolution_test,
+                           fmt::format("{}vm_mass.json", output_prefix));
// That's all!
return 0;
}
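The new entry point above pulls every parameter from a single JSON file but does not guard against a missing file or absent keys. Below is a minimal sketch of a more defensive loader; load_config is a hypothetical helper (not part of this commit) that assumes only nlohmann::json and the key names used above:

#include <fstream>
#include <stdexcept>
#include <string>
#include <nlohmann/json.hpp>

// Hypothetical helper: load and minimally validate an analysis configuration.
nlohmann::json load_config(const std::string& config_name) {
  std::ifstream config_file{config_name};
  if (!config_file) {
    throw std::runtime_error("cannot open config file: " + config_name);
  }
  nlohmann::json config;
  config_file >> config;  // throws nlohmann::json::parse_error on malformed JSON
  // require the keys that vm_mass() reads above
  for (const auto* key : {"rec_file", "vm_name", "decay", "detector",
                          "output_prefix", "test_tag"}) {
    if (config.find(key) == config.end()) {
      throw std::runtime_error(std::string{"missing config field: "} + key);
    }
  }
  return config;
}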
@@ -42,6 +42,7 @@ source config/env.sh
## We also need the following benchmark-specific variables:
##
## - BENCHMARK_TAG: Unique identified for this benchmark process.
+## - BEAM_TAG: Identifier for the chosen beam configuration
## - INPUT_PATH: Path for generator-level input to the benchmarks
## - TMP_PATH: Path for temporary data (not exported as artifacts)
## - RESULTS_PATH: Path for benchmark output figures and files
@@ -105,16 +106,27 @@ if [ "$?" -ne "0" ] ; then
exit 1
fi
fi
-ls -l
+#ls -l
## =============================================================================
## Step 4: Analysis
-root -b -q "dvmp/analysis/vm_mass.cxx(\
-\"${REC_FILE}\", \
-\"${LEADING}\", \
-\"${DECAY}\", \
-\"${JUGGLER_DETECTOR}\", \
-\"${RESULTS_PATH}/${PLOT_TAG}\")"
+## write a temporary configuration file for the analysis script
+CONFIG="${TMP_PATH}/${PLOT_TAG}.json"
+cat << EOF > ${CONFIG}
+{
+  "rec_file": "${REC_FILE}",
+  "vm_name": "${LEADING}",
+  "decay": "${DECAY}",
+  "detector": "${JUGGLER_DETECTOR}",
+  "output_prefix": "${RESULTS_PATH}/${PLOT_TAG}",
+  "test_tag": "${LEADING}_${DECAY}_${BEAM_TAG}"
+}
+EOF
+#cat ${CONFIG}
+## run the analysis script with this configuration
+root -b -q "dvmp/analysis/vm_mass.cxx(\"${CONFIG}\")"
if [ "$?" -ne "0" ] ; then
echo "ERROR running root script"
…
@@ -5,6 +5,7 @@
## It defines the following additional variables:
##
## - BENCHMARK_TAG: Tag to identify this particular benchmark
+## - BEAM_TAG: Tag to identify the beam configuration
## - INPUT_PATH: Path for generator-level input to the benchmarks
## - TMP_PATH: Path for temporary data (not exported as artifacts)
## - RESULTS_PATH: Path for benchmark output figures and files
@@ -21,22 +22,25 @@
export BENCHMARK_TAG="dvmp"
echo "Setting up the local environment for the ${BENCHMARK_TAG^^} benchmarks"
+## Extra beam tag to identify the desired beam configuration
+export BEAM_TAG="${EBEAM}on${PBEAM}"
## Data path for input data (generator-level hepmc file)
INPUT_PATH="input/${BENCHMARK_TAG}/${EBEAM}on${PBEAM}"
INPUT_PATH="input/${BENCHMARK_TAG}/${BEAM_TAG}"
mkdir -p ${INPUT_PATH}
export INPUT_PATH=`realpath ${INPUT_PATH}`
echo "INPUT_PATH: ${INPUT_PATH}"
## Data path for temporary data (not exported as artifacts)
-TMP_PATH=${LOCAL_PREFIX}/tmp/${EBEAM}on${PBEAM}
+TMP_PATH=${LOCAL_PREFIX}/tmp/${BEAM_TAG}
mkdir -p ${TMP_PATH}
export TMP_PATH=`realpath ${TMP_PATH}`
echo "TMP_PATH: ${TMP_PATH}"
## Data path for benchmark output (plots and reconstructed files
## if not too big).
RESULTS_PATH="results/${BENCHMARK_TAG}/${EBEAM}on${PBEAM}"
RESULTS_PATH="results/${BENCHMARK_TAG}/${BEAM_TAG}"
mkdir -p ${RESULTS_PATH}
export RESULTS_PATH=`realpath ${RESULTS_PATH}`
echo "RESULTS_PATH: ${RESULTS_PATH}"
…
#ifndef BENCHMARK_LOADED
#define BENCHMARK_LOADED
#include "exception.hh"
#include <fmt/core.h>
#include <fstream>
#include <iomanip>
#include <iostream>
#include <nlohmann/json.hpp>
#include <string>
#include <string_view>
#include <vector>
// Bookkeeping of test data to store data of one or more tests in a json file to
// facilitate future accounting.
//
// Usage Example 1 (single test):
// ==============================
// 1. define our test
// juggler_util::test test1{
// {{"name", "example_test"},
// {"title", "Example Test"},
// {"description", "This is an example of a test definition"},
// {"quantity", "efficiency"},
// {"target", "1"}}};
// 2. set pass/fail/error status and return value (in this case .99)
// test1.pass(0.99);
// 3. write our test data to a json file
// juggler_util::write_test(test1, "test1.json");
//
// Usage Example 2 (multiple tests):
// =================================
// 1. define our tests
// juggler_util::test test1{
// {{"name", "example_test"},
// {"title", "Example Test"},
// {"description", "This is an example of a test definition"},
// {"quantity", "efficiency"},
// {"target", "1"}}};
// juggler_util::test test2{
// {{"name", "another_test"},
// {"title", "Another example Test"},
// {"description", "This is a second example of a test definition"},
// {"quantity", "resolution"},
// {"target", "3."}}};
// 2. set pass/fail/error status and return value (in this case a failing
//    value of 10)
// test1.fail(10);
// 3. write our test data to a json file
// juggler_util::write_test({test1, test2}, "test.json");
// Namespace for utility scripts, FIXME this should be part of an independent
// library
namespace juggler_util {
struct test_definition_error : exception {
test_definition_error(std::string_view msg)
: exception(msg, "test_definition_error") {}
};
// Wrapper for our test data json, with three methods to set the status
// after test completion (to pass, fail or error). The default value
// is error.
// The following fields should be defined in the definitions json
// for the test to make sense:
// - name: unique identifier for this test
// - title: Slightly more verbose identifier for this test
// - description: Concise description of what is tested
// - quantity: What quantity is tested? Units of value/target
// - target: Target value of <quantity> that we want to reach
// - value: Actual value of <quantity>
// - weight: Weight for this test (this is defaulted to 1.0 if not specified)
// - result: pass/fail/error
struct test {
  test(nlohmann::json definition) : json{std::move(definition)} {
    // default the weight to 1.0 when not explicitly specified
    if (json.find("weight") == json.end()) {
      json["weight"] = 1.0;
    }
    // initialize with error (as we don't have a value yet)
    error();
    // Check that all required fields are present
    for (const auto& field : {"name", "title", "description", "quantity",
                              "target", "value", "weight", "result"}) {
      if (json.find(field) == json.end()) {
        throw test_definition_error{
            fmt::format("Error in test definition: field '{}' missing", field)};
      }
    }
  }
// Set this test to pass/fail/error
void pass(double value) { update_result("pass", value); }
void fail(double value) { update_result("fail", value); }
void error(double value = 0) { update_result("error", value); }
nlohmann::json json;
private:
void update_result(std::string_view status, double value) {
json["result"] = status;
json["value"] = value;
}
};
// write a vector of test objects to a json file (inline, as this lives in a header)
inline void write_test(const std::vector<test>& data, const std::string& fname) {
  nlohmann::json test_data;
  for (const auto& entry : data) {
    test_data.push_back(entry.json);
  }
  std::cout << fmt::format("Writing test data to {}\n", fname);
  std::ofstream output_file(fname);
  output_file << std::setw(4) << test_data << "\n";
}
// convenience overload for a single test
inline void write_test(const test& data, const std::string& fname) {
  std::vector<test> vtd{data};
  write_test(vtd, fname);
}
} // namespace juggler_util
#endif
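For reference, a minimal standalone sketch of how this header is meant to be used, following the usage comments above; the test name, values, and output file are illustrative only, not part of this commit:

#include "benchmark.hh"

int main() {
  // define a test with the required fields documented above
  juggler_util::test efficiency_test{
      {{"name", "example_efficiency"},
       {"title", "Example Efficiency"},
       {"description", "Fraction of generated tracks that were reconstructed."},
       {"quantity", "efficiency"},
       {"target", "0.95"}}};
  // record a passing result and persist it for later accounting
  efficiency_test.pass(0.97);
  juggler_util::write_test(efficiency_test, "example_efficiency.json");
  return 0;
}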
#ifndef UTIL_EXCEPTION
#define UTIL_EXCEPTION
#include <exception>
#include <string>
#include <string_view>
namespace juggler_util {
class exception : public std::exception {
public:
exception(std::string_view msg, std::string_view type = "exception")
: msg_{msg}, type_{type} {}
virtual const char* what() const throw() { return msg_.c_str(); }
virtual const char* type() const throw() { return type_.c_str(); }
virtual ~exception() throw() {}
private:
std::string msg_;
std::string type_;
};
} // namespace juggler_util
#endif
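A short hypothetical usage sketch, mirroring how test_definition_error in benchmark.hh derives from this base class; config_error and its message are illustrative, not part of this commit:

#include "exception.hh"
#include <iostream>

// custom error category in the style of test_definition_error
struct config_error : juggler_util::exception {
  config_error(std::string_view msg) : exception(msg, "config_error") {}
};

int main() {
  try {
    throw config_error{"required field 'rec_file' missing"};
  } catch (const juggler_util::exception& e) {
    // type() identifies the error category, what() carries the message
    std::cerr << e.type() << ": " << e.what() << "\n";
  }
  return 0;
}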
@@ -58,9 +58,10 @@ def worker(command):
    cmd = [command, ' 2>&1 >', f.name]
    cmd = ' '.join(cmd)
    print("Executing '{}'".format(cmd))
-    os.system(cmd)
+    ret = os.system(cmd)
    with open(f.name) as log:
        print(log.read())
+    return ret

if __name__ == '__main__':
    args = parser.parse_args()
@@ -112,6 +113,9 @@ if __name__ == '__main__':
    ## a context where subprocesses are created using the new "spawn" process
    ## which avoids deadlocks that sometimes happen in the default dispatch
    with get_context('spawn').Pool(processes=args.nproc) as pool:
-        pool.map(worker, cmds)
+        return_values = pool.map(worker, cmds)
+    ## check if we all exited nicely, else exit with status 1
+    if not all(ret == 0 for ret in return_values):
+        exit(1)
## That's all!