Skip to content
Snippets Groups Projects
Commit f8eba5d4 authored by Sylvester Joosten's avatar Sylvester Joosten
Browse files

Update the CI scripts for the DIS stub to be consistent with the more generic...

Update the CI scripts for the DIS stub to be consistent with the more generic DVMP scripts. Also did some more restructuring of the benchmark to make it easier to split off a generic benchmark library in the future.
parent 0bf9ff3f
No related branches found
No related tags found
1 merge request!10Update dis
#ifndef BENCHMARK_LOADED #ifndef BENCHMARK_H
#define BENCHMARK_LOADED #define BENCHMARK_H
#include "exception.hh" #include "exception.h"
#include <fmt/core.h> #include <fmt/core.h>
#include <fstream> #include <fstream>
#include <iomanip>
#include <iostream> #include <iostream>
#include <nlohmann/json.hpp> #include <nlohmann/json.hpp>
#include <string> #include <string>
...@@ -50,8 +51,7 @@ ...@@ -50,8 +51,7 @@
namespace eic::util { namespace eic::util {
struct TestDefinitionError : Exception { struct TestDefinitionError : Exception {
TestDefinitionError(std::string_view msg) TestDefinitionError(std::string_view msg) : Exception(msg, "test_definition_error") {}
: Exception(msg, "test_definition_error") {}
}; };
// Wrapper for our test data json, with three methods to set the status // Wrapper for our test data json, with three methods to set the status
...@@ -68,12 +68,16 @@ struct TestDefinitionError : Exception { ...@@ -68,12 +68,16 @@ struct TestDefinitionError : Exception {
// - weight: Weight for this test (this is defaulted to 1.0 if not specified) // - weight: Weight for this test (this is defaulted to 1.0 if not specified)
// - result: pass/fail/error // - result: pass/fail/error
struct Test { struct Test {
Test(nlohmann::json definition) : json{std::move(definition)} { // note: round braces for the json constructor, as else it will pick the wrong
// initializer-list constructur (it will put everything in an array)
Test(const std::map<std::string, std::string>& definition) : json(definition)
{
// std::cout << json.dump() << std::endl;
// initialize with error (as we don't have a value yet) // initialize with error (as we don't have a value yet)
error(); error();
// Check that all required fields are present // Check that all required fields are present
for (const auto& field : {"name", "title", "description", "quantity", for (const auto& field :
"target", "value", "result"}) { {"name", "title", "description", "quantity", "target", "value", "result"}) {
if (json.find(field) == json.end()) { if (json.find(field) == json.end()) {
throw TestDefinitionError{ throw TestDefinitionError{
fmt::format("Error in test definition: field '{}' missing", field)}; fmt::format("Error in test definition: field '{}' missing", field)};
...@@ -92,13 +96,15 @@ struct Test { ...@@ -92,13 +96,15 @@ struct Test {
nlohmann::json json; nlohmann::json json;
private: private:
void update_result(std::string_view status, double value) { void update_result(std::string_view status, double value)
{
json["result"] = status; json["result"] = status;
json["value"] = value; json["value"] = value;
} }
}; };
void write_test(const std::vector<Test>& data, const std::string& fname) { void write_test(const std::vector<Test>& data, const std::string& fname)
{
nlohmann::json test; nlohmann::json test;
for (auto& entry : data) { for (auto& entry : data) {
test["tests"].push_back(entry.json); test["tests"].push_back(entry.json);
...@@ -107,7 +113,8 @@ void write_test(const std::vector<Test>& data, const std::string& fname) { ...@@ -107,7 +113,8 @@ void write_test(const std::vector<Test>& data, const std::string& fname) {
std::ofstream output_file(fname); std::ofstream output_file(fname);
output_file << std::setw(4) << test << "\n"; output_file << std::setw(4) << test << "\n";
} }
void write_test(const Test& data, const std::string& fname) { void write_test(const Test& data, const std::string& fname)
{
std::vector<Test> vtd{data}; std::vector<Test> vtd{data};
write_test(vtd, fname); write_test(vtd, fname);
} }
......
#ifndef UTIL_EXCEPTION_H
#define UTIL_EXCEPTION_H

#include <exception>
#include <string>
#include <string_view>

namespace eic::util {

/// Simple exception base class carrying a human-readable message plus a short
/// error-category tag. Derive from it to define named error categories
/// (e.g. TestDefinitionError).
class Exception : public std::exception {
public:
  /// @param msg  human-readable description of the error
  /// @param type short machine-readable error-category tag
  Exception(std::string_view msg, std::string_view type = "exception") : msg_{msg}, type_{type} {}

  /// Human-readable error description (overrides std::exception::what).
  /// Note: `noexcept` replaces the deprecated dynamic exception
  /// specification `throw()`, which was removed in C++20.
  const char* what() const noexcept override { return msg_.c_str(); }

  /// Short error-category tag, e.g. for structured logging.
  virtual const char* type() const noexcept { return type_.c_str(); }

  ~Exception() noexcept override = default;

private:
  std::string msg_;
  std::string type_;
};

} // namespace eic::util
#endif
File moved
#ifndef PLOT_H
#define PLOT_H

#include <TCanvas.h>
#include <TColor.h>
#include <TPad.h>
#include <TPaveText.h>
#include <TStyle.h>
#include <fmt/core.h>
#include <string_view>
#include <vector>

namespace plot {

// Matplotlib-style ("tab10") color palette for consistent plot styling.
const int kMpBlue   = TColor::GetColor(0x1f, 0x77, 0xb4);
const int kMpOrange = TColor::GetColor(0xff, 0x7f, 0x0e);
const int kMpGreen  = TColor::GetColor(0x2c, 0xa0, 0x2c);
const int kMpRed    = TColor::GetColor(0xd6, 0x27, 0x28);
const int kMpPurple = TColor::GetColor(0x94, 0x67, 0xbd);
const int kMpBrown  = TColor::GetColor(0x8c, 0x56, 0x4b);
const int kMpPink   = TColor::GetColor(0xe3, 0x77, 0xc2);
const int kMpGrey   = TColor::GetColor(0x7f, 0x7f, 0x7f);
const int kMpMoss   = TColor::GetColor(0xbc, 0xbd, 0x22);
const int kMpCyan   = TColor::GetColor(0x17, 0xbe, 0xcf);
const std::vector<int> kPalette = {kMpBlue, kMpOrange, kMpGreen, kMpRed, kMpPurple,
                                   kMpBrown, kMpPink, kMpGrey, kMpMoss, kMpCyan};

// Draw a standard "<DETECTOR> SIMULATION / E GeV on P GeV" label on the
// current pad (NDC coordinates, upper-left region).
// Marked inline: this function is defined in a header, so without `inline`
// it violates the ODR as soon as two translation units include this file.
inline void draw_label(int ebeam, int pbeam, const std::string_view detector)
{
  // NOTE(review): raw new relies on the usual ROOT convention that the pad
  // manages drawn primitives -- confirm no explicit cleanup is expected.
  auto t = new TPaveText(.15, 0.800, .7, .925, "NB NDC");
  t->SetFillColorAlpha(kWhite, 0.4);
  t->SetTextFont(43);
  t->SetTextSize(25);
  t->AddText(fmt::format("#bf{{{} }}SIMULATION", detector).c_str());
  t->AddText(fmt::format("{} GeV on {} GeV", ebeam, pbeam).c_str());
  t->SetTextAlign(12);
  t->Draw();
}
} // namespace plot
#endif
#ifndef UTIL_H
#define UTIL_H
// TODO: should probably be moved to a global benchmark utility library
#include <algorithm>
#include <cmath>
#include <exception>
#include <fmt/core.h>
#include <limits>
#include <string>
#include <vector>
#include <Math/Vector4D.h>
#include "dd4pod/Geant4ParticleCollection.h"
#include "eicd/TrackParametersCollection.h"
namespace util {
// Exception definition for unknown particle errors
// FIXME: A utility exception base class should be included in the analysis
// utility library, so we can skip most of this boilerplate
class unknown_particle_error : public std::exception {
public:
  unknown_particle_error(std::string_view particle)
      : m_particle{particle}, m_msg{"Unknown particle type: " + m_particle}
  {
  }
  // The message is pre-built in the constructor: the previous implementation
  // returned fmt::format(...).c_str() directly from what(), which is a
  // dangling pointer into a temporary destroyed at the end of that statement.
  virtual const char* what() const throw() { return m_msg.c_str(); }
  virtual const char* type() const throw() { return "unknown_particle_error"; }

private:
  // declaration order matters: m_particle must be initialized before m_msg
  const std::string m_particle;
  const std::string m_msg;
};
// Simple function to return the appropriate PDG mass (in GeV) for the
// particles we care about for this process.
// FIXME: consider something more robust (maybe based on hepPDT) to the
// analysis utility library
inline double get_pdg_mass(std::string_view part)
{
  if (part == "electron") {
    return 0.0005109989461;
  } else if (part == "muon") {
    return .1056583745;
  } else if (part == "jpsi") {
    return 3.0969;
  } else if (part == "upsilon") {
    // Upsilon(1S) PDG mass is 9.46030 GeV; the previous value 9.49630
    // transposed the digits
    return 9.46030;
  } else {
    throw unknown_particle_error{part};
  }
}
// Get a vector of 4-momenta from raw tracking info, using an externally
// provided particle mass assumption.
inline auto momenta_from_tracking(const std::vector<eic::TrackParametersData>& tracks,
                                  const double mass)
{
  std::vector<ROOT::Math::PxPyPzMVector> momenta;
  momenta.reserve(tracks.size());
  // translate each (qOverP, theta, phi) track triplet into a 4-momentum
  for (const auto& track : tracks) {
    // guard against division by zero for (unphysical) zero-curvature tracks
    if (fabs(track.qOverP) < 1e-9) {
      momenta.emplace_back();
      continue;
    }
    const double p     = fabs(1. / track.qOverP);
    const double sinth = sin(track.theta);
    momenta.emplace_back(p * cos(track.phi) * sinth, p * sin(track.phi) * sinth,
                         p * cos(track.theta), mass);
  }
  return momenta;
}
// Get a vector of 4-momenta from the simulation data.
// TODO: Add PID selector (maybe using ranges?)
inline auto momenta_from_simulation(const std::vector<dd4pod::Geant4ParticleData>& parts)
{
  std::vector<ROOT::Math::PxPyPzMVector> momenta;
  momenta.reserve(parts.size());
  // convert each simulated particle (momentum components + mass) into a 4-vector
  for (const auto& part : parts) {
    momenta.emplace_back(part.psx, part.psy, part.psz, part.mass);
  }
  return momenta;
}
// Find the decay pair candidates from a vector of particles (parts),
// with invariant mass closest to a desired value (pdg_mass)
inline std::pair<ROOT::Math::PxPyPzMVector, ROOT::Math::PxPyPzMVector>
find_decay_pair(const std::vector<ROOT::Math::PxPyPzMVector>& parts, const double pdg_mass)
{
  int first  = -1;
  int second = -1;
  // Start from an infinitely-bad match so the first candidate pair is always
  // accepted. The previous sentinel (best_mass = -1) silently rejected any
  // pair whose invariant mass was more than (pdg_mass + 1) away from the
  // target, returning empty vectors even though candidate pairs existed.
  double best_delta = std::numeric_limits<double>::max();
  // go through all particle combinatorics, calculate the invariant mass
  // for each combination, and remember which combination is the closest
  // to the desired pdg_mass
  for (size_t i = 0; i < parts.size(); ++i) {
    for (size_t j = i + 1; j < parts.size(); ++j) {
      const double new_mass{(parts[i] + parts[j]).mass()};
      const double delta = fabs(new_mass - pdg_mass);
      if (delta < best_delta) {
        first      = i;
        second     = j;
        best_delta = delta;
      }
    }
  }
  // fewer than two input particles -> no pair candidates at all
  if (first < 0) {
    return {{}, {}};
  }
  return {parts[first], parts[second]};
}
// Calculate the momentum magnitude for each 4-vector in the input.
inline auto mom(const std::vector<ROOT::Math::PxPyPzMVector>& momenta)
{
  std::vector<double> values;
  values.reserve(momenta.size());
  for (const auto& v : momenta) {
    values.push_back(v.P());
  }
  return values;
}
// Calculate the transverse momentum for each 4-vector in the input.
inline auto pt(const std::vector<ROOT::Math::PxPyPzMVector>& momenta)
{
  std::vector<double> values;
  values.reserve(momenta.size());
  for (const auto& v : momenta) {
    values.push_back(v.pt());
  }
  return values;
}
// Calculate the azimuthal angle phi for each 4-vector in the input.
inline auto phi(const std::vector<ROOT::Math::PxPyPzMVector>& momenta)
{
  std::vector<double> values;
  values.reserve(momenta.size());
  for (const auto& v : momenta) {
    values.push_back(v.phi());
  }
  return values;
}
// Calculate the pseudo-rapidity eta for each 4-vector in the input.
inline auto eta(const std::vector<ROOT::Math::PxPyPzMVector>& momenta)
{
  std::vector<double> values;
  values.reserve(momenta.size());
  for (const auto& v : momenta) {
    values.push_back(v.eta());
  }
  return values;
}
//=========================================================================================================
} // namespace util
#endif
...@@ -87,6 +87,10 @@ echo "DETECTOR_PREFIX: ${DETECTOR_PREFIX}" ...@@ -87,6 +87,10 @@ echo "DETECTOR_PREFIX: ${DETECTOR_PREFIX}"
export DETECTOR_PATH="${DETECTOR_PREFIX}/${JUGGLER_DETECTOR}" export DETECTOR_PATH="${DETECTOR_PREFIX}/${JUGGLER_DETECTOR}"
echo "DETECTOR_PATH: ${DETECTOR_PATH}" echo "DETECTOR_PATH: ${DETECTOR_PATH}"
## build dir for ROOT to put its binaries etc.
export ROOT_BUILD_DIR=$LOCAL_PREFIX/root_build
echo "ROOT_BUILD_DIR: ${ROOT_BUILD_DIR}"
## ============================================================================= ## =============================================================================
## Setup PATH and LD_LIBRARY_PATH to include our prefixes ## Setup PATH and LD_LIBRARY_PATH to include our prefixes
echo "Adding JUGGLER_INSTALL_PREFIX and LOCAL_PREFIX to PATH and LD_LIBRARY_PATH" echo "Adding JUGGLER_INSTALL_PREFIX and LOCAL_PREFIX to PATH and LD_LIBRARY_PATH"
......
File added
File moved
...@@ -18,9 +18,9 @@ pushd ${PROJECT_ROOT} ...@@ -18,9 +18,9 @@ pushd ${PROJECT_ROOT}
## - DETECTOR_PATH: full path for the detector definitions ## - DETECTOR_PATH: full path for the detector definitions
## this is the same as ${DETECTOR_PREFIX}/${JUGGLER_DETECTOR} ## this is the same as ${DETECTOR_PREFIX}/${JUGGLER_DETECTOR}
## ##
## You can read config/env.sh for more in-depth explanations of the variables ## You can read options/env.sh for more in-depth explanations of the variables
## and how they can be controlled. ## and how they can be controlled.
source config/env.sh source options/env.sh
## ============================================================================= ## =============================================================================
## Step 1: download/update the detector definitions (if needed) ## Step 1: download/update the detector definitions (if needed)
......
...@@ -9,7 +9,7 @@ directory. ...@@ -9,7 +9,7 @@ directory.
""" """
## Our master definition file, the benchmark project directory ## Our master definition file, the benchmark project directory
MASTER_FILE=r'benchmarks.json' MASTER_FILE=r'benchmarks/benchmarks.json'
## Our results directory ## Our results directory
RESULTS_PATH=r'results' RESULTS_PATH=r'results'
......
...@@ -15,7 +15,7 @@ files to identify them as benchmark components. ...@@ -15,7 +15,7 @@ files to identify them as benchmark components.
""" """
## Our benchmark definition file, stored in the benchmark root directory ## Our benchmark definition file, stored in the benchmark root directory
BENCHMARK_FILE=r'{}/benchmark.json' BENCHMARK_FILE=r'benchmarks/{}/benchmark.json'
## Our benchmark results directory ## Our benchmark results directory
RESULTS_PATH=r'results/{}' RESULTS_PATH=r'results/{}'
......
#!/usr/bin/env python3
"""
Compile all root analysis scripts under
benchmarks/<BENCHMARK>/analysis/*.cxx
Doing this step here rather than during the main benchmark script has
multiple advantages:
1. Get feedback on syntax errors early on, without wasting compute resources
2. Avoid race conditions for large benchmarks run in parallel
3. Make it easier to properly handle the root build directory, as
this has to exist prior to our attempt to compile, else all will
fail (this is probably an old bug in root...)
Analysis scripts are expected to have extension 'cxx' and be located in the analysis
subdirectory
"""
## Our analysis path and file extension for glob
ANALYSIS_PATH=r'benchmarks/{}/analysis'
ANALYSIS_EXT = r'cxx'
import argparse
import os
from pathlib import Path
## Exceptions for this module
class Error(Exception):
    '''Base class for exceptions in this module.

    Concrete errors in this module should derive from this class so callers
    can catch all module-specific failures with a single handler.
    '''
    pass
class PathNotFoundError(Exception):
    '''Path does not exist.

    Attributes:
        path: the path name
        message: error message
    '''
    def __init__(self, path):
        # Bug fix: this previously referenced an undefined name `file`,
        # raising NameError instead of reporting the missing path.
        self.path = path
        self.message = 'No such directory: {}'.format(path)
        # make str(err) meaningful for uncaught exceptions
        super().__init__(self.message)
class NoAnalysesFoundError(Exception):
    '''Did not find any analysis scripts to compile.

    Attributes:
        path: the analysis path
        message: error message
    '''
    def __init__(self, path):
        # Bug fixes: this previously referenced an undefined name `file`
        # (NameError when raised), passed the extension and path format
        # arguments in the wrong order, and the message was missing its
        # closing parenthesis.
        self.path = path
        self.message = "No analyses found (extension '{}') in path: {}".format(
                ANALYSIS_EXT, path)
        super().__init__(self.message)
class CompilationError(Exception):
    '''Raised when an analysis script fails to compile.

    Attributes:
        file: analysis file name
        message: error message
    '''
    def __init__(self, file):
        self.file = file
        self.message = "Analysis '{}' failed to compile".format(self.file)
# Command-line interface: one positional argument naming the benchmark whose
# analysis scripts should be compiled.
parser = argparse.ArgumentParser()
parser.add_argument(
        'benchmark',
        # grammar fix: was 'A benchmarks for which ...'
        help='The benchmark for which to compile the analysis scripts.')
def compile_analyses(benchmark):
    '''Compile all analysis scripts for a benchmark.

    Ensures the ROOT build directory exists (when configured through
    ROOT_BUILD_DIR) and then compiles every analysis script found for this
    benchmark, raising on the first failure.
    '''
    # typo fix in user-facing message: 'analyis' -> 'analysis'
    print("Compiling all analysis scripts for '{}'".format(benchmark))
    ## Ensure our build directory exists
    _init_build_dir(benchmark)
    ## Compile every analysis script for this benchmark
    _compile_all(benchmark)
    ## All done!
    print('All analyses for', benchmark, 'compiled successfully')
def _init_build_dir(benchmark):
'''Initialize our ROOT build directory (if using one).'''
print(' --> Initializing ROOT build directory ...')
build_prefix = os.getenv('ROOT_BUILD_DIR')
if build_prefix is None:
print(' --> ROOT_BUILD_DIR not set, no action needed.')
return
## deduce the root build directory
pwd = os.getenv('PWD')
build_dir = '{}/{}/{}'.format(build_prefix, pwd, ANALYSIS_PATH.format(benchmark))
print(" --> Ensuring directory '{}' exists".format(build_dir))
os.system('mkdir -p {}'.format(build_dir))
def _compile_all(benchmark):
    '''Compile every analysis script found for this benchmark.

    Raises PathNotFoundError when the analysis directory does not exist,
    CompilationError as soon as one script fails to compile, and
    NoAnalysesFoundError when the directory holds no scripts at all.
    '''
    print(' --> Compiling analysis scripts')
    anadir = Path(ANALYSIS_PATH.format(benchmark))
    if not anadir.exists():
        raise PathNotFoundError(anadir)
    compiled = 0
    for script in anadir.glob('*.{}'.format(ANALYSIS_EXT)):
        print(' --> Compiling:', script, flush=True)
        if os.system(_compile_cmd(script)):
            raise CompilationError(script)
        compiled += 1
    if compiled == 0:
        raise NoAnalysesFoundError(anadir)
def _compile_cmd(file):
'''Return a one-line shell command to compile an analysis script.'''
return r'bash -c "root -q -b -e \".L {}+\""'.format(file)
if __name__ == "__main__":
    # CLI entry point: parse the benchmark name and compile its analyses.
    args = parser.parse_args()
    compile_analyses(args.benchmark)
#ifndef UTIL_EXCEPTION
#define UTIL_EXCEPTION
#include <exception>
#include <string>
namespace eic::util {
// Simple exception base class carrying a human-readable message plus a short
// error-category tag; derive from it to define named error types.
// NOTE(review): uses std::string_view without including <string_view>, and
// the dynamic exception specification `throw()` (deprecated in C++11,
// removed in C++20) -- confirm against the project's C++ standard; prefer
// `noexcept`.
class Exception : public std::exception {
public:
  Exception(std::string_view msg, std::string_view type = "exception")
      : msg_{msg}, type_{type} {}
  // Human-readable error description (overrides std::exception::what).
  virtual const char* what() const throw() { return msg_.c_str(); }
  // Short machine-readable error-category tag.
  virtual const char* type() const throw() { return type_.c_str(); }
  virtual ~Exception() throw() {}
private:
  std::string msg_;
  std::string type_;
};
} // namespace eic::util
#endif
...@@ -11,9 +11,11 @@ ...@@ -11,9 +11,11 @@
## - REQUIRE_DECAY: require the --decay flag to be set ## - REQUIRE_DECAY: require the --decay flag to be set
## ============================================================================= ## =============================================================================
## Commented out because this should be taken care of by the
## calling script to not enforce a fixed directory structure.
## make sure we launch this script from the project root directory ## make sure we launch this script from the project root directory
PROJECT_ROOT="$( cd "$(dirname "$0")" >/dev/null 2>&1 ; pwd -P )"/.. #PROJECT_ROOT="$( cd "$(dirname "$0")" >/dev/null 2>&1 ; pwd -P )"/..
pushd ${PROJECT_ROOT} #pushd ${PROJECT_ROOT}
## ============================================================================= ## =============================================================================
## Step 1: Process the command line arguments ## Step 1: Process the command line arguments
......
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Please register or to comment