Commit 6fe9dae5 authored by Whitney Armstrong

Moved the tools and utils to common_bench

parent 0db501ce
1 merge request: !47 Resolve "Cleanup old envrionment scripts"
Showing with 27 additions and 7084 deletions
@@ -48,7 +48,7 @@ summary:
   stage: finish
   needs: ["dis:results", "dvcs:results", "dvmp:results"]
   script:
-    - ./util/collect_benchmarks.py
+    - collect_benchmarks.py
   artifacts:
     paths:
       - results/*
...
@@ -3,6 +3,11 @@ Physics Benchmarks for the EIC
 ![pipeline status](https://eicweb.phy.anl.gov/EIC/physics_benchmarks/badges/master/pipeline.svg)
+
+## Common bench
+
+See [common_bench](https://eicweb.phy.anl.gov/EIC/benchmarks/common_bench) for details.
+
 ## Adding new benchmarks
...
@@ -23,7 +23,7 @@ echo "Running the DIS benchmarks"
 ## - CONFIG: The specific generator configuration
 ## - EBEAM: The electron beam energy
 ## - PBEAM: The ion beam energy
-source util/parse_cmd.sh $@
+source parse_cmd.sh $@
 ## To run the reconstruction, we need the following global variables:
 ## - JUGGLER_INSTALL_PREFIX: Install prefix for Juggler (simu/recon)
...
@@ -2,13 +2,6 @@ dis:generate:
   stage: generate
   needs: ["common:detector"]
   timeout: 1 hours
-  cache:
-    key:
-      files:
-        - benchmarks/dis/generator/pythia_dis.cxx
-      prefix: "$CI_COMMIT_REF_SLUG"
-    paths:
-      - input/dis
   script:
     - bash benchmarks/dis/gen.sh --config barrel --ebeam 18 --pbeam 275
@@ -17,23 +10,15 @@ dis:process:
   needs: ["common:detector", "dis:generate"]
   timeout: 1 hour
   script:
-    - ./util/compile_analyses.py dis
+    - compile_analyses.py dis
     - ./benchmarks/dis/dis.sh --config barrel --ebeam 18 --pbeam 275
   retry:
     max: 2
     when:
       - runner_system_failure
-  cache:
-    key:
-      files:
-        - .rootlogon.C
-        - util/compile_analyses.py
-      prefix: "$CI_COMMIT_REF_SLUG"
-    paths:
-      - .local/root_build

 dis:results:
   stage: collect
   needs: ["dis:process"]
   script:
-    - ./util/collect_tests.py dis
+    - collect_tests.py dis
@@ -23,7 +23,7 @@ echo "Running the DIS benchmarks"
 ## - CONFIG: The specific generator configuration
 ## - EBEAM: The electron beam energy
 ## - PBEAM: The ion beam energy
-source util/parse_cmd.sh $@
+source ${LOCAL_PREFIX}/bin/parse_cmd.sh $@
 ## To run the reconstruction, we need the following global variables:
 ## - JUGGLER_INSTALL_PREFIX: Install prefix for Juggler (simu/recon)
...
@@ -25,17 +25,22 @@ echo "Setting up the local environment for the ${BENCHMARK_TAG^^} benchmarks"
 ## Extra beam tag to identify the desired beam configuration
 export BEAM_TAG="${EBEAM}on${PBEAM}"

+if [[ ! -d "input" ]] ; then
+  echo " making local link to input "
+  mkdir_local_data_link input
+fi
+
 ## Data path for input data (generator-level hepmc file)
 INPUT_PATH="input/${BENCHMARK_TAG}/${BEAM_TAG}"
-mkdir -p ${INPUT_PATH}
-export INPUT_PATH=`realpath ${INPUT_PATH}`
+mkdir_local_data_link input
+#export INPUT_PATH=`realpath ${INPUT_PATH}`
+mkdir -p "${INPUT_PATH}"
 echo "INPUT_PATH: ${INPUT_PATH}"

 ## Data path for temporary data (not exported as artifacts)
-TMP_PATH=${LOCAL_PREFIX}/tmp/${BEAM_TAG}
+TMP_PATH=${LOCAL_DATA_PATH}/tmp/${BEAM_TAG}
 mkdir -p ${TMP_PATH}
-export TMP_PATH=`realpath ${TMP_PATH}`
+#export TMP_PATH=`realpath ${TMP_PATH}`
 echo "TMP_PATH: ${TMP_PATH}"
 ## Data path for benchmark output (plots and reconstructed files
...
@@ -23,7 +23,7 @@ pushd ${PROJECT_ROOT}
 ## - CONFIG: The specific generator configuration --> not currently used FIXME
 ## - EBEAM: The electron beam energy --> not currently used FIXME
 ## - PBEAM: The ion beam energy --> not currently used FIXME
-source util/parse_cmd.sh $@
+source parse_cmd.sh $@
 ## To run the generator, we need the following global variables:
 ##

@@ -63,7 +63,7 @@ echo "Generator output for $GEN_TAG not found in cache, need to run generator"
 echo "Compiling benchmarks/dis/generator/pythia_dis.cxx ..."
 g++ benchmarks/dis/generator/pythia_dis.cxx -o ${TMP_PATH}/pythia_dis \
-    -I/usr/local/include -Iinclude \
+    -I/usr/local/include -I${LOCAL_PREFIX}/include \
     -O2 -std=c++11 -pedantic -W -Wall -Wshadow -fPIC \
     -L/usr/local/lib -Wl,-rpath,/usr/local/lib -lpythia8 -ldl \
     -L/usr/local/lib -Wl,-rpath,/usr/local/lib -lHepMC3
...
@@ -3,16 +3,8 @@ dvmp:generate:
   image: eicweb.phy.anl.gov:4567/monte_carlo/lager/lager:unstable
   stage: generate
   timeout: 1 hours
-  #cache:
-  #  key:
-  #    files:
-  #      - benchmarks/dvmp/generator/jpsi_central.json
-  #      - benchmarks/dvmp/scripts/jpsi_central-generate.sh
-  #    prefix: "$CI_COMMIT_REF_SLUG"
-  #  paths:
-  #    - input/dvmp
   script:
-    - ./util/run_many.py ./benchmarks/dvmp/gen.sh
+    - run_many.py ./benchmarks/dvmp/gen.sh
       -c jpsi_barrel
       -e 10x100
       --decay muon --decay electron
@@ -24,9 +16,9 @@ dvmp:process:
   timeout: 1 hour
   script:
     - ls -lrth
-    - ./util/compile_analyses.py dvmp
+    - compile_analyses.py dvmp
     - ls -lrth
-    - ./util/run_many.py ./benchmarks/dvmp/dvmp.sh
+    - run_many.py ./benchmarks/dvmp/dvmp.sh
       -c jpsi_barrel
       -e 10x100
       --decay muon --decay electron
@@ -36,17 +28,9 @@ dvmp:process:
     max: 2
     when:
       - runner_system_failure
-  cache:
-    key:
-      files:
-        - .rootlogon.C
-        - util/compile_analyses.py
-      prefix: "$CI_COMMIT_REF_SLUG"
-    paths:
-      - .local/root_build

 dvmp:results:
   stage: collect
   needs: ["dvmp:process"]
   script:
-    - ./util/collect_tests.py dvmp
+    - collect_tests.py dvmp
@@ -28,7 +28,7 @@ echo "Running the DVMP benchmarks"
 ## - LEADING: Leading particle of interest (J/psi)
 export REQUIRE_DECAY=1
 export REQUIRE_LEADING=1
-source util/parse_cmd.sh $@
+source parse_cmd.sh $@
 ## We also need the following benchmark-specific variables:
 ##
...
@@ -25,7 +25,7 @@ pushd ${PROJECT_ROOT}
 ## - PBEAM: The ion beam energy
 ## - DECAY: The decay particle for the generator
 export REQUIRE_DECAY=1
-source util/parse_cmd.sh $@
+source parse_cmd.sh $@
 ## To run the generator, we need the following global variables:
 ##
...
#ifndef BENCHMARK_H
#define BENCHMARK_H
#include "exception.h"
#include <fmt/core.h>
#include <fstream>
#include <iomanip>
#include <iostream>
#include <nlohmann/json.hpp>
#include <string>
// Bookkeeping of test data to store data of one or more tests in a json file to
// facilitate future accounting.
//
// Usage Example 1 (single test):
// ==============================
// 1. define our test
// eic::util::Test test1{
// {{"name", "example_test"},
// {"title", "Example Test"},
// {"description", "This is an example of a test definition"},
// {"quantity", "efficiency"},
// {"target", "1"}}};
// 2. set pass/fail/error status and return value (in this case .99)
// test1.pass(0.99)
// 3. write our test data to a json file
// eic::util::write_test(test1, "test1.json");
//
// Usage Example 2 (multiple tests):
// =================================
// 1. define our tests
// eic::util::Test test1{
// {{"name", "example_test"},
// {"title", "Example Test"},
// {"description", "This is an example of a test definition"},
// {"quantity", "efficiency"},
// {"target", "1"}}};
// eic::util::Test test2{
// {{"name", "another_test"},
// {"title", "Another example Test"},
// {"description", "This is a second example of a test definition"},
// {"quantity", "resolution"},
// {"target", "3."}}};
// 2. set pass/fail/error status and return value (in this case .99)
// test1.fail(10)
// 3. write our test data to a json file
// eic::util::write_test({test1, test2}, "test.json");
// Namespace for utility scripts, FIXME this should be part of an independent
// library
namespace eic::util {
struct TestDefinitionError : Exception {
TestDefinitionError(std::string_view msg) : Exception(msg, "test_definition_error") {}
};
// Wrapper for our test data json, with three methods to set the status
// after test completion (to pass, fail or error). The default value
// is error.
// The following fields should be defined in the definitions json
// for the test to make sense:
// - name: unique identifier for this test
// - title: Slightly more verbose identifier for this test
// - description: Concise description of what is tested
// - quantity: What quantity is tested? Units of value/target
// - target: Target value of <quantity> that we want to reach
// - value: Actual value of <quantity>
// - weight: Weight for this test (this is defaulted to 1.0 if not specified)
// - result: pass/fail/error
struct Test {
// note: round braces for the json constructor, as otherwise it will pick the
// wrong initializer-list constructor (it will put everything in an array)
Test(const std::map<std::string, std::string>& definition) : json(definition)
{
// std::cout << json.dump() << std::endl;
// initialize with error (as we don't have a value yet)
error();
// Check that all required fields are present
for (const auto& field :
{"name", "title", "description", "quantity", "target", "value", "result"}) {
if (json.find(field) == json.end()) {
throw TestDefinitionError{
fmt::format("Error in test definition: field '{}' missing", field)};
}
}
// Default "weight" to 1 if not set
if (json.find("weight") == json.end()) {
json["weight"] = 1.0;
}
}
// Set this test to pass/fail/error
void pass(double value) { update_result("pass", value); }
void fail(double value) { update_result("fail", value); }
void error(double value = 0) { update_result("error", value); }
nlohmann::json json;
private:
void update_result(std::string_view status, double value)
{
json["result"] = status;
json["value"] = value;
}
};
inline void write_test(const std::vector<Test>& data, const std::string& fname)
{
nlohmann::json test;
for (auto& entry : data) {
test["tests"].push_back(entry.json);
}
std::cout << fmt::format("Writing test data to {}\n", fname);
std::ofstream output_file(fname);
output_file << std::setw(4) << test << "\n";
}
inline void write_test(const Test& data, const std::string& fname)
{
std::vector<Test> vtd{data};
write_test(vtd, fname);
}
} // namespace eic::util
#endif
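For reference, the test.json file written in the second usage example above would look roughly as follows. This is an illustration, not output from this commit: nlohmann::json serializes keys alphabetically, "weight" is filled in with its default of 1.0, and "value"/"result" reflect the fail(10) call for the first test and the default error() state for the second.

{
    "tests": [
        {
            "description": "This is an example of a test definition",
            "name": "example_test",
            "quantity": "efficiency",
            "result": "fail",
            "target": "1",
            "title": "Example Test",
            "value": 10.0,
            "weight": 1.0
        },
        {
            "description": "This is a second example of a test definition",
            "name": "another_test",
            "quantity": "resolution",
            "result": "error",
            "target": "3.",
            "title": "Another example Test",
            "value": 0.0,
            "weight": 1.0
        }
    ]
}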
#ifndef UTIL_EXCEPTION_H
#define UTIL_EXCEPTION_H
#include <exception>
#include <string>
namespace eic::util {
class Exception : public std::exception {
public:
Exception(std::string_view msg, std::string_view type = "exception") : msg_{msg}, type_{type} {}
virtual const char* what() const throw() { return msg_.c_str(); }
virtual const char* type() const throw() { return type_.c_str(); }
virtual ~Exception() throw() {}
private:
std::string msg_;
std::string type_;
};
} // namespace eic::util
#endif
#ifndef MT_H
#define MT_H
// Defines the number of threads to run within the ROOT analysis scripts.
// TODO: make this a file configured by the CI scripts so we can specify
// the number of threads (and the number of processes) at a global
// level
constexpr const int kNumThreads = 8;
#endif
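As a minimal sketch of how an analysis script might consume this constant, assuming the script uses ROOT's implicit multithreading (the function, file, and tree names below are placeholders, not part of this commit):

// mt_example.cxx -- illustrative sketch only
#include <iostream>
#include <ROOT/RDataFrame.hxx>
#include <TROOT.h>
#include "mt.h"

void mt_example()
{
  // enable ROOT implicit multithreading with the shared thread count
  ROOT::EnableImplicitMT(kNumThreads);
  // process a (placeholder) reconstructed events file in parallel
  ROOT::RDataFrame df("events", "rec_output.root");
  std::cout << "number of events: " << *df.Count() << std::endl;
}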
#ifndef PLOT_H
#define PLOT_H
#include <TCanvas.h>
#include <TColor.h>
#include <TPad.h>
#include <TPaveText.h>
#include <TStyle.h>
#include <fmt/core.h>
#include <vector>
namespace plot {
const int kArgonneBlue = TColor::GetColor(0x1f, 0x77, 0xb4); // placeholder: same as kMpBlue, not the official Argonne blue
const int kMpBlue = TColor::GetColor(0x1f, 0x77, 0xb4);
const int kMpOrange = TColor::GetColor(0xff, 0x7f, 0x0e);
const int kMpGreen = TColor::GetColor(0x2c, 0xa0, 0x2c);
const int kMpRed = TColor::GetColor(0xd6, 0x27, 0x28);
const int kMpPurple = TColor::GetColor(0x94, 0x67, 0xbd);
const int kMpBrown = TColor::GetColor(0x8c, 0x56, 0x4b);
const int kMpPink = TColor::GetColor(0xe3, 0x77, 0xc2);
const int kMpGrey = TColor::GetColor(0x7f, 0x7f, 0x7f);
const int kMpMoss = TColor::GetColor(0xbc, 0xbd, 0x22);
const int kMpCyan = TColor::GetColor(0x17, 0xbe, 0xcf);
const std::vector<int> kPalette = {kMpBlue, kMpOrange, kMpGreen, kMpRed, kMpPurple,
kMpBrown, kMpPink, kMpGrey, kMpMoss, kMpCyan};
inline void draw_label(int ebeam, int pbeam, const std::string_view detector)
{
auto t = new TPaveText(.15, 0.800, .7, .925, "NB NDC");
t->SetFillColorAlpha(kWhite, 0.4);
t->SetTextFont(43);
t->SetTextSize(25);
t->AddText(fmt::format("#bf{{{} }}SIMULATION", detector).c_str());
t->AddText(fmt::format("{} GeV on {} GeV", ebeam, pbeam).c_str());
t->SetTextAlign(12);
t->Draw();
}
} // namespace plot
#endif
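A minimal usage sketch for draw_label; the histogram contents and the detector name are placeholder assumptions, not taken from this commit:

// plot_example.cxx -- illustrative sketch only
#include <TCanvas.h>
#include <TH1D.h>
#include "plot.h"

void plot_example()
{
  TCanvas c("c", "canvas", 800, 600);
  TH1D h("h", ";p_{T} (GeV/c);counts", 50, 0., 10.);
  h.SetLineColor(plot::kMpBlue); // use the shared color palette
  h.Draw();
  // label the plot with the beam configuration (18 GeV e on 275 GeV p)
  plot::draw_label(18, 275, "DETECTOR");
  c.SaveAs("plot_example.png");
}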
#ifndef UTIL_H
#define UTIL_H
// TODO: should probably be moved to a global benchmark utility library
#include <algorithm>
#include <cmath>
#include <exception>
#include <fmt/core.h>
#include <limits>
#include <string>
#include <vector>
#include "TF1.h"
#include "TFitResult.h"
#include "TFitResultPtr.h"
#include <Math/Vector4D.h>
#include "dd4pod/Geant4ParticleCollection.h"
#include "eicd/TrackParametersCollection.h"
#include "eicd/ReconstructedParticleCollection.h"
#include "eicd/ReconstructedParticleData.h"
namespace util {
// Exception definition for unknown particle errors
// FIXME: A utility exception base class should be included in the analysis
// utility library, so we can skip most of this boilerplate
class unknown_particle_error : public std::exception {
public:
unknown_particle_error(std::string_view particle)
: m_msg{fmt::format("Unknown particle type: {}", particle)} {}
// the formatted message is stored up front so what() does not return a
// pointer into a destroyed temporary string
virtual const char* what() const throw() { return m_msg.c_str(); }
virtual const char* type() const throw() { return "unknown_particle_error"; }
private:
const std::string m_msg;
};
// Simple function to return the appropriate PDG mass for the particles
// we care about for this process.
// FIXME: consider something more robust (maybe based on hepPDT) to the
// analysis utility library
inline double get_pdg_mass(std::string_view part)
{
if (part == "electron") {
return 0.0005109989461;
} else if (part == "muon") {
return .1056583745;
} else if (part == "jpsi") {
return 3.0969;
} else if (part == "upsilon") {
return 9.46030;
} else if (part == "proton"){
return 0.938272;
} else {
throw unknown_particle_error{part};
}
}
// Get a vector of 4-momenta from raw tracking info (e.g. the
// outputTrackParameters collection), using an externally provided
// particle mass assumption.
inline auto momenta_from_tracking(const std::vector<eic::TrackParametersData>& tracks,
const double mass)
{
std::vector<ROOT::Math::PxPyPzMVector> momenta{tracks.size()};
// transform our raw tracker info into proper 4-momenta
std::transform(tracks.begin(), tracks.end(), momenta.begin(), [mass](const auto& track) {
// make sure we don't divide by zero
if (fabs(track.qOverP) < 1e-9) {
return ROOT::Math::PxPyPzMVector{};
}
const double p = fabs(1. / track.qOverP);
const double px = p * cos(track.phi) * sin(track.theta);
const double py = p * sin(track.phi) * sin(track.theta);
const double pz = p * cos(track.theta);
return ROOT::Math::PxPyPzMVector{px, py, pz, mass};
});
return momenta;
}
//=====================================================================================
inline auto momenta_RC(const std::vector<eic::ReconstructedParticleData>& parts)
{
std::vector<ROOT::Math::PxPyPzMVector> momenta{parts.size()};
// transform the reconstructed particle data into proper 4-momenta
std::transform(parts.begin(), parts.end(), momenta.begin(), [](const auto& part) {
return ROOT::Math::PxPyPzMVector{part.p.x, part.p.y, part.p.z, part.mass};
});
return momenta;
}
//=====================================================================================
// Get a vector of 4-momenta from the simulation data.
// TODO: Add PID selector (maybe using ranges?)
inline auto momenta_from_simulation(const std::vector<dd4pod::Geant4ParticleData>& parts)
{
std::vector<ROOT::Math::PxPyPzMVector> momenta{parts.size()};
// transform our simulation particle data into 4-momenta
std::transform(parts.begin(), parts.end(), momenta.begin(), [](const auto& part) {
return ROOT::Math::PxPyPzMVector{part.psx, part.psy, part.psz, part.mass};
});
return momenta;
}
// Find the decay pair candidates from a vector of particles (parts),
// with invariant mass closest to a desired value (pdg_mass)
inline std::pair<ROOT::Math::PxPyPzMVector, ROOT::Math::PxPyPzMVector>
find_decay_pair(const std::vector<ROOT::Math::PxPyPzMVector>& parts, const double pdg_mass, const double daughter_mass)
{
int first = -1;
int second = -1;
double best_mass = -1;
// go through all particle combinatorics, calculate the invariant mass
// for each combination, and remember which combination is the closest
// to the desired pdg_mass
for (size_t i = 0; i < parts.size(); ++i) {
if( fabs(parts[i].mass() - daughter_mass)/daughter_mass > 0.01) continue;
for (size_t j = i + 1; j < parts.size(); ++j) {
if( fabs(parts[j].mass() - daughter_mass)/daughter_mass > 0.01) continue;
const double new_mass{(parts[i] + parts[j]).mass()};
if (first < 0 || fabs(new_mass - pdg_mass) < fabs(best_mass - pdg_mass)) {
first = i;
second = j;
best_mass = new_mass;
}
}
}
if (first < 0) {
return {{}, {}};
}
return {parts[first], parts[second]};
}
// Calculate the magnitude of the momentum of a vector of 4-vectors
inline auto mom(const std::vector<ROOT::Math::PxPyPzMVector>& momenta)
{
std::vector<double> P(momenta.size());
// extract the momentum magnitude from each 4-vector
std::transform(momenta.begin(), momenta.end(), P.begin(),
[](const auto& mom) { return mom.P(); });
return P;
}
// Calculate the transverse momentum of a vector of 4-vectors
inline auto pt(const std::vector<ROOT::Math::PxPyPzMVector>& momenta)
{
std::vector<double> pt(momenta.size());
// extract the transverse momentum from each 4-vector
std::transform(momenta.begin(), momenta.end(), pt.begin(),
[](const auto& mom) { return mom.pt(); });
return pt;
}
// Calculate the azimuthal angle phi of a vector of 4-vectors
inline auto phi(const std::vector<ROOT::Math::PxPyPzMVector>& momenta)
{
std::vector<double> phi(momenta.size());
// extract the azimuthal angle from each 4-vector
std::transform(momenta.begin(), momenta.end(), phi.begin(),
[](const auto& mom) { return mom.phi(); });
return phi;
}
// Calculate the pseudo-rapidity of a vector of particles
inline auto eta(const std::vector<ROOT::Math::PxPyPzMVector>& momenta)
{
std::vector<double> eta(momenta.size());
// extract the pseudo-rapidity from each 4-vector
std::transform(momenta.begin(), momenta.end(), eta.begin(),
[](const auto& mom) { return mom.eta(); });
return eta;
}
//=========================================================================================================
} // namespace util
#endif
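To show how these helpers compose, here is a hedged RDataFrame sketch that reconstructs a J/psi candidate mass from tracking data. The tree, branch, and file names are assumptions for illustration, not taken from this commit:

// util_example.cxx -- illustrative sketch only; tree/branch/file names are assumed
#include <ROOT/RDataFrame.hxx>
#include "util.h"

void util_example()
{
  ROOT::RDataFrame df("events", "rec_dvmp.root");
  const double m_mu   = util::get_pdg_mass("muon");
  const double m_jpsi = util::get_pdg_mass("jpsi");
  auto h_mass =
      df.Define("p_rec",
                [=](const std::vector<eic::TrackParametersData>& tracks) {
                  // tracks -> 4-vectors under a muon mass hypothesis
                  return util::momenta_from_tracking(tracks, m_mu);
                },
                {"outputTrackParameters"})
        .Define("mass_mumu",
                [=](const std::vector<ROOT::Math::PxPyPzMVector>& parts) {
                  // pick the pair with invariant mass closest to the J/psi
                  auto pair = util::find_decay_pair(parts, m_jpsi, m_mu);
                  return (pair.first + pair.second).mass();
                },
                {"p_rec"})
        .Histo1D({"h_mass", ";m_{#mu#mu} (GeV);counts", 100, 2., 4.}, "mass_mumu");
  h_mass->Draw();
}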
#!/bin/bash
## =============================================================================
## Set up (if needed) and start a development shell environment on Linux or MacOS
## =============================================================================
## make sure we launch this script from the project root directory
PROJECT_ROOT="$( cd "$(dirname "$0")" >/dev/null 2>&1 ; pwd -P )"/..
pushd ${PROJECT_ROOT}
## We do not load the global development environment here, as this script is
## to be executed on a "naked" system outside of any container
## =============================================================================
## Step 1: Parse command line options
## do we want to force-update the container (only affects Linux)
## default: we do not want to do this.
FORCE_UPDATE=
function print_the_help {
echo "USAGE: ./util/start_dev_shell [-f]"
echo "OPTIONS:"
echo " -f,--force Force-update container (Only affects Linux)"
echo " -h,--help Print this message"
echo ""
echo " This script will setup and launch a containerized development
environment"
exit
}
while [ $# -gt 0 ]
do
key="$1"
case $key in
-f|--force)
FORCE_UPDATE="true"
shift # past value
;;
-h|--help)
print_the_help
shift
;;
*) # unknown option
echo "unknown option $1"
exit 1
;;
esac
done
## get OS type
OS=`uname -s`
## =============================================================================
## Step 2: Update container and launch shell
echo "Launching a containerized development shell"
case ${OS} in
Linux)
echo " - Detected OS: Linux"
## Use the same prefix as we use for other local packages
export PREFIX=.local/lib
if [ ! -f $PREFIX/juggler_latest.sif ] || [ ! -z ${FORCE_UPDATE} ]; then
echo " - Fetching singularity image"
mkdir -p $PREFIX
wget https://eicweb.phy.anl.gov/eic/juggler/-/jobs/artifacts/master/raw/build/juggler.sif?job=singularity:latest -O $PREFIX/juggler_latest.sif
fi
echo " - Using singularity to launch shell..."
singularity exec $PREFIX/juggler_latest.sif eic-shell
;;
Darwin)
echo " - Detector OS: MacOS"
echo " - Syncing docker container"
docker pull sly2j/juggler:latest
echo " - Using docker to launch shell..."
docker run -v /Users:/Users -w=$PWD -i -t --rm sly2j/juggler:latest eic-shell
;;
*)
echo "ERROR: dev shell not available for this OS (${OS})"
exit 1
esac
## =============================================================================
## Step 3: All done
echo "Exiting development environment..."
#!/bin/bash
## =============================================================================
## Download generator & reconstruction artifacts for one or more physics
## processes.
## =============================================================================
## make sure we launch this script from the project root directory
PROJECT_ROOT="$( cd "$(dirname "$0")" >/dev/null 2>&1 ; pwd -P )"/..
pushd ${PROJECT_ROOT}
PROCS=()
BRANCH="master"
function print_the_help {
echo "USAGE: -p process [-p process2] [-b git_branch]"
echo "OPTIONS:"
echo " -p,--process Physics process name (can be defined multiple
times)."
echo " -b,--branch Git branch to download artifacts from (D:
$BRANCH)"
echo " -h,--help Print this message"
echo ""
echo " This script will download the relevant generator artifacts needed"
echo " for local testing of the benchmarks."
exit
}
while [ $# -gt 0 ]
do
key="$1"
case $key in
-p|--process)
PROCS+=("$2")
shift # past argument
shift # past value
;;
-b|--branch)
BRANCH="$2"
shift # past argument
shift # past value
;;
-h|--help)
print_the_help
shift
;;
*) # unknown option
echo "unknown option: $1"
exit 1
;;
esac
done
echo "Downloading generator & reconstruction artifacts for one or more physics processes"
if [ ${#PROCS[@]} -eq 0 ]; then
echo "ERROR: need one or more processes: -p <process name> "
exit 1
fi
for proc in ${PROCS[@]}; do
echo "Dowloading artifacts for $proc (branch: $BRANCH)"
wget https://eicweb.phy.anl.gov/EIC/benchmarks/physics_benchmarks/-/jobs/artifacts/$BRANCH/download?job=${proc}:generate -O results_gen.zip
## FIXME this needs to be smarter, probably through more flags...
wget https://eicweb.phy.anl.gov/EIC/benchmarks/physics_benchmarks/-/jobs/artifacts/$BRANCH/download?job=${proc}:process -O results_rec.zip
echo "Unpacking artifacts..."
unzip -u -o results_gen.zip
unzip -u -o results_rec.zip
echo "Cleaning up..."
rm results_???.zip
done
popd
echo "All done"
#!/bin/bash
## =============================================================================
## Build and install the JUGGLER_DETECTOR detector package into our local prefix
## =============================================================================
## make sure we launch this script from the project root directory
PROJECT_ROOT="$( cd "$(dirname "$0")" >/dev/null 2>&1 ; pwd -P )"/..
pushd ${PROJECT_ROOT}
## =============================================================================
## Load the environment variables. To build the detector we need the following
## variables:
##
## - JUGGLER_DETECTOR: the detector package we want to use for this benchmark
## - LOCAL_PREFIX: location where local packages should be installed
## - DETECTOR_PREFIX: prefix for the detector definitions
## - DETECTOR_PATH: full path for the detector definitions
## this is the same as ${DETECTOR_PREFIX}/${JUGGLER_DETECTOR}
##
## You can read options/env.sh for more in-depth explanations of the variables
## and how they can be controlled.
source options/env.sh
## =============================================================================
## Step 1: download/update the detector definitions (if needed)
pushd ${DETECTOR_PREFIX}
## We need an up-to-date copy of the detector
## start clean to avoid issues...
if [ -d ${JUGGLER_DETECTOR} ]; then
echo "cleaning up ${JUGGLER_DETECTOR}"
rm -rf ${JUGGLER_DETECTOR}
fi
echo "Fetching ${JUGGLER_DETECTOR}"
git clone -b ${JUGGLER_DETECTOR_VERSION} https://eicweb.phy.anl.gov/EIC/detectors/${JUGGLER_DETECTOR}.git
#else
#echo "Updating ${JUGGLER_DETECTOR}"
#pushd ${JUGGLER_DETECTOR}
#git pull --ff-only
#popd
#fi
## We also need an up-to-date copy of the accelerator. For now this is done
## manually. Down the road we could automate this with cmake
if [ -d accelerator ]; then
echo "cleaning up accelerator"
rm -rf accelerator
fi
echo "Fetching accelerator"
git clone https://eicweb.phy.anl.gov/EIC/detectors/accelerator.git
#else
# echo "Updating accelerator"
# pushd accelerator
# git pull --ff-only
# popd
#fi
## Now symlink the accelerator definition into the detector definition
echo "Linking accelerator definition into detector definition"
ln -s -f ${DETECTOR_PREFIX}/accelerator/eic ${DETECTOR_PATH}/eic
## =============================================================================
## Step 2: Compile and install the detector definition
echo "Building and installing the ${JUGGLER_DETECTOR} package"
mkdir -p ${DETECTOR_PREFIX}/build
pushd ${DETECTOR_PREFIX}/build
cmake ${DETECTOR_PATH} -DCMAKE_INSTALL_PREFIX=${LOCAL_PREFIX} -DCMAKE_CXX_STANDARD=17 &&
make -j30 install || exit 1
## =============================================================================
## Step 3: That's all!
echo "Detector build/install complete!"
#!/usr/bin/env python3
"""
Combine the json files from the individual benchmark tests into
a single master json file covering all benchmarks.

Benchmark results are read from all json files in the results
directory.
"""

import json
from pathlib import Path

## Our master definition file, the benchmark project directory
MASTER_FILE = r'benchmarks/benchmarks.json'
## Our results directory
RESULTS_PATH = r'results'
## Output json file with all benchmark results
OUTPUT_FILE = r'results/summary.json'

## Exceptions for this module
class Error(Exception):
    '''Base class for exceptions in this module.'''
    pass

class FileNotFoundError(Error):
    '''File does not exist.

    Attributes:
        file: the file name
        message: error message
    '''
    def __init__(self, file):
        self.file = file
        self.message = 'No such file or directory: {}'.format(file)

class InvalidDefinitionError(Error):
    '''Raised for missing keys in the definitions.

    Attributes:
        key: the missing key
        file: the definition file
        message: error message
    '''
    def __init__(self, key, file):
        self.key = key
        self.file = file
        self.message = "key '{}' not found in '{}'".format(key, file)

class InvalidResultError(Error):
    '''Raised for invalid benchmark result value.

    Attributes:
        key: the missing key
        value: the invalid value
        file: the benchmark definition file
        message: error message
    '''
    def __init__(self, key, value, file):
        self.key = key
        self.value = value
        self.file = file
        self.message = "value '{}' for key '{}' invalid in benchmark file '{}'".format(
            value, key, file)

def collect_benchmarks():
    '''Collect all benchmark results and write results to a single file.'''
    print("Collecting all benchmark results")
    ## load the test definition for this benchmark
    results = _load_master()
    ## collect the test results
    results['benchmarks'] = _load_benchmarks()
    ## calculate aggregate test statistics
    results = _aggregate_results(results)
    ## save results to output file
    _save(results)
    ## Summarize results
    for bm in results['benchmarks']:
        _print_benchmark(bm)
    _print_summary(results)

def _load_master():
    '''Load master definition.'''
    master_file = Path(MASTER_FILE)
    if not master_file.exists():
        raise FileNotFoundError(master_file)
    print(' --> Loading master definition from:', master_file)
    results = None
    with master_file.open() as f:
        results = json.load(f)
    ## ensure this is a valid benchmark file
    for key in ('name', 'title', 'description'):
        if key not in results:
            raise InvalidDefinitionError(key, master_file)
    return results

def _load_benchmarks():
    '''Load all benchmark results from the results folder.'''
    print(' --> Collecting all benchmarks')
    rootdir = Path(RESULTS_PATH)
    results = []
    for file in rootdir.glob('*.json'):
        print(' --> Loading file:', file, '... ', end='')
        with open(file) as f:
            bm = json.load(f)
            ## skip files that don't include test results
            if 'tests' not in bm:
                print('skipped (does not contain benchmark results).')
                continue
            ## check if these are valid benchmark results,
            ## raise exception otherwise
            for key in ('name', 'title', 'description', 'target', 'n_tests',
                        'n_pass', 'n_fail', 'n_error', 'maximum', 'sum', 'value',
                        'result'):
                if key not in bm:
                    raise InvalidDefinitionError(key, file)
            if bm['result'] not in ('pass', 'fail', 'error'):
                raise InvalidResultError('result', bm['result'], file)
            ## Append to our test results
            results.append(bm)
            print('done')
    return results

def _aggregate_results(results):
    '''Aggregate benchmark results.'''
    print(' --> Aggregating benchmark statistics')
    results['n_benchmarks'] = len(results['benchmarks'])
    results['n_pass'] = len([1 for t in results['benchmarks'] if t['result'] == 'pass'])
    results['n_fail'] = len([1 for t in results['benchmarks'] if t['result'] == 'fail'])
    results['n_error'] = len([1 for t in results['benchmarks'] if t['result'] == 'error'])
    if results['n_error'] > 0:
        results['result'] = 'error'
    elif results['n_fail'] == 0:
        results['result'] = 'pass'
    else:
        results['result'] = 'fail'
    return results

def _save(results):
    '''Save aggregated benchmark results.'''
    ofile = Path(OUTPUT_FILE)
    print(' --> Saving results to:', ofile)
    with ofile.open('w') as f:
        json.dump(results, f, indent=4)

def _print_benchmark(bm):
    '''Print benchmark summary to the terminal.'''
    print('====================================================================')
    print(' Summary for:', bm['title'])
    print(' Pass: {}, Fail: {}, Error: {} out of {} total tests'.format(
        bm['n_pass'], bm['n_fail'], bm['n_error'], bm['n_tests']))
    print(' Weighted sum: {} / {}'.format(bm['sum'], bm['maximum']))
    print(' Benchmark value: {} (target: {})'.format(bm['value'], bm['target']))
    print(' ===> status:', bm['result'])

def _print_summary(results):
    '''Print master benchmark summary to the terminal.'''
    print('====================================================================')
    print('MASTER BENCHMARK SUMMARY FOR:', results['title'].upper())
    print('Pass: {}, Fail: {}, Error: {} out of {} total benchmarks'.format(
        results['n_pass'], results['n_fail'], results['n_error'],
        results['n_benchmarks']))
    print('===> status:', results['result'])
    print('====================================================================')

if __name__ == "__main__":
    try:
        collect_benchmarks()
    except Error as e:
        print()
        print('ERROR', e.message)