Skip to content
Snippets Groups Projects
Commit fb63cd9f authored by Chao Peng's avatar Chao Peng
Browse files

add a script to check aggregator occupancy

parent fad7d667
No related branches found
No related tags found
1 merge request !193: add a script to check aggregator occupancy
'''
check occupancy for first-level aggregators
'''
import os
import ROOT
import pandas as pd
import numpy as np
import argparse
import matplotlib
import matplotlib.ticker as ticker
from matplotlib import cm
from matplotlib import pyplot as plt
from matplotlib.collections import PatchCollection
from matplotlib.patches import Rectangle
# load root macros, input is an argument string
def load_root_macros(arg_macros):
    """Load ROOT macro files into the ROOT interpreter.

    Parameters
    ----------
    arg_macros : str
        Comma-separated list of macro file paths; missing files are
        skipped with a warning rather than raising.
    """
    for path in arg_macros.split(','):
        path = path.strip()
        if os.path.exists(path):
            # was bare `gROOT` (NameError at runtime): the module is imported as ROOT
            ROOT.gROOT.Macro(path)
            print('Loading root macro: \'{}\' loaded.'.format(path))
        else:
            print('Loading root macros: \'{}\' does not exist, skip loading it.'.format(path))
# read from RDataFrame and flatten a given collection, return pandas dataframe
def flatten_collection(rdf, collection, cols=None):
    """Flatten one collection of an RDataFrame into a pandas DataFrame.

    Each per-event vector branch is exploded into one row per element and an
    'event' column is appended so rows can be traced back to their event.

    Parameters
    ----------
    rdf : ROOT.RDataFrame
        Source data frame.
    collection : str
        Collection name; branches '<collection>.*' (and '<collection>Info.*'
        when auto-discovering) are read.
    cols : list of str, optional
        Branch names (without the collection prefix) to read; when omitted,
        all matching branches are discovered from the data frame.
    """
    if cols:
        branches = ['{}.{}'.format(collection, c) for c in cols]
    else:
        names = [str(c) for c in rdf.GetColumnNames()]
        branches = [c for c in names if c.startswith('{}.'.format(collection))]
        branches += [c for c in names if c.startswith('{}Info.'.format(collection))]
    if not branches:
        print('cannot find any branch under collection {}'.format(collection))
        return pd.DataFrame()

    data = rdf.AsNumpy(branches)
    # flatten the data, add an event id to identify clusters from different events
    event_ids = []
    for ievt, vec in enumerate(data[branches[0]]):
        event_ids.extend([ievt] * vec.size())
    for name, vals in data.items():
        # make sure ints are not converted to floats
        kind = vals[0].__class__.__name__.lower()
        target = np.int64 if ('int' in kind or 'long' in kind) else np.float64
        # type safe creation
        data[name] = np.asarray([x for vec in vals for x in vec], dtype=target)
    # build data frame
    result = pd.DataFrame({c: pd.Series(v) for c, v in data.items()})
    result.loc[:, 'event'] = event_ids
    return result
def pos2segid(pos, segsize):
    """Map a position (scalar or array, mm) to its aggregator segment index."""
    return np.floor(np.divide(pos, segsize))
def segid2pos(sid, segsize):
    """Map a segment index (scalar or array) back to the segment center (mm)."""
    return segsize * (sid + 0.5)
def truncate_colormap(cmap, minval=0.0, maxval=1.0, n=100):
    """Build a new colormap covering only [minval, maxval] of *cmap*.

    Samples *n* colors from the requested sub-range and rebuilds a
    LinearSegmentedColormap from them.
    """
    samples = cmap(np.linspace(minval, maxval, n))
    label = 'trunc({n},{a:.2f},{b:.2f})'.format(n=cmap.name, a=minval, b=maxval)
    return matplotlib.colors.LinearSegmentedColormap.from_list(label, samples)
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('rec_file', help='Path to reconstruction output file.')
    parser.add_argument('-o', dest='outdir', default='.', help='Output directory.')
    parser.add_argument('-m', '--macros', dest='macros', default='rootlogon.C',
                        help='Macro files to be loaded by root, separated by \",\".')
    parser.add_argument('-c', '--collection', dest='coll', default='RecoEcalBarrelImagingHits',
                        help='Name of the data branch for imaging layers')
    parser.add_argument('-s', '--segment-size', dest='size', type=float, default=505.0,
                        help='Size of area for one aggregator to cover (mm)')
    args = parser.parse_args()

    # multi-threading for RDataFrame; guard against cpu_count() == 1, where
    # //2 would yield 0 and ROOT would interpret that as "use all cores"
    nthreads = max(1, os.cpu_count() // 2)
    ROOT.ROOT.EnableImplicitMT(nthreads)
    print('CPU available {}, using {}'.format(os.cpu_count(), nthreads))

    # declare some functions from c++ script, needed for data frame processing
    # script_dir = os.path.dirname(os.path.realpath(__file__))
    # fcode = open(os.path.join(script_dir, 'barrel_clusters.cxx')).read()
    # ROOT.gInterpreter.Declare(fcode)

    os.makedirs(args.outdir, exist_ok=True)
    # load macros (add libraries/headers root cannot automatically find here)
    load_root_macros(args.macros)

    rdf = ROOT.RDataFrame('events', args.rec_file)
    df = flatten_collection(rdf, args.coll)
    # strip the collection prefix so columns read e.g. 'local.x'
    df.rename(columns={c: c.replace(args.coll + '.', '') for c in df.columns}, inplace=True)

    # half sizes: maximum local coordinates observed in the data
    sx, sy = df[['local.x', 'local.y']].max().values
    print(sx, sy, args.size)

    # assign each hit to a first-level aggregator (FPGA) segment along local y
    df.loc[:, 'fpga'] = pos2segid(df['local.y'].values, args.size)
    # hits per (event, layer, fpga); NOTE(review): sector == 8 appears to pick
    # one representative sector -- confirm against the detector layout
    counts = df[df['sector'] == 8].groupby(['event', 'layer', 'fpga'])['local.z'].count().to_frame(name='nhits')
    dfa = counts.groupby(['layer', 'fpga']).describe().reset_index()
    # flatten column levels produced by describe()
    dfa.columns = ['.'.join(col).strip('.') for col in dfa.columns.values]
    dfa = dfa.astype({'fpga': int, 'nhits.count': int})
    print(dfa)

    # per-layer occupancy: max hits/event for every aggregator segment
    fig, axs = plt.subplots(3, 2, figsize=(16, 9), dpi=160, sharex='all', sharey='all',
                            gridspec_kw={'hspace': 0, 'wspace': 0.})
    # layers are 1-indexed (filtered as il + 1 below), so iterate max() values,
    # not max() + 1 -- the extra pass only plotted empty data
    for ax, il in zip(axs.flat, np.arange(dfa['layer'].max())):
        data = dfa.loc[dfa['layer'] == il + 1]
        ax.errorbar(segid2pos(data['fpga'].values, args.size), data['nhits.max'],
                    xerr=args.size / 2., fmt='o', capsize=3)
        ax.tick_params(labelsize=20)
        ax.text(0.95, 0.95, 'Layer {}'.format(il + 1), fontsize=24,
                transform=ax.transAxes, ha='right', va='top')
        ax.grid(linestyle=':')
    fig.text(0.05, 0.5, 'Max. Number of Hits / Event / FPGA', fontsize=24, va='center', rotation=90)
    fig.text(0.5, 0.03, 'z (mm)', fontsize=24, ha='center')
    fig.savefig(os.path.join(args.outdir, 'occupancy_perlayer.png'))

    # 2D occupancy map: layer vs z, color-coded by max hits/event
    fig, ax = plt.subplots(figsize=(16, 9))
    ax.set_xlim(-sy, sy)
    ax.set_ylim(0, dfa.layer.max())
    # color map
    cmap = truncate_colormap(plt.get_cmap('jet'), 0.1, 0.9)
    dfa.loc[:, 'max_norm'] = dfa['nhits.max'].values / dfa['nhits.max'].max()
    recs, colors = [], []
    for il in np.arange(dfa['layer'].max()):
        data = dfa.loc[dfa['layer'] == il + 1]
        recs += [Rectangle((y - args.size / 2., il), args.size, 1)
                 for y in segid2pos(data['fpga'].values, args.size)]
        colors += [cmap(v) for v in data['max_norm'].values]
    ax.add_collection(PatchCollection(recs, facecolors=colors, edgecolors='k'))
    ax.tick_params(labelsize=22)
    ax.set_xlabel('z (mm)', fontsize=24)
    ax.set_ylabel('Layer', fontsize=24)
    cb = plt.colorbar(cm.ScalarMappable(norm=matplotlib.colors.Normalize(vmin=0, vmax=dfa['nhits.max'].max()),
                                        cmap=cmap),
                      ax=ax, shrink=0.85)
    cb.ax.tick_params(labelsize=22)
    cb.ax.get_yaxis().labelpad = 22
    cb.ax.set_ylabel('Max. Number of Hits / Event', rotation=90, fontsize=24)
    fig.savefig(os.path.join(args.outdir, 'occupancy_map.png'))
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Please register or to comment