Commit 3c653cc1 authored by Whitney Armstrong

new file: 1q

	new file:   DE.sbatch
	new file:   DE.sh
	new file:   H2_diff.sh
	new file:   HMS_PID.sh
	new file:   LT.sbatch
	new file:   LT.sh
	new file:   LT_mac.sh
	new file:   SHMS_PID.sh
	new file:   TE.sbatch
	new file:   TE.sh
	new file:   build_runsinfo.sh
	new file:   cal_calib.sbatch
	new file:   cal_calib.sh
	modified:   coin_replay.sh
	new file:   do_sth.sbatch
	new file:   do_sth.sh
	new file:   do_sth2.sh
	new file:   do_sth_fallruns.sbatch
	new file:   do_sth_nosubmit.sh
	new file:   do_sth_runs.sh
	new file:   do_sth_springruns.sbatch
	new file:   fall_good_runlist.txt
	modified:   grep_current.sbatch
	modified:   grep_current.sh
	new file:   grep_current_json.sbatch
	new file:   grep_current_json.sh
	new file:   grep_current_manual.sh
	new file:   kin_acceptance.sh
	new file:   kin_acceptance_fall.sbatch
	new file:   kin_events.sh
	new file:   kin_events_fall.sbatch
	new file:   kin_events_spring.sbatch
	new file:   kin_events_spring.sh
	new file:   out.txt
	new file:   plot_histo.sbatch
	new file:   plot_histo.sh
	new file:   plot_te.sh
	new file:   pt.sh
	new file:   redo.txt
	new file:   replay_fall.sbatch
	new file:   replay_fall_1.sbatch
	new file:   replay_redo.sbatch
	new file:   replay_sidis.sbatch
	new file:   replay_spring.sbatch
	new file:   rf_DE.sbatch
	new file:   rf_DE.sh
	new file:   run.sh
	new file:   rungroups.txt
	new file:   simc.sbatch
	new file:   simc.sh
	new file:   simc_H2.sh
	new file:   simc_pos.sh
	new file:   skim.sbatch
	new file:   skim.sh
	new file:   spring_good_runlist.txt
	new file:   test.sbatch
	new file:   test.sh
	new file:   yield
	new file:   yield.sbatch
	new file:   yield.sh
	new file:   yield_ratio.sh
	new file:   yield_sim.sbatch
	new file:   yield_sim.sh
parent 4aa1ecf4
Pipeline #34301 failed
#!/bin/bash
#SBATCH --account=jlab
#SBATCH --time=12:00:00 # Run time in hh:mm:ss
#SBATCH --mem-per-cpu=2024 # Maximum memory required per CPU (in megabytes)
#SBATCH --job-name=csv
#SBATCH --ntasks=180
#SBATCH --output=/home/sjia/out/coin-%A_%a.out
#SBATCH --error=/home/sjia/out/coin-%A_%a.err
# Load the default version of GNU parallel.
module purge
module load gcc/7.1.0-4bgguyp
module load parallel
# When running a large number of tasks simultaneously, it may be
# necessary to increase the user process limit.
ulimit -u 10000
# This specifies the options used to run srun. The "-N1 -n1" options are
# used to allocate a single core to each task.
srun="srun --exclusive -N1 -n1"
JOBLOG=/home/sjia/out/csv_replay.progress
LOGDIR=/lcrc/project/jlab/data/hallc/csv/replay/log
TMPDIR=/lcrc/globalscratch/sjia
# This specifies the options used to run GNU parallel:
#
# --delay of 0.2 prevents overloading the controlling node.
#
# -j is the number of tasks run simultaneously.
#
# The combination of --joblog and --resume creates a task log that
# can be used to monitor progress.
#
#parallel="parallel --delay 0.2 -j $SLURM_NTASKS --joblog ${JOBLOG} --resume --tmpdir ${TMPDIR}"
parallel="parallel --delay 0.2 -j $SLURM_NTASKS --joblog ${JOBLOG} --tmpdir ${TMPDIR}"
# Run coin_replay.sh under GNU parallel and srun, once for each run
# number in the range below.
$parallel "$srun ./coin_replay.sh {1} &> $LOGDIR/coin_replay.sh.{1}" ::: {7611..7655}
#!/bin/bash
#SBATCH --account=jlab
#SBATCH --time=24:00:00 # Run time in hh:mm:ss
#SBATCH --mem-per-cpu=4048 # Maximum memory required per CPU (in megabytes)
#SBATCH --job-name=DE
#SBATCH --ntasks=36
#SBATCH --output=/home/jias/out/DE-%A_%a.out
#SBATCH --error=/home/jias/out/DE-%A_%a.err
# Load the default version of GNU parallel.
module purge
module load gcc/7.1.0-4bgguyp
module load parallel
# When running a large number of tasks simultaneously, it may be
# necessary to increase the user process limit.
ulimit -u 10000
# This specifies the options used to run srun. The "-N1 -n1" options are
# used to allocate a single core to each task.
srun="srun --exclusive -N1 -n1"
JOBLOG=/home/jias/out/DE.progress
LOGDIR=/lcrc/project/jlab/data/hallc/csv/replay/log/
# This specifies the options used to run GNU parallel:
#
# --delay of 0.2 prevents overloading the controlling node.
#
# -j is the number of tasks run simultaneously.
#
# The combination of --joblog and --resume creates a task log that
# can be used to monitor progress.
#
#parallel="parallel --delay 0.2 -j $SLURM_NTASKS --joblog ${JOBLOG} --resume --tmpdir ${TMPDIR}"
#parallel="parallel --delay 0.2 -j $SLURM_NTASKS --joblog ${JOBLOG} --tmpdir ${TMPDIR}"
parallel="parallel --delay 0.2 -j $SLURM_NTASKS --joblog ${JOBLOG}"
# Run DE.sh under GNU parallel and srun, once for each run-group
# index in the range below.
#$parallel "$srun ./DE.sh {1} &> $LOGDIR/DE.sh.{1}" ::: {59..93}
$parallel "$srun ./DE.sh {1} &> $LOGDIR/DE.sh.{1}" ::: {1..58}
#!/bin/bash
module load singularity
module use /lcrc/project/jlab/local/etc/modulefiles
module load hallac_container/1.9.1
RUNGROUP=$((10 * $1)) # group index scaled by 10, as expected by the analysis macros
export SINGULARITY_BINDPATH="/lcrc,/scratch"
REPLAYDIR="/lcrc/project/jlab/csv/offline/online_csv"
ODIR="$REPLAYDIR/results"
echo "================================"
echo "do something for $RUNGROUP"
echo "================================"
cd ${REPLAYDIR}
#root -q -b "shuo_analysis/DE/rf_offset.cxx+($RUNGROUP)" || exit $?
#root -q -b "shuo_analysis/DE/rf_offset_H2.cxx+($RUNGROUP)" || exit $?
#root -q -b "shuo_analysis/DE/rf_offset_Dummy.cxx+($RUNGROUP)" || exit $?
#root -q -b "shuo_analysis/DE/HMS_DE_clean.cxx+($RUNGROUP)" || exit$?
#root -q -b "shuo_analysis/DE/HMS_DE_H2_clean.cxx+($RUNGROUP)" || exit$?
#root -q -b "shuo_analysis/DE/HMS_DE_Dummy_clean.cxx+($RUNGROUP)" || exit$?
#root -q -b "shuo_analysis/DE/HMS_cuts_compare.cxx+($RUNGROUP)" || exit$?
#root -q -b "shuo_analysis/DE/SHMS_DE_clean.cxx+($RUNGROUP)" || exit$?
#root -q -b "shuo_analysis/DE/SHMS_DE_Dummy_clean.cxx+($RUNGROUP)" || exit$?
#root -q -b "shuo_analysis/DE/SHMS_DE_H2_clean.cxx+($RUNGROUP)" || exit$?
#root -q -b "shuo_analysis/DE/SHMS_cuts_compare.cxx+($RUNGROUP)" || exit$?
#root -q -b "shuo_analysis/DE/SHMS_hgcer.cxx+($RUNGROUP)" || exit$?
#root -q -b "shuo_analysis/DE/SHMS_RF_twofit.cxx+($RUNGROUP)" || exit$?
root -q -b "shuo_analysis/DE/SHMS_rf_twocomplicatedfit_high.cxx+($RUNGROUP)" || exit$?
root -q -b "shuo_analysis/DE/SHMS_rf_twocomplicatedfit_low.cxx+($RUNGROUP)" || exit$?
#root -q -b "shuo_analysis/DE/SHMS_RF_twofit_high.cxx+($RUNGROUP)" || exit$?
#root -q -b "shuo_analysis/DE/SHMS_rf_cuts_compare.cxx+($RUNGROUP)" || exit$?
#root -q -b "shuo_analysis/DE/SHMS_rf_cuts_compare_all.cxx+($RUNGROUP)" || exit$?
#for(( i = 1;i<59;i++))
#do
#root -q -b "shuo_analysis/DE/SHMS_rf_cuts_compare_all.cxx+(10*$i)" || exit$?
#root -q -b "shuo_analysis/DE/SHMS_rf_cuts_compare.cxx+(10*$i)" || exit$?
#done
echo "================================="
echo "end of do something $RUNGROUP"
echo "================================="
#!/bin/bash
module load singularity
module use /lcrc/project/jlab/local/etc/modulefiles
module load hallac_container/1.9.1
export SINGULARITY_BINDPATH="/lcrc,/scratch"
REPLAYDIR="/lcrc/project/jlab/csv/offline/online_csv"
ODIR="$REPLAYDIR/results"
cd ${REPLAYDIR}
root -q -b "shuo_analysis/Yield/plot_H2_ratio.cxx" || exit $?
root -q -b "shuo_analysis/Yield/plot_H2_D2.cxx" || exit $?
root -q -b "shuo_analysis/Yield/arrange_H2_diff_sum.cxx" || exit $?
root -q -b "shuo_analysis/Yield/weighted_average_H2_diff_sum.cxx" || exit $?
#!/bin/bash
module load singularity
module use /lcrc/project/jlab/local/etc/modulefiles
module load hallac_container/1.5.5
RUNGROUP=$1
export SINGULARITY_BINDPATH="/lcrc,/scratch"
REPLAYDIR="/lcrc/project/jlab/csv/offline/online_csv"
TMPDIR="/scratch/kin-$RUNGROUP"
ODIR="$REPLAYDIR/results"
echo "================================"
echo "HMS PID for $RUNGROUP"
echo "================================"
cd ${REPLAYDIR}
root -q -b "shuo_analysis/DE/HMS_DE.cxx+($RUNGROUP)" || exit $?
echo "================================="
echo "end of do something $RUNGROUP"
echo "================================="
#!/bin/bash
#SBATCH --account=jlab
#SBATCH --time=12:00:00 # Run time in hh:mm:ss
#SBATCH --mem-per-cpu=2024 # Maximum memory required per CPU (in megabytes)
#SBATCH --job-name=LTh
#SBATCH --ntasks=36
#SBATCH --output=/home/jias/out/LT-%A_%a.out
#SBATCH --error=/home/jias/out/LT-%A_%a.err
# Load the default version of GNU parallel.
module purge
module load gcc/7.1.0-4bgguyp
module load parallel
# When running a large number of tasks simultaneously, it may be
# necessary to increase the user process limit.
ulimit -u 10000
# This specifies the options used to run srun. The "-N1 -n1" options are
# used to allocate a single core to each task.
srun="srun --exclusive -N1 -n1"
JOBLOG=/home/jias/out/do_sth.progress
LOGDIR=/lcrc/project/jlab/data/hallc/csv/replay/log/
# This specifies the options used to run GNU parallel:
#
# --delay of 0.2 prevents overloading the controlling node.
#
# -j is the number of tasks run simultaneously.
#
# The combination of --joblog and --resume creates a task log that
# can be used to monitor progress.
#
#parallel="parallel --delay 0.2 -j $SLURM_NTASKS --joblog ${JOBLOG} --resume --tmpdir ${TMPDIR}"
#parallel="parallel --delay 0.2 -j $SLURM_NTASKS --joblog ${JOBLOG} --tmpdir ${TMPDIR}"
parallel="parallel --delay 0.2 -j $SLURM_NTASKS --joblog ${JOBLOG}"
# Run LT.sh under GNU parallel and srun, once for each run-group
# index in the range below.
$parallel "$srun ./LT.sh {1} &> $LOGDIR/LT.sh.{1}" ::: {1..58}
#!/bin/bash
module load singularity
module use /lcrc/project/jlab/local/etc/modulefiles
module load hallac_container/1.9.1
RUNGROUP=$((10 * $1)) # group index scaled by 10, as expected by the analysis macros
export SINGULARITY_BINDPATH="/lcrc,/scratch"
REPLAYDIR="/lcrc/project/jlab/csv/offline/online_csv"
ODIR="$REPLAYDIR/results"
echo "================================"
echo "do something for $RUNGROUP"
echo "================================"
cd ${REPLAYDIR}
#root -q -b "shuo_analysis/LT/Total_live_time.cxx+($RUNGROUP)" || exit$?
#root -q -b "shuo_analysis/LT/Computer_live_time.cxx+($RUNGROUP)" || exit$?
#root -q -b "shuo_analysis/LT/Total_live_time_Dummy.cxx+($RUNGROUP)" || exit$?
root -q -b "shuo_analysis/LT/Total_live_time_H2.cxx+($RUNGROUP)" || exit$?
echo "================================="
echo "end of do something $RUNGROUP"
echo "================================="
#!/bin/bash
REPLAYDIR="/lcrc/project/jlab/csv/offline/online_csv"
echo "================================"
echo "Get Live time"
echo "================================"
cd ${REPLAYDIR}
#root -q -b "shuo_analysis/LT/Computer_live_time.cxx+(10*$1)" || exit$?
root -q -b "shuo_analysis/LT/Total_live_time.cxx+(10*$1)" || exit$?
root -q -b "shuo_analysis/LT/Total_live_time_Dummy.cxx+(10*$1)" || exit$?
#for(( i = 1;i<59;i++))
#do
#root -q -b "shuo_analysis/LT/Computer_live_time.cxx+(10*$i)" || exit$?
#root -q -b "shuo_analysis/LT/Total_live_time.cxx+(10*$i)" || exit$?
#done
echo "================================="
echo "end of do something $RUNGROUP"
echo "================================="
#!/bin/bash
module load singularity
module use /lcrc/project/jlab/local/etc/modulefiles
module load hallac_container/1.5.5
RUNGROUP=$1
export SINGULARITY_BINDPATH="/lcrc,/scratch"
REPLAYDIR="/lcrc/project/jlab/csv/offline/online_csv"
TMPDIR="/scratch/kin-$RUNGROUP"
ODIR="$REPLAYDIR/results"
echo "================================"
echo "SHMS PID for $RUNGROUP"
echo "================================"
cd ${REPLAYDIR}
root -q -b "shuo_analysis/pid/SHMS/SHMS_PID_RunGroup_calcut.cxx+($RUNGROUP)" || exit $?
root -q -b "shuo_analysis/pid/SHMS/SHMS_PID_RunGroup_cercut.cxx+($RUNGROUP)" || exit $?
root -q -b "shuo_analysis/pid/SHMS/SHMS_PID_plots.cxx+($RUNGROUP)" || exit $?
root -q -b "shuo_analysis/pid/SHMS/SHMS_cuts_compare.cxx+($RUNGROUP)" || exit $?
echo "================================="
echo "end of do something $RUNGROUP"
echo "================================="
#!/bin/bash
#SBATCH --account=jlab
#SBATCH --time=12:00:00 # Run time in hh:mm:ss
#SBATCH --mem-per-cpu=2024 # Maximum memory required per CPU (in megabytes)
#SBATCH --job-name=TE
#SBATCH --ntasks=36
#SBATCH --output=/home/jias/out/TE-%A_%a.out
#SBATCH --error=/home/jias/out/TE-%A_%a.err
# Load the default version of GNU parallel.
module purge
module load gcc/7.1.0-4bgguyp
module load parallel
# When running a large number of tasks simultaneously, it may be
# necessary to increase the user process limit.
ulimit -u 10000
# This specifies the options used to run srun. The "-N1 -n1" options are
# used to allocate a single core to each task.
srun="srun --exclusive -N1 -n1"
JOBLOG=/home/jias/out/TE.progress
LOGDIR=/lcrc/project/jlab/data/hallc/csv/replay/log/
# This specifies the options used to run GNU parallel:
#
# --delay of 0.2 prevents overloading the controlling node.
#
# -j is the number of tasks run simultaneously.
#
# The combination of --joblog and --resume creates a task log that
# can be used to monitor progress.
#
#parallel="parallel --delay 0.2 -j $SLURM_NTASKS --joblog ${JOBLOG} --resume --tmpdir ${TMPDIR}"
#parallel="parallel --delay 0.2 -j $SLURM_NTASKS --joblog ${JOBLOG} --tmpdir ${TMPDIR}"
parallel="parallel --delay 0.2 -j $SLURM_NTASKS --joblog ${JOBLOG}"
# Run TE.sh under GNU parallel and srun, once for each run-group
# index in the range below.
$parallel "$srun ./TE.sh {1} &> $LOGDIR/TE.sh.{1}" ::: {1..93}
#!/bin/bash
module load singularity
module use /lcrc/project/jlab/local/etc/modulefiles
module load hallac_container/1.9.1
RUNGROUP=$((10 * $1)) # group index scaled by 10, as expected by the analysis macros
export SINGULARITY_BINDPATH="/lcrc,/scratch"
REPLAYDIR="/lcrc/project/jlab/csv/offline/online_csv"
ODIR="$REPLAYDIR/results"
echo "================================"
echo "do something for $RUNGROUP"
echo "================================"
cd ${REPLAYDIR}
root -q -b "shuo_analysis/TE/shms_TE_D2runs.cxx+($RUNGROUP)" || exit$?
root -q -b "shuo_analysis/TE/hms_TE_D2runs.cxx+($RUNGROUP)" || exit$?
root -q -b "shuo_analysis/TE/shms_TE_Dummyruns.cxx+($RUNGROUP)" || exit$?
root -q -b "shuo_analysis/TE/hms_TE_Dummyruns.cxx+($RUNGROUP)" || exit$?
root -q -b "shuo_analysis/TE/shms_TE_H2runs.cxx+($RUNGROUP)" || exit$?
root -q -b "shuo_analysis/TE/hms_TE_H2runs.cxx+($RUNGROUP)" || exit$?
#for i in {10..930..10}
#do
##
##root -q -b "shuo_analysis/TE/shms_TE_D2runs.cxx+($i)" || exit$?
##root -q -b "shuo_analysis/TE/hms_TE_D2runs.cxx+($i)" || exit$?
##root -q -b "shuo_analysis/TE/shms_TE_Dummyruns.cxx+($i)" || exit$?
##root -q -b "shuo_analysis/TE/hms_TE_Dummyruns.cxx+($i)" || exit$?
##root -q -b "shuo_analysis/TE/shms_TE_H2runs.cxx+($i)" || exit$?
##root -q -b "shuo_analysis/TE/hms_TE_H2runs.cxx+($i)" || exit$?
#root -q -b "shuo_analysis/TE/shms_rate_D2.cxx+($i)" || exit$?
##root -q -b "shuo_analysis/TE/shms_rate_Dummy.cxx+($i)" || exit$?
##root -q -b "shuo_analysis/TE/shms_rate_H2.cxx+($i)" || exit$?
##
#done
echo "================================="
echo "end of do something $RUNGROUP"
echo "================================="
#!/bin/bash
module load singularity
module use /lcrc/project/jlab/local/etc/modulefiles
module load hallac_container/1.9.1
export SINGULARITY_BINDPATH="/lcrc,/scratch"
REPLAYDIR="/lcrc/project/jlab/csv/offline/online_csv"
ODIR="$REPLAYDIR/results"
cd ${REPLAYDIR}
root -q -b "shuo_analysis/Yield/combine_json.cxx" || exit $?
root -q -b "shuo_analysis/TE/write_report_TE_to_json.cxx" || exit $?
root -q -b "shuo_analysis/DE/write_DE_to_json_updated.cxx" || exit $?
root -q -b "shuo_analysis/LT/write_lt_to_json.cxx" || exit $?
root -q -b "shuo_analysis/Eff/FADC_corr.cxx" || exit $?
root -q -b "shuo_analysis/Eff/Target_boiling.cxx" || exit $?
#!/bin/bash
#SBATCH --account=jlab
#SBATCH --time=12:00:00 # Run time in hh:mm:ss
#SBATCH --mem-per-cpu=2024 # Maximum memory required per CPU (in megabytes)
#SBATCH --job-name=param_json
#SBATCH --ntasks=36
#SBATCH --output=/home/sjia/out/param_json-%A_%a.out
#SBATCH --error=/home/sjia/out/param_json-%A_%a.err
# Load the default version of GNU parallel.
module purge
module load gcc/7.1.0-4bgguyp
module load parallel
# When running a large number of tasks simultaneously, it may be
# necessary to increase the user process limit.
ulimit -u 10000
# This specifies the options used to run srun. The "-N1 -n1" options are
# used to allocate a single core to each task.
srun="srun --exclusive -N1 -n1"
JOBLOG=/home/sjia/out/shms_cal_param_to_json.progress
LOGDIR=/lcrc/project/jlab/data/hallc/csv/replay/log
TMPDIR=/lcrc/globalscratch/sjia
# This specifies the options used to run GNU parallel:
#
# --delay of 0.2 prevents overloading the controlling node.
#
# -j is the number of tasks run simultaneously.
#
# The combination of --joblog and --resume creates a task log that
# can be used to monitor progress.
#
#parallel="parallel --delay 0.2 -j $SLURM_NTASKS --joblog ${JOBLOG} --resume --tmpdir ${TMPDIR}"
parallel="parallel --delay 0.2 -j $SLURM_NTASKS --joblog ${JOBLOG} --tmpdir ${TMPDIR}"
# Run cal_calib.sh under GNU parallel and srun, once for each run
# number in the range below.
$parallel "$srun ./cal_calib.sh {1} &> $LOGDIR/cal_calib.sh.{1}" ::: {6009..7830}
#!/bin/bash
module load singularity
module use /lcrc/project/jlab/local/etc/modulefiles
module load hallac_container/1.1.0
RUN=$1
export SINGULARITY_BINDPATH="/lcrc,/scratch"
REPLAYDIR="/lcrc/project/jlab/csv/offline/online_csv"
TMPDIR="/scratch/replay-$RUN"
ODIR="$REPLAYDIR/ROOTfiles"
echo "================================"
echo "SHMS cal param to json for COIN run $RUN"
echo "================================"
cd ${REPLAYDIR}
root -q -b "shuo_analysis/calorimeter_gain_analysis/shms_cal_param_to_json.cxx+($RUN)" || exit $?
echo "================================="
echo "SHMS cal param to json for run $RUN"
echo "================================="
#!/bin/bash
module load singularity
#echo $LD_LIBRARY_PATH
#module list
module purge
module use /lcrc/project/jlab/local/etc/modulefiles
module load hallac_container/1.1.0
module load singularity
module load hallac_container/1.8.0
#module list
RUN=$1
export SINGULARITY_BINDPATH="/lcrc,/scratch"
#export SINGULARITY_BINDPATH="/lcrc,/scratch"
REPLAYDIR="/lcrc/project/jlab/csv/offline/online_csv"
TMPDIR="/scratch/replay-$RUN"
#TMPDIR="/lcrc/project/jlab/data/hallc/csv/replay/tmp"
ODIR="$REPLAYDIR/ROOTfiles"
echo "================================"
echo "Running replay for COIN run $RUN"
echo "================================"
cd ${REPLAYDIR}
mkdir -p $TMPDIR/full
mkdir -p $TMPDIR/logs
hcana -q -b "scripts/replay_production_coin.cxx+($RUN,-1)" || exit $?
rsync -va $TMPDIR/full/* $ODIR/full
mkdir -p $ODIR/log/log-$RUN
rsync -va $TMPDIR/logs/* $ODIR/log/log-$RUN
rm -rf $TMPDIR
#mkdir -p $TMPDIR/full
#mkdir -p $TMPDIR/logs
#which hcana
#echo $LD_LIBRARY_PATH
#/soft/singularity/3.5.3/bin/singularity exec -B /blues:/blues -B /lcrc:/lcrc -B /scratch:/scratch -B /gpfs:/gpfs /lcrc/project/jlab/local/opt/hallac_container_1.9.1/lib/hallac-1.9.1.sif bash
#/soft/singularity/3.5.3/bin/singularity exec -B /blues:/blues -B /lcrc:/lcrc -B /scratch:/scratch -B /gpfs:/gpfs /lcrc/project/jlab/local/opt/hallac_container_1.9.1/lib/hallac-1.9.1.sif hcana "scripts/replay_production_coin.cxx+($RUN,-1)" || exit $?
#hcana "scripts/replay_production_coin.cxx+($RUN,10000)" || exit $?
hcana "scripts/replay_production_coin.cxx+($RUN,-1)" || exit $?
#chmod -R g+w $TMPDIR
#rsync -va $TMPDIR/full/* $ODIR/full
#mkdir -p $ODIR/log/log-$RUN
#rsync -va $TMPDIR/logs/* $ODIR/log/log-$RUN
#rm -rf $TMPDIR
echo "================================="
echo "Finished processing COIN run $RUN"
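The staging block that is now commented out created $TMPDIR on node-local /scratch and removed it only after a successful rsync, so a killed or failed job could leave partial replays behind. If that pattern is re-enabled, a minimal sketch of a cleanup guard, assuming the TMPDIR defined above:

# Remove the scratch staging area on any exit (success, failure, or
# SLURM time-out) so partial replay output is not left on the node.
trap 'rm -rf "$TMPDIR"' EXIT
mkdir -p "$TMPDIR/full" "$TMPDIR/logs"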
#!/bin/bash
#SBATCH --account=jlab
#SBATCH --time=12:00:00 # Run time in hh:mm:ss
#SBATCH --mem-per-cpu=2024 # Maximum memory required per CPU (in megabytes)
#SBATCH --job-name=do_sth
#SBATCH --ntasks=36
#SBATCH --output=/home/jias/out/sth-%A_%a.out
#SBATCH --error=/home/jias/out/sth-%A_%a.err
# Load the default version of GNU parallel.
module purge
module load gcc/7.1.0-4bgguyp
module load parallel
# When running a large number of tasks simultaneously, it may be
# necessary to increase the user process limit.
ulimit -u 10000
# This specifies the options used to run srun. The "-N1 -n1" options are
# used to allocate a single core to each task.
srun="srun --exclusive -N1 -n1"
JOBLOG=/home/jias/out/do_sth.progress
LOGDIR=/lcrc/project/jlab/data/hallc/csv/replay/log/
# This specifies the options used to run GNU parallel:
#
# --delay of 0.2 prevents overloading the controlling node.
#
# -j is the number of tasks run simultaneously.
#
# The combination of --joblog and --resume creates a task log that
# can be used to monitor progress.
#
#parallel="parallel --delay 0.2 -j $SLURM_NTASKS --joblog ${JOBLOG} --resume --tmpdir ${TMPDIR}"
#parallel="parallel --delay 0.2 -j $SLURM_NTASKS --joblog ${JOBLOG} --tmpdir ${TMPDIR}"
parallel="parallel --delay 0.2 -j $SLURM_NTASKS --joblog ${JOBLOG}"
# Run do_sth.sh under GNU parallel and srun, once for each run-group
# index in the range below.
$parallel "$srun ./do_sth.sh {1} &> $LOGDIR/do_sth.sh.{1}" ::: {59..93}
#$parallel "$srun ./do_sth.sh {1} &> $LOGDIR/do_sth.sh.{1}" ::: {1..58}
#$parallel "$srun ./do_sth2.sh {1} &> $LOGDIR/do_sth2.sh.{1}" ::: {33..42}
#!/bin/bash
module load singularity
module use /lcrc/project/jlab/local/etc/modulefiles
module load hallac_container/1.9.1
RUNGROUP=$((10 * $1)) # group index scaled by 10, as expected by the analysis macros
export SINGULARITY_BINDPATH="/lcrc,/scratch"
REPLAYDIR="/lcrc/project/jlab/csv/offline/online_csv"
ODIR="$REPLAYDIR/results"
echo "================================"
echo "do something for $RUNGROUP"
echo "================================"
cd ${REPLAYDIR}
#root -q -b "shuo_analysis/check/skim_check.cxx+($RUNGROUP)" || exit $?
#root -q -b "shuo_analysis/check/skim_check_H2.cxx+($RUNGROUP)" || exit $?
#root -q -b "shuo_analysis/check/skim_check_Dummy.cxx+($RUNGROUP)" || exit $?
root -q -b "shuo_analysis/check/skim_check_ptsidis.cxx+($RUNGROUP)" || exit $?
#root -q -b "shuo_analysis/check/skim_check_H2.cxx+($RUNGROUP)" || exit $?
#root -q -b "shuo_analysis/check/skim_check_Dummy.cxx+($RUNGROUP)" || exit $?
#root -q -b "shuo_analysis/simc_data/compare_sim_data_new.cxx+($RUNGROUP)" || exit$?
#root -q -b "shuo_analysis/Yield/statistic_runs_D2.cxx+($RUNGROUP)" || exit $?
#root -q -b "shuo_analysis/DE/rf_offset.cxx+($RUNGROUP)" || exit $?
#root -q -b "shuo_analysis/DE/rf_offset_H2.cxx+($RUNGROUP)" || exit $?
#root -q -b "shuo_analysis/DE/rf_offset_Dummy.cxx+($RUNGROUP)" || exit $?
#root -q -b "shuo_analysis/DE/SHMS_DE.cxx+($RUNGROUP)" || exit $?
#root -q -b "shuo_analysis/DE/SHMS_RF_twofit_high.cxx+($RUNGROUP)" || exit$?
#root -q -b "shuo_analysis/LT/Computer_live_time.cxx+($RUNGROUP)" || exit$?
#root -q -b "shuo_analysis/LT/Total_live_time.cxx+($RUNGROUP)" || exit$?
#root -q -b "shuo_analysis/pt/pt_check.cxx+($RUNGROUP)" || exit$?
echo "================================="
echo "end of do something $RUNGROUP"
echo "================================="
#!/bin/bash
module load singularity
module use /lcrc/project/jlab/local/etc/modulefiles
module load hallac_container/1.9.1
RUNGROUP=$((10 * $1)) # group index scaled by 10, as expected by the analysis macros
export SINGULARITY_BINDPATH="/lcrc,/scratch"
REPLAYDIR="/lcrc/project/jlab/csv/offline/online_csv"
ODIR="$REPLAYDIR/results"
echo "================================"
echo "do something for $RUNGROUP"
echo "================================"
cd ${REPLAYDIR}
root -q -b "shuo_analysis/Yield/main.cpp" || exit $?
echo "================================="
echo "end of do something $RUNGROUP"
echo "================================="
#!/bin/bash
#SBATCH --account=jlab
#SBATCH --time=12:00:00 # Run time in hh:mm:ss
#SBATCH --mem-per-cpu=2024 # Maximum memory required per CPU (in megabytes)
#SBATCH --job-name=csv
#SBATCH --ntasks=36