
Compare revisions

Changes are shown as if the source revision was being merged into the target revision.

Commits on Source (37)
@@ -3,20 +3,27 @@ image: eicweb.phy.anl.gov:4567/containers/image_recipes/ubuntu_dind:latest
variables:
## Application versions used for the main release
## note: nightly builds will always use the master/main branch
JUGGLER_VERSION: "v9.0.0"
NPDET_VERSION: "v1.4.1"
JUGGLER_VERSION: "v9.2.0"
## Spack organization and repository, e.g. spack/spack
SPACK_ORGREPO: "spack/spack"
## Spack github version, e.g. v0.18.1 or commit hash
SPACK_VERSION: "v0.19.0"
## Space-separated list of spack cherry-picks
SPACK_CHERRYPICKS: ""
SPACK_CHERRYPICKS: "b5ef5c2eb5145020f9de1bcb964626ce6ac2d02e 99056e03bd3e903b222e300636ec484d85d4b3fb"
## Ref: https://github.com/spack/spack/commit/[hash]
## [hash]: [description]
## b5ef5c2eb5145020f9de1bcb964626ce6ac2d02e: geant4: version bumps for Geant4 11.1.0
## 99056e03bd3e903b222e300636ec484d85d4b3fb: acts: new versions 19.11.0, 21.0.0, 21.1.0
## Spack github version, e.g. v0.18 branch, v0.18.1 tag, or commit hash
## EIC spack organization and repository, e.g. eic/eic-spack
EICSPACK_ORGREPO: "eic/eic-spack"
## EIC spack github version, e.g. v0.18 branch, v0.18.1 tag, or commit hash
EICSPACK_VERSION: "v0.19"
## Space-separated list of eic-spack cherry-picks
EICSPACK_CHERRYPICKS: ""
## Ref: https://github.com/eic/eic-spack/commit/[hash]
## [hash]: [description]
## We need to enable Docker Buildkit to use cache mounts and better
## build performance overall
@@ -158,7 +165,7 @@ version:
.build:
rules:
- when: on_success
resource_group: build
resource_group: ${CI_COMMIT_REF_NAME}
## Use docker runner for docker builds
tags:
- docker-new
@@ -241,10 +248,14 @@ jug_dev:default:
--target=builder
-f containers/jug/dev.Dockerfile
--build-arg BASE_IMAGE="${BASE_IMAGE}"
--build-arg SPACK_ORGREPO="${SPACK_ORGREPO}"
--build-arg SPACK_VERSION="${SPACK_VERSION}"
--build-arg SPACK_CHERRYPICKS="${SPACK_CHERRYPICKS}"
--build-arg EICSPACK_ORGREPO="${EICSPACK_ORGREPO}"
--build-arg EICSPACK_VERSION="${EICSPACK_VERSION}"
--build-arg EICSPACK_CHERRYPICKS="${EICSPACK_CHERRYPICKS}"
--build-arg S3_ACCESS_KEY="${S3_ACCESS_KEY}"
--build-arg S3_SECRET_KEY="${S3_SECRET_KEY}"
--build-arg CACHE_BUST=${PACKAGE_HASH}
--build-arg INTERNAL_TAG=${INTERNAL_TAG}
--build-arg JUG_VERSION=${INTERNAL_TAG}-$(git rev-parse HEAD)
@@ -253,10 +264,14 @@ jug_dev:default:
- docker build -t ${CI_REGISTRY_IMAGE}/${BUILD_IMAGE}:${INTERNAL_TAG} ${CACHE_FLAG}
-f containers/jug/dev.Dockerfile
--build-arg BASE_IMAGE="${BASE_IMAGE}"
--build-arg SPACK_ORGREPO="${SPACK_ORGREPO}"
--build-arg SPACK_VERSION="${SPACK_VERSION}"
--build-arg SPACK_CHERRYPICKS="${SPACK_CHERRYPICKS}"
--build-arg EICSPACK_ORGREPO="${EICSPACK_ORGREPO}"
--build-arg EICSPACK_VERSION="${EICSPACK_VERSION}"
--build-arg EICSPACK_CHERRYPICKS="${EICSPACK_CHERRYPICKS}"
--build-arg S3_ACCESS_KEY="${S3_ACCESS_KEY}"
--build-arg S3_SECRET_KEY="${S3_SECRET_KEY}"
--build-arg CACHE_BUST=${PACKAGE_HASH}
--build-arg INTERNAL_TAG=${INTERNAL_TAG}
--build-arg JUG_VERSION=${INTERNAL_TAG}-$(git rev-parse HEAD)
@@ -278,7 +293,6 @@ jug_xl:default:
# BUILD_IMAGE: jug_xl_oneapi
extends: .build
stage: jug
resource_group: build
needs:
- version
- jug_dev:default
@@ -289,7 +303,6 @@ jug_xl:default:
--build-arg BASE_IMAGE=${BASE_IMAGE}
--build-arg INTERNAL_TAG=${INTERNAL_TAG}
--build-arg JUGGLER_VERSION=${JUGGLER_VERSION}
--build-arg NPDET_VERSION=${NPDET_VERSION}
--build-arg JUG_VERSION=${INTERNAL_TAG}-$(git rev-parse HEAD)
containers/jug
- !reference [.build, script]
@@ -375,10 +388,10 @@ jug_xl:feature:
.singularity:
stage: deploy
interruptible: true
resource_group: ${CI_COMMIT_REF_NAME}
rules:
- if: '$CI_PIPELINE_SOURCE == "merge_request_event"'
when: never
- when: on_success
artifacts:
expire_in: 1 days
paths:
@@ -405,6 +418,11 @@ jug_xl:singularity:default:
jug_xl:singularity:nightly:
extends: .singularity
rules:
- !reference ['.singularity', rules]
- if: '$CI_COMMIT_BRANCH == "master"'
when: on_success
- when: never
needs:
- version
- jug_xl:nightly
@@ -427,7 +445,6 @@ juggler:main:
variables:
TRIGGERED_BY_NIGHTLY: 1
JUGGLER_VERSION: main
NPDET_VERSION: master
DETECTOR_VERSION: master
IP6_VERSION: master
trigger:
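For reference, the `SPACK_CHERRYPICKS` and `EICSPACK_CHERRYPICKS` variables above hold a space-separated list of commit hashes that are applied on top of the pinned spack checkout. A minimal shell sketch of that step, mirroring the `git cherry-pick -n` call in `containers/jug/dev.Dockerfile` further down (paths and values here are illustrative only):

```bash
# Sketch: apply the cherry-pick list from the CI variables onto a spack checkout.
SPACK_ROOT=/opt/spack
SPACK_CHERRYPICKS="b5ef5c2eb5145020f9de1bcb964626ce6ac2d02e 99056e03bd3e903b222e300636ec484d85d4b3fb"
if [ -n "$SPACK_CHERRYPICKS" ]; then
  # -n stages the changes without creating new commits on the checkout
  git -C "$SPACK_ROOT" cherry-pick -n $SPACK_CHERRYPICKS
fi
```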
EIC software container
============================================
Simple Installation
------------
*The environment has been tested on Linux (requires Singularity v3+) and macOS (requires
Docker).*
Please follow the steps below to set up and run the container in your environment.
1. Create a local directory that you want to work in, e.g. `$HOME/eic`, and go into this
directory.
```bash
mkdir $HOME/eic
cd $HOME/eic
```
2. Execute the following line in your terminal to set up your environment in this directory
and install the latest stable container:
```bash
curl https://eicweb.phy.anl.gov/containers/eic_container/-/raw/master/install.sh | bash
```
3. You can now load your development environment by executing the `eic-shell` script that
is in your top-level working directory.
```bash
eic-shell
```
4. Within your development environment (`eic-shell`), you can install software to the
internal `$EIC_SHELL_PREFIX`, as sketched below.
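For example, a minimal sketch of installing a CMake-based package into that prefix (the package name and source directory are placeholders):
```bash
# Inside eic-shell: build and install a CMake project into the container-local prefix.
# "mypackage" is a placeholder for whatever you are actually building.
cd $HOME/eic/mypackage
cmake -B build -S . -DCMAKE_INSTALL_PREFIX=$EIC_SHELL_PREFIX
cmake --build build -j$(nproc) -- install
```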
Singularity Container setup for Development Usage
-------------
**Note: this container download script is meant for expert usage. If it is unclear to you
why you would want to do this, you are probably looking for the simple `jug_xl` installation
above.**
You can use the same install scripts to set up other containers, including `jug_dev`
(the main development container). Note that for `jug_dev` there is no nightly release, and
the appropriate version (tag) would be `testing`. To set up the `jug_dev:testing` environment, do
```bash
curl https://eicweb.phy.anl.gov/containers/eic_container/-/raw/master/install.sh | bash -s -- -c jug_dev -v testing
```
Included software:
------------------
- Included software (for the exact versions, check the file [spack.yaml](spack.yaml) or use the command `eic-info` inside the container; see the example after this list):
- gcc
- madx
- cmake
- fmt cxxstd=17
- spdlog
- nlohmann-json
- heppdt
- clhep cxxstd=17
- eigen
- python
- py-numpy
- py-pip
- pkg-config
- xrootd cxxstd=17 +python
- root cxxstd=17
+fftw +fortran +gdml +http +mlp +pythia8
+root7 +tmva +vc +xrootd +ssl
^mesa swr=none +opengl -llvm -osmesa
- pythia8 +fastjet
- fastjet
- hepmc3 +python +rootio
- stow
- cairo +fc+ft+X+pdf+gobject
- podio
- geant4 cxxstd=17 +opengl +vecgeom +x11 +qt +threads ^qt +opengl
- dd4hep +geant4 +assimp +hepmc3 +ipo +lcio
- acts +dd4hep +digitization +identification +json +tgeo +ipo +examples +fatras +geant4
- genfit
- gaudi
- dawn
- dawncut
- opencascade
- emacs toolkit=athena
- imagemagick
- igprof
- The singularity build exports the following applications:
- eic-shell: a development shell in the image
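To check the exact versions in a given container, as mentioned above you can use `eic-info`; the underlying build report is also available:
```bash
# Inside the container: print the software summary recorded at build time
eic-info
# The raw build report (spack version, package list and variants) is kept in /etc/jug_info
cat /etc/jug_info
```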
Using the docker container for your CI purposes
-----------------------------------------------
**These instructions are old and need updating. In general we recommend using
`eicweb/juggler:latest` for most CI use cases. This image is functionally identical to
`jug_xl:nightly`.**
The docker containers are publicly accessible from
[Dockerhub](https://hub.docker.com/u/eicweb). You probably want to use the default
`jug_xl` container. Relevant versions are:
- `eicweb/jug_xl:nightly`: nightly release, with the latest detector and reconstruction
version. This is probably what you want to use unless you are dispatching a large
simulation/reconstruction job.
- `eicweb/jug_xl:3.0-stable`: latest stable release, what you want to use for large
simulation jobs (for reproducibility). Please coordinate with the software group to
ensure all desired software changes are present in this container.
1. To load the container environment in your run scripts, you do not need to do anything special.
The environment is already set up with good defaults, so you can use all the programs
in the container as usual and assume everything needed to run the included software
is already set up.
2. If using this container as a basis for a new container, you can directly access
the full container environment from a docker `RUN` shell command with no further
action needed. For the best experience, install your software to
`/usr/local` to fully integrate with the existing environment. (Note that, internally,
`/usr/local` is a symlink to `/opt/view`.) See the sketch after this list.
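As a rough sketch of both points (the image tag is one of those listed above; the script name is a placeholder):
```bash
# Run a job directly in the published image; no extra environment setup is
# needed inside the container, the defaults are already in place.
docker run --rm -v "$PWD":/work -w /work eicweb/jug_xl:nightly \
  bash -c './run_sim.sh'   # placeholder for your own script
```
When deriving your own image (`FROM eicweb/jug_xl:nightly`), installing your additions into `/usr/local` keeps them on the default search paths, since `/usr/local` is a symlink to `/opt/view` inside the image.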
EIC software environment container
==================================
For installation instructions of `eic-shell`, see https://github.com/eic/eic-shell.
4.0.0
22.12.0
#syntax=docker/dockerfile:1.2
# Minimal container based on NVIDIA CUDA for up-to-date packages.
# Very lightweight container with a minimal build environment (LOL)
FROM nvidia/cuda:11.6.0-devel-ubuntu18.04
LABEL maintainer="Wouter Deconinck <wouter.deconinck@umanitoba.ca>" \
name="cuda_base" \
march="amd64"
COPY bashrc /root/.bashrc
ENV CLICOLOR_FORCE=1 \
LANGUAGE=en_US.UTF-8 \
LANG=en_US.UTF-8 \
LC_ALL=en_US.UTF-8
## Install additional packages. Remove the auto-cleanup functionality
## for docker, as we're using the new buildkit cache instead.
## We also install gitlab-runner, from the buster package (as bullseye is not available atm)
## TODO: libyaml-cpp-dev is a dependency for afterburner. We can probably remove
## this once afterburner is added to spack
RUN --mount=type=cache,target=/var/cache/apt \
rm -f /etc/apt/apt.conf.d/docker-clean \
&& ln -fs /usr/share/zoneinfo/America/New_York /etc/localtime \
&& echo "US/Eastern" > /etc/timezone \
&& apt-get -yqq update \
&& apt-get -yqq upgrade \
&& apt-get -yqq install --no-install-recommends \
bc \
ca-certificates \
clang-format \
clang-tidy \
curl \
file \
build-essential \
gdb \
ghostscript \
git \
gnupg2 \
gv \
iproute2 \
iputils-ping \
iputils-tracepath \
less \
libcbor-xs-perl \
libjson-xs-perl \
libyaml-cpp-dev \
locales \
lua-posix \
make \
nano \
openssh-client \
parallel \
poppler-utils \
time \
unzip \
valgrind \
vim-nox \
wget \
&& localedef -i en_US -f UTF-8 en_US.UTF-8 \
&& gcc --version \
&& curl -L \
"https://packages.gitlab.com/install/repositories/runner/gitlab-runner/script.deb.sh" \
| bash \
&& sed -i "s/bookworm/buster/" \
/etc/apt/sources.list.d/runner_gitlab-runner.list \
&& apt-get -yqq update \
&& apt-get -yqq install --no-install-recommends \
gitlab-runner \
&& apt-get -yqq autoremove \
&& rm -rf /var/lib/apt/lists/*
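Since this recipe uses BuildKit cache mounts (`RUN --mount=type=cache,...`), a local build needs BuildKit enabled. A plausible invocation, with the Dockerfile path and tag as placeholders (the CI pipeline uses its own paths and tags):
```bash
# Assumed local build command; BuildKit is required for the cache mounts above.
DOCKER_BUILDKIT=1 docker build \
  -f path/to/cuda_base.Dockerfile \
  -t cuda_base:testing \
  .
```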
#!/bin/bash
iatest=$(expr index "$-" i)
#######################################################
# SOURCED ALIAS'S AND SCRIPTS BY zachbrowne.me
#######################################################
# Source global definitions
if [ -f /etc/bashrc ]; then
. /etc/bashrc
fi
# Enable bash programmable completion features in interactive shells
if [ -f /usr/share/bash-completion/bash_completion ]; then
. /usr/share/bash-completion/bash_completion
elif [ -f /etc/bash_completion ]; then
. /etc/bash_completion
fi
#######################################################
# EXPORTS
#######################################################
# Disable the bell
if [[ $iatest > 0 ]]; then bind "set bell-style visible"; fi
# Expand the history size
export HISTFILESIZE=10000
export HISTSIZE=500
# Don't put duplicate lines in the history and do not add lines that start with a space
export HISTCONTROL=erasedups:ignoredups:ignorespace
# Check the window size after each command and, if necessary, update the values of LINES and COLUMNS
shopt -s checkwinsize
# Causes bash to append to history instead of overwriting it so if you start a new terminal, you have old session history
shopt -s histappend
PROMPT_COMMAND='history -a'
# Allow ctrl-S for history navigation (with ctrl-R)
stty -ixon
# Ignore case on auto-completion
# Note: bind used instead of sticking these in .inputrc
if [[ $iatest > 0 ]]; then bind "set completion-ignore-case on"; fi
# Show auto-completion list automatically, without double tab
if [[ $iatest > 0 ]]; then bind "set show-all-if-ambiguous On"; fi
# Set the default editor
export EDITOR=vim
export VISUAL=vim
alias pico='edit'
alias spico='sedit'
alias nano='edit'
alias snano='sedit'
# To have colors for ls and all grep commands such as grep, egrep and zgrep
export CLICOLOR=1
export LS_COLORS='no=00:fi=00:di=00;34:ln=01;36:pi=40;33:so=01;35:do=01;35:bd=40;33;01:cd=40;33;01:or=40;31;01:ex=01;32:*.tar=01;31:*.tgz=01;31:*.arj=01;31:*.taz=01;31:*.lzh=01;31:*.zip=01;31:*.z=01;31:*.Z=01;31:*.gz=01;31:*.bz2=01;31:*.deb=01;31:*.rpm=01;31:*.jar=01;31:*.jpg=01;35:*.jpeg=01;35:*.gif=01;35:*.bmp=01;35:*.pbm=01;35:*.pgm=01;35:*.ppm=01;35:*.tga=01;35:*.xbm=01;35:*.xpm=01;35:*.tif=01;35:*.tiff=01;35:*.png=01;35:*.mov=01;35:*.mpg=01;35:*.mpeg=01;35:*.avi=01;35:*.fli=01;35:*.gl=01;35:*.dl=01;35:*.xcf=01;35:*.xwd=01;35:*.ogg=01;35:*.mp3=01;35:*.wav=01;35:*.xml=00;31:'
#export GREP_OPTIONS='--color=auto' #deprecated
#alias grep="/usr/bin/grep $GREP_OPTIONS"
#unset GREP_OPTIONS
# Color for manpages in less makes manpages a little easier to read
export LESS_TERMCAP_mb=$'\E[01;31m'
export LESS_TERMCAP_md=$'\E[01;31m'
export LESS_TERMCAP_me=$'\E[0m'
export LESS_TERMCAP_se=$'\E[0m'
export LESS_TERMCAP_so=$'\E[01;44;33m'
export LESS_TERMCAP_ue=$'\E[0m'
export LESS_TERMCAP_us=$'\E[01;32m'
#######################################################
# GENERAL ALIAS'S
#######################################################
# Alias's to modified commands
alias cp='cp -i'
alias mv='mv -i'
alias rm='rm -iv'
alias mkdir='mkdir -p'
alias ps='ps auxf'
alias ping='ping -c 10'
alias less='less -R'
alias cls='clear'
alias apt-get='apt-get'
alias multitail='multitail --no-repeat -c'
alias freshclam='freshclam'
alias vi='vim'
alias vis='vim "+set si"'
# Change directory aliases
alias home='cd ~'
alias cd..='cd ..'
alias ..='cd ..'
alias ...='cd ../..'
alias ....='cd ../../..'
alias .....='cd ../../../..'
# cd into the old directory
alias bd='cd "$OLDPWD"'
# Remove a directory and all files
alias rmd='/bin/rm --recursive --force --verbose '
# Alias's for multiple directory listing commands
alias la='ls -Alh' # show hidden files
alias ls='ls -aFh --color=always' # add colors and file type extensions
alias lx='ls -lXBh' # sort by extension
alias lk='ls -lSrh' # sort by size
alias lc='ls -lcrh' # sort by change time
alias lu='ls -lurh' # sort by access time
alias lr='ls -lRh' # recursive ls
alias lt='ls -ltrh' # sort by date
alias lm='ls -alh |more' # pipe through 'more'
alias lw='ls -xAh' # wide listing format
alias ll='ls -Fls' # long listing format
alias labc='ls -lap' #alphabetical sort
alias lf="ls -l | egrep -v '^d'" # files only
alias ldir="ls -l | egrep '^d'" # directories only
# alias chmod commands
alias mx='chmod a+x'
alias 000='chmod -R 000'
alias 644='chmod -R 644'
alias 666='chmod -R 666'
alias 755='chmod -R 755'
alias 777='chmod -R 777'
# Search command line history
alias h="history | grep "
# Search running processes
alias p="ps aux | grep "
alias topcpu="/bin/ps -eo pcpu,pid,user,args | sort -k 1 -r | head -10"
# Search files in the current folder
alias f="find . | grep "
# Count all files (recursively) in the current folder
alias countfiles="for t in files links directories; do echo \`find . -type \${t:0:1} | wc -l\` \$t; done 2> /dev/null"
# To see if a command is aliased, a file, or a built-in command
alias checkcommand="type -t"
# Show all logs in /var/log
alias logs="sudo find /var/log -type f -exec file {} \; | grep 'text' | cut -d' ' -f1 | sed -e's/:$//g' | grep -v '[0-9]$' | xargs tail -f"
# SHA1
alias sha1='openssl sha1'
#######################################################
# SPECIAL FUNCTIONS
#######################################################
# Extracts any archive(s) (if unp isn't installed)
extract () {
for archive in $*; do
if [ -f $archive ] ; then
case $archive in
*.tar.bz2) tar xvjf $archive ;;
*.tar.gz) tar xvzf $archive ;;
*.bz2) bunzip2 $archive ;;
*.rar) rar x $archive ;;
*.gz) gunzip $archive ;;
*.tar) tar xvf $archive ;;
*.tbz2) tar xvjf $archive ;;
*.tgz) tar xvzf $archive ;;
*.zip) unzip $archive ;;
*.Z) uncompress $archive ;;
*.7z) 7z x $archive ;;
*) echo "don't know how to extract '$archive'..." ;;
esac
else
echo "'$archive' is not a valid file!"
fi
done
}
# Searches for text in all files in the current folder
ftext ()
{
# -i case-insensitive
# -I ignore binary files
# -H causes filename to be printed
# -r recursive search
# -n causes line number to be printed
# optional: -F treat search term as a literal, not a regular expression
# optional: -l only print filenames and not the matching lines ex. grep -irl "$1" *
grep -iIHrn --color=always "$1" . | less -r
}
# Copy file with a progress bar
cpp()
{
set -e
strace -q -ewrite cp -- "${1}" "${2}" 2>&1 \
| awk '{
count += $NF
if (count % 10 == 0) {
percent = count / total_size * 100
printf "%3d%% [", percent
for (i=0;i<=percent;i++)
printf "="
printf ">"
for (i=percent;i<100;i++)
printf " "
printf "]\r"
}
}
END { print "" }' total_size=$(stat -c '%s' "${1}") count=0
}
# Copy and go to the directory
cpg ()
{
if [ -d "$2" ];then
cp $1 $2 && cd $2
else
cp $1 $2
fi
}
# Move and go to the directory
mvg ()
{
if [ -d "$2" ];then
mv $1 $2 && cd $2
else
mv $1 $2
fi
}
# Create and go to the directory
mkdirg ()
{
mkdir -p $1
cd $1
}
# Goes up a specified number of directories (i.e. up 4)
up ()
{
local d=""
limit=$1
for ((i=1 ; i <= limit ; i++))
do
d=$d/..
done
d=$(echo $d | sed 's/^\///')
if [ -z "$d" ]; then
d=..
fi
cd $d
}
#Automatically do an ls after each cd
# cd ()
# {
# if [ -n "$1" ]; then
# builtin cd "$@" && ls
# else
# builtin cd ~ && ls
# fi
# }
# Returns the last 2 fields of the working directory
pwdtail ()
{
pwd|awk -F/ '{nlast = NF -1;print $nlast"/"$NF}'
}
# Show the current version of the operating system
ver ()
{
local dtype
dtype=$(distribution)
if [ $dtype == "redhat" ]; then
if [ -s /etc/redhat-release ]; then
cat /etc/redhat-release && uname -a
else
cat /etc/issue && uname -a
fi
elif [ $dtype == "suse" ]; then
cat /etc/SuSE-release
elif [ $dtype == "debian" ]; then
lsb_release -a
# sudo cat /etc/issue && sudo cat /etc/issue.net && sudo cat /etc/lsb_release && sudo cat /etc/os-release # Linux Mint option 2
elif [ $dtype == "gentoo" ]; then
cat /etc/gentoo-release
elif [ $dtype == "mandriva" ]; then
cat /etc/mandriva-release
elif [ $dtype == "slackware" ]; then
cat /etc/slackware-version
else
if [ -s /etc/issue ]; then
cat /etc/issue
else
echo "Error: Unknown distribution"
exit 1
fi
fi
}
# For some reason, rot13 pops up everywhere
rot13 () {
if [ $# -eq 0 ]; then
tr '[a-m][n-z][A-M][N-Z]' '[n-z][a-m][N-Z][A-M]'
else
echo $* | tr '[a-m][n-z][A-M][N-Z]' '[n-z][a-m][N-Z][A-M]'
fi
}
# Trim leading and trailing spaces (for scripts)
trim()
{
local var=$@
var="${var#"${var%%[![:space:]]*}"}" # remove leading whitespace characters
var="${var%"${var##*[![:space:]]}"}" # remove trailing whitespace characters
echo -n "$var"
}
#######################################################
# Set the ultimate amazing command prompt
#######################################################
alias cpu="grep 'cpu ' /proc/stat | awk '{usage=(\$2+\$4)*100/(\$2+\$4+\$5)} END {print usage}' | awk '{printf(\"%.1f\n\", \$1)}'"
function __setprompt
{
local LAST_COMMAND=$? # Must come first!
# Define colors
local LIGHTGRAY="\033[0;37m"
local WHITE="\033[1;37m"
local BLACK="\033[0;30m"
local DARKGRAY="\033[1;30m"
local RED="\033[0;31m"
local LIGHTRED="\033[1;31m"
local GREEN="\033[0;32m"
local LIGHTGREEN="\033[1;32m"
local BROWN="\033[0;33m"
local YELLOW="\033[1;33m"
local BLUE="\033[0;34m"
local LIGHTBLUE="\033[1;34m"
local MAGENTA="\033[0;35m"
local LIGHTMAGENTA="\033[1;35m"
local CYAN="\033[0;36m"
local LIGHTCYAN="\033[1;36m"
local NOCOLOR="\033[0m"
# Show error exit code if there is one
if [[ $LAST_COMMAND != 0 ]]; then
# PS1="\[${RED}\](\[${LIGHTRED}\]ERROR\[${RED}\])-(\[${LIGHTRED}\]Exit Code \[${WHITE}\]${LAST_COMMAND}\[${RED}\])-(\[${LIGHTRED}\]"
PS1="\[${DARKGRAY}\](\[${LIGHTRED}\]ERROR\[${DARKGRAY}\])-(\[${RED}\]Exit Code \[${LIGHTRED}\]${LAST_COMMAND}\[${DARKGRAY}\])-(\[${RED}\]"
if [[ $LAST_COMMAND == 1 ]]; then
PS1+="General error"
elif [ $LAST_COMMAND == 2 ]; then
PS1+="Missing keyword, command, or permission problem"
elif [ $LAST_COMMAND == 126 ]; then
PS1+="Permission problem or command is not an executable"
elif [ $LAST_COMMAND == 127 ]; then
PS1+="Command not found"
elif [ $LAST_COMMAND == 128 ]; then
PS1+="Invalid argument to exit"
elif [ $LAST_COMMAND == 129 ]; then
PS1+="Fatal error signal 1"
elif [ $LAST_COMMAND == 130 ]; then
PS1+="Script terminated by Control-C"
elif [ $LAST_COMMAND == 131 ]; then
PS1+="Fatal error signal 3"
elif [ $LAST_COMMAND == 132 ]; then
PS1+="Fatal error signal 4"
elif [ $LAST_COMMAND == 133 ]; then
PS1+="Fatal error signal 5"
elif [ $LAST_COMMAND == 134 ]; then
PS1+="Fatal error signal 6"
elif [ $LAST_COMMAND == 135 ]; then
PS1+="Fatal error signal 7"
elif [ $LAST_COMMAND == 136 ]; then
PS1+="Fatal error signal 8"
elif [ $LAST_COMMAND == 137 ]; then
PS1+="Fatal error signal 9"
elif [ $LAST_COMMAND -gt 255 ]; then
PS1+="Exit status out of range"
else
PS1+="Unknown error code"
fi
PS1+="\[${DARKGRAY}\])\[${NOCOLOR}\]\n"
else
PS1=""
fi
# Date
PS1+="\[${DARKGRAY}\](\[${CYAN}\]\$(date +%a) $(date +%b-'%-m')" # Date
PS1+="${BLUE} $(date +'%-I':%M:%S%P)\[${DARKGRAY}\])-" # Time
# CPU
PS1+="(\[${MAGENTA}\]CPU $(cpu)%"
# Jobs
PS1+="\[${DARKGRAY}\]:\[${MAGENTA}\]\j"
# Network Connections (for a server - comment out for non-server)
PS1+="\[${DARKGRAY}\]:\[${MAGENTA}\]Net $(awk 'END {print NR}' /proc/net/tcp)"
PS1+="\[${DARKGRAY}\])-"
# User and server
local SSH_IP=`echo $SSH_CLIENT | awk '{ print $1 }'`
local SSH2_IP=`echo $SSH2_CLIENT | awk '{ print $1 }'`
if [ $SSH2_IP ] || [ $SSH_IP ] ; then
PS1+="(\[${RED}\]\u@\h"
else
PS1+="(\[${RED}\]\u"
fi
# Current directory
PS1+="\[${DARKGRAY}\]:\[${BROWN}\]\w\[${DARKGRAY}\])-"
# Total size of files in current directory
PS1+="(\[${GREEN}\]$(/bin/ls -lah | /bin/grep -m 1 total | /bin/sed 's/total //')\[${DARKGRAY}\]:"
# Number of files
PS1+="\[${GREEN}\]\$(/bin/ls -A -1 | /usr/bin/wc -l)\[${DARKGRAY}\])"
# Skip to the next line
PS1+="\n"
# singularity?
if [ -e /environment ] ; then
PS1+="singularity"
fi
if [[ $EUID -ne 0 ]]; then
PS1+="\[${GREEN}\]>\[${NOCOLOR}\] " # Normal user
else
PS1+="\[${RED}\]>\[${NOCOLOR}\] " # Root user
fi
# PS2 is used to continue a command using the \ character
PS2="\[${DARKGRAY}\]>\[${NOCOLOR}\] "
# PS3 is used to enter a number choice in a script
PS3='Please enter a number from above list: '
# PS4 is used for tracing a script in debug mode
PS4='\[${DARKGRAY}\]+\[${NOCOLOR}\] '
}
PROMPT_COMMAND='__setprompt'
@@ -25,6 +25,7 @@ RUN --mount=type=cache,target=/var/cache/apt \
&& apt-get -yqq update \
&& apt-get -yqq install --no-install-recommends \
bc \
bzip2 \
ca-certificates \
ccache \
curl \
@@ -46,6 +47,7 @@ RUN --mount=type=cache,target=/var/cache/apt \
nano \
openssh-client \
parallel \
patch \
poppler-utils \
time \
unzip \
@@ -17,53 +17,35 @@ RUN --mount=type=cache,target=/var/cache/apt \
python3 \
python3-dev \
python3-distutils \
python3-boto3 \
python-is-python3 \
&& rm -rf /var/lib/apt/lists/*
## Setup spack
## parts:
ENV SPACK_ROOT=/opt/spack
ARG SPACK_ORGREPO="spack/spack"
ARG SPACK_VERSION="develop"
ARG SPACK_CHERRYPICKS=""
ADD https://api.github.com/repos/spack/spack/commits/$SPACK_VERSION /tmp/spack.json
RUN echo "Part 1: regular spack install (as in containerize)" \
&& git clone https://github.com/spack/spack.git /tmp/spack-staging \
&& cd /tmp/spack-staging \
&& git checkout $SPACK_VERSION \
ADD https://api.github.com/repos/${SPACK_ORGREPO}/commits/${SPACK_VERSION} /tmp/spack.json
RUN git clone https://github.com/${SPACK_ORGREPO}.git ${SPACK_ROOT} \
&& git -C ${SPACK_ROOT} checkout ${SPACK_VERSION} \
&& if [ -n "$SPACK_CHERRYPICKS" ] ; then \
git cherry-pick -n $SPACK_CHERRYPICKS ; \
git -C ${SPACK_ROOT} cherry-pick -n $SPACK_CHERRYPICKS ; \
fi \
&& cd - \
&& mkdir -p $SPACK_ROOT \
&& cp -r /tmp/spack-staging/bin $SPACK_ROOT/bin \
&& cp -r /tmp/spack-staging/etc $SPACK_ROOT/etc \
&& cp -r /tmp/spack-staging/lib $SPACK_ROOT/lib \
&& cp -r /tmp/spack-staging/share $SPACK_ROOT/share \
&& cp -r /tmp/spack-staging/var $SPACK_ROOT/var \
&& cp -r /tmp/spack-staging/.git $SPACK_ROOT/.git \
&& rm -rf /tmp/spack-staging \
&& echo 'export LD_LIBRARY_PATH=/lib/x86_64-linux-gnu:$LD_LIBRARY_PATH'\
>> $SPACK_ROOT/share/setup-env.sh \
&& ln -s $SPACK_ROOT/share/spack/docker/entrypoint.bash \
/usr/sbin/docker-shell \
&& ln -s $SPACK_ROOT/share/spack/docker/entrypoint.bash \
/usr/sbin/interactive-shell \
&& ln -s $SPACK_ROOT/share/spack/docker/entrypoint.bash \
/usr/sbin/spack-env \
&& echo "Part 2: Set target to generic x86_64" \
&& echo "packages:" > $SPACK_ROOT/etc/spack/packages.yaml \
&& echo " all:" >> $SPACK_ROOT/etc/spack/packages.yaml \
&& echo " target: [x86_64]" >> $SPACK_ROOT/etc/spack/packages.yaml \
&& cat $SPACK_ROOT/etc/spack/packages.yaml \
&& echo "Part 3: Set config to allow use of more cores for builds" \
&& echo "(and some other settings)" \
&& echo "config:" > $SPACK_ROOT/etc/spack/config.yaml \
&& echo " suppress_gpg_warnings: true" \
>> $SPACK_ROOT/etc/spack/config.yaml \
&& echo " build_jobs: 64" >> $SPACK_ROOT/etc/spack/config.yaml \
&& echo " install_tree:" >> $SPACK_ROOT/etc/spack/config.yaml \
&& echo " root: /opt/software" >> $SPACK_ROOT/etc/spack/config.yaml \
&& cat $SPACK_ROOT/etc/spack/config.yaml
&& export PATH=${PATH}:${SPACK_ROOT}/bin \
&& spack config --scope site add "packages:all:require:arch=x86_64" \
&& spack config blame packages \
&& spack config --scope site add "config:suppress_gpg_warnings:true" \
&& spack config --scope site add "config:build_jobs:64" \
&& spack config --scope site add "config:install_tree:root:/opt/software" \
&& spack config blame config
SHELL ["docker-shell"]
......@@ -75,16 +57,32 @@ RUN --mount=type=cache,target=/var/cache/spack-mirror \
&& spack mirror add docker /var/cache/spack-mirror \
&& spack mirror list
## Setup eic-spack buildcache mirrors (FIXME: leaks credentials into layer)
ARG S3_ACCESS_KEY=""
ARG S3_SECRET_KEY=""
RUN --mount=type=cache,target=/var/cache/spack-mirror \
export PATH=$PATH:$SPACK_ROOT/bin \
&& if [ -n "${S3_ACCESS_KEY}" ] ; then \
spack mirror add --scope site \
--s3-endpoint-url https://dtn01.sdcc.bnl.gov:9000 \
--s3-access-key-id ${S3_ACCESS_KEY} \
--s3-access-key-secret ${S3_SECRET_KEY} \
eic-spack s3://eictest/EPIC/spack \
; fi \
&& spack mirror list
## This variable will change whenever either spack.yaml or our spack package
## overrides change, triggering a rebuild
ARG CACHE_BUST="hash"
ARG CACHE_NUKE=""
## Setup our custom package overrides
ENV EICSPACK_ROOT=$SPACK_ROOT/var/spack/repos/eic-spack
ENV EICSPACK_ROOT=${SPACK_ROOT}/var/spack/repos/eic-spack
ARG EICSPACK_ORGREPO="eic/eic-spack"
ARG EICSPACK_VERSION="$SPACK_VERSION"
ADD https://api.github.com/repos/eic/eic-spack/commits/$EICSPACK_VERSION /tmp/eic-spack.json
RUN git clone https://github.com/eic/eic-spack.git ${EICSPACK_ROOT} \
ARG EICSPACK_CHERRYPICKS=""
ADD https://api.github.com/repos/${EICSPACK_ORGREPO}/commits/${EICSPACK_VERSION} /tmp/eic-spack.json
RUN git clone https://github.com/${EICSPACK_ORGREPO}.git ${EICSPACK_ROOT} \
&& git -C ${EICSPACK_ROOT} checkout ${EICSPACK_VERSION} \
&& if [ -n "${EICSPACK_CHERRYPICKS}" ] ; then \
git -C ${EICSPACK_ROOT} cherry-pick -n ${EICSPACK_CHERRYPICKS} ; \
@@ -95,7 +93,7 @@ RUN git clone https://github.com/eic/eic-spack.git ${EICSPACK_ROOT} \
COPY spack.yaml /opt/spack-environment/
RUN rm -r /usr/local \
&& spack env activate /opt/spack-environment/ \
&& spack concretize
&& spack concretize --fresh
## Now execute the main build (or fetch from cache if possible)
@@ -183,23 +181,6 @@ FROM builder as staging
# Garbage collect in environment
RUN cd /opt/spack-environment && spack env activate . && spack gc -y
# Strip all the binaries
# This reduces the image by factor of x2, so worth the effort
# note that we do not strip python libraries as it can cause issues in some cases
RUN du -sh /opt/software/linux-*/gcc-*/* | sort -h \
&& find -L /opt/software/* \
-type d -name site-packages -prune -false \
-o \
-type d -name lib-dynload -prune -false \
-o \
-type f \
-exec realpath '{}' \; \
| xargs file -i \
| grep 'charset=binary' \
| grep 'x-executable\|x-sharedlib' \
| awk -F: '{print $1}' | xargs strip -s \
&& du -sh /opt/software/linux-*/gcc-*/* | sort -h
## Bugfix to address issues loading the Qt5 libraries on Linux kernels prior to 3.15
## See
#https://askubuntu.com/questions/1034313/ubuntu-18-4-libqt5core-so-5-cannot-open-shared-object-file-no-such-file-or-dir
@@ -209,7 +190,9 @@ RUN strip --remove-section=.note.ABI-tag /usr/local/lib/libQt5Core.so
RUN spack debug report \
| sed "s/^/ - /" | sed "s/\* \*\*//" | sed "s/\*\*//" \
>> /etc/jug_info \
&& spack find --no-groups --long --variants | sed "s/^/ - /" >> /etc/jug_info
&& spack find --no-groups --long --variants | sed "s/^/ - /" >> /etc/jug_info \
&& spack graph --dot --installed > /opt/spack-environment/env.dot
COPY eic-shell /usr/local/bin/eic-shell
COPY eic-info /usr/local/bin/eic-info
@@ -24,7 +24,7 @@ export JUGGLER_BEAMLINE_CONFIG_VERSION=$BEAMLINE_CONFIG_VERSION
export JUGGLER_INSTALL_PREFIX=/usr/local
## Export detector libraries
export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:{prefix}/lib
export LD_LIBRARY_PATH={prefix}/lib${{LD_LIBRARY_PATH:+:$LD_LIBRARY_PATH}}
## modify PS1 for this detector version
export PS1="${{PS1:-}}"
@@ -40,7 +40,7 @@ export DETECTOR_CONFIG={detector}
export DETECTOR_VERSION={version}
## Export detector libraries
export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:{prefix}/lib
export LD_LIBRARY_PATH={prefix}/lib${{LD_LIBRARY_PATH:+:$LD_LIBRARY_PATH}}
## modify PS1 for this detector version
export PS1="${{PS1:-}}"
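The `${LD_LIBRARY_PATH:+:$LD_LIBRARY_PATH}` form introduced above only appends the previous value (with its separating colon) when the variable is already set, so an empty variable no longer leaves a stray colon in the path. (The braces are doubled in the file because the snippet is a format template; plain shell is shown below.) A quick illustration, with `/opt/detector` used only as an example prefix:
```bash
prefix=/opt/detector   # example prefix only

# With LD_LIBRARY_PATH unset: no dangling colon is produced
unset LD_LIBRARY_PATH
export LD_LIBRARY_PATH=$prefix/lib${LD_LIBRARY_PATH:+:$LD_LIBRARY_PATH}
echo "$LD_LIBRARY_PATH"    # -> /opt/detector/lib

# With a pre-existing value: the old value is appended after a colon
export LD_LIBRARY_PATH=/usr/local/lib
export LD_LIBRARY_PATH=$prefix/lib${LD_LIBRARY_PATH:+:$LD_LIBRARY_PATH}
echo "$LD_LIBRARY_PATH"    # -> /opt/detector/lib:/usr/local/lib
```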
@@ -11,25 +11,13 @@ FROM ${DOCKER_REGISTRY}${BASE_IMAGE}:${INTERNAL_TAG}
ARG EICWEB="https://eicweb.phy.anl.gov/api/v4/projects"
ARG JUGGLER_VERSION="main"
ARG NPDET_VERSION="master"
## version will automatically bust cache for nightly, as it includes
## the date
ARG JUG_VERSION=1
ADD ${EICWEB}/18/repository/tree?ref=${NPDET_VERSION} /tmp/18.json
RUN cd /tmp \
&& echo " - jug_xl: ${JUG_VERSION}" >> /etc/jug_info \
&& echo "INSTALLING NPDET" \
&& git config --global http.version HTTP/1.1 \
&& git clone -b ${NPDET_VERSION} --depth 1 \
https://eicweb.phy.anl.gov/EIC/NPDet.git \
&& cmake -B build -S NPDet -DCMAKE_CXX_STANDARD=17 \
&& cmake --build build -j12 -- install \
&& pushd NPDet \
&& echo " - NPDet: ${NPDET_VERSION}-$(git rev-parse HEAD)">> /etc/jug_info \
&& popd \
&& rm -rf build NPDet
&& echo " - jug_xl: ${JUG_VERSION}" >> /etc/jug_info
ADD ${EICWEB}/369/repository/tree?ref=${JUGGLER_VERSION} /tmp/369.json
RUN cd /tmp \
@@ -118,3 +106,6 @@ RUN cd /tmp
&& /tmp/setup_detectors.py --prefix /opt/detector --config /tmp/detectors.yaml \
$NIGHTLY_FLAG \
&& rm /tmp/setup_detectors.py
## Hotfix for misbehaving OSG nodes
RUN mkdir /hadoop
#syntax=docker/dockerfile:1.2
ARG BASEIMAGE="intel/oneapi-hpckit:2022.2-devel-ubuntu20.04"
# Minimal container based on Intel oneAPI for up-to-date packages.
# Very lightweight container with a minimal build environment (LOL)
FROM ${BASEIMAGE}
LABEL maintainer="Wouter Deconinck <wouter.deconinck@umanitoba.ca>" \
name="oneapi_base" \
march="amd64"
COPY bashrc /root/.bashrc
ENV CLICOLOR_FORCE=1 \
LANGUAGE=en_US.UTF-8 \
LANG=en_US.UTF-8 \
LC_ALL=en_US.UTF-8
## Install additional packages. Remove the auto-cleanup functionality
## for docker, as we're using the new buildkit cache instead.
## We also install gitlab-runner, from the buster package (as bullseye is not available atm)
RUN --mount=type=cache,target=/var/cache/apt \
rm -f /etc/apt/apt.conf.d/docker-clean \
&& ln -fs /usr/share/zoneinfo/America/New_York /etc/localtime \
&& echo "US/Eastern" > /etc/timezone \
&& apt-get -yqq update \
&& apt-get -yqq upgrade \
&& apt-get -yqq install --no-install-recommends \
bc \
ca-certificates \
clang-format \
clang-tidy \
curl \
file \
build-essential \
gdb \
ghostscript \
git \
gnupg2 \
gv \
iproute2 \
iputils-ping \
iputils-tracepath \
less \
libcbor-xs-perl \
libjson-xs-perl \
locales \
lua-posix \
make \
nano \
openssh-client \
parallel \
poppler-utils \
time \
unzip \
valgrind \
vim-nox \
wget \
&& localedef -i en_US -f UTF-8 en_US.UTF-8 \
&& gcc --version \
&& curl -L \
"https://packages.gitlab.com/install/repositories/runner/gitlab-runner/script.deb.sh" \
| bash \
&& sed -i "s/bookworm/buster/" \
/etc/apt/sources.list.d/runner_gitlab-runner.list \
&& apt-get -yqq update \
&& apt-get -yqq install --no-install-recommends \
gitlab-runner \
&& apt-get -yqq autoremove \
&& rm -rf /var/lib/apt/lists/*
# syntax=docker/dockerfile:1.2
# Container based on Jug_dev with Intel oneAPI support.
# Start with Debian-stable and layer oneAPI on top; this prevents spack compiler errors.
## ========================================================================================
## STAGE 1: spack builder image
## EIC builder image with spack
## ========================================================================================
ARG DOCKER_REGISTRY="eicweb.phy.anl.gov:4567/containers/eic_container/"
ARG BASEIMAGE="debian_base"
# Internal Tag will be set by GitLab CI
ARG INTERNAL_TAG="testing"
FROM ${DOCKER_REGISTRY}${BASEIMAGE}:${INTERNAL_TAG} as builder
## Install some extra spack dependencies
## Do not use Cache mount as it conflicts with oneapi stage
RUN \
rm -f /etc/apt/apt.conf.d/docker-clean \
&& apt-get -yqq update \
&& apt-get -yqq install --no-install-recommends \
python3 \
python3-dev \
python3-distutils \
python-is-python3 \
&& rm -rf /var/lib/apt/lists/*
## Setup spack
## parts:
ARG SPACK_ROOT=/opt/spack
ARG SPACK_VERSION="develop"
ARG SPACK_CHERRYPICKS=""
RUN echo "Part 1: regular spack install (as in containerize)" \
&& git clone https://github.com/spack/spack.git /tmp/spack-staging \
&& cd /tmp/spack-staging \
&& git checkout $SPACK_VERSION \
&& if [ -n "$SPACK_CHERRYPICKS" ] ; then \
git cherry-pick -n $SPACK_CHERRYPICKS ; \
fi \
&& cd - \
&& mkdir -p $SPACK_ROOT/opt/spack \
&& cp -r /tmp/spack-staging/bin $SPACK_ROOT/bin \
&& cp -r /tmp/spack-staging/etc $SPACK_ROOT/etc \
&& cp -r /tmp/spack-staging/lib $SPACK_ROOT/lib \
&& cp -r /tmp/spack-staging/share $SPACK_ROOT/share \
&& cp -r /tmp/spack-staging/var $SPACK_ROOT/var \
&& cp -r /tmp/spack-staging/.git $SPACK_ROOT/.git \
&& rm -rf /tmp/spack-staging \
&& echo 'export LD_LIBRARY_PATH=/lib/x86_64-linux-gnu:$LD_LIBRARY_PATH'\
>> $SPACK_ROOT/share/setup-env.sh \
&& ln -s $SPACK_ROOT/share/spack/docker/entrypoint.bash \
/usr/sbin/docker-shell \
&& ln -s $SPACK_ROOT/share/spack/docker/entrypoint.bash \
/usr/sbin/interactive-shell \
&& ln -s $SPACK_ROOT/share/spack/docker/entrypoint.bash \
/usr/sbin/spack-env \
&& echo "Part 2: Set target to generic x86_64" \
&& echo "packages:" > $SPACK_ROOT/etc/spack/packages.yaml \
&& echo " all:" >> $SPACK_ROOT/etc/spack/packages.yaml \
&& echo " target: [x86_64]" >> $SPACK_ROOT/etc/spack/packages.yaml \
&& cat $SPACK_ROOT/etc/spack/packages.yaml \
&& echo "Part 3: Set config to allow use of more cores for builds" \
&& echo "(and some other settings)" \
&& echo "config:" > $SPACK_ROOT/etc/spack/config.yaml \
&& echo " suppress_gpg_warnings: true" \
>> $SPACK_ROOT/etc/spack/config.yaml \
&& echo " build_jobs: 64" >> $SPACK_ROOT/etc/spack/config.yaml \
&& echo " install_tree:" >> $SPACK_ROOT/etc/spack/config.yaml \
&& echo " root: /opt/software" >> $SPACK_ROOT/etc/spack/config.yaml \
&& cat $SPACK_ROOT/etc/spack/config.yaml
SHELL ["docker-shell"]
## Setup spack buildcache mirrors, including an internal
## spack mirror using the docker build cache, and
## a backup mirror on the internal B010 network
RUN --mount=type=cache,target=/var/cache/spack-mirror \
export OLD_PATH=$PATH \
&& export PATH=$PATH:$SPACK_ROOT/bin \
&& spack mirror add docker /var/cache/spack-mirror \
&& spack mirror list
## This variable will change whenever either spack.yaml or our spack package
## overrides change, triggering a rebuild
ARG CACHE_BUST="hash"
ARG CACHE_NUKE=""
## Setup our custom package overrides
ENV EICSPACK_ROOT=$SPACK_ROOT/var/spack/repos/eic-spack
ARG EICSPACK_VERSION="$SPACK_VERSION"
ARG EICSPACK_CHERRYPICKS=""
RUN git clone https://github.com/eic/eic-spack.git ${EICSPACK_ROOT} \
&& git -C ${EICSPACK_ROOT} checkout ${EICSPACK_VERSION} \
&& if [ -n "${EICSPACK_CHERRYPICKS}" ] ; then \
git -C ${EICSPACK_ROOT} cherry-pick -n ${EICSPACK_CHERRYPICKS} ; \
fi \
&& spack repo add --scope site "${EICSPACK_ROOT}"
## Setup our custom environment
COPY spack.yaml /opt/spack-environment/
RUN rm -r /usr/local \
&& spack env activate /opt/spack-environment/ \
&& spack concretize
## Now execute the main build (or fetch from cache if possible)
## note, no-check-signature is needed to allow the quicker signature-less
## packages from the internal (docker) buildcache
##
## Optional, nuke the buildcache after install, before (re)caching
## This is useful when going to completely different containers,
## or intermittently to keep the buildcache step from taking too much time
##
## Update the local build cache if needed. Consists of 3 steps:
## 1. Remove the B010 network buildcache (silicon)
## 2. Get a list of all packages, and compare with what is already on
## the buildcache (using package hash)
## 3. Add packages that need to be added to buildcache if any
RUN --mount=type=cache,target=/var/cache/spack-mirror \
cd /opt/spack-environment \
&& ls /var/cache/spack-mirror \
&& spack env activate . \
&& status=0 \
&& spack install -j64 --no-check-signature \
|| spack install -j64 --no-check-signature \
|| spack install -j64 --no-check-signature \
|| status=$? \
&& [ -z "${CACHE_NUKE}" ] \
|| rm -rf /var/cache/spack-mirror/build_cache/* \
&& mkdir -p /var/cache/spack-mirror/build_cache \
&& spack buildcache update-index -d /var/cache/spack-mirror \
&& spack buildcache list --allarch --very-long \
| sed '/^$/d;/^--/d;s/@.\+//;s/\([a-z0-9]*\) \(.*\)/\2\/\1/' \
| sort > tmp.buildcache.txt \
&& spack find --format {name}/{hash} | sort \
| comm -23 - tmp.buildcache.txt \
| xargs --no-run-if-empty \
spack buildcache create --allow-root --only package --unsigned \
--directory /var/cache/spack-mirror \
--rebuild-index \
&& spack clean -a \
&& exit $status
## Extra post-spack steps:
## - Python packages
COPY requirements.txt /usr/local/etc/requirements.txt
RUN --mount=type=cache,target=/var/cache/pip \
echo "Installing additional python packages" \
&& cd /opt/spack-environment && spack env activate . \
&& python -m pip install \
--trusted-host pypi.org \
--trusted-host files.pythonhosted.org \
--cache-dir /var/cache/pip \
--requirement /usr/local/etc/requirements.txt \
--no-warn-script-location
# ^ Suppress 'not on PATH' warnings
## Including some small fixes:
## - Somehow PODIO env isn't automatically set,
## - and Gaudi likes BINARY_TAG to be set
RUN cd /opt/spack-environment \
&& echo -n "" \
&& echo "Grabbing environment info" \
&& spack env activate --sh -d . \
| sed "s?LD_LIBRARY_PATH=?&/lib/x86_64-linux-gnu:?" \
| sed '/MANPATH/ s/;$/:;/' \
> /etc/profile.d/z10_spack_environment.sh \
&& cd /opt/spack-environment && spack env activate . \
&& echo -n "" \
&& echo "Add extra environment variables for Jug, Podio and Gaudi" \
&& echo "export PODIO=$(spack location -i podio);" \
>> /etc/profile.d/z10_spack_environment.sh \
&& echo -n "" \
&& echo "Executing cmake patch for dd4hep 16.1" \
&& sed -i "s/FIND_PACKAGE(Python/#&/" /usr/local/cmake/DD4hepBuild.cmake
## make sure we have the entrypoints setup correctly
ENTRYPOINT []
CMD ["bash", "--rcfile", "/etc/profile", "-l"]
USER 0
WORKDIR /
## ========================================================================================
## STAGE 2: staging image with unnecessary packages removed and stripped binaries
## ========================================================================================
FROM builder AS staging
RUN cd /opt/spack-environment && spack env activate . && spack gc -y
# Strip all the binaries
# This reduces the image by a factor of 2, so worth the effort
# note that we do not strip python libraries as it can cause issues in some cases
RUN find -L /usr/local/* \
-type d -name site-packages -prune -false -o \
-type f -not -name "zdll.lib" -not -name libtensorflow-lite.a \
-exec realpath '{}' \; \
| xargs file -i \
| grep 'charset=binary' \
| grep 'x-executable\|x-archive\|x-sharedlib' \
| awk -F: '{print $1}' | xargs strip -s
## Bugfix to address issues loading the Qt5 libraries on Linux kernels prior to 3.15
## See
#https://askubuntu.com/questions/1034313/ubuntu-18-4-libqt5core-so-5-cannot-open-shared-object-file-no-such-file-or-dir
## and links therein for more info
RUN strip --remove-section=.note.ABI-tag /usr/local/lib/libQt5Core.so
RUN spack debug report \
| sed "s/^/ - /" | sed "s/\* \*\*//" | sed "s/\*\*//" \
>> /etc/jug_info \
&& spack find --no-groups --long --variants | sed "s/^/ - /" >> /etc/jug_info
COPY eic-shell /usr/local/bin/eic-shell
COPY eic-info /usr/local/bin/eic-info
COPY entrypoint.sh /usr/local/sbin/entrypoint.sh
COPY eic-env.sh /etc/eic-env.sh
COPY profile.d/a00_cleanup.sh /etc/profile.d
COPY profile.d/z11_jug_env.sh /etc/profile.d
COPY singularity.d /.singularity.d
## Add minio client into /usr/local/bin
ADD https://dl.min.io/client/mc/release/linux-amd64/mc /usr/local/bin
RUN chmod a+x /usr/local/bin/mc
## ========================================================================================
## STAGE 3: Set up Intel OneAPI Toolkit
## Use Intel's HPC-kit and Create Lean image from staging
## ========================================================================================
FROM intel/oneapi-hpckit:2022.2-devel-ubuntu20.04 AS oneapi
COPY bashrc /root/.bashrc
ENV CLICOLOR_FORCE=1 \
LANGUAGE=en_US.UTF-8 \
LANG=en_US.UTF-8 \
LC_ALL=en_US.UTF-8
## Install additional packages. Remove the auto-cleanup functionality
## for docker, as we're using the new buildkit cache instead.
## We install gcc-10 and g++-10 as they are not installed by default in
## Ubuntu 20.04.
## TODO: libyaml-cpp-dev is a dependency for afterburner. We can probably remove
## this once afterburner is added to spack
RUN --mount=type=cache,target=/var/cache/apt \
rm -f /etc/apt/apt.conf.d/docker-clean \
&& ln -fs /usr/share/zoneinfo/America/New_York /etc/localtime \
&& echo "US/Eastern" > /etc/timezone \
&& apt-get -yqq update \
&& apt-get -yqq upgrade \
&& apt-get -yqq install --no-install-recommends \
bc \
ca-certificates \
clang-format \
clang-tidy \
curl \
file \
build-essential \
g++-10 \
gcc-10 \
gdb \
ghostscript \
gfortran-10 \
git \
gnupg2 \
gv \
iproute2 \
iputils-ping \
iputils-tracepath \
less \
libcbor-xs-perl \
libjson-xs-perl \
libyaml-cpp-dev \
locales \
lua-posix \
make \
nano \
openssh-client \
parallel \
poppler-utils \
python3 \
python3-dev \
python3-distutils \
time \
unzip \
valgrind \
vim-nox \
wget \
&& localedef -i en_US -f UTF-8 en_US.UTF-8 \
&& update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-10 100 \
&& update-alternatives --install /usr/bin/g++ g++ /usr/bin/g++-10 100 \
&& update-alternatives --install /usr/bin/gfortran gfortran \
/usr/bin/gfortran-10 100 \
&& gcc --version \
&& curl -L \
"https://packages.gitlab.com/install/repositories/runner/gitlab-runner/script.deb.sh" \
| bash \
&& apt-get -yqq update \
&& apt-get -yqq install --no-install-recommends \
gitlab-runner \
&& apt-get -yqq autoremove \
&& rm -rf /var/lib/apt/lists/*
## copy over everything we need from staging in a single layer
RUN --mount=from=staging,target=/staging \
rm -rf /usr/local \
&& cp -r /staging/opt/spack-environment /opt/spack-environment \
&& cp -r /staging/opt/software /opt/software \
&& cp -r /staging/usr/._local /usr/._local \
&& cd /usr/._local \
&& PREFIX_PATH=$(realpath $(ls | tail -n1)) \
&& echo "Found spack true prefix path to be $PREFIX_PATH" \
&& cd - \
&& ln -s ${PREFIX_PATH} /usr/local \
&& cp /staging/etc/profile.d/*.sh /etc/profile.d/ \
&& cp /staging/etc/eic-env.sh /etc/eic-env.sh \
&& cp /staging/etc/jug_info /etc/jug_info \
&& cp -r /staging/.singularity.d /.singularity.d
## set the jug_dev version and add the afterburner
## TODO: move afterburner to spack when possible
ARG JUG_VERSION=1
ARG AFTERBURNER_VERSION=main
RUN echo "" >> /etc/jug_info \
&& echo " - jug_dev: ${JUG_VERSION}" >> /etc/jug_info
## make entrypoint executable and set up oneAPI environment
RUN chmod +x /usr/local/sbin/entrypoint.sh \
&& echo "source /opt/intel/oneapi/setvars.sh --force 2> /dev/null" \
>> /etc/profile
## make sure we have the entrypoints setup correctly
ENTRYPOINT ["/usr/local/sbin/entrypoint.sh"]
CMD ["bash", "--rcfile", "/etc/profile", "-l"]
USER 0
WORKDIR /
SHELL ["/usr/local/bin/eic-shell"]
......@@ -3,6 +3,8 @@ detectors:
nightly:
default: true
version: main
main:
version: main
22.10.0:
version: 22.10.0
ip:
......@@ -28,15 +30,10 @@ detectors:
ip:
config: ip6
version: 22.11.0
athena:
nightly:
version: master
22.11.3:
version: 22.11.3
ip:
config: ip6
version: master
ecce:
nightly:
version: main
ip:
config: ip6
version: master
version: 22.11.0
22.12.0:
version: 22.12.0
#!/bin/bash
CONTAINER="jug_dev"
VERSION="testing"
ODIR="$PWD"
function print_the_help {
echo "USAGE: ./download_dev.sh [-o DIR] [-v VERSION]"
echo "OPTIONAL ARGUMENTS:"
echo " -o,--outdir Directory to download the container to (D: $ODIR)"
echo " -c,--container Container to download (D: $CONTAINER)"
echo " -v,--version Version to download (D: $VERSION)"
echo " -h,--help Print this message"
echo ""
echo " Download development container into an output directory"
echo ""
echo "EXAMPLE: ./download_dev.sh"
exit
}
echo "WARNING: this container download script is meant for expert usage"
echo " if you don't know what this script does, you are probably"
echo " looking for install.sh"
while [ $# -gt 0 ]; do
key=$1
case $key in
-o|--outdir)
ODIR=$2
shift
shift
;;
-c|--container)
CONTAINER=$2
shift
shift
;;
-v|--version)
VERSION=$2
shift
shift
;;
-h|--help)
print_the_help
exit 0
;;
*)
echo "ERROR: unknown argument: $key"
echo "use --help for more info"
exit 1
;;
esac
done
mkdir -p $ODIR || exit 1
if [ ! -d $ODIR ]; then
echo "ERROR: not a valid directory: $ODIR"
echo "use --help for more info"
exit 1
fi
echo "Deploying development container for eicweb/$CONTAINER:$VERSION to $ODIR"
## Simple download script that fetches the requested container SIF image
## with the python installer in a temporary directory and then moves the
## image into $ODIR (no launcher scripts are created; use install.sh for that)
mkdir -p local/lib || exit 1
## Always deploy the SIF image using the python installer,
## as this is for experts only anyway
SIF=
## work in temp directory
tmp_dir=$(mktemp -d -t ci-XXXXXXXXXX)
pushd $tmp_dir
wget https://eicweb.phy.anl.gov/containers/eic_container/-/raw/master/install.py
chmod +x install.py
./install.py -f -c $CONTAINER -v $VERSION .
SIF=lib/$CONTAINER-$VERSION.sif
chmod +x ${SIF}
## That's all
if [ -z "$SIF" -o ! -f "$SIF" ]; then
echo "ERROR: no singularity image found"
else
echo "Container download succesfull"
fi
## move over the container to our output directory
mv $SIF $ODIR
## cleanup
popd
rm -rf $tmp_dir
#!/usr/bin/env python3
## eic_container: Argonne Universal EIC Container
'''
Deploy the singularity container built by the CI for this version of the software.
The current version is determined from the currently loaded git branch or tag,
unless it is explicitly set on the command line.
Authors:
- Whitney Armstrong <warmstrong@anl.gov>
- Sylvester Joosten <sjoosten@anl.gov>
'''
import os
import argparse
import re
import urllib.request
## Gitlab group and project/program name.
DEFAULT_IMG='jug_xl'
DEFAULT_VERSION='3.0.1'
SHORTCUTS = ['eic-shell']
## URL for the current container (git tag will be filled in by the script)
## components:
## - {ref}:
## - branch/tag --> git branch or tag
## - MR XX --> refs/merge-requests/XX/head
## - nightly --> just use fallback singularity pull
## - {img}: image name
## - {job}: the CI job that built the artifact
CONTAINER_URL = r'https://eicweb.phy.anl.gov/api/v4/projects/290/jobs/artifacts/{ref}/raw/build/{img}.sif?job={job}'
## Docker ref is used as fallback in case regular artifact download fails
## The components are:
## - {img}: image name
## - {tag}: docker tag associated with image
## - master --> testing
## - branch/tag --> branch/tag without leading v
## - MR XX --> unstable (may be incorrect if multiple MRs active)
## - nightly --> nightly
DOCKER_REF = r'docker://eicweb/{img}:{tag}'
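## For illustration only (these values are not used below): with ref='master',
## img='jug_xl' and job='jug_xl:singularity:default', CONTAINER_URL expands to
##   https://eicweb.phy.anl.gov/api/v4/projects/290/jobs/artifacts/master/raw/build/jug_xl.sif?job=jug_xl:singularity:default
## and DOCKER_REF with img='jug_xl', tag='nightly' expands to
##   docker://eicweb/jug_xl:nightly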
## Singularity bind directive
BIND_DIRECTIVE= '-B {0}:{0}'
class UnknownVersionError(Exception):
pass
class ContainerDownloadError(Exception):
pass
class InvalidArgumentError(Exception):
pass
def smart_mkdir(dir):
'''functions as mkdir -p, with a write-check.
Raises an exception if the directory is not writeable.
'''
if not os.path.exists(dir):
try:
os.makedirs(dir)
except Exception as e:
print('ERROR: unable to create directory', dir)
raise e
if not os.access(dir, os.W_OK):
print('ERROR: We do not have the write privileges to', dir)
raise InvalidArgumentError()
## generic launcher bash script to launch the application
_LAUNCHER='''#!/usr/bin/env bash
## Boilerplate to make pipes work
piped_args=
if [ -p /dev/stdin ]; then
# If we want to read the input line by line
while IFS= read line; do
if [ -z "$piped_args" ]; then
piped_args="${{line}}"
else
piped_args="${{piped_args}}\n${{line}}"
fi
done
fi
## Fire off the application wrapper
if [ -n "${{piped_args}}" ] ; then
echo -e "${{piped_args}}" | singularity exec {bind} {container} {exe} $@
else
singularity exec {bind} {container} {exe} $@
fi
'''
def _write_script(path, content):
print(' - creating', path)
with open(path, 'w') as file:
file.write(content)
os.system('chmod +x {}'.format(path))
def make_launcher(app, container, bindir,
bind='', exe=None):
'''Configure and install a launcher.
Generic launcher script to launch applications in this container.
The launcher script calls the desired executable from the singularity image.
As the new images have the environment properly setup, we can accomplish this
without using any wrapper scripts.
Arguments:
- app: our application
- container: absolute path to container
- bindir: absolute launcher install path
Optional:
- bind: singularity bind directives
- exe: executable to be associated with app.
Default is app.
- env: environment directives to be added to the wrapper.
Multiline string. Default is nothing
'''
if not exe:
exe = app
## paths
launcher_path = '{}/{}'.format(bindir, app)
## scripts --> use absolute path for wrapper path inside launcher
launcher = _LAUNCHER.format(container=container,
bind=bind,
exe=exe)
## write our scripts
_write_script(launcher_path, launcher)
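## Example usage (paths are hypothetical): make_launcher('eic-shell',
## '/opt/eic/local/lib/jug_xl-nightly.sif', '/opt/eic/local/bin',
## bind='-B /scratch:/scratch') writes /opt/eic/local/bin/eic-shell, which runs
##   singularity exec -B /scratch:/scratch /opt/eic/local/lib/jug_xl-nightly.sif eic-shell $@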
## Generic module file
_MODULEFILE='''#%Module1.0#####################################################################
##
## for {name} {version}
##
proc ModulesHelp {{ }} {{
puts stderr "This module sets up the environment for the {name} container"
}}
module-whatis "{name} {version}"
# For Tcl script use only
set version {version}
prepend-path PATH {bindir}
'''
def make_modulefile(project, version, moduledir, bindir):
'''Configure and install a modulefile for this project.
Arguments:
- project: project name
- version: project version
- moduledir: root modulefile directory
- bindir: where executables for this project are located
'''
## create our modulefile
content = _MODULEFILE.format(name=project, version=version, bindir=bindir)
fname = '{}/{}'.format(moduledir, version)
print(' - creating', fname)
with open(fname, 'w') as file:
file.write(content)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
'prefix',
help='Install prefix. This is where the container will be deployed.')
parser.add_argument(
'-c', '--container',
dest='container',
default=DEFAULT_IMG,
help='(opt.) Container to install. '
'D: {} (also available: jug_dev, and legacy "eic" container).'.format(DEFAULT_IMG))
parser.add_argument(
'-v', '--version',
dest='version',
# default=project_version(),
default=DEFAULT_VERSION,
help='(opt.) project version. '
'D: {}. For MRs, use mr-XXX.'.format(DEFAULT_VERSION))
parser.add_argument(
'-f', '--force',
action='store_true',
help='Force-overwrite already downloaded container',
default=False)
parser.add_argument(
'-b', '--bind-path',
dest='bind_paths',
action='append',
help='(opt.) extra bind paths for singularity.')
parser.add_argument(
'-m', '--module-path',
dest='module_path',
help='(opt.) Root module path to install a modulefile. '
'D: Do not install a modulefile')
args = parser.parse_args()
print('Deploying', args.container, 'version', args.version)
## Check if our bind paths are valid
bind_directive = ''
if args.bind_paths and len(args.bind_paths):
print('Singularity bind paths:')
for path in args.bind_paths:
print(' -', path)
if not os.path.exists(path):
print('ERROR: path', path, 'does not exist.')
raise InvalidArgumentError()
bind_directive = ' '.join([BIND_DIRECTIVE.format(path) for path in args.bind_paths])
## Naming schemes:
## We need to deduce both the correct git branch and an appropriate
## local version number from the desired version number
## by default we use whatever version number is given in VERSION, but we want
## to allow users to specify either X.Y.Z or vX.Y.Z for versions (same for stable
## branches).
##
## Policy:
## numbered releases: (v)X.Y.Z --> git vX.Y.Z and local X.Y.Z
## stable branches: (v)X.Y-stable --> git vX.Y-stable and local X.Y-stable
## master branch: latest/master --> git master and local stable
## for other branches --> git <BRANCH> and local unstable
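## For illustration, the logic below maps (values taken from the cases handled here):
##   --version master  --> gitlab ref 'master', docker tag 'testing'
##   --version mr-123  --> gitlab ref 'refs/merge-requests/123/head', docker tag 'unstable'
##   --version nightly --> gitlab ref 'master', docker tag 'nightly' (uses the nightly CI job)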
version_docker = None
version_gitlab = None
build_job = '{}:singularity:default'.format(args.container)
## first look for specialty containers
if args.container == 'acts_material_scan':
version_docker = args.version
version_gitlab = 'acts-material-scan' #dashes, not underscores
elif args.version in ('master', 'testing'):
version_docker = 'testing'
version_gitlab = 'master'
elif re.search('[0-9]+\.[0-9]', args.version) is not None:
suffix='-stable'
if re.search('{}$'.format(suffix), args.version):
suffix=''
version_docker = args.version + suffix
version_gitlab = args.version + suffix
if version_docker[0] == 'v':
version_docker = version_docker[1:]
if version_gitlab[0].isdigit():
version_gitlab = 'v{}'.format(version_gitlab)
elif args.version[:3] == 'mr-':
version_docker = 'unstable'
version_gitlab = 'refs/merge-requests/{}/head'.format(args.version[3:])
elif args.version == 'nightly':
version_docker = 'nightly'
version_gitlab = 'master'
build_job = '{}:singularity:nightly'.format(args.container)
else:
## fixme add proper error handling
print('Unknown requested version:', args.version)
raise UnknownVersionError()
## 'master' is always docker-tagged as testing
if version_docker == 'master':
version_docker = 'testing'
## when working with the old container, the build job is just 'singularity'
if args.container == 'eic':
build_job = 'singularity'
## Create our install prefix if needed and ensure it is writable
args.prefix = os.path.abspath(args.prefix)
if not args.module_path:
deploy_local=True
else:
deploy_local=False
print('Install prefix:', args.prefix)
print('Creating install prefix if needed...')
bindir = '{}/bin'.format(args.prefix)
libdir = '{}/lib'.format(args.prefix)
libexecdir = '{}/libexec'.format(args.prefix)
root_prefix = os.path.abspath('{}/..'.format(args.prefix))
dirs = [bindir, libdir, libexecdir]
if not deploy_local:
moduledir = '{}/{}'.format(args.module_path, args.container)
dirs.append(moduledir)
for dir in dirs:
print(' -', dir)
smart_mkdir(dir)
## At this point we know we can write to our desired prefix and that we have a set of
## valid bind paths
## Get the container
## We want to slightly modify our version specifier: if it leads with a 'v' drop the v
img = args.container
## Builder SIF is not built anymore, deprecated
#if args.builder:
#img += "_builder"
container = '{}/{}-{}.sif'.format(libdir, img, version_docker)
if not os.path.exists(container) or args.force:
url = CONTAINER_URL.format(ref=version_gitlab, img=img, job=build_job)
print('Downloading container from:', url)
print('Destination:', container)
try:
urllib.request.urlretrieve(url, container)
except:
print('WARNING: failed to retrieve container artifact')
print('Attempting alternative download from docker registry')
cmd = ['singularity pull', '--force', container, DOCKER_REF.format(img=img, tag=version_docker)]
cmd = ' '.join(cmd)
print('Executing:', cmd)
err = os.system(cmd)
if err:
raise ContainerDownloadError()
else:
print('WARNING: Container found at', container)
print(' ---> run with -f to force a re-download')
if not deploy_local:
make_modulefile(args.container, version_docker, moduledir, bindir)
## configure the application launchers
print('Configuring applications launchers: ')
for prog in SHORTCUTS:
app = prog
exe = prog
if type(prog) == tuple:
app = prog[0]
exe = prog[1]
make_launcher(app, container, bindir,
bind=bind_directive,
exe=exe)
print('Container deployment successful!')
#!/bin/bash
## Simple setup script that installs the container
## in your local environment under $PREFIX/local/lib
## and creates a simple top-level launcher script
## that launches the container for this working directory
## with the $EIC_SHELL_PREFIX variable pointing
## to the $PREFIX/local directory
CONTAINER="jug_xl"
VERSION="nightly"
PREFIX="$PWD"
function print_the_help {
echo "USAGE: ./install.sh [-p PREFIX] [-v VERSION]"
echo "OPTIONAL ARGUMENTS:"
echo " -p,--prefix Working directory to deploy the environment (D: $PREFIX)"
echo " -t,--tmpdir Change tmp directory (D: $([[ -z "$TMPDIR" ]] && echo "/tmp" || echo "$TMPDIR"))"
echo " -n,--no-cvmfs Disable check for local CVMFS (D: enabled)"
echo " -c,--container Container version (D: $CONTAINER)"
echo " -v,--version Version to install (D: $VERSION)"
echo " -h,--help Print this message"
echo ""
echo " Set up containerized development environment."
echo ""
echo "EXAMPLE: ./install.sh"
exit
}
while [ $# -gt 0 ]; do
key=$1
case $key in
-p|--prefix)
PREFIX=$(realpath $2)
shift
shift
;;
-t|--tmpdir)
export TMPDIR=$2
export SINGULARITY_TMPDIR=$2
shift
shift
;;
-n|--no-cvmfs)
DISABLE_CVMFS_USAGE=true
shift
;;
-c|--container)
CONTAINER=$2
shift
shift
;;
-v|--version)
VERSION=$2
shift
shift
;;
-h|--help)
print_the_help
exit 0
;;
*)
echo "ERROR: unknown argument: $key"
echo "use --help for more info"
exit 1
;;
esac
done
## create prefix if needed
mkdir -p $PREFIX || exit 1
pushd $PREFIX
if [ ! -d $PREFIX ]; then
echo "ERROR: not a valid directory: $PREFIX"
echo "use --help for more info"
exit 1
fi
echo "Setting up development environment for eicweb/$CONTAINER:$VERSION"
mkdir -p $PREFIX/local/lib || exit 1
function install_singularity() {
SINGULARITY=
## check for a singularity install
## default singularity if new enough
if [ $(type -P singularity ) ]; then
SINGULARITY=$(which singularity)
SINGULARITY_VERSION=`$SINGULARITY --version`
if [ ${SINGULARITY_VERSION:0:1} = 2 ]; then
## too old, look for something else
SINGULARITY=
fi
fi
if [ -z $SINGULARITY ]; then
## first priority: a known good install (this one is on JLAB)
if [ -d "/apps/singularity/3.7.1/bin/" ]; then
SINGULARITY="/apps/singularity/3.7.1/bin/singularity"
## whatever is in the path is next
elif [ $(type -P singularity ) ]; then
SINGULARITY=$(which singularity)
## cvmfs singularity is last resort (sandbox mode can cause issues)
elif [ -f "/cvmfs/oasis.opensciencegrid.org/mis/singularity/bin/singularity" ]; then
SINGULARITY="/cvmfs/oasis.opensciencegrid.org/mis/singularity/bin/singularity"
## not good...
else
echo "ERROR: no singularity found, please make sure you have singularity in your \$PATH"
exit 1
fi
fi
echo " - Found singularity at $SINGULARITY"
## get singularity version
## we only care if is 2.x or not, so we can use singularity --version
## which returns 2.xxxxx for version 2
SINGULARITY_VERSION=`$SINGULARITY --version`
SIF=
if [ ${SINGULARITY_VERSION:0:1} = 2 ]; then
SIF="$PREFIX/local/lib/${CONTAINER}-${VERSION}.simg"
echo "WARNING: your singularity version $SINGULARITY_VERSION is ancient, we strongly recommend using version 3.x"
echo "We will attempt to use a fall-back SIMG image to be used with this singularity version"
if [ -f /gpfs02/eic/athena/${CONTAINER}-${VERSION}.simg ]; then
ln -sf /gpfs02/eic/athena/${CONTAINER}-${VERSION}.simg ${SIF}
else
echo "Attempting last-resort singularity pull for old image"
echo "This may take a few minutes..."
INSIF=`basename ${SIF}`
singularity pull --name "${INSIF}" docker://eicweb/$CONTAINER:$VERSION
mv ${INSIF} $SIF
chmod +x ${SIF}
unset INSIF
fi
## we are in sane territory, yay!
else
## check if we can just use cvmfs for the image
SIF="$PREFIX/local/lib/${CONTAINER}-${VERSION}.sif"
if [ -z "$DISABLE_CVMFS_USAGE" -a -d /cvmfs/singularity.opensciencegrid.org/eicweb/${CONTAINER}:${VERSION} ]; then
SIF="$PREFIX/local/lib/${CONTAINER}-${VERSION}"
## need to cleanup in this case, else it will try to make a subdirectory
rm -rf ${SIF}
ln -sf /cvmfs/singularity.opensciencegrid.org/eicweb/${CONTAINER}:${VERSION} ${SIF}
elif [ -f /cvmfs/eic.opensciencegrid.org/singularity/athena/${CONTAINER}_v${VERSION}.sif ]; then
ln -sf /cvmfs/eic.opensciencegrid.org/singularity/athena/${CONTAINER}_v${VERSION}.sif ${SIF}
elif [ -f /gpfs02/cvmfst0/eic.opensciencegrid.org/singularity/athena/${CONTAINER}_v${VERSION}.sif ]; then
ln -sf /gpfs02/cvmfst0/eic.opensciencegrid.org/singularity/athena/${CONTAINER}_v${VERSION}.sif ${SIF}
## check if we have an internal CI image we will use for testing purposes
elif [ -f $PWD/.gitlab-ci/${CONTAINER}-${VERSION}.sif ]; then
ln -sf $PWD/.gitlab-ci/${CONTAINER}-${VERSION}.sif ${SIF}
## if not, download the container to the system
else
## get the python installer and run the old-style install
## work in temp directory
tmp_dir=$(mktemp -d -t ci-XXXXXXXXXX)
pushd $tmp_dir
wget https://eicweb.phy.anl.gov/containers/eic_container/-/raw/master/install.py
chmod +x install.py
./install.py -f -c $CONTAINER -v $VERSION .
INSIF=lib/`basename ${SIF}`
mv $INSIF $SIF
chmod +x ${SIF}
## cleanup
popd
rm -rf $tmp_dir
unset INSIF
fi
fi
echo $SIF
ls $SIF > /dev/null 2>&1 && GOOD_SIF=1
if [ -z "$SIF" -o -z "$GOOD_SIF" ]; then
echo "ERROR: no singularity image found"
exit 1
else
echo " - Deployed ${CONTAINER} image: $SIF"
fi
## We want to make sure the root directory of the install directory
## is always bound. We also check for the existence of a few standard
## locations (/scratch /volatile /cache) and bind those too if found
echo " - Determining additional bind paths"
BINDPATH=${SINGULARITY_BINDPATH}
echo " --> system bindpath: $BINDPATH"
PREFIX_ROOT="/$(realpath $PREFIX | cut -d "/" -f2)"
for dir in /w /work /scratch /volatile /cache /gpfs /gpfs01 /gpfs02 $PREFIX_ROOT; do
## only add directories once
if [[ ${BINDPATH} =~ $(basename $dir) ]]; then
continue
fi
if [ -d $dir ]; then
echo " --> $dir"
BINDPATH=${dir}${BINDPATH:+,$BINDPATH}
fi
done
## create a new top-level eic-shell launcher script
## that sets the EIC_SHELL_PREFIX and then starts singularity
cat << EOF > eic-shell
#!/bin/bash
## capture environment setup for upgrades
CONTAINER=$CONTAINER
TMPDIR=$TMPDIR
VERSION=$VERSION
PREFIX=$PREFIX
DISABLE_CVMFS_USAGE=${DISABLE_CVMFS_USAGE}
function print_the_help {
echo "USAGE: ./eic-shell [OPTIONS] [ -- COMMAND ]"
echo "OPTIONAL ARGUMENTS:"
echo " -u,--upgrade Upgrade the container to the latest version"
echo " -n,--no-cvmfs Disable check for local CVMFS when updating. (D: enabled)"
echo " -h,--help Print this message"
echo ""
echo " Start the eic-shell containerized software environment (Singularity version)."
echo ""
echo "EXAMPLES: "
echo " - Start an interactive shell: ./eic-shell"
echo " - Upgrade the container: ./eic-shell --upgrade"
echo " - Execute a single command: ./eic-shell -- <COMMAND>"
echo ""
exit
}
UPGRADE=
while [ \$# -gt 0 ]; do
key=\$1
case \$key in
-u|--upgrade)
UPGRADE=1
shift
;;
-n|--no-cvmfs)
DISABLE_CVMFS_USAGE=true
shift
;;
-h|--help)
print_the_help
exit 0
;;
--)
shift
break
;;
*)
echo "ERROR: unknown argument: \$key"
echo "use --help for more info"
exit 1
;;
esac
done
if [ ! -z \${UPGRADE} ]; then
echo "Upgrading eic-shell..."
if [ -z "\$DISABLE_CVMFS_USAGE" -a -d /cvmfs/singularity.opensciencegrid.org/eicweb/\${CONTAINER}:\${VERSION} ]; then
echo ""
echo "Note: You cannot manually update the container as you are using the CVMFS version."
echo " The container will automatically update every 24 hours."
echo " You can override this by setting the '--no-cvmfs' flag, which will"
echo " instantiate a local version."
echo " This is only recommended for expert usage."
echo ""
echo "Exiting without upgrade"
exit 0
fi
FLAGS="-p \${PREFIX} -v \${VERSION}"
if [ ! -z \${TMPDIR} ]; then
FLAGS="\${FLAGS} -t \${TMPDIR}"
fi
if [ ! -z \${DISABLE_CVMFS_USAGE} ]; then
FLAGS="\${FLAGS} --no-cvmfs"
fi
curl https://eicweb.phy.anl.gov/containers/eic_container/-/raw/master/install.sh \
| bash -s -- \${FLAGS}
echo "eic-shell upgrade sucessful"
exit 0
fi
export EIC_SHELL_PREFIX=$PREFIX/local
export SINGULARITY_BINDPATH=$BINDPATH
\${SINGULARITY:-$SINGULARITY} exec \${SINGULARITY_OPTIONS:-} \${SIF:-$SIF} eic-shell \$@
EOF
chmod +x eic-shell
echo " - Created custom eic-shell excecutable"
}
function install_docker() {
## check for docker install
DOCKER=$(which docker)
if [ -z ${DOCKER} ]; then
echo "ERROR: no docker install found, docker is required for the docker-based install"
fi
echo " - Found docker at ${DOCKER}"
IMG=eicweb/${CONTAINER}:${VERSION}
docker pull ${IMG}
echo " - Deployed ${CONTAINER} image: ${IMG}"
## We want to make sure the root directory of the install directory
## is always bound. We also check for the existence of a few standard
## locations (/Volumes /Users /tmp) and bind those too if found
echo " - Determining mount paths"
PREFIX_ROOT="/$(realpath $PREFIX | cut -d "/" -f2)"
MOUNT=""
echo " --> $PREFIX_ROOT"
for dir in /Volumes /Users /tmp; do
## only add directories once
if [[ ${MOUNT} =~ $(basename $dir) ]]; then
continue
fi
if [ -d $dir ]; then
echo " --> $dir"
MOUNT="$MOUNT -v $dir:$dir"
fi
done
echo " - Docker mount directive: '$MOUNT'"
PLATFORM_FLAG=''
if [ `uname -m` = 'arm64' ]; then
PLATFORM_FLAG='--platform linux/amd64'
echo " - Additional platform flag to run on arm64"
fi
## create a new top-level eic-shell launcher script
## that sets the EIC_SHELL_PREFIX and then starts docker
cat << EOF > eic-shell
#!/bin/bash
## capture environment setup for upgrades
CONTAINER=$CONTAINER
TMPDIR=$TMPDIR
VERSION=$VERSION
PREFIX=$PREFIX
DISABLE_CVMFS_USAGE=${DISABLE_CVMFS_USAGE}
function print_the_help {
echo "USAGE: ./eic-shell [OPTIONS] [ -- COMMAND ]"
echo "OPTIONAL ARGUMENTS:"
echo " -u,--upgrade Upgrade the container to the latest version"
echo " -h,--help Print this message"
echo ""
echo " Start the eic-shell containerized software environment (Docker version)."
echo ""
echo "EXAMPLES: "
echo " - Start an interactive shell: ./eic-shell"
echo " - Upgrade the container: ./eic-shell --upgrade"
echo " - Execute a single command: ./eic-shell -- <COMMAND>"
echo ""
exit
}
UPGRADE=
while [ \$# -gt 0 ]; do
key=\$1
case \$key in
-u|--upgrade)
UPGRADE=1
shift
;;
-h|--help)
print_the_help
exit 0
;;
--)
shift
break
;;
*)
echo "ERROR: unknown argument: \$key"
echo "use --help for more info"
exit 1
;;
esac
done
if [ ! -z \${UPGRADE} ]; then
echo "Upgrading eic-shell..."
docker pull $IMG || exit 1
echo "eic-shell upgrade sucessful"
exit 0
fi
docker run $PLATFORM_FLAG $MOUNT -w=$PWD -it --rm -e EIC_SHELL_PREFIX=$PREFIX/local $IMG eic-shell \$@
EOF
chmod +x eic-shell
echo " - Created custom eic-shell excecutable"
}
## detect OS
OS=`uname -s`
CPU=`uname -m`
case ${OS} in
Linux)
echo " - Detected OS: Linux"
echo " - Detected CPU: $CPU"
if [ "$CPU" = "arm64" ]; then
install_docker
else
install_singularity
fi
;;
Darwin)
echo " - Detected OS: MacOS"
echo " - Detected CPU: $CPU"
install_docker
;;
*)
echo "ERROR: OS '${OS}' not currently supported"
exit 1
;;
esac
popd
echo "Environment setup succesfull"
echo "You can start the development environment by running './eic-shell'"
# Pass the buck
curl -L https://github.com/eic/eic-shell/raw/main/install.sh | exec bash -s -- "$@"
spack:
specs:
- acts@19.10.0 +dd4hep +identification +json +tgeo +ipo +examples +fatras +geant4 +python
- acts@21.1.0 +dd4hep +identification +json +tgeo +examples +fatras +geant4 +python
- actsvg@0.4.26 +examples
- acts-dd4hep@1.0.1
- afterburner@0.1.2 +root +zlib
- cairo@1.16.0 +fc+ft+X+pdf+gobject
- clhep@2.4.6.0 cxxstd=17
- cli11@2.1.1
- cmake@3.24.3
- cnpy@master
- cppcoro@git.10bbcdbf2be3ad3aa56febcf4c7662d771460a99
- dawn@3_91a
- dawncut@1_54a
- dd4hep@1.23 +ddg4 +ddcad +hepmc3 +ipo +lcio
- edm4eic@1.2.1 cxxstd=17 -ipo
- edm4hep@0.7 cxxstd=17 -ipo
- eicrecon@0.3.5
- dd4hep@1.23 +ddg4 +ddcad +hepmc3 +lcio
- edm4eic@1.2.2 cxxstd=17
- edm4hep@0.7.2 cxxstd=17
- eic-smear@1.1.10
- eicrecon@0.4.0
- eigen@3.4.0
- emacs
- fastjet@3.4.0
- fmt@8.1.1 +shared cxxstd=17
- gaudi@36.7 +aida
- geant4@11.0.3 cxxstd=17 +opengl +vecgeom +x11 +qt +threads
- geant4@11.1.0 cxxstd=17 +opengl +vecgeom +x11 +qt +threads
- harfbuzz@5.1.0
- hepmc3@3.2.5 +python +rootio
- heppdt@2.06.01
- igprof@5.9.16
- imagemagick@7.0.8-7
- intel-tbb@2020.3
- irt@1.0.0
- jana2@2.0.8 +root +zmq
- jana2@2.0.8 -ipo +root +zmq
- madx@5.08.01
- mesa@22.1.2 +glx -llvm +opengl +opengles +osmesa
- nlohmann-json@3.11.2
- npsim@1.1.0 +geocad +http
- opencascade@7.6.3
- pkg-config@0.29.2
- podio@0.15
- prmon@3.0.2 +plot
- pythia8@8.306 +fastjet
- python@3.10.8
- py-numpy@1.23.4
......@@ -39,9 +46,9 @@ spack:
- py-toml@0.10.2
- qt@5.15.5 +opengl
- root@6.26.06 cxxstd=17
+fftw +fortran +gdml +http +mlp +pythia8
+fftw +fortran +gdml +http -ipo +mlp +pythia8
+root7 +tmva +vc -webgui +xrootd +ssl
- spdlog@1.10.0
- spdlog@1.10.0 +fmt_external
- stow@2.3.1
- tensorflow-lite@2.8.0 -xnnpack
- xrootd@5.3.2 cxxstd=17 +python
......@@ -51,5 +58,5 @@ spack:
all:
compiler: [gcc]
variants:
build_type=Release
+ipo build_type=Release
view: /usr/local