Skip to content
Snippets Groups Projects
Commit 6a5ab680 authored by Ujwal Kundur's avatar Ujwal Kundur
Browse files

Initial integration testing

Modified GitLab CI Config for a minimal test run
parent e367aff2
No related branches found
No related tags found
1 merge request!306OneAPI Integration Testing
...@@ -16,7 +16,7 @@ variables: ...@@ -16,7 +16,7 @@ variables:
## adc9f887eac78a81bb8189d603f4dc45ed3509c1: acts-dd4hep: new package; acts: new version ## adc9f887eac78a81bb8189d603f4dc45ed3509c1: acts-dd4hep: new package; acts: new version
## We need to enable Docker Buildkit to use cache mounts and better ## We need to enable Docker Buildkit to use cache mounts and better
## build performance overal ## build performance overall
DOCKER_BUILDKIT: 1 DOCKER_BUILDKIT: 1
## Dockerhub registry ## Dockerhub registry
...@@ -39,10 +39,8 @@ variables: ...@@ -39,10 +39,8 @@ variables:
stages: stages:
- config - config
- build:base ## base OS image - build:oneapi_jug ## OneAPI + Jug_Dev Image
- build:jug ## jug container images - deploy ## build/deploy singularity images
- deploy ## build/deploy singularity images
- test
- finalize - finalize
default: default:
...@@ -167,6 +165,11 @@ version: ...@@ -167,6 +165,11 @@ version:
## debian_base --> jug_dev --> jug_xl ## debian_base --> jug_dev --> jug_xl
## ---------------> jug_sim ## ---------------> jug_sim
## ---------------> jug_ml ## ---------------> jug_ml
## debian_stable_base --> oneapi_jug_dev
## oneapi_jug_dev + jug_xl --> oneapi_jug_xl
## TODO
## oneapi_runtime + jug_xl --> oneapi_prod
debian_base:default: debian_base:default:
extends: .build extends: .build
...@@ -204,151 +207,26 @@ cuda_base:default: ...@@ -204,151 +207,26 @@ cuda_base:default:
containers/cuda containers/cuda
- !reference [.build, script] - !reference [.build, script]
jug_dev:default:
extends: .build
stage: build:jug
needs:
- version
- debian_base:default
variables:
BUILD_IMAGE: "jug_dev"
script:
## calculate a hash based on the spack.yaml file and the spack directory
## and use this spack as a docker variable to force a rebuild when there
## is a change (versus rerun from cache)
- PACKAGE_HASH=$(tar cf - spack* | sha1sum | head -c40)
- echo "PACKAGE_HASH= ${PACKAGE_HASH}"
## move spack directory and spack.yaml into the container build directory
- cp -r spack containers/jug
- cp -r spack.yaml containers/jug/spack/spack.yaml
- CACHE_FLAG=""
- |
if [ $FORCE_NOCACHE = 1 ]; then
echo "FORCE_NOCACHE set"
export CACHE_FLAG="--no-cache"
fi
## Optionally build the raw builder image
- test ${EXPORT_BUILDER} = 1 && docker build ${CACHE_FLAG}
-t ${CI_REGISTRY_IMAGE}/${BUILD_IMAGE}:builder-${INTERNAL_TAG}
--target=builder
-f containers/jug/dev.Dockerfile
--build-arg SPACK_VERSION="${SPACK_VERSION}"
--build-arg SPACK_CHERRYPICKS="${SPACK_CHERRYPICKS}"
--build-arg CACHE_BUST=${PACKAGE_HASH}
--build-arg INTERNAL_TAG=${INTERNAL_TAG}
--build-arg JUG_VERSION=${INTERNAL_TAG}-$(git rev-parse HEAD)
containers/jug
## now build our image
- docker build -t ${CI_REGISTRY_IMAGE}/${BUILD_IMAGE}:${INTERNAL_TAG} ${CACHE_FLAG}
-f containers/jug/dev.Dockerfile
--build-arg SPACK_VERSION="${SPACK_VERSION}"
--build-arg SPACK_CHERRYPICKS="${SPACK_CHERRYPICKS}"
--build-arg CACHE_BUST=${PACKAGE_HASH}
--build-arg INTERNAL_TAG=${INTERNAL_TAG}
--build-arg JUG_VERSION=${INTERNAL_TAG}-$(git rev-parse HEAD)
containers/jug
## push builder image do DH if desired
- test ${EXPORT_BUILDER} = 1 && ./gitlab-ci/docker_push.sh
-i ${BUILD_IMAGE} -l builder-${INTERNAL_TAG}
-n ${DOCKER_NTRIES} -t ${DOCKER_WAIT_TIME}
builder-${EXPORT_TAG} --dockerhub
## standard exports
- !reference [.build, script]
jug_xl:default: oneapi_jug_dev:default:
extends: .build extends: .build
stage: build:jug stage: build:oneapi_jug
resource_group: build
needs: needs:
- version - version
- jug_dev:default
variables: variables:
BUILD_IMAGE: "jug_xl" BUILD_IMAGE: "oneapi_jug_dev"
script: script:
- cp detectors.yaml containers/jug ## Copy spack directory and spack.yaml into build context
- cp -r spack containers/oneapi/
- cp spack.yaml containers/oneapi/spack/spack.yaml
## Copy jug_dev files into build context
- cp -r containers/jug/* containers/oneapi
- docker build -t ${CI_REGISTRY_IMAGE}/${BUILD_IMAGE}:${INTERNAL_TAG} - docker build -t ${CI_REGISTRY_IMAGE}/${BUILD_IMAGE}:${INTERNAL_TAG}
-f containers/jug/xl.Dockerfile -f containers/oneapi/dev.Dockerfile
--build-arg INTERNAL_TAG=${INTERNAL_TAG} containers/oneapi
--build-arg JUGGLER_VERSION=${JUGGLER_VERSION}
--build-arg NPDET_VERSION=${NPDET_VERSION}
--build-arg EICD_VERSION=${EICD_VERSION}
--build-arg AFTERBURNER_VERSION=${AFTERBURNER_VERSION}
--build-arg JUG_VERSION=${INTERNAL_TAG}-$(git rev-parse HEAD)
containers/jug
- !reference [.build, script] - !reference [.build, script]
jug_xl:nightly:
extends: .build
stage: build:jug
rules:
- if: '$CI_COMMIT_BRANCH == "master"'
when: on_success
- if: '$CI_PIPELINE_SOURCE == "merge_request_event"'
when: on_success
- when: never
needs:
- version
- jug_dev:default
variables:
BUILD_IMAGE: "jug_xl"
script:
- cp detectors.yaml containers/jug
- docker build -t ${CI_REGISTRY_IMAGE}/${BUILD_IMAGE}:${NIGHTLY_TAG}
-f containers/jug/xl.Dockerfile
--build-arg INTERNAL_TAG=${INTERNAL_TAG}
--build-arg JUG_VERSION=nightly-$(date +%Y-%m-%d_%H-%M-%S)-${INTERNAL_TAG}-$(git rev-parse HEAD)
--build-arg NIGHTLY=1
containers/jug
- |
PUSH_FLAG=""
if [ "$CI_PIPELINE_SOURCE" == "merge_request_event" ]; then
PUSH_FLAG="--eicweb"
fi
- ./gitlab-ci/docker_push.sh -i ${BUILD_IMAGE} -l ${NIGHTLY_TAG}
-n $DOCKER_NTRIES -t $DOCKER_WAIT_TIME
${NIGHTLY_TAG} ${PUSH_FLAG}
- if [ -z "${PUSH_FLAG}" ] ; then
./gitlab-ci/docker_push.sh -i ${BUILD_IMAGE} -l ${NIGHTLY_TAG}
-n $DOCKER_NTRIES -t $DOCKER_WAIT_TIME
${NIGHTLY_TAG}-$(date +%Y-%m-%d) --dockerhub ;
fi
jug_xl:feature:
extends: .build
stage: build:jug
rules:
- if: '$CI_COMMIT_BRANCH == "master"'
when: on_success
- when: never
needs:
- version
#- jug_xl:default
- jug_xl:nightly
variables:
BUILD_IMAGE: "jug_xl"
parallel:
matrix:
- DETECTOR: athena
DETECTOR_BRANCH:
- canyonlands
- deathvalley
- deathvalley-1.5T
script:
- docker build -t ${CI_REGISTRY_IMAGE}/${BUILD_IMAGE}:${INTERNAL_TAG}-${DETECTOR}-${DETECTOR_BRANCH}
-f containers/jug/feature.Dockerfile
--build-arg INTERNAL_TAG=${NIGHTLY_TAG}
--build-arg DETECTOR=${DETECTOR}
--build-arg DETECTOR_BRANCH=${DETECTOR_BRANCH}
containers/jug
- |
PUSH_FLAG=""
if [ "$CI_PIPELINE_SOURCE" == "merge_request_event" ]; then
PUSH_FLAG="--eicweb"
fi
FEATURE_EXPORT_TAG="${VERSION_SHORT}-${DETECTOR_BRANCH}-stable"
- ./gitlab-ci/docker_push.sh -i ${BUILD_IMAGE} -l ${INTERNAL_TAG}-${DETECTOR}-${DETECTOR_BRANCH}
-n ${DOCKER_NTRIES} -t ${DOCKER_WAIT_TIME}
${FEATURE_EXPORT_TAG} ${PUSH_FLAG}
.singularity: .singularity:
stage: deploy stage: deploy
...@@ -365,74 +243,15 @@ jug_xl:feature: ...@@ -365,74 +243,15 @@ jug_xl:feature:
- mkdir build - mkdir build
- singularity pull build/${BUILD_IMAGE}.sif docker://${CI_REGISTRY_IMAGE}/${BUILD_IMAGE}:${INTERNAL_TAG} - singularity pull build/${BUILD_IMAGE}.sif docker://${CI_REGISTRY_IMAGE}/${BUILD_IMAGE}:${INTERNAL_TAG}
jug_dev:singularity:default: oneapi_jug_dev:singularity:default:
extends: .singularity
needs:
- version
- jug_dev:default
variables:
BUILD_IMAGE: "jug_dev"
jug_xl:singularity:default:
extends: .singularity extends: .singularity
needs:
- version
- jug_xl:default
variables:
BUILD_IMAGE: "jug_xl"
jug_xl:singularity:nightly:
stage: deploy
extends: .singularity
needs:
- version
- jug_xl:nightly
variables:
BUILD_IMAGE: "jug_xl"
script:
- mkdir build
- singularity pull build/${BUILD_IMAGE}.sif docker://${CI_REGISTRY_IMAGE}/${BUILD_IMAGE}:${NIGHTLY_TAG}
## trigger juggler rebuild on nightly schedule to ensure both images remain in sync
juggler:master:
stage: deploy
rules:
- if: '$NIGHTLY != "0" && $CI_COMMIT_BRANCH == "master"'
when: on_success
- when: never
needs: needs:
- version - version
- jug_xl:nightly - oneapi_jug_dev:default
variables: variables:
TRIGGERED_BY_NIGHTLY: 1 BUILD_IMAGE: "oneapi_jug_dev"
JUGGLER_VERSION: master
EICD_VERSION: master
NPDET_VERSION: master
DETECTOR_VERSION: master
IP6_VERSION: master
trigger:
project: EIC/juggler
allow_failure: true
.test:
image: eicweb.phy.anl.gov:4567/containers/eic_container/jug_xl:${NIGHTLY_TAG}
stage: test
needs:
- version
- jug_xl:nightly
rules:
- if: '$CI_COMMIT_BRANCH == "master"'
when: on_success
- if: '$CI_PIPELINE_SOURCE == "merge_request_event"'
when: on_success
- when: never
before_script:
- echo "Testing the new container setup"
after_script:
- echo "Testing completed"
include:
- local: 'tests/tutorial/config.yml'
cleanup: cleanup:
stage: finalize stage: finalize
......
File moved
# syntax=docker/dockerfile:1.2
# Container based on Jug_dev with Intel oneAPI support.

## ========================================================================================
## STAGE 1: Set up Intel OneAPI Toolkit
## Use Intel's HPC-kit
## ========================================================================================
## NOTE(review): DOCKER_REGISTRY and INTERNAL_TAG are not referenced in this stage;
## presumably they are consumed by CI --build-arg plumbing or a trimmed section -- confirm.
ARG DOCKER_REGISTRY="eicweb.phy.anl.gov:4567/containers/eic_container/"
ARG INTERNAL_TAG="testing"

FROM intel/oneapi-hpckit:2022.1.2-devel-ubuntu18.04 as oneapi

## Shell configuration for interactive root sessions
COPY bashrc /root/.bashrc

## Force colored output and a consistent UTF-8 locale for all tools
ENV CLICOLOR_FORCE=1 \
    LANGUAGE=en_US.UTF-8 \
    LANG=en_US.UTF-8 \
    LC_ALL=en_US.UTF-8

## Install additional packages. Remove the auto-cleanup functionality
## for docker, as we're using the new buildkit cache instead.
## We also install gitlab-runner, from the buster package (as bullseye is not available atm)
## TODO: libyaml-cpp-dev is a dependency for afterburner. We can probably remove
## this once afterburner is added to spack
## NOTE(review): python-is-python3 is not in stock Ubuntu 18.04 repos -- presumably the
## oneAPI base image provides an extra source for it; confirm the install succeeds.
RUN --mount=type=cache,target=/var/cache/apt \
    rm -f /etc/apt/apt.conf.d/docker-clean \
 && ln -fs /usr/share/zoneinfo/America/New_York /etc/localtime \
 && echo "US/Eastern" > /etc/timezone \
 && apt-get -yqq update \
 && apt-get -yqq upgrade \
 && apt-get -yqq install --no-install-recommends \
        bc \
        ca-certificates \
        clang-format \
        clang-tidy \
        curl \
        file \
        build-essential \
        gdb \
        ghostscript \
        git \
        gnupg2 \
        gv \
        iproute2 \
        iputils-ping \
        iputils-tracepath \
        less \
        libcbor-xs-perl \
        libjson-xs-perl \
        libyaml-cpp-dev \
        locales \
        lua-posix \
        make \
        nano \
        openssh-client \
        parallel \
        poppler-utils \
        python3 \
        python3-dev \
        python3-distutils \
        python-is-python3 \
        time \
        unzip \
        valgrind \
        vim-nox \
        wget \
 && localedef -i en_US -f UTF-8 en_US.UTF-8 \
 && gcc --version \
 && curl -L \
        "https://packages.gitlab.com/install/repositories/runner/gitlab-runner/script.deb.sh" \
    | bash \
 && sed -i "s/bookworm/buster/" \
        /etc/apt/sources.list.d/runner_gitlab-runner.list \
 && apt-get -yqq update \
 && apt-get -yqq install --no-install-recommends \
        gitlab-runner \
 && apt-get -yqq autoremove \
 && rm -rf /var/lib/apt/lists/*
## ========================================================================================
## STAGE 2: spack builder image
## EIC builder image with spack
## ========================================================================================
FROM oneapi as builder

## Setup spack
## parts:
ARG SPACK_ROOT=/opt/spack
ARG SPACK_VERSION="develop"
ARG SPACK_CHERRYPICKS=""
## Part 1 clones spack at the pinned version (plus optional cherry-picked commits) and
## installs only the pieces needed at runtime; Parts 2 and 3 write site-wide spack
## configuration (generic x86_64 target, parallel builds, install tree under /opt/software).
RUN echo "Part 1: regular spack install (as in containerize)" \
 && git clone https://github.com/spack/spack.git /tmp/spack-staging \
 && cd /tmp/spack-staging \
 && git checkout $SPACK_VERSION \
 && if [ -n "$SPACK_CHERRYPICKS" ] ; then \
      git cherry-pick -n $SPACK_CHERRYPICKS ; \
    fi \
 && cd - \
 && mkdir -p $SPACK_ROOT/opt/spack \
 && cp -r /tmp/spack-staging/bin $SPACK_ROOT/bin \
 && cp -r /tmp/spack-staging/etc $SPACK_ROOT/etc \
 && cp -r /tmp/spack-staging/lib $SPACK_ROOT/lib \
 && cp -r /tmp/spack-staging/share $SPACK_ROOT/share \
 && cp -r /tmp/spack-staging/var $SPACK_ROOT/var \
 && cp -r /tmp/spack-staging/.git $SPACK_ROOT/.git \
 && rm -rf /tmp/spack-staging \
 && echo 'export LD_LIBRARY_PATH=/lib/x86_64-linux-gnu:$LD_LIBRARY_PATH' \
        >> $SPACK_ROOT/share/setup-env.sh \
 && ln -s $SPACK_ROOT/share/spack/docker/entrypoint.bash \
          /usr/sbin/docker-shell \
 && ln -s $SPACK_ROOT/share/spack/docker/entrypoint.bash \
          /usr/sbin/interactive-shell \
 && ln -s $SPACK_ROOT/share/spack/docker/entrypoint.bash \
          /usr/sbin/spack-env \
 && echo "Part 2: Set target to generic x86_64" \
 && echo "packages:" > $SPACK_ROOT/etc/spack/packages.yaml \
 && echo "  all:" >> $SPACK_ROOT/etc/spack/packages.yaml \
 && echo "    target: [x86_64]" >> $SPACK_ROOT/etc/spack/packages.yaml \
 && cat $SPACK_ROOT/etc/spack/packages.yaml \
 && echo "Part 3: Set config to allow use of more cores for builds" \
 && echo "(and some other settings)" \
 && echo "config:" > $SPACK_ROOT/etc/spack/config.yaml \
 && echo "  suppress_gpg_warnings: true" \
        >> $SPACK_ROOT/etc/spack/config.yaml \
 && echo "  build_jobs: 64" >> $SPACK_ROOT/etc/spack/config.yaml \
 && echo "  install_tree:" >> $SPACK_ROOT/etc/spack/config.yaml \
 && echo "    root: /opt/software" >> $SPACK_ROOT/etc/spack/config.yaml \
 && cat $SPACK_ROOT/etc/spack/config.yaml

## All subsequent RUN instructions in this stage execute through spack's docker
## entrypoint, which activates the spack shell environment first.
SHELL ["docker-shell"]
## Setup spack buildcache mirrors, including an internal
## spack mirror using the docker build cache, and
## a backup mirror on the internal B010 network
## NOTE(review): `export OLD_PATH=$PATH` does not survive past this RUN layer
## (each RUN starts a fresh shell), so the `export PATH=$OLD_PATH` in the final
## stage cannot see it -- verify that cleanup step behaves as intended.
RUN --mount=type=cache,target=/var/cache/spack-mirror \
    export OLD_PATH=$PATH \
 && export PATH=$PATH:$SPACK_ROOT/bin \
 && spack mirror add docker /var/cache/spack-mirror \
 && spack mirror list

## Setup our custom environment and package overrides
COPY spack $SPACK_ROOT/eic-spack

## Register the custom package repo, stage the environment under
## /opt/spack-environment, and concretize it (no install yet -- that happens
## in the next, cache-busted layer).
RUN spack repo add --scope site "$SPACK_ROOT/eic-spack" \
 && mkdir /opt/spack-environment \
 && cd /opt/spack-environment \
 && mv $SPACK_ROOT/eic-spack/spack.yaml . \
 && rm -r /usr/local \
 && spack env activate . \
 && spack concretize
## This variable will change whenever either spack.yaml or our spack package
## overrides change, triggering a rebuild
ARG CACHE_BUST="hash"
ARG CACHE_NUKE=""

## Now execute the main build (or fetch from cache if possible)
## note, no-check-signature is needed to allow the quicker signature-less
## packages from the internal (docker) buildcache
##
## Optional, nuke the buildcache after install, before (re)caching
## This is useful when going to completely different containers,
## or intermittently to keep the buildcache step from taking too much time
##
## Update the local build cache if needed. Consists of 3 steps:
## 1. Remove the B010 network buildcache (silicon)
## 2. Get a list of all packages, and compare with what is already on
##    the buildcache (using package hash)
## 3. Add packages that need to be added to buildcache if any
## Note: `spack install` is retried up to three times before recording a failure
## status; the layer still refreshes the buildcache and only exits with the saved
## status at the very end.
RUN --mount=type=cache,target=/var/cache/spack-mirror \
    cd /opt/spack-environment \
 && ls /var/cache/spack-mirror \
 && spack env activate . \
 && status=0 \
 && spack install -j64 --no-check-signature \
    || spack install -j64 --no-check-signature \
    || spack install -j64 --no-check-signature \
    || status=$? \
 && [ -z "${CACHE_NUKE}" ] \
    || rm -rf /var/cache/spack-mirror/build_cache/* \
 && mkdir -p /var/cache/spack-mirror/build_cache \
 && spack buildcache update-index -d /var/cache/spack-mirror \
 && spack buildcache list --allarch --very-long \
    | sed '/^$/d;/^--/d;s/@.\+//;s/\([a-z0-9]*\) \(.*\)/\2\/\1/' \
    | sort > tmp.buildcache.txt \
 && spack find --format {name}/{hash} | sort \
    | comm -23 - tmp.buildcache.txt \
    | xargs --no-run-if-empty \
        spack buildcache create --allow-root --only package --unsigned \
          --directory /var/cache/spack-mirror \
          --rebuild-index \
 && spack clean -a \
 && exit $status

## Extra post-spack steps:
##   - Python packages
COPY requirements.txt /usr/local/etc/requirements.txt
RUN --mount=type=cache,target=/var/cache/pip \
    echo "Installing additional python packages" \
 && cd /opt/spack-environment && spack env activate . \
 && python -m pip install \
        --trusted-host pypi.org \
        --trusted-host files.pythonhosted.org \
        --cache-dir /var/cache/pip \
        --requirement /usr/local/etc/requirements.txt
## Including some small fixes:
##  - Somehow PODIO env isn't automatically set,
##  - and Gaudi likes BINARY_TAG to be set
## The generated /etc/profile.d/z10_spack_environment.sh snapshots the activated
## spack environment so login shells get it without re-running spack.
RUN cd /opt/spack-environment \
 && echo -n "" \
 && echo "Grabbing environment info" \
 && spack env activate --sh -d . \
    | sed "s?LD_LIBRARY_PATH=?&/lib/x86_64-linux-gnu:?" \
    | sed '/MANPATH/ s/;$/:;/' \
    > /etc/profile.d/z10_spack_environment.sh \
 && cd /opt/spack-environment && spack env activate . \
 && echo -n "" \
 && echo "Add extra environment variables for Jug, Podio and Gaudi" \
 && echo "export PODIO=$(spack location -i podio);" \
        >> /etc/profile.d/z10_spack_environment.sh \
 && echo -n "" \
 && echo "Executing cmake patch for dd4hep 16.1" \
 && sed -i "s/FIND_PACKAGE(Python/#&/" /usr/local/cmake/DD4hepBuild.cmake

## make sure we have the entrypoints setup correctly
ENTRYPOINT []
CMD ["bash", "--rcfile", "/etc/profile", "-l"]
USER 0
WORKDIR /
## ========================================================================================
## STAGE 3: staging image with unnecessary packages removed and stripped binaries
## ========================================================================================
FROM builder as staging

## Garbage-collect build-only dependencies from the spack installation
RUN cd /opt/spack-environment && spack env activate . && spack gc -y

# Strip all the binaries
# This reduces the image by factor of x2, so worth the effort
# note that we do not strip python libraries as can cause issues in some cases
RUN find -L /usr/local/* \
        -type d -name site-packages -prune -false -o \
        -type f -not -name "zdll.lib" -not -name libtensorflow-lite.a \
        -exec realpath '{}' \; \
    | xargs file -i \
    | grep 'charset=binary' \
    | grep 'x-executable\|x-archive\|x-sharedlib' \
    | awk -F: '{print $1}' | xargs strip -s

## Bugfix to address issues loading the Qt5 libraries on Linux kernels prior to 3.15
## See
## https://askubuntu.com/questions/1034313/ubuntu-18-4-libqt5core-so-5-cannot-open-shared-object-file-no-such-file-or-dir
## and links therein for more info
RUN strip --remove-section=.note.ABI-tag /usr/local/lib/libQt5Core.so

## Address Issue #72
## missing precompiled headers for cppyy due to missing symlink in root
## install (should really be addressed by ROOT spack package)
RUN cd /opt/spack-environment && spack env activate . \
 && if [ ! -e $(spack location -i root)/lib/cppyy_backend/etc ]; then \
      ln -sf $(spack location -i root)/etc \
             $(spack location -i root)/lib/cppyy_backend/etc; \
    fi

## Record spack provenance (debug report + installed package list) in /etc/jug_info
RUN spack debug report \
    | sed "s/^/ - /" | sed "s/\* \*\*//" | sed "s/\*\*//" \
    >> /etc/jug_info \
 && spack find --no-groups --long --variants | sed "s/^/ - /" >> /etc/jug_info

## Container helper scripts and profile hooks
COPY eic-shell /usr/local/bin/eic-shell
COPY eic-info /usr/local/bin/eic-info
COPY entrypoint.sh /usr/local/sbin/entrypoint.sh
COPY eic-env.sh /etc/eic-env.sh
COPY profile.d/a00_cleanup.sh /etc/profile.d
COPY profile.d/z11_jug_env.sh /etc/profile.d
COPY singularity.d /.singularity.d

## Add minio client into /usr/local/bin
ADD https://dl.min.io/client/mc/release/linux-amd64/mc /usr/local/bin
RUN chmod a+x /usr/local/bin/mc
## ========================================================================================
## STAGE 4
## Lean target image
## ========================================================================================
# We cannot use a new base image and copy from staging
# So, remove spack and clean-up existing image
## NOTE(review): OLD_PATH was exported in an earlier RUN layer and ARG SPACK_ROOT was
## declared in the builder stage; neither persists into this shell, so `rm -rf $SPACK_ROOT`
## and `export PATH=$OLD_PATH` presumably expand empty here -- confirm this cleanup
## actually removes spack and leaves a usable PATH.
RUN rm -rf $SPACK_ROOT \
 && rm -rf /var/cache/spack-mirror \
 && export PATH=$OLD_PATH \
 && unset OLD_PATH \
 && rm -rf /usr/local \
 && cd /usr/._local \
 && PREFIX_PATH=$(realpath $(ls | tail -n1)) \
 && echo "Found spack true prefix path to be $PREFIX_PATH" \
 && cd - \
 && ln -s ${PREFIX_PATH} /usr/local

## set the jug_dev version and add the afterburner
## TODO: move afterburner to spack when possible
ARG JUG_VERSION=1
## NOTE(review): AFTERBURNER_VERSION is declared but never referenced in the visible
## file -- presumably consumed by a section trimmed from this view; confirm.
ARG AFTERBURNER_VERSION=main
RUN echo "" >> /etc/jug_info \
 && echo " - jug_dev: ${JUG_VERSION}" >> /etc/jug_info

## make sure we have the entrypoints setup correctly
ENTRYPOINT ["/usr/local/sbin/entrypoint.sh"]
CMD ["bash", "--rcfile", "/etc/profile", "-l"]
USER 0
WORKDIR /
SHELL ["/usr/local/bin/eic-shell"]
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Please register or to comment