stages:
  - deploy

variables:
  OPENSHIFT_SERVER: $OPENSHIFT_DEV_SERVER
  BUILD_ENV: $BUILD_ENV
  OP: $OPERATION
  #SPACK_ENV_TAR_FILE: ebrains-spack-builds${CI_PIPELINE_ID}.tar.gz
  SPACK_ENV_TAR_FILE: ebrains-spack-builds.tar.gz

deploy-build-environment:
  stage: deploy
  before_script:
    - oc login "$OPENSHIFT_SERVER" --token="$OPENSHIFT_TOKEN"
    - tar czf ${SPACK_ENV_TAR_FILE} packages/ repo.yaml spack.yaml create_JupyterLab_kernel.sh
    - mkdir copy_folder
    - mv ${SPACK_ENV_TAR_FILE} copy_folder
  script:
    # create the job description file
    - chmod a+x create_job.sh
    - ./create_job.sh $BUILD_ENV $OP $SPACK_ENV_TAR_FILE $CI_PIPELINE_ID
    - cat simplejob.yml
    # select the project in OpenShift
    - oc project jupyterhub-int
    # start the deploy job
    - oc create -f simplejob.yml
    ## wait for the job to finish: https://stackoverflow.com/questions/5073453/wait-for-kubernetes-job-to-complete-on-either-failure-success-using-command-line
    - oc get job/simplejob${CI_PIPELINE_ID} -o=jsonpath='{.status}' -w && oc get job/simplejob${CI_PIPELINE_ID} -o=jsonpath='{.status.conditions[*].type}' | grep -i -E 'failed|complete' || echo 'Failed'
    # wait for the job's pod to start running, so that the files defining the
    # environment Spack needs to build can be copied from the GitLab runner to the OpenShift pod
    - while true; do x=$(oc get pods | grep simplejob${CI_PIPELINE_ID} | awk '{ print $3 }'); if [ "$x" == "Running" ]; then break; fi; sleep 1; done
    - pod=$(oc get pods | grep simplejob${CI_PIPELINE_ID} | awk '{ print $1 }')
    - oc rsync copy_folder $pod:/opt/app-root/src
    # once the build job finishes, fetch its logs
    - while true; do sleep 10; x=$(oc get pods | grep "$pod" | awk '{ print $3 }'); if [ "$x" != "Running" ]; then break; fi; done
    - oc logs jobs/simplejob${CI_PIPELINE_ID} | tee log.txt
    # if spack install has failed, fail the pipeline
    - if grep -q "Error:" log.txt; then exit 1; fi
    # delete the job from OpenShift, as the logs are kept here
    - oc delete job simplejob${CI_PIPELINE_ID} || true
  tags:
    - shell-runner
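
# For reference, a minimal sketch of the kind of Job manifest create_job.sh is
# assumed to emit as simplejob.yml; the container name, image, and command below
# are placeholders, not taken from this repository:
#
#   apiVersion: batch/v1
#   kind: Job
#   metadata:
#     name: simplejob${CI_PIPELINE_ID}
#   spec:
#     backoffLimit: 0
#     template:
#       spec:
#         restartPolicy: Never
#         containers:
#           - name: spack-build              # hypothetical container name
#             image: spack-builder:latest    # hypothetical builder image
#             command: ["/bin/sh", "-c", "sleep infinity"]  # placeholder command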
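
# Inside the pod, the job is expected to unpack the copied environment before
# building; a sketch of such a step, assuming the tarball lands under
# /opt/app-root/src/copy_folder (paths and commands are assumptions, not from this file):
#
#   cd /opt/app-root/src
#   tar xzf copy_folder/ebrains-spack-builds.tar.gz
#   spack env activate . && spack install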
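
# Note: on oc clients that support it, the polling loops above could be replaced
# by a blocking wait; a sketch, not what this pipeline uses:
#
#   oc wait --for=condition=complete --timeout=3600s job/simplejob${CI_PIPELINE_ID}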