diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index 1ae570a4a9b5fc69358da568a755a557a4762b50..066ce6db066772dc5c8b291d8a20b393e288fceb 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -7,27 +7,25 @@ variables:
   SPACK_VERSION: v0.21.1
   SPACK_PATH_GITLAB: /mnt/spack_v0.21.1
   SYSTEMNAME: ebrainslab
-  OC_PROJECT: jupyterhub
 
 # ===================================================================
 # LAB DEPLOYMENTS
 # ===================================================================
 
-# start an OpenShift Job that will build the Spack environment
+# start a k8s Job that will build the Spack environment
 .deploy-build-environment:
   stage: build
   script:
-    # login and select project in openshift
-    - oc login "$OPENSHIFT_SERVER" --token="$OPENSHIFT_TOKEN"
-    - oc project $OC_PROJECT
+    # use the site-specific kubectl context
+    - kubectl config use-context $KUBE_CONTEXT
     # create job description file
     - chmod a+x create_job.sh
     - ./create_job.sh $CI_PIPELINE_ID $BUILD_ENV_DOCKER_IMAGE $INSTALLATION_ROOT $SPACK_VERSION $SPACK_ENV $CI_COMMIT_BRANCH $RELEASE_NAME $LAB_KERNEL_ROOT
     - cat simplejob.yml
     # start the deploy job
-    - oc create -f simplejob.yml
+    - kubectl create -f simplejob.yml
     # wait for job to finish to get the logs
-    - while true; do sleep 300; x=$(oc get pods | grep simplejob${CI_PIPELINE_ID} | awk '{ print $3}'); if [ $x != "Running" ]; then break; fi; done 
+    - while true; do sleep 300; x=$(kubectl get pods | grep simplejob${CI_PIPELINE_ID} | awk '{ print $3}'); if [ "$x" != "Running" ]; then break; fi; done
     # # copy logs of failed packages locally, to keep as job artifacts
     # - oc rsync $(oc get pods -l job-name=simplejob${CI_PIPELINE_ID} -o name):/tmp ./ --include="*/" --include="spack/spack-stage/*/*.txt" --exclude="*"
     # - mv tmp/spack/spack-stage spack_logs
@@ -36,10 +34,10 @@ variables:
     # - oc rsync $(oc get pods -l job-name=simplejob${CI_PIPELINE_ID} -o name):$LAB_KERNEL_PATH ./
     # - mv .$LAB_KERNEL_PATH kernel_specs
     # if spack install has failed, fail the pipeline
-    - oc logs jobs/simplejob${CI_PIPELINE_ID} | tee log.txt
+    - kubectl logs jobs/simplejob${CI_PIPELINE_ID} | tee log.txt
     - if [ $(cat log.txt | grep "No module available for package" | wc -l) -gt 0 ]; then exit 1; fi;
-    # delete the job from OpenShift as we have the logs here
-    - oc delete job simplejob${CI_PIPELINE_ID} || true
+    # delete the job, as we have the logs here
+    - kubectl delete job simplejob${CI_PIPELINE_ID} || true
   tags:
     - shell-runner
   # artifacts:
@@ -70,10 +68,8 @@ variables:
 .deploy-dev-server-cscs:
   extends: .deploy-dev-server
   variables:
-    OPENSHIFT_SERVER: $CSCS_OPENSHIFT_DEV_SERVER
-    OPENSHIFT_TOKEN: $CSCS_OPENSHIFT_DEV_TOKEN
+    KUBE_CONTEXT: cscs-dev
     BUILD_ENV_DOCKER_IMAGE: docker-registry.ebrains.eu/tc/ebrains-spack-build-env/okd:okd_23.06
-    OC_PROJECT: jupyterhub-int
   resource_group: shared-NFS-mount-dev-cscs
   tags:             # this is just to ensure that the two jobs will run on different runners
     - read-write    # to avoid issues with common environment variables
@@ -83,8 +79,7 @@ variables:
 .deploy-prod-server-cscs:
   extends: .deploy-prod-server
   variables:
-    OPENSHIFT_SERVER: $CSCS_OPENSHIFT_PROD_SERVER
-    OPENSHIFT_TOKEN: $CSCS_OPENSHIFT_PROD_TOKEN
+    KUBE_CONTEXT: cscs-prod
     BUILD_ENV_DOCKER_IMAGE: docker-registry.ebrains.eu/tc/ebrains-spack-build-env/okd:okd_23.06
   resource_group: shared-NFS-mount-prod-cscs
   tags:             # this is just to ensure that the two jobs will run on different runners
@@ -95,8 +90,7 @@ variables:
 .deploy-dev-server-cineca:
   extends: .deploy-dev-server
   variables:
-    OPENSHIFT_SERVER: $CINECA_K8S_DEV_SERVER
-    OPENSHIFT_TOKEN: $CINECA_K8S_DEV_TOKEN
+    KUBE_CONTEXT: cineca-int
   resource_group: shared-NFS-mount-dev-cineca
   tags:             # this is just to ensure that the two jobs will run on different runners
     - read-only     # to avoid issues with common environment variables
@@ -106,8 +100,7 @@ variables:
 .deploy-prod-server-jsc:
   extends: .deploy-prod-server
   variables:
-    OPENSHIFT_SERVER: $JSC_K8S_PROD_SERVER
-    OPENSHIFT_TOKEN: $JSC_K8S_PROD_TOKEN
+    KUBE_CONTEXT: jsc-prod
   resource_group: shared-NFS-mount-prod-jsc
   tags:             # this is just to ensure that the two jobs will run on different runners
     - read-only     # to avoid issues with common environment variables