diff --git a/.github/workflows/dev_env_tests.yml b/.github/workflows/dev_env_tests.yml
index 00fe7933289309d57c6a87dbbd237bc2262c441d..5b52a8032d951c5ddef3481330452605168f97e7 100644
--- a/.github/workflows/dev_env_tests.yml
+++ b/.github/workflows/dev_env_tests.yml
@@ -104,4 +104,4 @@ jobs:
           poetry run inv start-controller --detached
 
       - name: Run development env tests
-        run: poetry run pytest tests/dev_env_tests --verbosity=2
+        run: poetry run pytest tests/dev_env_tests --verbosity=4
diff --git a/.github/workflows/prod_env_tests.yml b/.github/workflows/prod_env_tests.yml
index 0869103fbb6f1c0d525b6aeefe8652b5545f7ec9..81bca4d442a6dcef165d60054ac74b2b1500ef99 100644
--- a/.github/workflows/prod_env_tests.yml
+++ b/.github/workflows/prod_env_tests.yml
@@ -175,13 +175,21 @@ jobs:
       - name: Get container disk space
         run: df -h
 
-      - name: Load docker images to kind
+      - name: Load docker images to kind containers and delete them locally
         run: |
           kind load docker-image madgik/mipengine_node:dev
+          docker image rm madgik/mipengine_node:dev
           kind load docker-image madgik/mipengine_controller:dev --nodes kind-control-plane
+          docker image rm madgik/mipengine_controller:dev
           kind load docker-image madgik/mipenginedb:dev
+          docker image rm madgik/mipenginedb:dev
           kind load docker-image madgik/mipengine_mipdb:dev
+          docker image rm madgik/mipengine_mipdb:dev
           kind load docker-image madgik/mipengine_rabbitmq:dev
+          docker image rm madgik/mipengine_rabbitmq:dev
+
+      - name: Get container disk space
+        run: df -h
 
       - name: Copy prod_env_tests values.yaml
         run: cp -r tests/prod_env_tests/deployment_configs/kubernetes_values.yaml kubernetes/values.yaml
diff --git a/.github/workflows/smpc_env_tests.yml b/.github/workflows/smpc_env_tests.yml
new file mode 100644
index 0000000000000000000000000000000000000000..b3ecded4e2c40fd8b5e9b16c009f90abf5306ff3
--- /dev/null
+++ b/.github/workflows/smpc_env_tests.yml
@@ -0,0 +1,354 @@
+name: SMPC Env Tests
+
+on:
+  push:
+    branches:
+      - master
+  pull_request:
+    branches:
+      - master
+
+jobs:
+  run_tests:
+    runs-on: ubuntu-latest
+    steps:
+      - name: Check out repository
+        uses: actions/checkout@v2
+
+      - name: Set up python
+        uses: actions/setup-python@v2
+        with:
+          python-version: 3.8
+
+      - name: Install Poetry
+        uses: snok/install-poetry@v1
+        with:
+          virtualenvs-create: true
+          virtualenvs-in-project: true
+
+      - name: Load cached venv
+        id: cached-poetry-dependencies
+        uses: actions/cache@v2
+        with:
+          path: .venv
+          key: venv-${{ runner.os }}-${{ hashFiles('poetry.lock') }}
+
+      - name: Install dependencies
+        if: steps.cached-poetry-dependencies.outputs.cache-hit != 'true'
+        run: poetry install --no-interaction --no-root
+
+      - name: Set up Docker Buildx
+        uses: docker/setup-buildx-action@v1
+
+      - name: Load MONETDB cached image
+        uses: actions/cache@v2
+        with:
+          path: /tmp/.buildx-cache/monetdb
+          key: ${{ runner.os }}-buildx-monetdb-${{hashFiles('monetdb/**')}}-${{ hashFiles('mipengine/udfgen/udfio.py')}}
+          restore-keys: |
+            ${{ runner.os }}-buildx-monetdb-
+
+      - name: Build MONETDB docker image
+        uses: docker/build-push-action@v2
+        with:
+          context: .
+          file: monetdb/Dockerfile
+          push: false
+          load: true
+          tags: madgik/mipenginedb:dev
+          cache-from: type=local,src=/tmp/.buildx-cache/monetdb
+          cache-to: type=local,dest=/tmp/.buildx-cache-new/monetdb
+
+      - name: Load MIPDB container cached image
+        uses: actions/cache@v2
+        with:
+          path: /tmp/.buildx-cache/mipdb
+          key: ${{ runner.os }}-buildx-mipdb-${{hashFiles('mipdb/**')}}
+          restore-keys: |
+            ${{ runner.os }}-buildx-mipdb-
+
+      - name: Build MIPDB container docker image
+        uses: docker/build-push-action@v2
+        with:
+          context: .
+          file: mipdb/Dockerfile
+          push: false
+          load: true
+          tags: madgik/mipengine_mipdb:dev
+          cache-from: type=local,src=/tmp/.buildx-cache/mipdb
+          cache-to: type=local,dest=/tmp/.buildx-cache-new/mipdb
+
+      - name: Load RABBITMQ cached image
+        uses: actions/cache@v2
+        with:
+          path: /tmp/.buildx-cache/rabbitmq
+          key: ${{ runner.os }}-buildx-rabbitmq-${{hashFiles('rabbitmq/**')}}
+          restore-keys: |
+            ${{ runner.os }}-buildx-rabbitmq-
+
+      - name: Build RABBITMQ docker image
+        uses: docker/build-push-action@v2
+        with:
+          context: .
+          file: rabbitmq/Dockerfile
+          push: false
+          load: true
+          tags: madgik/mipengine_rabbitmq:dev
+          cache-from: type=local,src=/tmp/.buildx-cache/rabbitmq
+          cache-to: type=local,dest=/tmp/.buildx-cache-new/rabbitmq
+
+      - name: Load NODE service cached image
+        uses: actions/cache@v2
+        with:
+          path: /tmp/.buildx-cache/node
+          key: ${{ runner.os }}-buildx-node-${{hashFiles('mipengine/**')}}
+          restore-keys: |
+            ${{ runner.os }}-buildx-node-
+
+      - name: Build NODE service docker image
+        uses: docker/build-push-action@v2
+        with:
+          context: .
+          file: mipengine/node/Dockerfile
+          push: false
+          load: true
+          tags: madgik/mipengine_node:dev
+          cache-from: type=local,src=/tmp/.buildx-cache/node
+          cache-to: type=local,dest=/tmp/.buildx-cache-new/node
+
+      - name: Load CONTROLLER service cached image
+        uses: actions/cache@v2
+        with:
+          path: /tmp/.buildx-cache/controller
+          key: ${{ runner.os }}-buildx-controller-${{hashFiles('mipengine/**')}}
+          restore-keys: |
+            ${{ runner.os }}-buildx-controller-
+
+      - name: Build CONTROLLER service docker image
+        uses: docker/build-push-action@v2
+        with:
+          context: .
+          file: mipengine/controller/Dockerfile
+          push: false
+          load: true
+          tags: madgik/mipengine_controller:dev
+          cache-from: type=local,src=/tmp/.buildx-cache/controller
+          cache-to: type=local,dest=/tmp/.buildx-cache-new/controller
+
+        # Temp fix
+        # https://github.com/docker/build-push-action/issues/252
+        # https://github.com/moby/buildkit/issues/1896
+      - name: Move Docker images cache
+        run: |
+          rm -rf /tmp/.buildx-cache
+          mv /tmp/.buildx-cache-new /tmp/.buildx-cache
+
+      - name: Create k8s Kind Cluster
+        uses: helm/kind-action@v1.2.0
+        with:
+          cluster_name: kind
+          config: tests/smpc_env_tests/deployment_configs/kind_configuration/kind_cluster.yaml
+
+      - name: Install Helm
+        uses: azure/setup-helm@v1
+        with:
+          version: 3.6.3
+        id: install
+
+      - name: Taint Nodes
+        run: |
+          kubectl taint nodes master node-role.kubernetes.io/master-
+          kubectl label node master nodeType=master
+          kubectl label node localnode1 nodeType=worker
+          kubectl label node localnode2 nodeType=worker
+          kubectl label node localnode1 smpcType=player
+          kubectl label node localnode2 smpcType=player
+
+      - name: Get container disk space
+        run: df -h
+
+      - name: Free up space by removing unused dotnet, android and haskell libs
+        run: |
+          rm -rf /usr/share/dotnet
+          rm -rf /opt/ghc
+          sudo rm -rf /usr/local/lib/android
+
+      - name: Get container disk space
+        run: df -h
+
+      - name: Pull SMPC images
+        run: |
+          docker pull gpikra/coordinator:v6.0.0
+          docker pull mongo:5.0.8
+          docker pull redis:alpine3.15
+
+      - name: Load docker images to kind containers and delete them locally
+        run: |
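+          # Remove each image from the runner's local docker daemon right after
+          # loading it into kind, to keep the runner's disk usage low.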
+          kind load docker-image madgik/mipengine_node:dev
+          docker image rm madgik/mipengine_node:dev
+          kind load docker-image madgik/mipengine_controller:dev --nodes kind-control-plane
+          docker image rm madgik/mipengine_controller:dev
+          kind load docker-image madgik/mipenginedb:dev
+          docker image rm madgik/mipenginedb:dev
+          kind load docker-image madgik/mipengine_mipdb:dev
+          docker image rm madgik/mipengine_mipdb:dev
+          kind load docker-image madgik/mipengine_rabbitmq:dev
+          docker image rm madgik/mipengine_rabbitmq:dev
+          kind load docker-image gpikra/coordinator:v6.0.0
+          docker image rm gpikra/coordinator:v6.0.0
+          kind load docker-image mongo:5.0.8 --nodes kind-control-plane
+          docker image rm mongo:5.0.8
+          kind load docker-image redis:alpine3.15 --nodes kind-control-plane
+          docker image rm redis:alpine3.15
+
+      - name: Get container disk space
+        run: df -h
+
+      - name: Copy smpc_env_tests values.yaml
+        run: cp -r tests/smpc_env_tests/deployment_configs/kubernetes_values.yaml kubernetes/values.yaml
+
+      - name: Deploy Helm
+        run: helm install mipengine kubernetes/
+
+      - name: Wait for services to start
+        uses: jakejarvis/wait-action@master
+        with:
+          time: '60s'  #https://team-1617704806227.atlassian.net/browse/MIP-248
+
+      - name: K8s pods
+        run: kubectl get pods
+
+      - name: MonetDB logs
+        run: kubectl logs -l nodeType=localnode --tail -1 -c monetdb
+
+      - name: MonetDB logs (post run)
+        uses: webiny/action-post-run@2.0.1
+        with:
+          run: kubectl logs -l nodeType=localnode --tail -1 -c monetdb
+
+      - name: Initialize MONETDB from mipdb container
+        run: |
+          kubectl exec $(kubectl get pods -l=nodeType=localnode -o json | jq -r '.items[0].metadata.name') -c db-importer -- sh -c 'mipdb init --ip $DB_IP --port $DB_PORT'
+          kubectl exec $(kubectl get pods -l=nodeType=localnode -o json | jq -r '.items[1].metadata.name') -c db-importer -- sh -c 'mipdb init --ip $DB_IP --port $DB_PORT'
+
+      - name: Load dementia data model into localnodes
+        run: |
+          kubectl exec $(kubectl get pods -l=nodeType=localnode -o json | jq -r '.items[0].metadata.name') -c db-importer -- sh -c 'mipdb add-data-model /opt/data/dementia_v_0_1/CDEsMetadata.json --ip $DB_IP --port $DB_PORT'
+          kubectl exec $(kubectl get pods -l=nodeType=localnode -o json | jq -r '.items[1].metadata.name') -c db-importer -- sh -c 'mipdb add-data-model /opt/data/dementia_v_0_1/CDEsMetadata.json --ip $DB_IP --port $DB_PORT'
+
+      - name: Load dementia dataset csvs with suffix '0,2,4,6,8' into localnode 1
+        run: |
+          kubectl exec $(kubectl get pods -l=nodeType=localnode -o json | jq -r '.items[0].metadata.name') -c db-importer -- sh -c 'mipdb add-dataset /opt/data/dementia_v_0_1/edsd0.csv -d dementia -v 0.1 --ip $DB_IP --port $DB_PORT'
+          kubectl exec $(kubectl get pods -l=nodeType=localnode -o json | jq -r '.items[0].metadata.name') -c db-importer -- sh -c 'mipdb add-dataset /opt/data/dementia_v_0_1/ppmi0.csv -d dementia -v 0.1 --ip $DB_IP --port $DB_PORT'
+          kubectl exec $(kubectl get pods -l=nodeType=localnode -o json | jq -r '.items[0].metadata.name') -c db-importer -- sh -c 'mipdb add-dataset /opt/data/dementia_v_0_1/desd-synthdata0.csv -d dementia -v 0.1 --ip $DB_IP --port $DB_PORT'
+          kubectl exec $(kubectl get pods -l=nodeType=localnode -o json | jq -r '.items[0].metadata.name') -c db-importer -- sh -c 'mipdb add-dataset /opt/data/dementia_v_0_1/edsd2.csv -d dementia -v 0.1 --ip $DB_IP --port $DB_PORT'
+          kubectl exec $(kubectl get pods -l=nodeType=localnode -o json | jq -r '.items[0].metadata.name') -c db-importer -- sh -c 'mipdb add-dataset /opt/data/dementia_v_0_1/edsd4.csv -d dementia -v 0.1 --ip $DB_IP --port $DB_PORT'
+          kubectl exec $(kubectl get pods -l=nodeType=localnode -o json | jq -r '.items[0].metadata.name') -c db-importer -- sh -c 'mipdb add-dataset /opt/data/dementia_v_0_1/edsd6.csv -d dementia -v 0.1 --ip $DB_IP --port $DB_PORT'
+          kubectl exec $(kubectl get pods -l=nodeType=localnode -o json | jq -r '.items[0].metadata.name') -c db-importer -- sh -c 'mipdb add-dataset /opt/data/dementia_v_0_1/edsd8.csv -d dementia -v 0.1 --ip $DB_IP --port $DB_PORT'
+          kubectl exec $(kubectl get pods -l=nodeType=localnode -o json | jq -r '.items[0].metadata.name') -c db-importer -- sh -c 'mipdb add-dataset /opt/data/dementia_v_0_1/ppmi2.csv -d dementia -v 0.1 --ip $DB_IP --port $DB_PORT'
+          kubectl exec $(kubectl get pods -l=nodeType=localnode -o json | jq -r '.items[0].metadata.name') -c db-importer -- sh -c 'mipdb add-dataset /opt/data/dementia_v_0_1/ppmi4.csv -d dementia -v 0.1 --ip $DB_IP --port $DB_PORT'
+          kubectl exec $(kubectl get pods -l=nodeType=localnode -o json | jq -r '.items[0].metadata.name') -c db-importer -- sh -c 'mipdb add-dataset /opt/data/dementia_v_0_1/ppmi6.csv -d dementia -v 0.1 --ip $DB_IP --port $DB_PORT'
+          kubectl exec $(kubectl get pods -l=nodeType=localnode -o json | jq -r '.items[0].metadata.name') -c db-importer -- sh -c 'mipdb add-dataset /opt/data/dementia_v_0_1/ppmi8.csv -d dementia -v 0.1 --ip $DB_IP --port $DB_PORT'
+          kubectl exec $(kubectl get pods -l=nodeType=localnode -o json | jq -r '.items[0].metadata.name') -c db-importer -- sh -c 'mipdb add-dataset /opt/data/dementia_v_0_1/desd-synthdata2.csv -d dementia -v 0.1 --ip $DB_IP --port $DB_PORT'
+          kubectl exec $(kubectl get pods -l=nodeType=localnode -o json | jq -r '.items[0].metadata.name') -c db-importer -- sh -c 'mipdb add-dataset /opt/data/dementia_v_0_1/desd-synthdata4.csv -d dementia -v 0.1 --ip $DB_IP --port $DB_PORT'
+          kubectl exec $(kubectl get pods -l=nodeType=localnode -o json | jq -r '.items[0].metadata.name') -c db-importer -- sh -c 'mipdb add-dataset /opt/data/dementia_v_0_1/desd-synthdata6.csv -d dementia -v 0.1 --ip $DB_IP --port $DB_PORT'
+          kubectl exec $(kubectl get pods -l=nodeType=localnode -o json | jq -r '.items[0].metadata.name') -c db-importer -- sh -c 'mipdb add-dataset /opt/data/dementia_v_0_1/desd-synthdata8.csv -d dementia -v 0.1 --ip $DB_IP --port $DB_PORT'
+
+      - name: Load dementia dataset csvs with suffix '1,3,5,7,9' into localnode 2
+        run: |
+          kubectl exec $(kubectl get pods -l=nodeType=localnode -o json | jq -r '.items[1].metadata.name') -c db-importer -- sh -c 'mipdb add-dataset /opt/data/dementia_v_0_1/edsd1.csv -d dementia -v 0.1 --ip $DB_IP --port $DB_PORT'
+          kubectl exec $(kubectl get pods -l=nodeType=localnode -o json | jq -r '.items[1].metadata.name') -c db-importer -- sh -c 'mipdb add-dataset /opt/data/dementia_v_0_1/edsd3.csv -d dementia -v 0.1 --ip $DB_IP --port $DB_PORT'
+          kubectl exec $(kubectl get pods -l=nodeType=localnode -o json | jq -r '.items[1].metadata.name') -c db-importer -- sh -c 'mipdb add-dataset /opt/data/dementia_v_0_1/edsd5.csv -d dementia -v 0.1 --ip $DB_IP --port $DB_PORT'
+          kubectl exec $(kubectl get pods -l=nodeType=localnode -o json | jq -r '.items[1].metadata.name') -c db-importer -- sh -c 'mipdb add-dataset /opt/data/dementia_v_0_1/edsd7.csv -d dementia -v 0.1 --ip $DB_IP --port $DB_PORT'
+          kubectl exec $(kubectl get pods -l=nodeType=localnode -o json | jq -r '.items[1].metadata.name') -c db-importer -- sh -c 'mipdb add-dataset /opt/data/dementia_v_0_1/edsd9.csv -d dementia -v 0.1 --ip $DB_IP --port $DB_PORT'
+          kubectl exec $(kubectl get pods -l=nodeType=localnode -o json | jq -r '.items[1].metadata.name') -c db-importer -- sh -c 'mipdb add-dataset /opt/data/dementia_v_0_1/ppmi1.csv -d dementia -v 0.1 --ip $DB_IP --port $DB_PORT'
+          kubectl exec $(kubectl get pods -l=nodeType=localnode -o json | jq -r '.items[1].metadata.name') -c db-importer -- sh -c 'mipdb add-dataset /opt/data/dementia_v_0_1/ppmi3.csv -d dementia -v 0.1 --ip $DB_IP --port $DB_PORT'
+          kubectl exec $(kubectl get pods -l=nodeType=localnode -o json | jq -r '.items[1].metadata.name') -c db-importer -- sh -c 'mipdb add-dataset /opt/data/dementia_v_0_1/ppmi5.csv -d dementia -v 0.1 --ip $DB_IP --port $DB_PORT'
+          kubectl exec $(kubectl get pods -l=nodeType=localnode -o json | jq -r '.items[1].metadata.name') -c db-importer -- sh -c 'mipdb add-dataset /opt/data/dementia_v_0_1/ppmi7.csv -d dementia -v 0.1 --ip $DB_IP --port $DB_PORT'
+          kubectl exec $(kubectl get pods -l=nodeType=localnode -o json | jq -r '.items[1].metadata.name') -c db-importer -- sh -c 'mipdb add-dataset /opt/data/dementia_v_0_1/ppmi9.csv -d dementia -v 0.1 --ip $DB_IP --port $DB_PORT'
+          kubectl exec $(kubectl get pods -l=nodeType=localnode -o json | jq -r '.items[1].metadata.name') -c db-importer -- sh -c 'mipdb add-dataset /opt/data/dementia_v_0_1/desd-synthdata1.csv -d dementia -v 0.1 --ip $DB_IP --port $DB_PORT'
+          kubectl exec $(kubectl get pods -l=nodeType=localnode -o json | jq -r '.items[1].metadata.name') -c db-importer -- sh -c 'mipdb add-dataset /opt/data/dementia_v_0_1/desd-synthdata3.csv -d dementia -v 0.1 --ip $DB_IP --port $DB_PORT'
+          kubectl exec $(kubectl get pods -l=nodeType=localnode -o json | jq -r '.items[1].metadata.name') -c db-importer -- sh -c 'mipdb add-dataset /opt/data/dementia_v_0_1/desd-synthdata5.csv -d dementia -v 0.1 --ip $DB_IP --port $DB_PORT'
+          kubectl exec $(kubectl get pods -l=nodeType=localnode -o json | jq -r '.items[1].metadata.name') -c db-importer -- sh -c 'mipdb add-dataset /opt/data/dementia_v_0_1/desd-synthdata7.csv -d dementia -v 0.1 --ip $DB_IP --port $DB_PORT'
+          kubectl exec $(kubectl get pods -l=nodeType=localnode -o json | jq -r '.items[1].metadata.name') -c db-importer -- sh -c 'mipdb add-dataset /opt/data/dementia_v_0_1/desd-synthdata9.csv -d dementia -v 0.1 --ip $DB_IP --port $DB_PORT'
+
+      - name: Load tbi data model into localnodes
+        run: |
+          kubectl exec $(kubectl get pods -l=nodeType=localnode -o json | jq -r '.items[0].metadata.name') -c db-importer -- sh -c 'mipdb add-data-model /opt/data/tbi_v_0_1/CDEsMetadata.json --ip $DB_IP --port $DB_PORT'
+          kubectl exec $(kubectl get pods -l=nodeType=localnode -o json | jq -r '.items[1].metadata.name') -c db-importer -- sh -c 'mipdb add-data-model /opt/data/tbi_v_0_1/CDEsMetadata.json --ip $DB_IP --port $DB_PORT'
+
+      - name: Load tbi dataset csvs with suffix '0,2,4,6,8' into localnode 1
+        run: |
+          kubectl exec $(kubectl get pods -l=nodeType=localnode -o json | jq -r '.items[0].metadata.name') -c db-importer -- sh -c 'mipdb add-dataset /opt/data/tbi_v_0_1/dummy_tbi0.csv -d tbi -v 0.1 --ip $DB_IP --port $DB_PORT'
+          kubectl exec $(kubectl get pods -l=nodeType=localnode -o json | jq -r '.items[0].metadata.name') -c db-importer -- sh -c 'mipdb add-dataset /opt/data/tbi_v_0_1/dummy_tbi2.csv -d tbi -v 0.1 --ip $DB_IP --port $DB_PORT'
+          kubectl exec $(kubectl get pods -l=nodeType=localnode -o json | jq -r '.items[0].metadata.name') -c db-importer -- sh -c 'mipdb add-dataset /opt/data/tbi_v_0_1/dummy_tbi4.csv -d tbi -v 0.1 --ip $DB_IP --port $DB_PORT'
+          kubectl exec $(kubectl get pods -l=nodeType=localnode -o json | jq -r '.items[0].metadata.name') -c db-importer -- sh -c 'mipdb add-dataset /opt/data/tbi_v_0_1/dummy_tbi6.csv -d tbi -v 0.1 --ip $DB_IP --port $DB_PORT'
+          kubectl exec $(kubectl get pods -l=nodeType=localnode -o json | jq -r '.items[0].metadata.name') -c db-importer -- sh -c 'mipdb add-dataset /opt/data/tbi_v_0_1/dummy_tbi8.csv -d tbi -v 0.1 --ip $DB_IP --port $DB_PORT'
+
+      - name: Load tbi dataset csvs with suffix '1,3,5,7,9' into localnode 2
+        run: |
+          kubectl exec $(kubectl get pods -l=nodeType=localnode -o json | jq -r '.items[1].metadata.name') -c db-importer -- sh -c 'mipdb add-dataset /opt/data/tbi_v_0_1/dummy_tbi1.csv -d tbi -v 0.1 --ip $DB_IP --port $DB_PORT'
+          kubectl exec $(kubectl get pods -l=nodeType=localnode -o json | jq -r '.items[1].metadata.name') -c db-importer -- sh -c 'mipdb add-dataset /opt/data/tbi_v_0_1/dummy_tbi3.csv -d tbi -v 0.1 --ip $DB_IP --port $DB_PORT'
+          kubectl exec $(kubectl get pods -l=nodeType=localnode -o json | jq -r '.items[1].metadata.name') -c db-importer -- sh -c 'mipdb add-dataset /opt/data/tbi_v_0_1/dummy_tbi5.csv -d tbi -v 0.1 --ip $DB_IP --port $DB_PORT'
+          kubectl exec $(kubectl get pods -l=nodeType=localnode -o json | jq -r '.items[1].metadata.name') -c db-importer -- sh -c 'mipdb add-dataset /opt/data/tbi_v_0_1/dummy_tbi7.csv -d tbi -v 0.1 --ip $DB_IP --port $DB_PORT'
+          kubectl exec $(kubectl get pods -l=nodeType=localnode -o json | jq -r '.items[1].metadata.name') -c db-importer -- sh -c 'mipdb add-dataset /opt/data/tbi_v_0_1/dummy_tbi9.csv -d tbi -v 0.1 --ip $DB_IP --port $DB_PORT'
+
+      - name: K8s pods
+        run: kubectl get pods
+
+      - name: Wait for all services to sync
+        uses: jakejarvis/wait-action@master
+        with:
+          time: '60s'  #https://team-1617704806227.atlassian.net/browse/MIP-248
+
+      - name: Controller logs
+        run: kubectl logs -l app=mipengine-controller --tail -1 -c controller
+
+      - name: Globalnode logs
+        run: kubectl logs -l nodeType=globalnode --tail -1 -c node
+
+      - name: Localnode logs
+        run: kubectl logs -l nodeType=localnode --tail -1 -c node
+
+      - name: SMPC Coordinator logs
+        run: kubectl logs -l app=mipengine-controller --tail -1 -c smpc-coordinator
+
+      - name: SMPC Player logs
+        run: kubectl logs -l app=mipengine-smpc-players --tail -1
+
+      - name: SMPC Client logs
+        run: kubectl logs -l nodeType=localnode --tail -1 -c smpc-client
+
+      - name: Controller logs (post run)
+        uses: webiny/action-post-run@2.0.1
+        with:
+          run: kubectl logs -l app=mipengine-controller --tail -1  -c controller
+
+      - name: Globalnode logs (post run)
+        uses: webiny/action-post-run@2.0.1
+        with:
+          run: kubectl logs -l nodeType=globalnode --tail -1 -c node
+
+      - name: Localnode logs (post run)
+        uses: webiny/action-post-run@2.0.1
+        with:
+          run: kubectl logs -l nodeType=localnode --tail -1 -c node
+
+      - name: SMPC coordinator logs (post run)
+        uses: webiny/action-post-run@2.0.1
+        with:
+          run: kubectl logs -l app=mipengine-controller --tail -1 -c smpc-coordinator
+
+      - name: SMPC player logs (post run)
+        uses: webiny/action-post-run@2.0.1
+        with:
+          run: kubectl logs -l app=mipengine-smpc-players --tail -1
+
+      - name: SMPC client logs (post run)
+        uses: webiny/action-post-run@2.0.1
+        with:
+          run: kubectl logs -l nodeType=localnode --tail -1 -c smpc-client
+
+# TODO SMPC currently doesn't support decimals
+#      - name: Run the first 5 algorithm validation tests from each algorithm
+#        run: poetry run pytest tests/algorithm_validation_tests/ -k "input0- or input1- or input2- or input3- or input4-" -vvvv
diff --git a/.github/workflows/standalone_tests.yml b/.github/workflows/standalone_tests.yml
index 4ca09dbb087a3c29885edadaa2c660f985007ad2..b3bc5593946f5736e978e62a01f432bbd3e0cf53 100644
--- a/.github/workflows/standalone_tests.yml
+++ b/.github/workflows/standalone_tests.yml
@@ -86,10 +86,34 @@ jobs:
           rm -rf /tmp/.buildx-cache
           mv /tmp/.buildx-cache-new /tmp/.buildx-cache
 
-      - name: Run standalone tests
-        run: |
-          source .venv/bin/activate
-          poetry run pytest --cov=mipengine --cov-report=xml tests/standalone_tests --verbosity=2 #--capture=tee-sys
+      - name: SMPC Controller logs (post run)
+        uses: webiny/action-post-run@2.0.1
+        with:
+          run: cat /tmp/mipengine/test_smpc_controller.out
+
+      - name: SMPC Coordinator logs (post run)
+        uses: webiny/action-post-run@2.0.1
+        with:
+          run: docker logs smpc_test_coordinator
+
+      - name: SMPC Client 1 logs (post run)
+        uses: webiny/action-post-run@2.0.1
+        with:
+          run: docker logs smpc_test_client1
+
+      - name: SMPC Player 1 logs (post run)
+        uses: webiny/action-post-run@2.0.1
+        with:
+          run: docker logs smpc_test_player1
+
+      - name: Run all standalone tests except the SMPC
+        run: poetry run pytest -s -m "not smpc" --cov=mipengine --cov-report=xml:non_smpc_cov.xml tests/standalone_tests --verbosity=4
+
+      - name: Cleanup running containers to have resources for the SMPC tests
+        run: poetry run inv cleanup
+
+      - name: Run SMPC specific standalone tests
+        run: poetry run pytest -s -m "smpc" --cov=mipengine --cov-report=xml:smpc_cov.xml tests/standalone_tests --verbosity=4
 
       - name: Publish coverage on codeclimate
         uses: paambaati/codeclimate-action@v2.7.5
@@ -97,4 +121,5 @@ jobs:
           CC_TEST_REPORTER_ID: ${{ secrets.CC_TEST_REPORTER_ID }}
         with:
           coverageLocations: |
-            ./coverage.xml:coverage.py
+            ./non_smpc_cov.xml:coverage.py
+            ./smpc_cov.xml:coverage.py
diff --git a/README.md b/README.md
index 9f7d929801c608257037fd088e19bd4052a175dd..4a4a7bbeb4d3b94cb8ff83adc75975b5afbfa080 100644
--- a/README.md
+++ b/README.md
@@ -42,7 +42,7 @@
 
    ```
    ip = "172.17.0.1"
-   log_level = "INFO"
+   log_level = "DEBUG"
    framework_log_level ="INFO"
    monetdb_image = "madgik/mipenginedb:dev"
    rabbitmq_image = "madgik/mipengine_rabbitmq:dev"
@@ -50,7 +50,8 @@
    algorithm_folders = "./mipengine/algorithms,./tests/algorithms"
 
    node_landscape_aggregator_update_interval = 30
-   celery_tasks_timeout = 10
+   celery_tasks_timeout = 20
+   celery_run_udf_task_timeout = 120
 
    [privacy]
    minimum_row_count = 10
@@ -62,6 +63,11 @@
    [smpc]
    enabled=false
    optional=false
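+   # Poll interval (seconds) and retry limit when waiting for SMPC results.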
+   get_result_interval = 10
+   get_result_max_retries = 100
+   smpc_image="gpikra/coordinator:v6.0.0"
+   db_image="mongo:5.0.8"
+   queue_image="redis:alpine3.15"
 
    [[nodes]]
    id = "globalnode"
@@ -74,12 +80,14 @@
    role = "LOCALNODE"
    monetdb_port=50001
    rabbitmq_port=5671
+   smpc_client_port=9001
 
    [[nodes]]
    id = "localnode2"
    role = "LOCALNODE"
    monetdb_port=50002
    rabbitmq_port=5672
+   smpc_client_port=9002
 
    ```
 
diff --git a/kubernetes/templates/mipengine-controller.yaml b/kubernetes/templates/mipengine-controller.yaml
index 0ce4bfc8eaf34cd66497ee746d403b5fa8456921..6250169c2174a6e51000f4370fb3fed005fda590 100644
--- a/kubernetes/templates/mipengine-controller.yaml
+++ b/kubernetes/templates/mipengine-controller.yaml
@@ -16,6 +16,10 @@ spec:
     spec:
       nodeSelector:
         nodeType: master
+      volumes:
+      - name: cleanup-file
+        hostPath:
+          path: {{ .Values.controller.cleanup_file_folder }}
       containers:
       - name: controller
         image: {{ .Values.mipengine_images.repository }}/mipengine_controller:{{ .Values.mipengine_images.version }}
@@ -40,19 +44,78 @@ spec:
           value: "86400"  # One day in seconds
         - name: CELERY_TASKS_TIMEOUT
           value: "{{ .Values.controller.celery_tasks_timeout }}"
+        - name: CELERY_RUN_UDF_TASK_TIMEOUT
+          value: "{{ .Values.controller.celery_run_udf_task_timeout }}"
         - name: LOCALNODES_DNS
           value: "mipengine-nodes-service"
         - name: LOCALNODES_PORT
           value: "5672"
         - name: SMPC_ENABLED
           value: {{ quote .Values.smpc.enabled }}
+        {{ if .Values.smpc.enabled }}
         - name: SMPC_OPTIONAL
           value: {{ quote .Values.smpc.optional }}
+        {{ end }}
 
-      volumes:
-      - name: cleanup-file
-        hostPath:
-          path: {{ .Values.controller.cleanup_file_folder }}
+      ### --- SMPC components ---
+      {{ if .Values.smpc.enabled }}
+      - name: smpc-db
+        image: {{ .Values.smpc.db_image }}
+        imagePullPolicy: IfNotPresent
+        ports:
+          - containerPort: 27017
+        env:
+        - name: MONGO_INITDB_ROOT_USERNAME
+          value: "sysadmin"
+        - name: MONGO_INITDB_ROOT_PASSWORD
+          value: "123qwe"
+
+      - name: smpc-queue
+        image: {{ .Values.smpc.queue_image }}
+        imagePullPolicy: IfNotPresent
+        command: ["redis-server", "--requirepass", "agora"]
+        ports:
+          - containerPort: 6379
+        env:
+          - name: REDIS_REPLICATION_MODE
+            value: "master"
+
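+      # The SMPC coordinator runs as an extra container in the controller pod and
+      # reaches the MongoDB and Redis containers above through the pod IP.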
+      - name: smpc-coordinator
+        image: {{ .Values.smpc.image }}
+        imagePullPolicy: IfNotPresent
+        command: ["python", "coordinator.py"]
+        ports:
+          - containerPort: 12314
+        env:
+        - name: POD_IP
+          valueFrom:
+            fieldRef:
+              fieldPath: status.podIP
+        - name: DB_URL
+          value: "$(POD_IP):27017"
+        - name: DB_UNAME
+          value: "sysadmin"
+        - name: DB_PSWD
+          value: "123qwe"
+        - name: REDIS_HOST
+          value: "$(POD_IP)"
+        - name: REDIS_PORT
+          value: "6379"
+        - name: REDIS_PSWD
+          value: "agora"
+        - name: PLAYER_IP_0
+          value: mipengine-smpc-player0-service
+        - name: PLAYER_REPO_0
+          value: "http://$(PLAYER_IP_0):7000"
+        - name: PLAYER_IP_1
+          value: mipengine-smpc-player1-service
+        - name: PLAYER_REPO_1
+          value: "http://$(PLAYER_IP_1):7001"
+        - name: PLAYER_IP_2
+          value: mipengine-smpc-player2-service
+        - name: PLAYER_REPO_2
+          value: "http://$(PLAYER_IP_2):7002"
+      {{ end }}
 
 ---
 
@@ -84,3 +147,281 @@ spec:
     - protocol: TCP
       port: 5672
       targetPort: 5672
+
+
+{{ if .Values.smpc.enabled }}
+---
+
+### --- SMPC Coordinator Service ---
+apiVersion: v1
+kind: Service
+metadata:
+  name: mipengine-smpc-coordinator-service
+spec:
+  type: LoadBalancer
+  selector:
+    app: mipengine-controller
+  ports:
+    - protocol: TCP
+      port: 12314
+      targetPort: 12314
+#      nodePort: 31000  # Used for SMPC cluster debugging
+
+
+### --- SMPC Coordinator DB ---
+
+---
+
+apiVersion: v1
+kind: Service
+metadata:
+  name: mipengine-smpc-coordinator-db-service
+spec:
+  selector:
+    app: mipengine-controller
+  ports:
+    - protocol: TCP
+      port: 27017
+      targetPort: 27017
+
+
+### --- SMPC Player Pods ---
+---
+apiVersion: v1
+kind: Pod
+metadata:
+  name: mipengine-smpc-player0
+  labels:
+    app: mipengine-smpc-players
+    smpc_player: player0
+spec:
+  nodeSelector:
+    smpcType: player
+  affinity:
+    podAntiAffinity:
+      requiredDuringSchedulingIgnoredDuringExecution:
+      - labelSelector:
+          matchExpressions:
+          - key: app
+            operator: In
+            values:
+            - mipengine-smpc-players
+        topologyKey: "kubernetes.io/hostname"
+  containers:
+  - name: smpc-player
+    image: {{ .Values.smpc.image }}
+    imagePullPolicy: IfNotPresent
+    command: ["python", "player.py", "0"]
+    ports:
+      - containerPort: 6000
+      - containerPort: 7000
+      - containerPort: 14000
+    env:
+    - name: COORDINATOR_IP
+      value: mipengine-smpc-coordinator-service
+    - name: COORDINATOR_URL
+      value: "http://$(COORDINATOR_IP):12314"
+    - name: DB_IP
+      value: mipengine-smpc-coordinator-db-service
+    - name: DB_URL
+      value: "$(DB_IP):27017"
+    - name: DB_UNAME
+      value: "sysadmin"
+    - name: DB_PSWD
+      value: "123qwe"
+    - name: PLAYER_IP_0
+      value: mipengine-smpc-player0-service
+    - name: PLAYER_REPO_0
+      value: "http://$(PLAYER_IP_0):7000"
+    - name: PLAYER_IP_1
+      value: mipengine-smpc-player1-service
+    - name: PLAYER_REPO_1
+      value: "http://$(PLAYER_IP_1):7001"
+    - name: PLAYER_IP_2
+      value: mipengine-smpc-player2-service
+    - name: PLAYER_REPO_2
+      value: "http://$(PLAYER_IP_2):7002"
+
+---
+
+apiVersion: v1
+kind: Service
+metadata:
+  name: mipengine-smpc-player0-service
+spec:
+  selector:
+    smpc_player: player0
+  ports:
+    - name: port-6000
+      protocol: TCP
+      port: 6000
+      targetPort: 6000
+    - name: port-7000
+      protocol: TCP
+      port: 7000
+      targetPort: 7000
+    - name: port-14000
+      protocol: TCP
+      port: 14000
+      targetPort: 14000
+
+---
+
+apiVersion: v1
+kind: Pod
+metadata:
+  name: mipengine-smpc-player1
+  labels:
+    app: mipengine-smpc-players
+    smpc_player: player1
+spec:
+  nodeSelector:
+    smpcType: player
+  affinity:
+    podAntiAffinity:
+      requiredDuringSchedulingIgnoredDuringExecution:
+      - labelSelector:
+          matchExpressions:
+          - key: app
+            operator: In
+            values:
+            - mipengine-smpc-players
+        topologyKey: "kubernetes.io/hostname"
+  containers:
+  - name: smpc-player
+    image: {{ .Values.smpc.image }}
+    imagePullPolicy: IfNotPresent
+    command: ["python", "player.py", "1"]
+    ports:
+      - containerPort: 6001
+      - containerPort: 7001
+      - containerPort: 14001
+    env:
+    - name: COORDINATOR_IP
+      value: mipengine-smpc-coordinator-service
+    - name: COORDINATOR_URL
+      value: "http://$(COORDINATOR_IP):12314"
+    - name: DB_IP
+      value: mipengine-smpc-coordinator-db-service
+    - name: DB_URL
+      value: "$(DB_IP):27017"
+    - name: DB_UNAME
+      value: "sysadmin"
+    - name: DB_PSWD
+      value: "123qwe"
+    - name: PLAYER_IP_0
+      value: mipengine-smpc-player0-service
+    - name: PLAYER_REPO_0
+      value: "http://$(PLAYER_IP_0):7000"
+    - name: PLAYER_IP_1
+      value: mipengine-smpc-player1-service
+    - name: PLAYER_REPO_1
+      value: "http://$(PLAYER_IP_1):7001"
+    - name: PLAYER_IP_2
+      value: mipengine-smpc-player2-service
+    - name: PLAYER_REPO_2
+      value: "http://$(PLAYER_IP_2):7002"
+
+---
+
+apiVersion: v1
+kind: Service
+metadata:
+  name: mipengine-smpc-player1-service
+spec:
+  selector:
+    smpc_player: player1
+  ports:
+    - name: port-6001
+      protocol: TCP
+      port: 6001
+      targetPort: 6001
+    - name: port-7001
+      protocol: TCP
+      port: 7001
+      targetPort: 7001
+    - name: port-14001
+      protocol: TCP
+      port: 14001
+      targetPort: 14001
+
+---
+
+apiVersion: v1
+kind: Pod
+metadata:
+  name: mipengine-smpc-player2
+  labels:
+    app: mipengine-smpc-players
+    smpc_player: player2
+spec:
+  nodeSelector:
+    smpcType: player
+  affinity:
+    podAntiAffinity:
+      requiredDuringSchedulingIgnoredDuringExecution:
+      - labelSelector:
+          matchExpressions:
+          - key: app
+            operator: In
+            values:
+            - mipengine-smpc-players
+        topologyKey: "kubernetes.io/hostname"
+  containers:
+  - name: smpc-player
+    image: {{ .Values.smpc.image }}
+    imagePullPolicy: IfNotPresent
+    command: ["python", "player.py", "2"]
+    ports:
+      - containerPort: 6002
+      - containerPort: 7002
+      - containerPort: 14002
+    env:
+    - name: COORDINATOR_IP
+      value: mipengine-smpc-coordinator-service
+    - name: COORDINATOR_URL
+      value: "http://$(COORDINATOR_IP):12314"
+    - name: DB_IP
+      value: mipengine-smpc-coordinator-db-service
+    - name: DB_URL
+      value: "$(DB_IP):27017"
+    - name: DB_UNAME
+      value: "sysadmin"
+    - name: DB_PSWD
+      value: "123qwe"
+    - name: PLAYER_IP_0
+      value: mipengine-smpc-player0-service
+    - name: PLAYER_REPO_0
+      value: "http://$(PLAYER_IP_0):7000"
+    - name: PLAYER_IP_1
+      value: mipengine-smpc-player1-service
+    - name: PLAYER_REPO_1
+      value: "http://$(PLAYER_IP_1):7001"
+    - name: PLAYER_IP_2
+      value: mipengine-smpc-player2-service
+    - name: PLAYER_REPO_2
+      value: "http://$(PLAYER_IP_2):7002"
+
+---
+
+apiVersion: v1
+kind: Service
+metadata:
+  name: mipengine-smpc-player2-service
+spec:
+  selector:
+    smpc_player: player2
+  ports:
+    - name: port-6002
+      protocol: TCP
+      port: 6002
+      targetPort: 6002
+    - name: port-7002
+      protocol: TCP
+      port: 7002
+      targetPort: 7002
+    - name: port-14002
+      protocol: TCP
+      port: 14002
+      targetPort: 14002
+{{ end }}
diff --git a/kubernetes/templates/mipengine-globalnode.yaml b/kubernetes/templates/mipengine-globalnode.yaml
index e0382b1a73e2837a5392bfdc0a36b8db634789c4..ba2a25a072b545d6f919fe225b1f92443122095e 100644
--- a/kubernetes/templates/mipengine-globalnode.yaml
+++ b/kubernetes/templates/mipengine-globalnode.yaml
@@ -18,6 +18,10 @@ spec:
     spec:
       nodeSelector:
         nodeType: master
+      volumes:
+      - name: db-data
+        hostPath:
+          path: {{ .Values.monetdb_storage }}
       containers:
       - name: monetdb
         image: {{ .Values.mipengine_images.repository }}/mipenginedb:{{ .Values.mipengine_images.version }}
@@ -72,10 +76,15 @@ spec:
           value: "50000"
         - name: SMPC_ENABLED
           value: {{ quote .Values.smpc.enabled }}
+        {{ if .Values.smpc.enabled }}
         - name: SMPC_OPTIONAL
           value: {{ quote .Values.smpc.optional }}
-
-      volumes:
-      - name: db-data
-        hostPath:
-          path: {{ .Values.monetdb_storage }}
+        - name: SMPC_CLIENT_ID
+          valueFrom:
+            fieldRef:
+              fieldPath: spec.nodeName
+        - name: SMPC_COORDINATOR_IP
+          value: mipengine-smpc-coordinator-service
+        - name: SMPC_COORDINATOR_ADDRESS
+          value: "http://$(SMPC_COORDINATOR_IP):12314"
+        {{ end }}
diff --git a/kubernetes/templates/mipengine-localnode.yaml b/kubernetes/templates/mipengine-localnode.yaml
index 826f58b5adab6a042f4fba7adf624b1c3af4ed08..4193e4ccd313c52038760979979472f0742bcb21 100644
--- a/kubernetes/templates/mipengine-localnode.yaml
+++ b/kubernetes/templates/mipengine-localnode.yaml
@@ -28,6 +28,13 @@ spec:
                 values:
                 - mipengine-node
             topologyKey: "kubernetes.io/hostname"
+      volumes:
+      - name: db-data
+        hostPath:
+          path: {{ .Values.monetdb_storage }}
+      - name: csv-data
+        hostPath:
+          path: {{ .Values.csvs_datapath }}
       containers:
       - name: monetdb
         image: {{ .Values.mipengine_images.repository }}/mipenginedb:{{ .Values.mipengine_images.version }}
@@ -98,13 +105,65 @@ spec:
           value: "50000"
         - name: SMPC_ENABLED
           value: {{ quote .Values.smpc.enabled }}
+        {{ if .Values.smpc.enabled }}
         - name: SMPC_OPTIONAL
           value: {{ quote .Values.smpc.optional }}
+        - name: SMPC_CLIENT_ID
+          valueFrom:
+            fieldRef:
+              fieldPath: spec.nodeName
+        - name: SMPC_CLIENT_IP
+          valueFrom:
+            fieldRef:
+              fieldPath: status.podIP
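+        # Address of this node's SMPC client; port 9000 matches the smpc-client container below.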
+        - name: SMPC_CLIENT_ADDRESS
+          value: "http://$(SMPC_CLIENT_IP):9000"
+        {{ end }}
 
-      volumes:
-      - name: db-data
-        hostPath:
-          path: {{ .Values.monetdb_storage }}
-      - name: csv-data
-        hostPath:
-          path: {{ .Values.csvs_datapath }}
+      {{ if .Values.smpc.enabled }}
+      - name: smpc-client
+        image: {{ .Values.smpc.image }}
+        imagePullPolicy: IfNotPresent
+        command: ["python", "client.py"]
+        ports:
+          - containerPort: 9000
+        env:
+        - name: ID
+          valueFrom:
+            fieldRef:
+              fieldPath: spec.nodeName
+        - name: PORT
+          value: "9000"
+        - name: COORDINATOR_IP
+          value: mipengine-smpc-coordinator-service
+        - name: COORDINATOR_URL
+          value: "http://$(COORDINATOR_IP):12314"
+        - name: PLAYER_IP_0
+          value: mipengine-smpc-player0-service
+        - name: PLAYER_REPO_0
+          value: "http://$(PLAYER_IP_0):7000"
+        - name: PLAYER_IP_1
+          value: mipengine-smpc-player1-service
+        - name: PLAYER_REPO_1
+          value: "http://$(PLAYER_IP_1):7001"
+        - name: PLAYER_IP_2
+          value: mipengine-smpc-player2-service
+        - name: PLAYER_REPO_2
+          value: "http://$(PLAYER_IP_2):7002"
+      {{ end }}
+
+#---  # Used for SMPC cluster debugging
+#
+#apiVersion: v1
+#kind: Service
+#metadata:
+#  name: mipengine-node-service
+#spec:
+#  type: LoadBalancer
+#  selector:
+#    app: mipengine-node
+#  ports:
+#    - protocol: TCP
+#      port: 9000
+#      targetPort: 9000
+#      nodePort: 32000
diff --git a/kubernetes/values.yaml b/kubernetes/values.yaml
index 1bcc2228b89a9f7debd55837976c4de30f4a3ded..6f3e225d7e81a944758938a8091989827d7d5068 100644
--- a/kubernetes/values.yaml
+++ b/kubernetes/values.yaml
@@ -12,10 +12,14 @@ csvs_datapath: /opt/mipengine/csvs
 
 controller:
   node_landscape_aggregator_update_interval: 30
-  celery_tasks_timeout: 60
+  celery_tasks_timeout: 20
+  celery_run_udf_task_timeout: 120
   nodes_cleanup_interval: 60
   cleanup_file_folder: /opt/cleanup
 
 smpc:
   enabled: false
   optional: false
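+  # Images for the SMPC components (coordinator, players, clients), their MongoDB store and Redis queue.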
+  image: gpikra/coordinator:v6.0.0
+  db_image: mongo:5.0.8
+  queue_image: redis:alpine3.15
diff --git a/mipengine/controller/README.md b/mipengine/controller/README.md
index ed6134e8e219c24fceb7599d7bfe19e7fe95b21e..c96447cd4ce97785f7e30fb5770105923b96a49a 100644
--- a/mipengine/controller/README.md
+++ b/mipengine/controller/README.md
@@ -21,7 +21,6 @@ Create an env_file with the following variables:
 ```
 LOG_LEVEL=INFO
 FRAMEWORK_LOG_LEVEL=INFO
-CDES_METADATA_PATH=172.17.0.1
 DEPLOYMENT_TYPE=LOCAL
 NODE_LANDSCAPE_AGGREGATOR_UPDATE_INTERVAL=30
 LOCALNODES_CONFIG_FILE=/home/user/localnodes_config.json
diff --git a/mipengine/controller/algorithm_executor.py b/mipengine/controller/algorithm_executor.py
index e8e3fd392404f5581b0d956fca0a7189465fbaf1..4949e7cd474429962bb31c4e0ab1c39a7449d631 100644
--- a/mipengine/controller/algorithm_executor.py
+++ b/mipengine/controller/algorithm_executor.py
@@ -1,4 +1,5 @@
 import traceback
+from logging import Logger
 from typing import Any
 from typing import Callable
 from typing import Dict
@@ -26,8 +27,9 @@ from mipengine.controller.algorithm_executor_smpc_helper import get_smpc_results
 from mipengine.controller.algorithm_executor_smpc_helper import (
     load_data_to_smpc_clients,
 )
+from mipengine.controller.algorithm_executor_smpc_helper import trigger_smpc_operations
 from mipengine.controller.algorithm_executor_smpc_helper import (
-    trigger_smpc_computations,
+    wait_for_smpc_results_to_be_ready,
 )
 from mipengine.controller.algorithm_flow_data_objects import AlgoFlowData
 from mipengine.controller.algorithm_flow_data_objects import GlobalNodeData
@@ -152,6 +154,7 @@ class AlgorithmExecutor:
             data_model=self._algorithm_execution_dto.data_model,
             datasets_per_local_node=self._algorithm_execution_dto.datasets_per_local_node,
             use_smpc=self._get_use_smpc_flag(),
+            logger=self._logger,
         )
         if len(self._local_nodes) > 1:
             self._execution_interface = _AlgorithmExecutionInterface(
@@ -201,6 +204,7 @@ class _AlgorithmExecutionInterfaceDTO(BaseModel):
     data_model: str
     datasets_per_local_node: Dict[str, List[str]]
     use_smpc: bool
+    logger: Logger
 
     class Config:
         arbitrary_types_allowed = True
@@ -231,6 +235,8 @@ class _AlgorithmExecutionInterface:
             if varname in varnames
         }
 
+        self._logger = algo_execution_interface_dto.logger
+
     @property
     def algorithm_parameters(self) -> Dict[str, Any]:
         return self._algorithm_parameters
@@ -447,12 +453,23 @@ class _AlgorithmExecutionInterface:
             command_id, local_nodes_smpc_tables
         )
 
-        (sum_op, min_op, max_op, union_op,) = trigger_smpc_computations(
+        (sum_op, min_op, max_op, union_op) = trigger_smpc_operations(
+            logger=self._logger,
             context_id=self._global_node.context_id,
             command_id=command_id,
             smpc_clients_per_op=smpc_clients_per_op,
         )
 
+        wait_for_smpc_results_to_be_ready(
+            logger=self._logger,
+            context_id=self._global_node.context_id,
+            command_id=command_id,
+            sum_op=sum_op,
+            min_op=min_op,
+            max_op=max_op,
+            union_op=union_op,
+        )
+
         (
             sum_op_result_table,
             min_op_result_table,
diff --git a/mipengine/controller/algorithm_executor_node_data_objects.py b/mipengine/controller/algorithm_executor_node_data_objects.py
index 917660a80a54268fa8ed9211b18998afd3b95f20..6ed7455629f684e8e1da2717f02a110b0454941a 100644
--- a/mipengine/controller/algorithm_executor_node_data_objects.py
+++ b/mipengine/controller/algorithm_executor_node_data_objects.py
@@ -73,3 +73,6 @@ class SMPCTableNames(NodeData):
         self.min_op = min_op
         self.max_op = max_op
         self.union_op = union_op
+
+    def __repr__(self):
+        return self.full_table_name
diff --git a/mipengine/controller/algorithm_executor_nodes.py b/mipengine/controller/algorithm_executor_nodes.py
index 2f3aaa0ded179482a9720b39cfacfac13dd4133e..16c9c36b93aaefc0b8e91cfc06833d761387d0bc 100644
--- a/mipengine/controller/algorithm_executor_nodes.py
+++ b/mipengine/controller/algorithm_executor_nodes.py
@@ -294,9 +294,9 @@ class LocalNode(_Node):
                 raise NotImplementedError
         return udf_results
 
-    def load_data_to_smpc_client(self, table_name: str, jobid: str) -> int:
+    def load_data_to_smpc_client(self, table_name: str, jobid: str) -> str:
         return self._node_tasks_handler.load_data_to_smpc_client(
-            self.context_id, table_name, jobid
+            self.request_id, table_name, jobid
         )
 
 
@@ -327,7 +327,7 @@ class GlobalNode(_Node):
         table_name: str,
     ):
         self._node_tasks_handler.validate_smpc_templates_match(
-            self.context_id, table_name
+            self.request_id, table_name
         )
 
     def get_smpc_result(
@@ -337,6 +337,7 @@ class GlobalNode(_Node):
         command_subid: Optional[str] = "0",
     ) -> str:
         return self._node_tasks_handler.get_smpc_result(
+            request_id=self.request_id,
             jobid=jobid,
             context_id=self.context_id,
             command_id=str(command_id),
diff --git a/mipengine/controller/algorithm_executor_smpc_helper.py b/mipengine/controller/algorithm_executor_smpc_helper.py
index 8ea8025f3f7f6a5aead512c2503f90e8ad06569f..19d3efd51c708baa90eead5c306d20612e321670 100644
--- a/mipengine/controller/algorithm_executor_smpc_helper.py
+++ b/mipengine/controller/algorithm_executor_smpc_helper.py
@@ -1,15 +1,20 @@
+from logging import Logger
+from time import sleep
 from typing import List
+from typing import Optional
 from typing import Tuple
 
+from mipengine import smpc_cluster_comm_helpers as smpc_cluster
 from mipengine.controller import config as ctrl_config
-from mipengine.controller.algorithm_executor_node_data_objects import SMPCTableNames
 from mipengine.controller.algorithm_executor_node_data_objects import TableName
 from mipengine.controller.algorithm_executor_nodes import GlobalNode
-from mipengine.controller.algorithm_flow_data_objects import GlobalNodeTable
 from mipengine.controller.algorithm_flow_data_objects import LocalNodesSMPCTables
 from mipengine.controller.algorithm_flow_data_objects import LocalNodesTable
-from mipengine.smpc_cluster_comm_helpers import trigger_smpc_computation
+from mipengine.smpc_cluster_comm_helpers import SMPCComputationError
+from mipengine.smpc_cluster_comm_helpers import trigger_smpc
 from mipengine.smpc_DTOs import SMPCRequestType
+from mipengine.smpc_DTOs import SMPCResponse
+from mipengine.smpc_DTOs import SMPCResponseStatus
 
 
 def get_smpc_job_id(
@@ -19,8 +24,10 @@ def get_smpc_job_id(
 
 
 def load_operation_data_to_smpc_clients(
-    command_id: int, local_nodes_table: LocalNodesTable, op_type: SMPCRequestType
-) -> List[int]:
+    command_id: int,
+    local_nodes_table: Optional[LocalNodesTable],
+    op_type: SMPCRequestType,
+) -> List[str]:
     smpc_clients = []
     if local_nodes_table:
         for node, table in local_nodes_table.nodes_tables.items():
@@ -35,18 +42,18 @@ def load_operation_data_to_smpc_clients(
 
 def load_data_to_smpc_clients(
     command_id: int, smpc_tables: LocalNodesSMPCTables
-) -> Tuple[List[int], List[int], List[int], List[int]]:
+) -> Tuple[List[str], List[str], List[str], List[str]]:
     sum_op_smpc_clients = load_operation_data_to_smpc_clients(
-        command_id, smpc_tables.sum_op, SMPCRequestType.SUM
+        command_id, smpc_tables.sum_op_local_nodes_table, SMPCRequestType.SUM
     )
     min_op_smpc_clients = load_operation_data_to_smpc_clients(
-        command_id, smpc_tables.min_op, SMPCRequestType.MIN
+        command_id, smpc_tables.min_op_local_nodes_table, SMPCRequestType.MIN
     )
     max_op_smpc_clients = load_operation_data_to_smpc_clients(
-        command_id, smpc_tables.max_op, SMPCRequestType.MAX
+        command_id, smpc_tables.max_op_local_nodes_table, SMPCRequestType.MAX
     )
     union_op_smpc_clients = load_operation_data_to_smpc_clients(
-        command_id, smpc_tables.union_op, SMPCRequestType.UNION
+        command_id, smpc_tables.union_op_local_nodes_table, SMPCRequestType.UNION
     )
     return (
         sum_op_smpc_clients,
@@ -56,13 +63,15 @@ def load_data_to_smpc_clients(
     )
 
 
-def trigger_smpc_operation_computation(
+def trigger_smpc_operation(
+    logger: Logger,
     context_id: str,
     command_id: int,
     op_type: SMPCRequestType,
-    smpc_op_clients: List[int],
+    smpc_op_clients: List[str],
 ) -> bool:
-    trigger_smpc_computation(
+    trigger_smpc(
+        logger=logger,
         coordinator_address=ctrl_config.smpc.coordinator_address,
         jobid=get_smpc_job_id(
             context_id=context_id,
@@ -76,10 +85,11 @@ def trigger_smpc_operation_computation(
     return True if smpc_op_clients else False
 
 
-def trigger_smpc_computations(
+def trigger_smpc_operations(
+    logger: Logger,
     context_id: str,
     command_id: int,
-    smpc_clients_per_op: Tuple[List[int], List[int], List[int], List[int]],
+    smpc_clients_per_op: Tuple[List[str], List[str], List[str], List[str]],
 ) -> Tuple[bool, bool, bool, bool]:
     (
         sum_op_smpc_clients,
@@ -87,21 +97,88 @@ def trigger_smpc_computations(
         max_op_smpc_clients,
         union_op_smpc_clients,
     ) = smpc_clients_per_op
-    sum_op = trigger_smpc_operation_computation(
-        context_id, command_id, SMPCRequestType.SUM, sum_op_smpc_clients
+    sum_op = trigger_smpc_operation(
+        logger, context_id, command_id, SMPCRequestType.SUM, sum_op_smpc_clients
     )
-    min_op = trigger_smpc_operation_computation(
-        context_id, command_id, SMPCRequestType.MIN, min_op_smpc_clients
+    min_op = trigger_smpc_operation(
+        logger, context_id, command_id, SMPCRequestType.MIN, min_op_smpc_clients
     )
-    max_op = trigger_smpc_operation_computation(
-        context_id, command_id, SMPCRequestType.MAX, max_op_smpc_clients
+    max_op = trigger_smpc_operation(
+        logger, context_id, command_id, SMPCRequestType.MAX, max_op_smpc_clients
     )
-    union_op = trigger_smpc_operation_computation(
-        context_id, command_id, SMPCRequestType.UNION, union_op_smpc_clients
+    union_op = trigger_smpc_operation(
+        logger, context_id, command_id, SMPCRequestType.UNION, union_op_smpc_clients
     )
     return sum_op, min_op, max_op, union_op
 
 
+def wait_for_smpc_result_to_be_ready(
+    logger: Logger,
+    context_id: str,
+    command_id: int,
+    operation: SMPCRequestType,
+):
+    jobid = get_smpc_job_id(
+        context_id=context_id,
+        command_id=command_id,
+        operation=operation,
+    )
+
+    logger.info(f"Waiting for SMPC job '{jobid}' to finish.")
+
+    attempts = 0
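+    # Poll the coordinator every `get_result_interval` seconds until the job
+    # completes or fails, giving up after `get_result_max_retries` attempts.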
+    while True:
+        sleep(ctrl_config.smpc.get_result_interval)
+
+        response = smpc_cluster.get_smpc_result(
+            coordinator_address=ctrl_config.smpc.coordinator_address,
+            jobid=jobid,
+        )
+        try:
+            smpc_response = SMPCResponse.parse_raw(response)
+        except Exception as exc:
+            raise SMPCComputationError(
+                f"The SMPC response could not be parsed. \nResponse: {response}. \nException: {exc}"
+            )
+
+        if smpc_response.status == SMPCResponseStatus.FAILED:
+            raise SMPCComputationError(
+                f"The SMPC returned a {SMPCResponseStatus.FAILED} status. Body: {response}"
+            )
+        elif smpc_response.status == SMPCResponseStatus.COMPLETED:
+            break
+
+        if attempts > ctrl_config.smpc.get_result_max_retries:
+            raise SMPCComputationError(
+                f"Max retries for the SMPC exceeded the limit: {ctrl_config.smpc.get_result_max_retries}"
+            )
+        attempts += 1
+    logger.info(f"SMPC job '{jobid}' finished.")
+
+
+def wait_for_smpc_results_to_be_ready(
+    logger: Logger,
+    context_id: str,
+    command_id: int,
+    sum_op: bool,
+    min_op: bool,
+    max_op: bool,
+    union_op: bool,
+):
+    wait_for_smpc_result_to_be_ready(
+        logger, context_id, command_id, SMPCRequestType.SUM
+    ) if sum_op else None
+    wait_for_smpc_result_to_be_ready(
+        logger, context_id, command_id, SMPCRequestType.MIN
+    ) if min_op else None
+    wait_for_smpc_result_to_be_ready(
+        logger, context_id, command_id, SMPCRequestType.MAX
+    ) if max_op else None
+    wait_for_smpc_result_to_be_ready(
+        logger, context_id, command_id, SMPCRequestType.UNION
+    ) if union_op else None
+
+
 def get_smpc_results(
     node: GlobalNode,
     context_id: str,
@@ -112,61 +189,69 @@ def get_smpc_results(
     union_op: bool,
 ) -> Tuple[TableName, TableName, TableName, TableName]:
     sum_op_result_table = (
-        node.get_smpc_result(
-            jobid=get_smpc_job_id(
-                context_id=context_id,
-                command_id=command_id,
-                operation=SMPCRequestType.SUM,
-            ),
-            command_id=str(command_id),
-            command_subid="0",
+        TableName(
+            table_name=node.get_smpc_result(
+                jobid=get_smpc_job_id(
+                    context_id=context_id,
+                    command_id=command_id,
+                    operation=SMPCRequestType.SUM,
+                ),
+                command_id=str(command_id),
+                command_subid="0",
+            )
         )
         if sum_op
         else None
     )
     min_op_result_table = (
-        node.get_smpc_result(
-            jobid=get_smpc_job_id(
-                context_id=context_id,
-                command_id=command_id,
-                operation=SMPCRequestType.MIN,
-            ),
-            command_id=str(command_id),
-            command_subid="1",
+        TableName(
+            table_name=node.get_smpc_result(
+                jobid=get_smpc_job_id(
+                    context_id=context_id,
+                    command_id=command_id,
+                    operation=SMPCRequestType.MIN,
+                ),
+                command_id=str(command_id),
+                command_subid="1",
+            )
         )
         if min_op
         else None
     )
     max_op_result_table = (
-        node.get_smpc_result(
-            jobid=get_smpc_job_id(
-                context_id=context_id,
-                command_id=command_id,
-                operation=SMPCRequestType.MAX,
-            ),
-            command_id=str(command_id),
-            command_subid="2",
+        TableName(
+            table_name=node.get_smpc_result(
+                jobid=get_smpc_job_id(
+                    context_id=context_id,
+                    command_id=command_id,
+                    operation=SMPCRequestType.MAX,
+                ),
+                command_id=str(command_id),
+                command_subid="2",
+            )
         )
         if max_op
         else None
     )
     union_op_result_table = (
-        node.get_smpc_result(
-            jobid=get_smpc_job_id(
-                context_id=context_id,
-                command_id=command_id,
-                operation=SMPCRequestType.UNION,
-            ),
-            command_id=str(command_id),
-            command_subid="3",
+        TableName(
+            table_name=node.get_smpc_result(
+                jobid=get_smpc_job_id(
+                    context_id=context_id,
+                    command_id=command_id,
+                    operation=SMPCRequestType.UNION,
+                ),
+                command_id=str(command_id),
+                command_subid="3",
+            )
         )
         if union_op
         else None
     )
 
     return (
-        TableName(sum_op_result_table),
-        TableName(min_op_result_table),
-        TableName(max_op_result_table),
-        TableName(union_op_result_table),
+        sum_op_result_table,
+        min_op_result_table,
+        max_op_result_table,
+        union_op_result_table,
     )
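For reference, a minimal sketch of the result-polling pattern the controller now owns (it replaces the loop removed from the node's `get_smpc_result` task further down in this diff). The helper below is illustrative only, not the actual `wait_for_smpc_result_to_be_ready`; the interval and retry limit correspond to the new `get_result_interval` / `get_result_max_retries` controller settings.

```python
from time import sleep

from mipengine.smpc_DTOs import SMPCResponse
from mipengine.smpc_DTOs import SMPCResponseStatus
from mipengine.smpc_cluster_comm_helpers import SMPCComputationError
from mipengine.smpc_cluster_comm_helpers import get_smpc_result


def poll_smpc_result(logger, coordinator_address, jobid, interval, max_retries) -> str:
    """Poll the SMPC coordinator until the job completes, fails, or retries run out."""
    attempts = 0
    while True:
        sleep(interval)
        response = get_smpc_result(coordinator_address=coordinator_address, jobid=jobid)
        status = SMPCResponse.parse_raw(response).status
        if status == SMPCResponseStatus.FAILED:
            raise SMPCComputationError(
                f"The SMPC returned a FAILED status. Body: {response}"
            )
        if status == SMPCResponseStatus.COMPLETED:
            logger.info(f"SMPC, with jobid: '{jobid}', finished.")
            # The node's get_smpc_result task can now be triggered safely.
            return response
        if attempts > max_retries:
            raise SMPCComputationError(
                f"Max retries for the SMPC exceeded the limit: {max_retries}"
            )
        attempts += 1
```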
diff --git a/mipengine/controller/algorithm_flow_data_objects.py b/mipengine/controller/algorithm_flow_data_objects.py
index acb073e4436c2004051ee8e1962231f19d1a1be0..ef7f68dd8c3ae8c9adc832dfde2c1869056dad75 100644
--- a/mipengine/controller/algorithm_flow_data_objects.py
+++ b/mipengine/controller/algorithm_flow_data_objects.py
@@ -3,6 +3,7 @@ from abc import ABC
 from typing import Any
 from typing import Dict
 from typing import List
+from typing import Optional
 from typing import Union
 
 from mipengine.controller.algorithm_executor_node_data_objects import SMPCTableNames
@@ -160,6 +161,42 @@ class LocalNodesSMPCTables(LocalNodesData):
             {node: tables.template for node, tables in self.nodes_smpc_tables.items()}
         )
 
+    @property
+    def sum_op_local_nodes_table(self) -> Optional[LocalNodesTable]:
+        nodes_tables = {}
+        for node, tables in self.nodes_smpc_tables.items():
+            if not tables.sum_op:
+                return None
+            nodes_tables[node] = tables.sum_op
+        return LocalNodesTable(nodes_tables)
+
+    @property
+    def min_op_local_nodes_table(self) -> Optional[LocalNodesTable]:
+        nodes_tables = {}
+        for node, tables in self.nodes_smpc_tables.items():
+            if not tables.min_op:
+                return None
+            nodes_tables[node] = tables.min_op
+        return LocalNodesTable(nodes_tables)
+
+    @property
+    def max_op_local_nodes_table(self) -> Optional[LocalNodesTable]:
+        nodes_tables = {}
+        for node, tables in self.nodes_smpc_tables.items():
+            if not tables.max_op:
+                return None
+            nodes_tables[node] = tables.max_op
+        return LocalNodesTable(nodes_tables)
+
+    @property
+    def union_op_local_nodes_table(self) -> Optional[LocalNodesTable]:
+        nodes_tables = {}
+        for node, tables in self.nodes_smpc_tables.items():
+            if not tables.union_op:
+                return None
+            nodes_tables[node] = tables.union_op
+        return LocalNodesTable(nodes_tables)
+
 
 class GlobalNodeSMPCTables(GlobalNodeData):
     _node: GlobalNode
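The four `*_op_local_nodes_table` properties added above differ only in which attribute of `SMPCTableNames` they read. A possible way to collapse that duplication, shown as a self-contained sketch (the stand-in class and sample data are assumptions, used only to keep the example runnable):

```python
from typing import Dict
from typing import Optional


class _SMPCTablesStandIn:
    """Stand-in for SMPCTableNames, used only to keep this sketch runnable."""

    def __init__(self, sum_op=None, min_op=None, max_op=None, union_op=None):
        self.sum_op = sum_op
        self.min_op = min_op
        self.max_op = max_op
        self.union_op = union_op


def op_local_nodes_tables(
    nodes_smpc_tables: Dict[str, _SMPCTablesStandIn], op: str
) -> Optional[Dict[str, str]]:
    """Collect the `op` table of every node; return None if any node is missing it."""
    nodes_tables = {}
    for node, tables in nodes_smpc_tables.items():
        table = getattr(tables, op)
        if not table:
            return None
        nodes_tables[node] = table
    # The real properties would wrap this dict in LocalNodesTable(...).
    return nodes_tables


print(op_local_nodes_tables({"localnode1": _SMPCTablesStandIn(sum_op="table_1")}, "sum_op"))
print(op_local_nodes_tables({"localnode1": _SMPCTablesStandIn()}, "sum_op"))  # -> None
```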
diff --git a/mipengine/controller/cleaner.py b/mipengine/controller/cleaner.py
index 08e1152c21704c1ea3a675335f0a3b6bd3acc37b..9cf58e18a928010c9ad8b3e6e5343da36710dacd 100644
--- a/mipengine/controller/cleaner.py
+++ b/mipengine/controller/cleaner.py
@@ -22,6 +22,7 @@ class _NodeInfoDTO(BaseModel):
     queue_address: str
     db_address: str
     tasks_timeout: int
+    run_udf_task_timeout: int
 
     class Config:
         allow_mutation = False
@@ -107,6 +108,7 @@ class Cleaner:
                 queue_address=":".join([str(global_node.ip), str(global_node.port)]),
                 db_address=":".join([str(global_node.db_ip), str(global_node.db_port)]),
                 tasks_timeout=controller_config.rabbitmq.celery_tasks_timeout,
+                run_udf_task_timeout=controller_config.rabbitmq.celery_run_udf_task_timeout,
             )
 
         if node_id in local_nodes.keys():
@@ -116,6 +118,7 @@ class Cleaner:
                 queue_address=":".join([str(local_node.ip), str(local_node.port)]),
                 db_address=":".join([str(local_node.db_ip), str(local_node.db_port)]),
                 tasks_timeout=controller_config.rabbitmq.celery_tasks_timeout,
+                run_udf_task_timeout=controller_config.rabbitmq.celery_run_udf_task_timeout,
             )
 
         raise KeyError(f"Node with id '{node_id}' is not currently available.")
@@ -132,6 +135,7 @@ def _create_node_task_handler(node_info: _NodeInfoDTO) -> NodeTasksHandlerCelery
         node_queue_addr=node_info.queue_address,
         node_db_addr=node_info.db_address,
         tasks_timeout=node_info.tasks_timeout,
+        run_udf_task_timeout=node_info.run_udf_task_timeout,
     )
 
 
diff --git a/mipengine/controller/config.toml b/mipengine/controller/config.toml
index e129a365875e7d890feb1f6028993596c9f31da5..af536776f98a0e9594798e686422fe7462f528bc 100644
--- a/mipengine/controller/config.toml
+++ b/mipengine/controller/config.toml
@@ -20,6 +20,7 @@ user = "user"
 password = "password"
 vhost = "user_vhost"
 celery_tasks_timeout="$CELERY_TASKS_TIMEOUT"
+celery_run_udf_task_timeout="$CELERY_RUN_UDF_TASK_TIMEOUT"
 celery_tasks_max_retries=3
 celery_tasks_interval_start=0
 celery_tasks_interval_step=0.2
@@ -29,3 +30,5 @@ celery_tasks_interval_max=0.5
 enabled = "$SMPC_ENABLED"
 optional = "$SMPC_OPTIONAL"
 coordinator_address = "$SMPC_COORDINATOR_ADDRESS"
+get_result_interval = "$SMPC_GET_RESULT_INTERVAL"
+get_result_max_retries = "$SMPC_GET_RESULT_MAX_RETRIES"
diff --git a/mipengine/controller/controller.py b/mipengine/controller/controller.py
index 5317ec76ac8140724b30921f850f6b2329d58c18..4ac8c5580a1fbeef7652870fd1132dc6f3a39a9b 100644
--- a/mipengine/controller/controller.py
+++ b/mipengine/controller/controller.py
@@ -27,6 +27,7 @@ class _NodeInfoDTO(BaseModel):
     queue_address: str
     db_address: str
     tasks_timeout: int
+    run_udf_task_timeout: int
 
     class Config:
         allow_mutation = False
@@ -199,6 +200,7 @@ class Controller:
                 queue_address=":".join([str(global_node.ip), str(global_node.port)]),
                 db_address=":".join([str(global_node.db_ip), str(global_node.db_port)]),
                 tasks_timeout=controller_config.rabbitmq.celery_tasks_timeout,
+                run_udf_task_timeout=controller_config.rabbitmq.celery_run_udf_task_timeout,
             )
         )
 
@@ -222,6 +224,7 @@ class Controller:
             queue_address=":".join([str(node.ip), str(node.port)]),
             db_address=":".join([str(node.db_ip), str(node.db_port)]),
             tasks_timeout=controller_config.rabbitmq.celery_tasks_timeout,
+            run_udf_task_timeout=controller_config.rabbitmq.celery_run_udf_task_timeout,
         )
 
     def _get_nodes_info_by_dataset(
@@ -247,6 +250,7 @@ class Controller:
                         [str(local_node.db_ip), str(local_node.db_port)]
                     ),
                     tasks_timeout=controller_config.rabbitmq.celery_tasks_timeout,
+                    run_udf_task_timeout=controller_config.rabbitmq.celery_run_udf_task_timeout,
                 )
             )
 
@@ -259,6 +263,7 @@ def _create_node_task_handler(node_info: _NodeInfoDTO) -> NodeTasksHandlerCelery
         node_queue_addr=node_info.queue_address,
         node_db_addr=node_info.db_address,
         tasks_timeout=node_info.tasks_timeout,
+        run_udf_task_timeout=node_info.run_udf_task_timeout,
     )
 
 
diff --git a/mipengine/controller/node_landscape_aggregator.py b/mipengine/controller/node_landscape_aggregator.py
index 6a539c33a799e5cc7630b10fc53182690d9a44c1..0fdc3f51727fad736a67e9ad1a0186d3ae93851f 100644
--- a/mipengine/controller/node_landscape_aggregator.py
+++ b/mipengine/controller/node_landscape_aggregator.py
@@ -42,7 +42,6 @@ async def _get_nodes_info(nodes_socket_addr: List[str]) -> List[NodeInfo]:
         celery_app: celery_app.signature(GET_NODE_INFO_SIGNATURE)
         for celery_app in celery_apps
     }
-
     tasks_coroutines = [
         _task_to_async(task, app=app)(request_id=NODE_LANDSCAPE_AGGREGATOR_REQUEST_ID)
         for app, task in nodes_task_signature.items()
diff --git a/mipengine/controller/node_tasks_handler_celery.py b/mipengine/controller/node_tasks_handler_celery.py
index 7dbf8da561e6935df76c123919775073907df526..c3622dc9560b99358d68264e86ff24dda3df26f8 100644
--- a/mipengine/controller/node_tasks_handler_celery.py
+++ b/mipengine/controller/node_tasks_handler_celery.py
@@ -120,12 +120,18 @@ class NodeTasksHandlerCelery(INodeTasksHandler):
 
     # TODO create custom type and validator for the socket address
     def __init__(
-        self, node_id: str, node_queue_addr: str, node_db_addr: str, tasks_timeout
+        self,
+        node_id: str,
+        node_queue_addr: str,
+        node_db_addr: str,
+        tasks_timeout: int,
+        run_udf_task_timeout: int,
     ):
         self._node_id = node_id
         self._celery_app = get_node_celery_app(node_queue_addr)
         self._db_address = node_db_addr
         self._tasks_timeout = tasks_timeout
+        self._run_udf_task_timeout = run_udf_task_timeout
 
     def close(self):
         self._celery_app.close()
@@ -341,7 +347,7 @@ class NodeTasksHandlerCelery(INodeTasksHandler):
     @time_limit_exceeded_handler
     @broker_connection_closed_handler
     def get_queued_udf_result(self, async_result: QueuedUDFAsyncResult) -> UDFResults:
-        result_str = async_result.get(self._tasks_timeout)
+        result_str = async_result.get(self._run_udf_task_timeout)
         return UDFResults.parse_raw(result_str)
 
     @time_limit_exceeded_handler
@@ -383,7 +389,7 @@ class NodeTasksHandlerCelery(INodeTasksHandler):
     @broker_connection_closed_handler
     def validate_smpc_templates_match(
         self,
-        context_id: str,
+        request_id: str,
         table_name: str,
     ):
         task_signature = self._celery_app.signature(
@@ -391,21 +397,21 @@ class NodeTasksHandlerCelery(INodeTasksHandler):
         )
         self._apply_async(
             task_signature=task_signature,
-            context_id=context_id,
+            request_id=request_id,
             table_name=table_name,
         ).get(self._tasks_timeout)
 
     @time_limit_exceeded_handler
     @broker_connection_closed_handler
     def load_data_to_smpc_client(
-        self, context_id: str, table_name: str, jobid: str
-    ) -> int:
+        self, request_id: str, table_name: str, jobid: str
+    ) -> str:
         task_signature = self._celery_app.signature(
             TASK_SIGNATURES["load_data_to_smpc_client"]
         )
         return self._apply_async(
             task_signature=task_signature,
-            context_id=context_id,
+            request_id=request_id,
             table_name=table_name,
             jobid=jobid,
         ).get(self._tasks_timeout)
@@ -414,6 +420,7 @@ class NodeTasksHandlerCelery(INodeTasksHandler):
     @broker_connection_closed_handler
     def get_smpc_result(
         self,
+        request_id: str,
         jobid: str,
         context_id: str,
         command_id: str,
@@ -422,6 +429,7 @@ class NodeTasksHandlerCelery(INodeTasksHandler):
         task_signature = self._celery_app.signature(TASK_SIGNATURES["get_smpc_result"])
         return self._apply_async(
             task_signature=task_signature,
+            request_id=request_id,
             jobid=jobid,
             context_id=context_id,
             command_id=command_id,
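To illustrate the new constructor signature: the shorter `tasks_timeout` still applies to regular tasks, while `run_udf_task_timeout` is used only when collecting queued UDF results. A hypothetical instantiation (the addresses and timeout values below are example values, not taken from the repository):

```python
from mipengine.controller.node_tasks_handler_celery import NodeTasksHandlerCelery

handler = NodeTasksHandlerCelery(
    node_id="localnode1",
    node_queue_addr="172.17.0.1:5670",  # example RabbitMQ address
    node_db_addr="172.17.0.1:50000",  # example MonetDB address
    tasks_timeout=30,  # e.g. rabbitmq.celery_tasks_timeout
    run_udf_task_timeout=120,  # e.g. rabbitmq.celery_run_udf_task_timeout
)
```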
diff --git a/mipengine/controller/node_tasks_handler_interface.py b/mipengine/controller/node_tasks_handler_interface.py
index 8d6f1a9f4daf9cd2e545bc4910ead8bbf69a7184..11e0700d9782bee4c70e1114ad2cd9d61474d0b3 100644
--- a/mipengine/controller/node_tasks_handler_interface.py
+++ b/mipengine/controller/node_tasks_handler_interface.py
@@ -167,20 +167,21 @@ class INodeTasksHandler(ABC):
     @abstractmethod
     def validate_smpc_templates_match(
         self,
-        context_id: str,
+        request_id: str,
         table_name: str,
     ):
         pass
 
     @abstractmethod
     def load_data_to_smpc_client(
-        self, context_id: str, table_name: str, jobid: str
-    ) -> int:
+        self, request_id: str, table_name: str, jobid: str
+    ) -> str:
         pass
 
     @abstractmethod
     def get_smpc_result(
         self,
+        request_id: str,
         jobid: str,
         context_id: str,
         command_id: str,
diff --git a/mipengine/node/config.toml b/mipengine/node/config.toml
index 6669821a293f90bc097cff3a0607581a2fd6457c..efe0e9684aa0e36568f65827218b6c696f39727e 100644
--- a/mipengine/node/config.toml
+++ b/mipengine/node/config.toml
@@ -4,8 +4,6 @@ role = "$NODE_ROLE"
 log_level = "$LOG_LEVEL"
 framework_log_level = "$FRAMEWORK_LOG_LEVEL"
 
-cdes_metadata_path = "./tests/test_data"
-
 [privacy]
 minimum_row_count = 10
 
@@ -36,5 +34,3 @@ optional = "$SMPC_OPTIONAL"
 client_id = "$SMPC_CLIENT_ID"
 client_address = "$SMPC_CLIENT_ADDRESS"
 coordinator_address = "$SMPC_COORDINATOR_ADDRESS"
-get_result_interval = "$SMPC_GET_RESULT_INTERVAL"
-get_result_max_retries = "$SMPC_GET_RESULT_MAX_RETRIES"
diff --git a/mipengine/node/tasks/smpc.py b/mipengine/node/tasks/smpc.py
index 00f148613e046dc0c18a33bba471b283c3b723ac..751142f2bec7cff85a3bcb4fb8c8a722f06e3dfb 100644
--- a/mipengine/node/tasks/smpc.py
+++ b/mipengine/node/tasks/smpc.py
@@ -1,5 +1,4 @@
 import json
-from time import sleep
 from typing import List
 from typing import Optional
 
@@ -19,8 +18,6 @@ from mipengine.node_tasks_DTOs import TableSchema
 from mipengine.node_tasks_DTOs import TableType
 from mipengine.smpc_cluster_comm_helpers import SMPCComputationError
 from mipengine.smpc_DTOs import SMPCRequestType
-from mipengine.smpc_DTOs import SMPCResponse
-from mipengine.smpc_DTOs import SMPCResponseStatus
 from mipengine.smpc_DTOs import SMPCResponseWithOutput
 from mipengine.table_data_DTOs import ColumnData
 
@@ -45,9 +42,7 @@ def validate_smpc_templates_match(
     Nothing, only throws exception if they don't match.
     """
 
-    templates = _get_smpc_values_from_table_data(
-        get_table_data(table_name), SMPCRequestType.SUM
-    )
+    templates = _get_smpc_values_from_table_data(get_table_data(table_name))
     first_template, *_ = templates
     for template in templates[1:]:
         if template != first_template:
@@ -58,7 +53,7 @@ def validate_smpc_templates_match(
 
 @shared_task
 @initialise_logger
-def load_data_to_smpc_client(request_id: str, table_name: str, jobid: str) -> int:
+def load_data_to_smpc_client(request_id: str, table_name: str, jobid: str) -> str:
     """
     Loads SMPC data into the SMPC client to be used for a computation.
 
@@ -77,9 +72,7 @@ def load_data_to_smpc_client(request_id: str, table_name: str, jobid: str) -> in
             "load_data_to_smpc_client is allowed only for a LOCALNODE."
         )
 
-    smpc_values, *_ = _get_smpc_values_from_table_data(
-        get_table_data(table_name), SMPCRequestType.SUM
-    )
+    smpc_values, *_ = _get_smpc_values_from_table_data(get_table_data(table_name))
 
     smpc_cluster.load_data_to_smpc_client(
         node_config.smpc.client_address, jobid, smpc_values
@@ -116,36 +109,27 @@ def get_smpc_result(
     if node_config.role != NodeRole.GLOBALNODE:
         raise PermissionError("get_smpc_result is allowed only for a GLOBALNODE.")
 
-    attempts = 0
-    while True:
-        sleep(node_config.smpc.get_result_interval)
+    response = smpc_cluster.get_smpc_result(
+        coordinator_address=node_config.smpc.coordinator_address,
+        jobid=jobid,
+    )
 
-        response = smpc_cluster.get_smpc_result(
-            coordinator_address=node_config.smpc.coordinator_address,
-            jobid=jobid,
+    # We do not need to wait for the result to be ready since the CONTROLLER will do that.
+    # The CONTROLLER will trigger this task only when the result is ready.
+    try:
+        smpc_response = SMPCResponseWithOutput.parse_raw(response)
+    except Exception as exc:
+        raise SMPCComputationError(
+            f"The smpc response could not be parsed into an SMPCResponseWithOutput. "
+            f"\nResponse: {response} \nException: {exc}"
         )
-        smpc_response = SMPCResponse.parse_raw(response)
-        if smpc_response.status == SMPCResponseStatus.FAILED:
-            raise SMPCComputationError(
-                f"The SMPC returned a {SMPCResponseStatus.FAILED} status. Body: {response}"
-            )
-        elif smpc_response.status == SMPCResponseStatus.COMPLETED:
-            # SMPCResponse contains the output only when the Status is COMPLETED
-            smpc_response_with_output = SMPCResponseWithOutput.parse_raw(response)
-            break
-
-        if attempts > node_config.smpc.get_result_max_retries:
-            raise SMPCComputationError(
-                f"Max retries for the SMPC exceeded the limit: {node_config.smpc.get_result_max_retries}"
-            )
-        attempts += 1
 
     results_table_name = _create_smpc_results_table(
         request_id=request_id,
         context_id=context_id,
         command_id=command_id,
         command_subid=command_subid,
-        smpc_op_result_data=smpc_response_with_output.computationOutput,
+        smpc_op_result_data=smpc_response.computationOutput,
     )
 
     return results_table_name
@@ -186,10 +170,6 @@ def _create_smpc_results_table(
     return table_name
 
 
-def _get_smpc_values_from_table_data(table_data: List[ColumnData], op: SMPCRequestType):
-    if op == SMPCRequestType.SUM:
-        node_id_column, values_column = table_data
-        sum_op_values = values_column.data
-    else:
-        raise NotImplementedError
-    return sum_op_values
+def _get_smpc_values_from_table_data(table_data: List[ColumnData]):
+    node_id_column, values_column = table_data
+    return values_column.data
diff --git a/mipengine/node_exceptions.py b/mipengine/node_exceptions.py
index b08f1448cc98ba3f10ebb5f435e57009622c26a8..64b5574569954833d8acfaa4c4222287455391ae 100644
--- a/mipengine/node_exceptions.py
+++ b/mipengine/node_exceptions.py
@@ -64,18 +64,6 @@ class IncompatibleTableTypes(Exception):
         super().__init__(self.message)
 
 
-class SMPCUsageError(Exception):
-    pass
-
-
-class SMPCCommunicationError(Exception):
-    pass
-
-
-class SMPCComputationError(Exception):
-    pass
-
-
 class RequestIDNotFound(Exception):
     """Exception raised while checking the presence of request_id in task's arguments.
 
diff --git a/mipengine/smpc_DTOs.py b/mipengine/smpc_DTOs.py
index 6448aa3a9266c1c7314782d342acda78b87c8052..53380bbe18757cfd0853eb09dbb93c36a0a075a5 100644
--- a/mipengine/smpc_DTOs.py
+++ b/mipengine/smpc_DTOs.py
@@ -28,7 +28,7 @@ class SMPCResponseStatus(enum.Enum):
 
 class SMPCRequestData(BaseModel):
     computationType: SMPCRequestType
-    clients: List[int]
+    clients: List[str]
 
 
 class SMPCResponse(BaseModel):
diff --git a/mipengine/smpc_cluster_comm_helpers.py b/mipengine/smpc_cluster_comm_helpers.py
index 2931d28ee7218964f9b84a6e913be150770249c8..01cc8507bba379f1afb214ea656acc4f43386778 100644
--- a/mipengine/smpc_cluster_comm_helpers.py
+++ b/mipengine/smpc_cluster_comm_helpers.py
@@ -1,4 +1,5 @@
 import json
+from logging import Logger
 from typing import List
 
 import requests
@@ -14,9 +15,12 @@ GET_RESULT_ENDPOINT = "/api/get-result/job-id/"
 def load_data_to_smpc_client(client_address: str, jobid: str, values: str):
     request_url = client_address + ADD_DATASET_ENDPOINT + jobid
     request_headers = {"Content-type": "application/json", "Accept": "text/plain"}
+    # TODO (SMPC) Currently only ints are supported so it's hardcoded
+    # https://team-1617704806227.atlassian.net/browse/MIP-518
+    data = {"type": "int", "data": json.loads(values)}
     response = requests.post(
         url=request_url,
-        data=values,
+        data=json.dumps(data),
         headers=request_headers,
     )
     if response.status_code != 200:
@@ -39,17 +43,22 @@ def get_smpc_result(coordinator_address: str, jobid: str) -> str:
     return response.text
 
 
-def trigger_smpc_computation(
+def trigger_smpc(
+    logger: Logger,
     coordinator_address: str,
     jobid: str,
     computation_type: SMPCRequestType,
-    clients: List[int],
+    clients: List[str],
 ):
     request_url = coordinator_address + TRIGGER_COMPUTATION_ENDPOINT + jobid
     request_headers = {"Content-type": "application/json", "Accept": "text/plain"}
+    data = SMPCRequestData(computationType=computation_type, clients=clients).json()
+    logger.info(f"Starting SMPC with {jobid=}...")
+    logger.debug(f"{request_url=}")
+    logger.debug(f"{data=}")
     response = requests.post(
         url=request_url,
-        data=SMPCRequestData(computationType=computation_type, clients=clients).json(),
+        data=data,
         headers=request_headers,
     )
     if response.status_code != 200:
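A small sketch of the payloads these helpers now send (the values are illustrative): `load_data_to_smpc_client` wraps the table values in a typed envelope, hardcoded to `int` as noted in the TODO, and `trigger_smpc` posts an `SMPCRequestData` whose clients are now string ids.

```python
import json

from mipengine.smpc_DTOs import SMPCRequestData
from mipengine.smpc_DTOs import SMPCRequestType

# Envelope posted by load_data_to_smpc_client for a table holding [10, 20, 30].
values = json.dumps([10, 20, 30])
client_payload = {"type": "int", "data": json.loads(values)}
print(json.dumps(client_payload))  # {"type": "int", "data": [10, 20, 30]}

# Body posted by trigger_smpc; client ids match the nodes' smpc client_id settings.
request_data = SMPCRequestData(
    computationType=SMPCRequestType.SUM, clients=["localnode1", "localnode2"]
)
print(request_data.json())
```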
diff --git a/pyproject.toml b/pyproject.toml
index 5cae757e143ce036465a5af93fbe33c9471eb7eb..e26f4f904e0c3136be6275f36e77f2674558686d 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -52,6 +52,7 @@ psutil = "^5.9.0"
 markers = [
     "database: these tests depend on an external dockerized MonetDB service running (deselect with '-m \"not database\"')",
     "slow: marks tests as slow (deselect with '-m \"not slow\"')",
+    "smpc: marks the tests that need smpc deployment (deselect with '-m \"not smpc\"')",
 ]
 filterwarnings = ["ignore::DeprecationWarning"]
 norecursedirs = ["tests/testcase_generators"]
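With the new `smpc` marker, tests that require an SMPC deployment can be tagged and deselected in environments without an SMPC cluster; a minimal, hypothetical example:

```python
import pytest


@pytest.mark.smpc  # deselect with: pytest -m "not smpc"
def test_smpc_algorithm_roundtrip():
    # Hypothetical test body; the real SMPC tests live under tests/smpc_env_tests.
    assert True
```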
diff --git a/tasks.py b/tasks.py
index 97911385829cdc18d63c2872677853c64ad4c835..f12929f9b06d59a225f781c87156ba444b208ce8 100644
--- a/tasks.py
+++ b/tasks.py
@@ -46,6 +46,7 @@ the node service will be called 'localnode1' and should be referenced using that
 Paths are subject to change so in the following documentation the global variables will be used.
 
 """
+import copy
 import itertools
 import json
 import pathlib
@@ -87,6 +88,18 @@ TEST_DATA_FOLDER = Path(tests.__file__).parent / "test_data"
 ALGORITHM_FOLDERS_ENV_VARIABLE = "ALGORITHM_FOLDERS"
 MIPENGINE_NODE_CONFIG_FILE = "MIPENGINE_NODE_CONFIG_FILE"
 
+SMPC_COORDINATOR_PORT = 12314
+SMPC_COORDINATOR_DB_PORT = 27017
+SMPC_COORDINATOR_QUEUE_PORT = 6379
+SMPC_PLAYER_BASE_PORT = 7000
+SMPC_CLIENT_BASE_PORT = 9000
+SMPC_COORDINATOR_NAME = "smpc_coordinator"
+SMPC_COORDINATOR_DB_NAME = "smpc_coordinator_db"
+SMPC_COORDINATOR_QUEUE_NAME = "smpc_coordinator_queue"
+SMPC_PLAYER_BASE_NAME = "smpc_player"
+SMPC_CLIENT_BASE_NAME = "smpc_client"
+
+
 # TODO Add pre-tasks when this is implemented https://github.com/pyinvoke/invoke/issues/170
 # Right now if we call a task from another task, the "pre"-task is not executed
 
@@ -112,7 +125,7 @@ def create_configs(c):
         template_node_config = toml.load(fp)
 
     for node in deployment_config["nodes"]:
-        node_config = template_node_config.copy()
+        node_config = copy.deepcopy(template_node_config)
 
         node_config["identifier"] = node["id"]
         node_config["role"] = node["role"]
@@ -130,7 +143,17 @@ def create_configs(c):
         ]
 
         node_config["smpc"]["enabled"] = deployment_config["smpc"]["enabled"]
-        node_config["smpc"]["optional"] = deployment_config["smpc"]["optional"]
+        if node_config["smpc"]["enabled"]:
+            node_config["smpc"]["optional"] = deployment_config["smpc"]["optional"]
+            if node["role"] == "GLOBALNODE":
+                node_config["smpc"][
+                    "coordinator_address"
+                ] = f"http://{deployment_config['ip']}:{SMPC_COORDINATOR_PORT}"
+            else:
+                node_config["smpc"]["client_id"] = node["id"]
+                node_config["smpc"][
+                    "client_address"
+                ] = f"http://{deployment_config['ip']}:{node['smpc_client_port']}"
 
         node_config_file = NODES_CONFIG_DIR / f"{node['id']}.toml"
         with open(node_config_file, "w+") as fp:
@@ -139,7 +162,7 @@ def create_configs(c):
     # Create the controller config file
     with open(CONTROLLER_CONFIG_TEMPLATE_FILE) as fp:
         template_controller_config = toml.load(fp)
-    controller_config = template_controller_config.copy()
+    controller_config = copy.deepcopy(template_controller_config)
     controller_config["log_level"] = deployment_config["log_level"]
     controller_config["framework_log_level"] = deployment_config["framework_log_level"]
 
@@ -149,6 +172,9 @@ def create_configs(c):
     controller_config["rabbitmq"]["celery_tasks_timeout"] = deployment_config[
         "celery_tasks_timeout"
     ]
+    controller_config["rabbitmq"]["celery_run_udf_task_timeout"] = deployment_config[
+        "celery_run_udf_task_timeout"
+    ]
     controller_config["deployment_type"] = "LOCAL"
 
     controller_config["localnodes"]["config_file"] = str(
@@ -166,7 +192,18 @@ def create_configs(c):
     ]["contextid_release_timelimit"]
 
     controller_config["smpc"]["enabled"] = deployment_config["smpc"]["enabled"]
-    controller_config["smpc"]["optional"] = deployment_config["smpc"]["optional"]
+    if controller_config["smpc"]["enabled"]:
+        controller_config["smpc"]["optional"] = deployment_config["smpc"]["optional"]
+        controller_config["smpc"][
+            "coordinator_address"
+        ] = f"http://{deployment_config['ip']}:{SMPC_COORDINATOR_PORT}"
+
+        controller_config["smpc"]["get_result_interval"] = deployment_config["smpc"][
+            "get_result_interval"
+        ]
+        controller_config["smpc"]["get_result_max_retries"] = deployment_config["smpc"][
+            "get_result_max_retries"
+        ]
 
     CONTROLLER_CONFIG_DIR.mkdir(parents=True, exist_ok=True)
     controller_config_file = CONTROLLER_CONFIG_DIR / "controller.toml"
@@ -191,13 +228,14 @@ def install_dependencies(c):
 
 
 @task
-def rm_containers(c, container_name=None, monetdb=False, rabbitmq=False):
+def rm_containers(c, container_name=None, monetdb=False, rabbitmq=False, smpc=False):
     """
     Remove the specified docker containers, either by container or relative name.
 
     :param container_name: If set, removes the container with the specified name.
     :param monetdb: If True, it will remove all monetdb containers.
     :param rabbitmq: If True, it will remove all rabbitmq containers.
+    :param smpc: If True, it will remove all smpc-related containers.
 
     If nothing is set, nothing is removed.
     """
@@ -206,6 +244,8 @@ def rm_containers(c, container_name=None, monetdb=False, rabbitmq=False):
         names.append("monetdb")
     if rabbitmq:
         names.append("rabbitmq")
+    if smpc:
+        names.append("smpc")
     if container_name:
         names.append(container_name)
     if not names:
@@ -216,11 +256,11 @@ def rm_containers(c, container_name=None, monetdb=False, rabbitmq=False):
     for name in names:
         container_ids = run(c, f"docker ps -qa --filter name={name}", show_ok=False)
         if container_ids.stdout:
-            message(f"Removing {name} container...", Level.HEADER)
+            message(f"Removing {name} container(s)...", Level.HEADER)
             cmd = f"docker rm -vf $(docker ps -qa --filter name={name})"
             run(c, cmd)
         else:
-            message(f"No {name} container to remove", level=Level.HEADER)
+            message(f"No {name} container to remove.", level=Level.HEADER)
 
 
 @task(iterable=["node"])
@@ -571,7 +611,7 @@ def start_controller(c, detached=False, algorithm_folders=None):
             with c.prefix("export QUART_APP=mipengine/controller/api/app:app"):
                 outpath = OUTDIR / "controller.out"
                 if detached:
-                    cmd = f"PYTHONPATH={PROJECT_ROOT} poetry run quart run --host=0.0.0.0>> {outpath} 2>&1"
+                    cmd = f"PYTHONPATH={PROJECT_ROOT} poetry run quart run --host=0.0.0.0 >> {outpath} 2>&1"
                     run(c, cmd, wait=False)
                 else:
                     cmd = (
@@ -591,6 +631,7 @@ def deploy(
     framework_log_level=None,
     monetdb_image=None,
     algorithm_folders=None,
+    smpc=None,
 ):
     """
     Install dependencies, (re)create all the containers and (re)start all the services.
@@ -602,7 +643,8 @@ def deploy(
     :param log_level: Used for the dev logs. If not provided, it looks in the `DEPLOYMENT_CONFIG_FILE`.
     :param framework_log_level: Used for the engine services. If not provided, it looks in the `DEPLOYMENT_CONFIG_FILE`.
     :param monetdb_image: Used for the db containers. If not provided, it looks in the `DEPLOYMENT_CONFIG_FILE`.
-    :param algorithm_folders: Used from the services.
+    :param algorithm_folders: Used by the services. If not provided, it looks in the `DEPLOYMENT_CONFIG_FILE`.
+    :param smpc: Deploy the SMPC cluster as well. If not provided, it looks in the `DEPLOYMENT_CONFIG_FILE`.
     """
 
     if not log_level:
@@ -614,10 +656,16 @@ def deploy(
     if not monetdb_image:
         monetdb_image = get_deployment_config("monetdb_image")
 
+    if not algorithm_folders:
+        algorithm_folders = get_deployment_config("algorithm_folders")
+
+    if smpc is None:
+        smpc = get_deployment_config("smpc", subconfig="enabled")
+
     if install_dep:
         install_dependencies(c)
 
-    # start NODE services
+    # Start NODE services
     config_files = [NODES_CONFIG_DIR / file for file in listdir(NODES_CONFIG_DIR)]
     if not config_files:
         message(
@@ -650,10 +698,13 @@ def deploy(
             algorithm_folders=algorithm_folders,
         )
 
-    # start CONTROLLER service
+    # Start CONTROLLER service
     if start_controller_ or start_all:
         start_controller(c, detached=True, algorithm_folders=algorithm_folders)
 
+    if smpc:
+        deploy_smpc(c)
+
 
 @task
 def attach(c, node=None, controller=False, db=None):
@@ -681,7 +732,7 @@ def cleanup(c):
     """Kill all node/controller services and remove all monetdb/rabbitmq containers."""
     kill_controller(c)
     kill_node(c, all_=True)
-    rm_containers(c, monetdb=True, rabbitmq=True)
+    rm_containers(c, monetdb=True, rabbitmq=True, smpc=True)
     if OUTDIR.exists():
         message(f"Removing {OUTDIR}...", level=Level.HEADER)
         for outpath in OUTDIR.glob("*.out"):
@@ -746,6 +797,200 @@ def kill_all_flowers(c):
         message(f"No flower container to remove", level=Level.HEADER)
 
 
+def start_smpc_coordinator_db(c, image):
+    container_ports = f"{SMPC_COORDINATOR_DB_PORT}:27017"
+    message(
+        f"Starting container {SMPC_COORDINATOR_DB_NAME} on ports {container_ports}...",
+        Level.HEADER,
+    )
+    env_variables = (
+        "-e MONGO_INITDB_ROOT_USERNAME=sysadmin "
+        "-e MONGO_INITDB_ROOT_PASSWORD=123qwe "
+    )
+    cmd = f"docker run -d -p {container_ports} {env_variables} --name {SMPC_COORDINATOR_DB_NAME} {image}"
+    run(c, cmd)
+
+
+def start_smpc_coordinator_queue(c, image):
+    container_ports = f"{SMPC_COORDINATOR_QUEUE_PORT}:6379"
+    message(
+        f"Starting container {SMPC_COORDINATOR_QUEUE_NAME} on ports {container_ports}...",
+        Level.HEADER,
+    )
+    container_cmd = "redis-server --requirepass agora"
+    cmd = f"""docker run -d -p {container_ports} -e REDIS_REPLICATION_MODE=master --name {SMPC_COORDINATOR_QUEUE_NAME} {image} {container_cmd}"""
+    run(c, cmd)
+
+
+def start_smpc_coordinator_container(c, ip, image):
+    container_ports = f"{SMPC_COORDINATOR_PORT}:12314"
+    message(
+        f"Starting container {SMPC_COORDINATOR_NAME} on ports {container_ports}...",
+        Level.HEADER,
+    )
+    container_cmd = "python coordinator.py"
+    env_variables = (
+        f"-e PLAYER_REPO_0=http://{ip}:7000 "
+        f"-e PLAYER_REPO_1=http://{ip}:7001 "
+        f"-e PLAYER_REPO_2=http://{ip}:7002 "
+        f"-e REDIS_HOST={ip} "
+        f"-e REDIS_PORT={SMPC_COORDINATOR_QUEUE_PORT} "
+        "-e REDIS_PSWD=agora "
+        f"-e DB_URL={ip}:{SMPC_COORDINATOR_DB_PORT} "
+        "-e DB_UNAME=sysadmin "
+        "-e DB_PSWD=123qwe "
+    )
+    cmd = f"""docker run -d -p {container_ports} {env_variables} --name {SMPC_COORDINATOR_NAME} {image} {container_cmd}"""
+    run(c, cmd)
+
+
+@task
+def start_smpc_coordinator(
+    c, ip=None, smpc_image=None, smpc_db_image=None, smpc_queue_image=None
+):
+    """
+    (Re)Creates all needed SMPC coordinator containers. If the containers exist, it will remove them and create them again.
+
+    :param ip: The ip to use for container communication. If not set, it will read it from the `DEPLOYMENT_CONFIG_FILE`.
+    :param smpc_image: The coordinator image to deploy. If not set, it will read it from the `DEPLOYMENT_CONFIG_FILE`.
+    :param smpc_db_image: The db image to deploy. If not set, it will read it from the `DEPLOYMENT_CONFIG_FILE`.
+    :param smpc_queue_image: The queue image to deploy. If not set, it will read it from the `DEPLOYMENT_CONFIG_FILE`.
+    """
+
+    if not ip:
+        ip = get_deployment_config("ip")
+    if not smpc_image:
+        smpc_image = get_deployment_config("smpc", subconfig="smpc_image")
+    if not smpc_db_image:
+        smpc_db_image = get_deployment_config("smpc", subconfig="db_image")
+    if not smpc_queue_image:
+        smpc_queue_image = get_deployment_config("smpc", subconfig="queue_image")
+
+    get_docker_image(c, smpc_image)
+    get_docker_image(c, smpc_db_image)
+    get_docker_image(c, smpc_queue_image)
+
+    rm_containers(c, container_name="smpc_coordinator")
+
+    start_smpc_coordinator_db(c, smpc_db_image)
+    start_smpc_coordinator_queue(c, smpc_queue_image)
+    start_smpc_coordinator_container(c, ip, smpc_image)
+
+
+def start_smpc_player(c, ip, id, image):
+    name = f"{SMPC_PLAYER_BASE_NAME}_{id}"
+    message(
+        f"Starting container {name} ...",
+        Level.HEADER,
+    )
+    container_cmd = f"python player.py {id}"  # SMPC player id cannot be alphanumeric
+    env_variables = (
+        f"-e PLAYER_REPO_0=http://{ip}:7000 "
+        f"-e PLAYER_REPO_1=http://{ip}:7001 "
+        f"-e PLAYER_REPO_2=http://{ip}:7002 "
+        f"-e COORDINATOR_URL=http://{ip}:{SMPC_COORDINATOR_PORT} "
+        f"-e DB_URL={ip}:{SMPC_COORDINATOR_DB_PORT} "
+        "-e DB_UNAME=sysadmin "
+        "-e DB_PSWD=123qwe "
+    )
+    container_ports = (
+        f"-p {6000 + id}:{6000 + id} "
+        f"-p {SMPC_PLAYER_BASE_PORT + id}:{7000 + id} "
+        f"-p {14000 + id}:{14000 + id} "
+    )  # SMPC player ports are offset by the player id
+    cmd = f"""docker run -d {container_ports} {env_variables} --name {name} {image} {container_cmd}"""
+    run(c, cmd)
+
+
+@task
+def start_smpc_players(c, ip=None, image=None):
+    """
+    (Re)Creates 3 SMPC player containers. If the containers exist, it will remove them and create them again.
+
+    :param ip: The ip to use for container communication. If not set, it will read it from the `DEPLOYMENT_CONFIG_FILE`.
+    :param image: The smpc player image to deploy. If not set, it will read it from the `DEPLOYMENT_CONFIG_FILE`.
+    """
+
+    if not ip:
+        ip = get_deployment_config("ip")
+    if not image:
+        image = get_deployment_config("smpc", subconfig="smpc_image")
+
+    get_docker_image(c, image)
+
+    rm_containers(c, container_name="smpc_player")
+
+    for i in range(3):
+        start_smpc_player(c, ip, i, image)
+
+
+def start_smpc_client(c, node_id, ip, image):
+    node_config_file = NODES_CONFIG_DIR / f"{node_id}.toml"
+    with open(node_config_file) as fp:
+        node_config = toml.load(fp)
+
+    client_id = node_config["smpc"]["client_id"]
+    client_port = node_config["smpc"]["client_address"].split(":")[
+        2
+    ]  # Get the port from the address e.g. 'http://172.17.0.1:9000'
+
+    name = f"{SMPC_CLIENT_BASE_NAME}_{client_id}"
+    message(
+        f"Starting container {name} ...",
+        Level.HEADER,
+    )
+    container_cmd = f"python client.py"
+    env_variables = (
+        f"-e PLAYER_REPO_0=http://{ip}:7000 "
+        f"-e PLAYER_REPO_1=http://{ip}:7001 "
+        f"-e PLAYER_REPO_2=http://{ip}:7002 "
+        f"-e COORDINATOR_URL=http://{ip}:{SMPC_COORDINATOR_PORT} "
+        f"-e ID={client_id} "
+        f"-e PORT={client_port} "
+    )
+    container_ports = f"-p {client_port}:{client_port} "
+    cmd = f"""docker run -d {container_ports} {env_variables} --name {name} {image} {container_cmd}"""
+    run(c, cmd)
+
+
+@task
+def start_smpc_clients(c, ip=None, image=None):
+    """
+    (Re)Creates one SMPC client container per local node. If the containers exist, it will remove them and create them again.
+
+    :param ip: The ip to use for container communication. If not set, it will read it from the `DEPLOYMENT_CONFIG_FILE`.
+    :param image: The smpc client image to deploy. If not set, it will read it from the `DEPLOYMENT_CONFIG_FILE`.
+    """
+
+    if not ip:
+        ip = get_deployment_config("ip")
+    if not image:
+        image = get_deployment_config("smpc", subconfig="smpc_image")
+
+    get_docker_image(c, image)
+
+    rm_containers(c, container_name="smpc_client")
+
+    for node_id in get_localnode_ids():
+        start_smpc_client(c, node_id, ip, image)
+
+
+@task
+def deploy_smpc(c, ip=None, smpc_image=None, smpc_db_image=None, smpc_queue_image=None):
+    """
+    (Re)Creates all needed SMPC containers. If the containers exist, it will remove them and create them again.
+
+    :param ip: The ip to use for container communication. If not set, it will read it from the `DEPLOYMENT_CONFIG_FILE`.
+    :param smpc_image: The coordinator image to deploy. If not set, it will read it from the `DEPLOYMENT_CONFIG_FILE`.
+    :param smpc_db_image: The db image to deploy. If not set, it will read it from the `DEPLOYMENT_CONFIG_FILE`.
+    :param smpc_queue_image: The queue image to deploy. If not set, it will read it from the `DEPLOYMENT_CONFIG_FILE`.
+    """
+    rm_containers(c, smpc=True)
+    start_smpc_coordinator(c, ip, smpc_image, smpc_db_image, smpc_queue_image)
+    start_smpc_players(c, ip, smpc_image)
+    start_smpc_clients(c, ip, smpc_image)
+
+
 @task(iterable=["db"])
 def reload_udfio(c, db):
     """
@@ -847,13 +1092,14 @@ def spin_wheel(promise=None, time=None):
                 break
 
 
-def get_deployment_config(config):
+def get_deployment_config(config, subconfig=None):
     if not Path(DEPLOYMENT_CONFIG_FILE).is_file():
         raise FileNotFoundError(
             f"Please provide a --{config} parameter or create a deployment config file '{DEPLOYMENT_CONFIG_FILE}'"
         )
-
     with open(DEPLOYMENT_CONFIG_FILE) as fp:
+        if subconfig:
+            return toml.load(fp)[config][subconfig]
         return toml.load(fp)[config]
 
 
@@ -880,6 +1126,18 @@ def get_node_ids(all_=False, node=None):
     return node_ids
 
 
+def get_localnode_ids():
+    all_node_ids = get_node_ids(all_=True)
+    local_node_ids = []
+    for node_id in all_node_ids:
+        node_config_file = NODES_CONFIG_DIR / f"{node_id}.toml"
+        with open(node_config_file) as fp:
+            node_config = toml.load(fp)
+        if node_config["role"] == "LOCALNODE":
+            local_node_ids.append(node_id)
+    return local_node_ids
+
+
 def get_docker_image(c, image, always_pull=False):
     """
     Fetches a docker image locally.
diff --git a/tests/README.md b/tests/README.md
index 732125f90dbb3b510399020271da73abbf518e7a..dc605cbc4853626351df0aa54501ecc076516c38 100644
--- a/tests/README.md
+++ b/tests/README.md
@@ -16,7 +16,7 @@ These tests are run:
 
 - with the basic (monetdb, rabbitmq) docker images pre-built,
 - with 3 nodes(globalnode, 2 localnodes) and 1 controller,
-- with the demo data loaded using `inv load-data` and
+- with the test data loaded using `inv load-data` and
 - with the production and testing algorithms loaded,
 - using the `inv deploy` command of the `tasks.py` with a `.deployment.toml` template,
 - can be run based on the nodes' information in the `.deployment.toml`.
@@ -27,11 +27,23 @@ These tests are run:
 
 - with all (monetdb, rabbitmq, node, controller) docker images pre-built,
 - with 4 nodes(globalnode, 3 localnodes) and 1 controller,
-- with the demo data loaded using `inv load-data` combined with the backup logic,
+- with the test data loaded through the mip_db container,
 - with the production algorithms loaded,
 - using `helm` charts and `kind` for a pseudo federated kubernetes environment,
 - having only the controller endpoints exposed and
-- the tests contained can only be of prod_env_tests nature.
+- containing only production-type tests.
+
+### SMPC Environment
+
+These tests are run:
+
+- with the basic (monetdb, rabbitmq) docker images pre-built,
+- with 3 nodes(globalnode, 2 localnodes) and 1 controller,
+- with the test data loaded through the mip_db container,
+- with the production algorithms loaded,
+- using `helm` charts and `kind` for a pseudo federated kubernetes environment,
+- having only the controller endpoints exposed and
+- containing only production-type tests.
 
 ### Algorithm Validation Tests
 
@@ -40,7 +52,7 @@ These tests are run:
 - with the basic (monetdb, rabbitmq) docker images pre-built,
 - with 1 localnode, 1 globalnode and 1 controller,
 - with 10 localnodes, 1 globalnode and 1 controller in a different job,
-- with the demo data loaded using `inv load-data` and
+- with the test data loaded using `inv load-data` and
 - with the production algorithms loaded,
 - using the `inv deploy` command of the `tasks.py` with a `.deployment.toml` template,
 - can be run based on the nodes' information in the `.deployment.toml`.
diff --git a/tests/algorithm_validation_tests/five_node_deployment_template.toml b/tests/algorithm_validation_tests/five_node_deployment_template.toml
index dbb015f93198e2ca73a9169fa3421c221445ab56..274fe9c73893cc370376cb13d43feb6c101fb372 100644
--- a/tests/algorithm_validation_tests/five_node_deployment_template.toml
+++ b/tests/algorithm_validation_tests/five_node_deployment_template.toml
@@ -8,6 +8,7 @@ algorithm_folders = "./mipengine/algorithms"
 
 node_landscape_aggregator_update_interval = 30
 celery_tasks_timeout = 120
+celery_run_udf_task_timeout = 300
 
 [privacy]
 minimum_row_count = 0
diff --git a/tests/algorithm_validation_tests/one_node_deployment_template.toml b/tests/algorithm_validation_tests/one_node_deployment_template.toml
index 7bab6802128b9a6ea7772500af45478711df8253..46e4f4e086339d03c2b64c7543b52be1e0dbfb82 100644
--- a/tests/algorithm_validation_tests/one_node_deployment_template.toml
+++ b/tests/algorithm_validation_tests/one_node_deployment_template.toml
@@ -8,6 +8,7 @@ algorithm_folders = "./mipengine/algorithms"
 
 node_landscape_aggregator_update_interval = 30
 celery_tasks_timeout = 60
+celery_run_udf_task_timeout = 120
 
 [privacy]
 minimum_row_count = 0
diff --git a/tests/algorithms/smpc_standard_deviation_int_only.json b/tests/algorithms/smpc_standard_deviation_int_only.json
new file mode 100644
index 0000000000000000000000000000000000000000..3d8529b102bb99f88ea236e46c5b9096c250b0df
--- /dev/null
+++ b/tests/algorithms/smpc_standard_deviation_int_only.json
@@ -0,0 +1,21 @@
+{
+    "name": "smpc_standard_deviation_int_only",
+    "desc": "Standard Deviation of a column, transferring only integers, using SMPC",
+    "label": "SMPC Standard Deviation",
+    "enabled": true,
+    "inputdata": {
+        "x": {
+            "label": "column",
+            "desc": "Column",
+            "types": [
+                "real",
+                "int"
+            ],
+            "stattypes": [
+                "numerical"
+            ],
+            "notblank": true,
+            "multiple": false
+        }
+    }
+}
diff --git a/tests/algorithms/smpc_standard_deviation_int_only.py b/tests/algorithms/smpc_standard_deviation_int_only.py
new file mode 100644
index 0000000000000000000000000000000000000000..b883d9745afa5d5cee78c47c5fe32ba68813df9f
--- /dev/null
+++ b/tests/algorithms/smpc_standard_deviation_int_only.py
@@ -0,0 +1,153 @@
+import json
+from typing import TypeVar
+
+from mipengine.algorithm_result_DTOs import TabularDataResult
+from mipengine.table_data_DTOs import ColumnDataFloat
+from mipengine.table_data_DTOs import ColumnDataStr
+from mipengine.udfgen import relation
+from mipengine.udfgen import secure_transfer
+from mipengine.udfgen import state
+from mipengine.udfgen import tensor
+from mipengine.udfgen import transfer
+from mipengine.udfgen import udf
+
+
+def run(algo_interface):
+    local_run = algo_interface.run_udf_on_local_nodes
+    global_run = algo_interface.run_udf_on_global_node
+
+    X_relation, *_ = algo_interface.create_primary_data_views(
+        variable_groups=[algo_interface.x_variables],
+    )
+
+    X = local_run(
+        func=relation_to_matrix,
+        positional_args=[X_relation],
+    )
+
+    local_state, local_result = local_run(
+        func=smpc_local_step_1,
+        positional_args=[X],
+        share_to_global=[False, True],
+    )
+
+    global_state, global_result = global_run(
+        func=smpc_global_step_1,
+        positional_args=[local_result],
+        share_to_locals=[False, True],
+    )
+
+    local_result = local_run(
+        func=smpc_local_step_2,
+        positional_args=[local_state, global_result],
+        share_to_global=True,
+    )
+
+    global_result = global_run(
+        func=smpc_global_step_2,
+        positional_args=[global_state, local_result],
+    )
+
+    global_result_data = json.loads(global_result.get_table_data()[1][0])
+    std_deviation = global_result_data["deviation"]
+    min_value = global_result_data["min_value"]
+    max_value = global_result_data["max_value"]
+    x_variables = algo_interface.x_variables
+
+    result = TabularDataResult(
+        title="Standard Deviation",
+        columns=[
+            ColumnDataStr(name="variable", data=x_variables),
+            ColumnDataFloat(name="std_deviation", data=[std_deviation]),
+            ColumnDataFloat(name="min_value", data=[min_value]),
+            ColumnDataFloat(name="max_value", data=[max_value]),
+        ],
+    )
+    return result
+
+
+# ~~~~~~~~~~~~~~~~~~~~~~~~ UDFs ~~~~~~~~~~~~~~~~~~~~~~~~~~ #
+
+
+S = TypeVar("S")
+
+
+@udf(rel=relation(S), return_type=tensor(float, 2))
+def relation_to_matrix(rel):
+    return rel
+
+
+@udf(
+    table=tensor(S, 2),
+    return_type=[state(), secure_transfer(sum_op=True, min_op=True, max_op=True)],
+)
+def smpc_local_step_1(table):
+    state_ = {"table": table}
+    sum_ = 0
+    min_value = table[0][0]
+    max_value = table[0][0]
+    for (element,) in table:
+        sum_ += element
+        if element < min_value:
+            min_value = element
+        if element > max_value:
+            max_value = element
+    secure_transfer_ = {
+        "sum": {"data": int(sum_), "operation": "sum"},
+        "min": {"data": int(min_value), "operation": "min"},
+        "max": {"data": int(max_value), "operation": "max"},
+        "count": {"data": len(table), "operation": "sum"},
+    }
+    return state_, secure_transfer_
+
+
+@udf(
+    locals_result=secure_transfer(sum_op=True, min_op=True, max_op=True),
+    return_type=[state(), transfer()],
+)
+def smpc_global_step_1(locals_result):
+    total_sum = locals_result["sum"]
+    total_count = locals_result["count"]
+    average = total_sum / total_count
+    state_ = {
+        "count": total_count,
+        "min_value": locals_result["min"],
+        "max_value": locals_result["max"],
+    }
+    transfer_ = {"average": average}
+    return state_, transfer_
+
+
+@udf(
+    prev_state=state(),
+    global_transfer=transfer(),
+    return_type=secure_transfer(sum_op=True),
+)
+def smpc_local_step_2(prev_state, global_transfer):
+    deviation_sum = 0
+    for (element,) in prev_state["table"]:
+        deviation_sum += pow(element - global_transfer["average"], 2)
+    secure_transfer_ = {
+        "deviation_sum": {
+            "data": int(deviation_sum),
+            "type": "int",
+            "operation": "sum",
+        }
+    }
+    return secure_transfer_
+
+
+@udf(
+    prev_state=state(),
+    locals_result=secure_transfer(sum_op=True),
+    return_type=transfer(),
+)
+def smpc_global_step_2(prev_state, locals_result):
+    total_deviation = locals_result["deviation_sum"]
+    from math import sqrt
+
+    deviation = {
+        "deviation": sqrt(total_deviation / prev_state["count"]),
+        "min_value": prev_state["min_value"],
+        "max_value": prev_state["max_value"],
+    }
+    return deviation
diff --git a/tests/dev_env_tests/deployment_template.toml b/tests/dev_env_tests/deployment_template.toml
index 3874cccee68987c8f46c6e82f09e1751559cad29..0ea52b1e961fed1f87f43f44b66335164d8e59b7 100644
--- a/tests/dev_env_tests/deployment_template.toml
+++ b/tests/dev_env_tests/deployment_template.toml
@@ -8,6 +8,7 @@ algorithm_folders = "./mipengine/algorithms,./tests/algorithms"
 
 node_landscape_aggregator_update_interval = 10
 celery_tasks_timeout = 30
+celery_run_udf_task_timeout = 120
 
 [privacy]
 minimum_row_count = 10
diff --git a/tests/prod_env_tests/deployment_configs/kind_configuration/kind_cluster.yaml b/tests/prod_env_tests/deployment_configs/kind_configuration/kind_cluster.yaml
index a9c028704945a7583a2a9c4e07843892951a121c..b22d789f303bf38344444fa8b0edb40ed0371b5e 100644
--- a/tests/prod_env_tests/deployment_configs/kind_configuration/kind_cluster.yaml
+++ b/tests/prod_env_tests/deployment_configs/kind_configuration/kind_cluster.yaml
@@ -10,18 +10,21 @@ nodes:
     containerPath: /etc/hostname
   - hostPath: ./tests/test_data
     containerPath: /opt/mipengine/data
+
 - role: worker
   extraMounts:
   - hostPath: ./tests/prod_env_tests/deployment_configs/kind_configuration/hostname_worker1
     containerPath: /etc/hostname
   - hostPath: ./tests/test_data
     containerPath: /opt/mipengine/csvs
+
 - role: worker
   extraMounts:
   - hostPath: ./tests/prod_env_tests/deployment_configs/kind_configuration/hostname_worker2
     containerPath: /etc/hostname
   - hostPath: ./tests/test_data
     containerPath: /opt/mipengine/csvs
+
 - role: worker
   extraMounts:
   - hostPath: ./tests/prod_env_tests/deployment_configs/kind_configuration/hostname_worker3
diff --git a/tests/prod_env_tests/deployment_configs/kubernetes_values.yaml b/tests/prod_env_tests/deployment_configs/kubernetes_values.yaml
index 38cd557ab7e47677479466a69b438700fa054f68..866b201caad90b9a3cb026b599ddad8aa1c0cd45 100644
--- a/tests/prod_env_tests/deployment_configs/kubernetes_values.yaml
+++ b/tests/prod_env_tests/deployment_configs/kubernetes_values.yaml
@@ -13,9 +13,9 @@ csvs_datapath: /opt/mipengine/csvs
 controller:
   node_landscape_aggregator_update_interval: 20
   celery_tasks_timeout: 10
+  celery_run_udf_task_timeout: 120
   nodes_cleanup_interval: 60
   cleanup_file_folder: /opt/cleanup
 
 smpc:
   enabled: false
-  optional: false
diff --git a/tests/smpc_env_tests/deployment_configs/kind_configuration/hostname_master b/tests/smpc_env_tests/deployment_configs/kind_configuration/hostname_master
new file mode 100644
index 0000000000000000000000000000000000000000..1f7391f92b6a3792204e07e99f71f643cc35e7e1
--- /dev/null
+++ b/tests/smpc_env_tests/deployment_configs/kind_configuration/hostname_master
@@ -0,0 +1 @@
+master
diff --git a/tests/smpc_env_tests/deployment_configs/kind_configuration/hostname_worker1 b/tests/smpc_env_tests/deployment_configs/kind_configuration/hostname_worker1
new file mode 100644
index 0000000000000000000000000000000000000000..949efd7cd0b3629a3ece03c50e68fa44b1808645
--- /dev/null
+++ b/tests/smpc_env_tests/deployment_configs/kind_configuration/hostname_worker1
@@ -0,0 +1 @@
+localnode1
diff --git a/tests/smpc_env_tests/deployment_configs/kind_configuration/hostname_worker2 b/tests/smpc_env_tests/deployment_configs/kind_configuration/hostname_worker2
new file mode 100644
index 0000000000000000000000000000000000000000..08839fff717a334d8bed14aac1b51c77b73bf298
--- /dev/null
+++ b/tests/smpc_env_tests/deployment_configs/kind_configuration/hostname_worker2
@@ -0,0 +1 @@
+localnode2
diff --git a/tests/smpc_env_tests/deployment_configs/kind_configuration/kind_cluster.yaml b/tests/smpc_env_tests/deployment_configs/kind_configuration/kind_cluster.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..5a812bd6b705e16ccc3662ffccb44e5211935d0a
--- /dev/null
+++ b/tests/smpc_env_tests/deployment_configs/kind_configuration/kind_cluster.yaml
@@ -0,0 +1,34 @@
+kind: Cluster
+apiVersion: kind.x-k8s.io/v1alpha4
+nodes:
+- role: control-plane
+  extraPortMappings:
+  - containerPort: 30000
+    hostPort: 5000
+  - containerPort: 31000	# Used for debugging smpc cluster
+    hostPort: 12314		# Used for debugging smpc cluster
+  extraMounts:
+  - hostPath: ./tests/smpc_env_tests/deployment_configs/kind_configuration/hostname_master
+    containerPath: /etc/hostname
+  - hostPath: ./tests/test_data
+    containerPath: /opt/mipengine/data
+
+- role: worker
+  extraMounts:
+  - hostPath: ./tests/smpc_env_tests/deployment_configs/kind_configuration/hostname_worker1
+    containerPath: /etc/hostname
+  - hostPath: ./tests/test_data
+    containerPath: /opt/mipengine/csvs
+  extraPortMappings:
+  - containerPort: 32000	# Used for debugging smpc cluster
+    hostPort: 9000		# Used for debugging smpc cluster
+
+- role: worker
+  extraMounts:
+  - hostPath: ./tests/smpc_env_tests/deployment_configs/kind_configuration/hostname_worker2
+    containerPath: /etc/hostname
+  - hostPath: ./tests/test_data
+    containerPath: /opt/mipengine/csvs
+  extraPortMappings:
+  - containerPort: 32000	# Used for debugging smpc cluster
+    hostPort: 9001		# Used for debugging smpc cluster
diff --git a/tests/smpc_env_tests/deployment_configs/kubernetes_values.yaml b/tests/smpc_env_tests/deployment_configs/kubernetes_values.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..268ba55aac8c7fad26864cd55bf5a476aeabaa12
--- /dev/null
+++ b/tests/smpc_env_tests/deployment_configs/kubernetes_values.yaml
@@ -0,0 +1,25 @@
+localnodes: 2
+
+mipengine_images:
+  repository: madgik
+  version: dev
+
+log_level: DEBUG
+framework_log_level: INFO
+
+monetdb_storage: /opt/mipengine/db
+csvs_datapath: /opt/mipengine/csvs
+
+controller:
+  node_landscape_aggregator_update_interval: 20
+  celery_tasks_timeout: 10
+  celery_run_udf_task_timeout: 120
+  nodes_cleanup_interval: 60
+  cleanup_file_folder: /opt/cleanup
+
+smpc:
+  enabled: true
+  optional: false
+  image: gpikra/coordinator:v6.0.0
+  db_image: mongo:5.0.8
+  queue_image: redis:alpine3.15
diff --git a/tests/smpc_env_tests/test_smpc_algorithms.py b/tests/smpc_env_tests/test_smpc_algorithms.py
deleted file mode 100644
index 51ead72cb74440dce0b835bea4cff0ebc4fc48dd..0000000000000000000000000000000000000000
--- a/tests/smpc_env_tests/test_smpc_algorithms.py
+++ /dev/null
@@ -1,207 +0,0 @@
-import json
-import re
-
-import numpy as np
-import pytest
-import requests
-
-from tests.prod_env_tests import algorithms_url
-
-
-def get_parametrization_list_success_cases():
-    parametrization_list = []
-
-    # ~~~~~~~~~~success case 1~~~~~~~~~~
-    algorithm_name = "smpc_standard_deviation"
-    request_dict = {
-        "inputdata": {
-            "data_model": "dementia:0.1",
-            "datasets": ["edsd"],
-            "x": [
-                "lefthippocampus",
-            ],
-            "filters": {
-                "condition": "AND",
-                "rules": [
-                    {
-                        "id": "dataset",
-                        "type": "string",
-                        "value": ["edsd"],
-                        "operator": "in",
-                    },
-                    {
-                        "condition": "AND",
-                        "rules": [
-                            {
-                                "id": variable,
-                                "type": "string",
-                                "operator": "is_not_null",
-                                "value": None,
-                            }
-                            for variable in [
-                                "lefthippocampus",
-                            ]
-                        ],
-                    },
-                ],
-                "valid": True,
-            },
-        },
-    }
-    expected_response = {
-        "title": "Standard Deviation",
-        "columns": [
-            {"name": "variable", "data": ["lefthippocampus"], "type": "STR"},
-            {"name": "std_deviation", "data": [0.3634506955662605], "type": "FLOAT"},
-            {"name": "min_value", "data": [1.3047], "type": "FLOAT"},
-            {"name": "max_value", "data": [4.4519], "type": "FLOAT"},
-        ],
-    }
-    parametrization_list.append((algorithm_name, request_dict, expected_response))
-    # END ~~~~~~~~~~success case 1~~~~~~~~~~
-
-    # ~~~~~~~~~~success case 2~~~~~~~~~~
-    algorithm_name = "smpc_standard_deviation"
-    request_dict = {
-        "inputdata": {
-            "data_model": "dementia:0.1",
-            "datasets": ["edsd"],
-            "x": [
-                "lefthippocampus",
-            ],
-            "filters": {
-                "condition": "AND",
-                "rules": [
-                    {
-                        "id": "dataset",
-                        "type": "string",
-                        "value": ["edsd"],
-                        "operator": "in",
-                    },
-                    {
-                        "condition": "AND",
-                        "rules": [
-                            {
-                                "id": variable,
-                                "type": "string",
-                                "operator": "is_not_null",
-                                "value": None,
-                            }
-                            for variable in [
-                                "lefthippocampus",
-                            ]
-                        ],
-                    },
-                ],
-                "valid": True,
-            },
-        },
-        "flags": {
-            "smpc": True,
-        },
-    }
-    expected_response = {
-        "title": "Standard Deviation",
-        "columns": [
-            {"name": "variable", "data": ["lefthippocampus"], "type": "STR"},
-            {"name": "std_deviation", "data": [0.3634506955662605], "type": "FLOAT"},
-            {"name": "min_value", "data": [1.3047], "type": "FLOAT"},
-            {"name": "max_value", "data": [4.4519], "type": "FLOAT"},
-        ],
-    }
-    parametrization_list.append((algorithm_name, request_dict, expected_response))
-    # END ~~~~~~~~~~success case 2~~~~~~~~~~
-    return parametrization_list
-
-
-@pytest.mark.skip(
-    reason="SMPC is not deployed in the CI yet. https://team-1617704806227.atlassian.net/browse/MIP-344"
-)
-@pytest.mark.parametrize(
-    "algorithm_name, request_dict, expected_response",
-    get_parametrization_list_success_cases(),
-)
-def test_post_smpc_algorithm(algorithm_name, request_dict, expected_response):
-    algorithm_url = algorithms_url + "/" + algorithm_name
-
-    headers = {"Content-type": "application/json", "Accept": "text/plain"}
-    response = requests.post(
-        algorithm_url,
-        data=json.dumps(request_dict),
-        headers=headers,
-    )
-    assert response.status_code == 200
-    assert json.loads(response.text) == expected_response
-
-
-def get_parametrization_list_exception_cases():
-    parametrization_list = []
-    algorithm_name = "smpc_standard_deviation"
-    request_dict = {
-        "inputdata": {
-            "data_model": "dementia:0.1",
-            "datasets": ["edsd"],
-            "x": [
-                "lefthippocampus",
-            ],
-            "filters": {
-                "condition": "AND",
-                "rules": [
-                    {
-                        "id": "dataset",
-                        "type": "string",
-                        "value": ["edsd"],
-                        "operator": "in",
-                    },
-                    {
-                        "condition": "AND",
-                        "rules": [
-                            {
-                                "id": variable,
-                                "type": "string",
-                                "operator": "is_not_null",
-                                "value": None,
-                            }
-                            for variable in [
-                                "lefthippocampus",
-                            ]
-                        ],
-                    },
-                ],
-                "valid": True,
-            },
-        },
-        "flags": {
-            "smpc": False,
-        },
-    }
-
-    expected_response = (
-        462,
-        "The computation cannot be made without SMPC.",
-    )
-
-    parametrization_list.append((algorithm_name, request_dict, expected_response))
-
-    return parametrization_list
-
-
-@pytest.mark.skip(
-    reason="SMPC is not deployed in the CI yet. https://team-1617704806227.atlassian.net/browse/MIP-344"
-)
-@pytest.mark.parametrize(
-    "algorithm_name, request_dict, expected_response",
-    get_parametrization_list_exception_cases(),
-)
-def test_post_smpc_algorithm_exception(algorithm_name, request_dict, expected_response):
-    algorithm_url = algorithms_url + "/" + algorithm_name
-
-    headers = {"Content-type": "application/json", "Accept": "text/plain"}
-    response = requests.post(
-        algorithm_url,
-        data=json.dumps(request_dict),
-        headers=headers,
-    )
-    exp_response_status, exp_response_message = expected_response
-    assert response.status_code == exp_response_status
-    assert re.search(exp_response_message, response.text)
diff --git a/tests/standalone_tests/conftest.py b/tests/standalone_tests/conftest.py
index 32339fb43ccc66b0feac7bedc7fcc0193189508a..c5b4d238413d1f6b7827fff2948bbab7c495c0c9 100644
--- a/tests/standalone_tests/conftest.py
+++ b/tests/standalone_tests/conftest.py
@@ -33,7 +33,7 @@ OUTDIR = Path("/tmp/mipengine/")
 if not OUTDIR.exists():
     OUTDIR.mkdir()
 
-COMMON_IP = "127.0.0.1"
+COMMON_IP = "172.17.0.1"
 RABBITMQ_GLOBALNODE_NAME = "rabbitmq_test_globalnode"
 RABBITMQ_LOCALNODE1_NAME = "rabbitmq_test_localnode1"
 RABBITMQ_LOCALNODE2_NAME = "rabbitmq_test_localnode2"
@@ -65,6 +65,8 @@ MONETDB_LOCALNODETMP_PORT = 61003
 MONETDB_SMPC_GLOBALNODE_PORT = 61004
 MONETDB_SMPC_LOCALNODE1_PORT = 61005
 MONETDB_SMPC_LOCALNODE2_PORT = 61006
+CONTROLLER_PORT = 4500
+CONTROLLER_SMPC_PORT = 4501
 
 GLOBALNODE_CONFIG_FILE = "testglobalnode.toml"
 LOCALNODE1_CONFIG_FILE = "testlocalnode1.toml"
@@ -73,6 +75,69 @@ LOCALNODETMP_CONFIG_FILE = "testlocalnodetmp.toml"
 GLOBALNODE_SMPC_CONFIG_FILE = "smpc_globalnode.toml"
 LOCALNODE1_SMPC_CONFIG_FILE = "smpc_localnode1.toml"
 LOCALNODE2_SMPC_CONFIG_FILE = "smpc_localnode2.toml"
+CONTROLLER_CONFIG_FILE = "testcontroller.toml"
+CONTROLLER_SMPC_CONFIG_FILE = "test_smpc_controller.toml"
+CONTROLLER_LOCALNODES_CONFIG_FILE = "test_localnodes_addresses.json"
+CONTROLLER_SMPC_LOCALNODES_CONFIG_FILE = "test_smpc_localnodes_addresses.json"
+CONTROLLER_OUTPUT_FILE = "test_controller.out"
+SMPC_CONTROLLER_OUTPUT_FILE = "test_smpc_controller.out"
+
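+# Timeouts below are in seconds: celery task timeouts used by the test task handlers
+# and the initial wait for the SMPC cluster to become ready.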
+TASKS_TIMEOUT = 10
+RUN_UDF_TASK_TIMEOUT = 120
+SMPC_CLUSTER_SLEEP_TIME = 60
+
+########### SMPC Cluster ############
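+# A local SMPC cluster used by the standalone tests: a coordinator (with its MongoDB
+# and Redis queue containers), three players and two clients, all exposed on the host.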
+SMPC_CLUSTER_IMAGE = "gpikra/coordinator:v6.0.0"
+SMPC_COORD_DB_IMAGE = "mongo:5.0.8"
+SMPC_COORD_QUEUE_IMAGE = "redis:alpine3.15"
+
+SMPC_COORD_CONT_NAME = "smpc_test_coordinator"
+SMPC_COORD_DB_CONT_NAME = "smpc_test_coordinator_db"
+SMPC_COORD_QUEUE_CONT_NAME = "smpc_test_coordinator_queue"
+SMPC_PLAYER1_CONT_NAME = "smpc_test_player1"
+SMPC_PLAYER2_CONT_NAME = "smpc_test_player2"
+SMPC_PLAYER3_CONT_NAME = "smpc_test_player3"
+SMPC_CLIENT1_CONT_NAME = "smpc_test_client1"
+SMPC_CLIENT2_CONT_NAME = "smpc_test_client2"
+
+SMPC_COORD_PORT = 12314
+SMPC_COORD_DB_PORT = 27017
+SMPC_COORD_QUEUE_PORT = 6379
+SMPC_PLAYER1_PORT1 = 6000
+SMPC_PLAYER1_PORT2 = 7000
+SMPC_PLAYER1_PORT3 = 14000
+SMPC_PLAYER2_PORT1 = 6001
+SMPC_PLAYER2_PORT2 = 7001
+SMPC_PLAYER2_PORT3 = 14001
+SMPC_PLAYER3_PORT1 = 6002
+SMPC_PLAYER3_PORT2 = 7002
+SMPC_PLAYER3_PORT3 = 14002
+SMPC_CLIENT1_PORT = 9005
+SMPC_CLIENT2_PORT = 9006
+#####################################
+
+
+# TODO Instead of the fixtures having "session" scope, they could have "function"
+# scope, but on startup each fixture should check whether its resource already
+# exists and avoid recreating it (fast). This could solve the problem of some
+# tests destroying containers in order to test such scenarios.
+
+
+def _search_for_string_in_logfile(
+    log_to_search_for: str, logspath: Path, retries: int = 100
+):
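+    # Poll the logfile until the pattern appears; the file may not exist yet while the
+    # service is starting, so FileNotFoundError is ignored between retries.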
+    for _ in range(retries):
+        try:
+            with open(logspath) as logfile:
+                if bool(re.search(log_to_search_for, logfile.read())):
+                    return
+        except FileNotFoundError:
+            pass
+        time.sleep(0.5)
+
+    raise TimeoutError(
+        f"Could not find the log '{log_to_search_for}' after '{retries}' tries. Logs available at: '{logspath}'."
+    )
 
 
 class MonetDBSetupError(Exception):
@@ -225,6 +290,7 @@ def _load_data_monetdb_container(db_ip, db_port):
         cmd, shell=True, check=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE
     )
     print(f"\nData loaded to database ({db_ip}:{db_port})")
+    time.sleep(2)  # Needed to avoid db crash while loading
 
 
 def _remove_data_model_from_localnodetmp_monetdb(data_model_code, data_model_version):
@@ -260,6 +326,20 @@ def load_data_localnodetmp(monetdb_localnodetmp):
     yield
 
 
+@pytest.fixture(scope="session")
+def load_data_smpc_localnode1(monetdb_smpc_localnode1):
+    _init_database_monetdb_container(COMMON_IP, MONETDB_SMPC_LOCALNODE1_PORT)
+    _load_data_monetdb_container(COMMON_IP, MONETDB_SMPC_LOCALNODE1_PORT)
+    yield
+
+
+@pytest.fixture(scope="session")
+def load_data_smpc_localnode2(monetdb_smpc_localnode2):
+    _init_database_monetdb_container(COMMON_IP, MONETDB_SMPC_LOCALNODE2_PORT)
+    _load_data_monetdb_container(COMMON_IP, MONETDB_SMPC_LOCALNODE2_PORT)
+    yield
+
+
 def _create_db_cursor(db_port):
     class MonetDBTesting:
         """MonetDB class used for testing."""
@@ -272,7 +352,7 @@ def _create_db_cursor(db_port):
             url = f"monetdb://{username}:{password}@{COMMON_IP}:{port}/{dbfarm}:"
             self._executor = sql.create_engine(url, echo=True)
 
-        def execute(self, query, *args, **kwargs) -> list:
+        def execute(self, query, *args, **kwargs):
             return self._executor.execute(query, *args, **kwargs)
 
     return MonetDBTesting()
@@ -543,28 +623,14 @@ def _create_node_service(algo_folders_env_variable_val, node_config_filepath):
     )
 
     # Check that celery started
-    for _ in range(100):
-        try:
-            with open(logpath) as logfile:
-                if bool(
-                    re.search("CELERY - FRAMEWORK - celery@.* ready.", logfile.read())
-                ):
-                    break
-        except FileNotFoundError:
-            pass
-        time.sleep(0.5)
-    else:
-        with open(logpath) as logfile:
-            raise TimeoutError(
-                f"The node service '{node_id}' didn't manage to start in the designated time. Logs: \n{logfile.read()}"
-            )
+    _search_for_string_in_logfile("CELERY - FRAMEWORK - celery@.* ready.", logpath)
 
     print(f"Created node service with id '{node_id}' and process id '{proc.pid}'.")
     return proc
 
 
-def kill_node_service(proc):
-    print(f"\nKilling node service with process id '{proc.pid}'...")
+def kill_service(proc):
+    print(f"\nKilling service with process id '{proc.pid}'...")
     psutil_proc = psutil.Process(proc.pid)
     proc.kill()
     for _ in range(100):
@@ -573,9 +639,9 @@ def kill_node_service(proc):
         time.sleep(0.1)
     else:
         raise TimeoutError(
-            f"Node service is still running, status: '{psutil_proc.status()}'."
+            f"Service is still running, status: '{psutil_proc.status()}'."
         )
-    print(f"Killed node service with process id '{proc.pid}'.")
+    print(f"Killed service with process id '{proc.pid}'.")
 
 
 @pytest.fixture(scope="session")
@@ -585,7 +651,7 @@ def globalnode_node_service(rabbitmq_globalnode, monetdb_globalnode):
     node_config_filepath = path.join(TEST_ENV_CONFIG_FOLDER, node_config_file)
     proc = _create_node_service(algo_folders_env_variable_val, node_config_filepath)
     yield
-    kill_node_service(proc)
+    kill_service(proc)
 
 
 @pytest.fixture(scope="session")
@@ -595,7 +661,7 @@ def localnode1_node_service(rabbitmq_localnode1, monetdb_localnode1):
     node_config_filepath = path.join(TEST_ENV_CONFIG_FOLDER, node_config_file)
     proc = _create_node_service(algo_folders_env_variable_val, node_config_filepath)
     yield
-    kill_node_service(proc)
+    kill_service(proc)
 
 
 @pytest.fixture(scope="session")
@@ -605,7 +671,7 @@ def localnode2_node_service(rabbitmq_localnode2, monetdb_localnode2):
     node_config_filepath = path.join(TEST_ENV_CONFIG_FOLDER, node_config_file)
     proc = _create_node_service(algo_folders_env_variable_val, node_config_filepath)
     yield
-    kill_node_service(proc)
+    kill_service(proc)
 
 
 @pytest.fixture(scope="session")
@@ -615,7 +681,7 @@ def smpc_globalnode_node_service(rabbitmq_smpc_globalnode, monetdb_smpc_globalno
     node_config_filepath = path.join(TEST_ENV_CONFIG_FOLDER, node_config_file)
     proc = _create_node_service(algo_folders_env_variable_val, node_config_filepath)
     yield
-    kill_node_service(proc)
+    kill_service(proc)
 
 
 @pytest.fixture(scope="session")
@@ -625,7 +691,7 @@ def smpc_localnode1_node_service(rabbitmq_smpc_localnode1, monetdb_smpc_localnod
     node_config_filepath = path.join(TEST_ENV_CONFIG_FOLDER, node_config_file)
     proc = _create_node_service(algo_folders_env_variable_val, node_config_filepath)
     yield
-    kill_node_service(proc)
+    kill_service(proc)
 
 
 @pytest.fixture(scope="session")
@@ -635,24 +701,24 @@ def smpc_localnode2_node_service(rabbitmq_smpc_localnode2, monetdb_smpc_localnod
     node_config_filepath = path.join(TEST_ENV_CONFIG_FOLDER, node_config_file)
     proc = _create_node_service(algo_folders_env_variable_val, node_config_filepath)
     yield
-    kill_node_service(proc)
+    kill_service(proc)
 
 
 @pytest.fixture(scope="function")
 def localnodetmp_node_service(rabbitmq_localnodetmp, monetdb_localnodetmp):
     """
     ATTENTION!
-    This node service fixture is the only one returning the process so it can be killed.
-    The scope of the fixture is function so it won't break tests if the node service is killed.
-    The rabbitmq and monetdb containers have also function scope so this is VERY slow.
-    This should be used only when the service should be killed etc for testing.
+    This node service fixture is the only one returning the process, so it can be killed.
+    The scope of the fixture is function, so it won't break tests if the node service is killed.
+    The rabbitmq and monetdb containers also have 'function' scope, so this is VERY slow.
+    This should be used only when the service needs to be killed, e.g. for testing.
     """
     node_config_file = LOCALNODETMP_CONFIG_FILE
     algo_folders_env_variable_val = ALGORITHM_FOLDERS_ENV_VARIABLE_VALUE
     node_config_filepath = path.join(TEST_ENV_CONFIG_FOLDER, node_config_file)
     proc = _create_node_service(algo_folders_env_variable_val, node_config_filepath)
     yield proc
-    kill_node_service(proc)
+    kill_service(proc)
 
 
 def create_node_tasks_handler_celery(node_config_filepath):
@@ -663,7 +729,6 @@ def create_node_tasks_handler_celery(node_config_filepath):
         queue_port = tmp["rabbitmq"]["port"]
         db_domain = tmp["monetdb"]["ip"]
         db_port = tmp["monetdb"]["port"]
-        tasks_timeout = tmp["celery"]["task_time_limit"]
     queue_address = ":".join([str(queue_domain), str(queue_port)])
     db_address = ":".join([str(db_domain), str(db_port)])
 
@@ -671,7 +736,8 @@ def create_node_tasks_handler_celery(node_config_filepath):
         node_id=node_id,
         node_queue_addr=queue_address,
         node_db_addr=db_address,
-        tasks_timeout=tasks_timeout,
+        tasks_timeout=TASKS_TIMEOUT,
+        run_udf_task_timeout=RUN_UDF_TASK_TIMEOUT,
     )
 
 
@@ -777,4 +843,334 @@ def reset_node_landscape_aggregator():
     nla._data_model_registry = DataModelRegistry(
         get_request_logger("DATA-MODEL-REGISTRY")
     )
+
+
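+# Two controller services are used by the tests: a plain one (CONTROLLER_PORT) and one
+# configured for SMPC (CONTROLLER_SMPC_PORT), each with its own config and localnodes
+# addresses file.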
+@pytest.fixture(scope="session")
+def controller_service():
+    service_port = CONTROLLER_PORT
+    controller_config_filepath = path.join(
+        TEST_ENV_CONFIG_FOLDER, CONTROLLER_CONFIG_FILE
+    )
+    localnodes_config_filepath = path.join(
+        TEST_ENV_CONFIG_FOLDER, CONTROLLER_LOCALNODES_CONFIG_FILE
+    )
+
+    proc = _create_controller_service(
+        service_port,
+        controller_config_filepath,
+        localnodes_config_filepath,
+        CONTROLLER_OUTPUT_FILE,
+    )
+    yield
+    kill_service(proc)
+
+
+@pytest.fixture(scope="session")
+def smpc_controller_service():
+    service_port = CONTROLLER_SMPC_PORT
+    controller_config_filepath = path.join(
+        TEST_ENV_CONFIG_FOLDER, CONTROLLER_SMPC_CONFIG_FILE
+    )
+    localnodes_config_filepath = path.join(
+        TEST_ENV_CONFIG_FOLDER, CONTROLLER_SMPC_LOCALNODES_CONFIG_FILE
+    )
+
+    proc = _create_controller_service(
+        service_port,
+        controller_config_filepath,
+        localnodes_config_filepath,
+        SMPC_CONTROLLER_OUTPUT_FILE,
+    )
+    yield
+    kill_service(proc)
+
+
+def _create_controller_service(
+    service_port: int,
+    controller_config_filepath: str,
+    localnodes_config_filepath: str,
+    logs_filename: str,
+):
+    print(f"\nCreating controller service on port '{service_port}'...")
+
+    logpath = OUTDIR / logs_filename
+    if os.path.isfile(logpath):
+        os.remove(logpath)
+
+    env = os.environ.copy()
+    env["ALGORITHM_FOLDERS"] = ALGORITHM_FOLDERS_ENV_VARIABLE_VALUE
+    env["LOCALNODES_CONFIG_FILE"] = localnodes_config_filepath
+    env["MIPENGINE_CONTROLLER_CONFIG_FILE"] = controller_config_filepath
+    env["QUART_APP"] = "mipengine/controller/api/app:app"
+    env["PYTHONPATH"] = str(Path(__file__).parent.parent.parent)
+
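+    # stdout and stderr are appended to the log file so the startup checks below can search it.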
+    cmd = (
+        f"poetry run quart run --host=0.0.0.0 --port {service_port} >> {logpath} 2>&1 "
+    )
+
+    # Without "exec" the command is spawned as a child process of the shell, which makes it difficult to kill.
+    # https://stackoverflow.com/questions/4789837/how-to-terminate-a-python-subprocess-launched-with-shell-true
+    proc = subprocess.Popen(
+        "exec " + cmd,
+        shell=True,
+        stderr=subprocess.PIPE,
+        stdout=subprocess.PIPE,
+        env=env,
+    )
+
+    # Check that quart started
+    _search_for_string_in_logfile("CONTROLLER - WEBAPI - Running on ", logpath)
+
+    # Check that nodes were loaded
+    _search_for_string_in_logfile(
+        "INFO - CONTROLLER - BACKGROUND - federation_info_logs", logpath
+    )
+    print(f"\nCreated controller service on port '{service_port}'.")
+    return proc
+
+
+@pytest.fixture(scope="session")
+def smpc_coordinator():
+    docker_cli = docker.from_env()
+
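+    # Existing containers are reused (created only if missing) to keep repeated local runs fast.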
+    print(f"\nWaiting for smpc coordinator db to be ready...")
+    # Start coordinator db
+    try:
+        docker_cli.containers.get(SMPC_COORD_DB_CONT_NAME)
+    except docker.errors.NotFound:
+        docker_cli.containers.run(
+            image=SMPC_COORD_DB_IMAGE,
+            name=SMPC_COORD_DB_CONT_NAME,
+            detach=True,
+            ports={27017: SMPC_COORD_DB_PORT},
+            environment={
+                "MONGO_INITDB_ROOT_USERNAME": "sysadmin",
+                "MONGO_INITDB_ROOT_PASSWORD": "123qwe",
+            },
+        )
+    print("Created controller db service.")
+
+    # Start coordinator queue
+    print(f"\nWaiting for smpc coordinator queue to be ready...")
+    try:
+        docker_cli.containers.get(SMPC_COORD_QUEUE_CONT_NAME)
+    except docker.errors.NotFound:
+        docker_cli.containers.run(
+            image=SMPC_COORD_QUEUE_IMAGE,
+            name=SMPC_COORD_QUEUE_CONT_NAME,
+            detach=True,
+            ports={6379: SMPC_COORD_QUEUE_PORT},
+            environment={
+                "REDIS_REPLICATION_MODE": "master",
+            },
+            command="redis-server --requirepass agora",
+        )
+    print("Created controller queue service.")
+
+    # Start coordinator
+    print(f"\nWaiting for smpc coordinator to be ready...")
+    try:
+        docker_cli.containers.get(SMPC_COORD_CONT_NAME)
+    except docker.errors.NotFound:
+        docker_cli.containers.run(
+            image=SMPC_CLUSTER_IMAGE,
+            name=SMPC_COORD_CONT_NAME,
+            detach=True,
+            ports={12314: SMPC_COORD_PORT},
+            environment={
+                "PLAYER_REPO_0": f"http://{COMMON_IP}:{SMPC_PLAYER1_PORT2}",
+                "PLAYER_REPO_1": f"http://{COMMON_IP}:{SMPC_PLAYER2_PORT2}",
+                "PLAYER_REPO_2": f"http://{COMMON_IP}:{SMPC_PLAYER3_PORT2}",
+                "REDIS_HOST": f"{COMMON_IP}",
+                "REDIS_PORT": f"{SMPC_COORD_QUEUE_PORT}",
+                "REDIS_PSWD": "agora",
+                "DB_URL": f"{COMMON_IP}:{SMPC_COORD_DB_PORT}",
+                "DB_UNAME": "sysadmin",
+                "DB_PSWD": "123qwe",
+            },
+            command="python coordinator.py",
+        )
+    print("Created controller service.")
+
+    yield
+
+    # TODO Removing the containers after every run makes development very slow
+    # db_cont = docker_cli.containers.get(SMPC_COORD_DB_CONT_NAME)
+    # db_cont.remove(v=True, force=True)
+    # queue_cont = docker_cli.containers.get(SMPC_COORD_QUEUE_CONT_NAME)
+    # queue_cont.remove(v=True, force=True)
+    # coord_cont = docker_cli.containers.get(SMPC_COORD_CONT_NAME)
+    # coord_cont.remove(v=True, force=True)
+
+
+@pytest.fixture(scope="session")
+def smpc_players():
+    docker_cli = docker.from_env()
+
+    # Start player 1
+    print(f"\nWaiting for smpc player 1 to be ready...")
+    try:
+        docker_cli.containers.get(SMPC_PLAYER1_CONT_NAME)
+    except docker.errors.NotFound:
+        docker_cli.containers.run(
+            image=SMPC_CLUSTER_IMAGE,
+            name=SMPC_PLAYER1_CONT_NAME,
+            detach=True,
+            ports={
+                6000: SMPC_PLAYER1_PORT1,
+                7000: SMPC_PLAYER1_PORT2,
+                14000: SMPC_PLAYER1_PORT3,
+            },
+            environment={
+                "PLAYER_REPO_0": f"http://{COMMON_IP}:{SMPC_PLAYER1_PORT2}",
+                "PLAYER_REPO_1": f"http://{COMMON_IP}:{SMPC_PLAYER2_PORT2}",
+                "PLAYER_REPO_2": f"http://{COMMON_IP}:{SMPC_PLAYER3_PORT2}",
+                "COORDINATOR_URL": f"http://{COMMON_IP}:{SMPC_COORD_PORT}",
+                "DB_URL": f"{COMMON_IP}:{SMPC_COORD_DB_PORT}",
+                "DB_UNAME": "sysadmin",
+                "DB_PSWD": "123qwe",
+            },
+            command="python player.py 0",
+        )
+    print("Created smpc player 1 service.")
+
+    # Start player 2
+    print(f"\nWaiting for smpc player 2 to be ready...")
+    try:
+        docker_cli.containers.get(SMPC_PLAYER2_CONT_NAME)
+    except docker.errors.NotFound:
+        docker_cli.containers.run(
+            image=SMPC_CLUSTER_IMAGE,
+            name=SMPC_PLAYER2_CONT_NAME,
+            detach=True,
+            ports={
+                6001: SMPC_PLAYER2_PORT1,
+                7001: SMPC_PLAYER2_PORT2,
+                14001: SMPC_PLAYER2_PORT3,
+            },
+            environment={
+                "PLAYER_REPO_0": f"http://{COMMON_IP}:{SMPC_PLAYER1_PORT2}",
+                "PLAYER_REPO_1": f"http://{COMMON_IP}:{SMPC_PLAYER2_PORT2}",
+                "PLAYER_REPO_2": f"http://{COMMON_IP}:{SMPC_PLAYER3_PORT2}",
+                "COORDINATOR_URL": f"http://{COMMON_IP}:{SMPC_COORD_PORT}",
+                "DB_URL": f"{COMMON_IP}:{SMPC_COORD_DB_PORT}",
+                "DB_UNAME": "sysadmin",
+                "DB_PSWD": "123qwe",
+            },
+            command="python player.py 1",
+        )
+    print("Created smpc player 2 service.")
+
+    # Start player 3
+    print(f"\nWaiting for smpc player 3 to be ready...")
+    try:
+        docker_cli.containers.get(SMPC_PLAYER3_CONT_NAME)
+    except docker.errors.NotFound:
+        docker_cli.containers.run(
+            image=SMPC_CLUSTER_IMAGE,
+            name=SMPC_PLAYER3_CONT_NAME,
+            detach=True,
+            ports={
+                6002: SMPC_PLAYER3_PORT1,
+                7002: SMPC_PLAYER3_PORT2,
+                14002: SMPC_PLAYER3_PORT3,
+            },
+            environment={
+                "PLAYER_REPO_0": f"http://{COMMON_IP}:{SMPC_PLAYER1_PORT2}",
+                "PLAYER_REPO_1": f"http://{COMMON_IP}:{SMPC_PLAYER2_PORT2}",
+                "PLAYER_REPO_2": f"http://{COMMON_IP}:{SMPC_PLAYER3_PORT2}",
+                "COORDINATOR_URL": f"http://{COMMON_IP}:{SMPC_COORD_PORT}",
+                "DB_URL": f"{COMMON_IP}:{SMPC_COORD_DB_PORT}",
+                "DB_UNAME": "sysadmin",
+                "DB_PSWD": "123qwe",
+            },
+            command="python player.py 2",
+        )
+    print("Created smpc player 3 service.")
+
+    yield
+
+    # TODO Removing the containers after every run makes development very slow
+    # player1_cont = docker_cli.containers.get(SMPC_PLAYER1_CONT_NAME)
+    # player1_cont.remove(v=True, force=True)
+    # player2_cont = docker_cli.containers.get(SMPC_PLAYER2_CONT_NAME)
+    # player2_cont.remove(v=True, force=True)
+    # player3_cont = docker_cli.containers.get(SMPC_PLAYER3_CONT_NAME)
+    # player3_cont.remove(v=True, force=True)
+
+
+@pytest.fixture(scope="session")
+def smpc_clients():
+    docker_cli = docker.from_env()
+
+    # Start client 1
+    print(f"\nWaiting for smpc client 1 to be ready...")
+    try:
+        docker_cli.containers.get(SMPC_CLIENT1_CONT_NAME)
+    except docker.errors.NotFound:
+        with open(path.join(TEST_ENV_CONFIG_FOLDER, LOCALNODE1_SMPC_CONFIG_FILE)) as fp:
+            tmp = toml.load(fp)
+            client_id = tmp["smpc"]["client_id"]
+        docker_cli.containers.run(
+            image=SMPC_CLUSTER_IMAGE,
+            name=SMPC_CLIENT1_CONT_NAME,
+            detach=True,
+            ports={
+                SMPC_CLIENT1_PORT: SMPC_CLIENT1_PORT,
+            },
+            environment={
+                "PLAYER_REPO_0": f"http://{COMMON_IP}:{SMPC_PLAYER1_PORT2}",
+                "PLAYER_REPO_1": f"http://{COMMON_IP}:{SMPC_PLAYER2_PORT2}",
+                "PLAYER_REPO_2": f"http://{COMMON_IP}:{SMPC_PLAYER3_PORT2}",
+                "COORDINATOR_URL": f"http://{COMMON_IP}:{SMPC_COORD_PORT}",
+                "ID": client_id,
+                "PORT": f"{SMPC_CLIENT1_PORT}",
+            },
+            command=f"python client.py",
+        )
+    print("Created smpc client 1 service.")
+
+    # Start client 2
+    print(f"\nWaiting for smpc client 2 to be ready...")
+    try:
+        docker_cli.containers.get(SMPC_CLIENT2_CONT_NAME)
+    except docker.errors.NotFound:
+        with open(path.join(TEST_ENV_CONFIG_FOLDER, LOCALNODE2_SMPC_CONFIG_FILE)) as fp:
+            tmp = toml.load(fp)
+            client_id = tmp["smpc"]["client_id"]
+        docker_cli.containers.run(
+            image=SMPC_CLUSTER_IMAGE,
+            name=SMPC_CLIENT2_CONT_NAME,
+            detach=True,
+            ports={
+                SMPC_CLIENT2_PORT: SMPC_CLIENT2_PORT,
+            },
+            environment={
+                "PLAYER_REPO_0": f"http://{COMMON_IP}:{SMPC_PLAYER1_PORT2}",
+                "PLAYER_REPO_1": f"http://{COMMON_IP}:{SMPC_PLAYER2_PORT2}",
+                "PLAYER_REPO_2": f"http://{COMMON_IP}:{SMPC_PLAYER3_PORT2}",
+                "COORDINATOR_URL": f"http://{COMMON_IP}:{SMPC_COORD_PORT}",
+                "ID": client_id,
+                "PORT": f"{SMPC_CLIENT2_PORT}",
+            },
+            command="python client.py",
+        )
+    print("Created smpc client 2 service.")
+
+    yield
+
+    # TODO Removing the containers after every run makes development very slow
+    # client1_cont = docker_cli.containers.get(SMPC_CLIENT1_CONT_NAME)
+    # client1_cont.remove(v=True, force=True)
+    # client2_cont = docker_cli.containers.get(SMPC_CLIENT2_CONT_NAME)
+    # client2_cont.remove(v=True, force=True)
+
+
+@pytest.fixture(scope="session")
+def smpc_cluster(smpc_coordinator, smpc_players, smpc_clients):
+    print(f"\nWaiting for smpc cluster to be ready...")
+    time.sleep(
+        SMPC_CLUSTER_SLEEP_TIME
+    )  # TODO Check when the smpc cluster is actually ready
+    print(f"\nFinished waiting '{SMPC_CLUSTER_SLEEP_TIME}' secs for SMPC cluster.")
     yield
diff --git a/tests/standalone_tests/test_cleanup_after_algorithm_execution.py b/tests/standalone_tests/test_cleanup_after_algorithm_execution.py
index 8a4dbbfcb02f0994a5c4190f3353c0dfec1fb4a9..448a4ad6aa331d5a07e358a018e6e89b37f594e5 100644
--- a/tests/standalone_tests/test_cleanup_after_algorithm_execution.py
+++ b/tests/standalone_tests/test_cleanup_after_algorithm_execution.py
@@ -14,6 +14,7 @@ from mipengine.controller.api.algorithm_request_dto import AlgorithmRequestDTO
 from mipengine.controller.controller import Controller
 from mipengine.controller.controller import get_a_uniqueid
 from tests.standalone_tests.conftest import ALGORITHM_FOLDERS_ENV_VARIABLE_VALUE
+from tests.standalone_tests.conftest import CONTROLLER_LOCALNODES_CONFIG_FILE
 from tests.standalone_tests.conftest import LOCALNODETMP_CONFIG_FILE
 from tests.standalone_tests.conftest import RABBITMQ_LOCALNODETMP_NAME
 from tests.standalone_tests.conftest import RABBITMQ_LOCALNODETMP_PORT
@@ -21,7 +22,7 @@ from tests.standalone_tests.conftest import TEST_ENV_CONFIG_FOLDER
 from tests.standalone_tests.conftest import _create_node_service
 from tests.standalone_tests.conftest import _create_rabbitmq_container
 from tests.standalone_tests.conftest import create_node_tasks_handler_celery
-from tests.standalone_tests.conftest import kill_node_service
+from tests.standalone_tests.conftest import kill_service
 from tests.standalone_tests.conftest import remove_localnodetmp_rabbitmq
 
 WAIT_CLEANUP_TIME_LIMIT = 40
@@ -43,7 +44,8 @@ def controller_config_dict_mock():
             "contextid_release_timelimit": 3600,  # 1hour
         },
         "localnodes": {
-            "config_file": "./tests/standalone_tests/testing_env_configs/test_localnodes_addresses.json",
+            "config_file": "./tests/standalone_tests/testing_env_configs/"
+            + CONTROLLER_LOCALNODES_CONFIG_FILE,
             "dns": "",
             "port": "",
         },
@@ -52,6 +54,7 @@ def controller_config_dict_mock():
             "password": "password",
             "vhost": "user_vhost",
             "celery_tasks_timeout": 40,
+            "celery_run_udf_task_timeout": 40,
             "celery_tasks_max_retries": 3,
             "celery_tasks_interval_start": 0,
             "celery_tasks_interval_step": 0.2,
@@ -604,7 +607,7 @@ async def test_cleanup_rabbitmq_down_algorithm_execution(
     )
 
     remove_localnodetmp_rabbitmq()
-    kill_node_service(localnodetmp_node_service)
+    kill_service(localnodetmp_node_service)
 
     controller._cleaner.release_contextid_for_cleanup(context_id=context_id)
 
@@ -660,7 +663,7 @@ async def test_cleanup_rabbitmq_down_algorithm_execution(
     # the node service was started in here, so it must manually be killed, otherwise it is
     # alive through the whole pytest session and is erroneously accessed by other tests
     # where the node service is supposedly down
-    kill_node_service(localnodetmp_node_service_proc)
+    kill_service(localnodetmp_node_service_proc)
 
     if (
         globalnode_tables_before_cleanup
@@ -775,7 +778,7 @@ async def test_cleanup_node_service_down_algorithm_execution(
         request_id=request_id, context_id=context_id
     )
 
-    kill_node_service(localnodetmp_node_service)
+    kill_service(localnodetmp_node_service)
 
     controller._cleaner.release_contextid_for_cleanup(context_id=context_id)
 
@@ -825,7 +828,7 @@ async def test_cleanup_node_service_down_algorithm_execution(
     # the node service was started in here, so it must manually be killed, otherwise it is
     # alive through the whole pytest session and is erroneously accessed by other tests
     # where the node service is supposedly down
-    kill_node_service(localnodetmp_node_service_proc)
+    kill_service(localnodetmp_node_service_proc)
 
     if (
         globalnode_tables_before_cleanup
diff --git a/tests/standalone_tests/test_node_landscape_aggregator_update_loop.py b/tests/standalone_tests/test_node_landscape_aggregator_update_loop.py
index 0e34d268bc3746190e439880bbb1fbc08d22f19c..39a3175631c3c04353473c25795db617afb39f0f 100644
--- a/tests/standalone_tests/test_node_landscape_aggregator_update_loop.py
+++ b/tests/standalone_tests/test_node_landscape_aggregator_update_loop.py
@@ -17,7 +17,7 @@ from tests.standalone_tests.conftest import _create_node_service
 from tests.standalone_tests.conftest import _create_rabbitmq_container
 from tests.standalone_tests.conftest import _load_data_monetdb_container
 from tests.standalone_tests.conftest import _remove_data_model_from_localnodetmp_monetdb
-from tests.standalone_tests.conftest import kill_node_service
+from tests.standalone_tests.conftest import kill_service
 from tests.standalone_tests.conftest import remove_localnodetmp_rabbitmq
 
 WAIT_TIME_LIMIT = 30
@@ -41,6 +41,7 @@ def controller_config_mock():
                 "password": "password",
                 "vhost": "user_vhost",
                 "celery_tasks_timeout": 10,
+                "celery_run_udf_task_timeout": 30,
                 "celery_tasks_max_retries": 3,
                 "celery_tasks_interval_start": 0,
                 "celery_tasks_interval_step": 0.2,
@@ -138,7 +139,7 @@ async def test_update_loop_node_service_down(
         and len(data_models["dementia:0.1"].values) == 186
     )
 
-    kill_node_service(localnodetmp_node_service)
+    kill_service(localnodetmp_node_service)
 
     # wait until node registry removes tmplocalnode
     start = time.time()
@@ -173,7 +174,7 @@ async def test_update_loop_node_service_down(
     # the node service was started in here, so it must manually be killed, otherwise it is
     # alive through the whole pytest session and is erroneously accessed by other tests
     # where the node service is supposedly down
-    kill_node_service(localnodetmp_node_service_proc)
+    kill_service(localnodetmp_node_service_proc)
 
 
 @pytest.mark.slow
@@ -244,7 +245,7 @@ async def test_update_loop_rabbitmq_down(
     # the node service was started in here, so it must manually be killed, otherwise it is
     # alive through the whole pytest session and is erroneously accessed by other tests
     # where the node service is supposedly down
-    kill_node_service(localnodetmp_node_service)
+    kill_service(localnodetmp_node_service)
 
 
 @pytest.mark.slow
@@ -340,7 +341,7 @@ async def test_update_loop_data_models_removed(
     # the node service was started in here, so it must manually be killed, otherwise it is
     # alive through the whole pytest session and is erroneously accessed by other tests
     # where the node service is supposedly down
-    kill_node_service(localnodetmp_node_service)
+    kill_service(localnodetmp_node_service)
 
 
 def remove_data_model_from_localnodetmp_monetdb(data_model):
diff --git a/tests/standalone_tests/test_node_tasks_handler_celery.py b/tests/standalone_tests/test_node_tasks_handler_celery.py
index 18ae0304c22886090ef400a592fba3d28b257ead..3ae42bdd4003edd783c271c598bc71f336b20a44 100644
--- a/tests/standalone_tests/test_node_tasks_handler_celery.py
+++ b/tests/standalone_tests/test_node_tasks_handler_celery.py
@@ -13,7 +13,7 @@ from mipengine.node_tasks_DTOs import TableSchema
 from mipengine.node_tasks_DTOs import UDFPosArguments
 from tests.standalone_tests.conftest import RABBITMQ_LOCALNODETMP_NAME
 from tests.standalone_tests.conftest import _remove_rabbitmq_container
-from tests.standalone_tests.conftest import kill_node_service
+from tests.standalone_tests.conftest import kill_service
 
 COMMON_TASKS_REQUEST_ID = "rqst1"
 
@@ -173,7 +173,7 @@ def test_time_limit_exceeded_exception(
     )
 
     # Stop the nodes (NOT the task queue of the node, only the celery app)
-    kill_node_service(localnodetmp_node_service)
+    kill_service(localnodetmp_node_service)
 
     # Queue a task which will raise the exception
     with pytest.raises(TimeoutError):
diff --git a/tests/standalone_tests/test_single_local_node_algorithm_execution.py b/tests/standalone_tests/test_single_local_node_algorithm_execution.py
index 3caf4c7fa86428d5ba661fa7fd4e2da133d9f7ea..c97dc31580a211b6e4ac76abc7bcf7dcee865e22 100644
--- a/tests/standalone_tests/test_single_local_node_algorithm_execution.py
+++ b/tests/standalone_tests/test_single_local_node_algorithm_execution.py
@@ -262,6 +262,7 @@ def test_single_local_node_algorithm_execution(
         node_queue_addr=queue_addr,
         node_db_addr=db_addr,
         tasks_timeout=45,
+        run_udf_task_timeout=120,
     )
 
     single_node_task_handler = NodesTasksHandlersDTO(
diff --git a/tests/standalone_tests/test_smpc_algorithms.py b/tests/standalone_tests/test_smpc_algorithms.py
new file mode 100644
index 0000000000000000000000000000000000000000..b5cf9c5a5fd23656212de80b715a319d8a0f48fe
--- /dev/null
+++ b/tests/standalone_tests/test_smpc_algorithms.py
@@ -0,0 +1,395 @@
+import json
+import re
+import time
+
+import pytest
+import requests
+
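+# The SMPC controller service started by the conftest fixtures listens on CONTROLLER_SMPC_PORT (4501).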
+algorithms_url = "http://127.0.0.1:4501/algorithms"
+
+
+def get_parametrization_list_success_cases():
+    parametrization_list = []
+
+    # ~~~~~~~~~~success case 1~~~~~~~~~~
+    algorithm_name = "smpc_standard_deviation_int_only"
+    request_dict = {
+        "inputdata": {
+            "data_model": "dementia:0.1",
+            "datasets": [
+                "edsd0",
+                "edsd1",
+                "edsd2",
+                "edsd3",
+                "edsd4",
+                "edsd5",
+                "edsd6",
+                "edsd7",
+                "edsd8",
+                "edsd9",
+            ],
+            "x": [
+                "lefthippocampus",
+            ],
+            "filters": {
+                "condition": "AND",
+                "rules": [
+                    {
+                        "id": "dataset",
+                        "type": "string",
+                        "value": [
+                            "edsd0",
+                            "edsd1",
+                            "edsd2",
+                            "edsd3",
+                            "edsd4",
+                            "edsd5",
+                            "edsd6",
+                            "edsd7",
+                            "edsd8",
+                            "edsd9",
+                        ],
+                        "operator": "in",
+                    },
+                    {
+                        "condition": "AND",
+                        "rules": [
+                            {
+                                "id": variable,
+                                "type": "string",
+                                "operator": "is_not_null",
+                                "value": None,
+                            }
+                            for variable in [
+                                "lefthippocampus",
+                            ]
+                        ],
+                    },
+                ],
+                "valid": True,
+            },
+        },
+    }
+    expected_response = {
+        "title": "Standard Deviation",
+        "columns": [
+            {"name": "variable", "data": ["lefthippocampus"], "type": "STR"},
+            {"name": "std_deviation", "data": [0.3611575592573076], "type": "FLOAT"},
+            {"name": "min_value", "data": [1.0], "type": "FLOAT"},
+            {"name": "max_value", "data": [4.0], "type": "FLOAT"},
+        ],
+    }
+    parametrization_list.append((algorithm_name, request_dict, expected_response))
+    # END ~~~~~~~~~~success case 1~~~~~~~~~~
+
+    # ~~~~~~~~~~success case 2~~~~~~~~~~
+    algorithm_name = "smpc_standard_deviation_int_only"
+    request_dict = {
+        "inputdata": {
+            "data_model": "dementia:0.1",
+            "datasets": [
+                "edsd0",
+                "edsd1",
+                "edsd2",
+                "edsd3",
+                "edsd4",
+                "edsd5",
+                "edsd6",
+                "edsd7",
+                "edsd8",
+                "edsd9",
+            ],
+            "x": [
+                "lefthippocampus",
+            ],
+            "filters": {
+                "condition": "AND",
+                "rules": [
+                    {
+                        "id": "dataset",
+                        "type": "string",
+                        "value": [
+                            "edsd0",
+                            "edsd1",
+                            "edsd2",
+                            "edsd3",
+                            "edsd4",
+                            "edsd5",
+                            "edsd6",
+                            "edsd7",
+                            "edsd8",
+                            "edsd9",
+                        ],
+                        "operator": "in",
+                    },
+                    {
+                        "condition": "AND",
+                        "rules": [
+                            {
+                                "id": variable,
+                                "type": "string",
+                                "operator": "is_not_null",
+                                "value": None,
+                            }
+                            for variable in [
+                                "lefthippocampus",
+                            ]
+                        ],
+                    },
+                ],
+                "valid": True,
+            },
+        },
+        "flags": {
+            "smpc": True,
+        },
+    }
+    expected_response = {
+        "title": "Standard Deviation",
+        "columns": [
+            {"name": "variable", "data": ["lefthippocampus"], "type": "STR"},
+            {"name": "std_deviation", "data": [0.3611575592573076], "type": "FLOAT"},
+            {"name": "min_value", "data": [1.0], "type": "FLOAT"},
+            {"name": "max_value", "data": [4.0], "type": "FLOAT"},
+        ],
+    }
+    parametrization_list.append((algorithm_name, request_dict, expected_response))
+    # END ~~~~~~~~~~success case 2~~~~~~~~~~
+    #
+    # # ~~~~~~~~~~success case 3~~~~~~~~~~
+    # algorithm_name = "smpc_standard_deviation"
+    # request_dict = {
+    #     "inputdata": {
+    #         "data_model": "dementia:0.1",
+    #         "datasets": ["edsd"],
+    #         "x": [
+    #             "lefthippocampus",
+    #         ],
+    #         "filters": {
+    #             "condition": "AND",
+    #             "rules": [
+    #                 {
+    #                     "id": "dataset",
+    #                     "type": "string",
+    #                     "value": ["edsd"],
+    #                     "operator": "in",
+    #                 },
+    #                 {
+    #                     "condition": "AND",
+    #                     "rules": [
+    #                         {
+    #                             "id": variable,
+    #                             "type": "string",
+    #                             "operator": "is_not_null",
+    #                             "value": None,
+    #                         }
+    #                         for variable in [
+    #                             "lefthippocampus",
+    #                         ]
+    #                     ],
+    #                 },
+    #             ],
+    #             "valid": True,
+    #         },
+    #     },
+    # }
+    # expected_response = {
+    #     "title": "Standard Deviation",
+    #     "columns": [
+    #         {"name": "variable", "data": ["lefthippocampus"], "type": "STR"},
+    #         {"name": "std_deviation", "data": [0.3634506955662605], "type": "FLOAT"},
+    #         {"name": "min_value", "data": [1.3047], "type": "FLOAT"},
+    #         {"name": "max_value", "data": [4.4519], "type": "FLOAT"},
+    #     ],
+    # }
+    # parametrization_list.append((algorithm_name, request_dict, expected_response))
+    # # END ~~~~~~~~~~success case 3~~~~~~~~~~
+    #
+    # # ~~~~~~~~~~success case 4~~~~~~~~~~
+    # algorithm_name = "smpc_standard_deviation"
+    # request_dict = {
+    #     "inputdata": {
+    #         "data_model": "dementia:0.1",
+    #         "datasets": ["edsd"],
+    #         "x": [
+    #             "lefthippocampus",
+    #         ],
+    #         "filters": {
+    #             "condition": "AND",
+    #             "rules": [
+    #                 {
+    #                     "id": "dataset",
+    #                     "type": "string",
+    #                     "value": ["edsd"],
+    #                     "operator": "in",
+    #                 },
+    #                 {
+    #                     "condition": "AND",
+    #                     "rules": [
+    #                         {
+    #                             "id": variable,
+    #                             "type": "string",
+    #                             "operator": "is_not_null",
+    #                             "value": None,
+    #                         }
+    #                         for variable in [
+    #                             "lefthippocampus",
+    #                         ]
+    #                     ],
+    #                 },
+    #             ],
+    #             "valid": True,
+    #         },
+    #     },
+    #     "flags": {
+    #         "smpc": True,
+    #     },
+    # }
+    # expected_response = {
+    #     "title": "Standard Deviation",
+    #     "columns": [
+    #         {"name": "variable", "data": ["lefthippocampus"], "type": "STR"},
+    #         {"name": "std_deviation", "data": [0.3634506955662605], "type": "FLOAT"},
+    #         {"name": "min_value", "data": [1.3047], "type": "FLOAT"},
+    #         {"name": "max_value", "data": [4.4519], "type": "FLOAT"},
+    #     ],
+    # }
+    # parametrization_list.append((algorithm_name, request_dict, expected_response))
+    # # END ~~~~~~~~~~success case 4~~~~~~~~~~
+    return parametrization_list
+
+
+@pytest.mark.skip(reason="https://team-1617704806227.atlassian.net/browse/MIP-608")
+@pytest.mark.smpc
+@pytest.mark.parametrize(
+    "algorithm_name, request_dict, expected_response",
+    get_parametrization_list_success_cases(),
+)
+def test_post_smpc_algorithm(
+    smpc_cluster,
+    smpc_globalnode_node_service,
+    smpc_localnode1_node_service,
+    load_data_smpc_localnode1,
+    smpc_localnode2_node_service,
+    load_data_smpc_localnode2,
+    smpc_controller_service,
+    algorithm_name,
+    request_dict,
+    expected_response,
+):
+    algorithm_url = algorithms_url + "/" + algorithm_name
+
+    headers = {"Content-type": "application/json", "Accept": "text/plain"}
+    response = requests.post(
+        algorithm_url,
+        data=json.dumps(request_dict),
+        headers=headers,
+    )
+    assert response.status_code == 200, f"Response message: {response.text}"
+    assert json.loads(response.text) == expected_response
+
+
+def get_parametrization_list_exception_cases():
+    parametrization_list = []
+    algorithm_name = "smpc_standard_deviation"
+    request_dict = {
+        "inputdata": {
+            "data_model": "dementia:0.1",
+            "datasets": [
+                "edsd0",
+                "edsd1",
+                "edsd2",
+                "edsd3",
+                "edsd4",
+                "edsd5",
+                "edsd6",
+                "edsd7",
+                "edsd8",
+                "edsd9",
+            ],
+            "x": [
+                "lefthippocampus",
+            ],
+            "filters": {
+                "condition": "AND",
+                "rules": [
+                    {
+                        "id": "dataset",
+                        "type": "string",
+                        "value": [
+                            "edsd0",
+                            "edsd1",
+                            "edsd2",
+                            "edsd3",
+                            "edsd4",
+                            "edsd5",
+                            "edsd6",
+                            "edsd7",
+                            "edsd8",
+                            "edsd9",
+                        ],
+                        "operator": "in",
+                    },
+                    {
+                        "condition": "AND",
+                        "rules": [
+                            {
+                                "id": variable,
+                                "type": "string",
+                                "operator": "is_not_null",
+                                "value": None,
+                            }
+                            for variable in [
+                                "lefthippocampus",
+                            ]
+                        ],
+                    },
+                ],
+                "valid": True,
+            },
+        },
+        "flags": {
+            "smpc": False,
+        },
+    }
+
+    expected_response = (
+        462,
+        "The computation cannot be made without SMPC.",
+    )
+
+    parametrization_list.append((algorithm_name, request_dict, expected_response))
+
+    return parametrization_list
+
+
+@pytest.mark.skip(reason="https://team-1617704806227.atlassian.net/browse/MIP-608")
+@pytest.mark.smpc
+@pytest.mark.parametrize(
+    "algorithm_name, request_dict, expected_response",
+    get_parametrization_list_exception_cases(),
+)
+def test_post_smpc_algorithm_exception(
+    smpc_globalnode_node_service,
+    smpc_localnode1_node_service,
+    load_data_smpc_localnode1,
+    smpc_localnode2_node_service,
+    load_data_smpc_localnode2,
+    smpc_controller_service,
+    algorithm_name,
+    request_dict,
+    expected_response,
+):
+    algorithm_url = algorithms_url + "/" + algorithm_name
+
+    headers = {"Content-type": "application/json", "Accept": "text/plain"}
+    response = requests.post(
+        algorithm_url,
+        data=json.dumps(request_dict),
+        headers=headers,
+    )
+    exp_response_status, exp_response_message = expected_response
+    assert (
+        response.status_code == exp_response_status
+    ), f"Response message: {response.text}"
+    assert re.search(exp_response_message, response.text)
diff --git a/tests/standalone_tests/test_smpc_enabled_and_optional_flags.py b/tests/standalone_tests/test_smpc_enabled_and_optional_flags.py
index 57f105e5ef2a0e85bcb8b08165a1dbb7c32e20f1..d55d0c48aa8c9a89235c6d0bd6c875f9875a6483 100644
--- a/tests/standalone_tests/test_smpc_enabled_and_optional_flags.py
+++ b/tests/standalone_tests/test_smpc_enabled_and_optional_flags.py
@@ -27,6 +27,7 @@ def get_validate_smpc_usage_success_cases():
     return validate_smpc_usage_success_cases
 
 
+@pytest.mark.smpc
 @pytest.mark.parametrize(
     "node_config, use_smpc", get_validate_smpc_usage_success_cases()
 )
@@ -66,6 +67,7 @@ def get_validate_smpc_usage_fail_cases():
     return validate_smpc_usage_fail_cases
 
 
+@pytest.mark.smpc
 @pytest.mark.parametrize(
     "node_config, use_smpc, exception", get_validate_smpc_usage_fail_cases()
 )
diff --git a/tests/standalone_tests/test_smpc_node_tasks.py b/tests/standalone_tests/test_smpc_node_tasks.py
index 9e3955179e3be398283a9e75353649a85509a49a..cab1ac51beef44fe8349f6a8ad7cade030f9933a 100644
--- a/tests/standalone_tests/test_smpc_node_tasks.py
+++ b/tests/standalone_tests/test_smpc_node_tasks.py
@@ -1,5 +1,6 @@
 import json
 import uuid
+from time import sleep
 from typing import Tuple
 
 import pytest
@@ -17,8 +18,11 @@ from mipengine.node_tasks_DTOs import UDFPosArguments
 from mipengine.node_tasks_DTOs import UDFResults
 from mipengine.smpc_cluster_comm_helpers import ADD_DATASET_ENDPOINT
 from mipengine.smpc_cluster_comm_helpers import TRIGGER_COMPUTATION_ENDPOINT
+from mipengine.smpc_cluster_comm_helpers import get_smpc_result
 from mipengine.smpc_DTOs import SMPCRequestData
 from mipengine.smpc_DTOs import SMPCRequestType
+from mipengine.smpc_DTOs import SMPCResponse
+from mipengine.smpc_DTOs import SMPCResponseStatus
 from mipengine.udfgen import make_unique_func_name
 from tests.algorithms.orphan_udfs import smpc_global_step
 from tests.algorithms.orphan_udfs import smpc_local_step
@@ -33,7 +37,7 @@ context_id = "testsmpcudfs" + str(uuid.uuid4().hex)[:10]
 command_id = "command123"
 smpc_job_id = "testKey123"
 SMPC_GET_DATASET_ENDPOINT = "/api/update-dataset/"
-SMPC_COORDINATOR_ADDRESS = "http://dl056.madgik.di.uoa.gr:12314"
+SMPC_COORDINATOR_ADDRESS = "http://172.17.0.1:12314"
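+# The SMPC coordinator started by the smpc_cluster fixture (SMPC_COORD_PORT), reachable via the docker bridge IP.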
 
 
 def create_secure_transfer_table(celery_app) -> str:
@@ -219,6 +223,7 @@ def test_secure_transfer_input_with_smpc_off(
     )
 
 
+@pytest.mark.smpc
 def test_validate_smpc_templates_match(
     smpc_localnode1_node_service,
     use_smpc_localnode1_database,
@@ -240,6 +245,7 @@ def test_validate_smpc_templates_match(
         pytest.fail(f"No exception should be raised. Exception: {exc}")
 
 
+@pytest.mark.smpc
 def test_validate_smpc_templates_dont_match(
     smpc_localnode1_node_service,
     use_smpc_localnode1_database,
@@ -260,6 +266,7 @@ def test_validate_smpc_templates_dont_match(
     assert "SMPC templates dont match." in str(exc)
 
 
+@pytest.mark.smpc
 def test_secure_transfer_run_udf_flow_with_smpc_on(
     smpc_localnode1_node_service,
     use_smpc_localnode1_database,
@@ -346,6 +353,7 @@ def test_secure_transfer_run_udf_flow_with_smpc_on(
     )
 
 
+@pytest.mark.smpc
 def test_load_data_to_smpc_client_from_globalnode_fails(
     smpc_globalnode_node_service,
     smpc_globalnode_celery_app,
@@ -363,13 +371,13 @@ def test_load_data_to_smpc_client_from_globalnode_fails(
     assert "load_data_to_smpc_client is allowed only for a LOCALNODE." in str(exc)
 
 
-@pytest.mark.skip(
-    reason="SMPC is not deployed in the CI yet. https://team-1617704806227.atlassian.net/browse/MIP-344"
-)
+@pytest.mark.skip(reason="https://team-1617704806227.atlassian.net/browse/MIP-608")
+@pytest.mark.smpc
 def test_load_data_to_smpc_client(
     smpc_localnode1_node_service,
     use_smpc_localnode1_database,
     smpc_localnode1_celery_app,
+    smpc_cluster,
 ):
     table_name, sum_op_values_str = create_table_with_smpc_sum_op_values(
         smpc_localnode1_celery_app
@@ -381,9 +389,8 @@ def test_load_data_to_smpc_client(
 
     load_data_to_smpc_client_task.delay(
         request_id=request_id,
-        context_id=context_id,
         table_name=table_name,
-        jobid="testKey123",
+        jobid=smpc_job_id,
     ).get()
 
     node_config = get_node_config_by_id(LOCALNODE1_SMPC_CONFIG_FILE)
@@ -406,6 +413,7 @@ def test_load_data_to_smpc_client(
     assert json.dumps(result) == sum_op_values_str
 
 
+@pytest.mark.smpc
 def test_get_smpc_result_from_localnode_fails(
     smpc_localnode1_node_service,
     smpc_localnode1_celery_app,
@@ -424,13 +432,13 @@ def test_get_smpc_result_from_localnode_fails(
     assert "get_smpc_result is allowed only for a GLOBALNODE." in str(exc)
 
 
-@pytest.mark.skip(
-    reason="SMPC is not deployed in the CI yet. https://team-1617704806227.atlassian.net/browse/MIP-344"
-)
+@pytest.mark.skip(reason="https://team-1617704806227.atlassian.net/browse/MIP-608")
+@pytest.mark.smpc
 def test_get_smpc_result(
     smpc_globalnode_node_service,
     use_smpc_globalnode_database,
     smpc_globalnode_celery_app,
+    smpc_cluster,
 ):
     get_smpc_result_task = get_celery_task_signature(
         smpc_globalnode_celery_app, "get_smpc_result"
@@ -446,7 +454,7 @@ def test_get_smpc_result(
     smpc_computation_data = [100]
     response = requests.post(
         request_url,
-        data=json.dumps(smpc_computation_data),
+        data=json.dumps({"type": "int", "data": smpc_computation_data}),
         headers=request_headers,
     )
     assert response.status_code == 200
@@ -464,6 +472,24 @@ def test_get_smpc_result(
     )
     assert response.status_code == 200
 
+    # --------------- Wait for SMPC result to be ready ------------------------
+    for _ in range(1, 100):
+        response = get_smpc_result(
+            coordinator_address=SMPC_COORDINATOR_ADDRESS,
+            jobid=smpc_job_id,
+        )
+        smpc_response = SMPCResponse.parse_raw(response)
+
+        if smpc_response.status == SMPCResponseStatus.FAILED:
+            raise ValueError(
+                f"The SMPC returned a {SMPCResponseStatus.FAILED} status. Body: {response}"
+            )
+        elif smpc_response.status == SMPCResponseStatus.COMPLETED:
+            break
+        sleep(1)
+    else:
+        raise TimeoutError("SMPC did not finish in 100 tries.")
+
     # --------------- GET SMPC RESULT IN GLOBALNODE ------------------------
     result_tablename = get_smpc_result_task.delay(
         request_id=request_id,
@@ -477,9 +503,8 @@ def test_get_smpc_result(
     )
 
 
-@pytest.mark.skip(
-    reason="SMPC is not deployed in the CI yet. https://team-1617704806227.atlassian.net/browse/MIP-344"
-)
+@pytest.mark.skip(reason="https://team-1617704806227.atlassian.net/browse/MIP-608")
+@pytest.mark.smpc
 def test_orchestrate_SMPC_between_two_localnodes_and_the_globalnode(
     smpc_globalnode_node_service,
     smpc_localnode1_node_service,
@@ -490,6 +515,7 @@ def test_orchestrate_SMPC_between_two_localnodes_and_the_globalnode(
     smpc_globalnode_celery_app,
     smpc_localnode1_celery_app,
     smpc_localnode2_celery_app,
+    smpc_cluster,
 ):
     run_udf_task_globalnode = get_celery_task_signature(
         smpc_globalnode_celery_app, "run_udf"
@@ -612,13 +638,11 @@ def test_orchestrate_SMPC_between_two_localnodes_and_the_globalnode(
     # --------- LOAD LOCALNODE ADD OP DATA TO SMPC CLIENTS -----------------
     smpc_client_1 = load_data_to_smpc_client_task_localnode1.delay(
         request_id=request_id,
-        context_id=context_id,
         table_name=local_1_smpc_result.value.sum_op_values.value,
         jobid=smpc_job_id,
     ).get()
     smpc_client_2 = load_data_to_smpc_client_task_localnode2.delay(
         request_id=request_id,
-        context_id=context_id,
         table_name=local_2_smpc_result.value.sum_op_values.value,
         jobid=smpc_job_id,
     ).get()
@@ -642,7 +666,25 @@ def test_orchestrate_SMPC_between_two_localnodes_and_the_globalnode(
     )
     assert response.status_code == 200
 
-    # --------- Get Results of SMPC in globalnode -----------------
+    # --------------- Wait for SMPC result to be ready ------------------------
+    for _ in range(100):
+        response = get_smpc_result(
+            coordinator_address=SMPC_COORDINATOR_ADDRESS,
+            jobid=smpc_job_id,
+        )
+        smpc_response = SMPCResponse.parse_raw(response)
+
+        if smpc_response.status == SMPCResponseStatus.FAILED:
+            raise ValueError(
+                f"The SMPC returned a {SMPCResponseStatus.FAILED} status. Body: {response}"
+            )
+        elif smpc_response.status == SMPCResponseStatus.COMPLETED:
+            break
+        sleep(1)
+    else:
+        raise TimeoutError("SMPC did not finish in 100 tries.")
+
+    # --------- Get SMPC result in globalnode -----------------
     sum_op_values_tablename = get_smpc_result_task_globalnode.delay(
         request_id=request_id,
         context_id=context_id,
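Note: both tests above poll the SMPC coordinator with the same inline loop. A minimal sketch of that pattern as a shared helper, assuming the get_smpc_result, SMPCResponse and SMPCResponseStatus names already imported by this test module (the wait_for_smpc_result name and its keyword defaults are illustrative, not part of the diff):

from time import sleep

def wait_for_smpc_result(coordinator_address, jobid, max_tries=100, interval=1):
    # Poll the SMPC coordinator until the job completes, fails, or we give up.
    for _ in range(max_tries):
        response = get_smpc_result(coordinator_address=coordinator_address, jobid=jobid)
        smpc_response = SMPCResponse.parse_raw(response)
        if smpc_response.status == SMPCResponseStatus.FAILED:
            raise ValueError(f"The SMPC returned a FAILED status. Body: {response}")
        if smpc_response.status == SMPCResponseStatus.COMPLETED:
            return smpc_response
        sleep(interval)
    raise TimeoutError(f"SMPC did not finish in {max_tries} tries.")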
diff --git a/tests/standalone_tests/testing_env_configs/smpc_globalnode.toml b/tests/standalone_tests/testing_env_configs/smpc_globalnode.toml
index 82535edc8791ab6c43599052bb2e238b120899cc..9b42fe714dc3e71b4295f461fa0127b7462e9e4b 100644
--- a/tests/standalone_tests/testing_env_configs/smpc_globalnode.toml
+++ b/tests/standalone_tests/testing_env_configs/smpc_globalnode.toml
@@ -3,7 +3,6 @@ log_level = "DEBUG"
 framework_log_level = "INFO"
 
 role = "GLOBALNODE"
-cdes_metadata_path = "./tests/test_data"
 
 [privacy]
 minimum_row_count = 10
@@ -34,6 +33,4 @@ enabled = true
 optional = false
 client_id = ""
 client_address = ""
-coordinator_address = "http://dl056.madgik.di.uoa.gr:12314"
-get_result_interval = 0.5
-get_result_max_retries = 120
+coordinator_address = "http://172.17.0.1:12314"
diff --git a/tests/standalone_tests/testing_env_configs/smpc_localnode1.toml b/tests/standalone_tests/testing_env_configs/smpc_localnode1.toml
index 246c80dc47c1663d7b61f4ddfb7a29bddc06fc71..5f5b7e88c91185889075b0ccfcc893cbe974b354 100644
--- a/tests/standalone_tests/testing_env_configs/smpc_localnode1.toml
+++ b/tests/standalone_tests/testing_env_configs/smpc_localnode1.toml
@@ -3,7 +3,6 @@ log_level = "DEBUG"
 framework_log_level = "INFO"
 
 role = "LOCALNODE"
-cdes_metadata_path = "./tests/test_data"
 
 [privacy]
 minimum_row_count = 10
@@ -32,8 +31,6 @@ password = "monetdb"
 [smpc]
 enabled = true
 optional = false
-client_id = 1
-client_address = "http://dl057.madgik.di.uoa.gr:9001"
-coordinator_address = "http://dl056.madgik.di.uoa.gr:12314"
-get_result_interval = 0.5
-get_result_max_retries = 120
+client_id = "testsmpclocalnode1"
+client_address = "http://172.17.0.1:9005"
+coordinator_address = "http://172.17.0.1:12314"
diff --git a/tests/standalone_tests/testing_env_configs/smpc_localnode2.toml b/tests/standalone_tests/testing_env_configs/smpc_localnode2.toml
index a3471b47ccfee7efe5f63d124f627b0c3b7ce306..7ff7f378f859229ba3ac4c1ffebbae12bfce7828 100644
--- a/tests/standalone_tests/testing_env_configs/smpc_localnode2.toml
+++ b/tests/standalone_tests/testing_env_configs/smpc_localnode2.toml
@@ -3,7 +3,6 @@ log_level = "DEBUG"
 framework_log_level = "INFO"
 
 role = "LOCALNODE"
-cdes_metadata_path = "./tests/test_data"
 
 [privacy]
 minimum_row_count = 10
@@ -32,8 +31,6 @@ password = "monetdb"
 [smpc]
 enabled = true
 optional = false
-client_id = 2
-client_address = "http://dl058.madgik.di.uoa.gr:9002"
-coordinator_address = "http://dl056.madgik.di.uoa.gr:12314"
-get_result_interval = 0.5
-get_result_max_retries = 120
+client_id = "testsmpclocalnode2"
+client_address = "http://172.17.0.1:9006"
+coordinator_address = "http://172.17.0.1:12314"
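The three node configs above now point the SMPC client and coordinator at the Docker bridge address 172.17.0.1 instead of the remote madgik hosts, and the get_result_* polling settings move to the controller configs below. A minimal sketch of reading one of these [smpc] sections, assuming the third-party toml package is available (how the node service actually loads its config is not shown in this diff):

import toml

config = toml.load("tests/standalone_tests/testing_env_configs/smpc_localnode1.toml")
smpc = config["smpc"]
if smpc["enabled"]:
    # Addresses as set in the [smpc] section above.
    print(smpc["client_id"], smpc["client_address"], smpc["coordinator_address"])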
diff --git a/tests/standalone_tests/testing_env_configs/test_smpc_controller.toml b/tests/standalone_tests/testing_env_configs/test_smpc_controller.toml
new file mode 100644
index 0000000000000000000000000000000000000000..ec369af39a3c8b10840fe571e4da2be6c57da9f7
--- /dev/null
+++ b/tests/standalone_tests/testing_env_configs/test_smpc_controller.toml
@@ -0,0 +1,32 @@
+log_level = "DEBUG"
+framework_log_level = "INFO"
+deployment_type = "LOCAL"
+node_landscape_aggregator_update_interval = 30
+
+[cleanup]
+contextids_cleanup_folder = "/tmp"
+nodes_cleanup_interval = 10
+contextid_release_timelimit = 3600
+
+[localnodes]
+config_file = "$LOCALNODES_CONFIG_FILE"
+dns = ""
+port = ""
+
+[rabbitmq]
+user = "user"
+password = "password"
+vhost = "user_vhost"
+celery_tasks_timeout = 20
+celery_run_udf_task_timeout = 120
+celery_tasks_max_retries = 3
+celery_tasks_interval_start = 0
+celery_tasks_interval_step = 0.2
+celery_tasks_interval_max = 0.5
+
+[smpc]
+enabled = true
+optional = false
+coordinator_address = "http://172.17.0.1:12314"
+get_result_interval = 2
+get_result_max_retries = 100
diff --git a/tests/standalone_tests/testing_env_configs/test_smpc_localnodes_addresses.json b/tests/standalone_tests/testing_env_configs/test_smpc_localnodes_addresses.json
new file mode 100644
index 0000000000000000000000000000000000000000..69e9b61e6074bcbb21b287f35cd8598f3bb6ceac
--- /dev/null
+++ b/tests/standalone_tests/testing_env_configs/test_smpc_localnodes_addresses.json
@@ -0,0 +1 @@
+["172.17.0.1:60004", "172.17.0.1:60005", "172.17.0.1:60006"]
diff --git a/tests/standalone_tests/testing_env_configs/testcontroller.toml b/tests/standalone_tests/testing_env_configs/testcontroller.toml
new file mode 100644
index 0000000000000000000000000000000000000000..e775123f4e021bd13ef8162c7be3e7cf7d3641b6
--- /dev/null
+++ b/tests/standalone_tests/testing_env_configs/testcontroller.toml
@@ -0,0 +1,32 @@
+log_level = "DEBUG"
+framework_log_level = "INFO"
+deployment_type = "LOCAL"
+node_landscape_aggregator_update_interval = 30
+
+[cleanup]
+contextids_cleanup_folder = "/tmp"
+nodes_cleanup_interval = 10
+contextid_release_timelimit = 3600
+
+[localnodes]
+config_file = "$LOCALNODES_CONFIG_FILE"
+dns = ""
+port = ""
+
+[rabbitmq]
+user = "user"
+password = "password"
+vhost = "user_vhost"
+celery_tasks_timeout = 20
+celery_run_udf_task_timeout = 120
+celery_tasks_max_retries = 3
+celery_tasks_interval_start = 0
+celery_tasks_interval_step = 0.2
+celery_tasks_interval_max = 0.5
+
+[smpc]
+enabled = false
+optional = false
+coordinator_address = "http://172.17.0.1:12314"
+get_result_interval = 2
+get_result_max_retries = 100
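testcontroller.toml is identical to test_smpc_controller.toml above except that [smpc].enabled is false, so the non-SMPC standalone tests get a controller with the secure-aggregation path switched off. A minimal sketch that makes the single difference explicit, again assuming the third-party toml package (paths as in this diff):

import toml

base = "tests/standalone_tests/testing_env_configs/"
plain = toml.load(base + "testcontroller.toml")["smpc"]
with_smpc = toml.load(base + "test_smpc_controller.toml")["smpc"]

# Both share coordinator_address, get_result_interval and get_result_max_retries;
# only the "enabled" flag differs between the two controller configs.
assert plain["enabled"] is False and with_smpc["enabled"] is True
assert {k: v for k, v in plain.items() if k != "enabled"} == {
    k: v for k, v in with_smpc.items() if k != "enabled"
}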