diff --git a/hbp_nrp_distributed_nest/hbp_nrp_distributed_nest/launch/NestBrainProcess.py b/hbp_nrp_distributed_nest/hbp_nrp_distributed_nest/launch/NestBrainProcess.py
index 50c9ea7ed4bda79540ccff0efc4f5d1e6cb62600..279506ea017c8eb880a71bd121116b9924ae7193 100644
--- a/hbp_nrp_distributed_nest/hbp_nrp_distributed_nest/launch/NestBrainProcess.py
+++ b/hbp_nrp_distributed_nest/hbp_nrp_distributed_nest/launch/NestBrainProcess.py
@@ -137,6 +137,7 @@ class NestBrainProcess(object):
             elif data == 'step':
 
                 # run the coordinated simulation step
+                print "[MPI] ===================== step ======================="
                 self._brain_controller.run_step(self._timestep * 1000.0)  # msec
                 self._brain_communicator.refresh_buffers(0.0)
 
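
The 'step' branch added above is the brain-side half of the lock-step protocol: each brain rank advances NEST by one CLE timestep (converted to milliseconds) and then refreshes its communication buffers. For orientation, here is a minimal sketch of such a command loop, assuming the CLE rank broadcasts plain string commands over MPI.COMM_WORLD; run_brain_loop, the bcast transport and the 'shutdown' command are illustrative, while run_step, refresh_buffers and 'step' come straight from the patch.

    from mpi4py import MPI

    def run_brain_loop(controller, communicator, timestep, root=0):
        """Block on commands broadcast by the CLE rank and execute them."""
        while True:
            # every brain rank waits here until the root rank sends the next command
            data = MPI.COMM_WORLD.bcast(None, root=root)
            if data == 'step':
                print("[MPI] ===================== step =======================")
                controller.run_step(timestep * 1000.0)  # CLE timestep in s, NEST expects ms
                communicator.refresh_buffers(0.0)       # refresh the adapter's device buffers
            elif data == 'shutdown':
                break
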
diff --git a/hbp_nrp_distributed_nest/hbp_nrp_distributed_nest/launch/NestLauncher.py b/hbp_nrp_distributed_nest/hbp_nrp_distributed_nest/launch/NestLauncher.py
index 5111b32453d8f7bf981c8723ee5e9f70f9b8e6dd..ca00e3dec6e7d4886e8ebcddcd1a66b62dceb7d4 100644
--- a/hbp_nrp_distributed_nest/hbp_nrp_distributed_nest/launch/NestLauncher.py
+++ b/hbp_nrp_distributed_nest/hbp_nrp_distributed_nest/launch/NestLauncher.py
@@ -97,7 +97,7 @@ class NestLauncher(object):
         # construct the actual MPI launcher with the process that determines if the CLE or
         # standalone brain should be launched
         # TODO: Find way to send simconfig object directly to the DistributedNestProcess
-        args = ['--exdconf={}'.format(os.path.realpath(self._sim_config.exc_abs_path)),
+        args = ['--exdconf={}'.format(os.path.realpath(self._sim_config.exc_path.abs_path)),
                 '--gzserver-host={}'.format(self._sim_config.gzserver_host),
                 '--reservation={}'.format(reservation_str),
                 '--sim-id={}'.format(self._sim_config._sim_id),
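
The argument list built here is handed verbatim to the MPI-launched processes, so the flags must stay in sync with whatever parser main.py uses. Below is a rough, purely illustrative sketch of a matching argparse parser; the flag names mirror the list above, but the real entry point may accept additional options.

    import argparse

    parser = argparse.ArgumentParser(description='distributed NEST / CLE process')
    parser.add_argument('--exdconf', help='absolute path to the experiment configuration')
    parser.add_argument('--gzserver-host', help='host running gzserver')
    parser.add_argument('--reservation', help='cluster reservation string, if any')
    parser.add_argument('--sim-id', type=int, help='simulation id used by the backend')

    # sample values only; real invocations pass the flags built by NestLauncher
    args, _ = parser.parse_known_args(['--exdconf=/tmp/experiment.exc', '--sim-id=0'])
    assert args.sim_id == 0
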
diff --git a/hbp_nrp_distributed_nest/hbp_nrp_distributed_nest/launch/main.py b/hbp_nrp_distributed_nest/hbp_nrp_distributed_nest/launch/main.py
index b0a6fc1c2a4c2ee8e765b181d9e76cc20f411afd..f9ebd28f13671db0a3a9942ecd39da91119c2618 100644
--- a/hbp_nrp_distributed_nest/hbp_nrp_distributed_nest/launch/main.py
+++ b/hbp_nrp_distributed_nest/hbp_nrp_distributed_nest/launch/main.py
@@ -23,6 +23,11 @@ Entry point of distributed CLE and NEST
 """
 import sys
 
+# import pyNN.nest before mpi4py so that NEST ranks are initialized correctly
+import pyNN.nest as sim
+import nest
+nest.set_debug(False)
+
 argv_backup = list(sys.argv[1:])
 sys.argv = [sys.argv[0]]
 
@@ -47,11 +52,14 @@ if __name__ == '__main__':  # pragma: no cover
     from mpi4py import MPI
     rank = MPI.COMM_WORLD.Get_rank()
 
+    print '[ MPI ] ========== nest rank={} ========'.format(nest.Rank())
+
     # use the MPI process rank to determine if we should launch CLE or brain process
     # both launch commands are blocking until shutdown occurs
     signal.signal(signal.SIGTERM, handle_sigterm)
 
-    print '[ MPI ] ========== initialized={} with thread_level={} ========'.format(str(MPI.Is_initialized()), str(MPI.Query_thread()))
+    print '[ MPI ] ========== initialized={} with thread_level={} ========'.format(
+        str(MPI.Is_initialized()), str(MPI.Query_thread()))
     if not MPI.Is_initialized():
         MPI.Init_thread(MPI.THREAD_MULTIPLE)
 
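
The thread_level printed above is one of MPI's numeric threading-support constants, which is hard to read in a log. A small helper like the sketch below, using the standard mpi4py constants, would translate it into a name; none of this is in the patch, it only clarifies what the printed value encodes.

    from mpi4py import MPI

    THREAD_LEVEL_NAMES = {
        MPI.THREAD_SINGLE: 'THREAD_SINGLE',
        MPI.THREAD_FUNNELED: 'THREAD_FUNNELED',
        MPI.THREAD_SERIALIZED: 'THREAD_SERIALIZED',
        MPI.THREAD_MULTIPLE: 'THREAD_MULTIPLE',
    }

    if MPI.Is_initialized():
        # map the integer returned by Query_thread() to its MPI constant name
        print('[ MPI ] thread level: ' + THREAD_LEVEL_NAMES.get(MPI.Query_thread(), 'unknown'))
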
@@ -62,6 +70,8 @@ if __name__ == '__main__':  # pragma: no cover
         launch_cle(argv_backup)
 
     else:
+        # import pydevd  # uncomment these two lines to attach a remote pydevd debugger
+        # pydevd.settrace('localhost', port=50004, stdoutToServer=True, stderrToServer=True, suspend=False)
         print '[ MPI ] ================ LAUNCHING NEST ================ ' + str(rank)
         launch_brain(argv_backup)
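
Rank-based dispatch is the core idea of this entry point: one rank hosts the CLE and every other rank hosts a NEST brain process. The sketch below captures the pattern; the actual rank test is not visible in this hunk, so treating rank 0 as the CLE rank is an assumption (the usual convention), and launch_cle / launch_brain are passed in as callables to keep the sketch self-contained.

    from mpi4py import MPI

    def dispatch(argv, launch_cle, launch_brain):
        """Route this MPI process to the CLE or to a NEST brain process."""
        rank = MPI.COMM_WORLD.Get_rank()
        if rank == 0:
            # assumption: the CLE runs on the root rank, as is conventional
            launch_cle(argv)
        else:
            # every other rank becomes one NEST brain process
            launch_brain(argv)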