diff --git a/Makefile b/Makefile
index 91af83910598fc4dc6bbbabc7ab7be126d712135..614d3cd776bcdb59a6e4f6bc2705e6b4de9a8c69 100644
--- a/Makefile
+++ b/Makefile
@@ -10,7 +10,7 @@ COVER_PACKAGES=hbp_nrp_distributed_nest
 #documentation to build
 #DOC_MODULES=hbp_nrp_distributed_nest/doc
 
-PYTHON_PIP_VERSION?=pip==9.0.3
+PYTHON_PIP_VERSION?='pip>=19'
 
 ##### DO NOT MODIFY BELOW #####################
 
diff --git a/bitbucket-pipelines.yml b/bitbucket-pipelines.yml
index 69c25fc63c10515bd52731a5adc7c98a1e04a371..7ba3149ea949bfb67125f22251fc491a47decf0f 100644
--- a/bitbucket-pipelines.yml
+++ b/bitbucket-pipelines.yml
@@ -16,6 +16,7 @@ pipelines:
           - . ./nrp_branch_topic_checkout admin-scripts user-scripts ExDBackend ExperimentControl CLE Experiments Models GazeboRosPackages
           - cd $BITBUCKET_CLONE_DIR
 
+          # Build GazeboRosPackages in case of new ROS msgs or srv definitions
           # This plan depends on GazeboRosPackages being built
           - pushd $HBP/GazeboRosPackages && rm -rf build devel && catkin_make && popd && cd $BITBUCKET_CLONE_DIR
 
@@ -35,6 +36,9 @@ pipelines:
           # Copy bbp-client from user-scripts (before make devinstall)
           - cp -af $HBP/user-scripts/config_files/platform_venv/* $VIRTUAL_ENV_PATH/lib/python2.7/site-packages/
 
+          # Delete pip lock in VIRTUAL_ENV so to force checking for pip upgrade
+          - rm -f $VIRTUAL_ENV/new-pip.txt
+
           # Generate schemas
           # Egg-links have to be removed because make devinstall set them up wrongly
           - pushd $VIRTUAL_ENV_PATH/lib/python2.7/site-packages && rm -f hbp-nrp-distributed-nest.egg-link && popd
diff --git a/hbp_nrp_distributed_nest/hbp_nrp_distributed_nest/cle/DistributedPyNNCommunicationAdapter.py b/hbp_nrp_distributed_nest/hbp_nrp_distributed_nest/cle/DistributedPyNNCommunicationAdapter.py
index 417af2eeba3a6e86e09b17a814f64d30db632ed2..f4c790b6cd2f80647c287ed83b5aac9241fb8e53 100644
--- a/hbp_nrp_distributed_nest/hbp_nrp_distributed_nest/cle/DistributedPyNNCommunicationAdapter.py
+++ b/hbp_nrp_distributed_nest/hbp_nrp_distributed_nest/cle/DistributedPyNNCommunicationAdapter.py
@@ -25,6 +25,7 @@
 Extensions of the base CLE PyNNCommunicationAdapter to communicate with distributed
 processes. Maxmimum code reuse and minimal duplication where possible.
 """
+from builtins import range
 from hbp_nrp_cle.brainsim.pynn_nest.PyNNNestCommunicationAdapter import PyNNNestCommunicationAdapter
 from hbp_nrp_cle.brainsim.pynn_nest.devices.__NestDeviceGroup import PyNNNestDevice
 
@@ -153,7 +154,7 @@ class DistributedPyNNCommunicationAdapter(PyNNNestCommunicationAdapter):
             Returns neuron indices of a PopulationView in the root Population and the root label
             """
             label = p.grandparent.label
-            indices = p.index_in_grandparent(range(p.size))
+            indices = p.index_in_grandparent(list(range(p.size)))
             return label, indices
 
         def index_from_assembly(a):
@@ -174,7 +175,7 @@ class DistributedPyNNCommunicationAdapter(PyNNNestCommunicationAdapter):
         # propagate the synapse creation parameters to all remote notes, they will run the same
         # connection/creation commands after receiving these messages, guaranteed to be
         # run from CLE MPI process 0 only
-        for rank in xrange(1, COMM_NRP.Get_size()):
+        for rank in range(1, COMM_NRP.Get_size()):
             COMM_NRP.send({'command': 'ConnectTF', 'type': kind, 'assemblies': assemblies,
                                  'device': device, 'timestep': timestep,
                                  'params': params},
@@ -194,6 +195,6 @@ class DistributedPyNNCommunicationAdapter(PyNNNestCommunicationAdapter):
 
         # propagate the deletion configuration to all other processes, guaranteed to be
         # run from CLE MPI process 0 only
-        for rank in xrange(1, COMM_NRP.Get_size()):
+        for rank in range(1, COMM_NRP.Get_size()):
             COMM_NRP.send({'command': 'DeleteTF', 'timestep': timestep},
                                 dest=rank, tag=NestBrainProcess.MPI_MSG_TAG)
diff --git a/hbp_nrp_distributed_nest/hbp_nrp_distributed_nest/cle/DistributedPyNNControlAdapter.py b/hbp_nrp_distributed_nest/hbp_nrp_distributed_nest/cle/DistributedPyNNControlAdapter.py
index aca198aafced6e0a40b6ad54ca53dafd1a9a9e07..b70fc32b2f92fd7a8d210f0be8d7511b49db7707 100644
--- a/hbp_nrp_distributed_nest/hbp_nrp_distributed_nest/cle/DistributedPyNNControlAdapter.py
+++ b/hbp_nrp_distributed_nest/hbp_nrp_distributed_nest/cle/DistributedPyNNControlAdapter.py
@@ -23,6 +23,7 @@ This module defined a CLE control adapter that notifies all remote brain process
 when they should step the simulation.
 """
 
+from builtins import range
 from hbp_nrp_cle.brainsim.pynn.PyNNControlAdapter import PyNNControlAdapter
 from hbp_nrp_distributed_nest.launch.NestBrainProcess import NestBrainProcess
 
@@ -50,7 +51,7 @@ class DistributedPyNNControlAdapter(PyNNControlAdapter):
         """
 
         # notify all other processes, blocking send calls for them to receive
-        for rank in xrange(COMM_NRP.Get_size()):
+        for rank in range(COMM_NRP.Get_size()):
             if rank == COMM_NRP.Get_rank():
                 continue
             COMM_NRP.send({'command': 'LoadBrain', 'file': network_file,
@@ -68,7 +69,7 @@ class DistributedPyNNControlAdapter(PyNNControlAdapter):
         """
 
         # notify all other processes, blocking send calls for them to receive
-        for rank in xrange(COMM_NRP.Get_size()):
+        for rank in range(COMM_NRP.Get_size()):
             if rank == COMM_NRP.Get_rank():
                 continue
             COMM_NRP.send('step', dest=rank, tag=NestBrainProcess.MPI_MSG_TAG)
diff --git a/hbp_nrp_distributed_nest/hbp_nrp_distributed_nest/launch/DaintLauncher.py b/hbp_nrp_distributed_nest/hbp_nrp_distributed_nest/launch/DaintLauncher.py
index cad54a119910c0a752e729914156a4920331de2b..e15b26fb3ac7bcbc402f94f58122a871276ef02b 100644
--- a/hbp_nrp_distributed_nest/hbp_nrp_distributed_nest/launch/DaintLauncher.py
+++ b/hbp_nrp_distributed_nest/hbp_nrp_distributed_nest/launch/DaintLauncher.py
@@ -25,6 +25,7 @@
 """
 Helper class to build and execute a formatted srun command
 """
+from builtins import object
 import logging
 import os
 import subprocess
@@ -166,15 +167,15 @@ class DaintLauncher(object):
         if not self._launched:
             o = self._process.stdout.read()
             e = self._process.stderr.read()
-            print 'PROCESS END: {}'.format(n)
+            print('PROCESS END: {}'.format(n))
             if o:
-                print '--------- STDOUT -------'
-                print o
-                print '--------- STDOUT -------'
+                print('--------- STDOUT -------')
+                print(o)
+                print('--------- STDOUT -------')
             if e:
-                print '--------- ERROR -------'
-                print e
-                print '--------- ERROR -------'
+                print('--------- ERROR -------')
+                print(e)
+                print('--------- ERROR -------')
 
             raise Exception(
                 'ABORTING: Distributed launch failure. '
diff --git a/hbp_nrp_distributed_nest/hbp_nrp_distributed_nest/launch/DistributedCLEProcess.py b/hbp_nrp_distributed_nest/hbp_nrp_distributed_nest/launch/DistributedCLEProcess.py
index a2e4393840b1064114c985ec4556631a8de0aaa3..7af52cb7977087f7ee47abffd35a145d6d259a3d 100644
--- a/hbp_nrp_distributed_nest/hbp_nrp_distributed_nest/launch/DistributedCLEProcess.py
+++ b/hbp_nrp_distributed_nest/hbp_nrp_distributed_nest/launch/DistributedCLEProcess.py
@@ -24,12 +24,15 @@
 """
 This module contains the CLE process logic for the simulation assembly
 """
+from __future__ import print_function
+from __future__ import absolute_import
 
+from builtins import range
 import os
 import argparse
 import logging
 
-from DistributedCLESimulationAssembly import DistributedCLESimulationAssembly
+from .DistributedCLESimulationAssembly import DistributedCLESimulationAssembly
 
 simulation = None
 
@@ -130,7 +133,7 @@ def launch_cle(argv): # pragma: no cover
         # the tag is a magic number to avoid circular build/release dependency for now but
         # this will be removed when the referenced bug is fixed
         # notify MPI processes that configuration is complete
-        for rank in xrange(COMM_NRP.Get_size()):
+        for rank in range(COMM_NRP.Get_size()):
             if rank != COMM_NRP.Get_rank():
                 COMM_NRP.send('ready', dest=rank, tag=100)
         COMM_NRP.Barrier()
@@ -143,14 +146,14 @@ def launch_cle(argv): # pragma: no cover
         # are also killed
         logger.error('CLE aborted with message {}, terminating.'.format(e.message))
         # if no logger
-        print '[ MPI ] CLE aborted with message {}, terminating.'.format(e.message)
+        print('[ MPI ] CLE aborted with message {}, terminating.'.format(e.message))
         logger.exception(e)
 
-        for rank in xrange(COMM_NRP.Get_size()):
+        for rank in range(COMM_NRP.Get_size()):
             if rank != COMM_NRP.Get_rank():
                 COMM_NRP.send('abort', dest=rank, tag=100)
 
-        print '[ MPI ] ABORTing distributed CLE process: {}'.format(str(COMM_NRP.Get_rank()))
+        print('[ MPI ] ABORTing distributed CLE process: {}'.format(str(COMM_NRP.Get_rank())))
         COMM_NRP.Abort(-1)
 
     finally:
@@ -165,6 +168,6 @@ def launch_cle(argv): # pragma: no cover
     # terminate the spawned brain processes
     # send a shutdown message in case the brain processes are in a recv loop at startup since they
     # seem to block and ignore the Abort command until receiving a message
-    for rank in xrange(COMM_NRP.Get_size()):
+    for rank in range(COMM_NRP.Get_size()):
         if rank != COMM_NRP.Get_rank():
             COMM_NRP.send('shutdown', dest=rank, tag=100)
diff --git a/hbp_nrp_distributed_nest/hbp_nrp_distributed_nest/launch/DistributedNestProcess.py b/hbp_nrp_distributed_nest/hbp_nrp_distributed_nest/launch/DistributedNestProcess.py
index 12ff3c4cafd2710c2ad7a983fb2b3e85d083bdba..00cc962bd8ca2bead8673696d46229aa79c6c224 100644
--- a/hbp_nrp_distributed_nest/hbp_nrp_distributed_nest/launch/DistributedNestProcess.py
+++ b/hbp_nrp_distributed_nest/hbp_nrp_distributed_nest/launch/DistributedNestProcess.py
@@ -21,6 +21,7 @@
 """
 This module contains the distributed Nest process logic and simulation assembly
 """
+from __future__ import print_function
 
 # The Nest imports below somehow delete/inject command line arguments that cause
 # issues with argparse in each of the launchers, save the valid arguments now and
@@ -71,7 +72,7 @@ def launch_brain(argv): # pragma: no cover
     except Exception as ex: # pylint: disable=broad-except
         # print the traceback which should go back to the remote logger
         traceback.print_exc()
-        print str(ex)
+        print(str(ex))
         # for any failures, terminate all other brain processes and the CLE
-        print '[ MPI ] ABORTing distributed NEST process: {}'.format(str(COMM_NRP.Get_rank()))
+        print('[ MPI ] ABORTing distributed NEST process: {}'.format(str(COMM_NRP.Get_rank())))
         COMM_NRP.Abort(-1)
diff --git a/hbp_nrp_distributed_nest/hbp_nrp_distributed_nest/launch/MPILauncher.py b/hbp_nrp_distributed_nest/hbp_nrp_distributed_nest/launch/MPILauncher.py
index 1b5fc8e5827338f0a07812b19050061e602d812b..e804cffddd5a206e38907ca8aa40d4e60bed2e90 100644
--- a/hbp_nrp_distributed_nest/hbp_nrp_distributed_nest/launch/MPILauncher.py
+++ b/hbp_nrp_distributed_nest/hbp_nrp_distributed_nest/launch/MPILauncher.py
@@ -30,6 +30,7 @@ Helper class to build and execute a formatted mpirun command in the format:
 where each of the hosts has a specific working directory with necessary config files already
 in place. Also passes environment variables required for NRP/CLE execution.
 """
+from builtins import object
 import logging
 import os
 import subprocess
diff --git a/hbp_nrp_distributed_nest/hbp_nrp_distributed_nest/launch/NestBrainProcess.py b/hbp_nrp_distributed_nest/hbp_nrp_distributed_nest/launch/NestBrainProcess.py
index 38efbecc85bcbd71aad86d222d35f59b6919b788..5094931f47959fe984d866927d66085eb2396324 100644
--- a/hbp_nrp_distributed_nest/hbp_nrp_distributed_nest/launch/NestBrainProcess.py
+++ b/hbp_nrp_distributed_nest/hbp_nrp_distributed_nest/launch/NestBrainProcess.py
@@ -24,6 +24,7 @@
 """
 A distributed brain process that can be launched standalone on remote hosts.
 """
+from builtins import object
 from hbp_nrp_cle.brainsim import config
 
 import pyNN.nest as sim
diff --git a/hbp_nrp_distributed_nest/hbp_nrp_distributed_nest/launch/NestLauncher.py b/hbp_nrp_distributed_nest/hbp_nrp_distributed_nest/launch/NestLauncher.py
index f2e2b0a61204ce014af83d20f848f5fea6bc5ac3..760c78ca006ad1382692be88b130de9a81f46de8 100644
--- a/hbp_nrp_distributed_nest/hbp_nrp_distributed_nest/launch/NestLauncher.py
+++ b/hbp_nrp_distributed_nest/hbp_nrp_distributed_nest/launch/NestLauncher.py
@@ -26,6 +26,7 @@ Setup, build, and launch a distributed Nest instance that will spawn the CLE and
 requested brain processes.
 """
 
+from builtins import object
 from hbp_nrp_distributed_nest.launch.host.LocalLauncher import LocalLauncher
 from hbp_nrp_distributed_nest.launch.MPILauncher import MPILauncher
 from hbp_nrp_distributed_nest.launch.DaintLauncher import DaintLauncher
@@ -92,7 +93,7 @@ class NestLauncher(object):
         reservation_str = self._sim_config.reservation if self._sim_config.reservation else ''
         timeout_str = str(self._sim_config.timeout).replace(' ', '_')
         rng_str = (self._sim_config.rng_seed if self._sim_config.rng_seed
-                   else random.randint(1, sys.maxint))
+                   else random.randint(1, sys.maxsize))
 
         # construct the actual MPI launcher with the process that determines if the CLE or
         # standalone brain should be launched
diff --git a/hbp_nrp_distributed_nest/hbp_nrp_distributed_nest/launch/host/__init__.py b/hbp_nrp_distributed_nest/hbp_nrp_distributed_nest/launch/host/__init__.py
index 790732f7788a38a3971ca787a685115f496c3e67..b880c03492e072fe267226112c1c1ed2afbb6ce9 100644
--- a/hbp_nrp_distributed_nest/hbp_nrp_distributed_nest/launch/host/__init__.py
+++ b/hbp_nrp_distributed_nest/hbp_nrp_distributed_nest/launch/host/__init__.py
@@ -4,6 +4,7 @@ simulation targets.
 """
 
 
+from builtins import object
 class IHostLauncher(object):
     """
     A generic interface to implement a host specific launcher. Guarantees necessary property and
diff --git a/hbp_nrp_distributed_nest/hbp_nrp_distributed_nest/launch/main.py b/hbp_nrp_distributed_nest/hbp_nrp_distributed_nest/launch/main.py
index eec79fdffa6e79df960ff7dd90cce1582a0a396c..74ed82e0b6c2fc71d92f23bfd46f669dc8f2a68a 100644
--- a/hbp_nrp_distributed_nest/hbp_nrp_distributed_nest/launch/main.py
+++ b/hbp_nrp_distributed_nest/hbp_nrp_distributed_nest/launch/main.py
@@ -21,27 +21,28 @@
 """
 Entry point of distributed CLE and NEST
 """
+from __future__ import print_function
 import sys
+import signal
 
 # import pyNN.nest here to ensure NEST ranks are initialized correctly
 import pyNN.nest as sim
 import nest
 nest.set_debug(False)
 
-argv_backup = list(sys.argv[1:])
-sys.argv = [sys.argv[0]]
-
-import signal
-
 import hbp_nrp_distributed_nest.launch.DistributedCLEProcess as DistCLE
 from hbp_nrp_distributed_nest.launch.DistributedNestProcess import launch_brain
 
+argv_backup = list(sys.argv[1:])
+sys.argv = [sys.argv[0]]
+
 
 def handle_sigterm(signo, stack_frame):
-    print '[ MPI ] ================ received sigterm ================ ' + str(rank)
+    message_template = '[ MPI ] ================ {message} ================ {rank}'
+    print(message_template.format(message="received sigterm", rank=str(rank)))
     if DistCLE.simulation is not None:
-        print '[ MPI ] ================ shutdown on sigterm ================ ' + str(rank)
-        DistCLE.simulation.shutdown()
+        print(message_template.format(message="shutdown on sigterm", rank=str(rank)))
+        DistCLE.simulation.shutdown()
     sys.exit(0)
 
 
@@ -52,28 +53,27 @@ if __name__ == '__main__':  # pragma: no cover
     import socket
     hostname = socket.gethostname()
 
-    print '[ MPI ] ========== nest rank={}; hostname={} ========'.format(nest.Rank(),hostname)
+    print('[ MPI ] ========== nest rank={}; hostname={} ========'.format(nest.Rank(), hostname))
 
     # use the MPI process rank to determine if we should launch CLE or brain process
     # both launch commands are blocking until shutdown occurs
     signal.signal(signal.SIGTERM, handle_sigterm)
 
-    print '[ MPI ] ========== initialized={} with thread_level={} ========'.format(
-        str(MPI.Is_initialized()), str(MPI.Query_thread()))
+    print('[ MPI ] ========== initialized={} with thread_level={} ========'.format(
+        str(MPI.Is_initialized()), str(MPI.Query_thread())))
     if not MPI.Is_initialized():
         MPI.Init_thread(MPI.THREAD_MULTIPLE)
 
     if rank == 0:
         # import pydevd
         # pydevd.settrace('localhost', port=50003, stdoutToServer=True, stderrToServer=True, suspend=False)
-        print '[ MPI ] ================ LAUNCHING CLE  ================ ' + str(rank)
+        print('[ MPI ] ================ LAUNCHING CLE  ================ ' + str(rank))
         DistCLE.launch_cle(argv_backup)
-
     else:
         # import pydevd
         # pydevd.settrace('localhost', port=50004, stdoutToServer=True, stderrToServer=True, suspend=False)
-        print '[ MPI ] ================ LAUNCHING NEST ================ ' + str(rank)
+        print('[ MPI ] ================ LAUNCHING NEST ================ ' + str(rank))
         launch_brain(argv_backup)
 
-    print '[ MPI ] Gracefully exit process ' + str(rank)
+    print('[ MPI ] Gracefully exit process ' + str(rank))
     sys.exit(0)
diff --git a/hbp_nrp_distributed_nest/requirements.txt b/hbp_nrp_distributed_nest/requirements.txt
index f9d1c29bda91641a174e8c221f01bb52ba813735..57119afadaf3ffc333e744ffd30e24f9488188a8 100644
--- a/hbp_nrp_distributed_nest/requirements.txt
+++ b/hbp_nrp_distributed_nest/requirements.txt
@@ -1,2 +1,3 @@
 # third party requirements
+future==0.18.2
 mpi4py==2.0.0
diff --git a/hbp_nrp_distributed_nest/requirements_extension_tests.txt b/hbp_nrp_distributed_nest/requirements_extension_tests.txt
index 031ce958f7c92f34b5aebfd200ba995fa2a54b88..076fef2e3af5bd4dffad43891493047eebb92bab 100644
--- a/hbp_nrp_distributed_nest/requirements_extension_tests.txt
+++ b/hbp_nrp_distributed_nest/requirements_extension_tests.txt
@@ -1,3 +1,3 @@
 #the following is required for the unit testing
 mock==1.0.1
-testfixtures==3.0.2
+testfixtures
diff --git a/hbp_nrp_distributed_nest/setup.py b/hbp_nrp_distributed_nest/setup.py
index 51bfed328b2e2a849b75734fcc5c0c5cd49e7ba6..b8f53d2c95181fa6bd495f3381bd856e927c9025 100644
--- a/hbp_nrp_distributed_nest/setup.py
+++ b/hbp_nrp_distributed_nest/setup.py
@@ -1,26 +1,25 @@
 '''setup.py'''
 
+from builtins import next
 from setuptools import setup
 
 import hbp_nrp_distributed_nest
 import pip
 
-from optparse import Option
+from optparse import Option # pylint:disable=deprecated-module
 options = Option('--workaround')
 options.skip_requirements_regex = None
 reqs_file = './requirements.txt'
+
+pip_version_major = int(pip.__version__.split('.')[0])
 # Hack for old pip versions
-if pip.__version__.startswith('10.'):
-    # Versions greater or equal to 10.x don't rely on pip.req.parse_requirements
-    install_reqs = list(val.strip() for val in open(reqs_file))
-    reqs = install_reqs
-elif pip.__version__.startswith('1.'):
+if pip_version_major == 1:
     # Versions 1.x rely on pip.req.parse_requirements
     # but don't require a "session" parameter
     from pip.req import parse_requirements # pylint:disable=no-name-in-module, import-error
     install_reqs = parse_requirements(reqs_file, options=options)
     reqs = [str(ir.req) for ir in install_reqs]
-else:
+elif 10 > pip_version_major > 1:
     # Versions greater than 1.x but smaller than 10.x rely on pip.req.parse_requirements
     # and requires a "session" parameter
     from pip.req import parse_requirements # pylint:disable=no-name-in-module, import-error
@@ -32,6 +31,10 @@ else:
         options=options
     )
     reqs = [str(ir.req) for ir in install_reqs]
+else:
+    # Versions greater or equal to 10.x don't rely on pip.req.parse_requirements
+    install_reqs = list(val.strip() for val in open(reqs_file))
+    reqs = install_reqs
 
 # ensure we install numpy before the main list of requirements, ignore
 # failures if numpy/cython are not requirements and just proceed (future proof)
@@ -60,6 +63,8 @@ config = {
                  'hbp_nrp_distributed_nest.cle',
                  'hbp_nrp_distributed_nest.launch',
                  'hbp_nrp_distributed_nest.launch.host'],
+    'classifiers': ['Programming Language :: Python :: 3',
+                    "Programming Language :: Python :: 2.7"],
     'scripts': [],
     'name': 'hbp-nrp-distributed-nest',
     'include_package_data': True,
diff --git a/hbp_nrp_distributed_nest/setup_requirements.txt b/hbp_nrp_distributed_nest/setup_requirements.txt
new file mode 100644
index 0000000000000000000000000000000000000000..2c6edea8d2e78b31d241281c0d528c2da1709a1c
--- /dev/null
+++ b/hbp_nrp_distributed_nest/setup_requirements.txt
@@ -0,0 +1 @@
+future