Skip to content
Snippets Groups Projects
Commit d522b828 authored by groetznerFortiss's avatar groetznerFortiss Committed by Ugo
Browse files

[NRRPLT-7590] Python 3 compatibility

[NRRPLT-7717] Applied stage1 and stage2 of futurize and tested verify.sh and run_tests.sh.

[NRRPLT-7590] Upgrade setup.py

[NRRPLT-7590] add setup_requirements.txt and fix Makefile

[NRRPLT-7590] remove future str

[NRRPLT-7590] fix pip version specifier in Makefile

[NRRPLT-7590] update test_fixtures version

[NRRPLT-7590] fix prints

[NRRPLT-7590] merge development

[NRRPLT-7590] quote pip version

[NRRPLT-7590] Force check for pip version in pipeline.
Build GazeboRosPackages in pipeline, since there might be some new ROS dependency.
parent 28d73b25
Branches
Tags
No related merge requests found
Showing
with 67 additions and 45 deletions
......@@ -10,7 +10,7 @@ COVER_PACKAGES=hbp_nrp_distributed_nest
#documentation to build
#DOC_MODULES=hbp_nrp_distributed_nest/doc
PYTHON_PIP_VERSION?=pip==9.0.3
PYTHON_PIP_VERSION?='pip>=19'
##### DO NOT MODIFY BELOW #####################
......
......@@ -16,6 +16,7 @@ pipelines:
- . ./nrp_branch_topic_checkout admin-scripts user-scripts ExDBackend ExperimentControl CLE Experiments Models GazeboRosPackages
- cd $BITBUCKET_CLONE_DIR
# Build GazeboRosPackages in case of new ROS msgs or srv definitions
# This plan depends on GazeboRosPackages being built
- pushd $HBP/GazeboRosPackages && rm -rf build devel && catkin_make && popd && cd $BITBUCKET_CLONE_DIR
......@@ -35,6 +36,9 @@ pipelines:
# Copy bbp-client from user-scripts (before make devinstall)
- cp -af $HBP/user-scripts/config_files/platform_venv/* $VIRTUAL_ENV_PATH/lib/python2.7/site-packages/
# Delete the pip lock file in VIRTUAL_ENV to force checking for a pip upgrade
- rm $VIRTUAL_ENV/new-pip.txt
# Generate schemas
# Egg-links have to be removed because make devinstall set them up wrongly
- pushd $VIRTUAL_ENV_PATH/lib/python2.7/site-packages && rm -f hbp-nrp-distributed-nest.egg-link && popd
......
......@@ -25,6 +25,7 @@
Extensions of the base CLE PyNNCommunicationAdapter to communicate with distributed
processes. Maximum code reuse and minimal duplication where possible.
"""
from builtins import range
from hbp_nrp_cle.brainsim.pynn_nest.PyNNNestCommunicationAdapter import PyNNNestCommunicationAdapter
from hbp_nrp_cle.brainsim.pynn_nest.devices.__NestDeviceGroup import PyNNNestDevice
......@@ -153,7 +154,7 @@ class DistributedPyNNCommunicationAdapter(PyNNNestCommunicationAdapter):
Returns neuron indices of a PopulationView in the root Population and the root label
"""
label = p.grandparent.label
indices = p.index_in_grandparent(range(p.size))
indices = p.index_in_grandparent(list(range(p.size)))
return label, indices
def index_from_assembly(a):
......@@ -174,7 +175,7 @@ class DistributedPyNNCommunicationAdapter(PyNNNestCommunicationAdapter):
# propagate the synapse creation parameters to all remote nodes, they will run the same
# connection/creation commands after receiving these messages, guaranteed to be
# run from CLE MPI process 0 only
for rank in xrange(1, COMM_NRP.Get_size()):
for rank in range(1, COMM_NRP.Get_size()):
COMM_NRP.send({'command': 'ConnectTF', 'type': kind, 'assemblies': assemblies,
'device': device, 'timestep': timestep,
'params': params},
......@@ -194,6 +195,6 @@ class DistributedPyNNCommunicationAdapter(PyNNNestCommunicationAdapter):
# propagate the deletion configuration to all other processes, guaranteed to be
# run from CLE MPI process 0 only
for rank in xrange(1, COMM_NRP.Get_size()):
for rank in range(1, COMM_NRP.Get_size()):
COMM_NRP.send({'command': 'DeleteTF', 'timestep': timestep},
dest=rank, tag=NestBrainProcess.MPI_MSG_TAG)
......@@ -23,6 +23,7 @@ This module defined a CLE control adapter that notifies all remote brain process
when they should step the simulation.
"""
from builtins import range
from hbp_nrp_cle.brainsim.pynn.PyNNControlAdapter import PyNNControlAdapter
from hbp_nrp_distributed_nest.launch.NestBrainProcess import NestBrainProcess
......@@ -50,7 +51,7 @@ class DistributedPyNNControlAdapter(PyNNControlAdapter):
"""
# notify all other processes, blocking send calls for them to receive
for rank in xrange(COMM_NRP.Get_size()):
for rank in range(COMM_NRP.Get_size()):
if rank == COMM_NRP.Get_rank():
continue
COMM_NRP.send({'command': 'LoadBrain', 'file': network_file,
......@@ -68,7 +69,7 @@ class DistributedPyNNControlAdapter(PyNNControlAdapter):
"""
# notify all other processes, blocking send calls for them to receive
for rank in xrange(COMM_NRP.Get_size()):
for rank in range(COMM_NRP.Get_size()):
if rank == COMM_NRP.Get_rank():
continue
COMM_NRP.send('step', dest=rank, tag=NestBrainProcess.MPI_MSG_TAG)
......
......@@ -25,6 +25,7 @@
"""
Helper class to build and execute a formatted srun command
"""
from builtins import object
import logging
import os
import subprocess
......@@ -166,15 +167,15 @@ class DaintLauncher(object):
if not self._launched:
o = self._process.stdout.read()
e = self._process.stderr.read()
print 'PROCESS END: {}'.format(n)
print('PROCESS END: {}'.format(n))
if o:
print '--------- STDOUT -------'
print o
print '--------- STDOUT -------'
print('--------- STDOUT -------')
print(o)
print('--------- STDOUT -------')
if e:
print '--------- ERROR -------'
print e
print '--------- ERROR -------'
print('--------- ERROR -------')
print(e)
print('--------- ERROR -------')
raise Exception(
'ABORTING: Distributed launch failure. '
......
......@@ -24,12 +24,15 @@
"""
This module contains the CLE process logic for the simulation assembly
"""
from __future__ import print_function
from __future__ import absolute_import
from builtins import range
import os
import argparse
import logging
from DistributedCLESimulationAssembly import DistributedCLESimulationAssembly
from .DistributedCLESimulationAssembly import DistributedCLESimulationAssembly
simulation = None
......@@ -130,7 +133,7 @@ def launch_cle(argv): # pragma: no cover
# the tag is a magic number to avoid circular build/release dependency for now but
# this will be removed when the referenced bug is fixed
# notify MPI processes that configuration is complete
for rank in xrange(COMM_NRP.Get_size()):
for rank in range(COMM_NRP.Get_size()):
if rank != COMM_NRP.Get_rank():
COMM_NRP.send('ready', dest=rank, tag=100)
COMM_NRP.Barrier()
......@@ -143,14 +146,14 @@ def launch_cle(argv): # pragma: no cover
# are also killed
logger.error('CLE aborted with message {}, terminating.'.format(e.message))
# if no logger
print '[ MPI ] CLE aborted with message {}, terminating.'.format(e.message)
print('[ MPI ] CLE aborted with message {}, terminating.'.format(e.message))
logger.exception(e)
for rank in xrange(COMM_NRP.Get_size()):
for rank in range(COMM_NRP.Get_size()):
if rank != COMM_NRP.Get_rank():
COMM_NRP.send('abort', dest=rank, tag=100)
print '[ MPI ] ABORTing distributed CLE process: {}'.format(str(COMM_NRP.Get_rank()))
print('[ MPI ] ABORTing distributed CLE process: {}'.format(str(COMM_NRP.Get_rank())))
COMM_NRP.Abort(-1)
finally:
......@@ -165,6 +168,6 @@ def launch_cle(argv): # pragma: no cover
# terminate the spawned brain processes
# send a shutdown message in case the brain processes are in a recv loop at startup since they
# seem to block and ignore the Abort command until receiving a message
for rank in xrange(COMM_NRP.Get_size()):
for rank in range(COMM_NRP.Get_size()):
if rank != COMM_NRP.Get_rank():
COMM_NRP.send('shutdown', dest=rank, tag=100)
......@@ -21,6 +21,7 @@
"""
This module contains the distributed Nest process logic and simulation assembly
"""
from __future__ import print_function
# The Nest imports below somehow delete/inject command line arguments that cause
# issues with argparse in each of the launchers, save the valid arguments now and
......@@ -71,7 +72,7 @@ def launch_brain(argv): # pragma: no cover
except Exception as ex: # pylint: disable=broad-except
# print the traceback which should go back to the remote logger
traceback.print_exc()
print str(ex)
print(str(ex))
# for any failures, terminate all other brain processes and the CLE
print '[ MPI ] ABORTing distributed NEST process: {}'.format(str(COMM_NRP.Get_rank()))
print('[ MPI ] ABORTing distributed NEST process: {}'.format(str(COMM_NRP.Get_rank())))
COMM_NRP.Abort(-1)
......@@ -30,6 +30,7 @@ Helper class to build and execute a formatted mpirun command in the format:
where each of the hosts has a specific working directory with necessary config files already
in place. Also passes environment variables required for NRP/CLE execution.
"""
from builtins import object
import logging
import os
import subprocess
......
......@@ -24,6 +24,7 @@
"""
A distributed brain process that can be launched standalone on remote hosts.
"""
from builtins import object
from hbp_nrp_cle.brainsim import config
import pyNN.nest as sim
......
......@@ -26,6 +26,7 @@ Setup, build, and launch a distributed Nest instance that will spawn the CLE and
requested brain processes.
"""
from builtins import object
from hbp_nrp_distributed_nest.launch.host.LocalLauncher import LocalLauncher
from hbp_nrp_distributed_nest.launch.MPILauncher import MPILauncher
from hbp_nrp_distributed_nest.launch.DaintLauncher import DaintLauncher
......@@ -92,7 +93,7 @@ class NestLauncher(object):
reservation_str = self._sim_config.reservation if self._sim_config.reservation else ''
timeout_str = str(self._sim_config.timeout).replace(' ', '_')
rng_str = (self._sim_config.rng_seed if self._sim_config.rng_seed
else random.randint(1, sys.maxint))
else random.randint(1, sys.maxsize))
# construct the actual MPI launcher with the process that determines if the CLE or
# standalone brain should be launched
......
......@@ -4,6 +4,7 @@ simulation targets.
"""
from builtins import object
class IHostLauncher(object):
"""
A generic interface to implement a host specific launcher. Guarantees necessary property and
......
......@@ -21,26 +21,27 @@
"""
Entry point of distributed CLE and NEST
"""
from __future__ import print_function
import sys
import signal
# import pyNN.nest here to ensure NEST ranks are initialized correctly
import pyNN.nest as sim
import nest
nest.set_debug(False)
argv_backup = list(sys.argv[1:])
sys.argv = [sys.argv[0]]
import signal
import hbp_nrp_distributed_nest.launch.DistributedCLEProcess as DistCLE
from hbp_nrp_distributed_nest.launch.DistributedNestProcess import launch_brain
argv_backup = list(sys.argv[1:])
sys.argv = [sys.argv[0]]
def handle_sigterm(signo, stack_frame):
    """
    SIGTERM handler for the distributed CLE/NEST entry point.

    Logs receipt of the signal (tagged with this process's MPI rank), shuts down
    the running distributed simulation if one has been started, and exits cleanly.

    :param signo: the received signal number (unused, required by signal API)
    :param stack_frame: the interrupted stack frame (unused, required by signal API)
    """
    # NOTE(review): relies on module-level 'rank' and the DistCLE module import —
    # both defined elsewhere in this file.
    message_template = '[ MPI ] ================ {message} ================ {rank}'
    print(message_template.format(message="received sigterm", rank=str(rank)))
    if DistCLE.simulation is not None:
        print(message_template.format(message="shutdown on sigterm", rank=str(rank)))
        # shutdown() tears down the simulation across all MPI processes
        DistCLE.simulation.shutdown()
    # exit with success status; SIGTERM shutdown is the expected teardown path
    sys.exit(0)
......@@ -52,28 +53,27 @@ if __name__ == '__main__': # pragma: no cover
import socket
hostname = socket.gethostname()
print '[ MPI ] ========== nest rank={}; hostname={} ========'.format(nest.Rank(),hostname)
print('[ MPI ] ========== nest rank={}; hostname={} ========'.format(nest.Rank(), hostname))
# use the MPI process rank to determine if we should launch CLE or brain process
# both launch commands are blocking until shutdown occurs
signal.signal(signal.SIGTERM, handle_sigterm)
print '[ MPI ] ========== initialized={} with thread_level={} ========'.format(
str(MPI.Is_initialized()), str(MPI.Query_thread()))
print('[ MPI ] ========== initialized={} with thread_level={} ========'.format(
str(MPI.Is_initialized()), str(MPI.Query_thread())))
if not MPI.Is_initialized():
MPI.Init_thread(MPI.THREAD_MULTIPLE)
if rank == 0:
# import pydevd
# pydevd.settrace('localhost', port=50003, stdoutToServer=True, stderrToServer=True, suspend=False)
print '[ MPI ] ================ LAUNCHING CLE ================ ' + str(rank)
print('[ MPI ] ================ LAUNCHING CLE ================ ' + str(rank))
DistCLE.launch_cle(argv_backup)
else:
# import pydevd
# pydevd.settrace('localhost', port=50004, stdoutToServer=True, stderrToServer=True, suspend=False)
print '[ MPI ] ================ LAUNCHING NEST ================ ' + str(rank)
print('[ MPI ] ================ LAUNCHING NEST ================ ' + str(rank))
launch_brain(argv_backup)
print '[ MPI ] Gracefully exit process ' + str(rank)
print('[ MPI ] Gracefully exit process ' + str(rank))
sys.exit(0)
# third party requirements
future==0.18.2
mpi4py==2.0.0
#the following is required for the unit testing
mock==1.0.1
testfixtures==3.0.2
testfixtures
'''setup.py'''
from builtins import next
from setuptools import setup
import hbp_nrp_distributed_nest
import pip
from optparse import Option
from optparse import Option # pylint:disable=deprecated-module
options = Option('--workaround')
options.skip_requirements_regex = None
reqs_file = './requirements.txt'
pip_version_major = int(pip.__version__.split('.')[0])
# Hack for old pip versions
if pip.__version__.startswith('10.'):
# Versions greater or equal to 10.x don't rely on pip.req.parse_requirements
install_reqs = list(val.strip() for val in open(reqs_file))
reqs = install_reqs
elif pip.__version__.startswith('1.'):
if pip_version_major == 1:
# Versions 1.x rely on pip.req.parse_requirements
# but don't require a "session" parameter
from pip.req import parse_requirements # pylint:disable=no-name-in-module, import-error
install_reqs = parse_requirements(reqs_file, options=options)
reqs = [str(ir.req) for ir in install_reqs]
else:
elif 10 > pip_version_major > 1:
# Versions greater than 1.x but smaller than 10.x rely on pip.req.parse_requirements
# and requires a "session" parameter
from pip.req import parse_requirements # pylint:disable=no-name-in-module, import-error
......@@ -32,6 +31,10 @@ else:
options=options
)
reqs = [str(ir.req) for ir in install_reqs]
elif pip_version_major >= 10:
# Versions greater or equal to 10.x don't rely on pip.req.parse_requirements
install_reqs = list(val.strip() for val in open(reqs_file))
reqs = install_reqs
# ensure we install numpy before the main list of requirements, ignore
# failures if numpy/cython are not requirements and just proceed (future proof)
......@@ -60,6 +63,8 @@ config = {
'hbp_nrp_distributed_nest.cle',
'hbp_nrp_distributed_nest.launch',
'hbp_nrp_distributed_nest.launch.host'],
'classifiers': ['Programming Language :: Python :: 3',
"Programming Language :: Python :: 2.7"],
'scripts': [],
'name': 'hbp-nrp-distributed-nest',
'include_package_data': True,
......
future
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Please to comment