diff --git a/multiarea_model/.ipynb_checkpoints/analysis-checkpoint.py b/multiarea_model/.ipynb_checkpoints/analysis-checkpoint.py
deleted file mode 100644
index 10a5c0564b630cdce288521e904659dcfe3f7322..0000000000000000000000000000000000000000
--- a/multiarea_model/.ipynb_checkpoints/analysis-checkpoint.py
+++ /dev/null
@@ -1,978 +0,0 @@
-# -*- coding: utf-8 -*-
-
-"""
-analysis
-============
-
-Analysis package to load and analyse data from simulations of the
-multi-area model of macaque visual cortex (Schmidt et al. 2017).
-
-
-Classes
---------
-
-Analysis : loads the data of the specified simulation and provides member
-functions to post-process the data and plot it in various visualizations.
-
-Authors
---------
-Maximilian Schmidt
-Sacha van Albada
-
-"""
-from . import analysis_helpers as ah
-import glob
-import inspect
-from itertools import chain, product
-import json
-import matplotlib.pyplot as plt
-import numpy as np
-import os
-import pandas as pd
-
-from copy import copy
-from matplotlib.colors import LogNorm
-from matplotlib.ticker import FixedLocator
-from nested_dict import nested_dict
-
-try:
-    import seaborn as sns
-except ImportError:
-    pass
-
-
-class Analysis:
-    def __init__(self, network, simulation, data_list=['spikes'],
-                 load_areas=None):
-        """
-        Analysis class.
-        An instance of the analysis class for the given network and simulation.
-        Can be created as a member class of a multiarea_model instance or standalone.
-
-        Parameters
-        ----------
-        network : MultiAreaModel
-            An instance of the multiarea_model class that specifies
-            the network to be analyzed.
-        simulation : Simulation
-            An instance of the simulation class that specifies
-            the simulation to be analyzed.
-        data_list : list of strings {'spikes', 'vm'}, optional
-            Specifies which types of data to load. Defaults to ['spikes'].
-        load_areas : list of strings with area names, optional
-            Specifies the areas for which data is to be loaded.
-            Default value is None and leads to loading of data for all
-            simulated areas.
-        """
-
-        self.network = network
-        self.simulation = simulation
-        assert(self.network.label == self.simulation.network.label)
-        self.output_dir = os.path.join(self.simulation.data_dir, 'Analysis/')
-        try:
-            os.mkdir(self.output_dir)
-        except OSError:
-            pass
-
-        self.T = self.simulation.T
-
-        self.areas_simulated = self.simulation.areas_simulated
-        self.areas_recorded = self.simulation.areas_recorded
-        if load_areas is None:
-            self.areas_loaded = self.areas_simulated
-        else:
-            self.areas_loaded = load_areas
-
-        assert all(area in self.areas_recorded for area in
-                   self.areas_loaded), ("Tried to load areas which "
-                                        "were not recorded")
-        self.interarea_speed = self.network.params['delay_params']['interarea_speed']
-
-        self.load_data(data_list)
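-
-    # Hedged usage sketch (illustrative, not part of the original file;
-    # assumes `M` is a MultiAreaModel instance with an attached Simulation):
-    #   ana = Analysis(M, M.simulation, data_list=['spikes'])
-    #   ana.create_pop_rates()
-    #   print(ana.pop_rates['V1']['23E'])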
-
-    def load_data(self, data_list):
-        """
-        Loads simulation data of the requested type, either from pre-sorted
-        .npy files or, if these are not present, from the raw recording
-        files (.dat for NEST 3, .gdf otherwise).
-
-        Parameters
-        ----------
-
-        data_list : list
-            list of observables to be loaded. Can contain 'spikes' and 'vm'
-        """
-        rec_dir = os.path.join(self.simulation.data_dir, 'recordings')
-        self.network_gids = pd.read_csv(os.path.join(rec_dir, 'network_gids.txt'),
-                                        names=['area', 'population', 'min_gid', 'max_gid'])
-        for data_type in data_list:
-            if data_type == 'spikes':
-                columns = ['senders', 'times']
-                d = 'spike_dict'
-            elif data_type == 'vm':
-                assert self.simulation.params['recording_dict']['record_vm'], (
-                    "Trying to load membrane potentials, but these data "
-                    "have not been recorded")
-                d = 'vm_dict'
-                columns = ['senders', 'times', 'V_m']
-            # print('loading {}'.format(data_type))
-            data = {}
-            # Check if the data has already been stored in binary file
-            for area in self.areas_loaded:
-                data[area] = {}
-                for pop in self.network.structure[area]:
-                    fp = '-'.join((self.simulation.label,
-                                   self.simulation.params['recording_dict'][d]['label'],
-                                   area,
-                                   pop))
-                    fn = os.path.join(rec_dir,
-                                      '.'.join((fp, 'npy')))
-                    try:
-                        data[area][pop] = np.load(fn, allow_pickle=True)
-                    except FileNotFoundError:
-                        if not hasattr(self, 'all_spikes'):
-                            csv_args = {'names': columns,
-                                        'sep': '\t',
-                                        'index_col': False}
-                            if self.network.params['USING_NEST_3']:
-                                csv_args.update({'skiprows': 3})
-                                file_ending = 'dat'
-                            else:
-                                file_ending = 'gdf'
-                            fp = '.'.join(('-'.join((self.simulation.label,
-                                                     self.simulation.params[
-                                                         'recording_dict'][d]['label'],
-                                                     '*')),
-                                           file_ending))
-                            files = glob.glob(os.path.join(rec_dir, fp))
-                            # DataFrame.append was removed in pandas 2.0;
-                            # use pd.concat instead
-                            frames = [pd.read_csv(f, **csv_args)
-                                      for f in files]
-                            dat = (pd.concat(frames, ignore_index=True)
-                                   if frames else pd.DataFrame(columns=columns))
-                            self.all_spikes = dat
-                        # print(area, pop)
-                        gids = self.network_gids[(self.network_gids.area == area) &
-                                                 (self.network_gids.population == pop)]
-                        ind = ((self.all_spikes.senders >= gids.min_gid.values[0]) &
-                               (self.all_spikes.senders <= gids.max_gid.values[0]))
-                        dat = self.all_spikes[ind]
-                        # remove the extracted rows (drop returns a copy,
-                        # so the result must be assigned back)
-                        self.all_spikes = self.all_spikes[~ind]
-                        np.save(fn, np.array(dat))
-                        data[area][pop] = np.array(dat)
-            if data_type == 'spikes':
-                self.spike_data = data
-            elif data_type == 'vm':
-                # Sort membrane potentials to reduce data load
-                self.vm_data = {}
-                for area in data:
-                    self.vm_data[area] = {}
-                    for pop in data[area]:
-                        neurons, time, vm = ah.sort_membrane_by_id(data[area][pop])
-                        self.vm_data[area][pop] = {'neurons': neurons,
-                                                   'V_m': vm,
-                                                   'time': (time[0], time[-1])}
-                self._set_num_vm_neurons()
-
-    def _set_num_vm_neurons(self):
-        """
-        Sets number of neurons from which membrane voltages
-        were recorded during simulation.
-        """
-        self.num_vm_neurons = {}
-        for area in self.vm_data:
-            self.num_vm_neurons[area] = {}
-            for pop in self.vm_data[area]:
-                self.num_vm_neurons[area][pop] = self.vm_data[area][pop][
-                    'neurons'][-1] - self.vm_data[area][pop]['neurons'][0] + 1
-
-# ______________________________________________________________________________
-# Functions for post-processing data into dynamical measures
-    def create_pop_rates(self, **keywords):
-        """
-        Calculate time-averaged population rates and store them in member pop_rates.
-        If the rates had previously been stored with the same
-        parameters, they are loaded from file.
-
-        Parameters
-        ----------
-        t_min : float, optional
-            Minimal time in ms of the simulation to take into account
-            for the calculation. Defaults to 500 ms.
-        t_max : float, optional
-            Maximal time in ms of the simulation to take into account
-            for the calculation. Defaults to the simulation time.
-        compute_stat : bool, optional
-            If set to true, the mean and variance of the population rate
-            is calculated. Defaults to False.
-            Caution: Setting to True slows down the computation.
-        areas : list, optional
-            Which areas to include in the calculation.
-            Defaults to all loaded areas.
-        pops : list or {'complete'}, optional
-            Which populations to include in the calculation.
-            If set to 'complete', all populations of the respective areas
-            are included. Defaults to 'complete'.
-        """
-        default_dict = {'areas': self.areas_loaded,
-                        'pops': 'complete', 'compute_stat': False}
-        params = ah._create_parameter_dict(default_dict, self.T, **keywords)
-        iterator = ah.model_iter(mode='single',
-                                 areas=params['areas'],
-                                 pops=params['pops'])
-        # Check if population rates have been stored with the same parameters
-        fp = os.path.join(self.output_dir, 'pop_rates.json')
-        self.pop_rates = ah._check_stored_data(fp,
-                                               copy(iterator), params)
-
-        if self.pop_rates is None:
-            # print("Computing population rates")
-            d = nested_dict()
-            d['Parameters'] = params
-
-            if params['compute_stat']:
-                for area in params['areas']:
-                    if params['pops'] == 'complete':
-                        pops = self.network.structure[area]
-                    else:
-                        pops = params['pops']
-                    total_rates = []
-                    for pop in pops:
-                        rate = ah.pop_rate(self.spike_data[area][pop],
-                                           params['t_min'],
-                                           params['t_max'],
-                                           self.network.N[area][pop],
-                                           return_stat=True)
-                        d[area][pop] = (rate[0], rate[1])
-                        total_rates += rate[2]
-                    d[area]['total'] = (np.mean(total_rates), np.std(total_rates))
-            else:
-                for area, pop in iterator:
-                    if pop in self.network.structure[area]:
-                        spikes = self.spike_data[area][pop][:, 1]
-                        indices = np.where(np.logical_and(spikes > params['t_min'],
-                                                          spikes < params['t_max']))
-                        d[area][pop] = (indices[0].size / (self.network.N[
-                            area][pop] * (params['t_max'] - params['t_min']) / 1000.0), np.nan)
-                    else:
-                        d[area][pop] = (0., 0.)
-                for area in params['areas']:
-                    total_spikes = ah.area_spike_train(self.spike_data[area])
-                    indices = np.where(np.logical_and(total_spikes[:, 1] > params['t_min'],
-                                                      total_spikes[:, 1] < params['t_max']))
-                    d[area]['total'] = total_spikes[:, 1][indices].size / (
-                        self.network.N[area]['total'] *
-                        (params['t_max'] - params['t_min']) / 1000.0)
-            self.pop_rates = d.to_dict()
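-
-    # Worked example of the rate formula above (illustrative numbers):
-    # 5000 spikes from a population of N = 1000 neurons in a 500 ms window
-    # give 5000 / (1000 * 500 / 1000.0) = 10.0 spikes/s.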
-
-    def create_pop_rate_dists(self, **keywords):
-        """
-        Calculate single neuron population rates and store them in member pop_rate_dists.
-        If the distributions had previously been stored with the
-        same parameters, they are loaded from file.
-        Uses helper function pop_rate_distribution.
-
-        Parameters
-        ----------
-        t_min : float, optional
-            Minimal time in ms of the simulation to take into account
-            for the calculation. Defaults to 500 ms.
-        t_max : float, optional
-            Maximal time in ms of the simulation to take into account
-            for the calculation. Defaults to the simulation time.
-        areas : list, optional
-            Which areas to include in the calculation.
-            Defaults to all loaded areas.
-        pops : list or {'complete'}, optional
-            Which populations to include in the calculation.
-            If set to 'complete', all populations of the respective areas
-            are included. Defaults to 'complete'.
-        """
-
-        default_dict = {'areas': self.areas_loaded, 'pops': 'complete'}
-        params = ah._create_parameter_dict(
-            default_dict, self.T, **keywords)
-        iterator = ah.model_iter(mode='single',
-                                 areas=params['areas'],
-                                 pops=params['pops'])
-        elements = [('histogram',), ('stats-mu',), ('stats-sigma',)]
-        iter_list = [tuple(chain.from_iterable(prod)) for
-                     prod in product(copy(iterator), elements)]
-        # Check if population rates have been stored with the same parameters
-        self.pop_rate_dists = ah._check_stored_data(os.path.join(self.output_dir,
-                                                                 'pop_rate_dists'),
-                                                    iter_list, params)
-
-        if self.pop_rate_dists is None:
-            # print("Computing population dists")
-            d = nested_dict()
-            d['Parameters'] = params
-            for area, pop in iterator:
-                if pop in self.network.structure[area]:
-                    res = list(ah.pop_rate_distribution(self.spike_data[area][pop],
-                                                        params['t_min'],
-                                                        params['t_max'],
-                                                        self.network.N[area][pop]))
-                    d[area][pop] = {'histogram': np.array([res[0], res[1]]),
-                                    'stats': {'mu': res[2],
-                                              'sigma': res[3]}}
-            self.pop_rate_dists = d.to_dict()
-
-    def create_synchrony(self, **keywords):
-        """
-        Calculate synchrony as the coefficient of variation of the population rate
-        and store in member synchrony. Uses helper function synchrony.
-        If the synchrony has previously been stored with the
-        same parameters, they are loaded from file.
-
-
-        Parameters
-        ----------
-        t_min : float, optional
-            Minimal time in ms of the simulation to take into account
-            for the calculation. Defaults to 500 ms.
-        t_max : float, optional
-            Maximal time in ms of the simulation to take into account
-            for the calculation. Defaults to the simulation time.
-        areas : list, optional
-            Which areas to include in the calculation.
-            Defaults to all loaded areas.
-        pops : list or {'complete'}, optional
-            Which populations to include in the calculation.
-            If set to 'complete', all populations of the respective areas
-            are included. Defaults to 'complete'.
-        resolution : float, optional
-            Resolution of the population rate. Defaults to 1 ms.
-        """
-
-        default_dict = {'areas': self.areas_loaded,
-                        'pops': 'complete', 'resolution': 1.0}
-        params = ah._create_parameter_dict(
-            default_dict, self.T, **keywords)
-        iterator = ah.model_iter(mode='single',
-                                 areas=params['areas'],
-                                 pops=params['pops'])
-        # Check if synchrony values have been stored with the same parameters
-        self.synchrony = ah._check_stored_data(os.path.join(self.output_dir, 'synchrony.json'),
-                                               copy(iterator), params)
-
-        if self.synchrony is None:
-            # print("Computing synchrony")
-            d = nested_dict()
-            d['Parameters'] = params
-            for area, pop in iterator:
-                if pop in self.network.structure[area]:
-                    d[area][pop] = ah.synchrony(self.spike_data[area][pop],
-                                                self.network.N[area][pop],
-                                                params['t_min'],
-                                                params['t_max'],
-                                                resolution=params['resolution'])
-                else:
-                    d[area][pop] = np.nan
-
-            for area in params['areas']:
-                total_spikes = ah.area_spike_train(self.spike_data[area])
-                d[area]['total'] = ah.synchrony(
-                    total_spikes,
-                    self.network.N[area]['total'],
-                    params['t_min'],
-                    params['t_max'],
-                    resolution=params['resolution'])
-            self.synchrony = d.to_dict()
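-
-    # Minimal sketch of the synchrony measure (the actual implementation is
-    # ah.synchrony): bin the population spike train and take the coefficient
-    # of variation of the resulting histogram, roughly
-    #   hist, _ = np.histogram(spike_times,
-    #                          bins=np.arange(t_min, t_max, resolution))
-    #   synchrony = np.std(hist) / np.mean(hist)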
-
-    def create_rate_time_series(self, **keywords):
-        """
-        Calculate time series of population- and area-averaged firing rates.
-        Uses ah.pop_rate_time_series.
-        If the rates have previously been stored with the
-        same parameters, they are loaded from file.
-
-
-        Parameters
-        ----------
-        t_min : float, optional
-            Minimal time in ms of the simulation to take into account
-            for the calculation. Defaults to 500 ms.
-        t_max : float, optional
-            Maximal time in ms of the simulation to take into account
-            for the calculation. Defaults to the simulation time.
-        areas : list, optional
-            Which areas to include in the calculation.
-            Defaults to all loaded areas.
-        pops : list or {'complete'}, optional
-            Which populations to include in the calculation.
-            If set to 'complete', all populations of the respective areas
-            are included. Defaults to 'complete'.
-        kernel : {'binned', 'gauss_time_window', 'alpha_time_window', 'rect_time_window'}, optional
-            Specifies the kernel to be convolved with the spike histogram.
-            Defaults to 'binned', which corresponds to no convolution.
-        resolution : float, optional
-            Width of the convolution kernel. Specifically it corresponds to:
-            - 'binned' : bin width of the histogram
-            - 'gauss_time_window' : sigma
-            - 'alpha_time_window' : time constant of the alpha function
-            - 'rect_time_window' : width of the moving rectangular function
-        """
-        default_dict = {'areas': self.areas_loaded, 'pops': 'complete',
-                        'kernel': 'binned', 'resolution': 1.0}
-        params = ah._create_parameter_dict(
-            default_dict, self.T, **keywords)
-
-        # Check if firing rates have been stored with the same parameters
-        fp = os.path.join(self.output_dir, 'rate_time_series')
-        iterator_areas = ah.model_iter(mode='single',
-                                       areas=params['areas'],
-                                       pops=None)
-        iterator_pops = ah.model_iter(mode='single',
-                                      areas=params['areas'],
-                                      pops=params['pops'])
-        self.rate_time_series = ah._check_stored_data(fp, copy(iterator_areas), params)
-        fp = os.path.join(self.output_dir, 'rate_time_series_pops')
-        self.rate_time_series_pops = ah._check_stored_data(fp, copy(iterator_pops), params)
-
-        if self.rate_time_series is None:
-            # print('Computing rate time series')
-
-            # calculate area-averaged firing rates
-            d = nested_dict()
-            d['Parameters'] = params
-            # population-averaged firing rates
-            d_pops = nested_dict()
-            d_pops['Parameters'] = params
-            for area, pop in iterator_pops:
-                if pop in self.network.structure[area]:
-                    time_series = ah.pop_rate_time_series(self.spike_data[area][pop],
-                                                          self.network.N[area][pop],
-                                                          params['t_min'],
-                                                          params['t_max'],
-                                                          params['resolution'],
-                                                          kernel=params['kernel'])
-                else:
-                    # np.ones requires an integer size argument
-                    time_series = np.nan * np.ones(int(params['t_max'] - params['t_min']))
-                d_pops[area][pop] = time_series
-
-            # area-averaged firing rates, computed once per area
-            for area in params['areas']:
-                total_spikes = ah.area_spike_train(self.spike_data[area])
-                time_series = ah.pop_rate_time_series(total_spikes,
-                                                      self.network.N[area]['total'],
-                                                      params['t_min'],
-                                                      params['t_max'],
-                                                      params['resolution'],
-                                                      kernel=params['kernel'])
-                d[area] = time_series
-            self.rate_time_series_pops = d_pops.to_dict()
-            self.rate_time_series = d.to_dict()
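-
-    # Sketch of the underlying estimate (see ah.pop_rate_time_series): the
-    # spike histogram with bin width `resolution` is normalized by the number
-    # of neurons and, for the window kernels, convolved with the chosen
-    # kernel to obtain an instantaneous rate in spikes/s.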
-
-    def create_synaptic_input(self, **keywords):
-        """
-        Calculate synaptic input of populations and areas using the spike data.
-        Uses function ah.pop_synaptic_input.
-        If the synaptic inputs have previously been stored with the
-        same parameters, they are loaded from file.
-
-        Parameters
-        ----------
-        t_min : float, optional
-            Minimal time in ms of the simulation to take into account
-            for the calculation. Defaults to 500 ms.
-        t_max : float, optional
-            Maximal time in ms of the simulation to take into account
-            for the calculation. Defaults to the simulation time.
-        areas : list, optional
-            Which areas to include in the calculation.
-            Defaults to all loaded areas.
-        pops : list or {'complete'}, optional
-            Which populations to include in the calculation.
-            If set to 'complete', all populations of the respective areas
-            are included. Defaults to 'complete'.
-        kernel : {'binned', 'gauss_time_window', 'alpha_time_window', 'rect_time_window'}, optional
-            Convolution kernel for the calculation of the underlying firing rates.
-            Defaults to 'binned' which corresponds to a simple histogram.
-        resolution : float, optional
-            Width of the convolution kernel. Specifically it corresponds to:
-            - 'binned' : bin width of the histogram
-            - 'gauss_time_window' : sigma
-            - 'alpha_time_window' : time constant of the alpha function
-            - 'rect_time_window' : width of the moving rectangular function
-        """
-        default_dict = {'areas': self.areas_loaded, 'pops': 'complete',
-                        'resolution': 1., 'kernel': 'binned'}
-        params = ah._create_parameter_dict(
-            default_dict, self.T, **keywords)
-
-        # Check if synaptic inputs have been stored with the same parameters
-        iterator_areas = ah.model_iter(mode='single',
-                                       areas=params['areas'],
-                                       pops=None)
-        iterator_pops = ah.model_iter(mode='single',
-                                      areas=params['areas'],
-                                      pops=params['pops'])
-        fp = os.path.join(self.output_dir, 'synaptic_input')
-        self.synaptic_input = ah._check_stored_data(fp, copy(iterator_areas), params)
-        fp = os.path.join(self.output_dir, 'synaptic_input_pops')
-        self.synaptic_input_pops = ah._check_stored_data(fp, copy(iterator_pops), params)
-
-        if self.synaptic_input is None:
-            # compute the underlying rate time series if not yet available
-            if not hasattr(self, 'rate_time_series'):
-                self.create_rate_time_series(**params)
-
-            d_pops = nested_dict()
-            d_pops['Parameters'] = params
-            for area, pop in copy(iterator_pops):
-                if pop in self.network.structure[area]:
-                    if 'I' in pop:
-                        tau_syn = self.network.params['neuron_params'][
-                            'single_neuron_dict']['tau_syn_in']
-                    else:
-                        tau_syn = self.network.params['neuron_params'][
-                            'single_neuron_dict']['tau_syn_ex']
-                    time_series = ah.synaptic_output(self.rate_time_series_pops[area][pop],
-                                                     tau_syn, params['t_min'], params['t_max'],
-                                                     resolution=params['resolution'])
-                    d_pops[area][pop] = time_series
-            self.synaptic_output_pops = d_pops.to_dict()
-
-            d_pops = nested_dict()
-            d_pops['Parameters'] = params
-            for area, pop in iterator_pops:
-                if pop in self.network.structure[area]:
-                    time_series = np.zeros(
-                        int((params['t_max'] - params['t_min']) / params['resolution']))
-                    for source_area, source_pop in ah.model_iter(mode='single',
-                                                                 areas=self.areas_loaded):
-                        if source_pop in self.network.structure[source_area]:
-                            weight = self.network.W[area][pop][source_area][source_pop]
-                            time_series += (self.synaptic_output_pops[source_area][source_pop] *
-                                            abs(weight) *
-                                            self.network.K[area][pop][source_area][source_pop])
-                    d_pops[area][pop] = time_series
-
-            d = nested_dict()
-            d['Parameters'] = params
-            for area in params['areas']:
-                d[area] = np.zeros(
-                    int((params['t_max'] - params['t_min']) / params['resolution']))
-                for pop in self.network.structure[area]:
-                    d[area] += d_pops[area][pop] * self.network.N[area][pop]
-                d[area] /= self.network.N[area]['total']
-            self.synaptic_input = d.to_dict()
-            self.synaptic_input_pops = d_pops.to_dict()
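-
-    # Sketch of the computation above: the input to each target population is
-    # the indegree- and weight-scaled sum over all source populations,
-    #   input[target](t) = sum_source K[target][source] * |W[target][source]|
-    #                      * synaptic_output[source](t),
-    # and the area-level signal is the neuron-number-weighted average over
-    # the populations of the area.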
-
-    def create_pop_cv_isi(self, **keywords):
-        """
-        Calculate population-averaged CV ISI values and store as member pop_cv_isi.
-        Uses helper function cv_isi.
-        If the CV ISI have previously been stored with the
-        same parameters, they are loaded from file.
-
-        Parameters
-        ----------
-        t_min : float, optional
-            Minimal time in ms of the simulation to take into account
-            for the calculation. Defaults to 500 ms.
-        t_max : float, optional
-            Maximal time in ms of the simulation to take into account
-            for the calculation. Defaults to the simulation time.
-        areas : list, optional
-            Which areas to include in the calculation.
-            Defaults to all loaded areas.
-        pops : list or {'complete'}, optional
-            Which populations to include in the calculation.
-            If set to 'complete', all populations of the respective areas
-            are included. Defaults to 'complete'.
-        """
-
-        default_dict = {'areas': self.areas_loaded, 'pops': 'complete'}
-        params = ah._create_parameter_dict(
-            default_dict, self.T, **keywords)
-        # Check if CV ISI have been stored with the same parameters
-        iterator = ah.model_iter(mode='single',
-                                 areas=params['areas'],
-                                 pops=params['pops'])
-        fp = os.path.join(self.output_dir, 'pop_cv_isi.json')
-        self.pop_cv_isi = ah._check_stored_data(fp,
-                                                copy(iterator), params)
-
-        if self.pop_cv_isi is None:
-            # print("Computing population CV ISI")
-            d = nested_dict()
-            d['Parameters'] = params
-            for area, pop in iterator:
-                if pop in self.network.structure[area]:
-                    d[area][pop] = ah.pop_cv_isi(self.spike_data[area][pop],
-                                                 params['t_min'],
-                                                 params['t_max'])
-            self.pop_cv_isi = d.to_dict()
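-
-    # For reference: for a spike train with inter-spike intervals
-    # isi_k = t_{k+1} - t_k, the CV ISI is std(isi) / mean(isi);
-    # ah.pop_cv_isi averages this quantity over the neurons of a population.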
-
-    def create_pop_LvR(self, **keywords):
-        """
-        Calculate population-averaged LvR (see Shinomoto et al. 2009) and
-        store as member pop_LvR. Uses helper function LvR.
-
-        Parameters
-        ----------
-        t_min : float, optional
-            Minimal time in ms of the simulation to take into account
-            for the calculation. Defaults to 500 ms.
-        t_max : float, optional
-            Maximal time in ms of the simulation to take into account
-            for the calculation. Defaults to the simulation time.
-        areas : list, optional
-            Which areas to include in the calculation.
-            Defaults to all loaded areas.
-        pops : list or {'complete'}, optional
-            Which populations to include in the calculation.
-            If set to 'complete', all populations of the respective areas
-            are included. Defaults to 'complete'.
-        """
-        default_dict = {'areas': self.areas_loaded, 'pops': 'complete'}
-        params = ah._create_parameter_dict(
-            default_dict, self.T, **keywords)
-
-        # Check if LvR have been stored with the same parameters
-        iterator = ah.model_iter(mode='single',
-                                 areas=params['areas'],
-                                 pops=params['pops'])
-        fp = os.path.join(self.output_dir, 'pop_LvR.json')
-        self.pop_LvR = ah._check_stored_data(fp,
-                                             copy(iterator), params)
-        if self.pop_LvR is None:
-            # print("Computing population LvR")
-            d = nested_dict()
-            d['Parameters'] = params
-            for area, pop in iterator:
-                if pop in self.network.structure[area]:
-                    if self.network.N[area][pop] > 0.:
-                        d[area][pop] = ah.pop_LvR(self.spike_data[area][pop],
-                                                  2.0,
-                                                  params['t_min'],
-                                                  params['t_max'],
-                                                  int(self.network.N[area][pop]))[0]
-            self.pop_LvR = d.to_dict()
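-
-    # For reference (Shinomoto et al. 2009): with inter-spike intervals I_i
-    # and refractory time R (2.0 ms in the call above),
-    #   LvR = 3 / (n - 1) * sum_i (1 - 4 * I_i * I_{i+1} / (I_i + I_{i+1})**2)
-    #                             * (1 + 4 * R / (I_i + I_{i+1}))
-    # averaged over the neurons of the population.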
-
-# ______________________________________________________________________________
-# Function for plotting data
-    def single_dot_display(self, area, frac_neurons, t_min=500., t_max='T', **keywords):
-        """
-        Create raster display of a single area with populations stacked
-        onto each other. Excitatory neurons in blue, inhibitory
-        neurons in red.
-
-        Parameters
-        ----------
-        area : string {area}
-            Area to be plotted.
-        frac_neurons : float, [0,1]
-            Fraction of cells to be considered.
-        t_min : float, optional
-            Minimal time in ms of spikes to be shown. Defaults to 500 ms.
-        t_max : float, optional
-            Maximal time in ms of spikes to be shown. Defaults to the simulation time.
-        output : {'pdf', 'png', 'eps'}, optional
-            If given, the function stores the plot to a file of the given format.
-
-        """
-        if t_max == 'T':
-            t_max = self.T
-
-        try:
-            fig = plt.figure()
-        except RuntimeError:
-            plt.switch_backend('agg')
-            fig = plt.figure()
-        ax = fig.add_subplot(111)
-        assert(area in self.areas_loaded)
-        # Determine number of neurons that will be plotted for this area (for vertical offset)
-        offset = 0
-        n_to_plot = {}
-        for pop in self.network.structure[area]:
-            n_to_plot[pop] = int(self.network.N[
-                                 area][pop] * frac_neurons)
-            offset = offset + n_to_plot[pop]
-        y_max = offset + 1
-        prev_pop = ''
-        yticks = []
-        yticklocs = []
-        # Loop over populations
-        for pop in self.network.structure[area]:
-            if pop[0:-1] != prev_pop:
-                prev_pop = pop[0:-1]
-                yticks.append('L' + pop[0:-1])
-                yticklocs.append(offset - 0.5 * n_to_plot[pop])
-            indices = np.where(np.logical_and(self.spike_data[area][pop][:, 1] > t_min,
-                                              self.spike_data[area][pop][:, 1] < t_max))
-
-            pop_data = self.spike_data[area][pop][indices[0]]
-            neurons_to_plot = np.arange(np.min(self.spike_data[area][pop][:, 0]), np.min(
-                self.spike_data[area][pop][:, 0]) + n_to_plot[pop], 1)
-
-            if pop.find('E') > (-1):
-                pcolor = '#595289'
-            else:
-                pcolor = '#af143c'
-
-            for k in range(n_to_plot[pop]):
-                spike_times = pop_data[
-                    pop_data[:, 0] == neurons_to_plot[k], 1]
-
-                ax.plot(spike_times, np.zeros(len(spike_times)) +
-                        offset - k, '.', color=pcolor, markersize=1)
-            offset = offset - n_to_plot[pop]
-        y_min = offset
-        ax.set_xlim([t_min, t_max])
-        ax.set_ylim([y_min, y_max])
-        ax.set_xlabel('time [ms]', size=16)
-        ax.set_ylabel('Neuron', size=16)
-
-        if 'output' in keywords:
-            plt.savefig(os.path.join(self.output_dir,
-                                     '{}_Dotplot_{}.{}'.format(self.simulation.label,
-                                                               area, keywords['output'])))
-        else:
-            fig.show()
-
-    def single_rate_display(self, area, pop=None,  t_min=None, t_max=None, **keywords):
-        """
-        Plot rate time series for a single area or population.
-        Uses the rate time series stored in members rate_time_series and
-        rate_time_series_pops.
-
-        Parameters
-        ----------
-        area : string {area}
-            Area to be plotted.
-        pop : string, optional
-            If given, the rate of a specific population in area is plotted.
-            Defaults to None, then the area-averaged rate is plotted.
-        t_min : float, optional
-            Minimal time in ms of spikes to be shown.
-            Defaults to minimal time of computed rate time series.
-        t_max : float, optional
-            Maximal time in ms of spikes to be shown.
-            Defaults to maximal time of computed rate time series.
-        output : {'pdf', 'png', 'eps'}, optional
-            If given, the function stores the plot to a file of the given format.
-        """
-        if pop is None:
-            rates = self.rate_time_series[area]
-            params = self.rate_time_series['Parameters']
-        else:
-            rates = self.rate_time_series_pops[area][pop]
-            params = self.rate_time_series_pops['Parameters']
-
-        if t_max is None:
-            t_max = params['t_max']
-        if t_min is None:
-            t_min = params['t_min']
-
-        i_min = int(t_min - params['t_min'])
-        i_max = int(t_max - params['t_min'])
-
-        rates = rates[i_min:i_max]
-
-        fig = plt.figure(figsize=(6, 4))
-        ax = fig.add_subplot(111)
-
-        times = np.arange(t_min, t_max, 1.0)
-
-        ax.plot(times, rates, color='k', markersize=1)
-
-        if pop:
-            ax.set_title('{} {} {}'.format(area, pop, params['kernel']))
-        else:
-            ax.set_title('{} {}'.format(area, params['kernel']))
-        ax.set_xlabel('time [ms]', size=15)
-        ax.set_ylabel('rate [1/s]', size=15)
-
-        if 'output' in keywords:
-            if pop:
-                plt.savefig(os.path.join(self.output_dir,
-                                         '{}_rate_{}_{}.{}'.format(self.simulation.label,
-                                                                   area, pop, keywords['output'])))
-            else:
-                plt.savefig(os.path.join(self.output_dir,
-                                         '{}_rate_{}.{}'.format(self.simulation.label,
-                                                                area, keywords['output'])))
-        else:
-            fig.show()
-
-    def single_power_display(self, area, pop=None, t_min=None,
-                             t_max=None, resolution=1., kernel='binned', Df=None, **keywords):
-        """
-        Plot power spectrum for a single area.
-        Directly computes the values via ah.spectrum from the loaded
-        spike data.
-
-        Parameters
-        ----------
-        area : string {area}
-            Area to be plotted.
-        pop : string, optional
-            If given, the power spectrum of a specific population in area
-            is plotted. Defaults to None, then the spectrum of the whole
-            area is computed.
-        t_min : float, optional
-            Minimal time in ms of spikes to be considered.
-            Defaults to 0 ms.
-        t_max : float, optional
-            Maximal time in ms of spikes to be considered.
-            Defaults to the simulation time.
-        kernel : {'binned', 'gauss_time_window', 'alpha_time_window', 'rect_time_window'}, optional
-            Specifies the kernel to be convolved with the spike histogram.
-            Defaults to 'binned', which corresponds to no convolution.
-        resolution : float, optional
-            Width of the convolution kernel. Specifically it corresponds to:
-            - 'binned' : bin width of the histogram
-            - 'gauss_time_window' : sigma
-            - 'alpha_time_window' : time constant of the alpha function
-            - 'rect_time_window' : width of the moving rectangular function
-        Df : float, optional
-            Window width of sliding rectangular filter (smoothing) of the spectrum.
-            The default value is None and leads to no smoothing.
-        output : {'pdf', 'png', 'eps'}, optional
-            If given, the function stores the plot to a file of the given format.
-        """
-        if pop is None:
-            data = self.spike_data[area][self.network.structure[area][0]]
-            num_neur = self.network.N[area][self.network.structure[area][0]]
-            for population in self.network.structure[area][1:]:
-                data = np.vstack((data, self.spike_data[area][population]))
-                num_neur += self.network.N[area][population]
-        else:
-            data = self.spike_data[area][pop]
-            num_neur = self.network.N[area][pop]
-
-        if t_max is None:
-            t_max = self.T
-        if t_min is None:
-            t_min = 0.
-
-        power, freq = ah.spectrum(data, num_neur, t_min, t_max,
-                                  resolution=resolution, kernel=kernel, Df=Df)
-
-        fig = plt.figure()
-        ax = fig.add_subplot(111)
-
-        ax.plot(freq, power, color='k', markersize=3)
-        if pop:
-            ax.set_title('{} {} {}'.format(area, pop, kernel))
-        else:
-            ax.set_title('{} {}'.format(area, kernel))
-        ax.set_xlabel('Frequency [Hz]', size=16)
-        ax.set_ylabel('Power', size=16)
-        ax.set_xlim(0.0, 500.0)
-        ax.set_yscale("Log")
-
-        if 'output' in keywords:
-            if pop:
-                plt.savefig(os.path.join(self.output_dir,
-                                         '{}_power_spectrum_{}_{}.{}'.format(self.simulation.label,
-                                                                             area,
-                                                                             pop,
-                                                                             keywords['output'])))
-            else:
-                plt.savefig(os.path.join(self.output_dir,
-                                         '{}_power_spectrum_{}.{}'.format(self.simulation.label,
-                                                                          area,
-                                                                          keywords['output'])))
-        else:
-            fig.show()
-
-    def show_rates(self, area_list=None, **keywords):
-        """
-        Plot overview over time-averaged population rates encoded in colors
-        with areas along x-axis and populations along y-axis.
-
-        Parameters
-        ----------
-        area_list : list, optional
-           Specifies which areas are plotted in which order.
-           Defaults to None, leading to plotting of all areas ordered by architectural type.
-        output : {'pdf', 'png', 'eps'}, optional
-            If given, the function stores the plot to a file of the given format.
-        """
-        if area_list is None:
-            area_list = ['V1', 'V2', 'VP', 'V3', 'PIP', 'V3A', 'MT', 'V4t', 'V4',
-                         'PO', 'VOT', 'DP', 'MIP', 'MDP', 'MSTd', 'VIP', 'LIP',
-                         'PITv', 'PITd', 'AITv', 'MSTl', 'FST', 'CITv', 'CITd',
-                         '7a', 'STPp', 'STPa', 'FEF', '46', 'TF', 'TH', 'AITd']
-
-        matrix = np.zeros((len(area_list), len(self.network.structure['V1'])))
-
-        fig = plt.figure(figsize=(6, 4))
-        ax = fig.add_subplot(111)
-
-        for i, area in enumerate(area_list):
-            # self.network has no attribute structure_reversed, so the
-            # populations are taken in the order given by structure['V1']
-            for j, pop in enumerate(self.network.structure['V1']):
-                if pop in self.network.structure[area]:
-                    rate = self.pop_rates[area][pop][0]
-                    if rate == 0.0:
-                        rate = 1e-5  # To distinguish zero-rate from non-existing populations
-                else:
-                    rate = np.nan
-                matrix[i][j] = rate
-
-        cm = plt.cm.jet
-        cm = cm.from_list('mycmap', [(0., 64./255., 192./255.),  # custom dark blue
-                                     (0., 128./255., 192./255.),  # custom light blue
-                                     'white',
-                                     (245./255., 157./255., 115./255.),  # custom light red
-                                     (192./255., 64./255., 0.)], N=256)  # custom dark red
-        cm.set_under('0.3')
-        cm.set_bad('k')
-
-        matrix = np.transpose(matrix)
-        masked_matrix = np.ma.masked_where(np.isnan(matrix), matrix)
-        ax.patch.set_hatch('x')
-        im = ax.pcolormesh(masked_matrix, cmap=cm, edgecolors='None', norm=LogNorm(
-            vmin=0.01, vmax=100.))
-        ax.set_xlim(0, matrix[0].size)
-
-        x_index = np.arange(4.5, 31.6, 5.0)
-        x_ticks = [int(a + 0.5) for a in x_index]
-        y_index = list(range(len(self.network.structure['V1'])))
-        y_index = [a + 0.5 for a in y_index]
-        # print(self.network.structure['V1'])
-        ax.set_xticks(x_index)
-        ax.set_xticklabels(x_ticks)
-        ax.set_yticks(y_index)
-        # label populations in the order in which the matrix rows were filled
-        ax.set_yticklabels(self.network.structure['V1'])
-        ax.set_ylabel('Population', size=18)
-        ax.set_xlabel('Area index', size=18)
-        t = FixedLocator([0.01, 0.1, 1., 10., 100.])
-
-        plt.colorbar(im, ticks=t)
-
-        if 'output' in keywords:
-            plt.savefig(os.path.join(self.output_dir, '{}_rates.{}'.format(self.simulation.label,
-                                                                           keywords['output'])))
-        else:
-            fig.show()
-
-# ______________________________________________________________________________
-# Functions to store data to file
-
-    def save(self):
-        """
-        Saves all post-processed data to files.
-        """
-        members = inspect.getmembers(self)
-        save_list_json = ['structure', 'pop_rates', 'synchrony',
-                          'pop_cv_isi', 'pop_LvR',
-                          'indegree_data', 'indegree_areas_data',
-                          'outdegree_data', 'outdegree_areas_data']
-        save_list_npy = ['pop_rate_dists', 'rate_time_series',
-                         'rate_time_series_pops', 'bold_signal',
-                         'synaptic_input', 'synaptic_input_pops']
-        for name, value in members:
-            if name in save_list_json:
-                with open(os.path.join(self.output_dir, name + '.json'), 'w') as f:
-                    json.dump(value, f)
-            if name in save_list_npy:
-                fp = os.path.join(self.output_dir, name)
-                ah._save_dict_to_npy(fp, value)
diff --git a/multiarea_model/.ipynb_checkpoints/analysis_helpers-checkpoint.py b/multiarea_model/.ipynb_checkpoints/analysis_helpers-checkpoint.py
deleted file mode 100644
index aa241e10b3c838ab5bf437c3ed8268e6f085af5c..0000000000000000000000000000000000000000
--- a/multiarea_model/.ipynb_checkpoints/analysis_helpers-checkpoint.py
+++ /dev/null
@@ -1,814 +0,0 @@
-# -*- coding: utf-8 -*-
-
-"""
-analysis_helpers
-================
-
-Helper and analysis functions to support ana_vistools and
-the analysis of simulations of the multi-area model of
-macaque visual cortex (Schmidt et al. 2018).
-
-
-Functions
---------
-_create_parameter_dict : Create parameter dict for functions
-                         of data class.
-_check_stored_data : Check if stored data was computed
-                     with the correct Parameters.
-online_hist : Compute spike histogram on a spike file line by line.
-pop_rate : Compute average firing rate.
-pop_rate_distribution : Compute distribution of single-cell firing rates.
-pop_rate_time_series : Compute time series of population rate.
-Regularity measures:
-    - pop_cv_isi : Compute population-averaged CV ISI.
-    - pop_LvR: Compute average LvR of neuronal population.
-
-Synchrony measures :
-    - synchrony : CV of population rate.
-    - synchrony_subthreshold : Synchrony measure on membrane potentials.
-    - spike_synchrony : Synchrony measure on population rate.
-
-spectrum : Compound power spectrum of a neuronal population.
-synaptic_output : Synaptic output of neuronal population.
-compare : Compare two simulations with each other.
-
-
-Authors
---------
-Maximilian Schmidt
-Sacha van Albada
-
-"""
-
-from copy import copy
-from nested_dict import nested_dict
-import numpy as np
-import json
-from itertools import product
-from scipy.signal import welch
-
-
-area_list = ['V1', 'V2', 'VP', 'V3', 'PIP', 'V3A', 'MT', 'V4t', 'V4',
-             'PO', 'VOT', 'DP', 'MIP', 'MDP', 'MSTd', 'VIP', 'LIP',
-             'PITv', 'PITd', 'AITv', 'MSTl', 'FST', 'CITv', 'CITd',
-             '7a', 'STPp', 'STPa', 'FEF', '46', 'TF', 'TH', 'AITd']
-pop_list = ['23E', '23I', '4E', '4I', '5E', '5I', '6E', '6I']
-
-
-def model_iter(mode='single',
-               areas=None, pops='complete',
-               areas2=None, pops2='complete'):
-    """
-    Helper function to create an iterator over all possible populations
-    or pairs of populations in the model, possibly restricted by
-    specifying areas or pops.
-
-    Parameters
-    ----------
-    mode : {'single', 'pairs'}, optional
-        If equal to 'single', loop over all populations of all areas.
-        If equal to 'pairs', loop over all pairs of
-        populations of all areas.
-        Defaults to 'single'.
-
-    areas, areas2 : list, optional
-        If specified, loop only over these areas as target and source
-        areas. Defaults to None, which corresponds to taking all areas
-        into account.
-    pops, pops2 : string or list, optional
-        If specified, loop only over these populations as target and
-        source populations. Defaults to 'complete', which corresponds
-        to taking all populations into account. If None, loop only over
-        areas.
-
-    Returns
-    -------
-    iterator : iterator
-        Cartesian product of 2 ('single' mode) or 4 ('pairs' mode) lists
-    """
-    if mode == 'single':
-        assert((areas2 is None) and (pops2 == 'complete'))
-    if pops is None or pops2 is None:
-        assert((pops is None) and (pops2 is None) or mode == 'single')
-    if pops == 'complete':
-        pops = pop_list
-    if areas is None:
-        areas = area_list
-    if pops2 == 'complete':
-        pops2 = pop_list
-    if areas2 is None:
-        areas2 = area_list
-    if mode == 'single':
-        if pops is None:
-            return product(areas)
-        else:
-            return product(areas, pops)
-    elif mode == 'pairs':
-        if pops is None:
-            return product(areas, areas2)
-        else:
-            return product(areas, pops, areas2, pops2)
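-
-# Usage sketch (illustrative): in 'single' mode the iterator is the Cartesian
-# product of areas and populations, e.g.
-#   list(model_iter(mode='single', areas=['V1'], pops=['23E', '23I']))
-#   # -> [('V1', '23E'), ('V1', '23I')]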
-
-
-def area_spike_train(spike_data):
-    """
-    Helper function to create one spike train for an area from
-    the spike trains of the single populations.
-
-    Parameters
-    ----------
-    spike_data : dict
-        Dictionary containing the populations as keys
-        and their spike trains as values. Spike trains
-        are stored as 2D arrays with GIDs in the 1st column
-        and time stamps in the 2nd column.
-
-    Returns
-    -------
-    data_array : numpy.ndarray
-    """
-    data_array = np.array([])
-    for pop in spike_data:
-        data_array = np.append(data_array, spike_data[pop])
-    data_array = np.reshape(data_array, (-1, 2))
-    return data_array
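-
-# Example (illustrative): spike_data = {'23E': np.array([[1., 10.0]]),
-# '23I': np.array([[5., 12.5]])} yields array([[1., 10.], [5., 12.5]]).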
-
-
-def centralize(data, time=False, units=False):
-    """
-    Code written by David Dahmen and Jakob Jordan,
-    available from https://github.com/INM-6/correlation-toolbox .
-
-    Set mean of the given data to zero by averaging either
-    across time or units.
-    """
-
-    assert(time is not False or units is not False)
-    res = copy(data)
-    if time is True:
-        res = np.array([x - np.mean(x) for x in res])
-    if units is True:
-        res = np.array(res - np.mean(res, axis=0))
-    return res
-
-
-def sort_gdf_by_id(data, idmin=None, idmax=None):
-    """
-    Code written by David Dahmen and Jakob Jordan,
-    available from https://github.com/INM-6/correlation-toolbox .
-
-    Sort gdf data [(id,time),...] by neuron id.
-
-    Parameters
-    ----------
-
-    data : numpy.ndarray
-          The NEST output loaded from gdf format. Each row contains a
-          global id and a spike time.
-    idmin, idmax : int, optional
-            The minimum/maximum neuron id to be considered.
-
-    Returns
-    -------
-    ids : list of ints
-          Neuron ids, e.g., [id1,id2,...]
-    srt : list of lists of floats
-          Spike trains corresponding to the neuron ids, e.g.,
-          [[t1,t2,...],...]
-    """
-
-    assert((idmin is None and idmax is None)
-           or (idmin is not None and idmax is not None))
-
-    if len(data) > 0:
-        # get neuron ids
-        if idmin is None and idmax is None:
-            ids = np.unique(data[:, 0])
-        else:
-            ids = np.arange(idmin, idmax+1)
-        srt = []
-        for i in ids:
-            srt.append(np.sort(data[np.where(data[:, 0] == i)[0], 1]))
-        return ids, srt
-    else:
-        print('CT warning(sort_spiketrains_by_id): empty gdf data!')
-        return None, None
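-
-# Example (illustrative): data = np.array([[2., 1.5], [1., 0.2], [2., 0.7]])
-# gives ids = array([1., 2.]) and srt = [array([0.2]), array([0.7, 1.5])].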
-
-
-"""
-Helper functions for data loading
-"""
-
-
-def _create_parameter_dict(default_dict, T, **keywords):
-    """
-    Create the parameter dict for the members of the data class.
-
-    Parameters
-    ----------
-    default_dict : dict
-        Default dictionary of the function calling this function.
-    T : float
-        Maximal time of the simulation of the data class calling.
-
-    Returns
-    -------
-    d : dict
-        Parameter dictionary.
-    """
-    d = default_dict
-    if 't_min' not in keywords:
-        t_min = 500.
-        d.update({'t_min': t_min})
-    if 't_max' not in keywords:
-        t_max = T
-        d.update({'t_max': t_max})
-    d.update(keywords)
-    return d
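-
-# Example (illustrative): _create_parameter_dict({'areas': ['V1']}, 10000.,
-# t_min=1000.) returns {'areas': ['V1'], 't_max': 10000., 't_min': 1000.}.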
-
-
-def _check_stored_data(fp, fn_iter, param_dict):
-    """
-    Check if a data member of the data class has already
-    been computed with the same parameters.
-
-    Parameters
-    ----------
-    fp : string
-        Path of the file containing the data.
-    fn_iter : iterable
-        Iterable of key tuples defining the file-name suffixes
-        (used for npy-based storage).
-    param_dict : dict
-        Parameters of the calculation to compare with
-        the parameters of the stored data.
-    """
-    if 'json' in fp:
-        try:
-            f = open(fp)
-            data = json.load(f)
-            f.close()
-        except IOError:
-            return None
-        param_dict2 = data['Parameters']
-    else:
-        try:
-            data = _load_npy_to_dict(fp, fn_iter)
-        except IOError:
-            return None
-        with open('-'.join((fp, 'parameters')), 'r') as f:
-            param_dict2 = json.load(f)
-    param_dict_copy = copy(param_dict)
-    param_dict2_copy = copy(param_dict2)
-    for k in param_dict:
-        if (isinstance(param_dict_copy[k], list) or
-                isinstance(param_dict_copy[k], np.ndarray)):
-            param_dict_copy[k] = set(param_dict_copy[k])
-        if (isinstance(param_dict2_copy[k], list) or
-                isinstance(param_dict2_copy[k], np.ndarray)):
-            param_dict2_copy[k] = set(param_dict2_copy[k])
-    if param_dict_copy == param_dict2_copy:
-        # print("Loading data from file")
-        return data
-    else:
-        print("Stored data have been computed "
-              "with different parameters")
-        return None
-
-
-def _save_dict_to_npy(fp, data):
-    """
-    Save data dictionary to binary numpy files
-    by iteratively going through the dictionary.
-
-    Parameters
-    ----------
-    fp : str
-       File pattern to which the keys of the dictionary are attached.
-    data : dict
-       Dictionary containing the data
-    """
-    for key, val in data.items():
-        if key != 'Parameters':
-            fp_key = '-'.join((fp, key))
-            if isinstance(val, dict):
-                _save_dict_to_npy(fp_key, val)
-            else:
-                np.save(fp_key, val)
-        else:
-            fp_key = '-'.join((fp, 'parameters'))
-            with open(fp_key, 'w') as f:
-                json.dump(val, f)
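-
-# Example of the resulting file layout (illustrative): calling
-# _save_dict_to_npy('rate_time_series', {'V1': {'23E': arr}, 'Parameters': p})
-# writes 'rate_time_series-V1-23E.npy' and the JSON parameter file
-# 'rate_time_series-parameters'.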
-
-
-def _load_npy_to_dict(fp, fn_iter):
-    """
-    Load data stored in the files defined by fp
-    and fn_iter to a dictionary.
-
-    Parameters
-    ----------
-    fp : str
-       Base file pattern of the npy files
-    fn_iter : iterable
-       Iterable defining all the suffixes that are
-       appended to fp to form the file names.
-    """
-    data = nested_dict()
-    for it in fn_iter:
-        fp_it = (fp,) + it
-        fp_ = '{}.npy'.format('-'.join(fp_it))
-        # walk/create the nested levels given by the key tuple, so that
-        # key tuples of any length (e.g. (area, pop, 'histogram')) work
-        node = data
-        for key in it[:-1]:
-            node = node[key]
-        node[it[-1]] = np.load(fp_)
-    return data
-
-
-"""
-Analysis functions
-"""
-
-
-def pop_rate(data_array, t_min, t_max, num_neur, return_stat=False):
-    """
-    Calculates firing rate of a given array of spikes.
-    Rates are calculated in spikes/s. Assumes spikes are sorted
-    according to time. First calculates rates for individual neurons
-    and then averages over neurons.
-
-    Parameters
-    ----------
-    data_array : numpy.ndarray
-        Array with spike data.
-        column 0: neuron_ids, column 1: spike times
-    t_min : float
-        Minimal time stamp to be considered in ms.
-    t_max : float
-        Maximal time stamp to be considered in ms.
-    num_neur : int
-        Number of recorded neurons. Needs to be provided explicitly
-        to avoid corruption of results by silent neurons not
-        present in the given data.
-    return_stat : bool, optional
-        Whether to return the mean, standard deviation, and
-        single-cell rates instead of only the population-averaged
-        rate. Defaults to False.
-
-    Returns
-    -------
-    mean : float
-        Mean firing rate across neurons. If return_stat is False,
-        this is the only return value.
-    std : float
-        Standard deviation of the firing rate distribution
-        (only if return_stat is True).
-    rates : list
-        List of single-cell firing rates (only if return_stat is True).
-    """
-
-    indices = np.where(np.logical_and(data_array[:, 1] > t_min,
-                                      data_array[:, 1] < t_max))
-    data_array = data_array[indices]
-    if return_stat:
-        rates = []
-        for i in np.unique(data_array[:, 0]):
-            num_spikes = np.where(data_array[:, 0] == i)[0].size
-            rates.append(num_spikes / ((t_max - t_min) / 1000.))
-        # Pad with zero rates for silent neurons that do not
-        # appear in the spike data
-        while len(rates) < num_neur:
-            rates.append(0.0)
-        mean = np.mean(rates)
-        std = np.std(rates)
-        return mean, std, rates
-    else:
-        return data_array[:, 1].size / (num_neur * (t_max - t_min) / 1000.)
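-
-
-# Usage sketch (synthetic spike data, illustrative only):
-#   spikes = np.array([[1., 600.], [1., 700.], [2., 650.]])
-#   pop_rate(spikes, 500., 1500., num_neur=3)
-#   # -> 3 spikes / (3 neurons * 1 s) = 1.0 spikes/s
-#   pop_rate(spikes, 500., 1500., num_neur=3, return_stat=True)
-#   # -> (mean, std, rates), with rates zero-padded for silent neurons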
-
-
-def pop_rate_distribution(data_array, t_min, t_max, num_neur):
-    """
-    Calculates firing rate distribution over neurons in a given array
-    of spikes. Rates are calculated in spikes/s. Assumes spikes are
-    sorted according to time. First calculates rates for individual
-    neurons and then averages over neurons.
-
-    Parameters
-    ----------
-    data_array : numpy.ndarray
-        Array with spike data.
-        column 0: neuron_ids, column 1: spike times
-    t_min : float
-        Minimal time stamp to be considered in ms.
-    t_max : float
-        Maximal time stamp to be considered in ms.
-    num_neur: int
-        Number of recorded neurons. Needs to be provided explicitly
-        to avoid corruption of results by silent neurons not
-        present in the given data.
-
-    Returns
-    -------
-    bins : numpy.ndarray
-        Left edges of the distribution bins
-    vals : numpy.ndarray
-        Values of the distribution
-    mean : float
-        Arithmetic mean of the distribution
-    std : float
-        Standard deviation of the distribution
-    """
-    indices = np.where(np.logical_and(data_array[:, 1] > t_min,
-                                      data_array[:, 1] < t_max))
-    neurons = data_array[:, 0][indices]
-    neurons = np.sort(neurons)
-    if len(neurons) > 0:
-        n = neurons[0]
-    else:  # No spikes in [t_min, t_max]
-        n = None
-    rates = np.zeros(int(num_neur))
-    s = 0
-    for i in range(neurons.size):
-        if neurons[i] == n:
-            rates[s] += 1
-        else:
-            n = neurons[i]
-            s += 1
-            # Count the spike that triggered the switch to the next neuron
-            rates[s] += 1
-    rates /= (t_max - t_min) / 1000.
-    vals, bins = np.histogram(rates, bins=100)
-    vals = vals / float(np.sum(vals))
-    if (num_neur > 0. and t_max != t_min
-            and len(data_array) > 0 and len(indices[0]) > 0):
-        return bins[0:-1], vals, np.mean(rates), np.std(rates)
-    else:
-        return np.arange(0, 20., 20. / 100.), np.zeros(100), 0.0, 0.0
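-
-
-# Usage sketch (illustrative): for spike data recorded from 240 neurons,
-#   bins, vals, mean, std = pop_rate_distribution(spikes, 500., 10500., 240)
-# yields a normalized 100-bin histogram of single-cell rates together with
-# the mean and standard deviation across all recorded neurons.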
-
-
-def pop_rate_time_series(data_array, num_neur, t_min, t_max,
-                         resolution=10., kernel='binned'):
-    """
-    Computes time series of the population-averaged rates of a group
-    of neurons.
-
-    Parameters
-    ----------
-    data_array : numpy.ndarray
-        Array with spike data.
-        column 0: neuron_ids, column 1: spike times
-    t_min : float
-        Minimal time for the calculation.
-    t_max : float
-        Maximal time for the calculation.
-    num_neur: int
-        Number of recorded neurons. Needs to be provided explicitly
-        to avoid corruption of results by silent neurons not
-        present in the given data.
-    kernel : {'gauss_time_window', 'alpha_time_window',
-        'rect_time_window'}, optional
-        Specifies the kernel to be
-        convolved with the spike histogram. Defaults to 'binned',
-        which corresponds to no convolution.
-    resolution: float, optional
-        Width of the convolution kernel. Specifically it corresponds to:
-        - 'binned' : bin width of the histogram
-        - 'gauss_time_window' : sigma
-        - 'alpha_time_window' : time constant of the alpha function
-        - 'rect_time_window' : width of the moving rectangular function
-        Defaults to 10 ms.
-
-    Returns
-    -------
-    time_series : numpy.ndarray
-        Time series of the population rate
-    """
-    if kernel == 'binned':
-        rate, times = np.histogram(data_array[:, 1], bins=int((t_max - t_min) / (resolution)),
-                                   range=(t_min + resolution / 2., t_max + resolution / 2.))
-        rate = rate / (num_neur * resolution / 1000.0)
-        rates = np.array([])
-        last_time_step = times[0]
-
-        for i in range(1, times.size):
-            rates = np.append(
-                rates, rate[i - 1] * np.ones_like(np.arange(last_time_step, times[i], 1.0)))
-            last_time_step = times[i]
-
-        time_series = rates
-    else:
-        spikes = data_array[:, 1][data_array[:, 1] > t_min]
-        spikes = spikes[spikes < t_max]
-        binned_spikes = np.histogram(spikes, bins=int(
-            (t_max - t_min)), range=(t_min, t_max))[0]
-        if kernel == 'rect_time_window':
-            kernel = np.ones(int(resolution)) / resolution
-        elif kernel == 'gauss_time_window':
-            sigma = resolution
-            time_range = np.arange(-0.5 * (t_max - t_min),
-                                   0.5 * (t_max - t_min), 1.0)
-            kernel = 1 / (np.sqrt(2.0 * np.pi) * sigma) * \
-                np.exp(-(time_range ** 2 / (2 * sigma ** 2)))
-        elif kernel == 'alpha_time_window':
-            alpha = 1 / resolution
-            time_range = np.arange(-0.5 * (t_max - t_min),
-                                   0.5 * (t_max - t_min), 1.0)
-            time_range[time_range < 0] = 0.0
-            kernel = alpha * time_range * np.exp(-alpha * time_range)
-        else:
-            raise ValueError("Unknown kernel: {}".format(kernel))
-
-        rate = np.convolve(kernel, binned_spikes, mode='same')
-        rate = rate / (num_neur / 1000.0)
-        time_series = rate
-
-    return time_series
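-
-
-# Usage sketch (illustrative): a rate trace smoothed with a Gaussian kernel
-# of sigma = 20 ms over [500, 10500] ms:
-#   ts = pop_rate_time_series(spikes, num_neur=240, t_min=500., t_max=10500.,
-#                             resolution=20., kernel='gauss_time_window')
-#   # ts.size == 10000, one value per 1-ms bin of the underlying histogram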
-
-
-def pop_cv_isi(data_array, t_min, t_max):
-    """
-    Calculate coefficient of variation of interspike intervals
-    between t_min and t_max for every single neuron in data_array
-    and average the result over neurons in data_array.
-    Assumes spikes are sorted according to time.
-
-    Parameters
-    ----------
-    data_array : numpy.ndarray
-        Array with spike data.
-        column 0: neuron_ids, column 1: spike times
-    t_min : float
-        Minimal time stamp to be considered in ms.
-    t_max : float
-        Maximal time stamp to be considered in ms.
-
-    Returns
-    -------
-    mean : float
-        Mean CV ISI value of the population
-    """
-    cv_isi = []
-    indices = np.where(np.logical_and(data_array[:, 1] > t_min,
-                                      data_array[:, 1] < t_max))[0]
-    if len(data_array) > 1 and len(indices) > 1:
-        for i in np.unique(data_array[:, 0]):
-            intervals = np.diff(data_array[indices][
-                                np.where(data_array[indices, 0] == i), 1])
-            if intervals.size > 0:
-                cv_isi.append(np.std(intervals) / np.mean(intervals))
-        if len(cv_isi) > 0:
-            return np.mean(cv_isi)
-        else:
-            return 0.0
-    else:
-        print('cv_isi: no or only one spike in data_array, returning 0.0')
-        return 0.0
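-
-
-# Usage sketch (illustrative): a Poisson process has a CV ISI close to 1,
-# so for stationary, irregular spiking one expects
-#   pop_cv_isi(spikes, 500., 10500.)  # -> value near 1.0
-# whereas regular spiking pushes the population-averaged CV ISI towards 0.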
-
-
-def ISI_SCC(data_array, t_min, t_max):
-    """
-    Computes the serial correlation coefficient of
-    inter-spike intervals of the given spike data.
-
-    Parameters
-    ----------
-    data_array : numpy.ndarray
-        Array with spike data.
-        column 0: neuron_ids, column 1: spike times
-    t_min : float
-        Minimal time for the calculation.
-    t_max : float
-        Maximal time for the calculation.
-
-    Returns
-    -------
-    bins : numpy.ndarray
-        ISI lags
-    values : numpy.ndarray
-        Serial correlation coefficient values
-    """
-    indices = np.where(np.logical_and(data_array[:, 1] > t_min,
-                                      data_array[:, 1] < t_max))
-    scc_averaged = np.zeros(int(max(1001, 2 * (t_max - t_min) + 1)))
-    half = int(max(1000, 2 * (t_max - t_min)) / 2.)
-    if len(data_array) > 1 and len(indices[0]) > 1:
-        for i in np.unique(data_array[:, 0]):
-            intervals = np.diff(data_array[indices][
-                                np.where(data_array[indices, 0] == i), 1])
-
-            if intervals.size > 1:
-                mean = np.mean(intervals)
-                scc = (np.correlate(intervals, intervals, mode='full') - mean ** 2) / (
-                    np.mean(intervals ** 2) - mean ** 2)
-                # Integer division yields valid slice indices in Python 3
-                scc_averaged[half - scc.size //
-                             2:half + scc.size // 2 + 1] += scc
-
-        scc_averaged = scc_averaged / np.unique(data_array[:, 0]).size
-        return np.arange(-half, half + 1, 1), scc_averaged / np.sum(scc_averaged)
-    else:
-        print('ISI_SCC: no or only one spike in data_array, returning 0.0')
-        return 0.0
-
-
-def pop_LvR(data_array, t_ref, t_min, t_max, num_neur):
-    """
-    Compute the LvR value of the given data_array.
-    See Shinomoto et al. 2009 for details.
-
-    Parameters
-    ----------
-    data_array : numpy.ndarray
-        Array with spike data.
-        column 0: neuron_ids, column 1: spike times
-    t_ref : float
-        Refractory period of the neurons.
-    t_min : float
-        Minimal time for the calculation.
-    t_max : float
-        Maximal time for the calculation.
-    num_neur: int
-        Number of recorded neurons. Needs to be provided explicitly
-        to avoid corruption of results by silent neurons not
-        present in the given data.
-
-    Returns
-    -------
-    mean : float
-        Population-averaged LvR.
-    LvR : numpy.ndarray
-        Single-cell LvR values
-    """
-    i_min = np.searchsorted(data_array[:, 1], t_min)
-    i_max = np.searchsorted(data_array[:, 1], t_max)
-    LvR = np.array([])
-    data_array = data_array[i_min:i_max]
-    for i in np.unique(data_array[:, 0]):
-        intervals = np.diff(data_array[
-                            np.where(data_array[:, 0] == i)[0], 1])
-        if intervals.size > 1:
-            val = np.sum((1. - 4 * intervals[0:-1] * intervals[1:] / (intervals[0:-1] + intervals[
-                         1:]) ** 2) * (1 + 4 * t_ref / (intervals[0:-1] + intervals[1:])))
-            LvR = np.append(LvR, val * 3 / (intervals.size - 1.))
-        else:
-            LvR = np.append(LvR, 0.0)
-    if len(LvR) < num_neur:
-        LvR = np.append(LvR, np.zeros(num_neur - len(LvR)))
-    return np.mean(LvR), LvR
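-
-
-# For reference, the quantity implemented above is the revised local
-# variation of Shinomoto et al. (2009),
-#   LvR = 3 / (n - 1) * sum_i (1 - 4 * I_i * I_{i+1} / (I_i + I_{i+1})**2)
-#                            * (1 + 4 * t_ref / (I_i + I_{i+1})),
-# with I_i the i-th inter-spike interval and n the number of intervals;
-# neurons with fewer than two intervals enter the average with LvR = 0.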
-
-
-def synchrony(data_array, num_neur, t_min, t_max, resolution=1.0):
-    """
-    Compute the synchrony of an array of spikes as the coefficient
-    of variation of the population rate.
-    Uses pop_rate_time_series().
-
-
-    Parameters
-    ----------
-    data_array : numpy.ndarray
-        Array with spike data.
-        column 0: neuron_ids, column 1: spike times
-    num_neur : int
-        Number of recorded neurons.
-    t_min : float
-        Minimal time for the calculation of the histogram in ms.
-    t_max : float
-        Maximal time for the calculation of the histogram in ms.
-    resolution : float, optional
-        Bin width of the histogram. Defaults to 1 ms.
-
-    Returns
-    -------
-    synchrony : float
-        Synchrony of the population.
-    """
-    spike_count_histogram = pop_rate_time_series(
-        data_array, num_neur, t_min, t_max, resolution=resolution)
-    mean = np.mean(spike_count_histogram)
-    std_dev = np.std(spike_count_histogram)
-    # NumPy division by zero yields nan/inf instead of raising
-    # ZeroDivisionError, so test the mean explicitly
-    if mean == 0.:
-        return np.inf
-    return std_dev / mean
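-
-
-# Usage sketch (illustrative): with 1-ms bins, an asynchronous population
-# yields small fluctuations of the rate histogram around its mean, while
-# synchronized bursting inflates the std/mean ratio:
-#   chi = synchrony(spikes, num_neur=240, t_min=500., t_max=10500.)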
-
-
-def spectrum(data_array, num_neur, t_min, t_max, resolution=1., kernel='binned', Df=None):
-    """
-    Compute compound power spectrum of a population of neurons.
-    Uses welch (expected from scipy.signal) on the centralized
-    population rate (centralize, expected from the correlation toolbox).
-
-    Parameters
-    ----------
-    data_array : numpy.ndarray
-        Array with spike data.
-        column 0: neuron_ids, column 1: spike times
-    t_min : float
-        Minimal time for the calculation of the histogram in ms.
-    t_max : float
-        Maximal time for the calculation of the histogram in ms.
-    num_neur: int
-        Number of recorded neurons. Needs to be provided explicitly
-        to avoid corruption of results by silent neurons not
-        present in the given data.
-    kernel : {'gauss_time_window', 'alpha_time_window', 'rect_time_window'}, optional
-        Specifies the kernel to be convolved with the spike histogram.
-        Defaults to 'binned', which corresponds to no convolution.
-    resolution: float, optional
-        Width of the convolution kernel. Specifically it corresponds to:
-        - 'binned' : bin width of the histogram
-        - 'gauss_time_window' : sigma
-        - 'alpha_time_window' : time constant of the alpha function
-        - 'rect_time_window' : width of the moving rectangular function
-        Defaults to 1 ms.
-    Df : float, optional
-        Window width of sliding rectangular filter (smoothing) of the
-        spectrum. The default value is None and leads to no smoothing.
-        Note that this implementation does not apply the smoothing.
-
-    Returns
-    -------
-    power : numpy.ndarray
-        Values of the power spectrum.
-    freq : numpy.ndarray
-        Discrete frequency values
-    """
-    rate = pop_rate_time_series(
-        data_array, num_neur, t_min, t_max, kernel=kernel, resolution=resolution)
-    rate = centralize(rate, units=True)
-    freq, power = welch(rate, fs=1.e3,
-                        noverlap=1000, nperseg=1024)
-    return power[0][freq > 0], freq[freq > 0]
-
-
-def synaptic_output(rate, tau_syn, t_min, t_max, resolution=1.):
-    """
-    Compute the synaptic output of a population of neurons.
-    Convolves the population spike histogram with an exponential
-    synaptic filter.
-
-    Parameters
-    ----------
-    rate : numpy.ndarray
-        Time series of the population rate.
-    tau_syn : float
-        Synaptic time constant of the single neurons
-    t_min : float
-        Minimal time for the calculation.
-    t_max : float
-        Maximal time for the calculation.
-    resolution : float, optional
-        Time resolution of the synaptic filtering kernel in ms.
-        Defaults to 1 ms.
-
-    Returns
-    -------
-    syn_current : numpy.ndarray
-        Time series of the synaptic output.
-    """
-    t = np.arange(0., 20., resolution)
-    kernel = np.exp(-t / tau_syn)
-    syn_current = np.convolve(kernel, rate, mode='same')
-    return syn_current
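-
-
-# Usage sketch (illustrative): filter a 1-ms-resolution rate trace with the
-# exponential synapse of the model neurons (tau_syn_ex = 0.5 ms by default):
-#   rate = pop_rate_time_series(spikes, 240, 500., 10500., resolution=1.)
-#   syn = synaptic_output(rate, tau_syn=0.5, t_min=500., t_max=10500.)
-# Note that the exponential kernel is truncated at 20 ms irrespective of
-# t_min and t_max.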
-
-
-def compare(sim1, sim2):
-    """
-    Compares two simulations (two instances of the ana_vistools data class)
-    with regard to their parameters.
-
-    Parameters
-    ----------
-    sim1, sim2 : ana_vistools.data
-        The two instances of the ana_vistools.data class to be compared.
-
-    Returns
-    -------
-    None
-    """
-
-    template = "{0:30}{1:20}{2:25}{3:15}"
-    print(template.format("parameter", sim1.label[0:5], sim2.label[0:5], "equal?"))
-
-    info1 = sim1.sim_info
-    info2 = sim2.sim_info
-    compare_keys = []
-    for key in list(info1.keys()) + list(info2.keys()):
-        p = False
-        if key in list(info1.keys()):
-            value = info1[key]
-        else:
-            value = info2[key]
-
-        if isinstance(value, str):
-            # To exclude paths from the compared keys
-            if (value.find('_') == -1 and
-                value.find('/') == -1 and
-                    value.find('.') == -1):
-                p = True
-        else:
-            p = True
-
-        if key in ['sim_label', 'K_stable_path']:
-            p = False
-        if p and key not in compare_keys:
-            compare_keys.append(key)
-    for key in compare_keys:
-        if key in info2 and key in info1:
-            out = (key, str(info1[key]), str(info2[key]),
-                   str(info1[key] == info2[key]))
-        elif key not in info1:
-            out = (key, '', str(info2[key]), 'False')
-        elif key not in info2:
-            out = (key, str(info1[key]), '', 'False')
-        print(template.format(*out))
-    # Compare sum of indegrees
-    s1 = 0.
-    s2 = 0.
-    for area1 in sim1.areas:
-        for pop1 in sim1.structure[area1]:
-            for area2 in sim1.areas:
-                for pop2 in sim1.structure[area2]:
-                    s1 += sim1.indegree_data[area1][pop1][area2][pop2]
-                    s2 += sim2.indegree_data[area1][pop1][area2][pop2]
-
-    out = ('indegrees', str(s1), str(s2), str(s1 == s2))
-    print(template.format(*out))
diff --git a/multiarea_model/.ipynb_checkpoints/default_params-checkpoint.py b/multiarea_model/.ipynb_checkpoints/default_params-checkpoint.py
deleted file mode 100644
index a09b449254c5e4f908d03500f3e0500190eb1e13..0000000000000000000000000000000000000000
--- a/multiarea_model/.ipynb_checkpoints/default_params-checkpoint.py
+++ /dev/null
@@ -1,321 +0,0 @@
-"""
-default_params.py
-=================
-This script defines the default values of all
-parameters and provides functions to compute
-single-neuron and synapse parameters and to
-properly set the seed of the random generators.
-
-Authors
--------
-Maximilian Schmidt
-"""
-
-from config import base_path
-import json
-import os
-import nest
-
-import numpy as np
-
-complete_area_list = ['V1', 'V2', 'VP', 'V3', 'V3A', 'MT', 'V4t', 'V4', 'VOT', 'MSTd',
-                      'PIP', 'PO', 'DP', 'MIP', 'MDP', 'VIP', 'LIP', 'PITv', 'PITd',
-                      'MSTl', 'CITv', 'CITd', 'FEF', 'TF', 'AITv', 'FST', '7a', 'STPp',
-                      'STPa', '46', 'AITd', 'TH']
-
-population_list = ['23E', '23I', '4E', '4I', '5E', '5I', '6E', '6I']
-
-with open(os.path.join(base_path, 'multiarea_model/data_multiarea',
-                       'viscortex_raw_data.json'), 'r') as f1:
-    raw_data = json.load(f1)
-av_indegree_Cragg = raw_data['av_indegree_Cragg']
-av_indegree_OKusky = raw_data['av_indegree_OKusky']
-
-
-"""
-Simulation parameters
-"""
-sim_params = {
-    # master seed for random number generators
-    'rng_seed': 1,
-    # simulation step (in ms)
-    'dt': 0.1,
-    # simulated time (in ms)
-    't_sim': 10.0,
-    # no. of MPI processes:
-    'num_processes': 1,
-    # no. of threads per MPI process':
-    'local_num_threads': 1,
-    # Areas represented in the network
-    'areas_simulated': complete_area_list,
-}
-
-"""
-Network parameters
-"""
-network_params = {
-    # Surface area of each area in mm^2
-    'surface': 1.0,
-    # Scaling of population sizes
-    'N_scaling': 1.,
-    # Scaling of indegrees
-    'K_scaling': 1.,
-    # Absolute path to the file holding full-scale rates for scaling
-    # synaptic weights
-    'fullscale_rates': None,
-    # Check whether NEST 2 or 3 is used. No straightforward way of checking
-    # this is available, but PrintNetwork was removed in NEST 3, so checking
-    # for its existence should suffice.
-    'USING_NEST_3': 'PrintNetwork' not in dir(nest)
-}
-
-
-"""
-Single-neuron parameters
-"""
-
-sim_params.update(
-    {
-        'initial_state': {
-            # mean of initial membrane potential (in mV)
-            'V_m_mean': -58.0,
-            # std of initial membrane potential (in mV)
-            'V_m_std': 10.0
-        }
-    })
-
-# dictionary defining single-cell parameters
-single_neuron_dict = {
-    # Leak potential of the neurons (in mV).
-    'E_L': -65.0,
-    # Threshold potential of the neurons (in mV).
-    'V_th': -50.0,
-    # Membrane potential after a spike (in mV).
-    'V_reset': -65.0,
-    # Membrane capacitance (in pF).
-    'C_m': 250.0,
-    # Membrane time constant (in ms).
-    'tau_m': 10.0,
-    # Time constant of postsynaptic excitatory currents (in ms).
-    'tau_syn_ex': 0.5,
-    # Time constant of postsynaptic inhibitory currents (in ms).
-    'tau_syn_in': 0.5,
-    # Refractory period of the neurons after a spike (in ms).
-    't_ref': 2.0}
-
-neuron_params = {
-    # neuron model
-    'neuron_model': 'iaf_psc_exp',
-    # neuron parameters
-    'single_neuron_dict': single_neuron_dict,
-    # Mean and standard deviation for the
-    # distribution of initial membrane potentials
-    'V0_mean': -100.,
-    'V0_sd': 50.}
-
-network_params.update({'neuron_params': neuron_params})
-
-
-"""
-General connection parameters
-"""
-connection_params = {
-    # Whether to apply the stabilization method of
-    # Schuecker, Schmidt et al. (2017). Default is None.
-    # Options are True to perform the stabilization or
-    # a string that specifies the name of a binary
-    # numpy file containing the connectivity matrix
-    'K_stable': None,
-
-    # Whether to replace all cortico-cortical connections by stationary
-    # Poisson input with population-specific rates (het_poisson_stat)
-    # or by time-varying current input (het_current_nonstat)
-    # while still simulating all areas. In both cases, the data to replace
-    # the cortico-cortical input is loaded from `replace_cc_input_source`.
-    'replace_cc': False,
-
-    # Whether to replace non-simulated areas by Poisson sources
-    # with the same global rate rate_ext ('hom_poisson_stat') or
-    # by specific rates ('het_poisson_stat')
-    # or by time-varying specific current ('het_current_nonstat')
-    # In the two latter cases, the data to replace the cortico-cortical
-    # input is loaded from `replace_cc_input_source`
-    'replace_non_simulated_areas': None,
-
-    # Source of the input rates to replace cortico-cortical input
-    # Either a json file (has to end in .json) holding a scalar value
-    # for each population or
-    # a base name such that files with names
-    # $(replace_cc_input_source)-area-population.npy
-    # (e.g. '$(replace_cc_input_source)-V1-23E.npy')
-    # contain the time series for each population.
-    # We recommend using absolute paths rather than relative paths.
-    'replace_cc_input_source': None,
-
-    # whether to redistribute CC synapses to meet the literature value
-    # of E-specificity
-    'E_specificity': True,
-
-    # Relative inhibitory synaptic strength (in relative units).
-    'g': -16.,
-
-    # compute average indegree in V1 from data
-    'av_indegree_V1': np.mean([av_indegree_Cragg, av_indegree_OKusky]),
-
-    # synaptic volume density
-    # area-specific --> conserves average in-degree
-    # constant --> conserves syn. volume density
-    'rho_syn': 'constant',
-
-    # Increase the external Poisson indegree onto 5E and 6E
-    'fac_nu_ext_5E': 1.,
-    'fac_nu_ext_6E': 1.,
-    # to increase the ext. input to 23E and 5E in area TH
-    'fac_nu_ext_TH': 1.,
-
-    # synapse weight parameters for current-based neurons
-    # excitatory intracortical synaptic weight (mV)
-    'PSP_e': 0.15,
-    'PSP_e_23_4': 0.3,
-    # synaptic weight (mV) for external input
-    'PSP_ext': 0.15,
-
-    # relative SD of normally distributed synaptic weights
-    'PSC_rel_sd_normal': 0.1,
-    # relative SD of lognormally distributed synaptic weights
-    'PSC_rel_sd_lognormal': 3.0,
-
-    # scaling factor for cortico-cortical connections (chi)
-    'cc_weights_factor': 1.,
-    # factor to scale cortico-cortical inh. weights in relation
-    # to exc. weights (chi_I)
-    'cc_weights_I_factor': 1.,
-
-    # switch whether to distribute weights lognormally
-    'lognormal_weights': False,
-    # switch whether to distribute only EE weights lognormally if
-    # 'lognormal_weights' is True
-    'lognormal_EE_only': False,
-}
-
-network_params.update({'connection_params': connection_params})
-
-"""
-Delays
-"""
-delay_params = {
-    # Local dendritic delay for excitatory transmission [ms]
-    'delay_e': 1.5,
-    # Local dendritic delay for inhibitory transmission [ms]
-    'delay_i': 0.75,
-    # Relative standard deviation for both local and inter-area delays
-    'delay_rel': 0.5,
-    # Axonal transmission speed to compute interareal delays [mm/ms]
-    'interarea_speed': 3.5
-}
-network_params.update({'delay_params': delay_params})
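-
-# Illustrative example (hypothetical distance, not taken from the data):
-# an inter-areal delay follows from the distance between two areas divided
-# by 'interarea_speed', e.g. 17.5 mm / 3.5 mm/ms = 5.0 ms.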
-
-"""
-Input parameters
-"""
-input_params = {
-    # Whether to use Poisson or DC input (True or False)
-    'poisson_input': True,
-
-    # synapse type for Poisson input
-    'syn_type_ext': 'static_synapse_hpc',
-
-    # Rate of the Poissonian spike generator (in spikes/s).
-    'rate_ext': 10.,
-
-    # Whether to switch on time-dependent DC input
-    'dc_stimulus': False,
-}
-
-network_params.update({'input_params': input_params})
-
-"""
-Recording settings
-"""
-recording_dict = {
-    # Which areas to record spike data from
-    'areas_recorded': complete_area_list,
-
-    # voltmeter
-    'record_vm':  False,
-    # Fraction of neurons to record membrane potentials from
-    # in each population if record_vm is True
-    'Nrec_vm_fraction': 0.01,
-
-    # Parameters for the spike detectors
-    'spike_dict': {
-        'label': 'spikes',
-        'start': 0.},
-    # Parameters for the voltmeters
-    'vm_dict': {
-        'label': 'vm',
-        'start': 0.,
-        'stop': 1000.,
-        'interval': 0.1}
-    }
-if network_params['USING_NEST_3']:
-    recording_dict['spike_dict'].update({'record_to': 'ascii'})
-    recording_dict['vm_dict'].update({'record_to': 'ascii'})
-else:
-    recording_dict['spike_dict'].update({'withtime': True,
-                                         'record_to': ['file']})
-    recording_dict['vm_dict'].update({'withtime': True,
-                                      'record_to': ['file']})
-sim_params.update({'recording_dict': recording_dict})
-
-"""
-Theory params
-"""
-
-theory_params = {'neuron_params': neuron_params,
-                 # Initial rates can be None (start integration at
-                 # zero rates), a numpy.ndarray defining the initial
-                 # rates or 'random_uniform' which leads to randomly
-                 # drawn initial rates from a uniform distribution.
-                 'initial_rates': None,
-                 # If 'initial_rates' is set to 'random_uniform',
-                 # 'initial_rates_iter' defines the number of
-                 # different initial conditions
-                 'initial_rates_iter': None,
-                 # If 'initial_rates' is set to 'random_uniform',
-                 # 'initial_rates_max' defines the maximum rate of the
-                 # uniform distribution to draw the initial rates from
-                 'initial_rates_max': 1000.,
-                 # The simulation time of the mean-field theory integration
-                 'T': 50.,
-                 # The time step of the mean-field theory integration
-                 'dt': 0.01,
-                 # Time interval for recording the trajectory of the mean-field calculation
-                 # If None, then the interval is set to dt
-                 'rec_interval': None}
-
-
-"""
-Helper functions to update the default parameters with custom
-parameters and to validate custom parameter dictionaries
-"""
-
-
-def nested_update(d, d2):
-    for key in d2:
-        if isinstance(d2[key], dict) and key in d:
-            nested_update(d[key], d2[key])
-        else:
-            d[key] = d2[key]
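-
-
-# Usage sketch (hypothetical values): nested_update merges a custom dict into
-# the defaults in place, descending into shared sub-dictionaries:
-#   params = {'input_params': {'rate_ext': 10., 'poisson_input': True}}
-#   nested_update(params, {'input_params': {'rate_ext': 15.}})
-#   # -> params['input_params'] == {'rate_ext': 15., 'poisson_input': True}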
-
-
-def check_custom_params(d, def_d):
-    for key, val in d.items():
-        if isinstance(val, dict):
-            check_custom_params(d[key], def_d[key])
-        else:
-            try:
-                def_val = def_d[key]
-            except KeyError:
-                raise KeyError('Unused key in custom parameter dictionary: {}'.format(key))
diff --git a/multiarea_model/.ipynb_checkpoints/multiarea_model-checkpoint.py b/multiarea_model/.ipynb_checkpoints/multiarea_model-checkpoint.py
deleted file mode 100644
index 235246e47b9db678d3b04fe96936477ead8b28df..0000000000000000000000000000000000000000
--- a/multiarea_model/.ipynb_checkpoints/multiarea_model-checkpoint.py
+++ /dev/null
@@ -1,313 +0,0 @@
-"""
-multiarea_model
-===============
-
-Network class to instantiate and administer instances of the
-multi-area model of macaque visual cortex by Schmidt et al. (2018).
-
-Classes
--------
-MultiAreaModel : Loads a parameter file that specifies custom parameters for a
-particular instance of the model. An instance of the model has a unique hash
-label. As members, it may contain three classes:
-
-- simulation : contains all relevant parameters for a simulation of
-  the network
-
-- theory : theory class that serves to estimate the stationary state
-  of the network using mean-field theory
-
-  Schuecker J, Schmidt M, van Albada SJ, Diesmann M, Helias M (2017)
-  Fundamental Activity Constraints Lead to Specific Interpretations of
-  the Connectome. PLoS Comput Biol 13(2): e1005179.
-  doi:10.1371/journal.pcbi.1005179
-
-- analysis: provides methods to load data and perform data analysis
-
-"""
-import json
-import numpy as np
-import os
-import pprint
-import shutil
-from .default_params import complete_area_list, nested_update, network_params
-from .default_params import check_custom_params
-from collections import OrderedDict
-from copy import deepcopy
-from .data_multiarea.Model import compute_Model_params
-from .analysis import Analysis
-from config import base_path
-from dicthash import dicthash
-from .multiarea_helpers import (
-    area_level_dict,
-    load_degree_data,
-    convert_syn_weight,
-    dict_to_matrix,
-    dict_to_vector,
-    indegree_to_synapse_numbers,
-    matrix_to_dict,
-    vector_to_dict,
-)
-from .simulation import Simulation
-from .theory import Theory
-
-# Set precision of dicthash library to 1e-4
-# because this is sufficient for indegrees
-# and neuron numbers and guarantees reproducibility
-# of the class label despite inevitably imprecise float calculations
-# in the data scripts.
-dicthash.FLOAT_FACTOR = 1e4
-dicthash.FLOOR_SMALL_FLOATS = True
-
-
-class MultiAreaModel:
-    def __init__(self, network_spec, theory=False, simulation=False,
-                 analysis=False, *args, **keywords):
-        """
-        Multiarea model class.
-        An instance of the multiarea model with the given parameters.
-
-        Parameters
-        ----------
-        network_spec : dict or str
-            Specify the network. If it is of type dict, the parameters defined
-            in the dictionary overwrite the default parameters defined in
-            default_params.py.
-            If it is of type str, the string defines the label of a previously
-            initialized model instance that is now loaded.
-        theory : bool
-            whether to create an instance of the theory class as member.
-        simulation : bool
-            whether to create an instance of the simulation class as member.
-        analysis : bool
-            whether to create an instance of the analysis class as member.
-
-        """
-        self.params = deepcopy(network_params)
-        if isinstance(network_spec, dict):
-            print("Initializing network from dictionary.")
-            check_custom_params(network_spec, self.params)
-            self.custom_params = network_spec
-            p_ = 'multiarea_model/data_multiarea/custom_data_files'
-            # Draw random integer label for data script to avoid clashes with
-            # class instances created in parallel
-            rand_data_label = np.random.randint(10000)
-            print("RAND_DATA_LABEL", rand_data_label)
-            tmp_parameter_fn = os.path.join(base_path,
-                                            p_,
-                                            'custom_{}_parameter_dict.json'.format(rand_data_label))
-            tmp_data_fn = os.path.join(base_path,
-                                       p_,
-                                       'custom_Data_Model_{}.json'.format(rand_data_label))
-
-            with open(tmp_parameter_fn, 'w') as f:
-                json.dump(self.custom_params, f)
-            # Execute Data script
-            compute_Model_params(out_label=str(rand_data_label),
-                                 mode='custom')
-        else:
-            print("Initializing network from label.")
-            parameter_fn = os.path.join(base_path,
-                                        'config_files',
-                                        '{}_config'.format(network_spec))
-            tmp_data_fn = os.path.join(base_path,
-                                       'config_files',
-                                       'custom_Data_Model_{}.json'.format(network_spec))
-            with open(parameter_fn, 'r') as f:
-                self.custom_params = json.load(f)
-        nested_update(self.params, self.custom_params)
-        with open(tmp_data_fn, 'r') as f:
-            dat = json.load(f)
-
-        self.structure = OrderedDict()
-        for area in dat['area_list']:
-            self.structure[area] = dat['structure'][area]
-        self.N = dat['neuron_numbers']
-        self.synapses = dat['synapses']
-        self.W = dat['synapse_weights_mean']
-        self.W_sd = dat['synapse_weights_sd']
-        self.area_list = complete_area_list
-        self.distances = dat['distances']
-
-        ind, inda, out, outa = load_degree_data(tmp_data_fn)
-        # If K_stable is specified in the params, load the stabilized matrix
-        # TODO: Extend this by calling the stabilization method
-        if self.params['connection_params']['K_stable'] is None:
-            self.K = ind
-        else:
-            if not isinstance(self.params['connection_params']['K_stable'], str):
-                raise TypeError("Not supported. Please store the "
-                                "matrix in a binary numpy file and define "
-                                "the path to the file as the parameter value.")
-            # Assume that the parameter defines a filename containing the matrix
-            K_stable = np.load(self.params['connection_params']['K_stable'])
-            ext = {area: {pop: ind[area][pop]['external'] for pop in
-                          self.structure['V1']} for area in self.area_list}
-            self.K = matrix_to_dict(
-                K_stable, self.area_list, self.structure, external=ext)
-            self.synapses = indegree_to_synapse_numbers(self.K, self.N)
-
-        self.vectorize()
-        if self.params['K_scaling'] != 1. or self.params['N_scaling'] != 1.:
-            if self.params['fullscale_rates'] is None:
-                raise KeyError('For downscaling, you have to define a file'
-                               ' with fullscale rates.')
-            self.scale_network()
-
-        self.K_areas = area_level_dict(self.K, self.N)
-        self.label = dicthash.generate_hash_from_dict({'params': self.params,
-                                                       'K': self.K,
-                                                       'N': self.N,
-                                                       'structure': self.structure},
-                                                      blacklist=[('params', 'fullscale_rates'),
-                                                                 ('params',
-                                                                  'connection_params',
-                                                                  'K_stable'),
-                                                                 ('params',
-                                                                  'connection_params',
-                                                                  'replace_cc_input_source')])
-
-        if isinstance(network_spec, dict):
-            parameter_fn = os.path.join(base_path,
-                                        'config_files',
-                                        '{}_config'.format(self.label))
-            data_fn = os.path.join(base_path,
-                                   'config_files',
-                                   'custom_Data_Model_{}.json'.format(self.label))
-
-            shutil.move(tmp_parameter_fn,
-                        parameter_fn)
-            shutil.move(tmp_data_fn,
-                        data_fn)
-
-        elif isinstance(network_spec, str):
-            assert(network_spec == self.label)
-
-        # Initialize member classes
-        if theory:
-            if 'theory_spec' not in keywords:
-                theory_spec = {}
-            else:
-                theory_spec = keywords['theory_spec']
-            self.init_theory(theory_spec)
-
-        if simulation:
-            if 'sim_spec' not in keywords:
-                sim_spec = {}
-            else:
-                sim_spec = keywords['sim_spec']
-            self.init_simulation(sim_spec)
-
-        if analysis:
-            assert(hasattr(self, 'simulation'))
-            if 'ana_spec' not in keywords:
-                ana_spec = {}
-            else:
-                ana_spec = keywords['ana_spec']
-            self.init_analysis(ana_spec)
-
-    def __str__(self):
-        s = "Multi-area network {} with custom parameters: \n".format(self.label)
-        s += pprint.pformat(self.params, width=1)
-        return s
-
-    def __eq__(self, other):
-        return self.label == other.label
-
-    def __hash__(self):
-        return hash(self.label)
-
-    def init_theory(self, theory_spec):
-        self.theory = Theory(self, theory_spec)
-
-    def init_simulation(self, sim_spec):
-        self.simulation = Simulation(self, sim_spec)
-
-    def init_analysis(self, ana_spec):
-        assert(hasattr(self, 'simulation'))
-        if 'load_areas' in ana_spec:
-            load_areas = ana_spec['load_areas']
-        else:
-            load_areas = None
-        if 'data_list' in ana_spec:
-            data_list = ana_spec['data_list']
-        else:
-            data_list = ['spikes']
-        self.analysis = Analysis(self, self.simulation,
-                                 data_list=data_list,
-                                 load_areas=load_areas)
-
-    def scale_network(self):
-        """
-        Scale the network if `N_scaling` and/or `K_scaling` differ from 1.
-
-        This function:
-        - adjusts the synaptic weights such that the population-averaged
-          stationary spike rates approximately match the given `fullscale_rates`.
-        - scales the population sizes with `N_scaling` and indegrees with `K_scaling`.
-        - scales the synapse numbers with `N_scaling`*`K_scaling`.
-        """
-        # population sizes
-        self.N_vec *= self.params['N_scaling']
-
-        # Scale the synaptic weights before the indegrees to use full-scale indegrees
-        self.adj_W_to_K()
-        # Then scale the indegrees and synapse numbers
-        self.K_matrix *= self.params['K_scaling']
-        self.syn_matrix *= self.params['K_scaling'] * self.params['N_scaling']
-
-        # Finally recreate dictionaries
-        self.N = vector_to_dict(self.N_vec, self.area_list, self.structure)
-        self.K = matrix_to_dict(self.K_matrix[:, :-1], self.area_list,
-                                self.structure, external=self.K_matrix[:, -1])
-        self.W = matrix_to_dict(self.W_matrix[:, :-1], self.area_list,
-                                self.structure, external=self.W_matrix[:, -1])
-
-        self.synapses = matrix_to_dict(self.syn_matrix, self.area_list, self.structure)
-
-    def vectorize(self):
-        """
-        Create matrix and vector version of neuron numbers, synapses
-        and synapse weight dictionaries.
-        """
-
-        self.N_vec = dict_to_vector(self.N, self.area_list, self.structure)
-        self.syn_matrix = dict_to_matrix(self.synapses, self.area_list, self.structure)
-        self.K_matrix = dict_to_matrix(self.K, self.area_list, self.structure)
-        self.W_matrix = dict_to_matrix(self.W, self.area_list, self.structure)
-        self.J_matrix = convert_syn_weight(self.W_matrix,
-                                           self.params['neuron_params']['single_neuron_dict'])
-        self.structure_vec = ['-'.join((area, pop)) for area in
-                              self.area_list for pop in self.structure[area]]
-        self.add_DC_drive = np.zeros_like(self.N_vec)
-
-    def adj_W_to_K(self):
-        """
-        Adjust weights to scaling of neuron numbers and indegrees.
-
-        The recurrent and external weights are adjusted to the scaling
-        of the indegrees. Extra DC input is added to compensate the scaling
-        and preserve the mean and variance of the input.
-        """
-        tau_m = self.params['neuron_params']['single_neuron_dict']['tau_m']
-        C_m = self.params['neuron_params']['single_neuron_dict']['C_m']
-
-        if isinstance(self.params['fullscale_rates'], np.ndarray):
-            raise ValueError("Not supported. Please store the "
-                             "rates in a file and define the path to the file as "
-                             "the parameter value.")
-        else:
-            with open(self.params['fullscale_rates'], 'r') as f:
-                d = json.load(f)
-            full_mean_rates = dict_to_vector(d, self.area_list, self.structure)
-
-        rate_ext = self.params['input_params']['rate_ext']
-        J_ext = self.J_matrix[:, -1]
-        K_ext = self.K_matrix[:, -1]
-        x1_ext = 1e-3 * tau_m * J_ext * K_ext * rate_ext
-        x1 = 1e-3 * tau_m * np.dot(self.J_matrix[:, :-1] * self.K_matrix[:, :-1], full_mean_rates)
-        K_scaling = self.params['K_scaling']
-        self.J_matrix /= np.sqrt(K_scaling)
-        self.add_DC_drive = C_m / tau_m * ((1. - np.sqrt(K_scaling)) * (x1 + x1_ext))
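-        # Sketch of the compensation (hypothetical K_scaling = 0.1): weights
-        # grow by 1 / sqrt(0.1) while indegrees shrink by 0.1, so the input
-        # variance ~ J**2 * K is preserved; the DC term above restores the
-        # lost mean input (1 - sqrt(0.1)) * (x1 + x1_ext) * C_m / tau_m.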
-        neuron_params = self.params['neuron_params']['single_neuron_dict']
-        self.W_matrix = (1. / convert_syn_weight(1., neuron_params) * self.J_matrix)
diff --git a/multiarea_model/.ipynb_checkpoints/simulation-checkpoint.py b/multiarea_model/.ipynb_checkpoints/simulation-checkpoint.py
deleted file mode 100644
index 4d474babfc12033dd265f47a6cd41b8f72733a5c..0000000000000000000000000000000000000000
--- a/multiarea_model/.ipynb_checkpoints/simulation-checkpoint.py
+++ /dev/null
@@ -1,715 +0,0 @@
-
-"""
-multiarea_model
-===============
-
-Simulation class of the multi-area model of macaque visual cortex by
-Schmidt et al. (2018).
-
-
-Classes
--------
-Simulation : Loads a parameter file that specifies simulation
-parameters for a simulation of the instance of the model. A simulation
-is identified by a unique hash label.
-
-"""
-
-import json
-import nest
-import numpy as np
-import os
-import pprint
-import shutil
-import time
-
-from .analysis_helpers import _load_npy_to_dict, model_iter
-from config import base_path, data_path
-from copy import deepcopy
-from .default_params import nested_update, sim_params
-from .default_params import check_custom_params
-from dicthash import dicthash
-from .multiarea_helpers import extract_area_dict, create_vector_mask
-try:
-    from .sumatra_helpers import register_runtime
-    sumatra_found = True
-except ImportError:
-    sumatra_found = False
-
-
-class Simulation:
-    def __init__(self, network, sim_spec):
-        """
-        Simulation class.
-        An instance of the simulation class with the given parameters.
-        Can be created as a member class of a multiarea_model instance
-        or standalone.
-
-        Parameters
-        ----------
-        network : multiarea_model
-            An instance of the multiarea_model class that specifies
-            the network to be simulated.
-        sim_spec : dict or str
-            Custom simulation parameters that overwrite the
-            default parameters defined in default_params.py,
-            or the label of a previously stored simulation
-            from which the custom parameters are loaded.
-        """
-        self.params = deepcopy(sim_params)
-        if isinstance(sim_spec, dict):
-            check_custom_params(sim_spec, self.params)
-            self.custom_params = sim_spec
-        else:
-            fn = os.path.join(data_path,
-                              sim_spec,
-                              '_'.join(('custom_params',
-                                        sim_spec)))
-            with open(fn, 'r') as f:
-                self.custom_params = json.load(f)['sim_params']
-
-        nested_update(self.params, self.custom_params)
-
-        self.network = network
-        self.label = dicthash.generate_hash_from_dict({'params': self.params,
-                                                       'network_label': self.network.label})
-
-        print("Simulation label: {}".format(self.label))
-        self.data_dir = os.path.join(data_path, self.label)
-        try:
-            os.mkdir(self.data_dir)
-            os.mkdir(os.path.join(self.data_dir, 'recordings'))
-        except OSError:
-            pass
-        self.copy_files()
-        print("Copied files.")
-        d = {'sim_params': self.custom_params,
-             'network_params': self.network.custom_params,
-             'network_label': self.network.label}
-        with open(os.path.join(self.data_dir,
-                               '_'.join(('custom_params', self.label))), 'w') as f:
-            json.dump(d, f)
-        print("Initialized simulation class.")
-
-        self.areas_simulated = self.params['areas_simulated']
-        self.areas_recorded = self.params['recording_dict']['areas_recorded']
-        self.T = self.params['t_sim']
-
-    def __eq__(self, other):
-        # Two simulations are equal if the simulation parameters and
-        # the simulated networks are equal.
-        return self.label == other.label
-
-    def __hash__(self):
-        return hash(self.label)
-
-    def __str__(self):
-        s = "Simulation {} of network {} with parameters:".format(self.label, self.network.label)
-        s += pprint.pformat(self.params, width=1)
-        return s
-
-    def copy_files(self):
-        """
-        Copy all relevant files for the simulation to its data directory.
-        """
-        files = [os.path.join('multiarea_model',
-                              'data_multiarea',
-                              'Model.py'),
-                 os.path.join('multiarea_model',
-                              'data_multiarea',
-                              'VisualCortex_Data.py'),
-                 os.path.join('multiarea_model',
-                              'multiarea_model.py'),
-                 os.path.join('multiarea_model',
-                              'simulation.py'),
-                 os.path.join('multiarea_model',
-                              'default_params.py'),
-                 os.path.join('config_files',
-                              ''.join(('custom_Data_Model_', self.network.label, '.json'))),
-                 os.path.join('config_files',
-                              '_'.join((self.network.label, 'config')))]
-        if self.network.params['connection_params']['replace_cc_input_source'] is not None:
-            fs = self.network.params['connection_params']['replace_cc_input_source']
-            if '.json' in fs:
-                files.append(fs)
-            else:  # Assume that the cc input is stored in one npy file per population
-                fn_iter = model_iter(mode='single', areas=self.network.area_list)
-                for it in fn_iter:
-                    fp_it = (fs,) + it
-                    fp_ = '{}.npy'.format('-'.join(fp_it))
-                    files.append(fp_)
-        for f in files:
-            shutil.copy2(os.path.join(base_path, f),
-                         self.data_dir)
-
-    def prepare(self):
-        """
-        Prepare NEST Kernel.
-        """
-        nest.ResetKernel()
-        rng_seed = self.params['rng_seed']
-        num_processes = self.params['num_processes']
-        local_num_threads = self.params['local_num_threads']
-        vp = num_processes * local_num_threads
-        nest.SetKernelStatus({'resolution': self.params['dt'],
-                              'total_num_virtual_procs': vp,
-                              'overwrite_files': True,
-                              'data_path': os.path.join(self.data_dir, 'recordings'),
-                              'print_time': False})
-        if self.network.params['USING_NEST_3']:
-            nest.SetKernelStatus({'rng_seed': rng_seed})
-        else:
-            nest.SetKernelStatus({'grng_seed': rng_seed,
-                                  'rng_seeds': list(range(rng_seed + 1,
-                                                          rng_seed + vp + 1))})
-            self.pyrngs = [np.random.RandomState(s) for s in list(range(
-                rng_seed + vp + 1, rng_seed + 2 * (vp + 1)))]
-        nest.SetDefaults(self.network.params['neuron_params']['neuron_model'],
-                         self.network.params['neuron_params']['single_neuron_dict'])
-
-
-    def create_recording_devices(self):
-        """
-        Create devices for all populations. Depending on the
-        configuration, this will create:
-        - spike detector
-        - voltmeter
-        """
-        try:
-            # 'spike_recorder' is the NEST 3 name of the device
-            self.spike_detector = nest.Create('spike_recorder')
-        except Exception:
-            # Fall back to the NEST 2 name
-            self.spike_detector = nest.Create('spike_detector')
-        status_dict = deepcopy(self.params['recording_dict']['spike_dict'])
-        label = '-'.join((self.label,
-                          status_dict['label']))
-        status_dict.update({'label': label})
-        nest.SetStatus(self.spike_detector, status_dict)
-
-        if self.params['recording_dict']['record_vm']:
-            self.voltmeter = nest.Create('voltmeter')
-            status_dict = self.params['recording_dict']['vm_dict']
-            label = '-'.join((self.label,
-                              status_dict['label']))
-            status_dict.update({'label': label})
-            nest.SetStatus(self.voltmeter, status_dict)
-
-    def create_areas(self):
-        """
-        Create all areas with their populations and internal connections.
-        """
-        self.areas = []
-        for area_name in self.areas_simulated:
-            a = Area(self, self.network, area_name)
-            self.areas.append(a)
-            print("Memory after {0} : {1:.2f} MB".format(area_name, self.memory() / 1024.))
-
-    def cortico_cortical_input(self):
-        """
-        Create connections between areas.
-        """
-        replace_cc = self.network.params['connection_params']['replace_cc']
-        replace_non_simulated_areas = self.network.params['connection_params'][
-            'replace_non_simulated_areas']
-        if self.network.params['connection_params']['replace_cc_input_source'] is None:
-            replace_cc_input_source = None
-        else:
-            replace_cc_input_source = os.path.join(self.data_dir,
-                                                   self.network.params['connection_params'][
-                                                       'replace_cc_input_source'])
-
-        if not replace_cc and set(self.areas_simulated) != set(self.network.area_list):
-            if replace_non_simulated_areas == 'het_current_nonstat':
-                fn_iter = model_iter(mode='single', areas=self.network.area_list)
-                non_simulated_cc_input = _load_npy_to_dict(replace_cc_input_source, fn_iter)
-            elif replace_non_simulated_areas == 'het_poisson_stat':
-                fn = self.network.params['connection_params']['replace_cc_input_source']
-                with open(fn, 'r') as f:
-                    non_simulated_cc_input = json.load(f)
-            elif replace_non_simulated_areas == 'hom_poisson_stat':
-                non_simulated_cc_input = {source_area_name:
-                                          {source_pop:
-                                           self.network.params['input_params']['rate_ext']
-                                           for source_pop in
-                                           self.network.structure[source_area_name]}
-                                          for source_area_name in self.network.area_list}
-            else:
-                raise KeyError("Please define a valid method to"
-                               " replace non-simulated areas.")
-
-        if replace_cc == 'het_current_nonstat':
-            fn_iter = model_iter(mode='single', areas=self.network.area_list)
-            cc_input = _load_npy_to_dict(replace_cc_input_source, fn_iter)
-        elif replace_cc == 'het_poisson_stat':
-            with open(self.network.params['connection_params'][
-                    'replace_cc_input_source'], 'r') as f:
-                cc_input = json.load(f)
-        elif replace_cc == 'hom_poisson_stat':
-            cc_input = {source_area_name:
-                        {source_pop:
-                         self.network.params['input_params']['rate_ext']
-                         for source_pop in
-                         self.network.structure[source_area_name]}
-                        for source_area_name in self.network.area_list}
-
-        # Connections between simulated areas are not replaced
-        if not replace_cc:
-            for target_area in self.areas:
-                # Loop source area through the complete list of areas
-                for source_area_name in self.network.area_list:
-                    if target_area.name != source_area_name:
-                        # If source_area is part of the simulated network,
-                        # connect it to target_area
-                        if source_area_name in self.areas:
-                            source_area = self.areas[self.areas.index(source_area_name)]
-                            connect(self,
-                                    target_area,
-                                    source_area)
-                        # Else, replace the input from source_area with the
-                        # chosen method
-                        else:
-                            target_area.create_additional_input(replace_non_simulated_areas,
-                                                                source_area_name,
-                                                                non_simulated_cc_input[
-                                                                    source_area_name])
-        # Connections between all simulated areas are replaced
-        else:
-            for target_area in self.areas:
-                for source_area in self.areas:
-                    if source_area != target_area:
-                        target_area.create_additional_input(replace_cc,
-                                                            source_area.name,
-                                                            cc_input[source_area.name])
-
-    def simulate(self):
-        """
-        Create the network and execute simulation.
-        Record used memory and wallclock time.
-        """
-        t0 = time.time()
-        self.base_memory = self.memory()
-        self.prepare()
-        t1 = time.time()
-        self.time_prepare = t1 - t0
-        print("Prepared simulation in {0:.2f} seconds.".format(self.time_prepare))
-
-        self.create_recording_devices()
-        self.create_areas()
-        t2 = time.time()
-        self.time_network_local = t2 - t1
-        print("Created areas and internal connections in {0:.2f} seconds.".format(
-            self.time_network_local))
-
-        self.cortico_cortical_input()
-        t3 = time.time()
-        self.network_memory = self.memory()
-        self.time_network_global = t3 - t2
-        print("Created cortico-cortical connections in {0:.2f} seconds.".format(
-            self.time_network_global))
-
-        self.save_network_gids()
-
-        nest.Simulate(self.T)
-        t4 = time.time()
-        self.time_simulate = t4 - t3
-        self.total_memory = self.memory()
-        print("Simulated network in {0:.2f} seconds.".format(self.time_simulate))
-        self.logging()
-
-    def memory(self):
-        """
-        Use NEST's memory wrapper function to record used memory.
-        """
-        try:
-            mem = nest.ll_api.sli_func('memory_thisjob')
-        except AttributeError:
-            mem = nest.sli_func('memory_thisjob')
-        if isinstance(mem, dict):
-            return mem['heap']
-        else:
-            return mem
-
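-    # Note: the return type of 'memory_thisjob' differs between NEST
-    # versions; in the dict case only the 'heap' entry is used here.
-    # Sketch (return value illustrative):
-    #
-    #     mem = self.memory()   # e.g. an integer amount of used memory
-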
-    def logging(self):
-        """
-        Write runtime and memory for the first 30 MPI processes
-        to file.
-        """
-        if nest.Rank() < 30:
-            d = {'time_prepare': self.time_prepare,
-                 'time_network_local': self.time_network_local,
-                 'time_network_global': self.time_network_global,
-                 'time_simulate': self.time_simulate,
-                 'base_memory': self.base_memory,
-                 'network_memory': self.network_memory,
-                 'total_memory': self.total_memory}
-            fn = os.path.join(self.data_dir,
-                              'recordings',
-                              '_'.join((self.label,
-                                        'logfile',
-                                        str(nest.Rank()))))
-            with open(fn, 'w') as f:
-                json.dump(d, f)
-
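-    # Each logfile is plain JSON; a sketch for reading one back (with fn
-    # constructed as in logging() above):
-    #
-    #     with open(fn, 'r') as f:
-    #         log = json.load(f)
-    #     print(log['time_simulate'], log['total_memory'])
-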
-    def save_network_gids(self):
-        with open(os.path.join(self.data_dir,
-                               'recordings',
-                               'network_gids.txt'), 'w') as f:
-            for area in self.areas:
-                for pop in self.network.structure[area.name]:
-                    if self.network.params['USING_NEST_3']:
-                        first_id = area.gids[pop][0].get()['global_id']
-                        last_id = area.gids[pop][-1].get()['global_id']
-                    else:
-                        first_id = area.gids[pop][0]
-                        last_id = area.gids[pop][1]
-                    f.write("{area},{pop},{g0},{g1}\n".format(area=area.name,
-                                                              pop=pop,
-                                                              g0=first_id,
-                                                              g1=last_id))
-
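-    # network_gids.txt holds one "area,population,first_gid,last_gid" row
-    # per population; a sketch for parsing it back:
-    #
-    #     with open('network_gids.txt', 'r') as f:
-    #         for line in f:
-    #             area, pop, g0, g1 = line.strip().split(',')
-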
-    def register_runtime(self):
-        if sumatra_found:
-            register_runtime(self.label)
-        else:
-            raise ImportWarning('Sumatra is not installed, the '
-                                'runtime cannot be registered.')
-
-
-class Area:
-    def __init__(self, simulation, network, name):
-        """
-        Area class.
-        This class encapsulates a single area of the model.
-        It creates all populations and the intrinsic connections between them.
-        It provides an interface to allow connecting the area to other areas.
-
-        Parameters
-        ----------
-        simulation : Simulation
-           An instance of the simulation class that specifies the
-           simulation that the area is part of.
-        network : MultiAreaModel
-            An instance of the multiarea_model class that specifies
-            the network the area is part of.
-        name : str
-            Name of the area.
-        """
-
-        self.name = name
-        self.simulation = simulation
-        self.network = network
-        self.neuron_numbers = network.N[name]
-        self.synapses = extract_area_dict(network.synapses,
-                                          network.structure,
-                                          self.name,
-                                          self.name)
-        self.W = extract_area_dict(network.W,
-                                   network.structure,
-                                   self.name,
-                                   self.name)
-        self.W_sd = extract_area_dict(network.W_sd,
-                                      network.structure,
-                                      self.name,
-                                      self.name)
-        self.populations = network.structure[name]
-
-        self.external_synapses = {}
-        for pop in self.populations:
-            self.external_synapses[pop] = self.network.K[self.name][pop]['external']['external']
-
-        self.create_populations()
-        self.connect_devices()
-        self.connect_populations()
-        print("Rank {}: created area {} with {} local nodes".format(nest.Rank(),
-                                                                    self.name,
-                                                                    self.num_local_nodes))
-
-    def __str__(self):
-        s = "Area {} with {} neurons.".format(
-            self.name, int(self.neuron_numbers['total']))
-        return s
-
-    def __eq__(self, other):
-        # If other is an instance of Area, it has to be the exact same
-        # area. This opens the possibility of having multiple instances
-        # of one cortical area.
-        if isinstance(other, Area):
-            return self.name == other.name and self.gids == other.gids
-        elif isinstance(other, str):
-            return self.name == other
-
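-    # Because __eq__ also accepts strings, membership tests and lookups
-    # work directly with area names (cf. cortico_cortical_input above):
-    #
-    #     'V1' in simulation.areas          # True if the area was created
-    #     simulation.areas.index('V1')      # position of the Area object
-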
-    def create_populations(self):
-        """
-        Create all populations of the area.
-        """
-        self.gids = {}
-        self.num_local_nodes = 0
-        for pop in self.populations:
-            gid = nest.Create(self.network.params['neuron_params']['neuron_model'],
-                              int(self.neuron_numbers[pop]))
-            mask = create_vector_mask(self.network.structure, areas=[self.name], pops=[pop])
-            I_e = self.network.add_DC_drive[mask][0]
-            if not self.network.params['input_params']['poisson_input']:
-                K_ext = self.external_synapses[pop]
-                W_ext = self.network.W[self.name][pop]['external']['external']
-                tau_syn = self.network.params['neuron_params']['single_neuron_dict']['tau_syn_ex']
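-                # Mean external input current in pA: each of the K_ext
-                # synapses fires at rate_ext (1/s) and deposits charge
-                # W_ext (pA) * tau_syn (ms); 1.e-3 converts ms to s.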
-                DC = K_ext * W_ext * tau_syn * 1.e-3 * \
-                    self.network.params['input_params']['rate_ext']
-                I_e += DC
-            nest.SetStatus(gid, {'I_e': I_e})
-
-            if self.network.params['USING_NEST_3']:
-                # Store GIDCollection of each population
-                self.gids[pop] = gid
-            else:
-                # Store first and last GID of each population
-                self.gids[pop] = (gid[0], gid[-1])
-
-            # Initialize membrane potentials
-            # This could also be done after creating all areas, which
-            # might yield better performance. Has to be tested.
-            if self.network.params['USING_NEST_3']:
-                gid.V_m = nest.random.normal(self.network.params['neuron_params']['V0_mean'],
-                                             self.network.params['neuron_params']['V0_sd'])
-            else:
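-                # Draw V_m per virtual process from the simulation's
-                # pyrngs so that the initial state is reproducible for a
-                # fixed number of virtual processes.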
-                for t in np.arange(nest.GetKernelStatus('local_num_threads')):
-                    local_nodes = np.array(nest.GetNodes(
-                        [0], {
-                            'model': self.network.params['neuron_params']['neuron_model'],
-                            'thread': t
-                        }, local_only=True
-                    )[0])
-                    local_nodes_pop = local_nodes[(np.logical_and(local_nodes >= gid[0],
-                                                                  local_nodes <= gid[-1]))]
-                    if len(local_nodes_pop) > 0:
-                        vp = nest.GetStatus([local_nodes_pop[0]], 'vp')[0]
-                        # vp is the same for all local nodes on the same thread
-                        nest.SetStatus(
-                            list(local_nodes_pop), 'V_m', self.simulation.pyrngs[vp].normal(
-                                self.network.params['neuron_params']['V0_mean'],
-                                self.network.params['neuron_params']['V0_sd'],
-                                len(local_nodes_pop))
-                                )
-                        self.num_local_nodes += len(local_nodes_pop)
-
-    def connect_populations(self):
-        """
-        Create connections between populations.
-        """
-        connect(self.simulation,
-                self,
-                self)
-
-    def connect_devices(self):
-        if self.name in self.simulation.params['recording_dict']['areas_recorded']:
-            for pop in self.populations:
-                # Always record spikes from all neurons to get correct
-                # statistics
-                if self.network.params['USING_NEST_3']:
-                    nest.Connect(self.gids[pop],
-                                 self.simulation.spike_detector)
-                else:
-                    nest.Connect(tuple(range(self.gids[pop][0], self.gids[pop][1] + 1)),
-                                 self.simulation.spike_detector)
-
-        if self.simulation.params['recording_dict']['record_vm']:
-            for pop in self.populations:
-                nrec = int(self.simulation.params['recording_dict']['Nrec_vm_fraction'] *
-                           self.neuron_numbers[pop])
-                if self.network.params['USING_NEST_3']:
-                    nest.Connect(self.simulation.voltmeter,
-                                 self.gids[pop][:nrec])
-                else:
-                    # Record exactly nrec neurons, matching the NEST 3 branch
-                    nest.Connect(self.simulation.voltmeter,
-                                 tuple(range(self.gids[pop][0], self.gids[pop][0] + nrec)))
-        if self.network.params['input_params']['poisson_input']:
-            self.poisson_generators = []
-            for pop in self.populations:
-                K_ext = self.external_synapses[pop]
-                W_ext = self.network.W[self.name][pop]['external']['external']
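-                # A single Poisson generator with rate K_ext * rate_ext
-                # emulates K_ext independent external sources firing at
-                # rate_ext each (superposition of Poisson processes).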
-                pg = nest.Create('poisson_generator')
-                nest.SetStatus(
-                    pg, {'rate': self.network.params['input_params']['rate_ext'] * K_ext})
-                syn_spec = {'weight': W_ext}
-                if self.network.params['USING_NEST_3']:
-                    nest.Connect(pg,
-                                 self.gids[pop],
-                                 syn_spec=syn_spec)
-                else:
-                    nest.Connect(pg,
-                                 tuple(
-                                     range(self.gids[pop][0], self.gids[pop][1] + 1)),
-                                 syn_spec=syn_spec)
-                self.poisson_generators.append(pg[0])
-
-    def create_additional_input(self, input_type, source_area_name, cc_input):
-        """
-        Replace the input from a source area by the chosen type of input.
-
-        Parameters
-        ----------
-        input_type : str, {'het_current_nonstat', 'hom_poisson_stat',
-                           'het_poisson_stat'}
-            Type of input that replaces the source area. The source
-            area can be replaced by Poisson sources firing at the same
-            global rate rate_ext ('hom_poisson_stat'), by Poisson
-            sources with population-specific rates ('het_poisson_stat'),
-            or by population-specific, time-varying currents
-            ('het_current_nonstat').
-        source_area_name: str
-            Name of the source area to be replaced.
-        cc_input : dict
-            Dictionary of the cortico-cortical input that replaces the
-            source area, keyed by source population.
-        """
-        synapses = extract_area_dict(self.network.synapses,
-                                     self.network.structure,
-                                     self.name,
-                                     source_area_name)
-        W = extract_area_dict(self.network.W,
-                              self.network.structure,
-                              self.name,
-                              source_area_name)
-        v = self.network.params['delay_params']['interarea_speed']
-        s = self.network.distances[self.name][source_area_name]
-        delay = s / v
-        for pop in self.populations:
-            for source_pop in self.network.structure[source_area_name]:
-                syn_spec = {'weight': W[pop][source_pop],
-                            'delay': delay}
-                K = synapses[pop][source_pop] / self.neuron_numbers[pop]
-
-                if input_type == 'het_current_nonstat':
-                    curr_gen = nest.Create('step_current_generator')
-                    dt = self.simulation.params['dt']
-                    T = self.simulation.params['t_sim']
-                    assert(len(cc_input[source_pop]) == int(T))
-                    nest.SetStatus(curr_gen, {'amplitude_values': K * cc_input[source_pop] * 1e-3,
-                                              'amplitude_times': np.arange(dt,
-                                                                           T + dt,
-                                                                           1.)})
-                    if self.network.params['USING_NEST_3']:
-                        nest.Connect(curr_gen,
-                                     self.gids[pop],
-                                     syn_spec=syn_spec)
-                    else:
-                        nest.Connect(curr_gen,
-                                     tuple(
-                                         range(self.gids[pop][0], self.gids[pop][1] + 1)),
-                                     syn_spec=syn_spec)
-                elif 'poisson_stat' in input_type:  # hom. and het. Poisson input lead here
-                    pg = nest.Create('poisson_generator')
-                    nest.SetStatus(pg, {'rate': K * cc_input[source_pop]})
-                    if self.network.params['USING_NEST_3']:
-                        nest.Connect(pg,
-                                     self.gids[pop],
-                                     syn_spec=syn_spec)
-                    else:
-                        nest.Connect(pg,
-                                     tuple(
-                                         range(self.gids[pop][0], self.gids[pop][1] + 1)),
-                                     syn_spec=syn_spec)
-
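-    # Usage sketch (hypothetical, abridged rates in spikes/s): replacing
-    # the input from area 'V2' by heterogeneous stationary Poisson input:
-    #
-    #     area.create_additional_input('het_poisson_stat', 'V2',
-    #                                  {'23E': 3.5, '23I': 4.2})
-    #
-    # cc_input needs one entry per population of the source area.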
-
-def connect(simulation,
-            target_area,
-            source_area):
-    """
-    Connect two areas with each other.
-
-    Parameters
-    ----------
-    simulation : Simulation instance
-        The simulation of the network that contains the two areas.
-    target_area : Area instance
-        Target area of the projection
-    source_area : Area instance
-        Source area of the projection
-    """
-
-    network = simulation.network
-    synapses = extract_area_dict(network.synapses,
-                                 network.structure,
-                                 target_area.name,
-                                 source_area.name)
-    W = extract_area_dict(network.W,
-                          network.structure,
-                          target_area.name,
-                          source_area.name)
-    W_sd = extract_area_dict(network.W_sd,
-                             network.structure,
-                             target_area.name,
-                             source_area.name)
-
-    if network.params['USING_NEST_3']:
-        for target in target_area.populations:
-            for source in source_area.populations:
-                conn_spec = {'rule': 'fixed_total_number',
-                             'N': int(synapses[target][source])}
-
-                if target_area == source_area:
-                    if 'E' in source:
-                        w_min = 0.
-                        w_max = np.Inf
-                        mean_delay = network.params['delay_params']['delay_e']
-                    elif 'I' in source:
-                        w_min = np.NINF
-                        w_max = 0.
-                        mean_delay = network.params['delay_params']['delay_i']
-                else:
-                    w_min = 0.
-                    w_max = np.Inf
-                    v = network.params['delay_params']['interarea_speed']
-                    s = network.distances[target_area.name][source_area.name]
-                    mean_delay = s / v
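-                    # Delay from distance over conduction speed; with s in
-                    # mm and v in mm/ms (assumed units) this yields ms.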
-
-                syn_spec = {
-                    'synapse_model': 'static_synapse',
-                    'weight': nest.math.redraw(
-                        nest.random.normal(mean=W[target][source],
-                                           std=W_sd[target][source]),
-                        min=w_min,
-                        max=w_max),
-                    'delay': nest.math.redraw(
-                        nest.random.normal(
-                            mean=mean_delay,
-                            std=(mean_delay *
-                                 network.params['delay_params']['delay_rel'])),
-                        min=simulation.params['dt'],
-                        max=np.Inf)
-                }
-
-                nest.Connect(source_area.gids[source],
-                             target_area.gids[target],
-                             conn_spec,
-                             syn_spec)
-    else:
-        for target in target_area.populations:
-            for source in source_area.populations:
-                conn_spec = {'rule': 'fixed_total_number',
-                             'N': int(synapses[target][source])}
-
-                syn_weight = {'distribution': 'normal_clipped',
-                              'mu': W[target][source],
-                              'sigma': W_sd[target][source]}
-                if target_area == source_area:
-                    if 'E' in source:
-                        syn_weight.update({'low': 0.})
-                        mean_delay = network.params['delay_params']['delay_e']
-                    elif 'I' in source:
-                        syn_weight.update({'high': 0.})
-                        mean_delay = network.params['delay_params']['delay_i']
-                else:
-                    v = network.params['delay_params']['interarea_speed']
-                    s = network.distances[target_area.name][source_area.name]
-                    mean_delay = s / v
-                syn_delay = {'distribution': 'normal_clipped',
-                             'low': simulation.params['dt'],
-                             'mu': mean_delay,
-                             'sigma': mean_delay * network.params['delay_params']['delay_rel']}
-                syn_spec = {'weight': syn_weight,
-                            'delay': syn_delay,
-                            'model': 'static_synapse'}
-
-                nest.Connect(tuple(range(source_area.gids[source][0],
-                                         source_area.gids[source][1] + 1)),
-                             tuple(range(target_area.gids[target][0],
-                                         target_area.gids[target][1] + 1)),
-                             conn_spec,
-                             syn_spec)
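-
-
-# Usage sketch (names illustrative): connecting two existing Area
-# instances within a Simulation `sim`, mirroring the pattern used in
-# Simulation.cortico_cortical_input():
-#
-#     v1 = sim.areas[sim.areas.index('V1')]
-#     v2 = sim.areas[sim.areas.index('V2')]
-#     connect(sim, v1, v2)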