diff --git a/.github/workflows/ebrains-push.yml b/.github/workflows/ebrains-push.yml index a7e08d550336e76fe56c6b516755c8667f67a6b9..d9c177388b06b1365898e8ba2b66c1838624625c 100644 --- a/.github/workflows/ebrains-push.yml +++ b/.github/workflows/ebrains-push.yml @@ -9,12 +9,6 @@ jobs: runs-on: ubuntu-latest if: ${{ github.repository_owner == 'INM-6' }} steps: - - name: Harden Runner - uses: step-security/harden-runner@63c24ba6bd7ba022e95695ff85de572c04a18142 # v2.7.0 - with: - egress-policy: audit - disable-telemetry: true - - name: sycnmaster uses: wei/git-sync@55c6b63b4f21607da0e9877ca9b4d11a29fc6d83 with:
diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml new file mode 100644 index 0000000000000000000000000000000000000000..633ad20875755e2a13645ce6e85436e762256dc7 --- /dev/null +++ b/.gitlab-ci.yml @@ -0,0 +1,37 @@ +# Specify stages in this CI workflow +stages: + - build + - test + - deploy + +# Variables defined here can be referenced by name throughout the pipeline +# variables: + # GITLAB_BUILD_ENV_DOCKER_IMAGE: docker-registry.ebrains.eu/tc/ebrains-spack-build-env/gitlab_runners_nfs:gitlab_runners_nfs_23.06 + # SYSTEMNAME: ebrainslab +run_notebooks: + stage: test + tags: + # Tags select runners that satisfy the job's requirements + - docker-runner + + before_script: + # - cat /mnt/ebrains_env/ebrains-23.09/bin/load_env.sh + - . /mnt/ebrains_env/ebrains-23.09/bin/load_env.sh + + + script: + - echo "Starting testing..." + # List all kernels + # - jupyter kernelspec list + - echo "Testing multi-area-model.ipynb..." + # Replace every occurrence of ebrains-23.09 with python3, since the ebrains-23.09 kernel is now registered as python3 + - sed -i -e "s/ebrains-23.09/python3/" multi-area-model.ipynb + # Execute the notebook with nbconvert (an executed copy of the notebook is written) + - jupyter nbconvert --to notebook --execute ./multi-area-model.ipynb + - echo "Testing M2E_statistical_test.ipynb..." + # Replace every occurrence of ebrains-23.09 with python3, since the ebrains-23.09 kernel is now registered as python3 + - sed -i -e "s/ebrains-23.09/python3/" M2E_statistical_test.ipynb + # Execute the notebook with nbconvert (an executed copy of the notebook is written) + - jupyter nbconvert --to notebook --execute ./M2E_statistical_test.ipynb + - echo "All tests finished and passed!"
diff --git a/figures/MAM2EBRAINS/M2E_compute_corrcoeff.py b/figures/MAM2EBRAINS/M2E_compute_corrcoeff.py index ba37d1bd069eb1502f246c46a5384a61b3c41769..1b486296c04d90927b025043cbc08c16185da0c2 100644 --- a/figures/MAM2EBRAINS/M2E_compute_corrcoeff.py +++ b/figures/MAM2EBRAINS/M2E_compute_corrcoeff.py @@ -1,17 +1,22 @@ import json import numpy as np import os - import correlation_toolbox.helper as ch -from multiarea_model import MultiAreaModel -import sys - -""" -Compute correlation coefficients for a subsample -of neurons for the entire network from raw spike files of a given simulation. -""" def compute_corrcoeff(M, data_path, label): + """ + Compute the correlation coefficient between + the spiking rates of different populations + in different areas. + + Parameters: + - M (MultiAreaModel): The MultiAreaModel instance. + - data_path (str): The path to the directory where the data is stored. + - label (str): The label used to identify the specific data set.
+ + Returns: + None + """ load_path = os.path.join(data_path, label, 'recordings') @@ -19,9 +24,6 @@ def compute_corrcoeff(M, data_path, label): label, 'Analysis') - # with open(os.path.join(data_path, label, 'custom_params_{}'.format(label)), 'r') as f: - # sim_params = json.load(f) - # T = sim_params['T'] T = M.simulation.params["t_sim"] areas_simulated = M.simulation.params["areas_simulated"] @@ -29,18 +31,9 @@ def compute_corrcoeff(M, data_path, label): subsample = 2000 resolution = 1. - """ - Create MultiAreaModel instance to have access to data structures - """ - # M = MultiAreaModel({}) - - spike_data = {} cc_dict = {} - # for area in M.area_list: for area in areas_simulated: cc_dict[area] = {} - LvR_list = [] - N = [] for pop in M.structure[area]: fp = '-'.join((label, 'spikes', # assumes that the default label for spike files was used @@ -48,20 +41,18 @@ def compute_corrcoeff(M, data_path, label): pop)) fn = '{}/{}.npy'.format(load_path, fp) # +1000 to ensure that we really have subsample non-silent neurons in the end - # spikes = np.load(fn) spikes = np.load(fn, allow_pickle=True) ids = np.unique(spikes[:, 0]) dat = ch.sort_gdf_by_id(spikes, idmin=ids[0], idmax=ids[0]+subsample+1000) bins, hist = ch.instantaneous_spike_count(dat[1], resolution, tmin=tmin, tmax=T) rates = ch.strip_binned_spiketrains(hist)[:subsample] - # test if only 1 of the neurons is firing, if yes, print warning message and continue + # Test if only 1 of the neurons is firing, if yes, print warning message and continue if rates.shape[0] < 2: print(f"WARNING: There are less than 2 neurons firing in the population: {area} {pop}, the corresponding cross-correlation will not be computed.") - # cc_dict[area][pop] = 0. continue - # compute cross correlation coefficient + # Compute cross correlation coefficient cc = np.corrcoef(rates) cc = np.extract(1-np.eye(cc[0].size), cc) cc[np.where(np.isnan(cc))] = 0. diff --git a/figures/MAM2EBRAINS/M2E_compute_fc.py b/figures/MAM2EBRAINS/M2E_compute_fc.py index 57e390cf7471c5ed80d1ec34818d8a083a90853f..8cdd103b4214d1dc5ef08fd933dc05aa533ae42a 100644 --- a/figures/MAM2EBRAINS/M2E_compute_fc.py +++ b/figures/MAM2EBRAINS/M2E_compute_fc.py @@ -1,26 +1,25 @@ import correlation_toolbox.helper as ch import numpy as np import os -import sys -from multiarea_model import MultiAreaModel from scipy.spatial.distance import pdist from scipy.spatial.distance import squareform from M2E_compute_synaptic_input import compute_synaptic_input -""" -Compute the functional connectivity between all areas of a given -simulation based on their time series of spiking rates or their -estimated BOLD signal. -""" - -# data_path = sys.argv[1] -# label = sys.argv[2] -# method = sys.argv[3] - def compute_fc(M, data_path, label): - # compute synaptic input + """ + Compute the functional connectivity between different areas of the brain using synaptic input. + + Parameters: + - M (MultiAreaModel): An instance of the MultiAreaModel class representing the brain model. + - data_path (str): The path to the directory where the data is stored. + - label (str): The label used to identify the specific data set. 
+ + Returns: + None + """ + # Compute synaptic input for area in M.area_list: compute_synaptic_input(M, data_path, label, area) @@ -33,11 +32,6 @@ def compute_fc(M, data_path, label): save_path = os.path.join(data_path, label, 'Analysis') - - # """ - # Create MultiAreaModel instance to have access to data structures - # """ - # M = MultiAreaModel({}) time_series = [] for area in M.area_list: diff --git a/figures/MAM2EBRAINS/M2E_compute_louvain_communities.py b/figures/MAM2EBRAINS/M2E_compute_louvain_communities.py index ec7e92362d1351ed53740351ea8ca6f889f99497..629e7bd2915b1f7014a97f47c15d30df749dd6dd 100644 --- a/figures/MAM2EBRAINS/M2E_compute_louvain_communities.py +++ b/figures/MAM2EBRAINS/M2E_compute_louvain_communities.py @@ -5,27 +5,22 @@ import json import networkx as nx import numpy as np import os -import sys -from multiarea_model.multiarea_model import MultiAreaModel - -""" -Determines communities in the functional connectivity of either the -experimental fMRI data used in Schmidt et al. 2018 or of a given -simulation (the functional connectivity being based either on spike -rates or an estimated BOLD signal). -""" - -# data_path = sys.argv[1] -# label = sys.argv[2] -# method = sys.argv[3] +def compute_communities(M, data_path, label): + """ + Determines communities in the functional connectivity of either the + experimental fMRI data used in Schmidt et al. 2018 or of a given + simulation (the functional connectivity being based either on spike + rates or an estimated BOLD signal). -# """ -# Create MultiAreaModel instance to have access to data structures -# """ -# M = MultiAreaModel({}) + Parameters: + - M (MultiAreaModel): The M object containing the area list. + - data_path (str): The path to the data directory. + - label (str): The label for the data. -def compute_communities(M, data_path, label): + Returns: + None + """ method = "synaptic_input" if label == 'exp': @@ -73,7 +68,6 @@ def compute_communities(M, data_path, label): edges.append((area, area2, FC[i][j])) G.add_weighted_edges_from(edges) - # part = community.best_partition(G) part = community_louvain.best_partition(G) if label == 'exp': diff --git a/figures/MAM2EBRAINS/M2E_compute_pop_LvR.py b/figures/MAM2EBRAINS/M2E_compute_pop_LvR.py index 54799aba7bb698b2beb3c0551a1810a0bd9537cb..0cb91a6e4e5fcc16416f08546415d851985060ed 100644 --- a/figures/MAM2EBRAINS/M2E_compute_pop_LvR.py +++ b/figures/MAM2EBRAINS/M2E_compute_pop_LvR.py @@ -3,18 +3,20 @@ import numpy as np import os from multiarea_model.analysis_helpers import pop_LvR -from multiarea_model import MultiAreaModel -import sys - -""" -Compute LvR for the entire network from raw spike -files of a given simulation. -""" - -# data_path = sys.argv[1] -# label = sys.argv[2] def compute_pop_LvR(M, data_path, label): + """ + Compute LvR for the entire network from raw spike + files of a given simulation. 
+ + Parameters: + - M (MultiAreaModel): MultiAreaModel instance + - data_path (str): path to the data + - label (str): label for the data + + Returns: + None + """ load_path = os.path.join(data_path, label, 'recordings') @@ -22,33 +24,21 @@ def compute_pop_LvR(M, data_path, label): label, 'Analysis') - # with open(os.path.join(data_path, label, 'custom_params_{}'.format(label)), 'r') as f: - # sim_params = json.load(f) - # T = sim_params['T'] T = M.simulation.params["t_sim"] areas_simulated = M.simulation.params["areas_simulated"] - # """ - # Create MultiAreaModel instance to have access to data structures - # """ - # M = MultiAreaModel({}) - spike_data = {} pop_LvR_dict = {} - # for area in ['V1']: # , 'V2', 'FEF']: + for area in areas_simulated: pop_LvR_dict[area] = {} - LvR_list = [] - N = [] for pop in M.structure[area]: fp = '-'.join((label, - 'spikes', # assumes that the default label for spike files was used + 'spikes', # Assumes that the default label for spike files was used area, pop)) fn = '{}/{}.npy'.format(load_path, fp) - # dat = np.load(fn) dat = np.load(fn, allow_pickle=True) - # print(area, pop) pop_LvR_dict[area][pop] = pop_LvR(dat, 2., 500., T, round(M.N[area][pop]))[0] fn = os.path.join(save_path, diff --git a/figures/MAM2EBRAINS/M2E_compute_pop_rates.py b/figures/MAM2EBRAINS/M2E_compute_pop_rates.py index bc71e0fe5dc342f74c9f91c4254a6a79c6c25ec4..a5b79623defc7496566e8d3682ec593fd55c183f 100644 --- a/figures/MAM2EBRAINS/M2E_compute_pop_rates.py +++ b/figures/MAM2EBRAINS/M2E_compute_pop_rates.py @@ -3,18 +3,20 @@ import numpy as np import os from multiarea_model.analysis_helpers import pop_rate -from multiarea_model import MultiAreaModel -import sys - -""" -Compute stationary spike rates for the entire network from raw spike -files of a given simulation. -""" - -# data_path = sys.argv[1] -# label = sys.argv[2] def compute_pop_rates(M, data_path, label): + """ + Compute stationary spike rates for the entire network from raw spike + files of a given simulation. + + Parameters: + - M (MultiAreaModel): The MultiAreaModel instance containing the simulation data. + - data_path (str): The path to the directory where the simulation data is stored. + - label (str): The label used to identify the simulation data. 
+ + Returns: + None + """ load_path = os.path.join(data_path, label, 'recordings') @@ -22,20 +24,10 @@ def compute_pop_rates(M, data_path, label): label, 'Analysis') - # with open(os.path.join(data_path, label, 'custom_params_{}'.format(label)), 'r') as f: - # sim_params = json.load(f) - # T = sim_params['T'] T = M.simulation.params["t_sim"] areas_simulated = M.simulation.params["areas_simulated"] - # """ - # Create MultiAreaModel instance to have access to data structures - # """ - # M = MultiAreaModel({}) - - spike_data = {} pop_rates = {} - # for area in M.area_list: for area in areas_simulated: pop_rates[area] = {} rate_list = [] @@ -46,9 +38,7 @@ def compute_pop_rates(M, data_path, label): area, pop)) fn = '{}/{}.npy'.format(load_path, fp) - # dat = np.load(fn) dat = np.load(fn, allow_pickle=True) - # print(area, pop) pop_rates[area][pop] = pop_rate(dat, 500., T, M.N[area][pop]) rate_list.append(pop_rates[area][pop]) N.append(M.N[area][pop]) diff --git a/figures/MAM2EBRAINS/M2E_compute_rate_time_series.py b/figures/MAM2EBRAINS/M2E_compute_rate_time_series.py index 2ef7908b8e507d780acf0c960883720271097ea3..ce83fd1b9dc46edfe01d732c88b3f93d10dae32d 100644 --- a/figures/MAM2EBRAINS/M2E_compute_rate_time_series.py +++ b/figures/MAM2EBRAINS/M2E_compute_rate_time_series.py @@ -6,28 +6,27 @@ import quantities as pq from multiarea_model.analysis_helpers import pop_rate_time_series from elephant.statistics import instantaneous_rate -from multiarea_model import MultiAreaModel -import sys - -""" -Compute time series of population-averaged spike rates for a given -area from raw spike files of a given simulation. - -Implements three different methods: -- binned spike histograms on all neurons ('full') -- binned spike histograms on a subsample of 140 neurons ('subsample') -- spike histograms convolved with a Gaussian kernel of optimal width - after Shimazaki et al. (2010) -""" - -# assert(len(sys.argv) == 5) - -# data_path = sys.argv[1] -# label = sys.argv[2] -# area = sys.argv[3] -# method = sys.argv[4] def compute_rate_time_series(M, data_path, label, area, method): + """ + Compute time series of population-averaged spike rates for a given + area from raw spike files of a given simulation. + + Implements three different methods: + - binned spike histograms on all neurons ('full') + - binned spike histograms on a subsample of 140 neurons ('subsample') + - spike histograms convolved with a Gaussian kernel of optimal width after Shimazaki et al. (2010) + + Parameters: + - M (MultiAreaModel): The MultiAreaModel instance representing the model. + - data_path (str): The path to the data directory. + - label (str): The label for the data. + - area (str): The area for which to compute the rate time series. + - method (str): The method to use for computing the rate time series. Must be one of 'subsample', 'full', or 'auto_kernel'. 
+ + Returns: + None + """ assert(method in ['subsample', 'full', 'auto_kernel']) # subsample : subsample spike data to 140 neurons to match the Chu 2014 data # full : use spikes of all neurons and compute spike histogram with bin size 1 ms @@ -47,26 +46,16 @@ def compute_rate_time_series(M, data_path, label, area, method): except FileExistsError: pass - # with open(os.path.join(data_path, label, 'custom_params_{}'.format(label)), 'r') as f: - # sim_params = json.load(f) - # T = sim_params['T'] T = M.simulation.params["t_sim"] - areas_simulated = M.simulation.params["areas_simulated"] - - """ - Create MultiAreaModel instance to have access to data structures - """ - # M = MultiAreaModel({}) time_series_list = [] N_list = [] for pop in M.structure[area]: fp = '-'.join((label, - 'spikes', # assumes that the default label for spike files was used + 'spikes', # Assumes that the default label for spike files was used area, pop)) fn = '{}/{}.npy'.format(load_path, fp) - # spike_data = np.load(fn) spike_data = np.load(fn, allow_pickle=True) spike_data = spike_data[np.logical_and(spike_data[:, 1] > 500., spike_data[:, 1] <= T)] diff --git a/figures/MAM2EBRAINS/M2E_compute_synaptic_input.py b/figures/MAM2EBRAINS/M2E_compute_synaptic_input.py index bf871af1fe37d33b08451826ae147ee8710c100b..fad8f63b40bd9e3d0fbe1b321201dcad9d2ef9bb 100644 --- a/figures/MAM2EBRAINS/M2E_compute_synaptic_input.py +++ b/figures/MAM2EBRAINS/M2E_compute_synaptic_input.py @@ -1,17 +1,20 @@ -import correlation_toolbox.helper as ch -import correlation_toolbox.correlation_analysis as corr import json import numpy as np import os -import sys -from multiarea_model.multiarea_model import MultiAreaModel +def compute_synaptic_input(M, data_path, label, area): + """ + Compute the synaptic input for a given area. -# data_path = sys.argv[1] -# label = sys.argv[2] -# area = sys.argv[3] + Parameters: + - M (MultiAreaModel): An instance of the MultiAreaModel class. + - data_path (str): The path to the data directory. + - label (str): The label for the data. + - area (str): The area for which to compute the synaptic input. 
-def compute_synaptic_input(M, data_path, label, area): + Returns: + None + """ load_path = os.path.join(data_path, label, 'Analysis', @@ -21,23 +24,8 @@ def compute_synaptic_input(M, data_path, label, area): 'Analysis', 'synaptic_input') - with open(os.path.join(data_path, label, 'custom_params_{}'.format(label)), 'r') as f: - sim_params = json.load(f) - # T = sim_params['T'] T = M.simulation.params['t_sim'] - - # """ - # Create MultiAreaModel instance to have access to data structures - # """ - # connection_params = {'g': -11., - # 'cc_weights_factor': sim_params['cc_weights_factor'], - # 'cc_weights_I_factor': sim_params['cc_weights_I_factor'], - # 'K_stable': '../SchueckerSchmidt2017/K_prime_original.npy'} - # network_params = {'connection_params': connection_params} - # M = MultiAreaModel(network_params) - - """ Synaptic filtering kernel """ diff --git a/figures/MAM2EBRAINS/M2E_firing_rate.py b/figures/MAM2EBRAINS/M2E_firing_rate.py index 66db04dd95dde2742d7930d63783c90f7cb7a8c5..c89214eab201aaccd8a6e35641db117de711d2be 100644 --- a/figures/MAM2EBRAINS/M2E_firing_rate.py +++ b/figures/MAM2EBRAINS/M2E_firing_rate.py @@ -2,14 +2,21 @@ import numpy as np import matplotlib.pyplot as pl import os import json -from matplotlib.colors import LogNorm -from matplotlib.ticker import FixedLocator from M2E_compute_pop_rates import compute_pop_rates from M2E_compute_rate_time_series import compute_rate_time_series -# Function for computing and printing the mean firing rate for all and only all simulated populations def mean_firing_rate(M, data_path): + """ + Calculate the mean firing rate for all simulated populations. + + Parameters: + - M (MultiAreaModel): The simulation object. + - data_path (str): The path to the data directory. + + Returns: + None + """ label = M.simulation.label # Compute firing rate for all simulated populations @@ -24,7 +31,6 @@ def mean_firing_rate(M, data_path): rates = np.zeros((len(M.area_list), 8)) for i, area in enumerate(M.area_list): for j, pop in enumerate(M.structure[area][::-1]): - # rate = pop_rates[area][pop][0] rate = pop_rates[area][pop] if rate == 0.0: rate = 1e-5 @@ -38,39 +44,41 @@ def mean_firing_rate(M, data_path): print("The mean firing rate over all simulated populations is {0:.3f} spikes/s.".format(mfr)) -# Function for visualizing the instantaneous firing rate over all simulated areas def plot_firing_rate_over_areas(M, data_path): + """ + Generate a plot of firing rate over different areas based on the provided simulation data. + + Parameters: + - M: object containing simulation data + - data_path: path to the data directory + + Returns: + None + """ area_list = M.simulation.params["areas_simulated"] label = M.simulation.label for area in area_list: compute_rate_time_series(M, data_path, label, area, 'full') - # time series of firing rates + # Time series of firing rates rate_time_series = {} for area in area_list: fn = os.path.join(data_path, label, 'Analysis', 'rate_time_series_full', 'rate_time_series_full_{}.npy'.format(area)) - # fn = os.path.join(data_path, label, - # 'Analysis', - # 'rate_time_series-{}.npy'.format(area)) rate_time_series[area] = np.load(fn) - # print(rate_time_series) t_min = 500. 
t_max = M.simulation.params['t_sim'] - time = np.arange(t_min, t_max) matrix = [] for area in area_list: - # print(area) binned_spikes = rate_time_series[area][500:600] matrix.append(binned_spikes) matrix = np.array(matrix) - # normalized_matrix = (matrix - np.min(matrix)) / (np.max(matrix) - np.min(matrix)) fig = pl.figure(figsize=(12, 5)) fig.suptitle('Instantaneous firing rate over simulated areas', fontsize=16, x=0.45, y=0.95) @@ -78,7 +86,6 @@ def plot_firing_rate_over_areas(M, data_path): cmap = pl.get_cmap('YlOrBr') - # masked_matrix = np.ma.masked_where(np.isnan(matrix), matrix) ax.patch.set_hatch('x') im = ax.pcolormesh(matrix, cmap=cmap, edgecolors='None', vmin=0) ax.set_xlim(0, matrix[0].size) @@ -89,6 +96,5 @@ def plot_firing_rate_over_areas(M, data_path): ax.set_yticklabels(area_list) ax.set_ylabel('Area', size=13) ax.set_xlabel('Time (ms)', size=13) - # t = FixedLocator([0.01, 0.1, 1., 10., 100.]) pl.colorbar(im) \ No newline at end of file diff --git a/figures/MAM2EBRAINS/M2E_visualize_dynamics.py b/figures/MAM2EBRAINS/M2E_visualize_dynamics.py index 4ec7caa3574f8f930cfbe8691581ad13877f20a0..946a6fcc19487422785e439407ca80e7eb63b539 100644 --- a/figures/MAM2EBRAINS/M2E_visualize_dynamics.py +++ b/figures/MAM2EBRAINS/M2E_visualize_dynamics.py @@ -5,16 +5,11 @@ import os import sys sys.path.append('./figures/Schmidt2018_dyn') -# from helpers import original_data_path, population_labels from helpers import population_labels -from multiarea_model import MultiAreaModel -from multiarea_model import Analysis from plotcolors import myred, myblue import matplotlib.pyplot as pl from matplotlib import gridspec -# from matplotlib import rc_file -# rc_file('plotstyle.rc') from M2E_compute_pop_LvR import compute_pop_LvR from M2E_compute_corrcoeff import compute_corrcoeff @@ -24,6 +19,15 @@ icolor = myred ecolor = myblue def set_boxplot_props(d): + """ + Sets the properties of a boxplot. + + Parameters: + - d (dict): A dictionary containing the boxplot elements. + + Returns: + None + """ for i in range(len(d['boxes'])): if i % 2 == 0: d['boxes'][i].set_facecolor(icolor) @@ -39,6 +43,19 @@ def set_boxplot_props(d): markerfacecolor='k', markeredgecolor='k', markersize=3.) def visual_dynamics(M, data_path, raster_areas=['V1', 'V2', 'FEF']): + """ + Visualize the network dynamics for the given simulation, + including raster plots, mean firing rates, + correlation coefficients, and rate time series. + + Parameters: + - M (MultiAreaModel): The MultiAreaModel instance containing the simulation data. + - data_path (str): The path to the directory where the simulation data is stored. + - raster_areas (list): The list of areas to plot the raster plots for. + + Returns: + None + """ label_spikes = M.simulation.label # Compute pop_LvR for simulated area @@ -82,7 +99,6 @@ def visual_dynamics(M, data_path, raster_areas=['V1', 'V2', 'FEF']): nrows = 4 ncols = 4 - # width = 7.0866 width = 10 panel_wh_ratio = 0.7 * (1. + np.sqrt(5)) / 2. 
# golden ratio @@ -95,9 +111,6 @@ def visual_dynamics(M, data_path, raster_areas=['V1', 'V2', 'FEF']): gs1 = gridspec.GridSpec(1, 3) gs1.update(left=0.06, right=0.72, top=0.95, wspace=0.4, bottom=0.35) - # axes['A'] = pl.subplot(gs1[:-1, :1]) - # axes['B'] = pl.subplot(gs1[:-1, 1:2]) - # axes['C'] = pl.subplot(gs1[:-1, 2:]) axes['A'] = pl.subplot(gs1[:1, :1]) axes['B'] = pl.subplot(gs1[:1, 1:2]) axes['C'] = pl.subplot(gs1[:1, 2:]) @@ -116,10 +129,6 @@ def visual_dynamics(M, data_path, raster_areas=['V1', 'V2', 'FEF']): labels = ['A', 'B', 'C'] for area, label in zip(raster_areas, labels): label_pos = [-0.2, 1.01] - # pl.text(label_pos[0], label_pos[1], r'\bfseries{}' + label + ': ' + area, - # fontdict={'fontsize': 10, 'weight': 'bold', - # 'horizontalalignment': 'left', 'verticalalignment': - # 'bottom'}, transform=axes[label].transAxes) pl.text(label_pos[0], label_pos[1], label + ': ' + area, fontdict={'fontsize': 10, 'weight': 'bold', 'horizontalalignment': 'left', 'verticalalignment': @@ -127,10 +136,6 @@ def visual_dynamics(M, data_path, raster_areas=['V1', 'V2', 'FEF']): label = 'G' label_pos = [-0.1, 0.92] - # pl.text(label_pos[0], label_pos[1], r'\bfseries{}' + label, - # fontdict={'fontsize': 10, 'weight': 'bold', - # 'horizontalalignment': 'left', 'verticalalignment': - # 'bottom'}, transform=axes[label].transAxes) pl.text(label_pos[0], label_pos[1], label, fontdict={'fontsize': 10, 'weight': 'bold', 'horizontalalignment': 'left', 'verticalalignment': @@ -139,10 +144,6 @@ def visual_dynamics(M, data_path, raster_areas=['V1', 'V2', 'FEF']): labels = ['E', 'D', 'F'] for label in labels: label_pos = [-0.2, 1.05] - # pl.text(label_pos[0], label_pos[1], r'\bfseries{}' + label, - # fontdict={'fontsize': 10, 'weight': 'bold', - # 'horizontalalignment': 'left', 'verticalalignment': - # 'bottom'}, transform=axes[label].transAxes) pl.text(label_pos[0], label_pos[1], label, fontdict={'fontsize': 10, 'weight': 'bold', 'horizontalalignment': 'left', 'verticalalignment': @@ -159,33 +160,8 @@ def visual_dynamics(M, data_path, raster_areas=['V1', 'V2', 'FEF']): for label in ['A', 'B', 'C']: axes[label].yaxis.set_ticks_position('none') - - -# """ -# Load data -# """ -# LOAD_ORIGINAL_DATA = True - - -# if LOAD_ORIGINAL_DATA: -# # use T=10500 simulation for spike raster plots -# label_spikes = '3afaec94d650c637ef8419611c3f80b3cb3ff539' -# # and T=100500 simulation for all other panels -# label = '99c0024eacc275d13f719afd59357f7d12f02b77' -# data_path = original_data_path -# else: -# from network_simulations import init_models -# from config import data_path -# models = init_models('Fig5') -# label_spikes = models[0].simulation.label -# label = models[1].simulation.label - - # """ - # Create MultiAreaModel instance to have access to data structures - # """ - # M = MultiAreaModel({}) - # spike data + # Spike data spike_data = {} for area in areas_simulated: spike_data[area] = {} @@ -197,24 +173,21 @@ def visual_dynamics(M, data_path, raster_areas=['V1', 'V2', 'FEF']): # area, pop))) area, pop)), allow_pickle=True) - # stationary firing rates + # Stationary firing rates fn = os.path.join(data_path, label_spikes, 'Analysis', 'pop_rates.json') with open(fn, 'r') as f: pop_rates = json.load(f) - # time series of firing rates + # Time series of firing rates rate_time_series = {} for area in raster_areas: fn = os.path.join(data_path, label_spikes, 'Analysis', 'rate_time_series_full', 'rate_time_series_full_{}.npy'.format(area)) - # fn = os.path.join(data_path, label_spikes, - # 'Analysis', - # 
'rate_time_series-{}.npy'.format(area)) rate_time_series[area] = np.load(fn) - # time series of firing rates convolved with a kernel + # Time series of firing rates convolved with a kernel if rate_auto_kernel == True: rate_time_series_auto_kernel = {} for area in raster_areas: @@ -224,12 +197,12 @@ def visual_dynamics(M, data_path, raster_areas=['V1', 'V2', 'FEF']): 'rate_time_series_auto_kernel_{}.npy'.format(area)) rate_time_series_auto_kernel[area] = np.load(fn) - # local variance revised (LvR) + # Local variance revised (LvR) fn = os.path.join(data_path, label_spikes, 'Analysis', 'pop_LvR.json') with open(fn, 'r') as f: pop_LvR = json.load(f) - # correlation coefficients + # Correlation coefficients fn = os.path.join(data_path, label_spikes, 'Analysis', 'corrcoeff.json') with open(fn, 'r') as f: corrcoeff = json.load(f) @@ -237,17 +210,12 @@ def visual_dynamics(M, data_path, raster_areas=['V1', 'V2', 'FEF']): """ Plotting """ - # print("Raster plots") - - # t_min = 3000. - # t_max = 3500. t_min = t_sim - 500 t_max = t_sim icolor = myred ecolor = myblue - # frac_neurons = 0.03 frac_neurons = 1 for i, area in enumerate(raster_areas): @@ -296,29 +264,14 @@ def visual_dynamics(M, data_path, raster_areas=['V1', 'V2', 'FEF']): ax.set_yticks(yticklocs) ax.set_xlabel('Time (s)', labelpad=-0.1) ax.set_xticks([t_min, t_min + 250., t_max]) - # ax.set_xticklabels([r'$3.$', r'$3.25$', r'$3.5$']) l = t_min/1000 m = (t_min + t_max)/2000 r = t_max/1000 ax.set_xticklabels([f'{l:.2f}', f'{m:.2f}', f'{r:.2f}']) - - # print("plotting Population rates") - - # rates = np.zeros((len(M.area_list), 8)) - # for i, area in enumerate(M.area_list): - # for j, pop in enumerate(M.structure[area][::-1]): - # rate = pop_rates[area][pop][0] - # if rate == 0.0: - # rate = 1e-5 - # if area == 'TH' and j > 3: # To account for missing layer 4 in TH - # rates[i][j + 2] = rate - # else: - # rates[i][j] = rate rates = np.zeros((len(areas_simulated), 8)) for i, area in enumerate(areas_simulated): for j, pop in enumerate(M.structure[area][::-1]): - # rate = pop_rates[area][pop][0] rate = pop_rates[area][pop] if rate == 0.0: rate = 1e-5 @@ -341,25 +294,10 @@ def visual_dynamics(M, data_path, raster_areas=['V1', 'V2', 'FEF']): ax.set_yticks(np.arange(1., len(M.structure['V1']) + 1., 1.)) ax.set_ylim((0., len(M.structure['V1']) + .5)) - # x_max = 220. x_max = 100. 
ax.set_xlim((-1., x_max)) ax.set_xlabel(r'Rate (spikes/s)', labelpad=-0.1) - # ax.set_xticks([0., 25., 50.]) ax.set_xticks([0., 25., 50., 75., 100.]) - - # print("plotting Synchrony") - - # syn = np.zeros((len(M.area_list), 8)) - # for i, area in enumerate(M.area_list): - # for j, pop in enumerate(M.structure[area][::-1]): - # value = corrcoeff[area][pop] - # if value == 0.0: - # value = 1e-5 - # if area == 'TH' and j > 3: # To account for missing layer 4 in TH - # syn[i][j + 2] = value - # else: - # syn[i][j] = value syn = np.zeros((len(areas_simulated), 8)) for i, area in enumerate(areas_simulated): @@ -367,7 +305,6 @@ def visual_dynamics(M, data_path, raster_areas=['V1', 'V2', 'FEF']): try: value = corrcoeff[area][pop] if value == 0.0: - # print(i, area, j, pop, value) value = 1e-5 except: value = 0.0 @@ -376,45 +313,22 @@ def visual_dynamics(M, data_path, raster_areas=['V1', 'V2', 'FEF']): syn[i][j + 2] = value else: syn[i][j] = value - # print(syn) syn = np.transpose(syn) masked_syn = np.ma.masked_where(syn == 0, syn) - # print(masked_syn) ax = axes['E'] - # d = ax.boxplot(np.transpose(syn), vert=False, - # patch_artist=True, whis=1.5, showmeans=True) d = ax.boxplot(np.transpose(masked_syn), vert=False, patch_artist=True, whis=1.5, showmeans=True) set_boxplot_props(d) - # ax.plot(np.mean(syn, axis=1), np.arange( - # 1., len(M.structure['V1']) + 1., 1.), 'x', color='k', markersize=3) ax.plot(np.mean(masked_syn, axis=1), np.arange( 1., len(M.structure['V1']) + 1., 1.), 'x', color='k', markersize=3) ax.set_yticklabels(population_labels[::-1], size=8) ax.set_yticks(np.arange(1., len(M.structure['V1']) + 1., 1.)) ax.set_ylim((0., len(M.structure['V1']) + .5)) - # ax.set_xticks(np.arange(0.0, 0.601, 0.2)) - # ax.set_xticks([0., 0.005, 0.02]) ax.set_xlabel('Correlation coefficient', labelpad=-0.1) - # ax.set_xlabel('Synchrony', labelpad=-0.1) - - - # print("plotting Irregularity") - - # LvR = np.zeros((len(M.area_list), 8)) - # for i, area in enumerate(M.area_list): - # for j, pop in enumerate(M.structure[area][::-1]): - # value = pop_LvR[area][pop] - # if value == 0.0: - # value = 1e-5 - # if area == 'TH' and j > 3: # To account for missing layer 4 in TH - # LvR[i][j + 2] = value - # else: - # LvR[i][j] = value LvR = np.zeros((len(areas_simulated), 8)) for i, area in enumerate(areas_simulated): @@ -456,8 +370,6 @@ def visual_dynamics(M, data_path, raster_areas=['V1', 'V2', 'FEF']): axes['G'].set_xticks([]) axes['G'].set_yticks([]) - - # print("Plotting rate time series") pos = axes['G'].get_position() ax = [] h = pos.y1 - pos.y0 @@ -468,11 +380,8 @@ def visual_dynamics(M, data_path, raster_areas=['V1', 'V2', 'FEF']): colors = ['0.5', '0.3', '0.0'] - # t_min = 500. - # t_max = 10500. t_min = 500. 
t_max = t_sim - # time = np.arange(500., t_max) time = np.arange(500, t_max) for i, area in enumerate(raster_areas[::-1]): ax[i].spines['right'].set_color('none') @@ -484,7 +393,6 @@ def visual_dynamics(M, data_path, raster_areas=['V1', 'V2', 'FEF']): np.logical_and(time >= t_min, time < t_max))] ax[i].plot(time, binned_spikes, color=colors[0], label=area) if rate_auto_kernel == True: - # rate = rate_time_series_auto_kernel[area] rate = rate_time_series_auto_kernel[area][np.where( np.logical_and(time >= t_min, time < t_max))] ax[i].plot(time, rate, color=colors[2], label=area) @@ -497,14 +405,11 @@ def visual_dynamics(M, data_path, raster_areas=['V1', 'V2', 'FEF']): ax[i].set_xticks([]) ax[i].set_yticks([0., 30.]) else: - # ax[i].set_xticks([1000., 5000., 10000.]) ax[i].set_xticks([t_min, (t_min+t_max)/2, t_max]) l = t_min/1000 m = (t_min + t_max)/2000 r = t_max/1000 - # ax[i].set_xticklabels([r'$1.$', r'$5.$', r'$10.$']) ax[i].set_xticklabels([f'{l:.2f}', f'{m:.2f}', f'{r:.2f}']) - # ax[i].set_yticks([0., 5.]) ax[i].set_yticks([0., 30.]) if i == 1: ax[i].set_ylabel(r'Rate (spikes/s)') @@ -512,6 +417,4 @@ def visual_dynamics(M, data_path, raster_areas=['V1', 'V2', 'FEF']): ax[0].set_xlabel('Time (s)', labelpad=-0.05) fig.subplots_adjust(left=0.05, right=0.95, top=0.95, - bottom=0.075, wspace=1., hspace=.5) - - # pl.savefig('Fig5_ground_state.eps') \ No newline at end of file + bottom=0.075, wspace=1., hspace=.5) \ No newline at end of file diff --git a/figures/MAM2EBRAINS/M2E_visualize_fc.py b/figures/MAM2EBRAINS/M2E_visualize_fc.py index f9a8caa0282137d838f6bd2a1cf9d5653c496786..749144ed1a5b08610f9c30d27dd481017b9a26fa 100644 --- a/figures/MAM2EBRAINS/M2E_visualize_fc.py +++ b/figures/MAM2EBRAINS/M2E_visualize_fc.py @@ -3,22 +3,16 @@ import copy import json import numpy as np import os -# import pyx import sys sys.path.append('./figures/Schmidt2018_dyn') -# from helpers import original_data_path, infomap_path -from multiarea_model.multiarea_model import MultiAreaModel from plotcolors import myred, myblue import matplotlib.pyplot as pl from matplotlib import gridspec -from matplotlib import rc_file -# rc_file('./figures/Schmidt2018_dyn/plotstyle.rc') sys.path.append('./figures/Schmidt2018') -# from graph_helpers import apply_map_equation from M2E_compute_fc import compute_fc from M2E_compute_louvain_communities import compute_communities @@ -27,23 +21,15 @@ cmap = pl.cm.coolwarm cmap = cmap.from_list('mycmap', [myblue, 'white', myred], N=256) cmap2 = cmap.from_list('mycmap', ['white', myred], N=256) -# """ -# Create MultiAreaModel instance to have access to data structures -# """ -# conn_params = {'g': -11., -# 'fac_nu_ext_TH': 1.2, -# 'fac_nu_ext_5E': 1.125, -# 'fac_nu_ext_6E': 1.41666667, -# 'av_indegree_V1': 3950., -# 'K_stable': '../SchueckerSchmidt2017/K_prime_original.npy'} -# network_params = {'N_scaling': 1., -# 'K_scaling': 1., -# 'connection_params': conn_params} -# M = MultiAreaModel(network_params) - def zero_diagonal(matrix): """ Return copy of a matrix with diagonal set to zero. + + Parameters: + - matrix (ndarray): Matrix to copy. + + Returns: + - M (ndarray): Matrix with diagonal set to zero. """ M = copy.copy(matrix) for i in range(M.shape[0]): @@ -54,6 +40,17 @@ def zero_diagonal(matrix): def matrix_plot(M, ax, matrix, index, vlim, pos=None): """ Plot matrix into matplotlib axis sorted according to index. + + Parameters: + - M (MultiAreaModel): Object containing simulation data. + - ax (matplotlib axis): Axis to plot matrix into. + - matrix (ndarray): Matrix to plot. 
+ - index (int): Index of matrix to plot. + - vlim (float): Value limit. + - pos (tuple): Position of matrix in figure. + + Returns: + None """ ax.yaxis.set_ticks_position('none') ax.xaxis.set_ticks_position('none') @@ -70,7 +67,6 @@ def matrix_plot(M, ax, matrix, index, vlim, pos=None): vmax = vlim vmin = -vlim - # , norm = LogNorm(1e-8,1.)) im = ax.pcolormesh(matrix[index][:, index][::-1], cmap=cmap, vmin=vmin, vmax=vmax) @@ -78,73 +74,46 @@ def matrix_plot(M, ax, matrix, index, vlim, pos=None): cb = pl.colorbar(im, ax=ax, ticks=cbticks, fraction=0.046) cb.ax.tick_params(labelsize=14) ax.set_yticks([]) - - # if pos != (0, 2): - # cb.remove() - # else: - # ax.text(1.25, 0.52, r'FC', rotation=90, - # transform=ax.transAxes, size=14) ax.set_xticks([]) - ax.set_xlabel('Cortical area', size=14) ax.set_ylabel('Cortical area', size=14) def visualize_fc(M, data_path): + """ + Visualize functional connectivity. + + Parameters: + - M ((MultiAreaModel)): Object containing simulation data. + - data_path (str): Path to the data directory + + Returns: + None + """ label = M.simulation.label - # compute functional connectivity + # Compute functional connectivity compute_fc(M, data_path, label) compute_communities(M, data_path, label) """ Figure layout """ - # width = 7.0866 - # n_horz_panels = 2. - n_horz_panels = 1. - # n_vert_panels = 3. - n_vert_panels = 2. - fig = pl.figure(figsize=(10, 4)) fig.suptitle('Simulated functional connectivity (left) and FC of macaque resting-state fMRI', fontsize=17, x=0.5, y=1.1) axes = {} - # gs1 = gridspec.GridSpec(1, 3) gs1 = gridspec.GridSpec(1, 2) - # gs1.update(left=0.05, right=0.95, top=0.95, - # bottom=0.52, wspace=0., hspace=0.4) gs1.update(left=0.05, right=0.95, top=1, bottom=0, wspace=0.3, hspace=0) axes['A'] = pl.subplot(gs1[:1, :1]) axes['B'] = pl.subplot(gs1[:1, 1:2]) - # axes['C'] = pl.subplot(gs1[:, 2]) - - # gs1 = gridspec.GridSpec(1, 1) - # gs1.update(left=0.18, right=0.8, top=0.35, - # wspace=0., bottom=0.13, hspace=0.2) - # axes['D'] = pl.subplot(gs1[:, :]) - - # gs1 = gridspec.GridSpec(1, 1) - # gs1.update(left=0.165, right=0.6, top=0.04, - # wspace=0., bottom=0.0, hspace=0.2) - # axes['E'] = pl.subplot(gs1[:, :]) - # gs1 = gridspec.GridSpec(1, 1) - # gs1.update(left=0.688, right=0.95, top=0.04, - # wspace=0., bottom=0.0, hspace=0.2) - # axes['F'] = pl.subplot(gs1[:, :]) - - # for label in ['A', 'B', 'C', 'D', 'E', 'F']: for label in ['A', 'B']: if label in ['E', 'F']: label_pos = [-0.08, 1.01] else: label_pos = [-0.1, 1.01] - # pl.text(label_pos[0], label_pos[1], r'\bfseries{}' + label, - # fontdict={'fontsize': 16, 'weight': 'bold', - # 'horizontalalignment': 'left', 'verticalalignment': - # 'bottom'}, transform=axes[label].transAxes) pl.text(label_pos[0], label_pos[1], label, fontdict={'fontsize': 16, 'weight': 'bold', 'horizontalalignment': 'left', 'verticalalignment': @@ -154,21 +123,9 @@ def visualize_fc(M, data_path): axes[label].yaxis.set_ticks_position("left") axes[label].xaxis.set_ticks_position("bottom") - # for label in ['E', 'F']: - # axes[label].spines['right'].set_color('none') - # axes[label].spines['top'].set_color('none') - # axes[label].spines['left'].set_color('none') - # axes[label].spines['bottom'].set_color('none') - - # axes[label].yaxis.set_ticks_position("none") - # axes[label].xaxis.set_ticks_position("none") - # axes[label].set_yticks([]) - # axes[label].set_xticks([]) - """ Load data """ - # Load experimental functional connectivity func_conn_data = {} with open('./figures/Schmidt2018_dyn/Fig8_exp_func_conn.csv', 'r') 
as f: @@ -190,39 +147,9 @@ def visualize_fc(M, data_path): for j, area2 in enumerate(M.area_list): exp_FC[i][j] = func_conn_data[area1][area2] - # fn = './figures/Schmidt2018_dyn/FC_exp_communities.json' - # with open(fn, 'r') as f: - # part_exp = json.load(f) - # part_exp_list = [part_exp[area] for area in M.area_list] - - """ Simulation data """ - # LOAD_ORIGINAL_DATA = True - - # cc_weights_factor = [1.0, 1.4, 1.5, 1.6, 1.7, 1.75, 1.8, 2., 2.1, 2.5, 1.9] - - # if LOAD_ORIGINAL_DATA: - # labels = ['33fb5955558ba8bb15a3fdce49dfd914682ef3ea', - # '783cedb0ff27240133e3daa63f5d0b8d3c2e6b79', - # '380856f3b32f49c124345c08f5991090860bf9a3', - # '5a7c6c2d6d48a8b687b8c6853fb4d98048681045', - # 'c1876856b1b2cf1346430cf14e8d6b0509914ca1', - # 'a30f6fba65bad6d9062e8cc51f5483baf84a46b7', - # '1474e1884422b5b2096d3b7a20fd4bdf388af7e0', - # 'f18158895a5d682db5002489d12d27d7a974146f', - # '08a3a1a88c19193b0af9d9d8f7a52344d1b17498', - # '5bdd72887b191ec22a5abcc04ca4a488ea216e32', - # '99c0024eacc275d13f719afd59357f7d12f02b77'] - # data_path = original_data_path - # label_plot = labels[-1] # chi=1.9 - # else: - # from network_simulations import init_models - # from config import data_path - # models = init_models('Fig8') - # labels = [M.simulation.label for M in models] - labels = [M.simulation.label] sim_FC = {} @@ -233,14 +160,6 @@ def visualize_fc(M, data_path): 'functional_connectivity_synaptic_input.npy') sim_FC[label] = np.load(fn) - # sim_FC_bold = {} - # for label in [label_plot]: - # fn = os.path.join(data_path, - # label, - # 'Analysis', - # 'functional_connectivity_bold_signal.npy') - # sim_FC_bold[label] = np.load(fn) - label = M.simulation.label fn = os.path.join(data_path, label, @@ -262,22 +181,11 @@ def visualize_fc(M, data_path): Plotting """ ax = axes['A'] - # label = label_plot - matrix_plot(M, ax, zero_diagonal(sim_FC[label]), part_sim_index, 1., pos=(0, 0)) - # ax = axes['B'] - # label = label_plot - - # matrix_plot(ax, zero_diagonal(sim_FC_bold[label]), - # part_sim_index, 1., pos=(0, 0)) - - # ax = axes['C'] ax = axes['B'] - # matrix_plot(M, ax, zero_diagonal(exp_FC), - # part_sim_index, 1., pos=(0, 2)) matrix_plot(M, ax, zero_diagonal(exp_FC), part_sim_index, 1., pos=(0, 0)) @@ -286,131 +194,3 @@ def visualize_fc(M, data_path): for area in areas[1:]: area_string += ' ' area_string += area - - # # pl.text(0.02, 0.45, r'\bfseries{}Order of cortical areas:', transform=fig.transFigure, fontsize=12) - # pl.text(0.02, 0.3, 'Order of cortical areas:', transform=fig.transFigure, fontsize=17) - # pl.text(0.02, 0.25, area_string, - # transform=fig.transFigure, fontsize=15) - - - # ax = axes['D'] - # ax.spines['right'].set_color('none') - # ax.spines['top'].set_color('none') - # ax.yaxis.set_ticks_position("left") - # ax.xaxis.set_ticks_position("bottom") - - # cc_list = [] - # for i, label in enumerate(labels): - # cc = np.corrcoef(zero_diagonal(sim_FC[label]).flatten(), - # zero_diagonal(exp_FC).flatten())[0][1] - # cc_list.append(cc) - - - # ax.plot(cc_weights_factor[1:], cc_list[1:], '.', ms=10, - # markeredgecolor='none', label='Sim. vs. Exp.', color='k') - # ax.plot(cc_weights_factor[0], cc_list[0], '^', ms=5, - # markeredgecolor='none', label='Sim. vs. Exp.', color='k') - - # label = label_plot - # cc_bold = np.corrcoef(zero_diagonal(sim_FC_bold[label]).flatten(), - # zero_diagonal(exp_FC).flatten())[0][1] - # ax.plot([1.9], cc_bold, '.', - # ms=10, markeredgecolor='none', color=myred) - - # # Correlation between exp. 
FC and structural connectivity - # # Construct the structural connectivity as the matrix of relative - # conn_matrix = np.zeros((len(M.area_list), len(M.area_list))) - # for i, area1 in enumerate(M.area_list): - # s = np.sum(list(M.K_areas[area1].values())) - # for j, area2 in enumerate(M.area_list): - # value = M.K_areas[area1][area2] / s - # conn_matrix[i][j] = value - - - # cc = np.corrcoef(zero_diagonal(conn_matrix).flatten(), - # zero_diagonal(exp_FC).flatten())[0][1] - - # # Formatting - # ax.hlines(cc, -0.1, 2.5, linestyle='dashed', color='k') - # ax.set_xlabel(r'Cortico-cortical weight factor $\chi$', - # labelpad=-0.1, size=16) - # ax.set_ylabel(r'$r_{\mathrm{Pearson}}$', size=20) - # ax.set_xlim((0.9, 2.7)) - # ax.set_ylim((-0.1, 0.6)) - # ax.set_yticks([0., 0.2, 0.4]) - # ax.set_yticklabels([0., 0.2, 0.4], size=13) - # ax.set_xticks([1., 1.5, 2., 2.5]) - # ax.set_xticklabels([1., 1.5, 2., 2.5], size=13) - - - # """ - # Save figure - # """ - # pl.savefig('Fig8_interactions_mpl.eps') - - # """ - # We compare the clusters found in the functional connectivity to - # clusters found in the structural connectivity of the network. To - # detect the clusters in the structural connectivity, we repeat the the - # procedure from Fig. 7 of Schmidt et al. 'Multi-scale account of the - # network structure of macaque visual cortex' and apply the map equation - # method (see Materials & Methods in Schmidt et al. 2018) to the - # structural connectivity of the network. - - # This requires installation of the infomap executable and defining the - # path to the executable. - # """ - # fn = 'Fig8_structural_clusters' - # modules, modules_areas, index = apply_map_equation(conn_matrix, - # M.area_list, - # filename=fn, - # infomap_path=infomap_path) - # with open('{}.map'.format(fn), 'r') as f: - # line = '' - # while '*Nodes' not in line: - # line = f.readline() - # line = f.readline() - # map_equation = [] - # map_equation_areas = [] - # while "*Links" not in line: - # map_equation.append(int(line.split(':')[0])) - # map_equation_areas.append(line.split('"')[1]) - # line = f.readline() - # f.close() - # map_equation = np.array(map_equation) - # map_equation_dict = dict( - # list(zip(map_equation_areas, map_equation))) - - # # To create the alluvial input, we rename the simulated clusters - # # 1S --> 2S, 2S ---> 1S for visual purposes - # f = open('alluvial_input.txt', 'w') - # f.write("area,map_equation, louvain, louvain_exp\n") - # for i, area in enumerate(M.area_list): - # if part_sim_list[i] == 1: - # psm = 2 - # elif part_sim_list[i] == 0: - # psm = 1 - # s = '{}, {}, {}, {}'.format(area, - # map_equation_dict[area], - # psm, - # part_exp_list[i]) - # f.write(s) - # f.write('\n') - # f.close() - - # # The alluvial plot cannot be created with a script. To reproduce the alluvial - # # plot, go to http://app.rawgraphs.io/ and proceed from there. 
- - # """ - # Merge with alluvial plot - # """ - # c = pyx.canvas.canvas() - # c.fill(pyx.path.rect(0, 0., 17.9, 17.), [pyx.color.rgb.white]) - - # c.insert(pyx.epsfile.epsfile(0., 6., "Fig8_interactions_mpl.eps", width=17.9)) - # c.insert(pyx.epsfile.epsfile( - # 0.1, -1., "Fig8_alluvial_struct_sim.eps", width=11.)) - # c.insert(pyx.epsfile.epsfile( - # 11.2, -0.6, "Fig8_alluvial_sim_exp.eps", width=6.5)) - - # c.writeEPSfile("Fig8_interactions.eps") diff --git a/figures/MAM2EBRAINS/M2E_visualize_interareal_connectivity.py b/figures/MAM2EBRAINS/M2E_visualize_interareal_connectivity.py index d18b7e05dc64e8137be2012a878ab997f73247b5..bb111c2cd7535efe4a7577aa2cf739e9154ad568 100644 --- a/figures/MAM2EBRAINS/M2E_visualize_interareal_connectivity.py +++ b/figures/MAM2EBRAINS/M2E_visualize_interareal_connectivity.py @@ -1,7 +1,5 @@ -import json import numpy as np import matplotlib.pyplot as pl -import os import sys sys.path.append('./figures/Schmidt2018') @@ -10,56 +8,46 @@ from helpers import area_list, datapath from matplotlib import gridspec from matplotlib.colors import LogNorm from matplotlib.ticker import FixedLocator -from matplotlib import rc_file from multiarea_model import MultiAreaModel -from plotcolors import myblue -from scipy import stats - -# rc_file('plotstyle.rc') def visualize_interareal_connectivity(M): - # full-scale model + """ + Visualize inter-area connectivity for a comparison of the full-scale model and the down-scaled model + + Parameters: + - M ((MultiAreaModel)): Object containing simulation data. + + Returns: + None + """ + # Full-scale model M_full_scale = MultiAreaModel({}) """ Figure layout """ - # nrows = 2 nrows = 1 ncols = 2 - # width = 6.8556 width = 12 panel_wh_ratio = 0.7 * (1. + np.sqrt(5)) / 2. # golden ratio height = width / panel_wh_ratio * float(nrows) / ncols - # print(width, height) pl.rcParams['figure.figsize'] = (width, height) fig = pl.figure() fig.suptitle('Area-level connectivity of the full-scale and down-scaled MAM expressed as relative indegrees for each target area', fontsize=15, x=0.5, y=1.05) axes = {} - # gs1 = gridspec.GridSpec(2, 2) gs1 = gridspec.GridSpec(1, 2) - # gs1.update(left=0.06, right=0.95, top=0.95, bottom=0.1, wspace=0.1, hspace=0.3) gs1.update(left=0.1, right=0.95, top=0.95, bottom=0.1, wspace=0.3, hspace=0.3) - # axes['A'] = pl.subplot(gs1[:1, :1]) - # axes['B'] = pl.subplot(gs1[:1, 1:2]) axes['B'] = pl.subplot(gs1[:1, :1]) - # axes['D'] = pl.subplot(gs1[:1, 2:]) axes['D'] = pl.subplot(gs1[:1, 1:2]) - # pos = axes['A'].get_position() pos2 = axes['D'].get_position() - # axes['C'] = pl.axes([pos.x0 + 0.01, pos2.y0, pos.x1 - pos.x0 - 0.025, 0.23]) - - # print(pos.x1 - pos.x0 - 0.025) - # labels = ['A', 'B', 'C', 'D'] labels = ['B', 'D'] labels_display = ['Full-scale model', 'Down-scaled model'] - # for label in labels: for i in range(len(labels)): label = labels[i] label_display = labels_display[i] @@ -67,113 +55,11 @@ def visualize_interareal_connectivity(M): label_pos = [-0.045, 1.18] else: label_pos = [-0.2, 1.04] - # pl.text(label_pos[0], label_pos[1], r'\bfseries{}' + label, - # fontdict={'fontsize': 10, 'weight': 'bold', - # 'horizontalalignment': 'left', 'verticalalignment': - # 'bottom'}, transform=axes[label].transAxes) pl.text(label_pos[0], label_pos[1], label_display, fontdict={'fontsize': 12, 'weight': 'bold', 'horizontalalignment': 'left', 'verticalalignment': 'bottom'}, transform=axes[label].transAxes) -# """ -# Load data -# """ -# # M = MultiAreaModel({}) -# M_full_scale = MultiAreaModel({}) - -# datapath = 
'./multiarea_model/data_multiarea/' -# with open(os.path.join(datapath, 'viscortex_processed_data.json'), 'r') as f: -# proc = json.load(f) -# with open(os.path.join(datapath, 'viscortex_raw_data.json'), 'r') as f: -# raw = json.load(f) - -# FLN_Data_FV91 = proc['FLN_Data_FV91'] - -# cocomac_data = raw['cocomac_data'] -# median_distance_data = raw['median_distance_data'] - -# cocomac = np.zeros((32, 32)) -# conn_matrix = np.zeros((32, 32)) -# for i, area1 in enumerate(area_list[::-1]): -# for j, area2 in enumerate(area_list): -# # if M.K_areas[area1][area2] > 0. and area2 in cocomac_data[area1]: -# if M_full_scale.K_areas[area1][area2] > 0. and area2 in cocomac_data[area1]: -# cocomac[i][j] = 1. -# if area2 in FLN_Data_FV91[area1]: -# conn_matrix[i][j] = FLN_Data_FV91[area1][area2] - -# """ -# Panel A: CoCoMac Data -# """ -# ax = axes['A'] -# # ax.yaxis.set_ticks_position("left") -# # ax.xaxis.set_ticks_position("bottom") - -# ax.set_aspect(1. / ax.get_data_ratio()) -# ax.yaxis.set_ticks_position("none") -# ax.xaxis.set_ticks_position("none") - -# masked_matrix = np.ma.masked_values(cocomac, 0.0) -# cmap = pl.cm.binary -# cmap.set_bad('w', 1.0) - -# x = np.arange(0, len(area_list) + 1) -# y = np.arange(0, len(area_list[::-1]) + 1) -# X, Y = np.meshgrid(x, y) - -# ax.set_xticks([i + 0.5 for i in np.arange(0, len(area_list) + 1, 1)]) -# ax.set_xticks([i + 0.5 for i in np.arange(0, len(area_list), 1)]) -# # ax.set_xticklabels(area_list, rotation=90, size=6.) -# ax.set_xticklabels(area_list, rotation=90, size=10.) - -# # ax.set_yticks([i + 0.5 for i in np.arange(0, len(area_list) + 1, 1)]) -# ax.set_yticks([i + 0.5 for i in np.arange(0, len(area_list), 1)]) -# # ax.set_yticklabels(area_list[::-1], size=6.) -# ax.set_yticklabels(area_list[::-1], size=6.) - -# ax.set_ylabel('Target area') -# ax.set_xlabel('Source area') - -# im = ax.pcolormesh(masked_matrix, cmap=cmap, -# edgecolors='None', vmin=0., vmax=1.) - -# t = FixedLocator([]) -# cbar = pl.colorbar(im, ticks=t, fraction=0.046, ax=ax) -# cbar.set_alpha(0.) - # cbar.remove() - -# """ -# Panel B: Data from Markov et al. (2014) "A weighted and directed -# interareal connectivity matrix for macaque cerebral cortex." -# Cerebral Cortex, 24(1), 17–36. -# """ -# ax = axes['B'] -# ax.set_aspect(1. / ax.get_data_ratio()) -# ax.yaxis.set_ticks_position("none") -# ax.xaxis.set_ticks_position("none") - -# masked_matrix = np.ma.masked_values(conn_matrix, 0.0) -# cmap = pl.get_cmap('inferno') -# cmap.set_bad('w', 1.0) - -# x = np.arange(0, len(area_list) + 1) -# y = np.arange(0, len(area_list[::-1]) + 1) -# X, Y = np.meshgrid(x, y) - -# ax.set_xticks([i + 0.5 for i in np.arange(0, len(area_list) + 1, 1)]) -# ax.set_xticklabels(area_list, rotation=90, size=6.) - -# ax.set_yticks([i + 0.5 for i in np.arange(0, len(area_list) + 1, 1)]) -# ax.set_yticklabels(area_list[::-1], size=6.) - -# im = ax.pcolormesh(masked_matrix, cmap=cmap, -# edgecolors='None', norm=LogNorm(vmin=1e-6, vmax=1.)) - -# t = FixedLocator([1e-6, 1e-4, 1e-2, 1]) -# cbar = pl.colorbar(im, ticks=t, fraction=0.046, ax=ax) -# cbar.set_alpha(0.) - """ Panel B: Interareal connectivity of full-scaling multi-area model """ @@ -190,7 +76,7 @@ def visualize_interareal_connectivity(M): ax.set_aspect(1. 
/ ax.get_data_ratio()) masked_matrix_full_scale = np.ma.masked_values(conn_matrix_full_scale, 0.0) - # cmap = pl.get_cmap('inferno') + cmap = pl.get_cmap('YlOrBr') cmap.set_bad('w', 1.0) @@ -198,18 +84,12 @@ def visualize_interareal_connectivity(M): y = np.arange(0, len(area_list[::-1]) + 1) X, Y = np.meshgrid(x, y) - # ax.set_xticks([i + 0.5 for i in np.arange(0, len(area_list) + 1, 1)]) ax.set_xticks([i + 0.5 for i in np.arange(0, len(area_list), 1)]) - # ax.set_xticklabels(area_list, rotation=90, size=6.) ax.set_xticklabels(area_list, rotation=90, size=10.) - # ax.set_yticks([i + 0.5 for i in np.arange(0, len(area_list) + 1, 1)]) ax.set_yticks([i + 0.5 for i in np.arange(0, len(area_list), 1)]) - # ax.set_yticklabels(area_list[::-1], size=6.) ax.set_yticklabels(area_list[::-1], size=10.) - # ax.set_ylabel('Target area') - # ax.set_xlabel('Source area') ax.set_ylabel('Target area', fontsize=15) ax.set_xlabel('Source area', fontsize=15) im = ax.pcolormesh(masked_matrix_full_scale, cmap=cmap, @@ -219,93 +99,6 @@ def visualize_interareal_connectivity(M): cbar = pl.colorbar(im, ticks=t, fraction=0.046, ax=ax) cbar.set_alpha(0.) - # """ - # Panel C: Exponential decay of FLN with distance - # """ - # FLN_values_FV91 = np.array([]) - # distances_FV91 = np.array([]) - - # for target_area in FLN_Data_FV91: - # for source_area in FLN_Data_FV91[target_area]: - # if target_area in median_distance_data and source_area in median_distance_data: - # if FLN_Data_FV91[target_area][source_area]: - # FLN_values_FV91 = np.append(FLN_values_FV91, FLN_Data_FV91[ - # target_area][source_area]) - # distances_FV91 = np.append(distances_FV91, median_distance_data[ - # target_area][source_area]) - - # # Linear fit of distances vs. log FLN - # print("\n \n Linear fit to logarithmic values") - # gradient, intercept, r_value, p_value, std_err = stats.linregress( - # distances_FV91, np.log(FLN_values_FV91)) - # print("Raw parameters: ", gradient, intercept) - # print("Transformed parameters: ", -gradient, np.exp(intercept)) - # print('r_value**2', r_value ** 2) - # print('p_value', p_value) - # print('std_err', std_err) - - # ax = axes['C'] - # ax.yaxis.set_ticks_position("left") - # ax.xaxis.set_ticks_position("bottom") - - # ax.yaxis.set_ticks_position("left") - # ax.xaxis.set_ticks_position("bottom") - - # ax.spines['right'].set_color('none') - # ax.spines['top'].set_color('none') - # ax.yaxis.set_ticks_position("left") - # ax.xaxis.set_ticks_position("bottom") - - # ax.plot(distances_FV91, np.log10(FLN_values_FV91), '.', color=myblue) - # x = np.arange(np.min(distances_FV91), np.max(distances_FV91), 1) - # ax.plot(x, (intercept + gradient * x) / np.log(10), linewidth=2.0, - # color='Black', label='Linear regression fit') - - # ax.set_xlabel('Distance (mm)', labelpad=7) - # ax.set_ylabel(r'$\log(FLN)$') - # ax.set_yticks([-6, -4, -2, 0]) - - # print("log fit") - # print(np.corrcoef(gradient * distances_FV91 + intercept, np.log(FLN_values_FV91))[0][1]) - - # """ - # Panel D: Resulting connectivity matrix - # """ - # conn_matrix = np.zeros((32, 32)) - # for i, area1 in enumerate(area_list[::-1]): - # for j, area2 in enumerate(area_list): - # conn_matrix[i][j] = M.K_areas[area1][ - # area2] / np.sum(list(M.K_areas[area1].values())) - - # ax = axes['D'] - # ax.yaxis.set_ticks_position("none") - # ax.xaxis.set_ticks_position("none") - - # ax.set_aspect(1. 
/ ax.get_data_ratio()) - - # masked_matrix = np.ma.masked_values(conn_matrix, 0.0) - # cmap = pl.get_cmap('inferno') - # cmap.set_bad('w', 1.0) - - # x = np.arange(0, len(area_list) + 1) - # y = np.arange(0, len(area_list[::-1]) + 1) - # X, Y = np.meshgrid(x, y) - - # ax.set_xticks([i + 0.5 for i in np.arange(0, len(area_list) + 1, 1)]) - # ax.set_xticklabels(area_list, rotation=90, size=6.) - - # ax.set_yticks([i + 0.5 for i in np.arange(0, len(area_list) + 1, 1)]) - # ax.set_yticklabels(area_list[::-1], size=6.) - - # ax.set_ylabel('Target area') - # ax.set_xlabel('Source area') - # im = ax.pcolormesh(masked_matrix, cmap=cmap, - # edgecolors='None', norm=LogNorm(vmin=1e-6, vmax=1.)) - - # t = FixedLocator([1e-6, 1e-4, 1e-2, 1]) - # cbar = pl.colorbar(im, ticks=t, fraction=0.046, ax=ax) - # cbar.set_alpha(0.) - """ Panel D: Interareal connectivity of down-scaling multi-area model """ @@ -322,21 +115,16 @@ def visualize_interareal_connectivity(M): ax.set_aspect(1. / ax.get_data_ratio()) masked_matrix_down_scale = np.ma.masked_values(conn_matrix_down_scale, 0.0) - # cmap = pl.get_cmap('inferno') cmap.set_bad('w', 1.0) x = np.arange(0, len(area_list) + 1) y = np.arange(0, len(area_list[::-1]) + 1) X, Y = np.meshgrid(x, y) - # ax.set_xticks([i + 0.5 for i in np.arange(0, len(area_list) + 1, 1)]) ax.set_xticks([i + 0.5 for i in np.arange(0, len(area_list), 1)]) - # ax.set_xticklabels(area_list, rotation=90, size=6.) ax.set_xticklabels(area_list, rotation=90, size=10.) - # ax.set_yticks([i + 0.5 for i in np.arange(0, len(area_list) + 1, 1)]) ax.set_yticks([i + 0.5 for i in np.arange(0, len(area_list), 1)]) - # ax.set_yticklabels(area_list[::-1], size=6.) ax.set_yticklabels(area_list[::-1], size=10.) ax.set_ylabel('Target area', fontsize=15) @@ -346,9 +134,4 @@ def visualize_interareal_connectivity(M): t = FixedLocator([1e-6, 1e-4, 1e-2, 1]) cbar = pl.colorbar(im, ticks=t, fraction=0.046, ax=ax) - cbar.set_alpha(0.) - - # """ - # Save figure - # """ - # pl.savefig('Fig4_connectivity.eps') + cbar.set_alpha(0.) \ No newline at end of file diff --git a/figures/MAM2EBRAINS/M2E_visualize_time_ave_pop_rates.py b/figures/MAM2EBRAINS/M2E_visualize_time_ave_pop_rates.py index 54576bfa3615eb6202832b098f09b09f3ddefad9..973855e090c815a8425d6dae9bc86731e01baaee 100644 --- a/figures/MAM2EBRAINS/M2E_visualize_time_ave_pop_rates.py +++ b/figures/MAM2EBRAINS/M2E_visualize_time_ave_pop_rates.py @@ -1,49 +1,38 @@ import numpy as np import os import json - -from multiarea_model import Analysis import matplotlib.pyplot as plt -from matplotlib.colors import LogNorm -from matplotlib.ticker import FixedLocator def plot_time_averaged_population_rates(M, data_path, area_list=None, **keywords): """ Plot overview over time-averaged population rates encoded in colors with areas along x-axis and populations along y-axis. - Parameters - ---------- - area_list : list, optional - Specifies with areas are plotted in which order. - Default to None, leading to plotting of all areas ordered by architectural type. - output : {'pdf', 'png', 'eps'}, optional - If given, the function stores the plot to a file of the given format. + Parameters: + - M (MultiAreaModel): The simulation object. + - data_path (str): The path to the data directory. + - area_list (list): List of areas to plot. If None, all areas are plotted. + - **keywords: Additional keyword arguments. + + Returns: + - fig (object): The figure object. 
""" - - # with open(os.path.join(data_path, M.simulation.label, 'custom_params_{}'.format(M.simulation.label)), 'r') as f: - # sim_params = json.load(f) - # areas_simulated = sim_params['sim_params']['areas_simulated'] area_list = M.simulation.params["areas_simulated"] - # matrix = np.zeros((len(area_list), len(A.network.structure['V1']))) matrix = np.zeros((len(area_list), len(M.structure['V1']))) fig = plt.figure(figsize=(12, 3)) fig.suptitle('Time-averaged firing rate over simulated populations', fontsize=15, x=0.43) ax = fig.add_subplot(111) - # stationary firing rates + # Stationary firing rates fn = os.path.join(data_path, M.simulation.label, 'Analysis', 'pop_rates.json') with open(fn, 'r') as f: pop_rates = json.load(f) for i, area in enumerate(area_list): - # print(i, area) - # for j, pop in enumerate(A.network.structure_reversed['V1']): for j, pop in enumerate(M.structure['V1'][::-1]): if pop in M.structure[area]: - # rate = A.pop_rates[area][pop][0] rate = pop_rates[area][pop] if rate == 0.0: rate = 1e-5 # To distinguish zero-rate from non-existing populations @@ -51,42 +40,26 @@ def plot_time_averaged_population_rates(M, data_path, area_list=None, **keywords rate = np.nan matrix[i][j] = rate - # cm = plt.cm.jet - # cm = cm.from_list('mycmap', [(0., 64./255., 192./255.), # custom dark blue - # (0., 128./255., 192./255.), # custom light blue - # 'white', - # (245./255., 157./255., 115./255.), # custom light red - # (192./255., 64./255., 0.)], N=256) # custom dark red cm = plt.get_cmap('YlOrBr') cm.set_under('0.3') - # cm.set_bad('k') cm.set_bad('white') matrix = np.transpose(matrix) masked_matrix = np.ma.masked_where(np.isnan(matrix), matrix) ax.patch.set_hatch('x') - # im = ax.pcolormesh(masked_matrix, cmap=cm, edgecolors='None', norm=LogNorm( - # vmin=0.01, vmax=100.)) im = ax.pcolormesh(masked_matrix, cmap=cm, edgecolors='None', vmin=0) ax.set_xlim(0, matrix[0].size) x_index = np.arange(4.5, 31.6, 5.0) x_ticks = [int(a + 0.5) for a in x_index] - # y_index = list(range(len(A.network.structure['V1']))) y_index = list(range(len(M.structure['V1']))) y_index = [a + 0.5 for a in y_index] - # ax.set_xticks(x_index) ax.set_xticks([i + 0.5 for i in np.arange(0, len(area_list), 1)]) - # ax.set_xticklabels(x_ticks) ax.set_xticklabels(area_list, rotation=90, size=10.) 
ax.set_yticks(y_index) - # ax.set_yticklabels(A.network.structure_reversed['V1']) - # ax.set_yticklabels(M.structure['V1'][::-1]) ax.set_yticklabels(['6I', '6E', '5I', '5E', '4I', '4E', '2/3I', '2/3E']) ax.set_ylabel('Population', size=13) ax.set_xlabel('Area index', size=13) - # t = FixedLocator([0.01, 0.1, 1., 10., 100.]) - # t = FixedLocator([0, 10, 20, 30, 40, 50]) # Iterate over the data and add 'X' for masked cells for i in range(masked_matrix.shape[0]): @@ -94,12 +67,9 @@ def plot_time_averaged_population_rates(M, data_path, area_list=None, **keywords if masked_matrix.mask[i, j]: ax.text(j + 0.5, i + 0.5, 'X', va='center', ha='center', color='black', fontsize=23) - # plt.colorbar(im, ticks=t) plt.colorbar(im) if 'output' in keywords: - # plt.savefig(os.path.join(A.output_dir, '{}_rates.{}'.format(A.simulation.label, - # keywords['output']))) plt.savefig(os.path.join(M.output_dir, '{}_rates.{}'.format(M.simulation.label, keywords['output']))) else: diff --git a/multi-area-model.ipynb b/multi-area-model.ipynb index 78acefdc3f3231b57be1d3045e61068409cf4377..c7017a7d8772f884ff568e556ce4864bd241b902 100644 --- a/multi-area-model.ipynb +++ b/multi-area-model.ipynb @@ -177,7 +177,7 @@ "|Parameter |Default value|Value range/options|Value assigned|Description|\n", "|:---------------:|:-----------:|:-----------------:|:------------:|:---------:|\n", "|scale_down_to |$1.0$ |$(0, 1.0]$ |$0.006$ |$^1$ |\n", - "|cc_weights_factor|$1.9$ |$[1.0, 2.5]$ |$1.9$ |$^2$ |\n", + "|cc_weights_factor|$1.9$ |$\\geq 1.0$ |$1.9$ |$^2$ |\n", "|areas_simulated |complete_area_list|Sublists of complete_area_list|complete_area_list|$^3$|\n", "|replace_non_simulated_areas|None|None, 'hom_poisson_stat', 'het_poisson_stat', 'het_current_nonstat'|'het_poisson_stat'|$^4$ |\n", "|g |$-11.0$|$\\leq -1.0$ |$-11.0$ |$^5$ |\n", diff --git a/multiarea_model/data_multiarea/Model.py b/multiarea_model/data_multiarea/Model.py index 8e5cc5e0fdcb3d4dc404f429fcfcf32fbd613823..2a7a0f88964c46c5e0d3d1ea93b0b25ca2a1f056 100644 --- a/multiarea_model/data_multiarea/Model.py +++ b/multiarea_model/data_multiarea/Model.py @@ -646,7 +646,7 @@ def compute_Model_params(out_label='', mode='default'): # if there is laminar information in CoCoMac, use it if Coco_Data[target_area][source_area]['source_pattern'] is not None: sp = np.array(Coco_Data[target_area][source_area][ - 'source_pattern'], dtype=np.float) + 'source_pattern'], dtype=float) # Manually determine SLN, based on CoCoMac: # from supragranular, then SLN=0.,
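Taken together, the patch converts the former MAM2EBRAINS analysis scripts into importable functions that all take the model instance M plus data_path (and, where needed, label or area) instead of reading sys.argv and re-creating MultiAreaModel({}) internally. The minimal usage sketch below shows how the refactored helpers chain together after a simulation; it is not part of the patch, the data_path value and the omitted model/simulation setup are assumptions, and only the helper names and signatures are taken from the modules above.

    # Minimal usage sketch (illustrative, not part of the patch). Assumes a completed simulation
    # whose spike files already exist under <data_path>/<M.simulation.label>/recordings, and that
    # the MAM2EBRAINS modules (figures/MAM2EBRAINS/) are on the import path.
    from M2E_firing_rate import mean_firing_rate, plot_firing_rate_over_areas
    from M2E_visualize_dynamics import visual_dynamics
    from M2E_visualize_fc import visualize_fc
    from M2E_visualize_interareal_connectivity import visualize_interareal_connectivity

    data_path = './simulations'   # assumed output directory; use the path the simulation wrote to
    # M: the MultiAreaModel instance used to run the simulation (creation and simulation omitted here)

    visualize_interareal_connectivity(M)       # full-scale vs. down-scaled area-level connectivity
    mean_firing_rate(M, data_path)             # time-averaged rates of all simulated populations
    plot_firing_rate_over_areas(M, data_path)  # instantaneous rates over all simulated areas
    visual_dynamics(M, data_path, raster_areas=['V1', 'V2', 'FEF'])  # rasters, rates, LvR, correlations
    visualize_fc(M, data_path)                 # simulated vs. experimental functional connectivity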