Commit d6d86853 authored by Maximilian Schmidt

Fix issues raised by albada

parent c8e7371b
Merge request: !1 Add all necessary files for the multi-area model
......@@ -82,7 +82,10 @@ basic analysis and plotting.
The `figures` folder contains a subfolder with all scripts necessary to produce
the figures from [1]. The scripts for [2] and [3] will follow soon.
If snakemake is installed, the figures can be produced by executing
`snakemake` in the respective folder.
`snakemake` in the respective folder:
cd figures/Schmidt2017/
snakemake
## Running a simulation
......
# Absolut path of repository
# Absolute path of repository
base_path = None
# Place to store simulations
data_path = None
# Template for jobscripts
# Template for job scripts
jobscript_template = """
# Instruction for the queuing system
......
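For orientation, a filled-in version of this config might look like the sketch below; all paths and the scheduler directives are placeholders to be adapted to the local machine and queuing system, not values from the repository.

base_path = '/home/user/multi-area-model'          # absolute path of the repository clone
data_path = '/scratch/user/multiarea_simulations'  # place to store simulation output
jobscript_template = """
#!/bin/bash
# Instructions for the queuing system (example SLURM directives; adapt as needed)
#SBATCH --job-name=multi-area-model
#SBATCH --time=01:00:00
"""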
......@@ -27,8 +27,6 @@ thicknesses = raw['laminar_thicknesses']
total_thickness_data = raw['total_thickness_data']
# calculate average relative layer thicknesses for each area
# where possible, compare with total thickness, with total thickness
# without L1, and compare L23-L4, L23-L5, L23-L6, L4-L5, L4-L6, and L5-L6
frac_of_total = {}
for area in list(thicknesses.keys()):
area_dict_total = {}
......@@ -168,7 +166,6 @@ for l in layers:
else:
rho[l] = np.zeros(8)
# ax.bar(x - 0.4, y)
bottom = np.zeros(8)
for l in layers[:]:
bottom += rho[l]
......@@ -196,12 +193,6 @@ ax.legend(loc=(0.035, 0.45), edgecolor='k')
##################################################
# total thicknesses from Barbas lab vs architectural type
# total cortical thicknesses from Barbas lab
barbas_array = np.zeros(len(area_list))
for i, area in enumerate(area_list):
barbas_array[i] = total_thickness_data[area] / 1000.
......@@ -224,7 +215,6 @@ line = gradient * log_density_array + intercept
ax.plot(log_density_array, line, '-', linewidth=1.5, color='k')
ax.set_xlabel('Log neuron density', labelpad=0.3)
ax.set_ylabel('Total thickness (mm)')
# ax.set_xlim((0, 9))
ax.set_xticks([4.7, 5.0])
ax.set_yticks(np.arange(1., 3., 0.5))
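# The fitted `line` above is a least-squares regression of total cortical
# thickness against log neuron density; `gradient` and `intercept` are
# presumably obtained from an analogous call to scipy.stats.linregress (not
# shown in this excerpt). A minimal self-contained sketch with made-up values
# (the *_example names are illustrative, not the script's variables):
from scipy import stats
log_density_example = np.array([4.7, 4.9, 5.1])   # log10 neuron densities
thickness_example = np.array([2.3, 1.8, 1.4])     # total thicknesses in mm
grad_ex, icept_ex, r_ex, p_ex, err_ex = stats.linregress(
    log_density_example, thickness_example)
line_example = grad_ex * log_density_example + icept_ex   # fitted regression line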
......@@ -246,7 +236,7 @@ for i, data in enumerate([frac1_of_total, frac23_of_total,
ax.plot(log_density_array, line, '-', linewidth=2.0, c=colors[i])
ax.set_xlabel('Log neuron density', labelpad=0.3)
ax.set_ylabel('Proportion of \n total thickness') # ,size=16.5)
ax.set_ylabel('Proportion of \n total thickness')
ax.set_xlim((4.6, 5.3))
ax.set_xticks([4.7, 5.0])
ax.set_yticks(np.arange(0., 0.7, 0.2))
......
......@@ -10,7 +10,7 @@ from multiarea_model import MultiAreaModel
from plotfuncs import create_fig
"""
Loading and procesing of data
Loading and processing of data
"""
M = MultiAreaModel({})
with open(os.path.join(datapath, 'viscortex_processed_data.json'), 'r') as f:
......
......@@ -114,7 +114,9 @@ cbar.set_alpha(0.)
cbar.remove()
"""
Panel B: Data from Markov et al. (2014)
Panel B: Data from Markov et al. (2014) "A weighted and directed
interareal connectivity matrix for macaque cerebral cortex."
Cerebral Cortex, 24(1), 17–36.
"""
ax = axes['B']
ax.set_aspect(1. / ax.get_data_ratio())
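# For reference: FLN (fraction of labeled neurons) expresses, for one injected
# target area, the number of retrogradely labeled neurons found in a given
# source area divided by the total number of labeled neurons across all
# extrinsic source areas. A minimal sketch with made-up counts (not the
# script's actual data structures):
labeled_counts_example = np.array([1200., 300., 45., 5.])             # neurons per source area
FLN_example = labeled_counts_example / labeled_counts_example.sum()   # fractions sum to 1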
......@@ -143,7 +145,7 @@ cbar = pl.colorbar(im, ticks=t, fraction=0.046, ax=ax)
cbar.set_alpha(0.)
"""
Panel C: Exponential distance rule of FLN
Panel C: Exponential decay of FLN with distance
"""
FLN_values_FV91 = np.array([])
distances_FV91 = np.array([])
......@@ -157,7 +159,7 @@ for target_area in FLN_Data_FV91:
distances_FV91 = np.append(distances_FV91, median_distance_data[
target_area][source_area])
# Linear Fit to log values"
# Linear fit of distances vs. log FLN
print("\n \n Linear fit to logarithmic values")
gradient, intercept, r_value, p_value, std_err = stats.linregress(
distances_FV91, np.log(FLN_values_FV91))
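# The exponential distance rule follows from this fit: the regression is
# log(FLN) ~ gradient * distance + intercept, so in linear space
# FLN ~ exp(intercept) * exp(gradient * distance), i.e. FLN decays
# exponentially with distance at rate -gradient (1/mm). A short sketch of
# recovering the fitted curve (the distance axis below is an added helper,
# not part of the original script):
distance_axis = np.linspace(distances_FV91.min(), distances_FV91.max(), 100)
FLN_fit_curve = np.exp(intercept) * np.exp(gradient * distance_axis)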
......@@ -221,7 +223,7 @@ ax.set_yticks([i + 0.5 for i in np.arange(0, len(area_list) + 1, 1)])
ax.set_yticklabels(area_list[::-1], size=6.)
ax.set_ylabel('Target area')
ax.set_xlabel('Source area') # , labelpad=-0.01)
ax.set_xlabel('Source area')
im = ax.pcolormesh(masked_matrix, cmap=cmap,
edgecolors='None', norm=LogNorm(vmin=1e-6, vmax=1.))
......
......@@ -176,7 +176,7 @@ corr_bb = round(np.corrcoef(
print("SLN Fit: R={}, Chi2={}".format(corr_bb, goodness_bb))
# Target - source relationship
# Target-source relationship
target_low_SLN_unweighted = np.zeros(6)
target_medium_SLN_unweighted = np.zeros(6)
......
......@@ -80,7 +80,7 @@ Nsyn = M.K_matrix[:, :-1] * Npost
outdegree = Nsyn / Npre
indegree = M.K_matrix[:, :-1]
plot_areas = ['V1', 'V2'] # , 'V2', 'MT', 'FEF', '46', 'TH']
plot_areas = ['V1', 'V2']
mask = create_mask(M.structure, target_areas=plot_areas,
source_areas=plot_areas,
extern=False)[:, :-1]
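# The relation used above: M.K_matrix holds indegrees (synapses per target
# neuron), so multiplying by the number of target neurons (Npost) gives total
# synapse counts, and dividing those by the number of source neurons (Npre)
# gives outdegrees. A toy check with made-up numbers (not the model's values):
K_example = 100.                                   # synapses per target neuron
Npost_example = 2000.                              # target neurons
Npre_example = 5000.                               # source neurons
Nsyn_example = K_example * Npost_example           # 200000 synapses in total
outdegree_example = Nsyn_example / Npre_example    # 40 synapses per source neuron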
......@@ -124,8 +124,6 @@ ax.set_ylim((0, new_size))
ax.set_xticks(np.arange(16) + 0.5)
ax.set_xticklabels(2 * population_labels, rotation=90, size=7.)
# ax.set_xticks(ticks)
# ax.set_xticklabels(plot_areas, rotation=90, size=7.)
ax.set_yticks(np.arange(16)[::-1] + 0.5)
ax.set_yticklabels(2 * population_labels, size=7.)
......@@ -155,9 +153,6 @@ ax_hist.set_xlim(cb.get_clim())
ax_hist.set_xticks([])
ax_hist.set_yticks([])
# ax.set_yticks(ticks_r)
# ax.set_yticklabels(plot_areas, size=7.)
ax = axes['B']
matrix = C_plot
......
......@@ -3,7 +3,7 @@ import copy
from helpers import area_list
import numpy as np
from config import base_path
from graph_helpers import perform_map_equation, modularity
from graph_helpers import apply_map_equation, modularity
from graph_helpers import create_graph, plot_clustered_graph
from multiarea_model import MultiAreaModel
from multiarea_model.multiarea_helpers import load_degree_data
......@@ -62,7 +62,7 @@ g_abs = create_graph(conn_matrix_abs, area_list)
"""
Determine clusters using the map equation.
"""
modules, modules_areas, index = perform_map_equation(
modules, modules_areas, index = apply_map_equation(
conn_matrix, area_list, filename='Model')
f = open('Model.map', 'r')
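# A hedged sketch of how the returned clustering might be scored, mirroring the
# null-model analysis further below; `modules[index]` is assumed to be the
# partition ordered consistently with `area_list`:
g_model = create_graph(conn_matrix, area_list)      # graph of the model connectivity
mod_model = modularity(g_model, modules[index])     # modularity of the map-equation partition
print("Modularity of the model partition: {}".format(mod_model))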
......@@ -108,11 +108,11 @@ mod_list = []
# In the connectivity matrix, rows == targets, columns == sources
# For each column, we shuffle the rows and therefore conserve the total outdegree of each area.
for ii in range(10):
for ii in range(1000):
for jj in range(32):
ind = np.extract(np.arange(32) != jj, np.arange(32))
null_model[:, jj][ind] = null_model[:, jj][ind][np.random.shuffle(ind)]
modules, modules_areas, index = perform_map_equation(null_model, area_list, filename='null')
modules, modules_areas, index = apply_map_equation(null_model, area_list, filename='null')
g_null = create_graph(null_model, area_list)
mod_list.append(modularity(g_null, modules[index]))
......
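A note on the column shuffle above: `np.random.shuffle` operates in place and returns `None`, which makes the one-line shuffle hard to read. A more explicit sketch of a single shuffle pass with the same intent (permuting each column's off-diagonal entries, thereby conserving each source area's total outdegree); `conn_matrix` is the matrix defined earlier in the script:

null_model_sketch = conn_matrix.copy()
for jj in range(null_model_sketch.shape[1]):
    # off-diagonal row indices of column jj
    ind = np.extract(np.arange(null_model_sketch.shape[0]) != jj,
                     np.arange(null_model_sketch.shape[0]))
    null_model_sketch[ind, jj] = np.random.permutation(null_model_sketch[ind, jj])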
......@@ -115,7 +115,6 @@ height = 5.25
print(width, height)
pl.rcParams['figure.figsize'] = (width, height)
# ################### Figure layout ####################
fine_tune_cols = 2
fig = pl.figure()
......@@ -154,8 +153,9 @@ and write out the linewidths for the corresponding tex plots to file.
1. Distinguish using SLN
"""
# Differential analysis of hierarchically directed connections (hierarchy_criterion=SLN)
hierarchy_criterion = 'SLN'
# Differential analysis of hierarchically directed connections
# FF = feedforward
# FB = feedback
FF_path_pairs = []
FB_path_pairs = []
lateral_path_pairs = []
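# For orientation: SLN (the fraction of supragranular labeled neurons) is
# commonly used to classify a projection as feedforward (high SLN), feedback
# (low SLN), or lateral (intermediate). A minimal sketch of such a
# classification; the threshold values are illustrative assumptions, not
# necessarily those used in this script:
def classify_by_SLN(sln, ff_threshold=0.65, fb_threshold=0.35):
    """Return 'FF', 'FB', or 'lateral' for a single SLN value."""
    if sln >= ff_threshold:
        return 'FF'
    elif sln <= fb_threshold:
        return 'FB'
    return 'lateral'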
......@@ -228,9 +228,8 @@ write_out_lw(fn, C)
"""
2. Use architectural types
"""
hierarchy_criterion = 'arch_types'
# HL = early to late
# LH = late to early
# HL = high to low type
# LH = low to high type
# HZ = equal arch. types
HL_path_pairs = []
LH_path_pairs = []
......@@ -344,7 +343,7 @@ layout_barplot_axes(axes['HL'])
C = Counter(HL_single_area_patterns)
counts = list(C.values())
# Manually define order of pairs to stick to originally published figure
# Define order of pairs consistently across panels
pairs = ['5E', '23E', '4I+23E', '6E']
counts = [C[p] for p in pairs]
# Add 0 value for last pair
......@@ -362,7 +361,7 @@ layout_barplot_axes(axes['HZ'])
C = Counter(HZ_single_area_patterns)
counts = list(C.values())
# Manually define order of pairs to stick to originally published figure
# Define order of pairs consistently across panels
pairs = ['5E', '23E', '4I+23E', '6E']
counts = [C[p] for p in pairs]
# Add 0 value for last pair
......@@ -380,7 +379,7 @@ layout_barplot_axes(axes['LH'])
C = Counter(LH_single_area_patterns)
counts = list(C.values())
# Manually define order of pairs to stick to originally published figure
# Define order of pairs consistently across panels
pairs = ['5E', '23E', '4I+23E', '6E']
counts = [C[p] for p in pairs]
......@@ -412,9 +411,6 @@ Finally, merge the tex-created figures into the main figure.
import pyx
c = pyx.canvas.canvas()
# c.text(0.4, 12.3,r'\textbf{\textsf{A}}')
# c.text(6., 12.3,r'\textbf{\textsf{B}}')
c.insert(pyx.epsfile.epsfile(0., 0., "Fig8_laminar_paths_mpl.eps", width=17.3))
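# Once all panels are inserted, the merged canvas can be written out with PyX.
# A hedged sketch (the output filename is an assumption, and the actual script
# may insert further panels before writing):
c.writeEPSfile("Fig8_laminar_paths")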
......
## Figures of Schmidt M, Bakker R, Hilgetag C-C, Diesmann M and van Albada SJ: "Multi-scale account of the network structure of macaque visual cortex"
This folder contains the scripts to reproduce all figures of Schmidt M, Bakker R, Hilgetag C-C, Diesmann M and van Albada SJ: "Multi-scale account of the network structure of macaque visual cortex", Brain Structure and Function (2017) [https://doi.org/10.1007/s00429-017-1554-4](https://doi.org/10.1007/s00429-017-1554-4)
Please note: Figures 2, 5, and 8 show slight deviations from the published figures in the paper. The published figures 2 and 5 lack few data points. This error was unintentionally introduced during the review process. Importantly, the presented fits are identical in the correct figures in this repository and in the manuscript. These deviations thus do not affect the scientific conclusions.
Please note: Figures 2, 5, and 8 show slight deviations from the published figures in the paper. Published Figures 2 and 5 miss a few data points. This error slipped in during the review process. Importantly, the presented fits are identical in the (correct) figures in this repository and in the manuscript. These deviations thus do not affect the scientific conclusions.
Note that the placement of areas in Figure 7 will deviate from the published figure, because their positions depend on the force-directed layout algorithm implemented in `igraph`, and `python-igraph` does not allow setting the random seed of this algorithm manually. This is a purely visual issue and does not affect the scientific content.
If snakemake is installed, the figures can be produced by executing
`snakemake`
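Individual figures can also be produced by passing the desired output file as a target to `snakemake` (for example `snakemake Fig2.eps`; this file name is only illustrative, the actual target names are defined in the `Snakefile` of this folder).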
......@@ -5,9 +5,9 @@ import numpy as np
import os
def perform_map_equation(graph_matrix, node_list, filename='', infomap_path=None):
def apply_map_equation(graph_matrix, node_list, filename='', infomap_path=None):
"""
Perform the map equation of
Apply the map equation algorithm of
Rosvall M, Axelsson D, Bergstrom CT (2009) The map equation.
Eur Phys J Spec Top 178(1):13–23
......@@ -63,7 +63,7 @@ def perform_map_equation(graph_matrix, node_list, filename='', infomap_path=None
"""
if infomap_path:
os.chdir(infomap_path)
os.system('./Infomap --directed --clu --map --verbose ' +
os.system('Infomap --directed --clu --map --verbose ' +
base_dir + '/' + net_fn + ' ' + base_dir)
os.chdir(base_dir)
......
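A hedged usage sketch for this helper; `conn_matrix` and `area_list` stand for the connectivity matrix and list of area names as in the clustering script earlier in this commit, and the Infomap path is a placeholder:

from graph_helpers import apply_map_equation
modules, modules_areas, index = apply_map_equation(
    conn_matrix, area_list, filename='Model',
    infomap_path='/path/to/Infomap/')  # directory containing the Infomap binary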
......@@ -5,8 +5,9 @@ mp.use('GTKAgg')
class panel_factory():
"""Class that generates a subpanel in the PloS figure"""
"""
Class for generating subpanels
"""
def __init__(self, scale, figure, n_pan_x, n_pan_y, hoffset,
voffset, hspace, vspace, panel_width, panel_height):
self.scale = scale
......