diff --git a/multiarea_model/data_multiarea/VisualCortex_Data.py b/multiarea_model/data_multiarea/VisualCortex_Data.py index 03055be2090d00154379ce6267b89a0e0e26d5de..ea42ce272c0027b01ad758a70ea209b8884b1806 100644 --- a/multiarea_model/data_multiarea/VisualCortex_Data.py +++ b/multiarea_model/data_multiarea/VisualCortex_Data.py @@ -2,7 +2,7 @@ VisualCortexData ================ -This script provides the function process_raw_data which fulfils two +This script provides the function process_raw_data which fulfills two tasks: 1) Load the experimental data from the raw data files stored in raw_data/ and store it to viscortex_raw_data.json. @@ -10,7 +10,7 @@ tasks: densities and laminar thicknesses and store these values to viscortex_processed_data.json. -All details of the procedures in this scripts are described in +All details of the procedures in this script are described in Schmidt M, Bakker R, Hilgetag CC, Diesmann M & van Albada SJ "Multi-scale account of the network structure of macaque visual cortex" Brain Structure and Function (2018), 223:1409 @@ -23,13 +23,13 @@ will be stored in the corresponding dictionaries: 1. Hierarchy from Reid et al. (2009) ---> hierarchy -2. Layer-specific neuronal densities for 12 areas from - Helen Barbas' lab - ----> currently not available +2. Layer-specific and overall neuronal densities from + the lab of Helen Barbas + ----> neuronal_density_data, neuronal_density_data_updated 3. Categorization of all areas, except MIP and MDP, into 8 different structural classes ---> structure -4. Distances between areas with 3 different methods: Euclidean, Thom, and +4. Distances between areas with three different methods: Euclidean, Thom, and Median ---> euclidean_distances, thom_distances, median_distances The median distances are used for the multi-area model. @@ -149,51 +149,47 @@ def process_raw_data(): """ 2. 
Neuronal densities """ - NEURON_DENSITIES_AVAILABLE = False # data obtained with NeuN staining # delivers total and laminar densities - if NEURON_DENSITIES_AVAILABLE: - neuronal_density_data = {} - temp = pd.read_csv(os.path.join(datapath, 'NeuronalDensities_NeuN.csv'), sep='\t', - skiprows=2, - names=['area', 'overall', 't_error', '23', '23_error', - '4', '4_error', '56', '56_error']) - - for i in np.arange(0, len(temp), 1): - dict_ = {'overall': {'value': temp.iloc[i]['overall'], - 'error': temp.iloc[i]['t_error']}, - '23': {'value': temp.iloc[i]['23'], - 'error': temp.iloc[i]['23_error']}, - '4': {'value': temp.iloc[i]['4'], - 'error': temp.iloc[i]['4_error']}, - '56': {'value': temp.iloc[i]['56'], - 'error': temp.iloc[i]['56_error']}} - neuronal_density_data[temp.iloc[i]['area']] = dict_ - - # data obtained with Nissl staining - # delivers only total densities - with open(os.path.join(datapath, 'NeuronalDensities_Nissl.csv'), 'rt') as f: - myreader = csv.reader(f, delimiter=',') - neuronal_density_data_updated = {} - skip_header() - for temp in myreader: - try: - if temp[0] == 'V5/MT': - neuronal_density_data_updated['MT'] = float(temp[2]) - if temp[0] == 'A46v': - neuronal_density_data_updated['area 46v'] = float(temp[2]) - if temp[0] == 'A7a': - neuronal_density_data_updated['7a'] = float(temp[2]) - if temp[0] == 'LIPv': - neuronal_density_data_updated['LIP'] = float(temp[2]) - if temp[0] == 'TEr': - neuronal_density_data_updated['Te1'] = float(temp[2]) - else: - neuronal_density_data_updated[temp[0]] = float(temp[2]) - except ValueError: - pass - else: - neuronal_density_data = None + neuronal_density_data = {} + temp = pd.read_csv(os.path.join(datapath, 'NeuronalDensities_NeuN.csv'), sep='\t', + skiprows=2, + names=['area', 'overall', 't_error', '23', '23_error', + '4', '4_error', '56', '56_error']) + + for i in np.arange(0, len(temp), 1): + dict_ = {'overall': {'value': temp.iloc[i]['overall'], + 'error': temp.iloc[i]['t_error']}, + '23': {'value': temp.iloc[i]['23'], + 'error': temp.iloc[i]['23_error']}, + '4': {'value': temp.iloc[i]['4'], + 'error': temp.iloc[i]['4_error']}, + '56': {'value': temp.iloc[i]['56'], + 'error': temp.iloc[i]['56_error']}} + neuronal_density_data[temp.iloc[i]['area']] = dict_ + + # data obtained with Nissl staining + # delivers only total densities + with open(os.path.join(datapath, 'NeuronalDensities_Nissl.csv'), 'rt') as f: + myreader = csv.reader(f, delimiter=',') + neuronal_density_data_updated = {} + skip_header() + for temp in myreader: + try: + if temp[0] == 'V5/MT': + neuronal_density_data_updated['MT'] = float(temp[2]) + if temp[0] == 'A46v': + neuronal_density_data_updated['area 46v'] = float(temp[2]) + if temp[0] == 'A7a': + neuronal_density_data_updated['7a'] = float(temp[2]) + if temp[0] == 'LIPv': + neuronal_density_data_updated['LIP'] = float(temp[2]) + if temp[0] == 'TEr': + neuronal_density_data_updated['Te1'] = float(temp[2]) + else: + neuronal_density_data_updated[temp[0]] = float(temp[2]) + except ValueError: + pass """ 3. Architectural Types @@ -660,306 +656,303 @@ def process_raw_data(): to Nissl staining. 
""" - if NEURON_DENSITIES_AVAILABLE: - # determine fit of scaling factors from NeuN to Nissl staining - new = [] - old = [] - for area in neuronal_density_data_updated: - if area in neuronal_density_data: - old.append(neuronal_density_data[area]['overall']['value']) - new.append(neuronal_density_data_updated[area]) - gradient, intercept, r_value, p_value, std_err = stats.linregress(old, new) - - # map Neuronal density data to FV91 scheme - neuronal_density_data_updated_FV91 = {} - for i in list(neuronal_density_data_updated.keys()): - if i not in area_list: - if i in translation: - areas_FV91 = translation[i] - for kk in areas_FV91: - neuronal_density_data_updated_FV91[ - kk] = neuronal_density_data_updated[i] - else: - neuronal_density_data_updated_FV91[ - i] = neuronal_density_data_updated[i] - - neuronal_density_data_FV91 = {} - for i in list(neuronal_density_data.keys()): - if i not in area_list: + # determine fit of scaling factors from NeuN to Nissl staining + new = [] + old = [] + for area in neuronal_density_data_updated: + if area in neuronal_density_data: + old.append(neuronal_density_data[area]['overall']['value']) + new.append(neuronal_density_data_updated[area]) + gradient, intercept, r_value, p_value, std_err = stats.linregress(old, new) + + # map neuronal density data to FV91 scheme + neuronal_density_data_updated_FV91 = {} + for i in list(neuronal_density_data_updated.keys()): + if i not in area_list: + if i in translation: areas_FV91 = translation[i] for kk in areas_FV91: - neuronal_density_data_FV91[kk] = neuronal_density_data[i].copy( - ) - else: - neuronal_density_data_FV91[i] = neuronal_density_data[i].copy() - - # map Neuronal density data to 4 layers by dividing - neuronal_density_data_FV91_4layers = {} - for i in list(neuronal_density_data_FV91.keys()): - neuronal_density_data_FV91_4layers.update({i: {'23': 0., 'overall': 0., - '4': {'value': 0.0, 'error': 0.0}, - '5': 0., '6': 0.}}) - - for i in list(neuronal_density_data_FV91_4layers.keys()): - for layer in ['23', '4', '56']: - if neuronal_density_data_FV91[i][layer]['value'] > 0.: - # Assign equal density to layers 5 and 6 - if layer == '56': - neuronal_density_data_FV91_4layers[i]['5'] = neuronal_density_data_FV91[ - i]['56']['value'] * gradient + intercept - neuronal_density_data_FV91_4layers[i]['6'] = neuronal_density_data_FV91[ - i]['56']['value'] * gradient + intercept - else: - neuronal_density_data_FV91_4layers[i][layer] = neuronal_density_data_FV91[ - i][layer]['value'] * gradient + intercept + neuronal_density_data_updated_FV91[ + kk] = neuronal_density_data_updated[i] + else: + neuronal_density_data_updated_FV91[ + i] = neuronal_density_data_updated[i] + + neuronal_density_data_FV91 = {} + for i in list(neuronal_density_data.keys()): + if i not in area_list: + areas_FV91 = translation[i] + for kk in areas_FV91: + neuronal_density_data_FV91[kk] = neuronal_density_data[i].copy( + ) + else: + neuronal_density_data_FV91[i] = neuronal_density_data[i].copy() + + # map neuronal density data to 4 layers by dividing + neuronal_density_data_FV91_4layers = {} + for i in list(neuronal_density_data_FV91.keys()): + neuronal_density_data_FV91_4layers.update({i: {'23': 0., 'overall': 0., + '4': {'value': 0.0, 'error': 0.0}, + '5': 0., '6': 0.}}) + + for i in list(neuronal_density_data_FV91_4layers.keys()): + for layer in ['23', '4', '56']: + if neuronal_density_data_FV91[i][layer]['value'] > 0.: + # Assign equal density to layers 5 and 6 + if layer == '56': + neuronal_density_data_FV91_4layers[i]['5'] = 
neuronal_density_data_FV91[ + i]['56']['value'] * gradient + intercept + neuronal_density_data_FV91_4layers[i]['6'] = neuronal_density_data_FV91[ + i]['56']['value'] * gradient + intercept else: - neuronal_density_data_FV91_4layers[i][layer] = 0. - # if there is Nissl data, then take it, otherwise - # transform NeuN data with the linear fit - if i in neuronal_density_data_updated_FV91: - neuronal_density_data_FV91_4layers[i][ - 'overall'] = neuronal_density_data_updated_FV91[i] + neuronal_density_data_FV91_4layers[i][layer] = neuronal_density_data_FV91[ + i][layer]['value'] * gradient + intercept else: - neuronal_density_data_FV91_4layers[i]['overall'] = neuronal_density_data_FV91[ - i]['overall']['value'] * gradient + intercept - - """ - We build a dictionary containing neuron densities - (overall and layer-specific) for each area. If there - is no data available for an area, we compute the - densities in two steps: + neuronal_density_data_FV91_4layers[i][layer] = 0. + # if there is Nissl data, then take it, otherwise + # transform NeuN data with the linear fit + if i in neuronal_density_data_updated_FV91: + neuronal_density_data_FV91_4layers[i][ + 'overall'] = neuronal_density_data_updated_FV91[i] + else: + neuronal_density_data_FV91_4layers[i]['overall'] = neuronal_density_data_FV91[ + i]['overall']['value'] * gradient + intercept + """ + We build a dictionary containing neuron densities + (overall and layer-specific) for each area. If there + is no data available for an area, we compute the + densities in two steps: + + 1. Assign an average neuronal density to each cortical + category, for each layer and for the overall density. + 2. Based on the category, assign a density to each + area for which there is no direct data available. + + In contrast to the model, the experimental data + combines layers 5 and 6 into one layer. Thus, we + assign values to layers 5 and 6 separately by the following calculation: + N56 = N5 + N6, d56 = d5 + d6, A56 = A5 = A6 + rho56 = N56 / (A56*d56) = (N5+N6) / (A56*(d5+d6)) = N5/(A56*(d5+d6)) + + N6/(A56*(d5+d6)) = N5/(A5*d5) * d5/(d5+d6) + N6/(A6*d6) * d6/(d5+d6) = + rho5 * d5/(d5+d6) + rho6 * d6/(d5+d6) = rho5 * factor + rho6 * + (1-factor) + + """ + + # Step 1: Assign an average density to each structural type + neuronal_density_list = [{'overall': [], '23': [], '4': [], '5': [], + '6': []} for i in range(8)] + + for area in list(neuronal_density_data_FV91_4layers.keys()): + category = architecture_completed[area] + for key in list(neuronal_density_data_FV91_4layers[area].keys()): + neuronal_density_list[category - 1][key].append(float( + neuronal_density_data_FV91_4layers[area][key])) + + category_density = {} + + for x in range(8): + dict_ = {} + for i in list(neuronal_density_list[0].keys()): + if len(neuronal_density_list[x][i]) == 0: + dict_[i] = np.nan + else: + dict_[i] = np.mean(neuronal_density_list[x][i]) + category_density[x + 1] = dict_ + + # Step 2: For areas without experimental data, + # assign the average density values of their structural type + neuronal_densities = nested_dict() + for area in list(architecture_completed.keys()): + dict_ = {} + if architecture_completed[area] in range(1, 9, 1): + if area in list(neuronal_density_data_FV91_4layers.keys()): + for key in list(neuronal_density_data_FV91_4layers[area].keys()): + neuronal_densities[area][key] = neuronal_density_data_FV91_4layers[ + area][key] + else: + neuronal_densities[area] = category_density[architecture_completed[area]] + else: + neuronal_densities[area] = '?' 
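# --- Editor's illustration (not part of the patch): a minimal numeric check
# of the rho56 identity derived in the docstring above, with invented toy
# thicknesses and densities. Assigning the same value to rho5 and rho6, as
# the code does for the combined L5/6 data, reproduces rho56 exactly for any
# thickness split d5/d6.
d5_toy, d6_toy = 0.3, 0.4                  # toy laminar thicknesses (mm)
rho5_toy = rho6_toy = 50000.0              # toy densities (neurons per mm^3)
factor_toy = d5_toy / (d5_toy + d6_toy)
rho56_toy = rho5_toy * factor_toy + rho6_toy * (1 - factor_toy)
assert abs(rho56_toy - rho5_toy) < 1e-9    # combined density equals the common value
# ---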
+ neuronal_densities = neuronal_densities.to_dict() - 1. Assign an average neural density to each cortical - category for each layer and overall density. - 2. Based on the category, assign a density to each - area, if there is no direct data available. + """ + ### Thicknesses + To convert the neuronal volume densities into + neuron counts, we need total and laminar thicknesses + for each area. - 3. In contrast to the model, the experimental data - combines layers 5 and 6 to one layer. Thus we - assign valyes to 5 and 6 separately by the following calculation: - N56 = N5 + N6, d56 = d5 + d6, A56 = A5 = A6 - rho56 = N56 / (A56*d56) = (N5+N6) / (A56*(d5+d6)) = N5/(A56*(d5+d6)) + - N6/(A56*(d5+d6)) = N5/(A5*d5) * d5/(d5+d6) + N6/(A6*d6) * d6/(d5+d6) = - rho5 * d5/(d5+d6) + rho6 * d6/(d5+d6) = rho5 * factor + rho6 * - (1-factor) + For areas without experimental data on thicknesses, we + use a linear fit of total thickness vs. + logarithmic overall neuron density. - """ + In addition, we use linear fits of relative thicknesses + vs. logarithmic neuron densities for L4 thickness, and the + arithmetic mean of the data for L1, L2/3, L5 and L6. - # Step 1: Assign an average density to each structural type - neuronal_density_list = [{'overall': [], '23': [], '4': [], '5': [], - '6': []} for i in range(8)] + Finally, we convert the relative laminar thicknesses + into absolute values by multiplying with the total thickness. - for area in list(neuronal_density_data_FV91_4layers.keys()): - category = architecture_completed[area] - for key in list(neuronal_density_data_FV91_4layers[area].keys()): - neuronal_density_list[category - 1][key].append(float( - neuronal_density_data_FV91_4layers[area][key])) - category_density = {} - for x in range(8): - dict_ = {} - for i in list(neuronal_density_list[0].keys()): - if len(neuronal_density_list[x][i]) == 0: - dict_[i] = np.nan - else: - dict_[i] = np.mean(neuronal_density_list[x][i]) - category_density[x + 1] = dict_ + Total thicknesses + """ + # linear regression of Barbas lab total thicknesses vs. log neuron densities + barbas_array = np.zeros(len(area_list)) + log_density_array = np.zeros(len(area_list)) + for i, area in enumerate(area_list): + barbas_array[i] = total_thickness_data[area] + log_density_array[i] = np.log10(neuronal_densities[area]['overall']) - # Step 2: For areas with out experimental data, - # assign the average density values of its structural type - neuronal_densities = nested_dict() - for area in list(architecture_completed.keys()): - dict_ = {} - if architecture_completed[area] in range(1, 9, 1): - if area in list(neuronal_density_data_FV91_4layers.keys()): - for key in list(neuronal_density_data_FV91_4layers[area].keys()): - neuronal_densities[area][key] = neuronal_density_data_FV91_4layers[ - area][key] - else: - neuronal_densities[area] = category_density[architecture_completed[area]] - else: - neuronal_densities[area] = '?' - neuronal_densities = neuronal_densities.to_dict() - - """ - ### Thicknesses - - To convert the neuronal volume densities into - neuron counts, we need total and laminar thicknesses - for each area. - - For areas without experimental data on thicknesses, we - use we use a linear fit of total thickness vs. - logarithmic overall neuron density. - - In addition, we use linear fits of relative thicknesses - vs. logarithmic neuron densities for L4 thickness, and the - arithmetic mean of the data for L1, L2/3, L5 and L6. 
- - Finally, we convert the relative laminar thicknesses - into absolute values by multiplying with the total thickness. - - - - Total thicknesses - """ - # linear regression of barbas thicknesses vs architectural types - barbas_array = np.zeros(len(area_list)) - log_density_array = np.zeros(len(area_list)) - for i, area in enumerate(area_list): - barbas_array[i] = total_thickness_data[area] - log_density_array[i] = np.log10(neuronal_densities[area]['overall']) - - gradient, intercept, r_value, p_value, std_err = stats.linregress( - log_density_array[np.isfinite(barbas_array)], barbas_array[np.isfinite(barbas_array)]) - - # total thicknesses - total_thicknesses = total_thickness_data.copy() - for a in list(total_thicknesses.keys()): - if np.isnan(total_thicknesses[a]): - total_thicknesses[a] = intercept + gradient * \ - np.log10(neuronal_densities[a]['overall']) - - """ - Laminar thicknesses - """ - # calculate relative layer thicknesses for each area and study - frac_of_total = nested_dict() - for area in list(laminar_thicknesses.keys()): - for layer in list(laminar_thicknesses[area].keys()): - frac_of_total[area][layer] = np.array(laminar_thicknesses[area][ - layer]) / np.array(laminar_thicknesses[area]['total']) - # if layer thickness is zero, it makes up 0% of the total, even if the - # total is unknown - if 0 in np.array(laminar_thicknesses[area][layer]): - if np.isscalar(laminar_thicknesses[area][layer]): - frac_of_total[area][layer] = 0 - else: - indices = np.where( - np.array(laminar_thicknesses[area][layer]) == 0)[0] - for i in indices: - frac_of_total[area][layer][i] = 0 - frac_of_total = frac_of_total.to_dict() - - # for areas for which these are known: mean across studies - # of fractions of total thickness occupied by each layer - relative_layer_thicknesses = nested_dict() - for area, layer in product(area_list, ['1', '23', '4', '5', '6']): - if np.isscalar(frac_of_total[area][layer]): - relative_layer_thicknesses[area][layer] = frac_of_total[area][layer] - else: - relative_layer_thicknesses[area][layer] = np.mean( - frac_of_total[area][layer][np.isfinite(frac_of_total[area][layer])]) - relative_layer_thicknesses = relative_layer_thicknesses.to_dict() - - # for areas where these data are missing, use mean across areas of - # fractions of total thickness occupied by L1, L2/3, by L5, and by L6 - tmp1 = np.array([]) - tmp23 = np.array([]) - tmp5 = np.array([]) - tmp6 = np.array([]) - for area in list(relative_layer_thicknesses.keys()): - tmp1 = np.append(tmp1, relative_layer_thicknesses[area]['1']) - tmp23 = np.append(tmp23, relative_layer_thicknesses[area]['23']) - tmp5 = np.append(tmp5, relative_layer_thicknesses[area]['5']) - tmp6 = np.append(tmp6, relative_layer_thicknesses[area]['6']) - - mean_rel_L1_thickness = np.mean(tmp1[np.isfinite(tmp1)]) - mean_rel_L23_thickness = np.mean(tmp23[np.isfinite(tmp23)]) - mean_rel_L5_thickness = np.mean(tmp5[np.isfinite(tmp5)]) - mean_rel_L6_thickness = np.mean(tmp6[np.isfinite(tmp6)]) - - for area in list(relative_layer_thicknesses.keys()): - if np.isnan(relative_layer_thicknesses[area]['1']): - relative_layer_thicknesses[area]['1'] = mean_rel_L1_thickness - if np.isnan(relative_layer_thicknesses[area]['23']): - relative_layer_thicknesses[area]['23'] = mean_rel_L23_thickness - if np.isnan(relative_layer_thicknesses[area]['5']): - relative_layer_thicknesses[area]['5'] = mean_rel_L5_thickness - if np.isnan(relative_layer_thicknesses[area]['6']): - relative_layer_thicknesses[area]['6'] = mean_rel_L6_thickness - - # mean relative laminar 
thickness across studies for each area - frac4_of_total = np.zeros(len(area_list)) - - for i, area in enumerate(area_list): - temp = frac_of_total[area]['4'] - if not np.isscalar(temp): - if sum(np.isfinite(temp)): - frac4_of_total[i] = np.nansum(temp) / sum(np.isfinite(temp)) + gradient, intercept, r_value, p_value, std_err = stats.linregress( + log_density_array[np.isfinite(barbas_array)], barbas_array[np.isfinite(barbas_array)]) + + # total thicknesses + total_thicknesses = total_thickness_data.copy() + for a in list(total_thicknesses.keys()): + if np.isnan(total_thicknesses[a]): + total_thicknesses[a] = intercept + gradient * \ + np.log10(neuronal_densities[a]['overall']) + + """ + Laminar thicknesses + """ + # calculate relative layer thicknesses for each area and study + frac_of_total = nested_dict() + for area in list(laminar_thicknesses.keys()): + for layer in list(laminar_thicknesses[area].keys()): + frac_of_total[area][layer] = np.array(laminar_thicknesses[area][ + layer]) / np.array(laminar_thicknesses[area]['total']) + # if layer thickness is zero, it makes up 0% of the total, even if the + # total is unknown + if 0 in np.array(laminar_thicknesses[area][layer]): + if np.isscalar(laminar_thicknesses[area][layer]): + frac_of_total[area][layer] = 0 else: - frac4_of_total[i] = np.nan + indices = np.where( + np.array(laminar_thicknesses[area][layer]) == 0)[0] + for i in indices: + frac_of_total[area][layer][i] = 0 + frac_of_total = frac_of_total.to_dict() + + # for areas for which these are known: mean across studies + # of fractions of total thickness occupied by each layer + relative_layer_thicknesses = nested_dict() + for area, layer in product(area_list, ['1', '23', '4', '5', '6']): + if np.isscalar(frac_of_total[area][layer]): + relative_layer_thicknesses[area][layer] = frac_of_total[area][layer] + else: + relative_layer_thicknesses[area][layer] = np.mean( + frac_of_total[area][layer][np.isfinite(frac_of_total[area][layer])]) + relative_layer_thicknesses = relative_layer_thicknesses.to_dict() + + # for areas where these data are missing, use mean across areas of + # fractions of total thickness occupied by L1, L2/3, by L5, and by L6 + tmp1 = np.array([]) + tmp23 = np.array([]) + tmp5 = np.array([]) + tmp6 = np.array([]) + for area in list(relative_layer_thicknesses.keys()): + tmp1 = np.append(tmp1, relative_layer_thicknesses[area]['1']) + tmp23 = np.append(tmp23, relative_layer_thicknesses[area]['23']) + tmp5 = np.append(tmp5, relative_layer_thicknesses[area]['5']) + tmp6 = np.append(tmp6, relative_layer_thicknesses[area]['6']) + + mean_rel_L1_thickness = np.mean(tmp1[np.isfinite(tmp1)]) + mean_rel_L23_thickness = np.mean(tmp23[np.isfinite(tmp23)]) + mean_rel_L5_thickness = np.mean(tmp5[np.isfinite(tmp5)]) + mean_rel_L6_thickness = np.mean(tmp6[np.isfinite(tmp6)]) + + for area in list(relative_layer_thicknesses.keys()): + if np.isnan(relative_layer_thicknesses[area]['1']): + relative_layer_thicknesses[area]['1'] = mean_rel_L1_thickness + if np.isnan(relative_layer_thicknesses[area]['23']): + relative_layer_thicknesses[area]['23'] = mean_rel_L23_thickness + if np.isnan(relative_layer_thicknesses[area]['5']): + relative_layer_thicknesses[area]['5'] = mean_rel_L5_thickness + if np.isnan(relative_layer_thicknesses[area]['6']): + relative_layer_thicknesses[area]['6'] = mean_rel_L6_thickness + + # mean relative laminar thickness across studies for each area + frac4_of_total = np.zeros(len(area_list)) + + for i, area in enumerate(area_list): + temp = frac_of_total[area]['4'] + if not 
np.isscalar(temp): + if sum(np.isfinite(temp)): + frac4_of_total[i] = np.nansum(temp) / sum(np.isfinite(temp)) else: - frac4_of_total[i] = temp - - # perform regressions of per-area average relative - # laminar thicknesses vs logarithmic overall densities - gradient4, intercept4, r_value4, p_value4, std_err4 = stats.linregress(np.array( - log_density_array)[np.isfinite(frac4_of_total)], frac4_of_total[ - np.isfinite(frac4_of_total)]) - - # assign values based on linear regressions - for area in list(relative_layer_thicknesses.keys()): - if np.isnan(relative_layer_thicknesses[area]['4']): - relative_layer_thicknesses[area]['4'] = intercept4 + gradient4 * np.log10( - neuronal_densities[area]['overall']) - - # convert relative thicknesses into absolute ones - laminar_thicknesses_completed = {} - for area in list(relative_layer_thicknesses.keys()): - laminar_thicknesses_completed[area] = {} - sum_rel_thick = sum(relative_layer_thicknesses[area].values()) - for layer in list(relative_layer_thicknesses[area].keys()): - # 0.001 converts from micrometer to mm; the result is normalized to - # have the sum of the relative thicknesses equal to 1 - laminar_thicknesses_completed[area][layer] = 0.001 * relative_layer_thicknesses[ - area][layer] * total_thicknesses[area] / sum_rel_thick - - """ - Finally, we compute neuron numbers for each population. - We assume a laminar-specific ratio of excitatory - to inhibitory neurons to be constant across areas. - """ - EI_ratio = {'23': num_V1['23E']['neurons'] / ( - num_V1['23E']['neurons'] + num_V1['23I']['neurons']), - '4': num_V1['4E']['neurons'] / (num_V1['4E']['neurons'] + - num_V1['4I']['neurons']), - '5': num_V1['5E']['neurons'] / (num_V1['5E']['neurons'] + - num_V1['5I']['neurons']), - '6': num_V1['6E']['neurons'] / (num_V1['6E']['neurons'] + - num_V1['6I']['neurons'])} - - """ - Then, we compute the number of neurons in - population i in layer v_i of area A as - N(A,i) = rho(A,v_i) * S(A) * D(A,v_i) * EI_ratio. - """ - realistic_neuronal_numbers = nested_dict() - for area in list(neuronal_densities.keys()): - S = surface_data[area] - ltc = laminar_thicknesses_completed[area] - nd = neuronal_densities[area] - realistic_neuronal_numbers[area]['23E'] = (S * ltc['23'] * - nd['23'] * EI_ratio['23']) - realistic_neuronal_numbers[area]['23I'] = (realistic_neuronal_numbers[area]['23E'] * - (1. - EI_ratio['23']) / EI_ratio['23']) - realistic_neuronal_numbers[area]['4E'] = (S * ltc['4'] * - nd['4'] * EI_ratio['4']) - realistic_neuronal_numbers[area]['4I'] = (realistic_neuronal_numbers[area]['4E'] * - (1. - EI_ratio['4']) / EI_ratio['4']) - realistic_neuronal_numbers[area]['5E'] = (S * ltc['5'] * - nd['5'] * EI_ratio['5']) - realistic_neuronal_numbers[area]['5I'] = (realistic_neuronal_numbers[area]['5E'] * - (1. - EI_ratio['5']) / EI_ratio['5']) - realistic_neuronal_numbers[area]['6E'] = (S * ltc['6'] * - nd['6'] * EI_ratio['6']) - realistic_neuronal_numbers[area]['6I'] = (realistic_neuronal_numbers[area]['6E'] * - (1. 
- EI_ratio['6']) / EI_ratio['6']) - realistic_neuronal_numbers[area]['total'] = sum( - realistic_neuronal_numbers[area].values()) - realistic_neuronal_numbers = realistic_neuronal_numbers.to_dict() + frac4_of_total[i] = np.nan + else: + frac4_of_total[i] = temp + + # perform regressions of per-area average relative + # laminar thicknesses vs logarithmic overall densities + gradient4, intercept4, r_value4, p_value4, std_err4 = stats.linregress(np.array( + log_density_array)[np.isfinite(frac4_of_total)], frac4_of_total[ + np.isfinite(frac4_of_total)]) + + # assign values based on linear regressions + for area in list(relative_layer_thicknesses.keys()): + if np.isnan(relative_layer_thicknesses[area]['4']): + relative_layer_thicknesses[area]['4'] = intercept4 + gradient4 * np.log10( + neuronal_densities[area]['overall']) + + # convert relative thicknesses into absolute ones + laminar_thicknesses_completed = {} + for area in list(relative_layer_thicknesses.keys()): + laminar_thicknesses_completed[area] = {} + sum_rel_thick = sum(relative_layer_thicknesses[area].values()) + for layer in list(relative_layer_thicknesses[area].keys()): + # 0.001 converts from micrometer to mm; the result is normalized to + # have the sum of the relative thicknesses equal to 1 + laminar_thicknesses_completed[area][layer] = 0.001 * relative_layer_thicknesses[ + area][layer] * total_thicknesses[area] / sum_rel_thick + + """ + Finally, we compute neuron numbers for each population. + We assume a laminar-specific ratio of excitatory + to inhibitory neurons to be constant across areas. + """ + EI_ratio = {'23': num_V1['23E']['neurons'] / ( + num_V1['23E']['neurons'] + num_V1['23I']['neurons']), + '4': num_V1['4E']['neurons'] / (num_V1['4E']['neurons'] + + num_V1['4I']['neurons']), + '5': num_V1['5E']['neurons'] / (num_V1['5E']['neurons'] + + num_V1['5I']['neurons']), + '6': num_V1['6E']['neurons'] / (num_V1['6E']['neurons'] + + num_V1['6I']['neurons'])} + + """ + Then, we compute the number of neurons in + population i in layer v_i of area A as + N(A,i) = rho(A,v_i) * S(A) * D(A,v_i) * EI_ratio. + """ + realistic_neuronal_numbers = nested_dict() + for area in list(neuronal_densities.keys()): + S = surface_data[area] + ltc = laminar_thicknesses_completed[area] + nd = neuronal_densities[area] + realistic_neuronal_numbers[area]['23E'] = (S * ltc['23'] * + nd['23'] * EI_ratio['23']) + realistic_neuronal_numbers[area]['23I'] = (realistic_neuronal_numbers[area]['23E'] * + (1. - EI_ratio['23']) / EI_ratio['23']) + realistic_neuronal_numbers[area]['4E'] = (S * ltc['4'] * + nd['4'] * EI_ratio['4']) + realistic_neuronal_numbers[area]['4I'] = (realistic_neuronal_numbers[area]['4E'] * + (1. - EI_ratio['4']) / EI_ratio['4']) + realistic_neuronal_numbers[area]['5E'] = (S * ltc['5'] * + nd['5'] * EI_ratio['5']) + realistic_neuronal_numbers[area]['5I'] = (realistic_neuronal_numbers[area]['5E'] * + (1. - EI_ratio['5']) / EI_ratio['5']) + realistic_neuronal_numbers[area]['6E'] = (S * ltc['6'] * + nd['6'] * EI_ratio['6']) + realistic_neuronal_numbers[area]['6I'] = (realistic_neuronal_numbers[area]['6E'] * + (1. - EI_ratio['6']) / EI_ratio['6']) + realistic_neuronal_numbers[area]['total'] = sum( + realistic_neuronal_numbers[area].values()) + realistic_neuronal_numbers = realistic_neuronal_numbers.to_dict() """ Cortico-cortical connectivity @@ -971,8 +964,8 @@ def process_raw_data(): 1. Map the injection sites (= target areas) to the FV91 scheme. 2. Map the source areas to FV91 with the overlap tool. - 3. 
Retrieve information about existing connection - from CoCoMac + 3. Retrieve information about existing connections + from CoCoMac. 4. Fit exponential distance rule to existing data. 5. Fill missing values with exponential distance rule. """ @@ -1328,58 +1321,40 @@ def process_raw_data(): res = integrate.quad(integrand, -1000., x, args=(0., 1.))[0] return res - if NEURON_DENSITIES_AVAILABLE: - # Call R script to perform SLN fit - try: - proc = subprocess.Popen(["Rscript", - os.path.join(basepath, 'SLN_logdensities.R'), - base_path], - stdout=subprocess.PIPE) - out = proc.communicate()[0].decode('utf-8') - R_fit = [float(out.split('\n')[1].split(' ')[1]), + # Call R script to perform SLN fit + try: + proc = subprocess.Popen(["Rscript", + os.path.join(basepath, 'SLN_logdensities.R'), + base_path], + stdout=subprocess.PIPE) + out = proc.communicate()[0].decode('utf-8') + R_fit = [float(out.split('\n')[1].split(' ')[1]), float(out.split('\n')[1].split(' ')[3])] - except OSError: - print("No R installation, taking hard-coded SLN fit parameters.") - R_fit = [-0.1516142, -1.5343200] - else: - print("Neuron densities not available, taking hard-coded SLN fit parameters.") + except OSError: + print("No R installation, taking hard-coded SLN fit parameters.") R_fit = [-0.1516142, -1.5343200] """ 4. Fill missing data with fitted values. """ - if NEURON_DENSITIES_AVAILABLE: - SLN_completed = {} - s = 0. - s2 = 0. - for target in area_list: - SLN_completed[target] = {} - for source in list(cocomac_completed[target].keys()): - if source in area_list and source != target: - if target in SLN_Data_FV91_mapped and source in SLN_Data_FV91_mapped[target]: - value = SLN_Data_FV91_mapped[target][source] - s += 1 - else: - nd_target = neuronal_densities[target] - nd_source = neuronal_densities[source] - x = R_fit[1] * float(np.log(nd_target['overall']) - - np.log(nd_source['overall'])) + R_fit[0] - value = probit(x) - s2 += 1 - SLN_completed[target][source] = value - else: - # Load data that cannot be computed from json file or set to None - with open(os.path.join(out_path, - ''.join(('viscortex_processed_data_backup.json'))), - 'r') as f: - dat = json.load(f) - SLN_completed = dat['SLN_completed'] - realistic_neuronal_numbers = dat['realistic_neuronal_numbers'] - total_thicknesses = dat['total_thicknesses'] - laminar_thicknesses_completed = dat['laminar_thicknesses'] - neuronal_density_data_FV91_4layers = None - neuronal_densities = None - category_density = None + SLN_completed = {} + s = 0. + s2 = 0. 
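# --- Editor's illustration (not part of the patch): probit() above is
# numerically the standard-normal CDF, so the fill-in performed by the loop
# below amounts to SLN = Phi(R_fit[0] + R_fit[1]*(log(rho_t) - log(rho_s))).
# The densities here are the raw NeuN values of V2 and V1 from
# NeuronalDensities_NeuN.csv, used only as stand-ins for
# neuronal_densities[...]['overall']:
import numpy as np
from scipy import stats
R_fit_demo = [-0.1516142, -1.5343200]         # [additive term, slope], as above
rho_target, rho_source = 97618.8, 161365.3    # V2 <- V1
x_demo = R_fit_demo[1] * (np.log(rho_target) - np.log(rho_source)) + R_fit_demo[0]
sln_demo = stats.norm.cdf(x_demo)             # ~0.73, i.e. mostly supragranular,
                                              # as expected for a feedforward projection
# ---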
+ for target in area_list: + SLN_completed[target] = {} + for source in list(cocomac_completed[target].keys()): + if source in area_list and source != target: + if target in SLN_Data_FV91_mapped and source in SLN_Data_FV91_mapped[target]: + value = SLN_Data_FV91_mapped[target][source] + s += 1 + else: + nd_target = neuronal_densities[target] + nd_source = neuronal_densities[source] + x = R_fit[1] * float(np.log(nd_target['overall']) - + np.log(nd_source['overall'])) + R_fit[0] + value = probit(x) + s2 += 1 + SLN_completed[target][source] = value """ Write output files diff --git a/multiarea_model/data_multiarea/raw_data/NeuronalDensities_NeuN.csv b/multiarea_model/data_multiarea/raw_data/NeuronalDensities_NeuN.csv new file mode 100644 index 0000000000000000000000000000000000000000..72cc9972328702bc5abeb5d552602e161cac7200 --- /dev/null +++ b/multiarea_model/data_multiarea/raw_data/NeuronalDensities_NeuN.csv @@ -0,0 +1,16 @@ +# Overall and layer-resolved neuron densities (neurons per cubic mm) of macaque cortical areas from the lab of Helen Barbas, Boston University, obtained with NeuN staining. The overall densities correspond to those in Table 4 of Hilgetag, C. C., Medalla, M., Beul, S. F., & Barbas, H. (2016). The primate connectome in context: Principles of connections of the cortical visual system. NeuroImage, 134, 685-702. Columns: area name, overall density, overall density error, L2/3 density, L2/3 density error, L4 density, L4 density error, L5/6 density, L5/6 density error + +V1 161365.313353296 19427.7514309175 153894.970495772 11237.0785022221 179088.958685915 19483.3411458588 139272.680066401 28642.7198414341 +V2 97618.8004262742 6318.4209238154 92913.7502053809 9168.7044437395 180488.12639129 15599.6221210207 78810.1380340474 5788.3313934117 +V4 71236.5276327589 7947.590257791 67154.5446596756 5803.6225683117 167188.428539576 26194.6145324879 56752.2320613314 10570.4766493826 +MT 65991.5887115086 6996.5656738783 67044.2266724167 5773.2854729385 121372.429209652 5958.4564687061 50507.6146086331 9182.4918445723 +TEO 63271.1025249173 7263.1015279643 56057.260207707 6168.8769518766 153600.74824572 26717.1840533565 57071.8248578393 6982.2666533817 +V3A 61381.5412392137 6202.963723809 58305.5035933036 4035.9619344902 109843.719889781 22376.7397541261 50647.3572613343 5259.7123097559 +LIP 52134.5731946242 4491.9762527831 50703.2775227163 4603.3939842453 90334.8968775757 8221.417134307 43926.0361437937 4960.2985281281 +Opt/DP 48015.1995568109 5932.0962970323 45758.1365355312 4678.0990750539 86467.4698480542 12137.3808972281 41725.5532649263 6548.0941769156 +TF 46083.8963535136 2697.6987375072 44164.6081343695 492.0978572682 88810.1652976761 2904.792305899 40653.2604320568 8573.9510135437 +FEF 44977.7691582594 1147.4075861589 45284.2708812591 3025.5401968487 68733.783030714 1368.1732105009 39382.0621828807 1175.1024421689 +Te1 38839.7460239597 4018.9831766332 36312.629019575 3798.0977648742 84833.0161404169 20492.3782565057 36379.5587038022 6385.8472725441 +area 46v 38026.8136272429 4185.5590572311 34625.8382448048 3028.1656500057 59774.256849191 9644.0789780673 36192.0308636869 4420.3808841888 +7a 36229.994135607 2214.8748387887 35064.7353890857 2945.3862132917 64711.115816133 8419.739423376 31239.53369249 1451.8383374224 +TH 33196.1450004882 4032.662630167 32778.5191185925 5269.5723764642 0 0 33814.8407473548 2994.3319115364 diff --git a/multiarea_model/data_multiarea/raw_data/NeuronalDensities_Nissl.csv b/multiarea_model/data_multiarea/raw_data/NeuronalDensities_Nissl.csv new file 
mode 100644 index 0000000000000000000000000000000000000000..58b837fd90ff9f57a0c36f485d3902cab0a7e3ef --- /dev/null +++ b/multiarea_model/data_multiarea/raw_data/NeuronalDensities_Nissl.csv @@ -0,0 +1,43 @@ +# Overall neuron densities (neurons per cubic mm) of macaque cortical areas obtained with Nissl staining, providing an update with respect to the NeuN-based densities in Table 4 of Hilgetag, C. C., Medalla, M., Beul, S. F., & Barbas, H. (2016). The primate connectome in context: Principles of connections of the cortical visual system. NeuroImage, 134, 685-702. Columns: area name, architectural type, neuron density, cortical thickness (in mm) + +V1,8,173360,1.24 +V2,7,111730,1.46 +V3,7,—,— +VP,7,—,— +TEO,6,78523,2.13 +V3A,6,76696,1.66 +V4,6,86223,1.89 +V4t,6,—,— +V5/MT,6,81153,1.96 +VOT,6,—,— +A8,5,60837,2.21 +CIT,5,—,— +DP,5,—,— +FEF,5,—,— +LIPd,5,61087,2.3 +LIPv,5,69275,2.3 +MST,5,—,— +PIP,5,—,— +PIT,5,—,— +PO,5,—,— +TEc,5,—,— +TF,5,61906,1.62 +VIP,5,—,— +A36,4,50074,2.93 +A46v,4,52720,1.86 +A7a,4,52379,2.68 +AIT,4,—,— +FST,4,—,— +STP,4,—,— +TEr,4,54902,2.63 +A11 rostral,3,53874,1.62 +A12 lateral,3,—,— +A12 rostral,3,50570,2.25 +MVisPro,2,—,— +OPro,2,40136,2.5 +TH,2,49446,1.87 +VPolePro,2,—,— +OPAll/OPro,1.5,39607,— +A28,1,40825,2.05 +A35,1,40313,1.6 +ER,1,—,—
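# --- Editor's note (not part of the patch): a minimal sketch of loading the
# new Nissl file directly with pandas (already a dependency of the script);
# the column names below are invented for this example. Declaring the em-dash
# placeholder as an NA marker replaces the ValueError-based row skipping used
# in VisualCortex_Data.py:
import pandas as pd
nissl = pd.read_csv('multiarea_model/data_multiarea/raw_data/NeuronalDensities_Nissl.csv',
                    comment='#', header=None,
                    names=['area', 'arch_type', 'density', 'thickness_mm'],
                    na_values=['—'])
print(nissl.dropna(subset=['density']).head())  # rows with measured densities
# ---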