diff --git a/figures/Schmidt2018/Fig2_anatomy.py b/figures/Schmidt2018/Fig2_anatomy.py
index 7f88a20cd8cecaaa8bdccdbe8f982a36eb92ffcf..4d535c5264cba012918930bbe580ce1361ec74cd 100644
--- a/figures/Schmidt2018/Fig2_anatomy.py
+++ b/figures/Schmidt2018/Fig2_anatomy.py
@@ -17,8 +17,8 @@ if NEURON_DENSITIES_AVAILABLE:
     architecture_completed = proc['architecture_completed']
 
     categories = {}
-    for ii in np.arange(0, 9, 1):
-        categories[ii] = []
+    for i in np.arange(0, 9, 1):
+        categories[i] = []
     for area in architecture_completed:
         categories[architecture_completed[area]].append(area)
 
@@ -171,12 +171,12 @@ if NEURON_DENSITIES_AVAILABLE:
     for l in layers[:]:
         bottom += rho[l]
 
-    for ii, l in enumerate(layers):
+    for i, l in enumerate(layers):
         print(l,  rho[l][4], bottom[4])
         bottom -= rho[l]
         print("LAYER", l, bottom)
         ax.bar(x - 0.4, rho[l], bottom=bottom,
-               color=colors[ii], label='L' + layer_labels[ii],
+               color=colors[i], label='L' + layer_labels[i],
                edgecolor='k')
 
     ax.set_xlabel('Architectural type', labelpad=0.3)
@@ -188,7 +188,7 @@ if NEURON_DENSITIES_AVAILABLE:
     ax.set_ylim((0., 200000.))
     ax.set_xlim((-0.5, 9))
     ax.set_xticks(np.arange(2, 9, 1) - 0.4)
-    ax.set_xticklabels([r'${}$'.format(ii) for ii in list(range(2, 9))])
+    ax.set_xticklabels([r'${}$'.format(i) for i in list(range(2, 9))])
 
     ax.legend(loc=(0.035, 0.45), edgecolor='k')
 
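
Note on the stacked-bar loop touched above: it pre-sums the layer densities into `bottom` and then peels each layer off before drawing it, so every segment sits on the layers below. A minimal self-contained sketch of the idiom, with made-up densities standing in for the script's `rho`:

```python
import numpy as np
import matplotlib.pyplot as plt

layers = ['23', '4', '5', '6']                 # toy layer labels
x = np.arange(3)                               # toy architectural types
rho = {l: np.random.uniform(1e4, 5e4, 3) for l in layers}

bottom = np.zeros(3)
for l in layers:
    bottom += rho[l]                           # column totals

fig, ax = plt.subplots()
for l in layers:
    bottom -= rho[l]                           # lower edge of this segment
    ax.bar(x, rho[l], bottom=bottom, edgecolor='k', label='L' + l)
ax.legend()
plt.show()
```
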
diff --git a/figures/Schmidt2018/Fig4_connectivity.py b/figures/Schmidt2018/Fig4_connectivity.py
index 586e7fe25b8568657bea24c588dd068ca961f87e..b724915930e2ac27dbe4600e9ffaa3b14a2fccd2 100644
--- a/figures/Schmidt2018/Fig4_connectivity.py
+++ b/figures/Schmidt2018/Fig4_connectivity.py
@@ -70,12 +70,12 @@ median_distance_data = raw['median_distance_data']
 
 cocomac = np.zeros((32, 32))
 conn_matrix = np.zeros((32, 32))
-for ii, area1 in enumerate(area_list[::-1]):
-    for jj, area2 in enumerate(area_list):
+for i, area1 in enumerate(area_list[::-1]):
+    for j, area2 in enumerate(area_list):
         if M.K_areas[area1][area2] > 0. and area2 in cocomac_data[area1]:
-            cocomac[ii][jj] = 1.
+            cocomac[i][j] = 1.
         if area2 in FLN_Data_FV91[area1]:
-            conn_matrix[ii][jj] = FLN_Data_FV91[area1][area2]
+            conn_matrix[i][j] = FLN_Data_FV91[area1][area2]
 
 """
 Panel A: CoCoMac Data
@@ -197,9 +197,9 @@ print(np.corrcoef(gradient * distances_FV91 + intercept, np.log(FLN_values_FV91)
 Panel D: Resulting connectivity matrix
 """
 conn_matrix = np.zeros((32, 32))
-for ii, area1 in enumerate(area_list[::-1]):
-    for jj, area2 in enumerate(area_list):
-        conn_matrix[ii][jj] = M.K_areas[area1][
+for i, area1 in enumerate(area_list[::-1]):
+    for j, area2 in enumerate(area_list):
+        conn_matrix[i][j] = M.K_areas[area1][
             area2] / np.sum(list(M.K_areas[area1].values()))
 
 ax = axes['D']
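
Panel D's nested loop above fills a row-normalized indegree matrix. A vectorized sketch of the same construction, with toy stand-ins for the script's `M.K_areas` and `area_list` (the real dict has exactly the areas of `area_list` as keys); note the reversed row order the plot relies on:

```python
import numpy as np

area_list = ['V1', 'V2', 'MT']                 # toy area names
K_areas = {a1: {a2: float(i + j + 1) for j, a2 in enumerate(area_list)}
           for i, a1 in enumerate(area_list)}

K = np.array([[K_areas[a1][a2] for a2 in area_list]
              for a1 in area_list[::-1]])
conn_matrix = K / K.sum(axis=1, keepdims=True)  # each row sums to 1
print(conn_matrix)
```
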
diff --git a/figures/Schmidt2018/Fig5_cc_laminar_pattern.py b/figures/Schmidt2018/Fig5_cc_laminar_pattern.py
index b8753834a242517a3d950e4ee7243f56c2291738..15bf87ffa17563840c3d5156a1cf92024cc2fafb 100644
--- a/figures/Schmidt2018/Fig5_cc_laminar_pattern.py
+++ b/figures/Schmidt2018/Fig5_cc_laminar_pattern.py
@@ -127,8 +127,8 @@ if NEURON_DENSITIES_AVAILABLE:
 
     def probit(x,):
         if isinstance(x, np.ndarray):
-            res = [integrate.quad(integrand, -1000., ii,
-                                  args=(0., 1.))[0] for ii in x]
+            res = [integrate.quad(integrand, -1000., i,
+                                  args=(0., 1.))[0] for i in x]
         else:
             res = integrate.quad(integrand, -1000., x, args=(0., 1.))[0]
         return res
@@ -202,7 +202,6 @@ if NEURON_DENSITIES_AVAILABLE:
                         target_high_SLN_unweighted[layer] += int(int(cocomac[target][
                             source]['target_pattern'][layer]) > 0)
 
-
     SLN_labels = [r' $\mathbf{SLN < 0.35}$',
                   r'$\mathbf{0.35\leq SLN \leq 0.65}$',
                   r'$\mathbf{SLN > 0.65}$']
@@ -224,7 +223,6 @@ if NEURON_DENSITIES_AVAILABLE:
             ax.set_ylabel('Target layer', size=10)
             ax.yaxis.set_label_coords(-0.15, 0.5)
 
-
     # Resulting patterns in the connectivity matrix
     M = MultiAreaModel({})
 
@@ -232,8 +230,8 @@ if NEURON_DENSITIES_AVAILABLE:
     FB_conns = []
     lateral_conns = []
 
-    for ii, target_area in enumerate(area_list):
-        for jj, source_area in enumerate(area_list):
+    for target_area in area_list:
+        for source_area in area_list:
             mask = create_mask(M.structure,
                                target_areas=[target_area],
                                source_areas=[source_area],
@@ -282,9 +280,9 @@ if NEURON_DENSITIES_AVAILABLE:
 
         matrix = np.mean(data, axis=0)
         ind = [0, 4, 6]
-        for ii in range(3):
-            ax.barh(np.arange(8), matrix[:, ind][:, ii][
-                    ::-1], left=ii * factor, color=myred, edgecolor='none')
+        for i in range(3):
+            ax.barh(np.arange(8), matrix[:, ind][:, i][
+                    ::-1], left=i * factor, color=myred, edgecolor='none')
 
         ax.text(0.1, 1.1, label, transform=ax.transAxes)
         ax.set_yticks(np.arange(8) + 0.3)
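
On the `probit()` helper renamed above: it integrates `integrand` from -1000 to x with arguments (0., 1.). If `integrand` is the Gaussian density those arguments suggest (an assumption; its definition lies outside the hunk), the helper computes the standard normal CDF, despite the name usually denoting the inverse CDF. A self-contained cross-check under that assumption:

```python
import numpy as np
from scipy import integrate, stats

def gauss_pdf(y, mu, sigma):   # assumed shape of the script's integrand
    return np.exp(-(y - mu)**2 / (2. * sigma**2)) / (sigma * np.sqrt(2. * np.pi))

x = 0.5
quad_val = integrate.quad(gauss_pdf, -1000., x, args=(0., 1.))[0]
print(quad_val, stats.norm.cdf(x))   # agree up to quadrature error
```
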
diff --git a/figures/Schmidt2018/Fig6_connectivity_measures.py b/figures/Schmidt2018/Fig6_connectivity_measures.py
index c3d8234da3cf58908ec9e0334bb1c9e90a94c0c6..177921aa7e7f389e3d0be536188255f69d1bfccc 100644
--- a/figures/Schmidt2018/Fig6_connectivity_measures.py
+++ b/figures/Schmidt2018/Fig6_connectivity_measures.py
@@ -71,9 +71,9 @@ for area in area_list:
         num_vector[index] = M.N[area][pop]
         index += 1
 
-for ii in range(254):
-    Npre[ii] = num_vector
-    Npost[:, ii] = num_vector
+for i in range(254):
+    Npre[i] = num_vector
+    Npost[:, i] = num_vector
 
 C = 1. - (1. - 1. / (Npre * Npost))**(M.K_matrix[:, :-1] * Npost)
 Nsyn = M.K_matrix[:, :-1] * Npost
@@ -98,7 +98,7 @@ ticks_r = []
 for area in plot_areas:
     ticks.append(t_index + 0.5 * len(M.structure[area]))
     ticks_r.append(new_size - (t_index + 0.5 * len(M.structure[area])))
-    for ii, pop in enumerate(M.structure[area]):
+    for pop in M.structure[area]:
         t_index += 1
 
 
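
The `C` assignment left as context above converts synapse counts into connection probabilities: with `Nsyn = K * Npost` synapses placed independently among `Npre * Npost` possible (pre, post) pairs, the probability that a fixed pair is connected at least once is `1 - (1 - 1/(Npre*Npost))**Nsyn`. A toy check of the formula (numbers are made up, not model values):

```python
import numpy as np

Npre, Npost, K = 2.0e4, 2.0e4, 500.   # toy population sizes and indegree
Nsyn = K * Npost
C = 1. - (1. - 1. / (Npre * Npost)) ** Nsyn
print(C, Nsyn / (Npre * Npost))       # nearly equal in the sparse limit
```
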
diff --git a/figures/Schmidt2018/Fig7_community_structure.py b/figures/Schmidt2018/Fig7_community_structure.py
index bfbbcb65792d0e4711c029410f06450e790c84c9..bcf605dcd1bd7c607fd723730d867250c5401594 100644
--- a/figures/Schmidt2018/Fig7_community_structure.py
+++ b/figures/Schmidt2018/Fig7_community_structure.py
@@ -42,13 +42,13 @@ Construct matrix of relative and absolute outdegrees
 """
 conn_matrix = np.zeros((32, 32))
 conn_matrix_abs = np.zeros((32, 32))
-for ii, area1 in enumerate(area_list):
-    for jj, area2 in enumerate(area_list):
+for i, area1 in enumerate(area_list):
+    for j, area2 in enumerate(area_list):
         value = outa[area1][area2] / np.sum(list(outa[area1].values()))
         value_abs = outa[area1][area2]
         if area1 != area2:
-            conn_matrix[ii][jj] = value
-            conn_matrix_abs[ii][jj] = value_abs
+            conn_matrix[i][j] = value
+            conn_matrix_abs[i][j] = value_abs
 
 
 """
@@ -80,8 +80,8 @@ while "*Links" not in line:
 
 # sort map_equation lists
 index = []
-for ii in range(32):
-    index.append(map_equation_areas.index(area_list[ii]))
+for i in range(32):
+    index.append(map_equation_areas.index(area_list[i]))
 
 map_equation = np.array(map_equation)
 
@@ -109,10 +109,10 @@ mod_list = []
 # For each column, we shuffle the rows and therefore conserve the
 # total outdegree of each area.
 
-for ii in range(1000):
-    for jj in range(32):
-        ind = np.extract(np.arange(32) != jj, np.arange(32))
-        null_model[:, jj][ind] = null_model[:, jj][ind][np.random.shuffle(ind)]
+for i in range(1000):
+    for j in range(32):
+        ind = np.extract(np.arange(32) != j, np.arange(32))
+        null_model[:, j][ind] = null_model[:, j][ind][np.random.shuffle(ind)]
     modules, modules_areas, index = apply_map_equation(null_model, area_list,
                                                        filename='null',
                                                        infomap_path=infomap_path)
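
The one-line shuffle kept above is easy to misread: `np.random.shuffle()` shuffles in place and returns `None`, and the statement only does what the comment says because Python evaluates the right-hand side (indexed with the still-unshuffled `ind`) before `ind` is shuffled and used on the left. A clearer equivalent sketch for permuting the off-diagonal entries of one column:

```python
import numpy as np

def shuffle_column_offdiag(null_model, j):
    """Permute column j of null_model in place, keeping the diagonal fixed."""
    n = null_model.shape[0]
    ind = np.arange(n)[np.arange(n) != j]      # all rows except the diagonal
    null_model[ind, j] = null_model[np.random.permutation(ind), j]

m = np.arange(16.).reshape(4, 4)
shuffle_column_offdiag(m, 2)
print(m)   # column 2 permuted off the diagonal; m[2, 2] unchanged
```
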
diff --git a/figures/Schmidt2018/Fig8_laminar_paths.py b/figures/Schmidt2018/Fig8_laminar_paths.py
index 8e4f2de215519fcc9d879d5b1318b1366c1a32d9..25ab09b995c050a8abdb594b98c3d568e244537c 100644
--- a/figures/Schmidt2018/Fig8_laminar_paths.py
+++ b/figures/Schmidt2018/Fig8_laminar_paths.py
@@ -48,12 +48,12 @@ for area in area_list:
             paths[target_pop][source_pop] = []
 
 path_length_matrix = np.zeros((254, 254))
-for ii, source in enumerate(M.structure_vec):
-    for jj, target in enumerate(M.structure_vec):
+for i, source in enumerate(M.structure_vec):
+    for j, target in enumerate(M.structure_vec):
         if target in path_lengths[source]:
-            path_length_matrix[jj][ii] = path_lengths[source][target]
+            path_length_matrix[j][i] = path_lengths[source][target]
         else:
-            path_length_matrix[jj][ii] = np.inf
+            path_length_matrix[j][i] = np.inf
 
 # Create dictionary containing the shortest path between any pair of areas
 CC_paths = {area: {} for area in area_list}
@@ -73,9 +73,9 @@ for target_area in area_list:
 # Create area-level graph
 K_area = np.zeros((32, 32))
 
-for ii, target in enumerate(area_list):
-    for jj, source in enumerate(area_list):
-        K_area[ii][jj] = M.K_areas[target][source]
+for i, target in enumerate(area_list):
+    for j, source in enumerate(area_list):
+        K_area[i][j] = M.K_areas[target][source]
 
 area_gain_matrix = K_area * np.ones_like(K_area) * M.J_matrix[0][0]
 eig = np.linalg.eig(area_gain_matrix)
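
A note on the eigenvalue call above: `np.linalg.eig` returns a pair (eigenvalues, eigenvectors), and since `np.ones_like(K_area)` is a matrix of ones, the gain matrix is just `K_area` scaled by the uniform weight `M.J_matrix[0][0]`. A toy sketch of extracting the spectral radius from such a matrix:

```python
import numpy as np

K_area = np.random.rand(32, 32)        # toy indegrees, not model data
J = 0.01                               # placeholder for M.J_matrix[0][0]
eigvals, eigvecs = np.linalg.eig(K_area * J)
print(np.max(np.abs(eigvals)))         # spectral radius of the area graph
```
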
diff --git a/figures/Schmidt2018/graph_helpers.py b/figures/Schmidt2018/graph_helpers.py
index a25be729307b73591fc1dfb4002e40830f5b463c..8b66a7a7d59675d1f7bddab96c6063e4b7958d5f 100644
--- a/figures/Schmidt2018/graph_helpers.py
+++ b/figures/Schmidt2018/graph_helpers.py
@@ -43,19 +43,19 @@ def apply_map_equation(graph_matrix, node_list, filename='', infomap_path=None):
     # nodes
     f = open(net_fn, 'w')
     f.write('*Vertices ' + str(len(node_list)) + '\n')
-    for ii, area in enumerate(node_list):
-        f.write(str(ii + 1) + ' "' + area + '"\n')
+    for i, area in enumerate(node_list):
+        f.write(str(i + 1) + ' "' + area + '"\n')
 
     # Determine number of vertices in the network
     k = np.where(graph_matrix != 0)[0].size
     f.write('*Arcs ' + str(k) + '\n')
 
     # edges
-    for ii in range(len(node_list)):
-        for jj in range(len(node_list)):
-            if graph_matrix[ii][jj] > 0.:
-                f.write(str(jj + 1) + ' ' + str(ii + 1) +
-                        ' ' + str(graph_matrix[ii][jj]) + '\n')
+    for i in range(len(node_list)):
+        for j in range(len(node_list)):
+            if graph_matrix[i][j] > 0.:
+                f.write(str(j + 1) + ' ' + str(i + 1) +
+                        ' ' + str(graph_matrix[i][j]) + '\n')
     f.close()
 
     """
@@ -91,8 +91,8 @@ def apply_map_equation(graph_matrix, node_list, filename='', infomap_path=None):
 
     # sort map_equation lists
     index = []
-    for ii in range(32):
-        index.append(map_equation_areas.index(node_list[ii]))
+    for i in range(32):
+        index.append(map_equation_areas.index(node_list[i]))
 
     map_equation = np.array(map_equation)
 
@@ -123,12 +123,12 @@ def modularity(g, membership):
     m = np.sum(g.es['weight'])
 
     Q = 0.
-    for ii, area in enumerate(g.vs):
-        k_out = np.sum(g.es.select(_source=ii)['weight'])
-        for jj, area2 in enumerate(g.vs):
-            k_in = np.sum(g.es.select(_target=jj)['weight'])
-            if membership[ii] == membership[jj]:
-                weight = g.es.select(_source=ii, _target=jj)['weight']
+    for i, area in enumerate(g.vs):
+        k_out = np.sum(g.es.select(_source=i)['weight'])
+        for j, area2 in enumerate(g.vs):
+            k_in = np.sum(g.es.select(_target=j)['weight'])
+            if membership[i] == membership[j]:
+                weight = g.es.select(_source=i, _target=j)['weight']
                 if len(weight) > 0:
                     Q += weight[0] - k_out * k_in / m
                 else:
@@ -241,11 +241,11 @@ def plot_clustered_graph(g, g_abs, membership, filename, center_of_masses, color
     layout = gcopy.layout("kk", **layout_params)
     coords = np.array(copy.copy(layout.coords))
     # For better visibility, place clusters at defined positions
-    for ii in range(np.max(membership)):
-        coo = coords[np.where(membership == ii + 1)]
+    for i in range(np.max(membership)):
+        coo = coords[np.where(membership == i + 1)]
         com = np.mean(coo, axis=0)
-        coo = np.array(coo) - (com - center_of_masses[ii])
-        coords[np.where(membership == ii + 1)] = coo
+        coo = np.array(coo) - (com - center_of_masses[i])
+        coords[np.where(membership == i + 1)] = coo
     # Define layout parameters
     gplot.es["color"] = edges_colors
     visual_style = {}
@@ -295,10 +295,10 @@ def create_graph(matrix, area_list):
     g = igraph.Graph(directed=True)
     g.add_vertices(area_list)
 
-    for ii in range(32):
-        for jj in range(32):
-            if matrix[ii][jj] != 0:
-                g.add_edge(jj, ii, weight=matrix[ii][jj])
+    for i in range(32):
+        for j in range(32):
+            if matrix[i][j] != 0:
+                g.add_edge(j, i, weight=matrix[i][j])
     return g
 
 
@@ -326,13 +326,13 @@ def create_networkx_graph(matrix, complete_population_list, relative=False):
     g = nx.DiGraph()
     g.add_nodes_from(complete_population_list)
 
-    for ii, target in enumerate(complete_population_list):
-        for jj, source in enumerate(complete_population_list):
-            if matrix[ii][jj] != 0:
+    for i, target in enumerate(complete_population_list):
+        for j, source in enumerate(complete_population_list):
+            if matrix[i][j] != 0:
                 if relative:
-                    weight = matrix[ii][jj] / np.sum(matrix[ii][:-1])
+                    weight = matrix[i][j] / np.sum(matrix[i][:-1])
                 else:
-                    weight = matrix[ii][jj]
+                    weight = matrix[i][j]
                 g.add_edge(source, target, weight=weight,
                            distance=np.log10(1. / weight))
     return g
@@ -362,13 +362,13 @@ def create_networkx_area_graph(matrix, area_list, relative=False):
     G = nx.DiGraph()
     G.add_nodes_from(area_list)
 
-    for ii, target in enumerate(area_list):
-        for jj, source in enumerate(area_list):
-            if matrix[ii][jj] > 0:
+    for i, target in enumerate(area_list):
+        for j, source in enumerate(area_list):
+            if matrix[i][j] > 0:
                 if relative:
-                    weight = matrix[ii][jj] / np.sum(matrix[ii])
+                    weight = matrix[i][j] / np.sum(matrix[i])
                 else:
-                    weight = matrix[ii][jj]
+                    weight = matrix[i][j]
                 G.add_edge(source, target, weight=weight,
                            distance=np.log10(1. / weight))
     return G
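
The `modularity()` loop above accumulates the directed, weighted modularity `Q = (1/m) * sum_ij (A_ij - k_out_i * k_in_j / m) * [c_i == c_j]`, one `es.select` call per vertex pair. A dense vectorized sketch of the same quantity (assuming the function divides the accumulated sum by `m` after the hunk shown, per the standard definition):

```python
import numpy as np

def modularity_dense(A, c):
    """Directed weighted modularity of partition c on weight matrix A,
    where A[i, j] is the weight of edge i -> j and c holds module labels."""
    m = A.sum()
    k_out = A.sum(axis=1)                    # weighted out-degrees
    k_in = A.sum(axis=0)                     # weighted in-degrees
    same = c[:, None] == c[None, :]          # co-membership indicator
    return ((A - np.outer(k_out, k_in) / m) * same).sum() / m

A = np.random.rand(6, 6)
c = np.array([0, 0, 0, 1, 1, 1])
print(modularity_dense(A, c))
```
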
diff --git a/figures/Schmidt2018_dyn/Fig2_bistability.py b/figures/Schmidt2018_dyn/Fig2_bistability.py
index dcf78beb698a82df52b805a6d2874c8a0d0dc77b..bd16f49c5bc4206d94596abac957822d2e411bee 100644
--- a/figures/Schmidt2018_dyn/Fig2_bistability.py
+++ b/figures/Schmidt2018_dyn/Fig2_bistability.py
@@ -47,9 +47,9 @@ if LOAD_ORIGINAL_DATA:
 
 labels = ['A', 'B', 'C']
 
-for ii, k in enumerate(['LA', 'HA', 'LA_post']):
-    ax = axes[labels[ii]]
-    ax2 = axes[labels[ii] + '2']
+for i, k in enumerate(['LA', 'HA', 'LA_post']):
+    ax = axes[labels[i]]
+    ax2 = axes[labels[i] + '2']
     print(k)
     matrix = np.zeros((len(M.area_list), 8))
 
@@ -66,11 +66,11 @@ for ii, k in enumerate(['LA', 'HA', 'LA_post']):
 
     matrix = np.transpose(matrix)
 
-    if ii == 0:
+    if i == 0:
         matrix_plot(panel_factory.figure, ax, matrix, position='left')
         rate_histogram_plot(panel_factory.figure, ax2,
                             matrix, position='left')
-    elif ii == 1:
+    elif i == 1:
         matrix_plot(panel_factory.figure, ax, matrix, position='center')
         rate_histogram_plot(panel_factory.figure, ax2,
                             matrix, position='center')
diff --git a/figures/Schmidt2018_dyn/Fig3_ground_state_chi1.py b/figures/Schmidt2018_dyn/Fig3_ground_state_chi1.py
index 998fd0bddaa0d27762ca8a15f3c85253dfbc5dbb..0e0781b002945daf9b297e120fdcfce38b1f1dee 100644
--- a/figures/Schmidt2018_dyn/Fig3_ground_state_chi1.py
+++ b/figures/Schmidt2018_dyn/Fig3_ground_state_chi1.py
@@ -150,8 +150,8 @@ ecolor = myblue
 
 frac_neurons = 0.03
 
-for ii, area in enumerate(areas):
-    ax = axes[labels[ii]]
+for i, area in enumerate(areas):
+    ax = axes[labels[i]]
 
     if area in spike_data:
         n_pops = len(spike_data[area])
@@ -166,10 +166,10 @@ for ii, area in enumerate(areas):
         prev_pop = ''
         yticks = []
         yticklocs = []
-        for jj, pop in enumerate(M.structure[area]):
+        for j, pop in enumerate(M.structure[area]):
             if pop[0:-1] != prev_pop:
                 prev_pop = pop[0:-1]
-                yticks.append('L' + population_labels[jj][0:-1])
+                yticks.append('L' + population_labels[j][0:-1])
                 yticklocs.append(offset - 0.5 * n_to_plot[pop])
             ind = np.where(np.logical_and(
                 spike_data[area][pop][:, 1] <= t_max, spike_data[area][pop][:, 1] >= t_min))
@@ -198,16 +198,15 @@ for ii, area in enumerate(areas):
         ax.set_xticks([t_min, t_min + 250., t_max])
         ax.set_xticklabels([r'$3.$', r'$3.25$', r'$3.5$'])
 
-
         
 def set_boxplot_props(d):
-    for ii in range(len(d['boxes'])):
-        if ii % 2 == 0:
-            d['boxes'][ii].set_facecolor(icolor)
-            d['boxes'][ii].set_color(icolor)
+    for i in range(len(d['boxes'])):
+        if i % 2 == 0:
+            d['boxes'][i].set_facecolor(icolor)
+            d['boxes'][i].set_color(icolor)
         else:
-            d['boxes'][ii].set_facecolor(ecolor)
-            d['boxes'][ii].set_color(ecolor)
+            d['boxes'][i].set_facecolor(ecolor)
+            d['boxes'][i].set_color(ecolor)
     pl.setp(d['whiskers'], color='k')
     pl.setp(d['fliers'], color='k', markerfacecolor='k', marker='+')
     pl.setp(d['medians'], color='none')
@@ -341,32 +340,32 @@ colors = ['0.5', '0.3', '0.0']
 t_min = 500.
 t_max = 10000.
 time = np.arange(500., t_max)
-for ii, area in enumerate(areas[::-1]):
-    ax[ii].spines['right'].set_color('none')
-    ax[ii].spines['top'].set_color('none')
-    ax[ii].yaxis.set_ticks_position("left")
-    ax[ii].xaxis.set_ticks_position("none")
+for i, area in enumerate(areas[::-1]):
+    ax[i].spines['right'].set_color('none')
+    ax[i].spines['top'].set_color('none')
+    ax[i].yaxis.set_ticks_position("left")
+    ax[i].xaxis.set_ticks_position("none")
 
     binned_spikes = rate_time_series[area][np.where(
         np.logical_and(time >= t_min, time < t_max))]
-    ax[ii].plot(time, binned_spikes, color=colors[0], label=area)
+    ax[i].plot(time, binned_spikes, color=colors[0], label=area)
     rate = rate_time_series_auto_kernel[area]
-    ax[ii].plot(time, rate, color=colors[2], label=area)
-    ax[ii].set_xlim((500., t_max))
+    ax[i].plot(time, rate, color=colors[2], label=area)
+    ax[i].set_xlim((500., t_max))
 
-    ax[ii].text(9000., 4.7, area)
+    ax[i].text(9000., 4.7, area)
 
-    if ii > 0:
-        ax[ii].spines['bottom'].set_color('none')
-        ax[ii].set_xticks([])
+    if i > 0:
+        ax[i].spines['bottom'].set_color('none')
+        ax[i].set_xticks([])
     else:
-        ax[ii].set_xticks([1000., 5000., 10000.])
-        ax[ii].set_xticklabels([r'$1.$', r'$5.$', r'$10.$'])
-    if ii == 1:
-        ax[ii].set_ylabel(r'Rate (spikes/s)')
+        ax[i].set_xticks([1000., 5000., 10000.])
+        ax[i].set_xticklabels([r'$1.$', r'$5.$', r'$10.$'])
+    if i == 1:
+        ax[i].set_ylabel(r'Rate (spikes/s)')
 
-    ax[ii].set_yticks([0., 10.])
-    ax[ii].set_ylim((0., 11.))
+    ax[i].set_yticks([0., 10.])
+    ax[i].set_ylim((0., 11.))
 ax[0].set_xlabel('Time (s)', labelpad=-0.05)
 
 fig.subplots_adjust(left=0.05, right=0.95, top=0.95,
diff --git a/figures/Schmidt2018_dyn/Fig4_metastability.py b/figures/Schmidt2018_dyn/Fig4_metastability.py
index 756f3f86eddc2c275f29a9de0b0e8f3cc6d1379c..820c714216db76178108b29f520cb0fad096f0c4 100644
--- a/figures/Schmidt2018_dyn/Fig4_metastability.py
+++ b/figures/Schmidt2018_dyn/Fig4_metastability.py
@@ -170,8 +170,8 @@ Plotting
 """
 print("Plotting rate time series")
 area = 'V1'
-for ii, (cc, label) in enumerate(zip(chi_list, labels)):
-    ax = ax_rates[ii]
+for i, (cc, label) in enumerate(zip(chi_list, labels)):
+    ax = ax_rates[i]
     ax.spines['right'].set_color('none')
     ax.spines['top'].set_color('none')
     ax.yaxis.set_ticks_position("left")
@@ -182,7 +182,7 @@ for ii, (cc, label) in enumerate(zip(chi_list, labels)):
     ax.set_ylim((-5., 60.))
     ax.set_yticks([5., 40.])
     ax.text(51500., 48., r'$\chi = $' + str(cc))
-    if ii == len(labels) - 1:
+    if i == len(labels) - 1:
         ax.vlines(0., 0., 40.)
         ax.hlines(0., 0., 2.)
 
@@ -191,7 +191,7 @@ for ii, (cc, label) in enumerate(zip(chi_list, labels)):
         ax.set_xticklabels([0, 10, 20, 30, 40, 50])
     else:
         ax.set_xticks([])
-    if ii == 3:
+    if i == 3:
         ax.set_ylabel(r'Rate $(\mathrm{spikes/s})$')
 
 print("Plotting critical eigenvalues")
@@ -246,13 +246,13 @@ ax.set_yticklabels(chi_list[:-1])
 
         
 load_path = 'Fig4_theory_data'
-for ii, cc_weights_factor in enumerate(chi_list):
-    ax = ax_phasespace[ii]
+for i, cc_weights_factor in enumerate(chi_list):
+    ax = ax_phasespace[i]
     ax.spines['right'].set_color('none')
     ax.spines['left'].set_color('none')
     ax.spines['top'].set_color('none')
     ax.set_xticks([0., 5., 35.])
-    if ii != len(labels) - 1:
+    if i != len(labels) - 1:
         ax.set_xticklabels([])
     else:
         ax.set_xlabel(r'Average activity ($\mathrm{spikes/s}$)')
diff --git a/figures/Schmidt2018_dyn/Fig5_ground_state.py b/figures/Schmidt2018_dyn/Fig5_ground_state.py
index 7c7230dd447bc5748096faefae132629799bd863..e5384ebc3c47d9733403ad8f5d04d280050cdfc4 100644
--- a/figures/Schmidt2018_dyn/Fig5_ground_state.py
+++ b/figures/Schmidt2018_dyn/Fig5_ground_state.py
@@ -153,8 +153,8 @@ ecolor = myblue
 
 frac_neurons = 0.03
 
-for ii, area in enumerate(areas):
-    ax = axes[labels[ii]]
+for i, area in enumerate(areas):
+    ax = axes[labels[i]]
 
     if area in spike_data:
         n_pops = len(spike_data[area])
@@ -204,13 +204,13 @@ for ii, area in enumerate(areas):
 
         
 def set_boxplot_props(d):
-    for ii in range(len(d['boxes'])):
-        if ii % 2 == 0:
-            d['boxes'][ii].set_facecolor(icolor)
-            d['boxes'][ii].set_color(icolor)
+    for i in range(len(d['boxes'])):
+        if i % 2 == 0:
+            d['boxes'][i].set_facecolor(icolor)
+            d['boxes'][i].set_color(icolor)
         else:
-            d['boxes'][ii].set_facecolor(ecolor)
-            d['boxes'][ii].set_color(ecolor)
+            d['boxes'][i].set_facecolor(ecolor)
+            d['boxes'][i].set_color(ecolor)
     pl.setp(d['whiskers'], color='k')
     pl.setp(d['fliers'], color='k', markerfacecolor='k', marker='+')
     pl.setp(d['medians'], color='none')
@@ -340,31 +340,31 @@ colors = ['0.5', '0.3', '0.0']
 t_min = 500.
 t_max = 10000.
 time = np.arange(500., t_max)
-for ii, area in enumerate(areas[::-1]):
-    ax[ii].spines['right'].set_color('none')
-    ax[ii].spines['top'].set_color('none')
-    ax[ii].yaxis.set_ticks_position("left")
-    ax[ii].xaxis.set_ticks_position("none")
+for i, area in enumerate(areas[::-1]):
+    ax[i].spines['right'].set_color('none')
+    ax[i].spines['top'].set_color('none')
+    ax[i].yaxis.set_ticks_position("left")
+    ax[i].xaxis.set_ticks_position("none")
 
     binned_spikes = rate_time_series[area][np.where(
         np.logical_and(time >= t_min, time < t_max))]
-    ax[ii].plot(time, binned_spikes, color=colors[0], label=area)
+    ax[i].plot(time, binned_spikes, color=colors[0], label=area)
     rate = rate_time_series_auto_kernel[area]
-    ax[ii].plot(time, rate, color=colors[2], label=area)
-    ax[ii].set_xlim((500., t_max))
+    ax[i].plot(time, rate, color=colors[2], label=area)
+    ax[i].set_xlim((500., t_max))
 
-    ax[ii].text(0.8, 0.7, area, transform=ax[ii].transAxes)
+    ax[i].text(0.8, 0.7, area, transform=ax[i].transAxes)
 
-    if ii > 0:
-        ax[ii].spines['bottom'].set_color('none')
-        ax[ii].set_xticks([])
-        ax[ii].set_yticks([0., 30.])
+    if i > 0:
+        ax[i].spines['bottom'].set_color('none')
+        ax[i].set_xticks([])
+        ax[i].set_yticks([0., 30.])
     else:
-        ax[ii].set_xticks([1000., 5000., 10000.])
-        ax[ii].set_xticklabels([r'$1.$', r'$5.$', r'$10.$'])
-        ax[ii].set_yticks([0., 5.])
-    if ii == 1:
-        ax[ii].set_ylabel(r'Rate (spikes/s)')
+        ax[i].set_xticks([1000., 5000., 10000.])
+        ax[i].set_xticklabels([r'$1.$', r'$5.$', r'$10.$'])
+        ax[i].set_yticks([0., 5.])
+    if i == 1:
+        ax[i].set_ylabel(r'Rate (spikes/s)')
 
 ax[0].set_xlabel('Time (s)', labelpad=-0.05)
 
diff --git a/figures/Schmidt2018_dyn/Fig8_interactions.py b/figures/Schmidt2018_dyn/Fig8_interactions.py
index 7f88acb523ad03b2aba49af7ddf77d3125eced2b..528dadf6291826326356a0f10d7e587d1b044485 100644
--- a/figures/Schmidt2018_dyn/Fig8_interactions.py
+++ b/figures/Schmidt2018_dyn/Fig8_interactions.py
@@ -275,11 +275,11 @@ ax.plot([1.9], cc_bold, '.',
 # Correlation between exp. FC and structural connectivity
 # Construct the structural connectivity as the matrix of relative
 conn_matrix = np.zeros((len(M.area_list), len(M.area_list)))
-for ii, area1 in enumerate(M.area_list):
+for i, area1 in enumerate(M.area_list):
     s = np.sum(list(M.K_areas[area1].values()))
-    for jj, area2 in enumerate(M.area_list):
+    for j, area2 in enumerate(M.area_list):
         value = M.K_areas[area1][area2] / s
-        conn_matrix[ii][jj] = value
+        conn_matrix[i][j] = value
 
 
 cc = np.corrcoef(zero_diagonal(conn_matrix).flatten(),
diff --git a/figures/Schmidt2018_dyn/Fig9_laminar_interactions.py b/figures/Schmidt2018_dyn/Fig9_laminar_interactions.py
index 67860a9156dd6c406238378a9acc00a070f597db..0bab2ddd11b38e78c1b9caf81ed2563fbfbc734e 100644
--- a/figures/Schmidt2018_dyn/Fig9_laminar_interactions.py
+++ b/figures/Schmidt2018_dyn/Fig9_laminar_interactions.py
@@ -79,8 +79,8 @@ Bottom row
 gs1 = gridspec.GridSpec(2, 3)
 gs1.update(left=0.1, right=0.95, top=0.95, wspace=0.4, bottom=0.3)
 
-for ii, ax_label in enumerate(['A', 'B']):
-    ax = pl.subplot(gs1[ii:ii + 1, :])
+for i, ax_label in enumerate(['A', 'B']):
+    ax = pl.subplot(gs1[i:i + 1, :])
     ax.spines['right'].set_color('none')
     ax.spines['top'].set_color('none')
     ax.spines['left'].set_color('none')
@@ -122,10 +122,10 @@ for target_area in gc:
             prop[grad]['significant'] += s_sign
 
 colors = ['0.1', '0.1', '0.1', mypurple]
-for ii, typ in enumerate(['HL', 'HZ', 'HL', 'same-area']):
-    ax.bar([(ii + 1) / 5.], [prop[typ]['significant'] / prop[typ]['total']],
+for i, typ in enumerate(['HL', 'HZ', 'LH', 'same-area']):
+    ax.bar([(i + 1) / 5.], [prop[typ]['significant'] / prop[typ]['total']],
            width=0.2,
-           color=colors[ii])
+           color=colors[i])
 
 s_total_overall = 0
 s_sign_overall = 0
@@ -159,7 +159,7 @@ ax.xaxis.set_ticks_position("none")
 balance_EI = {}
 NI_overall = 0
 NE_overall = 0
-for ii, typ in enumerate(['HL', 'HZ', 'HL']):
+for i, typ in enumerate(['HL', 'HZ', 'LH']):
     C = Counter(significant_channels[typ])
     NI, NE = 0, 0
     for channel in C:
@@ -172,7 +172,7 @@ for ii, typ in enumerate(['HL', 'HZ', 'HL']):
     balance_EI[typ] = {'E': float(NE) / (NE + NI),
                        'I': float(NI) / (NE + NI)}
 
-    ax.bar(np.array([0, 1]) + (ii + 1) / 5., [balance_EI[typ]['E'], balance_EI[typ]['I']],
+    ax.bar(np.array([0, 1]) + (i + 1) / 5., [balance_EI[typ]['E'], balance_EI[typ]['I']],
            width=0.2,
            color=[myblue, myred],
            edgecolor='1.')
diff --git a/figures/Schmidt2018_dyn/Fig9_path_analysis.py b/figures/Schmidt2018_dyn/Fig9_path_analysis.py
index 63f2100dd42ea0c0f9469c7bd7e8632457aebcfb..84f383e679204ddc07fc5808a542cc594290e43e 100644
--- a/figures/Schmidt2018_dyn/Fig9_path_analysis.py
+++ b/figures/Schmidt2018_dyn/Fig9_path_analysis.py
@@ -78,12 +78,12 @@ for area in M.area_list:
             paths[target_pop][source_pop] = []
 
 path_length_matrix = np.zeros((254, 254))
-for ii, source in enumerate(M.structure_vec):
-    for jj, target in enumerate(M.structure_vec):
+for i, source in enumerate(M.structure_vec):
+    for j, target in enumerate(M.structure_vec):
         if target in path_lengths[source]:
-            path_length_matrix[jj][ii] = path_lengths[source][target]
+            path_length_matrix[j][i] = path_lengths[source][target]
         else:
-            path_length_matrix[jj][ii] = np.inf
+            path_length_matrix[j][i] = np.inf
 
 # Create dictionary containing the shortest path between any pair of areas
 CC_paths = {area: {} for area in M.area_list}
diff --git a/figures/Schmidt2018_dyn/Fig9_significant_channels.py b/figures/Schmidt2018_dyn/Fig9_significant_channels.py
index e57f53af445d0a121c7e94e72e9e5f41b467aaca..beb495df19d933247a85e442431cd0ac54ff398a 100644
--- a/figures/Schmidt2018_dyn/Fig9_significant_channels.py
+++ b/figures/Schmidt2018_dyn/Fig9_significant_channels.py
@@ -42,19 +42,18 @@ for area in M.area_list:
 def area_pair_matrix(target_area, source_area):
     matrix = np.nan * np.zeros((len(M.structure[target_area]),
                                 len(M.structure[source_area])))
-    for ii, target_pop in enumerate(M.structure[target_area]):
-        for jj, source_pop in enumerate(M.structure[source_area]):
+    for i, target_pop in enumerate(M.structure[target_area]):
+        for j, source_pop in enumerate(M.structure[source_area]):
             if source_area in gc[target_area][target_pop]:
                 if source_pop in gc[target_area][target_pop][source_area]:
-                    matrix[ii][jj] = gc[target_area][target_pop][source_area][source_pop][1]
+                    matrix[i][j] = gc[target_area][target_pop][source_area][source_pop][1]
     return np.ma.masked_where(np.isnan(matrix), matrix)
 
 
 def significant_pop_pairs(target_area, source_area):
     significant_pop_pairs = []
-    # for ii, target_pop in enumerate(M.structure[target_area]):
-    for ii, target_pop in enumerate(gc[target_area].keys()):
-        for jj, source_pop in enumerate(M.structure[source_area]):
+    for i, target_pop in enumerate(gc[target_area].keys()):
+        for j, source_pop in enumerate(M.structure[source_area]):
             if source_area in gc[target_area][target_pop]:
                 if source_pop in gc[target_area][target_pop][source_area]:
                     if gc[target_area][target_pop][source_area][source_pop][1] < 0.05:
diff --git a/figures/Schmidt2018_dyn/process_chu2014_data.py b/figures/Schmidt2018_dyn/process_chu2014_data.py
index 1e172d4a6f2edbbbdf227f867da6d20aa00dc559..4bf4fc85b824abd7e491bf78f7e2c75609fa159a 100644
--- a/figures/Schmidt2018_dyn/process_chu2014_data.py
+++ b/figures/Schmidt2018_dyn/process_chu2014_data.py
@@ -55,9 +55,9 @@ Take only neurons of the first 113 electrodes into account which are
 less than 1 mm apart
 """
 ind_mm = []
-for ii, id in enumerate(data_pvc5['ids']):
+for i, id in enumerate(data_pvc5['ids']):
     if int(id_to_channel[id]) <= 112:
-        ind_mm.append(ii)
+        ind_mm.append(i)
 
 np.save(os.path.join(save_path,
                      'spike_data_1mm.npy'),
@@ -135,8 +135,8 @@ for ti in t[ind_high]:
     time_ind_high += list(np.where(np.logical_and(time > 1e3*(ti - 5.),
                                                   time <= 1e3*(ti+5.)))[0])
 
-for ii, (phase, times) in enumerate(zip(['low_fluct', 'high_fluct', 'full'],
-                                        [time_ind_low, time_ind_high, None])):
+for phase, times in zip(['low_fluct', 'high_fluct', 'full'],
+                        [time_ind_low, time_ind_high, None]):
     if times is not None:
         rate = pop_rate[times]
     else:
diff --git a/multiarea_model/analysis_helpers.py b/multiarea_model/analysis_helpers.py
index 12a6ea961c54ad5861505f65379cdd79d9e3354f..ce4c2e47e49425a9d85e0334354af7db95ed864c 100644
--- a/multiarea_model/analysis_helpers.py
+++ b/multiarea_model/analysis_helpers.py
@@ -415,13 +415,14 @@ def pop_rate_distribution(data_array, t_min, t_max, num_neur):
     else:  # No spikes in [t_min, t_max]
         n = None
     rates = np.zeros(int(num_neur))
-    ii = 0
+    s = 0
     for i in range(neurons.size):
         if neurons[i] == n:
-            rates[ii] += 1
+            rates[s] += 1
         else:
             n = neurons[i]
-            ii += 1
+            s += 1
+            rates[s] += 1  # the spike that starts a new neuron also counts
     rates /= (t_max - t_min) / 1000.
     vals, bins = np.histogram(rates, bins=100)
     vals = vals / float(np.sum(vals))
@@ -476,10 +476,10 @@ def pop_rate_time_series(data_array, num_neur, t_min, t_max,
         rates = np.array([])
         last_time_step = times[0]
 
-        for ii in range(1, times.size):
+        for i in range(1, times.size):
             rates = np.append(
-                rates, rate[ii - 1] * np.ones_like(np.arange(last_time_step, times[ii], 1.0)))
-            last_time_step = times[ii]
+                rates, rate[i - 1] * np.ones_like(np.arange(last_time_step, times[i], 1.0)))
+            last_time_step = times[i]
 
         time_series = rates
     else:
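
The cursor walk in `pop_rate_distribution` above counts spikes per neuron from an array sorted by sender id, filling one slot per active neuron. A compact alternative sketch of the same per-neuron rates (silent neurons keep rate 0, matching the zero-initialized array):

```python
import numpy as np

def per_neuron_rates(neuron_ids, t_min, t_max, num_neur):
    """Firing rate per neuron from a sorted array of spike senders."""
    _, counts = np.unique(neuron_ids, return_counts=True)
    rates = np.zeros(int(num_neur))
    rates[:counts.size] = counts             # one slot per active neuron
    return rates / ((t_max - t_min) / 1000.)

print(per_neuron_rates(np.array([3, 3, 7, 7, 7]), 0., 1000., 5))
```
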
diff --git a/multiarea_model/data_multiarea/Model.py b/multiarea_model/data_multiarea/Model.py
index 11c14f504683174d5b1c5535546eab04c37d4c4b..6d68fba7906b192ba3631f498599ababfcf5f8b3 100644
--- a/multiarea_model/data_multiarea/Model.py
+++ b/multiarea_model/data_multiarea/Model.py
@@ -690,9 +690,9 @@ def compute_Model_params(out_label='', mode='default'):
                 elif source_pop in origin_patterns['I']:
                     X = 1.0 - SLN_Data[target_area][source_area]
                     infra_neurons = 0.0
-                    for ii in origin_patterns['I']:
+                    for i in origin_patterns['I']:
                         infra_neurons += neuronal_numbers_fullscale[
-                            source_area][ii]
+                            source_area][i]
                     Y = num_source / infra_neurons
 
             # target side
@@ -721,12 +721,12 @@ def compute_Model_params(out_label='', mode='default'):
                 p_T = np.sum(10 ** tp[np.where(tp > 0.)[0]])
                 Nsyn = 0.0
                 su = 0.
-                for ii in range(len(T)):
-                    if T[ii] in [2, 3]:
+                for i in range(len(T)):
+                    if T[i] in [2, 3]:
                         syn_layer = '23'
                     else:
-                        syn_layer = str(T[ii])
-                    Z = 10 ** tp[np.where(tp > 0.)[0]][ii] / p_T
+                        syn_layer = str(T[i])
+                    Z = 10 ** tp[np.where(tp > 0.)[0]][i] / p_T
                     if target_pop in synapse_to_cell_body[target_area][syn_layer]:
                         Nsyn += synapse_to_cell_body[target_area][syn_layer][
                             target_pop] * Nsyn_tot * FLN_BA * X * Y * Z
@@ -744,9 +744,9 @@ def compute_Model_params(out_label='', mode='default'):
                     T = termination_layers['F']
 
                 p_T = 0.0
-                for ii in T:
-                    if ii != '1':
-                        p_T += laminar_thicknesses[target_area][ii]
+                for i in T:
+                    if i != '1':
+                        p_T += laminar_thicknesses[target_area][i]
 
                 Nsyn = 0.0
                 for syn_layer in T:
@@ -863,23 +863,23 @@ def compute_Model_params(out_label='', mode='default'):
     for area in area_list:
         nonvisual_fraction_matrix = np.zeros(
             (len(structure[area]) + 1, len(structure[area])))
-        for ii in range(len(structure[area])):
+        for i in range(len(structure[area])):
             nonvisual_fraction_matrix[
-                ii] = 1. / len(structure[area]) * np.ones(len(structure[area]))
-            nonvisual_fraction_matrix[ii][ii] -= 1
+                i] = 1. / len(structure[area]) * np.ones(len(structure[area]))
+            nonvisual_fraction_matrix[i][i] -= 1
 
-        for ii in range(len(structure[area])):
+        for i in range(len(structure[area])):
             nonvisual_fraction_matrix[-1][
-                ii] = neuronal_numbers[area][structure[area][ii]]
+                i] = neuronal_numbers[area][structure[area][i]]
 
         vector = np.zeros(len(structure[area]) + 1)
         ext_syn = External_synapses[area]
         vector[-1] = ext_syn
         solution, residues, rank, s = np.linalg.lstsq(
             nonvisual_fraction_matrix, vector)
-        for ii, pop in enumerate(structure[area]):
+        for i, pop in enumerate(structure[area]):
             synapse_numbers[area][pop]['external'] = {
-                'external': solution[ii] * neuronal_numbers[area][pop]}
+                'external': solution[i] * neuronal_numbers[area][pop]}
 
     synapse_numbers['TH']['4E']['external'] = {'external': 0.0}
     synapse_numbers['TH']['4I']['external'] = {'external': 0.0}
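
The least-squares system solved above has a closed-form reading: the first rows enforce `mean(x) - x_i = 0` (equal external indegree per neuron across populations) and the last row enforces `sum_i N_i * x_i = ext_syn`, so the solution is `x_i = ext_syn / sum(N)`. A toy check of that reading (population sizes and totals are made up; `rcond=None` silences the warning the bare `lstsq` call above triggers on newer NumPy):

```python
import numpy as np

N = np.array([1000., 2000., 500.])            # toy population sizes
ext_syn = 3.5e6                               # toy total external synapses
n = N.size
A = np.vstack([np.full((n, n), 1. / n) - np.eye(n), N])
b = np.append(np.zeros(n), ext_syn)
x, *_ = np.linalg.lstsq(A, b, rcond=None)
print(x, ext_syn / N.sum())                   # all entries equal the ratio
```
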
diff --git a/multiarea_model/data_multiarea/VisualCortex_Data.py b/multiarea_model/data_multiarea/VisualCortex_Data.py
index 60d5ee03a8017ff0339d2d569884a25e8d74404f..4f062e47317377b79388807bd69fcd81ef413ac3 100644
--- a/multiarea_model/data_multiarea/VisualCortex_Data.py
+++ b/multiarea_model/data_multiarea/VisualCortex_Data.py
@@ -129,9 +129,9 @@ def process_raw_data():
                             names=['area', 'level'])
 
     hierarchy = {area: level for area, level in hier_temp.values}
-    for ii in hierarchy.keys():
-        if hierarchy[ii] != '?':
-            hierarchy[ii] = float(hierarchy[ii])
+    for i in hierarchy.keys():
+        if hierarchy[i] != '?':
+            hierarchy[i] = float(hierarchy[i])
     hierarchy['MIP'] = .5
     hierarchy['MDP'] = .5
 
@@ -142,9 +142,9 @@ def process_raw_data():
                             names=['area', 'level', 'rescaled level'])
 
     hierarchy_markov = {}
-    for ii in range(len(hier_temp)):
-        hierarchy_markov[hier_temp.iloc[ii]['area']] = {'level': hier_temp.iloc[ii][
-            'level'], 'rescaled level': hier_temp.iloc[ii]['rescaled level']}
+    for i in range(len(hier_temp)):
+        hierarchy_markov[hier_temp.iloc[i]['area']] = {'level': hier_temp.iloc[i][
+            'level'], 'rescaled level': hier_temp.iloc[i]['rescaled level']}
 
     """
     2. Neuronal densities
@@ -203,9 +203,9 @@ def process_raw_data():
                        skiprows=2,
                        names=['area', 'structure'])
     architecture = {area: architecture for area, architecture in temp.values}
-    for ii in architecture:
-        if architecture[ii] != '?':
-            architecture[ii] = int(architecture[ii])
+    for i in architecture:
+        if architecture[i] != '?':
+            architecture[i] = int(architecture[i])
 
     """
     4. Distances
@@ -216,12 +216,12 @@ def process_raw_data():
         temp = next(myreader)
         areas = temp
         median_distance_data = {}
-        for ii in range(1, 82, 1):
+        for i in range(1, 82, 1):
             temp = next(myreader)
             dict_ = {}
-            for i in range(1, 82, 1):
-                dict_[areas[i]] = float(temp[i])
-            median_distance_data[areas[ii]] = dict_
+            for j in range(1, 82, 1):
+                dict_[areas[j]] = float(temp[j])
+            median_distance_data[areas[i]] = dict_
 
     with open(os.path.join(datapath, 'Thom_Distances.csv'), 'rt') as f:
         myreader = csv.reader(f, delimiter='\t')
@@ -229,12 +229,12 @@ def process_raw_data():
         temp = next(myreader)
         areas = temp
         thom_distance_data = {}
-        for ii in range(1, 33, 1):
+        for i in range(1, 33, 1):
             temp = next(myreader)
             dict_ = {}
-            for i in range(1, 33, 1):
-                dict_[areas[i]] = float(temp[i])
-            thom_distance_data[areas[ii]] = dict_
+            for j in range(1, 33, 1):
+                dict_[areas[j]] = float(temp[j])
+            thom_distance_data[areas[i]] = dict_
 
     # Distances for area in the parcellation used by Markov et al. (2014)
     with open(os.path.join(datapath, 'Thom_Distances_MERetal12.csv'), 'rt') as f:
@@ -243,12 +243,12 @@ def process_raw_data():
         temp = next(myreader)
         areas = temp
         thom_distance_data_markov = {}
-        for ii in range(1, 93, 1):
+        for i in range(1, 93, 1):
             temp = next(myreader)
             dict_ = {}
-            for i in range(1, 93, 1):
-                dict_[areas[i]] = float(temp[i])
-            thom_distance_data_markov[areas[ii]] = dict_
+            for j in range(1, 93, 1):
+                dict_[areas[j]] = float(temp[j])
+            thom_distance_data_markov[areas[i]] = dict_
 
     with open(os.path.join(datapath, 'Euclidean_Distances.csv'), 'rt') as f:
         myreader = csv.reader(f, delimiter='\t')
@@ -256,12 +256,12 @@ def process_raw_data():
         temp = next(myreader)
         areas = temp
         euclidean_distance_data = {}
-        for ii in range(1, 33, 1):
+        for i in range(1, 33, 1):
             temp = next(myreader)
             dict_ = {}
-            for i in range(1, 33, 1):
-                dict_[areas[i]] = float(temp[i])
-            euclidean_distance_data[areas[ii]] = dict_
+            for j in range(1, 33, 1):
+                dict_[areas[j]] = float(temp[j])
+            euclidean_distance_data[areas[i]] = dict_
 
     """
     5. Surface areas
@@ -392,7 +392,7 @@ def process_raw_data():
         myreader = csv.reader(f, delimiter='\t')
         skip_header()
         Intrinsic_FLN_Data = {}
-        for ii in range(4):
+        for i in range(4):
             temp = next(myreader)
             dict_ = {'mean': float(temp[1]), 'error': float(temp[2])}
             Intrinsic_FLN_Data[temp[0]] = dict_
@@ -445,12 +445,12 @@ def process_raw_data():
 
         areas = temp
 
-        for ii in range(1, 9, 1):
+        for i in range(1, 9, 1):
             temp = next(myreader)
             dict_ = {}
-            for i in range(1, 10, 1):
-                dict_[areas[i]] = float(temp[i])
-            intrinsic_connectivity[areas[ii]] = dict_
+            for j in range(1, 10, 1):
+                dict_[areas[j]] = float(temp[j])
+            intrinsic_connectivity[areas[i]] = dict_
 
     """
     11. Numbers of neurons and external inputs in V1
@@ -459,7 +459,7 @@ def process_raw_data():
         myreader = csv.reader(f, delimiter='\t')
         skip_header()
         num_V1 = {}
-        for ii in range(0, 9, 1):
+        for i in range(0, 9, 1):
             temp = next(myreader)
             num_V1[temp[0]] = {'neurons': float(
                 temp[1]), 'ext_inputs': float(temp[2])}
@@ -473,7 +473,7 @@ def process_raw_data():
         skip_header()
         Laminar_Thickness_cat = {}
 
-        for ii in range(10):
+        for i in range(10):
             temp = next(myreader)
             Laminar_Thickness_cat[temp[0]] = {
                 'thickness': float(temp[1]), 'error': float(temp[2])}
@@ -484,9 +484,9 @@ def process_raw_data():
         myreader = csv.reader(f, delimiter='\t')
         skip_header()
         names = next(myreader)[1:16]
-        for ii in range(0, len(names)):
-            names[ii] = re.sub('L', '', names[ii])
-            names[ii] = re.sub('/', '', names[ii])
+        for i in range(0, len(names)):
+            names[i] = re.sub('L', '', names[i])
+            names[i] = re.sub('/', '', names[i])
         laminar_thicknesses = {}
         line = True
         while line:
@@ -496,29 +496,29 @@ def process_raw_data():
                 line = False
             if temp[0] in laminar_thicknesses:
                 if np.isscalar(laminar_thicknesses[temp[0]][names[0]]):
-                    for jj in range(len(temp) - 3):
-                        if temp[jj + 1]:
-                            laminar_thicknesses[temp[0]][names[jj]] = [
-                                laminar_thicknesses[temp[0]][names[jj]]] + [float(temp[jj + 1])]
+                    for j in range(len(temp) - 3):
+                        if temp[j + 1]:
+                            laminar_thicknesses[temp[0]][names[j]] = [
+                                laminar_thicknesses[temp[0]][names[j]]] + [float(temp[j + 1])]
                         else:
-                            laminar_thicknesses[temp[0]][names[jj]] = [
-                                laminar_thicknesses[temp[0]][names[jj]]] + [np.nan]
+                            laminar_thicknesses[temp[0]][names[j]] = [
+                                laminar_thicknesses[temp[0]][names[j]]] + [np.nan]
                 else:
-                    for jj in range(len(temp) - 3):
-                        if temp[jj + 1]:
-                            laminar_thicknesses[temp[0]][names[jj]] = laminar_thicknesses[
-                                temp[0]][names[jj]] + [float(temp[jj + 1])]
+                    for j in range(len(temp) - 3):
+                        if temp[j + 1]:
+                            laminar_thicknesses[temp[0]][names[j]] = laminar_thicknesses[
+                                temp[0]][names[j]] + [float(temp[j + 1])]
                         else:
-                            laminar_thicknesses[temp[0]][names[jj]] = laminar_thicknesses[
-                                temp[0]][names[jj]] + [np.nan]
+                            laminar_thicknesses[temp[0]][names[j]] = laminar_thicknesses[
+                                temp[0]][names[j]] + [np.nan]
             else:
                 laminar_thicknesses[temp[0]] = {}
-                for jj in range(len(temp) - 3):
-                    if temp[jj + 1]:
+                for j in range(len(temp) - 3):
+                    if temp[j + 1]:
                         laminar_thicknesses[temp[0]
-                                            ][names[jj]] = float(temp[jj + 1])
+                                            ][names[j]] = float(temp[j + 1])
                     else:
-                        laminar_thicknesses[temp[0]][names[jj]] = np.nan
+                        laminar_thicknesses[temp[0]][names[j]] = np.nan
 
     """
     13. Total cortical thicknesses from Barbas lab
@@ -531,7 +531,7 @@ def process_raw_data():
         for area in area_list:
             total_thickness_data[area] = np.nan
 
-        for ii in range(0, 30, 1):
+        for i in range(0, 30, 1):
             temp = next(myreader)
             if temp[4]:
                 total_thickness_data[temp[0]] = float(
@@ -550,8 +550,8 @@ def process_raw_data():
         else:
             translation[i] = np.array([j])
 
-    for ii in translation:
-        translation[ii] = list(np.unique(translation[ii]))
+    for i in translation:
+        translation[i] = list(np.unique(translation[i]))
 
     f = open(os.path.join(datapath, 'overlap.json'), 'r')
     overlap = json.load(f)
@@ -566,7 +566,7 @@ def process_raw_data():
 
         pre_cells = next(myreader)
         binzegger_data = {}
-        for ii in range(1, 38, 1):
+        for i in range(1, 38, 1):
             temp = next(myreader)
             if temp[0] != '':
                 subdict = {}
@@ -574,11 +574,11 @@ def process_raw_data():
                     syn_layer = re.sub("\D", "", re.sub(".*\(", "", temp[0]))
                 else:
                     syn_layer = re.sub("\D", "", temp[1])
-                for jj in range(3, len(temp), 1):
+                for j in range(3, len(temp), 1):
                     try:
-                        subdict[pre_cells[jj]] = float(temp[jj])
+                        subdict[pre_cells[j]] = float(temp[j])
                     except:
-                        subdict[pre_cells[jj]] = temp[jj]
+                        subdict[pre_cells[j]] = temp[j]
                 if temp[0] in binzegger_data:
                     binzegger_data[temp[0]]['syn_dict'].update(
                         {syn_layer: subdict})
@@ -672,26 +672,26 @@ def process_raw_data():
 
         # map Neuronal density data to FV91 scheme
         neuronal_density_data_updated_FV91 = {}
-        for ii in list(neuronal_density_data_updated.keys()):
-            if ii not in area_list:
-                if ii in translation:
-                    areas_FV91 = translation[ii]
+        for i in list(neuronal_density_data_updated.keys()):
+            if i not in area_list:
+                if i in translation:
+                    areas_FV91 = translation[i]
                     for kk in areas_FV91:
                         neuronal_density_data_updated_FV91[
-                            kk] = neuronal_density_data_updated[ii]
+                            kk] = neuronal_density_data_updated[i]
             else:
                 neuronal_density_data_updated_FV91[
-                    ii] = neuronal_density_data_updated[ii]
+                    i] = neuronal_density_data_updated[i]
 
         neuronal_density_data_FV91 = {}
-        for ii in list(neuronal_density_data.keys()):
-            if ii not in area_list:
-                areas_FV91 = translation[ii]
+        for i in list(neuronal_density_data.keys()):
+            if i not in area_list:
+                areas_FV91 = translation[i]
                 for kk in areas_FV91:
-                    neuronal_density_data_FV91[kk] = neuronal_density_data[ii].copy(
+                    neuronal_density_data_FV91[kk] = neuronal_density_data[i].copy(
                     )
             else:
-                neuronal_density_data_FV91[ii] = neuronal_density_data[ii].copy()
+                neuronal_density_data_FV91[i] = neuronal_density_data[i].copy()
 
         # map Neuronal density data to 4 layers by dividing
         neuronal_density_data_FV91_4layers = {}
@@ -700,28 +700,28 @@ def process_raw_data():
                                                            '4': {'value': 0.0, 'error': 0.0},
                                                            '5': 0., '6': 0.}})
 
-        for ii in list(neuronal_density_data_FV91_4layers.keys()):
+        for i in list(neuronal_density_data_FV91_4layers.keys()):
             for layer in ['23', '4', '56']:
-                if neuronal_density_data_FV91[ii][layer]['value'] > 0.:
+                if neuronal_density_data_FV91[i][layer]['value'] > 0.:
                     # Assign equal density to layers 5 and 6
                     if layer == '56':
-                        neuronal_density_data_FV91_4layers[ii]['5'] = neuronal_density_data_FV91[
-                            ii]['56']['value'] * gradient + intercept
-                        neuronal_density_data_FV91_4layers[ii]['6'] = neuronal_density_data_FV91[
-                            ii]['56']['value'] * gradient + intercept
+                        neuronal_density_data_FV91_4layers[i]['5'] = neuronal_density_data_FV91[
+                            i]['56']['value'] * gradient + intercept
+                        neuronal_density_data_FV91_4layers[i]['6'] = neuronal_density_data_FV91[
+                            i]['56']['value'] * gradient + intercept
                     else:
-                        neuronal_density_data_FV91_4layers[ii][layer] = neuronal_density_data_FV91[
-                            ii][layer]['value'] * gradient + intercept
+                        neuronal_density_data_FV91_4layers[i][layer] = neuronal_density_data_FV91[
+                            i][layer]['value'] * gradient + intercept
                 else:
-                    neuronal_density_data_FV91_4layers[ii][layer] = 0.
+                    neuronal_density_data_FV91_4layers[i][layer] = 0.
             # if there is Nissl data, then take it, otherwise
             # transform NeuN data with the linear fit
-            if ii in neuronal_density_data_updated_FV91:
-                neuronal_density_data_FV91_4layers[ii][
-                    'overall'] = neuronal_density_data_updated_FV91[ii]
+            if i in neuronal_density_data_updated_FV91:
+                neuronal_density_data_FV91_4layers[i][
+                    'overall'] = neuronal_density_data_updated_FV91[i]
             else:
-                neuronal_density_data_FV91_4layers[ii]['overall'] = neuronal_density_data_FV91[
-                    ii]['overall']['value'] * gradient + intercept
+                neuronal_density_data_FV91_4layers[i]['overall'] = neuronal_density_data_FV91[
+                    i]['overall']['value'] * gradient + intercept
 
         """
         We build a dictionary containing neuron densities
@@ -760,11 +760,11 @@ def process_raw_data():
 
         for x in range(8):
             dict_ = {}
-            for ii in list(neuronal_density_list[0].keys()):
-                if len(neuronal_density_list[x][ii]) == 0:
-                    dict_[ii] = np.nan
+            for i in list(neuronal_density_list[0].keys()):
+                if len(neuronal_density_list[x][i]) == 0:
+                    dict_[i] = np.nan
                 else:
-                    dict_[ii] = np.mean(neuronal_density_list[x][ii])
+                    dict_[i] = np.mean(neuronal_density_list[x][i])
                 category_density[x + 1] = dict_
 
         # Step 2: For areas with out experimental data,
@@ -1092,24 +1092,24 @@ def process_raw_data():
             sp = cocomac_data[target][source]['source_pattern']
             tp = cocomac_data[target][source]['target_pattern']
             if sp is not None:
-                for ii in range(6):
-                    if sp[ii] == 'X':
-                        sp[ii] = 2
-                    if sp[ii] == '?' and ii in [0, 3]:
-                        sp[ii] = 0
-                    if sp[ii] == '?' and ii in [1, 2, 4, 5]:
+                for i in range(6):
+                    if sp[i] == 'X':
+                        sp[i] = 2
+                    if sp[i] == '?' and i in [0, 3]:
+                        sp[i] = 0
+                    if sp[i] == '?' and i in [1, 2, 4, 5]:
                         # Dummy value to enable transformation of this list into a
                         # numpy array
-                        sp[ii] = -1
+                        sp[i] = -1
 
             if tp is not None:
-                for ii in range(6):
-                    if tp[ii] == 'X':
-                        tp[ii] = 2.
-                    if tp[ii] == '?':
+                for i in range(6):
+                    if tp[i] == 'X':
+                        tp[i] = 2.
+                    if tp[i] == '?':
                         # Dummy value to enable transformation of this list into a
                         # numpy array
-                        tp[ii] = -1
+                        tp[i] = -1
 
             cocomac_completed[target][source] = {
                 'source_pattern': sp, 'target_pattern': tp}
@@ -1322,8 +1322,8 @@ def process_raw_data():
 
     def probit(x,):
         if isinstance(x, np.ndarray):
-            res = [integrate.quad(integrand, -1000., ii,
-                                  args=(0., 1.))[0] for ii in x]
+            res = [integrate.quad(integrand, -1000., i,
+                                  args=(0., 1.))[0] for i in x]
         else:
             res = integrate.quad(integrand, -1000., x, args=(0., 1.))[0]
         return res
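
The four distance-table loops above share one pattern: a row of area names followed by one numeric row per area, with the outer (row) index and the inner (column) index necessarily distinct. A sketch of a shared helper, assuming a `csv.reader` positioned so that the next row holds the area names (as after the script's `skip_header()`):

```python
import csv

def read_distance_table(reader, n):
    """Parse an (n x n) tab-separated distance table into a nested dict."""
    areas = next(reader)
    table = {}
    for i in range(1, n + 1):                 # one row per area
        row = next(reader)
        table[areas[i]] = {areas[j]: float(row[j]) for j in range(1, n + 1)}
    return table
```
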
diff --git a/multiarea_model/multiarea_helpers.py b/multiarea_model/multiarea_helpers.py
index f32527b99f428b4d1af320abc7c516787440164d..2b9cadd0e7a01e10050d3cb2d614420c7177ff39 100644
--- a/multiarea_model/multiarea_helpers.py
+++ b/multiarea_model/multiarea_helpers.py
@@ -227,11 +227,11 @@ def matrix_to_dict(m, area_list, structure, external=None):
             x = np.insert(x, 2, np.zeros((2, 8), dtype=float), axis=0)
         else:
             x = x.reshape((8, 8))
-        for ii, pop in enumerate(population_list):
-            for jj, pop2 in enumerate(population_list):
-                if x[ii][jj] < 1e-20:
-                    x[ii][jj] = 0.
-                dic[area][pop][area2][pop2] = x[ii][jj]
+        for i, pop in enumerate(population_list):
+            for j, pop2 in enumerate(population_list):
+                if x[i][j] < 1e-20:
+                    x[i][j] = 0.
+                dic[area][pop][area2][pop2] = x[i][j]
     if external is not None:
         if isinstance(external, np.ndarray):
             for area in dic:
@@ -272,8 +272,8 @@ def vector_to_dict(v, area_list, structure, external=None):
     dic = nested_dict()
     for area in area_list:
         vmask = create_vector_mask(structure, areas=[area])
-        for ii, pop in enumerate(structure[area]):
-            dic[area][pop] = v[vmask][ii]
+        for i, pop in enumerate(structure[area]):
+            dic[area][pop] = v[vmask][i]
         for pop in population_list:
             if pop not in structure[area]:
                 dic[area][pop] = 0.
diff --git a/multiarea_model/sumatra_helpers.py b/multiarea_model/sumatra_helpers.py
index 9d04d7a59bc6579e267161ee74fd5042337eab9f..94c29c36ac43c1cd338a920ea99be0679ece35e6 100644
--- a/multiarea_model/sumatra_helpers.py
+++ b/multiarea_model/sumatra_helpers.py
@@ -60,10 +60,10 @@ def register_runtime(label):
                       'runtime_*')
     files = glob.glob(fp)
 
-    for ii, fn in enumerate(files):
+    for i, fn in enumerate(files):
         with open(fn, 'r') as f:
             d = json.load(f)
-        if ii == 0:
+        if i == 0:
             data = {key: [value] for key, value in d.items()}
         else:
             for key, value in d.items():
diff --git a/multiarea_model/theory.py b/multiarea_model/theory.py
index f4e93620365ef6558c6bbbaae51cccd6ba6f37bc..4780f49bcc3e4e81fb885541fa80e91a7984502b 100644
--- a/multiarea_model/theory.py
+++ b/multiarea_model/theory.py
@@ -143,19 +143,19 @@ class Theory():
             # initial rates are explicitly defined in self.params
             elif isinstance(self.params['initial_rates'], np.ndarray):
                 num_iter = 1
-                gen = (self.params['initial_rates'] for ii in range(num_iter))
+                gen = (self.params['initial_rates'] for i in range(num_iter))
         # if initial rates are not defined, set them 0
         else:
             num_iter = 1
-            gen = (np.zeros(dim) for ii in range(num_iter))
+            gen = (np.zeros(dim) for i in range(num_iter))
         rates = []
 
         # Loop over all iterations of different initial conditions
         for nsim in range(num_iter):
             print('Iteration: {}'.format(nsim))
             initial_rates = next(gen)
-            for ii in range(dim):
-                nest.SetStatus([neurons[ii]], {'rate': initial_rates[ii]})
+            for i in range(dim):
+                nest.SetStatus([neurons[i]], {'rate': initial_rates[i]})
 
             # create recording device
             multimeter = nest.Create('multimeter', params={'record_from':
@@ -173,8 +173,8 @@ class Theory():
             data = nest.GetStatus(multimeter)[0]['events']
             res = np.array([np.insert(data['rate'][np.where(data['senders'] == n)],
                                       0,
-                                      initial_rates[ii])
-                            for ii, n in enumerate(neurons)])
+                                      initial_rates[i])
+                            for i, n in enumerate(neurons)])
 
             if full_output:
                 rates.append(res)
@@ -322,16 +322,16 @@ class Theory():
                                                    1.e-3*self.NP['t_ref'],
                                                    self.NP['theta'],
                                                    self.NP['V_reset'],
-                                                   mu[ii], sigma[ii]) for ii in range(len(mu))])
+                                                   mu[i], sigma[i]) for i in range(len(mu))])
         # Unit: 1/(mV)**2
         d_nu_d_sigma = np.array([d_nu_d_sigma_fb_numeric(1.e-3*self.NP['tau_m'],
                                                          1.e-3*self.NP['tau_syn'],
                                                          1.e-3*self.NP['t_ref'],
                                                          self.NP['theta'],
                                                          self.NP['V_reset'],
-                                                         mu[ii], sigma[ii])*1/(
-                                                             2. * sigma[ii])
-                                 for ii in range(len(mu))])
+                                                         mu[i], sigma[i])*1/(
+                                                             2. * sigma[i])
+                                 for i in range(len(mu))])
         return d_nu_d_mu, d_nu_d_sigma
 
     def gain_matrix(self, rates, matrix_filter=None,
@@ -368,9 +368,9 @@ class Theory():
 
         N_pre = np.zeros_like(K)
         N_post = np.zeros_like(K)
-        for ii in range(N.size):
-            N_pre[ii] = N
-            N_post[:, ii] = N
+        for i in range(N.size):
+            N_pre[i] = N
+            N_post[:, i] = N
 
         # Connection probabilities between populations
         mu, sigma = self.mu_sigma(rates)
@@ -383,9 +383,9 @@ class Theory():
 
         slope_mu_matrix = np.zeros_like(J)
         slope_sigma_matrix = np.zeros_like(J)
-        for ii in range(N.size):
-            slope_mu_matrix[:, ii] = d_nu_d_mu
-            slope_sigma_matrix[:, ii] = d_nu_d_sigma
+        for i in range(N.size):
+            slope_mu_matrix[:, i] = d_nu_d_mu
+            slope_sigma_matrix[:, i] = d_nu_d_sigma
         G = self.NP['tau_m'] * 1e-3 * (slope_mu_matrix * K * J +
                                        slope_sigma_matrix * K * J**2)
         if full_output:
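
The `N_pre`/`N_post` and slope-matrix fill loops above are plain row/column broadcasts. A sketch of the loop-free equivalents (`np.broadcast_to` returns read-only views, hence the copies before any later in-place writes):

```python
import numpy as np

N = np.random.randint(100, 1000, size=254).astype(float)   # toy sizes
n = N.size
N_pre = np.broadcast_to(N, (n, n)).copy()                  # every row equals N
N_post = np.broadcast_to(N[:, None], (n, n)).copy()        # every column equals N

d_nu_d_mu = np.random.rand(n)                              # toy slope vector
slope_mu_matrix = np.broadcast_to(d_nu_d_mu[:, None], (n, n)).copy()
```
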
diff --git a/start_jobs.py b/start_jobs.py
index 54896778244c1a174df743d72f3d34e4caca2140..89e4ad2c86b6ff8d6d2539d394f8d2e868c29865 100644
--- a/start_jobs.py
+++ b/start_jobs.py
@@ -53,8 +53,8 @@ def start_job(label, submit_cmd, jobscript_template, sumatra=False, reason=None,
     nested_update(sim_params, custom_params['sim_params'])
 
     # Copy custom param file for each MPI process
-    for ii in range(sim_params['num_processes']):
-        shutil.copy(fn, '_'.join((fn, str(ii))))
+    for i in range(sim_params['num_processes']):
+        shutil.copy(fn, '_'.join((fn, str(i))))
     # Collect relevant arguments for job script
     num_vp = sim_params['num_processes'] * sim_params[
         'local_num_threads']