From ce2f5b00a37d7af4fae972e3b0b75ffcc920358a Mon Sep 17 00:00:00 2001
From: Maximilian Schmidt <max.schmidt@fz-juelich.de>
Date: Fri, 25 May 2018 18:14:53 +0900
Subject: [PATCH] Add script to compute correlation coefficients

---
 figures/Schmidt2018_dyn/compute_corrcoeff.py | 64 ++++++++++++++++++++++++++
 1 file changed, 64 insertions(+)
 create mode 100644 figures/Schmidt2018_dyn/compute_corrcoeff.py

diff --git a/figures/Schmidt2018_dyn/compute_corrcoeff.py b/figures/Schmidt2018_dyn/compute_corrcoeff.py
new file mode 100644
index 0000000..34ced53
--- /dev/null
+++ b/figures/Schmidt2018_dyn/compute_corrcoeff.py
@@ -0,0 +1,64 @@
+import json
+import os
+import sys
+
+import numpy as np
+import correlation_toolbox.helper as ch
+from multiarea_model import MultiAreaModel
+
+data_path = sys.argv[1]
+label = sys.argv[2]
+
+load_path = os.path.join(data_path,
+                         label,
+                         'recordings')
+save_path = os.path.join(data_path,
+                         label,
+                         'Analysis')
+
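+# Read the simulation duration T (in ms) from the custom parameter file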
+with open(os.path.join(data_path, label, 'custom_params_{}'.format(label)), 'r') as f:
+    sim_params = json.load(f)
+T = sim_params['T']
+
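+# Analysis parameters: discard the startup transient before tmin (ms) and bin
+# the spike trains of a subsample of neurons at the given resolution (ms)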
+tmin = 500.
+subsample = 2000
+resolution = 1.
+
+"""
+Create MultiAreaModel instance to have access to data structures
+"""
+M = MultiAreaModel({})
+
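+# Compute the mean pairwise correlation coefficient of binned single-neuron
+# spike counts for each population of the three example areas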
+cc_dict = {}
+for area in ['V1', 'V2', 'FEF']:
+    cc_dict[area] = {}
+    for pop in M.structure[area]:
+        fp = '-'.join((label,
+                       'spikes',  # assumes that the default label for spike files was used
+                       area,
+                       pop))
+        fn = os.path.join(load_path, '{}.npy'.format(fp))
+        spikes = np.load(fn)
+        ids = np.unique(spikes[:, 0])
+        # load subsample + 1000 neurons so that at least subsample non-silent
+        # neurons remain after silent spike trains are stripped below
+        dat = ch.sort_gdf_by_id(spikes, idmin=ids[0], idmax=ids[0]+subsample+1000)
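+        # bin spike trains between tmin and T, then keep subsample non-silent neurons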
+        bins, hist = ch.instantaneous_spike_count(dat[1], resolution, tmin=tmin, tmax=T)
+        rates = ch.strip_binned_spiketrains(hist)[:subsample]
+        cc = np.corrcoef(rates)
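+        # keep only the off-diagonal entries and set undefined (NaN) values to zero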
+        cc = np.extract(1-np.eye(cc[0].size), cc)
+        cc[np.where(np.isnan(cc))] = 0.
+        cc_dict[area][pop] = np.mean(cc)
+
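+# Write the mean correlation coefficients per population to a JSON file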
+fn = os.path.join(save_path,
+                  'corrcoeff.json')
+with open(fn, 'w') as f:
+    json.dump(cc_dict, f)
-- 
GitLab