diff --git a/data/validation/ball_and_3stick.json b/data/validation/ball_and_3stick.json
new file mode 100644
index 0000000000000000000000000000000000000000..e4121f4383e44dd7a7c15c36598b2a85fba728f2
--- /dev/null
+++ b/data/validation/ball_and_3stick.json
@@ -0,0 +1,158 @@
+[
+  {
+    "nseg": 5, 
+    "dt": 0.001, 
+    "measurements": {
+      "soma": {
+        "spikes": [
+          6.943999999998871, 
+          20.48399999999761, 
+          33.862999999979664, 
+          52.52400000006877, 
+          65.9560000001329, 
+          79.3600000001969
+        ], 
+        "thresh": 0.0
+      }, 
+      "clamp": {
+        "spikes": [
+          7.1399999999987624, 
+          20.448999999997692, 
+          33.722999999978995, 
+          52.61200000006919, 
+          65.85800000013244, 
+          79.18600000019607
+        ], 
+        "thresh": 20.0
+      }, 
+      "dend": {
+        "spikes": [
+          7.055999999998809, 
+          20.44199999999771, 
+          33.73499999997905, 
+          52.56900000006898, 
+          65.86400000013246, 
+          79.20200000019615
+        ], 
+        "thresh": -20.0
+      }
+    }
+  }, 
+  {
+    "nseg": 11, 
+    "dt": 0.001, 
+    "measurements": {
+      "soma": {
+        "spikes": [
+          6.948999999998868, 
+          20.506999999997557, 
+          33.91399999997991, 
+          52.528000000068786, 
+          65.98500000013304, 
+          79.42000000019719
+        ], 
+        "thresh": 0.0
+      }, 
+      "clamp": {
+        "spikes": [
+          7.147999999998758, 
+          20.478999999997622, 
+          33.77899999997926, 
+          52.62300000006924, 
+          65.8930000001326, 
+          79.24900000019638
+        ], 
+        "thresh": 20.0
+      }, 
+      "dend": {
+        "spikes": [
+          7.061999999998806, 
+          20.467999999997648, 
+          33.785999999979296, 
+          52.576000000069016, 
+          65.8940000001326, 
+          79.26000000019643
+        ], 
+        "thresh": -20.0
+      }
+    }
+  }, 
+  {
+    "nseg": 51, 
+    "dt": 0.001, 
+    "measurements": {
+      "soma": {
+        "spikes": [
+          6.949999999998868, 
+          20.512999999997543, 
+          33.92699999997997, 
+          52.530000000068796, 
+          65.99200000013307, 
+          79.43500000019726
+        ], 
+        "thresh": 0.0
+      }, 
+      "clamp": {
+        "spikes": [
+          7.150999999998756, 
+          20.486999999997604, 
+          33.793999999979334, 
+          52.626000000069254, 
+          65.90200000013265, 
+          79.26500000019645
+        ], 
+        "thresh": 20.0
+      }, 
+      "dend": {
+        "spikes": [
+          7.062999999998805, 
+          20.473999999997634, 
+          33.79899999997936, 
+          52.578000000069025, 
+          65.90200000013265, 
+          79.2740000001965
+        ], 
+        "thresh": -20.0
+      }
+    }
+  }, 
+  {
+    "nseg": 101, 
+    "dt": 0.001, 
+    "measurements": {
+      "soma": {
+        "spikes": [
+          6.949999999998868, 
+          20.512999999997543, 
+          33.927999999979974, 
+          52.530000000068796, 
+          65.99300000013308, 
+          79.43500000019726
+        ], 
+        "thresh": 0.0
+      }, 
+      "clamp": {
+        "spikes": [
+          7.150999999998756, 
+          20.486999999997604, 
+          33.793999999979334, 
+          52.626000000069254, 
+          65.90300000013265, 
+          79.26600000019646
+        ], 
+        "thresh": 20.0
+      }, 
+      "dend": {
+        "spikes": [
+          7.062999999998805, 
+          20.47499999999763, 
+          33.79999999997936, 
+          52.578000000069025, 
+          65.90200000013265, 
+          79.2750000001965
+        ], 
+        "thresh": -20.0
+      }
+    }
+  }
+]
\ No newline at end of file
diff --git a/data/validation/ball_and_stick.json b/data/validation/ball_and_stick.json
new file mode 100644
index 0000000000000000000000000000000000000000..5b459a3a287e012def1539866531efa458f66cc0
--- /dev/null
+++ b/data/validation/ball_and_stick.json
@@ -0,0 +1,158 @@
+[
+  {
+    "nseg": 5, 
+    "dt": 0.001, 
+    "measurements": {
+      "soma": {
+        "spikes": [
+          6.980999999998851, 
+          20.98699999999644, 
+          34.792999999984104, 
+          48.61100000005008, 
+          62.43100000011607, 
+          76.25100000018206
+        ], 
+        "thresh": 0.0
+      }, 
+      "clamp": {
+        "spikes": [
+          7.2499999999987015, 
+          21.193999999995956, 
+          34.97599999998498, 
+          48.78900000005093, 
+          62.60700000011691, 
+          76.4270000001829
+        ], 
+        "thresh": 10.0
+      }, 
+      "dend": {
+        "spikes": [
+          7.169999999998746, 
+          21.177999999995993, 
+          34.97799999998499, 
+          48.79400000005096, 
+          62.614000000116945, 
+          76.43400000018293
+        ], 
+        "thresh": -10.0
+      }
+    }
+  }, 
+  {
+    "nseg": 11, 
+    "dt": 0.001, 
+    "measurements": {
+      "soma": {
+        "spikes": [
+          6.998999999998841, 
+          21.078999999996224, 
+          34.983999999985016, 
+          48.9080000000515, 
+          62.835000000118, 
+          76.7630000001845
+        ], 
+        "thresh": 0.0
+      }, 
+      "clamp": {
+        "spikes": [
+          7.277999999998686, 
+          21.29999999999571, 
+          35.17999999998595, 
+          49.09800000005241, 
+          63.0240000001189, 
+          76.9510000001854
+        ], 
+        "thresh": 10.0
+      }, 
+      "dend": {
+        "spikes": [
+          7.181999999998739, 
+          21.2609999999958, 
+          35.15899999998585, 
+          49.08100000005233, 
+          63.00700000011882, 
+          76.93400000018532
+        ], 
+        "thresh": -10.0
+      }
+    }
+  }, 
+  {
+    "nseg": 51, 
+    "dt": 0.001, 
+    "measurements": {
+      "soma": {
+        "spikes": [
+          7.003999999998838, 
+          21.10199999999617, 
+          35.03299999998525, 
+          48.98500000005187, 
+          62.9400000001185, 
+          76.89500000018514
+        ], 
+        "thresh": 0.0
+      }, 
+      "clamp": {
+        "spikes": [
+          7.284999999998682, 
+          21.32599999999565, 
+          35.232999999986205, 
+          49.17800000005279, 
+          63.13200000011942, 
+          77.08700000018605
+        ], 
+        "thresh": 10.0
+      }, 
+      "dend": {
+        "spikes": [
+          7.185999999998737, 
+          21.28199999999575, 
+          35.20499999998607, 
+          49.15500000005268, 
+          63.10900000011931, 
+          77.06400000018594
+        ], 
+        "thresh": -10.0
+      }
+    }
+  }, 
+  {
+    "nseg": 101, 
+    "dt": 0.001, 
+    "measurements": {
+      "soma": {
+        "spikes": [
+          7.003999999998838, 
+          21.102999999996168, 
+          35.03499999998526, 
+          48.98800000005188, 
+          62.94400000011852, 
+          76.90000000018516
+        ], 
+        "thresh": 0.0
+      }, 
+      "clamp": {
+        "spikes": [
+          7.284999999998682, 
+          21.326999999995646, 
+          35.234999999986215, 
+          49.181000000052805, 
+          63.13500000011943, 
+          77.09100000018607
+        ], 
+        "thresh": 10.0
+      }, 
+      "dend": {
+        "spikes": [
+          7.185999999998737, 
+          21.28299999999575, 
+          35.20699999998608, 
+          49.15700000005269, 
+          63.11300000011933, 
+          77.06900000018597
+        ], 
+        "thresh": -10.0
+      }
+    }
+  }
+]
\ No newline at end of file
diff --git a/data/validation/simple_exp2_synapse.json b/data/validation/simple_exp2_synapse.json
new file mode 100644
index 0000000000000000000000000000000000000000..137d9e6f86916e945b9f8c87d02369912fa03df0
--- /dev/null
+++ b/data/validation/simple_exp2_synapse.json
@@ -0,0 +1,90 @@
+[
+  {
+    "nseg": 5, 
+    "dt": 0.0125, 
+    "measurements": {
+      "soma": {
+        "spikes": [
+          11.23749999999963, 
+          21.72500000000066, 
+          41.2625000000051
+        ], 
+        "thresh": 20.0
+      }, 
+      "dend": {
+        "spikes": [
+          11.087499999999638, 
+          21.487500000000605, 
+          41.11250000000506
+        ], 
+        "thresh": -10.0
+      }
+    }
+  }, 
+  {
+    "nseg": 11, 
+    "dt": 0.0125, 
+    "measurements": {
+      "soma": {
+        "spikes": [
+          11.23749999999963, 
+          21.73750000000066, 
+          41.2750000000051
+        ], 
+        "thresh": 20.0
+      }, 
+      "dend": {
+        "spikes": [
+          11.087499999999638, 
+          21.487500000000605, 
+          41.11250000000506
+        ], 
+        "thresh": -10.0
+      }
+    }
+  }, 
+  {
+    "nseg": 51, 
+    "dt": 0.0125, 
+    "measurements": {
+      "soma": {
+        "spikes": [
+          11.23749999999963, 
+          21.73750000000066, 
+          41.2750000000051
+        ], 
+        "thresh": 20.0
+      }, 
+      "dend": {
+        "spikes": [
+          11.087499999999638, 
+          21.487500000000605, 
+          41.11250000000506
+        ], 
+        "thresh": -10.0
+      }
+    }
+  }, 
+  {
+    "nseg": 101, 
+    "dt": 0.0125, 
+    "measurements": {
+      "soma": {
+        "spikes": [
+          11.23749999999963, 
+          21.73750000000066, 
+          41.2750000000051
+        ], 
+        "thresh": 20.0
+      }, 
+      "dend": {
+        "spikes": [
+          11.087499999999638, 
+          21.487500000000605, 
+          41.11250000000506
+        ], 
+        "thresh": -10.0
+      }
+    }
+  }
+]
\ No newline at end of file
diff --git a/data/validation/simple_exp_synapse.json b/data/validation/simple_exp_synapse.json
new file mode 100644
index 0000000000000000000000000000000000000000..675f8accd5753b720efe80020f3665df9ba47318
--- /dev/null
+++ b/data/validation/simple_exp_synapse.json
@@ -0,0 +1,90 @@
+[
+  {
+    "nseg": 5, 
+    "dt": 0.0125, 
+    "measurements": {
+      "soma": {
+        "spikes": [
+          11.087499999999638, 
+          21.525000000000613, 
+          41.11250000000506
+        ], 
+        "thresh": 20.0
+      }, 
+      "dend": {
+        "spikes": [
+          11.087499999999638, 
+          21.51250000000061, 
+          41.10000000000506
+        ], 
+        "thresh": -10.0
+      }
+    }
+  }, 
+  {
+    "nseg": 11, 
+    "dt": 0.0125, 
+    "measurements": {
+      "soma": {
+        "spikes": [
+          11.099999999999637, 
+          21.537500000000616, 
+          41.125000000005066
+        ], 
+        "thresh": 20.0
+      }, 
+      "dend": {
+        "spikes": [
+          11.087499999999638, 
+          21.525000000000613, 
+          41.11250000000506
+        ], 
+        "thresh": -10.0
+      }
+    }
+  }, 
+  {
+    "nseg": 51, 
+    "dt": 0.0125, 
+    "measurements": {
+      "soma": {
+        "spikes": [
+          11.099999999999637, 
+          21.537500000000616, 
+          41.125000000005066
+        ], 
+        "thresh": 20.0
+      }, 
+      "dend": {
+        "spikes": [
+          11.087499999999638, 
+          21.525000000000613, 
+          41.11250000000506
+        ], 
+        "thresh": -10.0
+      }
+    }
+  }, 
+  {
+    "nseg": 101, 
+    "dt": 0.0125, 
+    "measurements": {
+      "soma": {
+        "spikes": [
+          11.099999999999637, 
+          21.537500000000616, 
+          41.125000000005066
+        ], 
+        "thresh": 20.0
+      }, 
+      "dend": {
+        "spikes": [
+          11.087499999999638, 
+          21.525000000000613, 
+          41.11250000000506
+        ], 
+        "thresh": -10.0
+      }
+    }
+  }
+]
\ No newline at end of file
diff --git a/data/validation/soma.json b/data/validation/soma.json
new file mode 100644
index 0000000000000000000000000000000000000000..b0de340ce5891e8cc95fdfb5cc92d956751f37da
--- /dev/null
+++ b/data/validation/soma.json
@@ -0,0 +1,62 @@
+[
+ {
+  "dt": 0.05, 
+  "spikes": [
+   12.04999999999985, 
+   27.57499999999897, 
+   42.85000000000118, 
+   58.10000000000465, 
+   73.37500000000811, 
+   88.62500000001158, 
+   103.90000000001505
+  ]
+ }, 
+ {
+  "dt": 0.02, 
+  "spikes": [
+   12.04999999999985, 
+   27.57499999999897, 
+   42.85000000000118, 
+   58.10000000000465, 
+   73.37500000000811, 
+   88.62500000001158, 
+   103.90000000001505
+  ]
+ }, 
+ {
+  "dt": 0.01, 
+  "spikes": [
+   12.037499999999584, 
+   27.525000000001977, 
+   42.76250000000544, 
+   57.9875000000089, 
+   73.21250000000188, 
+   88.43749999998803, 
+   103.66249999997419
+  ]
+ }, 
+ {
+  "dt": 0.001, 
+  "spikes": [
+   12.026000000003206, 
+   27.476999999981313, 
+   42.68400000002178, 
+   57.881000000094346, 
+   73.0770000001669, 
+   88.27300000023946, 
+   103.46900000031202
+  ]
+ }, 
+ {
+  "dt": 0.0001, 
+  "spikes": [
+   12.024499999978646, 
+   27.473100000350247, 
+   42.67780000085499, 
+   57.87230000135939, 
+   73.06600000186377, 
+   88.25970000236815, 
+   103.45330000287252
+  ]
+ }
+]
\ No newline at end of file
diff --git a/mechanisms/CMakeLists.txt b/mechanisms/CMakeLists.txt
index 203dedff949356b1a715146baf0ad6d6714aac56..cf183d75545c011dbb5fbb3436221ab7ab4d0933 100644
--- a/mechanisms/CMakeLists.txt
+++ b/mechanisms/CMakeLists.txt
@@ -1,4 +1,4 @@
-set(mechanisms pas hh expsyn)
+set(mechanisms pas hh expsyn exp2syn)
 
 set(modcc "${CMAKE_BINARY_DIR}/external/bin/modcc")
 
diff --git a/mechanisms/mod/exp2syn.mod b/mechanisms/mod/exp2syn.mod
new file mode 100755
index 0000000000000000000000000000000000000000..ea0cf025e9228216b4699161a2e574ca1d1dda6e
--- /dev/null
+++ b/mechanisms/mod/exp2syn.mod
@@ -0,0 +1,50 @@
+NEURON {
+	POINT_PROCESS exp2syn
+	RANGE tau1, tau2, e
+	NONSPECIFIC_CURRENT i
+}
+
+UNITS {
+	(nA) = (nanoamp)
+	(mV) = (millivolt)
+	(uS) = (microsiemens)
+}
+
+PARAMETER {
+	tau1 = .5 (ms) : <1e-9,1e9>
+	tau2 = 2  (ms) : <1e-9,1e9>
+	e=0	(mV)
+}
+
+ASSIGNED {
+	factor
+}
+
+STATE {
+	A : (uS)
+	B : (uS)
+}
+
+INITIAL {
+	LOCAL tp
+	A = 0
+	B = 0
+	tp = (tau1*tau2)/(tau2 - tau1) * log(tau2/tau1)
+	factor = -exp(-tp/tau1) + exp(-tp/tau2)
+	factor = 1/factor
+}
+
+BREAKPOINT {
+	SOLVE state METHOD cnexp
+	i = (B - A)*(v - e)
+}
+
+DERIVATIVE state {
+	A' = -A/tau1
+	B' = -B/tau2
+}
+
+NET_RECEIVE(weight) {
+	A = A + weight*factor
+	B = B + weight*factor
+}
diff --git a/mechanisms/mod/expsyn.mod b/mechanisms/mod/expsyn.mod
index db1f6d6ed61a8b563141ea2516658b8f87a35637..16d10c5cf856cd87e50252dca2163d844588e2b9 100644
--- a/mechanisms/mod/expsyn.mod
+++ b/mechanisms/mod/expsyn.mod
@@ -1,5 +1,5 @@
 NEURON {
-    POINT_PROCESS ExpSyn
+    POINT_PROCESS expsyn
     RANGE tau, e
     NONSPECIFIC_CURRENT i
 }
diff --git a/miniapp/io.cpp b/miniapp/io.cpp
index 54fb540117c2418b2d56ccbc950dc6c727122856..52594b6bcf872bc3eb22c0bc7376f2d98b35c060 100644
--- a/miniapp/io.cpp
+++ b/miniapp/io.cpp
@@ -16,7 +16,7 @@ namespace io {
 cl_options read_options(int argc, char** argv) {
 
     // set default options
-    const cl_options default_options{"", 1000, 500, 100, 100., 0.025, false};
+    const cl_options default_options{"", 1000, 500, "expsyn", 100, 100., 0.025, false};
 
     cl_options options;
     // parse command line arguments
@@ -29,6 +29,9 @@ cl_options read_options(int argc, char** argv) {
         TCLAP::ValueArg<uint32_t> nsynapses_arg(
             "s", "nsynapses", "number of synapses per cell",
             false, 500, "non negative integer");
+        TCLAP::ValueArg<std::string> syntype_arg(
+            "S", "syntype", "type of synapse (expsyn or exp2syn)",
+            false, "expsyn", "synapse type");
         TCLAP::ValueArg<uint32_t> ncompartments_arg(
             "c", "ncompartments", "number of compartments per segment",
             false, 100, "non negative integer");
@@ -46,6 +49,7 @@ cl_options read_options(int argc, char** argv) {
 
         cmd.add(ncells_arg);
         cmd.add(nsynapses_arg);
+        cmd.add(syntype_arg);
         cmd.add(ncompartments_arg);
         cmd.add(ifile_arg);
         cmd.add(dt_arg);
@@ -55,6 +59,7 @@ cl_options read_options(int argc, char** argv) {
 
         options.cells = ncells_arg.getValue();
         options.synapses_per_cell = nsynapses_arg.getValue();
+        options.syn_type = syntype_arg.getValue();
         options.compartments_per_segment = ncompartments_arg.getValue();
         options.ifname = ifile_arg.getValue();
         options.tfinal = tfinal_arg.getValue();
diff --git a/miniapp/io.hpp b/miniapp/io.hpp
index 6633abc51ddd884d65d666e46d7ae3222a64a183..1f530d9150c69b01cfa9f6cac8127180ff12ec70 100644
--- a/miniapp/io.hpp
+++ b/miniapp/io.hpp
@@ -11,6 +11,7 @@ struct cl_options {
     std::string ifname;
     uint32_t cells;
     uint32_t synapses_per_cell;
+    std::string syn_type;
     uint32_t compartments_per_segment;
     double tfinal;
     double dt;
diff --git a/miniapp/miniapp.cpp b/miniapp/miniapp.cpp
index f267066ccb35093057527d4da8165d41015d1452..3867f4e8365fa8ac65bbe235e2e7fe915b94e60b 100644
--- a/miniapp/miniapp.cpp
+++ b/miniapp/miniapp.cpp
@@ -43,29 +43,29 @@ struct model {
             mc::threading::parallel_for::apply(
                 0, num_groups(),
                 [&](int i) {
-                        mc::util::profiler_enter("stepping","events");
+                    PE("stepping","events");
                     cell_groups[i].enqueue_events(communicator.queue(i));
-                        mc::util::profiler_leave();
+                    PL();
 
                     cell_groups[i].advance(std::min(t+delta, tfinal), dt);
 
-                        mc::util::profiler_enter("events");
+                    PE("events");
                     communicator.add_spikes(cell_groups[i].spikes());
                     cell_groups[i].clear_spikes();
-                        mc::util::profiler_leave(2);
+                    PL(2);
                 }
             );
 
-                mc::util::profiler_enter("stepping", "exchange");
+            PE("stepping", "exchange");
             communicator.exchange();
-                mc::util::profiler_leave(2);
+            PL(2);
 
             t += delta;
         }
     }
 
     void init_communicator() {
-            mc::util::profiler_enter("setup", "communicator");
+        PE("setup", "communicator");
 
         // calculate the source and synapse distribution serially
         std::vector<id_type> target_counts(num_groups());
@@ -81,11 +81,11 @@ struct model {
         //  create connections
         communicator = communicator_type(num_groups(), target_counts);
 
-            mc::util::profiler_leave(2);
+        PL(2);
     }
 
     void update_gids() {
-            mc::util::profiler_enter("setup", "globalize");
+        PE("setup", "globalize");
         auto com_policy = communicator.communication_policy();
         auto global_source_map = com_policy.make_map(source_map.back());
         auto domain_idx = communicator.domain_id();
@@ -97,7 +97,7 @@ struct model {
                 target_map[i]+communicator.target_gid_from_group_lid(0)
             );
         }
-            mc::util::profiler_leave(2);
+        PL(2);
     }
 
     // TODO : only stored here because init_communicator() and update_gids() are split
@@ -111,6 +111,7 @@ struct model {
             double value;
         };
         std::string name;
+        std::string units;
         index_type id;
         std::vector<sample_type> samples;
     };
@@ -139,9 +140,9 @@ struct model {
     };
 
     mc::sampler make_simple_sampler(
-        index_type probe_gid, const std::string name, index_type id, float dt)
+        index_type probe_gid, const std::string& name, const std::string& units, index_type id, float dt)
     {
-        traces.push_back(trace_data{name, id});
+        traces.push_back(trace_data{name, units, id});
         return {probe_gid, simple_sampler_functor(traces, traces.size()-1, dt)};
     }
 
@@ -156,14 +157,20 @@ struct model {
             auto path = "trace_" + std::to_string(trace.id)
                       + "_" + trace.name + ".json";
 
-            nlohmann::json json;
-            json["name"] = trace.name;
+            nlohmann::json jrep;
+            jrep["name"] = trace.name;
+            jrep["units"] = trace.units;
+            jrep["id"] = trace.id;
+ 
+            auto& jt = jrep["data"]["time"];
+            auto& jy = jrep["data"][trace.name];
+                 
             for (const auto& sample: trace.samples) {
-                json["time"].push_back(sample.time);
-                json["value"].push_back(sample.value);
+                jt.push_back(sample.time);
+                jy.push_back(sample.value);
             }
             std::ofstream file(path);
-            file << std::setw(1) << json << std::endl;
+            file << std::setw(1) << jrep << std::endl;
         }
     }
 };
@@ -184,7 +191,7 @@ namespace synapses {
 ///////////////////////////////////////
 
 /// make a single abstract cell
-mc::cell make_cell(int compartments_per_segment, int num_synapses);
+mc::cell make_cell(int compartments_per_segment, int num_synapses, const std::string& syn_type);
 
 /// do basic setup (initialize global state, print banner, etc)
 void setup();
@@ -270,7 +277,7 @@ void all_to_all_model(nest::mc::io::cl_options& options, model& m) {
 
     // make a basic cell
     auto basic_cell =
-        make_cell(options.compartments_per_segment, synapses_per_cell);
+        make_cell(options.compartments_per_segment, synapses_per_cell, options.syn_type);
 
     auto num_domains = global_policy::size();
     auto domain_id = global_policy::id();
@@ -289,9 +296,9 @@ void all_to_all_model(nest::mc::io::cl_options& options, model& m) {
     mc::threading::parallel_for::apply(
         0, ncell_local,
         [&](int i) {
-                mc::util::profiler_enter("setup", "cells");
+            PE("setup", "cells");
             m.cell_groups[i] = make_lowered_cell(i, basic_cell);
-                mc::util::profiler_leave(2);
+            PL(2);
         }
     );
 
@@ -300,7 +307,7 @@ void all_to_all_model(nest::mc::io::cl_options& options, model& m) {
     //
     m.init_communicator();
 
-        mc::util::profiler_enter("setup", "connections");
+    PE("setup", "connections");
 
     // RNG distributions for connection delays and source cell ids
     auto weight_distribution = std::exponential_distribution<float>(0.75);
@@ -344,8 +351,7 @@ void all_to_all_model(nest::mc::io::cl_options& options, model& m) {
     //  setup probes
     //
 
-        mc::util::profiler_leave();
-        mc::util::profiler_enter("probes");
+    PL(); PE("probes");
 
     // monitor soma and dendrite on a few cells
     float sample_dt = 0.1;
@@ -358,16 +364,20 @@ void all_to_all_model(nest::mc::io::cl_options& options, model& m) {
         auto lid = m.communicator.group_lid(gid);
         auto probe_soma = m.cell_groups[lid].probe_gid_range().first;
         auto probe_dend = probe_soma+1;
+        auto probe_dend_current = probe_soma+2;
 
         m.cell_groups[lid].add_sampler(
-            m.make_simple_sampler(probe_soma, "vsoma", gid, sample_dt)
+            m.make_simple_sampler(probe_soma, "vsoma", "mV", gid, sample_dt)
         );
         m.cell_groups[lid].add_sampler(
-            m.make_simple_sampler(probe_dend, "vdend", gid, sample_dt)
+            m.make_simple_sampler(probe_dend, "vdend", "mV", gid, sample_dt)
+        );
+        m.cell_groups[lid].add_sampler(
+            m.make_simple_sampler(probe_dend_current, "idend", "mA/cm²", gid, sample_dt)
         );
     }
 
-        mc::util::profiler_leave(2);
+    PL(2);
 }
 
 ///////////////////////////////////////
@@ -389,7 +399,7 @@ void setup() {
 }
 
 // make a high level cell description for use in simulation
-mc::cell make_cell(int compartments_per_segment, int num_synapses) {
+mc::cell make_cell(int compartments_per_segment, int num_synapses, const std::string& syn_type) {
     nest::mc::cell cell;
 
     // Soma with diameter 12.6157 um and HH channel
@@ -414,17 +424,20 @@ mc::cell make_cell(int compartments_per_segment, int num_synapses) {
     auto distribution = std::uniform_real_distribution<float>(0.f, 1.0f);
     // distribute the synapses at random locations the terminal dendrites in a
     // round robin manner
+    nest::mc::parameter_list syn_default(syn_type);
     for (auto i=0; i<num_synapses; ++i) {
-        cell.add_synapse({2+(i%2), distribution(gen)});
+        cell.add_synapse({2+(i%2), distribution(gen)}, syn_default);
     }
 
     // add probes: 
     auto probe_soma = cell.add_probe({0, 0}, mc::probeKind::membrane_voltage);
     auto probe_dendrite = cell.add_probe({1, 0.5}, mc::probeKind::membrane_voltage);
+    auto probe_dendrite_current = cell.add_probe({1, 0.5}, mc::probeKind::membrane_current);
 
     EXPECTS(probe_soma==0);
     EXPECTS(probe_dendrite==1);
-    (void)probe_soma, (void)probe_dendrite;
+    EXPECTS(probe_dendrite_current==2);
+    (void)probe_soma, (void)probe_dendrite, (void)probe_dendrite_current;
 
     return cell;
 }
diff --git a/nrn/ball_and_3stick.py b/nrn/ball_and_3stick.py
index 15ece3d3da151daf7bdb90f1af1d6cd368352489..4333072216b56d203708430243e28ef008cdfa68 100644
--- a/nrn/ball_and_3stick.py
+++ b/nrn/ball_and_3stick.py
@@ -4,11 +4,21 @@ from matplotlib import pyplot
 import numpy as np
 import json
 import argparse
+import sys
 from neuron import gui, h
 
 parser = argparse.ArgumentParser(description='generate spike train ball and stick model with hh channels at soma and pas channels in dendrite')
 parser.add_argument('--plot', action='store_true', dest='plot')
-args = parser.parse_args()
+
+# hack to make things work with nrniv ... -python:
+# throw away args before -python foo.py if -python present.
+
+if '-python' in sys.argv:
+    argv = sys.argv[sys.argv.index('-python')+2:]
+else:
+    argv = sys.argv
+
+args = parser.parse_args(argv)
 
 soma = h.Section(name='soma')
 dend = [
diff --git a/nrn/ball_and_stick.py b/nrn/ball_and_stick.py
index 24562337a4baffd7a1ae3a1e7885cb07e06987ec..10932d1adcda27e60e5c8781f491a665b6d8cd8c 100644
--- a/nrn/ball_and_stick.py
+++ b/nrn/ball_and_stick.py
@@ -4,11 +4,21 @@ from matplotlib import pyplot
 import numpy as np
 import json
 import argparse
+import sys
 from neuron import gui, h
 
 parser = argparse.ArgumentParser(description='generate spike train ball and stick model with hh channels at soma and pas channels in dendrite')
 parser.add_argument('--plot', action='store_true', dest='plot')
-args = parser.parse_args()
+
+# hack to make things work with nrniv ... -python:
+# throw away args before -python foo.py if -python present.
+
+if '-python' in sys.argv:
+    argv = sys.argv[sys.argv.index('-python')+2:]
+else:
+    argv = sys.argv
+
+args = parser.parse_args(argv)
 
 soma = h.Section(name='soma')
 dend = h.Section(name='dend')
diff --git a/nrn/generate_validation.sh b/nrn/generate_validation.sh
index c42b1d5ce66512b8dc5ff46bf13e85f4707bd8ef..532a7cd738d53a1a0f8342d07f09c016b5e24857 100755
--- a/nrn/generate_validation.sh
+++ b/nrn/generate_validation.sh
@@ -1,4 +1,5 @@
 python2.7 ./soma.py
 python2.7 ./ball_and_stick.py
 python2.7 ./ball_and_3stick.py
-python2.7 ./simple_synapse.py
+python2.7 ./simple_synapse.py --synapse exp2
+python2.7 ./simple_synapse.py --synapse exp
diff --git a/nrn/simple_synapse.py b/nrn/simple_synapse.py
index 1779799f716157df995f2321346ad6ff52f1a608..de7bf22d7c84f652148605bdb3581defc249d208 100644
--- a/nrn/simple_synapse.py
+++ b/nrn/simple_synapse.py
@@ -8,7 +8,20 @@ from neuron import gui, h
 
 parser = argparse.ArgumentParser(description='generate spike train ball and stick model with hh channels at soma and pas channels in dendrite')
 parser.add_argument('--plot', action='store_true', dest='plot')
-args = parser.parse_args()
+parser.add_argument('--synapse', metavar='SYN', dest='synapse',
+                    choices=['exp', 'exp2'],
+                    default='exp',
+                    help='use synapse type SYN (exp or exp2)')
+
+# hack to make things work with nrniv ... -python:
+# throw away args before -python foo.py if -python present.
+
+if '-python' in sys.argv:
+    argv = sys.argv[sys.argv.index('-python')+2:]
+else:
+    argv = sys.argv
+
+args = parser.parse_args(argv)
 
 soma = h.Section(name='soma')
 dend = h.Section(name='dend')
@@ -62,8 +75,16 @@ for nseg in [5, 11, 51, 101] :
     dend.nseg=nseg
 
     # add a synapse
-    syn_ = h.ExpSyn(dend(0.5))
-    syn_.tau = 2
+    if args.synapse == 'exp':
+        syn_ = h.ExpSyn(dend(0.5))
+        syn_.tau = 2
+    elif args.synapse == 'exp2':
+        syn_ = h.Exp2Syn(dend(0.5))
+        syn_.tau1 = 0.5 # artificially slow onset
+        syn_.tau2 = 2
+    else:
+        raise RuntimeError('unrecognized synapse type')
+
     ncstim = h.NetCon(stim, syn_)
     ncstim.delay = 1 # 1 ms delay (arrives at 10ms)
     ncstim.weight[0] = 0.04 # NetCon weight is a vector
@@ -127,7 +148,7 @@ end = timer()
 print "took ", end-start, " seconds"
 
 # save the spike info as in json format
-fp = open('simple_synapse.json', 'w')
+fp = open('simple_'+args.synapse+'_synapse.json', 'w')
 json.dump(results, fp, indent=2)
 
 if args.plot :
diff --git a/nrn/soma.py b/nrn/soma.py
index 3af4f50f3e6e1ae7761591ab68fd32a1187f9c87..9decb2c1700a6fd113530019835dad3e807c2b9b 100644
--- a/nrn/soma.py
+++ b/nrn/soma.py
@@ -4,11 +4,21 @@ from matplotlib import pyplot
 import numpy as np
 import json
 import argparse
+import sys
 from neuron import gui, h
 
 parser = argparse.ArgumentParser(description='generate spike train info for a soma with hh channels')
 parser.add_argument('--plot', action='store_true', dest='plot')
-args = parser.parse_args()
+
+# hack to make things work with nrniv ... -python:
+# throw away args before -python foo.py if -python present.
+
+if '-python' in sys.argv:
+    argv = sys.argv[sys.argv.index('-python')+2:]
+else:
+    argv = sys.argv
+
+args = parser.parse_args(argv)
 
 if args.plot :
     print '-- plotting turned on'
diff --git a/scripts/README.md b/scripts/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..e645a762eaf88c5f7196a995502589da44c2f89c
--- /dev/null
+++ b/scripts/README.md
@@ -0,0 +1,81 @@
+tsplot
+------
+
+The `tsplot` script is a wrapper around matplotlib for displaying a collection of
+time series plots.
+
+## Input data
+
+`tsplot` reads timeseries in JSON format, according to the following conventions.
+
+```
+{
+    "units": <units>
+    "name":  <name>
+    <other key-value metadata>
+    "data": {
+        "time": [ <time values> ]
+        <trace name>: [ <trace values> ]
+    }
+}
+```
+
+The `data` object must contain numeric arrays, at least one of which must have the
+key `time`; the other members of `data` are traces sampled at the corresponding time values.
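+
+As a concrete illustration, here is a minimal sketch (in Python, with placeholder trace
+and file names) that writes a file following this convention:
+
+```
+import json
+
+trace = {
+    "name": "vsoma",      # taken as the plot title; placeholder name
+    "units": "mV",        # a single string applies to every trace
+    "data": {
+        "time":  [0.0, 0.1, 0.2, 0.3],
+        "vsoma": [-65.0, -64.8, -64.1, -63.0],
+    },
+}
+
+with open("vsoma.json", "w") as out:
+    json.dump(trace, out, indent=1)
+```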
+
+The other members of the top level object are regarded as metadata, with some keys
+treated specially:
+ * `units` is used to distinguish the vertical axes used for plotting, and to label those
+   axes. Its value is either a string, in which case the unit applies to all included
+   traces, or an object mapping trace names to their corresponding unit strings (see the
+   example after this list).
+ * `name` is taken as the title of the corresponding plot, if it is unambiguous.
+ * `label` is ignored: the _label_ for a trace is its name in the data object.
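+
+Continuing the sketch above, the per-trace mapping form of `units` would be written as
+(the second trace name is again a placeholder):
+
+```
+trace["units"] = {"vsoma": "mV", "idend": "mA/cm²"}
+```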
+
+## Operation
+
+The basic usage is simply:
+```
+tsplot data.json ...
+```
+which will produce an interactive plot of the timeseries found in the given files,
+with one trace per subplot.
+
+### Grouping
+
+Traces can be gathered onto the same subplot by grouping on metadata keys with the
+`-g` or `--group` option. To collect all traces with the same value of the key
+`id` and the same units:
+```
+tsplot -g units,id data.json ...
+```
+A subplot can comprise data with up to two different units, and will then be plotted
+with two separate vertical axes.
+
+Note that for the purposes of tsplot, the _label_ of a trace is its property
+name in the JSON representation of the data.
+
+### Restricting data
+
+The `-t` or `--trange` option excludes any points whose time values lie outside the
+specified range. Ranges are given as two numbers separated by a comma, but one or
+the other can be omitted to indicate that there is no bound on that side. For
+example:
+```
+tsplot -t ,100 data.json ...
+```
+will display all points with a time value less than or equal to 100.
+
+Extreme values for data can be automatically excluded and marked on the plot
+with the `-x` or `--exclude` option, taking a parameter _N_. All values in a
+timeseries that lie outside the interval [ _m_ - _Nr_, _m_ + _Nr_ ] are omitted,
+where _m_ is the median of the finite values in the timeseries, and _r_ is
+the 90% interquantile gap, that is, the difference between the 5% and 95% quantiles
+of the timeseries data.
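+
+The rule can be sketched as follows (hypothetical, standalone NumPy code that mirrors
+the description above rather than the script's internal implementation):
+
+```
+import numpy as np
+
+def excluded_mask(y, N):
+    # m is the median of the finite values, r the 90% interquantile gap
+    yfinite = y[np.isfinite(y)]
+    lq, m, uq = np.percentile(yfinite, [5.0, 50.0, 95.0])
+    r = uq - lq
+    keep = np.isfinite(y) & (y >= m - N*r) & (y <= m + N*r)
+    return ~keep   # True marks the points that are omitted and marked on the plot
+```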
+
+### Output to file
+
+Use the `-o` or `--output` option to save the plot as an image, instead of
+displaying it interactively.
+
+
diff --git a/scripts/tsplot b/scripts/tsplot
new file mode 100755
index 0000000000000000000000000000000000000000..81a5da78c06a24d71f727431ef237a18f2a13b0b
--- /dev/null
+++ b/scripts/tsplot
@@ -0,0 +1,363 @@
+#!/usr/bin/env python
+#coding: utf-8
+
+import argparse
+import json
+import numpy as np
+import sys
+import re
+import logging
+import matplotlib as M
+import matplotlib.pyplot as P
+from distutils.version import LooseVersion
+from itertools import chain, islice, cycle
+
+# Run-time check for the matplotlib version required for line-style functionality.
+if LooseVersion(M.__version__)<LooseVersion("1.5.0"):
+    logging.critical("require matplotlib version ≥ 1.5.0")
+    sys.exit(1)
+
+# Read timeseries data from multiple files, plot each in its own panel with a common
+# time axis, and optionally share a vertical axis as governed by the configuration.
+
+def parse_clargs():
+    def float_or_none(s):
+        try: return float(s)
+        except ValueError: return None
+
+    def parse_range_spec(s):
+        l, r = (float_or_none(x) for x in s.split(','))
+        return (l,r)
+
+    P = argparse.ArgumentParser(description='Plot time series data on one or more graphs.')
+    P.add_argument('inputs', metavar='FILE', nargs='+',
+                   help='time series data in JSON format')
+    P.add_argument('-t', '--trange', metavar='RANGE', dest='trange',
+                   type=parse_range_spec, 
+                   help='restrict time axis to RANGE (see below)')
+    P.add_argument('-g', '--group', metavar='KEY,...', dest='groupby',
+                   type=lambda s: s.split(','), 
+                   help='plot series with same KEYs on the same axes')
+    P.add_argument('-o', '--output', metavar='FILE', dest='outfile',
+                   help='save plot to file FILE')
+    P.add_argument('-x', '--exclude', metavar='NUM', dest='exclude',
+                   type=float,
+                   help='exclude points lying further than NUM times the 0.9-interquantile range from the median')
+
+    P.epilog =  'A range is specified by a pair of floating point numbers min,max where '
+    P.epilog += 'either may be omitted to indicate the minimum or maximum of the corresponding '
+    P.epilog += 'values in the data.'
+
+    # modify args to avoid argparse having a fit when it encounters an option
+    # argument of the form '<negative number>,...'
+
+    argsbis = [' '+a if re.match(r'-[\d.]',a) else a for a in sys.argv[1:]]
+    return P.parse_args(argsbis)
+
+def isstring(s):
+    return isinstance(s,str) or isinstance(s,unicode)
+
+def take(n, s):
+    return islice((i for i in s), 0, n)
+
+class TimeSeries:
+    def __init__(self, ts, ys, **kwargs):
+        self.t = np.array(ts)
+        n = self.t.shape[0]
+
+        self.y = np.full_like(self.t, np.nan)
+        ny = min(len(ys), len(self.y))
+        self.y[:ny] = ys[:ny]
+
+        self.meta = dict(kwargs)
+        self.ex_ts = None
+
+    def trestrict(self, bounds):
+        clip = range_meet(self.trange(), bounds)
+        self.t = np.ma.masked_outside(self.t, v1=clip[0], v2=clip[1])
+        self.y = np.ma.masked_array(self.y, mask=self.t.mask)
+
+    def exclude_outliers(self, iqr_factor):
+        yfinite = np.ma.masked_invalid(self.y).compressed()
+        l_, lq, median, uq, u_ = np.percentile(yfinite, [0, 5.0, 50.0, 95.0, 100])
+        lb = median - iqr_factor*(uq-lq)
+        ub = median + iqr_factor*(uq-lq)
+        
+        np_err_save = np.seterr(all='ignore')
+        yex = np.ma.masked_where(np.isfinite(self.y)&(self.y<=ub)&(self.y>=lb), self.y)
+        np.seterr(**np_err_save)
+
+        tex = np.ma.masked_array(self.t, mask=yex.mask)
+        self.ex_ts = TimeSeries(tex.compressed(), yex.compressed())
+        self.ex_ts.meta = dict(self.meta)
+
+        self.y = np.ma.filled(np.ma.masked_array(self.y, mask=~yex.mask), np.nan)
+
+    def excluded(self):
+        return self.ex_ts
+
+    def name(self):
+        return self.meta.get('name',"")   # value of 'name' attribute in source
+
+    def label(self):
+        return self.meta.get('label',"")  # name of column in source
+
+    def units(self):
+        return self.meta.get('units',"")
+        
+    def trange(self):
+        return (min(self.t), max(self.t))
+
+    def yrange(self):
+        return (min(self.y), max(self.y))
+
+
+def read_json_timeseries(source):
+    j = json.load(source)
+
+    # Convention:
+    #
+    # Time series data is represented by an object with a subobject 'data' and optionally
+    # other key/value entries comprising metadata for the time series.
+    #
+    # The 'data' object holds one array of numbers 'time' and zero or more other
+    # numeric arrays of sample values corresponding to the values in 'time'. The
+    # names of these other arrays are taken to be the labels for the plots.
+    #
+    # Units can be specified by a top level entry 'units' which is either a string
+    # (units are common for all data series in the object) or by a map that
+    # takes a label to a unit string.
+
+    ts_list = []
+    jdata = j['data']
+    ncol = len(jdata)
+
+    times = jdata['time']
+    nsample = len(times)
+
+    def units(label):
+        try:
+           unitmap = j['units']
+           if isstring(unitmap):
+               return unitmap
+           else:
+               return unitmap[label]
+        except:
+           return ""
+
+    i = 1
+    for key in jdata.keys():
+        if key=="time": continue
+
+        meta = dict(j.items() + {'label': key, 'data': None, 'units': units(key)}.items())
+        del meta['data']
+
+        ts_list.append(TimeSeries(times, jdata[key], **meta))
+
+    return ts_list
+        
+def range_join(r, s):
+    return (min(r[0], s[0]), max(r[1], s[1]))
+
+def range_meet(r, s):
+    return (max(r[0], s[0]), min(r[1], s[1]))
+
+
+class PlotData:
+    def __init__(self, key_label=""):
+        self.series = []
+        self.group_key_label = key_label
+
+    def trange(self):
+        return reduce(range_join, [s.trange() for s in self.series])
+
+    def yrange(self):
+        return reduce(range_join, [s.yrange() for s in self.series])
+
+    def name(self):
+        return reduce(lambda n, s: n or s.name(), self.series, "")
+
+    def group_label(self):
+        return self.group_key_label
+
+    def unique_labels(self):
+        # attempt to create unique labels for plots in the group based on
+        # meta data
+        labels = [s.label() for s in self.series]
+        if len(labels)<2:
+            return labels
+
+        n = len(labels)
+        keyset = reduce(lambda k, s: k.union(s.meta.keys()), self.series, set())
+        keyi = iter(keyset)
+        try:
+            while len(set(labels)) != n:
+                k = next(keyi)
+                if k=='label':
+                    continue
+
+                vs = [s.meta.get(k,None) for s in self.series]
+                if len(set(vs))==1:
+                    continue
+
+                for i in xrange(n):
+                    prefix = '' if k=='name' else k+'='
+                    if vs[i] is not None:
+                        labels[i] += u', '+prefix+unicode(vs[i])
+
+        except StopIteration:
+            pass
+
+        return labels
+
+# Input: list of TimeSeries objects; collection of metadata keys to group on
+# Return list of plot info (time series, data extents, metadata), one per plot.
+
+def gather_ts_plots(tss, groupby):
+    group_lookup = {}
+    plot_groups = []
+
+    for ts in tss:
+        key = tuple([ts.meta.get(g) for g in groupby])
+        if key is () or None in key or key not in group_lookup:
+            pretty_key=', '.join([unicode(k)+u'='+unicode(v) for k,v in zip(groupby, key) if v is not None])
+            pd = PlotData(pretty_key)
+            pd.series = [ts]
+            plot_groups.append(pd)
+            group_lookup[key] = len(plot_groups)-1
+        else:
+            plot_groups[group_lookup[key]].series.append(ts)
+
+    return plot_groups
+
+
+def make_palette(cm_name, n, cmin=0, cmax=1):
+    smap = M.cm.ScalarMappable(M.colors.Normalize(cmin/float(cmin-cmax),(cmin-1)/float(cmin-cmax)),
+                               M.cm.get_cmap(cm_name))
+    return [smap.to_rgba((2*i+1)/float(2*n)) for i in xrange(n)]
+
+def plot_plots(plot_groups, save=None):
+    nplots = len(plot_groups)
+    plot_groups = sorted(plot_groups, key=lambda g: g.group_label())
+
+    # use same global time scale for all plots
+    trange = reduce(range_join, [g.trange() for g in plot_groups])
+
+    # use group names for titles?
+    group_titles = any((g.group_label() for g in plot_groups))
+
+    figure = P.figure()
+    for i in xrange(nplots):
+        group = plot_groups[i]
+        plot = figure.add_subplot(nplots, 1, i+1)
+
+        title = group.group_label() if group_titles else group.name()
+        plot.set_title(title)
+
+        # y-axis label: use timeseries label and units if only
+        # one series in group, otherwise use a legend with labels,
+        # and units alone on the axes. At most two different unit
+        # axes can be drawn.
+
+        def ylabel(unit):
+            if len(group.series)==1:
+                lab = group.series[0].label()
+                if unit:
+                    lab += ' (' + unit + ')'
+            else:
+                lab = unit
+
+            return lab
+            
+        uniq_units = list(set([s.units() for s in group.series]))
+        uniq_units.sort()
+        if len(uniq_units)>2:
+            logging.warning('more than two different units on the same plot')
+            uniq_units = uniq_units[:2]
+
+        # store each series in a slot corresponding to one of the units,
+        # together with a best-effort label
+
+        series_by_unit = [[] for i in xrange(len(uniq_units))]
+        unique_labels = group.unique_labels()
+
+        for si in xrange(len(group.series)):
+            s = group.series[si]
+            label = unique_labels[si]
+            try:
+                series_by_unit[uniq_units.index(s.units())].append((s,label))
+            except ValueError:
+                pass
+
+        # TODO: need to find a scheme of colour/line allocation that is
+        # double y-axis AND greyscale friendly.
+
+        palette = \
+            [make_palette(cm, n, 0, 0.5) for
+                cm, n in zip(['hot', 'winter'],  [len(x) for x in series_by_unit])]
+
+        lines = cycle(["-",(0,(3,1))])
+        
+        first_plot = True
+        for ui in xrange(len(uniq_units)):
+            if not first_plot:
+                plot = plot.twinx()
+
+            axis_color = palette[ui][0]
+            plot.set_ylabel(ylabel(uniq_units[ui]), color=axis_color)
+            for l in plot.get_yticklabels():
+                l.set_color(axis_color)
+
+            plot.get_yaxis().get_major_formatter().set_useOffset(False)
+            plot.get_yaxis().set_major_locator(M.ticker.MaxNLocator(nbins=6))
+            
+            plot.set_xlim(trange)
+
+            colours = cycle(palette[ui])
+            line = next(lines)
+            for s, l in series_by_unit[ui]:
+                c = next(colours)
+                plot.plot(s.t, s.y, color=c, ls=line, label=l)
+                # treat excluded points specially
+                ex = s.excluded()
+                if ex is not None:
+                    ymin, ymax = s.yrange()
+                    plot.plot(ex.t, np.clip(ex.y, ymin, ymax), marker='x', ls='', color=c)
+
+            if first_plot:
+                plot.legend(loc=2, fontsize='small')
+                plot.grid()
+            else:
+                plot.legend(loc=1, fontsize='small')
+
+            first_plot = False
+
+    # adapted from http://stackoverflow.com/questions/6963035
+    axis_ymin = min([ax.get_position().ymin for ax in figure.axes])
+    figure.text(0.5, axis_ymin - float(3)/figure.dpi, 'time', ha='center', va='center')
+    if save:
+        figure.savefig(save)
+    else:
+        P.show()
+        
+args = parse_clargs()
+tss = []
+for filename in args.inputs:
+    with open(filename) as f:
+        tss.extend(read_json_timeseries(f))
+
+if args.trange:
+    for ts in tss:
+        ts.trestrict(args.trange)
+
+if args.exclude:
+    for ts in tss:
+        ts.exclude_outliers(args.exclude)
+
+groupby = args.groupby if args.groupby else []
+plots = gather_ts_plots(tss, groupby)
+
+if not args.outfile:
+    M.interactive(False)
+
+plot_plots(plots, save=args.outfile)
diff --git a/src/cell.cpp b/src/cell.cpp
index 3a2d5672a12bb74e2e484765e16b93e870b2bd4b..01d681a7e080c8c0444fe0299820181d5154028e 100644
--- a/src/cell.cpp
+++ b/src/cell.cpp
@@ -201,16 +201,6 @@ std::vector<int> const& cell::segment_parents() const
     return parents_;
 }
 
-void cell::add_synapse(segment_location loc)
-{
-    synapses_.push_back(loc);
-}
-
-const std::vector<segment_location>& cell::synapses() const
-{
-    return synapses_;
-}
-
 // Rough and ready comparison of two cells.
 // We don't use an operator== because equality of two cells is open to
 // interpretation. For example, it is possible to have two viable representations
diff --git a/src/cell.hpp b/src/cell.hpp
index 744c9970e26af7f439f9cee7cfb2f88c96014624..1cb9b9f65dedbdcc4476c2b447eed1d1dea243ca 100644
--- a/src/cell.hpp
+++ b/src/cell.hpp
@@ -52,6 +52,11 @@ public:
     using index_type = int;
     using value_type = double;
     using point_type = point<value_type>;
+    
+    struct synapse_instance {
+        segment_location location;
+        parameter_list mechanism;
+    };
     struct probe_instance {
         segment_location location;
         probeKind kind;
@@ -138,9 +143,13 @@ public:
     //////////////////
     // synapses
     //////////////////
-    void add_synapse(segment_location loc);
-
-    const std::vector<segment_location>& synapses() const;
+    void add_synapse(segment_location loc, parameter_list p)
+    {
+        synapses_.push_back(synapse_instance{loc, std::move(p)});
+    }
+    const std::vector<synapse_instance>& synapses() const {
+        return synapses_;
+    }
 
     //////////////////
     // spike detectors
@@ -180,7 +189,7 @@ private:
     std::vector<stimulus_instance> stimulii_;
 
     // the synapses
-    std::vector<segment_location> synapses_;
+    std::vector<synapse_instance> synapses_;
 
     // the sensors
     std::vector<detector_instance> spike_detectors_;
diff --git a/src/cell_group.hpp b/src/cell_group.hpp
index a6ee8b8fedb39250870db4b613716c417aaa27b3..a5a6690cf08fe03f64f52d56a3577b0b0c747a52 100644
--- a/src/cell_group.hpp
+++ b/src/cell_group.hpp
@@ -81,7 +81,7 @@ public:
             // take any pending samples
             float cell_time = cell_.time();
 
-                nest::mc::util::profiler_enter("sampling");
+            PE("sampling");
             while (auto m = sample_events_.pop_if_before(cell_time)) {
                 auto& sampler = samplers_[m->sampler_index];
                 EXPECTS((bool)sampler.sample);
@@ -93,7 +93,7 @@ public:
                     sample_events_.push(*m);
                 }
             }
-                nest::mc::util::profiler_leave();
+            PL();
 
             // look for events in the next time step
             auto tstep = std::min(tfinal, cell_.time()+dt);
@@ -106,7 +106,7 @@ public:
                 std::cerr << "warning: solution out of bounds\n";
             }
 
-                nest::mc::util::profiler_enter("events");
+            PE("events");
             // check for new spikes
             for (auto& s : spike_sources_) {
                 if (auto spike = s.source.test(cell_, cell_.time())) {
@@ -124,13 +124,13 @@ public:
                     cell_.apply_event(e.get());
                 }
             }
-                nest::mc::util::profiler_leave();
+            PL();
         }
 
     }
 
     template <typename R>
-    void enqueue_events(R events) {
+    void enqueue_events(const R& events) {
         for (auto e : events) {
             e.target -= first_target_gid_;
             events_.push(e);
diff --git a/src/fvm_cell.hpp b/src/fvm_cell.hpp
index 25c0ae9a0c67dd42e7bcf680a9b32a0c6070290b..148f8ac55be791e2efaefd70e444dfef3a458058 100644
--- a/src/fvm_cell.hpp
+++ b/src/fvm_cell.hpp
@@ -20,8 +20,6 @@
 #include <profiling/profiler.hpp>
 
 #include <vector/include/Vector.hpp>
-#include <mechanisms/expsyn.hpp>
-
 
 namespace nest {
 namespace mc {
@@ -185,10 +183,7 @@ private:
     /// the potential in mV in each CV
     vector_type voltage_;
 
-    /// synapses
-    using synapse_type =
-        mechanisms::ExpSyn::mechanism_ExpSyn<value_type, size_type>;
-    std::size_t synapse_index_;
+    std::size_t synapse_index_; // synapses at the end of mechanisms_, from here
 
     /// the set of mechanisms present in the cell
     std::vector<mechanism_type> mechanisms_;
@@ -342,6 +337,25 @@ fvm_cell<T, I>::fvm_cell(nest::mc::cell const& cell)
         );
     }
 
+    synapse_index_ = mechanisms_.size();
+
+    std::map<std::string, std::vector<int>> syn_map;
+    for (const auto& syn : cell.synapses()) {
+        syn_map[syn.mechanism.name()].push_back(find_compartment_index(syn.location, graph));
+    }
+
+    for (const auto &syni : syn_map) {
+        const auto& mech_name = syni.first;
+        auto& helper = nest::mc::mechanisms::get_mechanism_helper(mech_name);
+
+        index_type compartment_index(syni.second);
+
+        auto mech = helper->new_mechanism(voltage_, current_, compartment_index);
+        mech->set_areas(cv_areas_);
+        mechanisms_.push_back(std::move(mech));
+    }
+
+
     /////////////////////////////////////////////
     // build the ion species
     /////////////////////////////////////////////
@@ -398,24 +412,6 @@ fvm_cell<T, I>::fvm_cell(nest::mc::cell const& cell)
         stimulii_.push_back( {idx, stim.clamp} );
     }
 
-    // add the synapses
-    std::vector<size_type> synapse_indexes;
-    synapse_indexes.reserve(cell.synapses().size());
-    for(auto loc : cell.synapses()) {
-        synapse_indexes.push_back(
-            find_compartment_index(loc, graph)
-        );
-    }
-
-    mechanisms_.push_back(
-        mechanisms::make_mechanism<synapse_type>(
-            voltage_, current_, index_view(synapse_indexes)
-        )
-    );
-    synapse_index_ = mechanisms_.size()-1;
-    // don't forget to give point processes access to cv_areas_
-    mechanisms_[synapse_index_]->set_areas(cv_areas_);
-
     // record probe locations by index into corresponding state vector
     for (auto probe : cell.probes()) {
         uint32_t comp = find_compartment_index(probe.location, graph);
@@ -517,15 +513,15 @@ void fvm_cell<T, I>::advance(T dt)
 {
     using memory::all;
 
-        mc::util::profiler_enter("current");
+    PE("current");
     current_(all) = 0.;
 
     // update currents from ion channels
     for(auto& m : mechanisms_) {
-            mc::util::profiler_enter(m->name().c_str());
+        PE(m->name().c_str());
         m->set_params(t_, dt);
         m->nrn_current();
-            mc::util::profiler_leave();
+        PL();
     }
 
     // add current contributions from stimulii
@@ -536,25 +532,25 @@ void fvm_cell<T, I>::advance(T dt)
         // the factor of 100 scales the injected current to 10^2.nA
         current_[loc] -= 100.*ie/cv_areas_[loc];
     }
-        mc::util::profiler_leave();
+    PL();
 
-        mc::util::profiler_enter("matrix", "setup");
+    PE("matrix", "setup");
     // solve the linear system
     setup_matrix(dt);
-        mc::util::profiler_leave(); mc::util::profiler_enter("solve");
+    PL(); PE("solve");
     matrix_.solve();
-        mc::util::profiler_leave();
+    PL();
     voltage_(all) = matrix_.rhs();
-        mc::util::profiler_leave();
+    PL();
 
-        mc::util::profiler_enter("state");
+    PE("state");
     // integrate state of gating variables etc.
     for(auto& m : mechanisms_) {
-            mc::util::profiler_enter(m->name().c_str());
+        PE(m->name().c_str());
         m->nrn_state();
-            mc::util::profiler_leave();
+        PL();
     }
-        mc::util::profiler_leave();
+    PL();
 
     t_ += dt;
 }
diff --git a/src/mechanism_interface.cpp b/src/mechanism_interface.cpp
index f44d1f561d3efcc51b262b5bb32e0a95084bbcb5..680e8739d11ee0dad16890bb1088a1a7d4b774b5 100644
--- a/src/mechanism_interface.cpp
+++ b/src/mechanism_interface.cpp
@@ -6,6 +6,8 @@
 
 #include <mechanisms/hh.hpp>
 #include <mechanisms/pas.hpp>
+#include <mechanisms/expsyn.hpp>
+#include <mechanisms/exp2syn.hpp>
 
 
 namespace nest {
@@ -24,6 +26,16 @@ void setup_mechanism_helpers() {
         make_mechanism_helper<
             mechanisms::hh::helper<value_type, index_type>
         >();
+
+    mechanism_helpers["expsyn"] =
+        make_mechanism_helper<
+            mechanisms::expsyn::helper<value_type, index_type>
+        >();
+
+    mechanism_helpers["exp2syn"] =
+        make_mechanism_helper<
+            mechanisms::exp2syn::helper<value_type, index_type>
+        >();
 }
 
 mechanism_helper_ptr<value_type, index_type>&
diff --git a/src/profiling/profiler.hpp b/src/profiling/profiler.hpp
index 7230697baab21f914fa3b92f4ffbed4382b062b6..4332925a5b3ee46c8c07164a80c4acf5abd72989 100644
--- a/src/profiling/profiler.hpp
+++ b/src/profiling/profiler.hpp
@@ -226,6 +226,7 @@ void profiler_enter(const char* n, Args... args) {
 
 /// move up one level in the profiler
 void profiler_leave();
+
 /// move up multiple profiler levels in one call
 void profiler_leave(int nlevels);
 
@@ -238,3 +239,8 @@ void profiler_output(double threshold);
 } // namespace util
 } // namespace mc
 } // namespace nest
+
+// define some helper macros to make instrumentation of the source code with calls
+// to the profiler a little less visually distracting
+#define PE nest::mc::util::profiler_enter
+#define PL nest::mc::util::profiler_leave
diff --git a/tests/test_util.hpp b/tests/test_util.hpp
index cfa89896af8039e6188d8fbc460f412d3407a1b3..0f9935d8c517758a4aa1308921b382ea56b596fc 100644
--- a/tests/test_util.hpp
+++ b/tests/test_util.hpp
@@ -55,28 +55,6 @@ void write_vis_file(const std::string& fname, std::vector<std::vector<double>> v
     }
 }
 
-[[gnu::unused]] static
-nlohmann::json
-load_spike_data(const std::string& input_name)
-{
-    nlohmann::json cell_data;
-    std::ifstream fid(input_name);
-    if(!fid.is_open()) {
-        std::cerr << "error : unable to open file " << input_name
-                  << " : run the validation generation script first\n";
-        return {};
-    }
-
-    try {
-        fid >> cell_data;
-    }
-    catch (...) {
-        std::cerr << "error : incorrectly formatted json file " << input_name << "\n";
-        return {};
-    }
-    return cell_data;
-}
-
 template <typename T>
 std::vector<T> find_spikes(std::vector<T> const& v, T threshold, T dt)
 {
diff --git a/tests/unit/test_mechanisms.cpp b/tests/unit/test_mechanisms.cpp
index aef84890275263ca7ef9570c1017864736a6f331..2e31f1139d25cb3e36bc1ff0b32a3ec8a4316a4b 100644
--- a/tests/unit/test_mechanisms.cpp
+++ b/tests/unit/test_mechanisms.cpp
@@ -6,7 +6,7 @@
 TEST(mechanisms, helpers) {
     nest::mc::mechanisms::setup_mechanism_helpers();
 
-    EXPECT_EQ(nest::mc::mechanisms::mechanism_helpers.size(), 2u);
+    EXPECT_EQ(nest::mc::mechanisms::mechanism_helpers.size(), 4u);
 
     // verify that the hh and pas channels are available
     EXPECT_EQ(nest::mc::mechanisms::get_mechanism_helper("hh")->name(), "hh");
diff --git a/tests/unit/test_synapses.cpp b/tests/unit/test_synapses.cpp
index ff79c1e6488a2534a03303aa30f05403bd4acdf9..9ad68c2444ef17d6f2b2355e3c388f76ca9220e6 100644
--- a/tests/unit/test_synapses.cpp
+++ b/tests/unit/test_synapses.cpp
@@ -3,6 +3,8 @@
 
 #include <cell.hpp>
 #include <fvm_cell.hpp>
+#include <mechanisms/expsyn.hpp>
+#include <mechanisms/exp2syn.hpp>
 
 // compares results with those generated by nrn/ball_and_stick.py
 TEST(synapses, add_to_cell)
@@ -18,29 +20,37 @@ TEST(synapses, add_to_cell)
     auto soma = cell.add_soma(12.6157/2.0);
     soma->add_mechanism(hh_parameters());
 
-    cell.add_synapse({0, 0.1});
-    cell.add_synapse({1, 0.2});
-    cell.add_synapse({0, 0.3});
+    parameter_list exp_default("expsyn");
+    parameter_list exp2_default("exp2syn");
+
+    cell.add_synapse({0, 0.1}, exp_default);
+    cell.add_synapse({1, 0.2}, exp2_default);
+    cell.add_synapse({0, 0.3}, exp_default);
 
     EXPECT_EQ(3u, cell.synapses().size());
-    EXPECT_EQ(cell.synapses()[0].segment, 0);
-    EXPECT_EQ(cell.synapses()[0].position, 0.1);
-    EXPECT_EQ(cell.synapses()[1].segment, 1);
-    EXPECT_EQ(cell.synapses()[1].position, 0.2);
-    EXPECT_EQ(cell.synapses()[2].segment, 0);
-    EXPECT_EQ(cell.synapses()[2].position, 0.3);
+    const auto& syns = cell.synapses();
+
+    EXPECT_EQ(syns[0].location.segment, 0);
+    EXPECT_EQ(syns[0].location.position, 0.1);
+    EXPECT_EQ(syns[0].mechanism.name(), "expsyn");
+
+    EXPECT_EQ(syns[1].location.segment, 1);
+    EXPECT_EQ(syns[1].location.position, 0.2);
+    EXPECT_EQ(syns[1].mechanism.name(), "exp2syn");
+
+    EXPECT_EQ(syns[2].location.segment, 0);
+    EXPECT_EQ(syns[2].location.position, 0.3);
+    EXPECT_EQ(syns[2].mechanism.name(), "expsyn");
 }
 
 // compares results with those generated by nrn/ball_and_stick.py
-TEST(synapses, basic_state)
+TEST(synapses, expsyn_basic_state)
 {
     using namespace nest::mc;
 
-    // setup global state for the mechanisms
-    nest::mc::mechanisms::setup_mechanism_helpers();
-
-    using synapse_type = mechanisms::ExpSyn::mechanism_ExpSyn<double, int>;
+    using synapse_type = mechanisms::expsyn::mechanism_expsyn<double, int>;
     auto num_syn = 4;
+
     synapse_type::index_type indexes(num_syn);
     synapse_type::vector_type voltage(num_syn, -65.0);
     synapse_type::vector_type current(num_syn,   1.0);
@@ -81,3 +91,54 @@ TEST(synapses, basic_state)
     EXPECT_EQ(ptr->g[1], 3.14);
     EXPECT_EQ(ptr->g[3], 1.04);
 }
+
+TEST(synapses, exp2syn_basic_state)
+{
+    using namespace nest::mc;
+
+    using synapse_type = mechanisms::exp2syn::mechanism_exp2syn<double, int>;
+    auto num_syn = 4;
+
+    synapse_type::index_type indexes(num_syn);
+    synapse_type::vector_type voltage(num_syn, -65.0);
+    synapse_type::vector_type current(num_syn,   1.0);
+    auto mech = mechanisms::make_mechanism<synapse_type>( voltage, current, indexes );
+
+    auto ptr = dynamic_cast<synapse_type*>(mech.get());
+
+    // parameters initialized to default values
+    for(auto e : ptr->e) {
+        EXPECT_EQ(e, 0.);
+    }
+    for(auto tau1: ptr->tau1) {
+        EXPECT_EQ(tau1, 0.5);
+    }
+    for(auto tau2: ptr->tau2) {
+        EXPECT_EQ(tau2, 2.0);
+    }
+
+    // factor should be initialized to NaN (NaN compares unequal to itself)
+    for(auto factor: ptr->factor) {
+        EXPECT_NE(factor, factor);
+    }
+
+    // initialize state then check factor has sane (positive) value
+    // and A and B are zero
+    ptr->nrn_init();
+    for(auto factor: ptr->factor) {
+        EXPECT_GT(factor, 0.);
+    }
+    for(auto A: ptr->A) {
+        EXPECT_EQ(A, 0.);
+    }
+    for(auto B: ptr->B) {
+        EXPECT_EQ(B, 0.);
+    }
+
+    // call net_receive on two of the synapses
+    ptr->net_receive(1, 3.14);
+    ptr->net_receive(3, 1.04);
+
+    EXPECT_NEAR(ptr->A[1], ptr->factor[1]*3.14, 1e-6);
+    EXPECT_NEAR(ptr->B[3], ptr->factor[3]*1.04, 1e-6);
+}
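The positivity check on factor above matches the conventional two-exponential peak normalisation (as in NEURON's Exp2Syn); the sketch below, with the default tau1 = 0.5 ms and tau2 = 2.0 ms verified by the test, is only illustrative, since the generated exp2syn code may compute it differently:

    #include <cassert>
    #include <cmath>

    // scale factor that normalises the peak of exp(-t/tau2) - exp(-t/tau1) to 1
    double exp2syn_factor(double tau1, double tau2) {
        double tp = tau1*tau2/(tau2 - tau1)*std::log(tau2/tau1);  // time of peak
        return 1.0/(std::exp(-tp/tau2) - std::exp(-tp/tau1));
    }

    void check_factor() {
        assert(exp2syn_factor(0.5, 2.0) > 0.);  // ~2.12 for the defaults above
    }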
diff --git a/tests/validation/CMakeLists.txt b/tests/validation/CMakeLists.txt
index 31477528d45b62aef700fbbdff3e3ea686a66ee4..c6c72fa04efab7394190288ba3face603d141faf 100644
--- a/tests/validation/CMakeLists.txt
+++ b/tests/validation/CMakeLists.txt
@@ -1,23 +1,11 @@
-set(HEADERS
-    ${PROJECT_SOURCE_DIR}/src/cell.hpp
-    ${PROJECT_SOURCE_DIR}/src/cell_tree.hpp
-    ${PROJECT_SOURCE_DIR}/src/math.hpp
-    ${PROJECT_SOURCE_DIR}/src/point.hpp
-    ${PROJECT_SOURCE_DIR}/src/segment.hpp
-    ${PROJECT_SOURCE_DIR}/src/swcio.hpp
-    ${PROJECT_SOURCE_DIR}/src/tree.hpp
-)
-
-set(TEST_SOURCES
-    # unit test driver
-    test.cpp
-)
-
 set(VALIDATION_SOURCES
     # unit tests
     validate_ball_and_stick.cpp
     validate_soma.cpp
-    #validate_synapses.cpp
+    validate_synapses.cpp
+
+    # support code
+    validation_data.cpp
 
     # unit test driver
     validate.cpp
diff --git a/tests/validation/test.cpp b/tests/validation/test.cpp
deleted file mode 100644
index f2eab0931a0b4b2a5b17ed12ba6c2f3d1d09d27c..0000000000000000000000000000000000000000
--- a/tests/validation/test.cpp
+++ /dev/null
@@ -1,11 +0,0 @@
-#include <iostream>
-#include <fstream>
-#include <numeric>
-#include <vector>
-
-#include "gtest.h"
-
-int main(int argc, char **argv) {
-    ::testing::InitGoogleTest(&argc, argv);
-    return RUN_ALL_TESTS();
-}
diff --git a/tests/validation/validate.cpp b/tests/validation/validate.cpp
index d814fca1512dd79b22763a4f9a769f2984ca5269..b89a7b54223d7c2342c9882749c28a886782b316 100644
--- a/tests/validation/validate.cpp
+++ b/tests/validation/validate.cpp
@@ -1,7 +1,31 @@
+#include <cstring>
+#include <iostream>
+#include <fstream>
+#include <numeric>
+#include <vector>
+
 #include "gtest.h"
+#include "validation_data.hpp"
+
+int usage(const char* argv0) {
+    std::cerr << "usage: " << argv0 << " [-p|--path validation_data_directory]\n";
+    return 1;
+}
 
 int main(int argc, char **argv) {
     ::testing::InitGoogleTest(&argc, argv);
+
+    if (argv[1] && (!std::strcmp(argv[1], "-p") || !std::strcmp(argv[1], "--path"))) {
+        if (argv[2]) {
+            testing::g_validation_data.set_path(argv[2]);
+        }
+        else {
+            return usage(argv[0]);
+        }
+    }
+    else if (argv[1]) {
+        return usage(argv[0]);
+    }
+
     return RUN_ALL_TESTS();
 }
-
diff --git a/tests/validation/validate_ball_and_stick.cpp b/tests/validation/validate_ball_and_stick.cpp
index 803550641694fb1226bf90c42877634aa9a4fd34..0e41bd021534af6020482659f082eca0867fb63a 100644
--- a/tests/validation/validate_ball_and_stick.cpp
+++ b/tests/validation/validate_ball_and_stick.cpp
@@ -1,12 +1,13 @@
 #include <fstream>
 #include <json/src/json.hpp>
 
-#include "gtest.h"
-#include "../test_util.hpp"
-
 #include <cell.hpp>
 #include <fvm_cell.hpp>
 
+#include "gtest.h"
+#include "../test_util.hpp"
+#include "validation_data.hpp"
+
 // compares results with those generated by nrn/ball_and_stick.py
 TEST(ball_and_stick, neuron_baseline)
 {
@@ -32,7 +33,7 @@ TEST(ball_and_stick, neuron_baseline)
     cell.add_stimulus({1,1}, {5., 80., 0.3});
 
     // load data from file
-    auto cell_data = testing::load_spike_data("../nrn/ball_and_stick.json");
+    auto cell_data = testing::g_validation_data.load("ball_and_stick.json");
     EXPECT_TRUE(cell_data.size()>0);
     if(cell_data.size()==0) return;
 
@@ -187,7 +188,7 @@ TEST(ball_and_3stick, neuron_baseline)
     cell.add_stimulus({3,1}, {40., 10.,-0.2});
 
     // load data from file
-    auto cell_data = testing::load_spike_data("../nrn/ball_and_3stick.json");
+    auto cell_data = testing::g_validation_data.load("ball_and_3stick.json");
     EXPECT_TRUE(cell_data.size()>0);
     if(cell_data.size()==0) return;
 
diff --git a/tests/validation/validate_soma.cpp b/tests/validation/validate_soma.cpp
index dac3ca0fd7604a307b46ed25441334d5641b1ca7..9c3c9fdfa56b14b9aa948c762023355541866d9a 100644
--- a/tests/validation/validate_soma.cpp
+++ b/tests/validation/validate_soma.cpp
@@ -1,12 +1,13 @@
 #include <fstream>
 #include <json/src/json.hpp>
 
-#include "gtest.h"
-#include "../test_util.hpp"
-
 #include <cell.hpp>
 #include <fvm_cell.hpp>
 
+#include "gtest.h"
+#include "../test_util.hpp"
+#include "validation_data.hpp"
+
 // compares results with those generated by nrn/soma.py
 // single compartment model with HH channels
 TEST(soma, neuron_baseline)
@@ -31,7 +32,7 @@ TEST(soma, neuron_baseline)
     fvm::fvm_cell<double, int> model(cell);
 
     // load data from file
-    auto cell_data = testing::load_spike_data("../nrn/soma.json");
+    auto cell_data = testing::g_validation_data.load("soma.json");
     EXPECT_TRUE(cell_data.size()>0);
     if(cell_data.size()==0) return;
 
diff --git a/tests/validation/validate_synapses.cpp b/tests/validation/validate_synapses.cpp
index 3b53b3c1ab48f8d2fc234550c9d27a6ac4644910..42ace29c3393bb177b878ab3e8c6eb6de42d8676 100644
--- a/tests/validation/validate_synapses.cpp
+++ b/tests/validation/validate_synapses.cpp
@@ -1,13 +1,16 @@
 #include <fstream>
+#include <iterator>
 
-#include "gtest.h"
-#include "../test_util.hpp"
+#include <json/src/json.hpp>
 
 #include <cell.hpp>
+#include <cell_group.hpp>
 #include <fvm_cell.hpp>
+#include <mechanism_interface.hpp>
 
-#include <json/src/json.hpp>
-
+#include "gtest.h"
+#include "../test_util.hpp"
+#include "validation_data.hpp"
 
 // For storing the results of a simulation along with the information required
 // to compare two simulations for accuracy.
@@ -43,7 +46,7 @@ struct result {
 };
 
 // compares results with those generated by nrn/simple_synapse.py
-TEST(simple_synapse, neuron_baseline)
+void run_neuron_baseline(const char* syn_type, const char* data_file)
 {
     using namespace nest::mc;
     using namespace nlohmann;
@@ -51,7 +54,7 @@ TEST(simple_synapse, neuron_baseline)
     nest::mc::cell cell;
 
     // setup global state for the mechanisms
-    nest::mc::mechanisms::setup_mechanism_helpers();
+    mechanisms::setup_mechanism_helpers();
 
     // Soma with diameter 12.6157 um and HH channel
     auto soma = cell.add_soma(12.6157/2.0);
@@ -64,11 +67,23 @@ TEST(simple_synapse, neuron_baseline)
     dendrite->mechanism("membrane").set("r_L", 100);
     soma->mechanism("membrane").set("r_L", 100);
 
-    // add stimulus
-    cell.add_synapse({1, 0.5});
+    // add synapse
+    parameter_list syn_default(syn_type);
+    cell.add_synapse({1, 0.5}, syn_default);
+
+    // add probes
+    auto probe_soma = cell.add_probe({0,0}, probeKind::membrane_voltage);
+    auto probe_dend = cell.add_probe({1,0.5}, probeKind::membrane_voltage);
+
+    // injected spike events
+    postsynaptic_spike_event synthetic_events[] = {
+        {0u, 10.0, 0.04},
+        {0u, 20.0, 0.04},
+        {0u, 40.0, 0.04}
+    };
 
     // load data from file
-    auto cell_data = testing::load_spike_data("../nrn/simple_synapse.json");
+    auto cell_data = testing::g_validation_data.load(data_file);
     EXPECT_TRUE(cell_data.size()>0);
     if(cell_data.size()==0) return;
 
@@ -91,34 +106,26 @@ TEST(simple_synapse, neuron_baseline)
         std::vector<std::vector<double>> v(2);
 
         // make the lowered finite volume cell
-        fvm::fvm_cell<double, int> model(cell);
-        auto graph = cell.model();
-
-        // set initial conditions
-        using memory::all;
-        model.voltage()(all) = -65.;
-        model.initialize(); // have to do this _after_ initial conditions are set
+        cell_group<fvm::fvm_cell<double, int>> group(cell);
+        group.set_source_gids(0);
+        group.set_target_gids(0);
 
         // add the 3 spike events to the queue
-        model.queue().push({0u, 10.0, 0.04});
-        model.queue().push({0u, 20.0, 0.04});
-        model.queue().push({0u, 40.0, 0.04});
+        group.enqueue_events(synthetic_events);
 
         // run the simulation
-        auto soma_comp = nest::mc::find_compartment_index({0,0}, graph);
-        auto dend_comp = nest::mc::find_compartment_index({1,0.5}, graph);
-        v[0].push_back(model.voltage()[soma_comp]);
-        v[1].push_back(model.voltage()[dend_comp]);
+        v[0].push_back(group.cell().probe(probe_soma));
+        v[1].push_back(group.cell().probe(probe_dend));
         double t  = 0.;
         while(t < tfinal) {
             t += dt;
-            model.advance_to(t, dt);
-            // save voltage at soma
-            v[0].push_back(model.voltage()[soma_comp]);
-            v[1].push_back(model.voltage()[dend_comp]);
+            group.advance(t, dt);
+            // save voltage at soma and dendrite
+            v[0].push_back(group.cell().probe(probe_soma));
+            v[1].push_back(group.cell().probe(probe_dend));
         }
 
-        results.push_back( {num_compartments, dt, v, measurements} );
+        results.push_back({num_compartments, dt, v, measurements});
     }
 
     // print results
@@ -158,3 +165,12 @@ TEST(simple_synapse, neuron_baseline)
     }
 }
 
+TEST(simple_synapse, expsyn_neuron_baseline) {
+    SCOPED_TRACE("expsyn");
+    run_neuron_baseline("expsyn","simple_exp_synapse.json");
+}
+
+TEST(simple_synapse, exp2syn_neuron_baseline) {
+    SCOPED_TRACE("exp2syn");
+    run_neuron_baseline("exp2syn","simple_exp2_synapse.json");
+}
diff --git a/tests/validation/validation_data.cpp b/tests/validation/validation_data.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..306adbaf38c02171f56a042f62662702d4faa3be
--- /dev/null
+++ b/tests/validation/validation_data.cpp
@@ -0,0 +1,22 @@
+#include "validation_data.hpp"
+
+#include <fstream>
+#include <stdexcept>
+
+namespace testing {
+
+nlohmann::json data_loader::load(const std::string& name) const {
+    std::string data_path=path_+'/'+name;
+    std::ifstream fid(data_path);
+    if (!fid) {
+        throw std::runtime_error("unable to load validation data: "+data_path);
+    }
+
+    nlohmann::json data;
+    fid >> data;
+    return data;
+}
+
+data_loader g_validation_data;
+
+} // namespace testing
diff --git a/tests/validation/validation_data.hpp b/tests/validation/validation_data.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..2aeb2f393d4fa28cac21e4d50174ad82d0c76289
--- /dev/null
+++ b/tests/validation/validation_data.hpp
@@ -0,0 +1,28 @@
+#pragma once
+
+#include <string>
+
+#include <json/src/json.hpp>
+
+#ifndef DATADIR
+#define DATADIR "../data"
+#endif
+
+namespace testing {
+
+class data_loader {
+public:
+    // set where to find the validation JSON files
+    void set_path(const std::string& path) { path_=path; }
+
+    // load JSON file from path plus name, throw exception
+    // if cannot be found or loaded.
+    nlohmann::json load(const std::string& name) const;
+
+private:
+    std::string path_=DATADIR "/validation";
+};
+
+extern data_loader g_validation_data;
+
+} // namespace testing
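A short usage sketch for the loader declared above, mirroring the calls made in the validation tests; the override path is hypothetical, and note that a missing file raises the std::runtime_error thrown in validation_data.cpp while malformed JSON surfaces as an nlohmann exception:

    #include <exception>
    #include <iostream>

    #include "validation_data.hpp"

    void load_example() {
        // optional: point the loader somewhere other than DATADIR "/validation"
        testing::g_validation_data.set_path("/custom/data/validation");
        try {
            auto data = testing::g_validation_data.load("soma.json");
            std::cout << "loaded " << data.size() << " records\n";
        }
        catch (const std::exception& e) {
            std::cerr << "validation data error: " << e.what() << "\n";
        }
    }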