diff --git a/arbor/include/arbor/cable_cell.hpp b/arbor/include/arbor/cable_cell.hpp
index d942bb477b5a8e8626636c8daddeca3693c01984..4d2f5d8b9057a358bbf223c1f78cae70cf5d7393 100644
--- a/arbor/include/arbor/cable_cell.hpp
+++ b/arbor/include/arbor/cable_cell.hpp
@@ -228,16 +228,18 @@ public:
 
     cable_cell_parameter_set default_parameters;
 
-    /// Default constructor
+    // Default constructor.
     cable_cell();
 
-    /// Copy constructor
+    // Copy and move constructors.
     cable_cell(const cable_cell& other);
-
-    /// Move constructor
     cable_cell(cable_cell&& other) = default;
 
+    // Copy and move assignment operators.
     cable_cell& operator=(cable_cell&&) = default;
+    cable_cell& operator=(const cable_cell& other) {
+        return *this = cable_cell(other);
+    }
 
     /// construct from morphology
     cable_cell(const class morphology& m, const label_dict& dictionary={});
diff --git a/arbor/include/arbor/sampling.hpp b/arbor/include/arbor/sampling.hpp
index f917e0c68f96a004e2f54bc8ce2563e23098105b..b02e81ab64bbc199f1677efd660479af78b6a1c4 100644
--- a/arbor/include/arbor/sampling.hpp
+++ b/arbor/include/arbor/sampling.hpp
@@ -33,8 +33,7 @@ using sampler_association_handle = std::size_t;
 
 enum class sampling_policy {
     lax,
-    // interpolated, // placeholder: unsupported
-    // exact         // placeholder: unsupported
+    exact
 };
 
 } // namespace arb
diff --git a/arbor/include/arbor/util/pp_util.hpp b/arbor/include/arbor/util/pp_util.hpp
index 1347fcb71f8b4fab822ce6fc0d74ff3f96453310..5956ad65e16ae50e23f48176fe0661fdfdf63059 100644
--- a/arbor/include/arbor/util/pp_util.hpp
+++ b/arbor/include/arbor/util/pp_util.hpp
@@ -44,9 +44,19 @@
 #define ARB_PP_FOREACH_8_(M, A, ...)  M(A) ARB_PP_FOREACH_7_(M, __VA_ARGS__)
 #define ARB_PP_FOREACH_9_(M, A, ...)  M(A) ARB_PP_FOREACH_8_(M, __VA_ARGS__)
 #define ARB_PP_FOREACH_10_(M, A, ...) M(A) ARB_PP_FOREACH_9_(M, __VA_ARGS__)
-#define ARB_PP_GET_11TH_ARGUMENT_(a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, ...) a11
+#define ARB_PP_FOREACH_11_(M, A, ...) M(A) ARB_PP_FOREACH_10_(M, __VA_ARGS__)
+#define ARB_PP_FOREACH_12_(M, A, ...) M(A) ARB_PP_FOREACH_11_(M, __VA_ARGS__)
+#define ARB_PP_FOREACH_13_(M, A, ...) M(A) ARB_PP_FOREACH_12_(M, __VA_ARGS__)
+#define ARB_PP_FOREACH_14_(M, A, ...) M(A) ARB_PP_FOREACH_13_(M, __VA_ARGS__)
+#define ARB_PP_FOREACH_15_(M, A, ...) M(A) ARB_PP_FOREACH_14_(M, __VA_ARGS__)
+#define ARB_PP_FOREACH_16_(M, A, ...) M(A) ARB_PP_FOREACH_15_(M, __VA_ARGS__)
+#define ARB_PP_FOREACH_17_(M, A, ...) M(A) ARB_PP_FOREACH_16_(M, __VA_ARGS__)
+#define ARB_PP_FOREACH_18_(M, A, ...) M(A) ARB_PP_FOREACH_17_(M, __VA_ARGS__)
+#define ARB_PP_FOREACH_19_(M, A, ...) M(A) ARB_PP_FOREACH_18_(M, __VA_ARGS__)
+#define ARB_PP_FOREACH_20_(M, A, ...) M(A) ARB_PP_FOREACH_19_(M, __VA_ARGS__)
+#define ARB_PP_GET_21ST_ARGUMENT_(a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15, a16, a17, a18, a19, a20, a21, ...) a21
 
-// Apply macro in first argument to each of the remaining arguments (up to 10).
-// Note: if __VA_ARGS__ has size N, when it is expanded the 11th argument is the ARB_PP_FOREACH_N_ macro.
+// Apply macro in first argument to each of the remaining arguments (up to 20).
+// Note: if __VA_ARGS__ has size N, when it is expanded the 21st argument is the ARB_PP_FOREACH_N_ macro.
 #define ARB_PP_FOREACH(M, ...)\
-ARB_PP_GET_11TH_ARGUMENT_(__VA_ARGS__, ARB_PP_FOREACH_10_, ARB_PP_FOREACH_9_, ARB_PP_FOREACH_8_, ARB_PP_FOREACH_7_, ARB_PP_FOREACH_6_, ARB_PP_FOREACH_5_, ARB_PP_FOREACH_4_, ARB_PP_FOREACH_3_, ARB_PP_FOREACH_2_, ARB_PP_FOREACH_1_)(M, __VA_ARGS__)
+ARB_PP_GET_21ST_ARGUMENT_(__VA_ARGS__, ARB_PP_FOREACH_20_, ARB_PP_FOREACH_19_, ARB_PP_FOREACH_18_, ARB_PP_FOREACH_17_, ARB_PP_FOREACH_16_, ARB_PP_FOREACH_15_, ARB_PP_FOREACH_14_, ARB_PP_FOREACH_13_, ARB_PP_FOREACH_12_, ARB_PP_FOREACH_11_, ARB_PP_FOREACH_10_, ARB_PP_FOREACH_9_, ARB_PP_FOREACH_8_, ARB_PP_FOREACH_7_, ARB_PP_FOREACH_6_, ARB_PP_FOREACH_5_, ARB_PP_FOREACH_4_, ARB_PP_FOREACH_3_, ARB_PP_FOREACH_2_, ARB_PP_FOREACH_1_)(M, __VA_ARGS__)
diff --git a/arbor/mc_cell_group.cpp b/arbor/mc_cell_group.cpp
index 0032d62a562b06265c6259e778c48b44217387db..454069358894ba9f547ee4f6d50c6cc8259aea01 100644
--- a/arbor/mc_cell_group.cpp
+++ b/arbor/mc_cell_group.cpp
@@ -322,18 +322,19 @@ void run_samples(
 void mc_cell_group::advance(epoch ep, time_type dt, const event_lane_subrange& event_lanes) {
     time_type tstart = lowered_->time();
 
+    // Bin and collate deliverable events from event lanes.
+
     PE(advance_eventsetup);
     staged_events_.clear();
 
-    fvm_index_type ev_begin = 0, ev_mid = 0, ev_end = 0;
-    // skip event binning if empty lanes are passed
+    // Skip event handling if nothing to deliver.
     if (event_lanes.size()) {
-
         std::vector<cell_size_type> idx_sorted_by_intdom(cell_to_intdom_.size());
         std::iota(idx_sorted_by_intdom.begin(), idx_sorted_by_intdom.end(), 0);
         util::sort_by(idx_sorted_by_intdom, [&](cell_size_type i) { return cell_to_intdom_[i]; });
 
         /// Event merging on integration domain could benefit from the use of the logic from `tree_merge_events`
+        fvm_index_type ev_begin = 0, ev_mid = 0, ev_end = 0;
         fvm_index_type prev_intdom = -1;
         for (auto i: util::count_along(gids_)) {
             unsigned count_staged = 0;
@@ -368,7 +369,6 @@ void mc_cell_group::advance(epoch ep, time_type dt, const event_lane_subrange& e
     }
     PL();
 
-
     // Create sample events and delivery information.
     //
     // For each (schedule, sampler, probe set) in the sampler association
@@ -389,6 +389,8 @@ void mc_cell_group::advance(epoch ep, time_type dt, const event_lane_subrange& e
     sample_size_type n_samples = 0;
     sample_size_type max_samples_per_call = 0;
 
+    std::vector<deliverable_event> exact_sampling_events;
+
     for (auto& sa: sampler_map_) {
         auto sample_times = util::make_range(sa.sched.events(tstart, ep.tfinal));
         if (sample_times.empty()) {
@@ -406,15 +408,40 @@ void mc_cell_group::advance(epoch ep, time_type dt, const event_lane_subrange& e
             call_info.push_back({sa.sampler, pid, p.tag, n_samples, n_samples+n_times*n_raw});
 
             for (auto t: sample_times) {
+                auto intdom = cell_to_intdom_[cell_index];
                 for (probe_handle h: p.handle.raw_handle_range()) {
-                    sample_event ev{t, (cell_gid_type)cell_to_intdom_[cell_index], {h, n_samples++}};
+                    sample_event ev{t, (cell_gid_type)intdom, {h, n_samples++}};
                     sample_events.push_back(ev);
                 }
+                if (sa.policy==sampling_policy::exact) {
+                    target_handle h(-1, 0, intdom);
+                    exact_sampling_events.push_back({t, h, 0.f});
+                }
             }
         }
         arb_assert(n_samples==call_info.back().end_offset);
     }
 
+    // Sort exact sampling events into staged events for delivery.
+    if (exact_sampling_events.size()) {
+        auto event_less =
+            [](const auto& a, const auto& b) {
+                 auto ai = event_index(a);
+                 auto bi = event_index(b);
+                 return ai<bi || (ai==bi && event_time(a)<event_time(b));
+            };
+
+        util::sort(exact_sampling_events, event_less);
+
+        std::vector<deliverable_event> merged;
+        merged.reserve(staged_events_.size()+exact_sampling_events.size());
+
+        std::merge(staged_events_.begin(), staged_events_.end(),
+                   exact_sampling_events.begin(), exact_sampling_events.end(),
+                   std::back_inserter(merged), event_less);
+        std::swap(merged, staged_events_);
+    }
+
     // Sample events must be ordered by time for the lowered cell.
     util::sort_by(sample_events, [](const sample_event& ev) { return event_time(ev); });
     util::stable_sort_by(sample_events, [](const sample_event& ev) { return event_index(ev); });
@@ -457,7 +484,7 @@ void mc_cell_group::add_sampler(sampler_association_handle h, cell_member_predic
         util::assign_from(util::filter(util::keys(probe_map_), probe_ids));
 
     if (!probeset.empty()) {
-        sampler_map_.add(h, sampler_association{std::move(sched), std::move(fn), std::move(probeset)});
+        sampler_map_.add(h, sampler_association{std::move(sched), std::move(fn), std::move(probeset), policy});
     }
 }
 
diff --git a/arbor/sampler_map.hpp b/arbor/sampler_map.hpp
index e99d6b4a2d8d1af72db077c5e5afd20980c4931c..0366885d478bb2c4685ba9e309b96973502f05db 100644
--- a/arbor/sampler_map.hpp
+++ b/arbor/sampler_map.hpp
@@ -24,6 +24,7 @@ struct sampler_association {
     schedule sched;
     sampler_function sampler;
     std::vector<cell_member_type> probe_ids;
+    sampling_policy policy;
 };
 
 // Maintain a set of associations paired with handles used for deletion.
diff --git a/doc/sampling_api.rst b/doc/sampling_api.rst
index da4a80b63ee35bfd806d9f921c90844f63fda536..95e596f4852bc8918492e58959fdb0b68334c654 100644
--- a/doc/sampling_api.rst
+++ b/doc/sampling_api.rst
@@ -183,8 +183,11 @@ Two helper functions are provided for making ``cell_member_predicate`` objects:
 The ``sampling_policy`` policy is used to modify sampling behaviour: by
 default, the ``lax`` policy is to perform a best-effort sampling that
 minimizes sampling overhead and which will not change the numerical
-behaviour of the simulation. Other policies may be implemented in the
-future, e.g. ``interpolated`` or ``exact``.
+behaviour of the simulation. The ``exact`` policy requests that samples
+are provided for the exact time specified in the schedule, even if this
+means disrupting the course of the simulation. Other policies may be
+implemented in the future, but cell groups are in general not required
+to support any policy other than ``lax``.
 
 The simulation object will pass on the sampler setting request to the cell
 group that owns the given probe id. The ``cell_group`` interface will be
@@ -277,11 +280,12 @@ The ``schedule`` class and its implementations are found in ``schedule.hpp``.
 Helper classes for probe/sampler management
 -------------------------------------------
 
-The ``simulation`` and ``mc_cell_group`` classes use classes defined in ``scheduler_map.hpp`` to simplify
-the management of sampler--probe associations and probe metdata.
+The ``simulation`` and ``mc_cell_group`` classes use classes defined in
+``scheduler_map.hpp`` to simplify the management of sampler--probe associations
+and probe metadata.
 
 ``sampler_association_map`` wraps an ``unordered_map`` between sampler association
-handles and tuples (*schedule*, *sampler*, *probe set*), with thread-safe
+handles and tuples (*schedule*, *sampler*, *probe set*, *policy*), with thread-safe
 accessors.
 
 ``probe_association_map<Handle>`` is a type alias for an unordered map between
@@ -305,6 +309,14 @@ after any postsynaptic spike events have been delivered.
 It is the responsibility of the ``mc_cell_group::advance()`` method to create the sample
 events from the entries of its ``sampler_association_map``, and to dispatch the
 sampled values to the sampler callbacks after the integration is complete.
-Given an association tuple (*schedule*, *sampler*, *probe set*) where the *schedule*
+Given an association tuple (*schedule*, *sampler*, *probe set*, *policy*) where the *schedule*
 has (non-zero) *n* sample times in the current integration interval, the ``mc_cell_group`` will
 call the *sampler* callback once for probe in *probe set*, with *n* sample values.
+
+In addition to the ``lax`` sampling policy, ``mc_cell_group`` supports the ``exact``
+policy. Integration steps will be shortened such that any sample times associated
+with an ``exact`` policy can be satisfied precisely.
+
+
+
+
diff --git a/example/probe-demo/probe-demo.cpp b/example/probe-demo/probe-demo.cpp
index 00bafdc82751ade73dfc09d8ba52c9fa4f2129b7..832dc4158a745e578e1da4ab4690ddb47fc681fb 100644
--- a/example/probe-demo/probe-demo.cpp
+++ b/example/probe-demo/probe-demo.cpp
@@ -32,6 +32,7 @@ const char* help_msg =
     " -n, --n-cv=N        discretize with N CVs\n"
     " -t, --sample=TIME   take a sample every TIME [ms]\n"
     " -x, --at=X          take sample at relative position X along cable\n"
+    " --exact             use exact time sampling\n"
     " -h, --help          print extended usage information and exit\n"
     "\n"
     "Simulate a simple 1 mm cable with HH dynamics, taking samples according\n"
@@ -74,6 +75,7 @@ struct options {
     double sample_dt = 1.0;   // [ms]
     unsigned n_cv = 10;
 
+    bool exact = false;
     bool scalar_probe = true;
     any probe_addr;
     std::string value_name;
@@ -138,8 +140,10 @@ int main(int argc, char** argv) {
         auto context = arb::make_context();
         arb::simulation sim(R, arb::partition_load_balance(R, context), context);
 
-        sim.add_sampler(arb::all_probes, arb::regular_schedule(opt.sample_dt),
-                opt.scalar_probe? scalar_sampler: vector_sampler);
+        sim.add_sampler(arb::all_probes,
+                arb::regular_schedule(opt.sample_dt),
+                opt.scalar_probe? scalar_sampler: vector_sampler,
+                opt.exact? arb::sampling_policy::exact: arb::sampling_policy::lax);
 
         // CSV header for sample output:
         std::cout << "t, " << (opt.scalar_probe? "x, ": "x0, x1, ") << opt.value_name << '\n';
@@ -232,7 +236,8 @@ bool parse_options(options& opt, int& argc, char** argv) {
         { opt.sim_end,   "--until" },
         { opt.sample_dt, "-t", "--sample" },
         { probe_pos,     "-x", "--at" },
-        { opt.n_cv,      "-n", "--n-cv" }
+        { opt.n_cv,      "-n", "--n-cv" },
+        { to::set(opt.exact), to::flag, "--exact" }
     };
 
     if (!to::run(cli_opts, argc, argv+1)) return false;
diff --git a/test/unit/CMakeLists.txt b/test/unit/CMakeLists.txt
index bd48463e2b9851db1c1de0a8866709887821faca..3ae498dfa8b32a8a2cea72d4decc3f4ac1b11ea4 100644
--- a/test/unit/CMakeLists.txt
+++ b/test/unit/CMakeLists.txt
@@ -128,6 +128,7 @@ set(unit_sources
     test_path.cpp
     test_piecewise.cpp
     test_point.cpp
+    test_pp_util.cpp
     test_probe.cpp
     test_range.cpp
     test_ratelem.cpp
diff --git a/test/unit/test_pp_util.cpp b/test/unit/test_pp_util.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..182ce59bffc6b57858cb27595af213c7f92540b1
--- /dev/null
+++ b/test/unit/test_pp_util.cpp
@@ -0,0 +1,23 @@
+#include "../gtest.h"
+
+#include <string>
+#include <arbor/util/pp_util.hpp>
+
+TEST(pp_util, foreach) {
+#undef X
+#define X(n) #n "."
+
+    std::string str1 = "foo:" ARB_PP_FOREACH(X, a);
+    EXPECT_EQ("foo:a.", str1);
+
+#undef LETTERS10
+#define LETTERS10 a, b, c, d, e, f, g, h, i, j
+    std::string str10 = "bar:" ARB_PP_FOREACH(X, LETTERS10);
+    EXPECT_EQ("bar:a.b.c.d.e.f.g.h.i.j.", str10);
+
+    std::string str20 = "baz:" ARB_PP_FOREACH(X, a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p, q, r, s, t);
+    EXPECT_EQ("baz:a.b.c.d.e.f.g.h.i.j.k.l.m.n.o.p.q.r.s.t.", str20);
+
+#undef LETTERS10
+#undef X
+}
diff --git a/test/unit/test_probe.cpp b/test/unit/test_probe.cpp
index 85731963840dbfeca0820b448588364fb71dd4c8..1450a4e13e6bc0f24c792bc44ae20f15fe8ba6eb 100644
--- a/test/unit/test_probe.cpp
+++ b/test/unit/test_probe.cpp
@@ -13,6 +13,7 @@
 #include <arbor/schedule.hpp>
 #include <arbor/simple_sampler.hpp>
 #include <arbor/simulation.hpp>
+#include <arbor/util/pp_util.hpp>
 #include <arbor/version.hpp>
 #include <arborenv/gpu_env.hpp>
 
@@ -183,7 +184,7 @@ void run_v_i_probe_test(const context& ctx) {
 }
 
 template <typename Backend>
-void run_v_cable_probe_test(const context& ctx) {
+void run_v_cell_probe_test(const context& ctx) {
     using fvm_cell = typename backend_access<Backend>::fvm_cell;
 
     // Take the per-cable voltage over a Y-shaped cell with and without
@@ -244,9 +245,7 @@ void run_v_cable_probe_test(const context& ctx) {
 }
 
 template <typename Backend>
-void run_expsyn_g_probe_test(const context& ctx, bool coalesce_synapses = false) {
-    SCOPED_TRACE(coalesce_synapses? "coalesced": "uncoalesced");
-
+void run_expsyn_g_probe_test(const context& ctx) {
     using fvm_cell = typename backend_access<Backend>::fvm_cell;
     auto deref = [](const fvm_value_type* p) { return backend_access<Backend>::deref(p); };
 
@@ -262,75 +261,85 @@ void run_expsyn_g_probe_test(const context& ctx, bool coalesce_synapses = false)
     bs.place(loc1, "expsyn");
     bs.default_parameters.discretization = cv_policy_fixed_per_branch(2);
 
-    cable1d_recipe rec(bs, coalesce_synapses);
-    rec.add_probe(0, 10, cable_probe_point_state{0u, "expsyn", "g"});
-    rec.add_probe(0, 20, cable_probe_point_state{1u, "expsyn", "g"});
+    auto run_test = [&](bool coalesce_synapses) {
+        cable1d_recipe rec(bs, coalesce_synapses);
+        rec.add_probe(0, 10, cable_probe_point_state{0u, "expsyn", "g"});
+        rec.add_probe(0, 20, cable_probe_point_state{1u, "expsyn", "g"});
 
-    std::vector<target_handle> targets;
-    std::vector<fvm_index_type> cell_to_intdom;
-    probe_association_map<fvm_probe_info> probe_map;
+        std::vector<target_handle> targets;
+        std::vector<fvm_index_type> cell_to_intdom;
+        probe_association_map<fvm_probe_info> probe_map;
 
-    fvm_cell lcell(*ctx);
-    lcell.initialize({0}, rec, cell_to_intdom, targets, probe_map);
+        fvm_cell lcell(*ctx);
+        lcell.initialize({0}, rec, cell_to_intdom, targets, probe_map);
 
-    EXPECT_EQ(2u, rec.num_probes(0));
-    EXPECT_EQ(2u, probe_map.size());
-    ASSERT_EQ(1u, probe_map.count({0, 0}));
-    ASSERT_EQ(1u, probe_map.count({0, 1}));
+        EXPECT_EQ(2u, rec.num_probes(0));
+        EXPECT_EQ(2u, probe_map.size());
+        ASSERT_EQ(1u, probe_map.count({0, 0}));
+        ASSERT_EQ(1u, probe_map.count({0, 1}));
 
-    EXPECT_EQ(10, probe_map.at({0, 0}).tag);
-    EXPECT_EQ(20, probe_map.at({0, 1}).tag);
+        EXPECT_EQ(10, probe_map.at({0, 0}).tag);
+        EXPECT_EQ(20, probe_map.at({0, 1}).tag);
 
-    auto probe_scalar_handle = [&](cell_member_type x) {
-        return probe_map.at(x).handle.raw_handle_range()[0];
-    };
+        auto probe_scalar_handle = [&](cell_member_type x) {
+            return probe_map.at(x).handle.raw_handle_range()[0];
+        };
 
-    probe_handle p0 = probe_scalar_handle({0, 0});
-    probe_handle p1 = probe_scalar_handle({0, 1});
+        probe_handle p0 = probe_scalar_handle({0, 0});
+        probe_handle p1 = probe_scalar_handle({0, 1});
 
-    // Expect initial probe values to be intial synapse g == 0.
+        // Expect initial probe values to be initial synapse g == 0.
 
-    EXPECT_EQ(0.0, deref(p0));
-    EXPECT_EQ(0.0, deref(p1));
+        EXPECT_EQ(0.0, deref(p0));
+        EXPECT_EQ(0.0, deref(p1));
 
-    if (coalesce_synapses) {
-        // Should be the same raw pointer!
-        EXPECT_EQ(p0, p1);
-    }
+        if (coalesce_synapses) {
+            // Should be the same raw pointer!
+            EXPECT_EQ(p0, p1);
+        }
 
-    // Integrate to 3 ms, with one event at 1ms to first expsyn weight 0.5,
-    // and another at 2ms to second, weight 1.
+        // Integrate to 3 ms, with one event at 1ms to first expsyn weight 0.5,
+        // and another at 2ms to second, weight 1.
 
-    std::vector<deliverable_event> evs = {
-        {1.0, targets[0], 0.5},
-        {2.0, targets[1], 1.0}
-    };
-    const double tfinal = 3.;
-    const double dt = 0.001;
-    lcell.integrate(tfinal, dt, evs, {});
+        std::vector<deliverable_event> evs = {
+            {1.0, targets[0], 0.5},
+            {2.0, targets[1], 1.0}
+        };
+        const double tfinal = 3.;
+        const double dt = 0.001;
+        lcell.integrate(tfinal, dt, evs, {});
 
-    fvm_value_type g0 = deref(p0);
-    fvm_value_type g1 = deref(p1);
+        fvm_value_type g0 = deref(p0);
+        fvm_value_type g1 = deref(p1);
 
-    // Expected value: weight*exp(-(t_final-t_event)/tau).
-    double expected_g0 = 0.5*std::exp(-(tfinal-1.0)/tau);
-    double expected_g1 = 1.0*std::exp(-(tfinal-2.0)/tau);
+        // Expected value: weight*exp(-(t_final-t_event)/tau).
+        double expected_g0 = 0.5*std::exp(-(tfinal-1.0)/tau);
+        double expected_g1 = 1.0*std::exp(-(tfinal-2.0)/tau);
 
-    const double rtol = 1e-6;
-    if (coalesce_synapses) {
-        EXPECT_TRUE(testing::near_relative(expected_g0+expected_g1, g0, rtol));
-        EXPECT_TRUE(testing::near_relative(expected_g0+expected_g1, g1, rtol));
+        const double rtol = 1e-6;
+        if (coalesce_synapses) {
+            EXPECT_TRUE(testing::near_relative(expected_g0+expected_g1, g0, rtol));
+            EXPECT_TRUE(testing::near_relative(expected_g0+expected_g1, g1, rtol));
+        }
+        else {
+            EXPECT_TRUE(testing::near_relative(expected_g0, g0, rtol));
+            EXPECT_TRUE(testing::near_relative(expected_g1, g1, rtol));
+        }
+    };
+
+    {
+        SCOPED_TRACE("uncoalesced synapses");
+        run_test(false);
     }
-    else {
-        EXPECT_TRUE(testing::near_relative(expected_g0, g0, rtol));
-        EXPECT_TRUE(testing::near_relative(expected_g1, g1, rtol));
+
+    {
+        SCOPED_TRACE("coalesced synapses");
+        run_test(true);
     }
 }
 
 template <typename Backend>
-void run_expsyn_g_cable_probe_test(const context& ctx, bool coalesce_synapses = false) {
-    SCOPED_TRACE(coalesce_synapses? "coalesced": "uncoalesced");
-
+void run_expsyn_g_cell_probe_test(const context& ctx) {
     using fvm_cell = typename backend_access<Backend>::fvm_cell;
     auto deref = [](const auto* p) { return backend_access<Backend>::deref(p); };
 
@@ -362,89 +371,102 @@ void run_expsyn_g_cable_probe_test(const context& ctx, bool coalesce_synapses =
     const unsigned n_expsyn = 30;
 
     std::vector<cable_cell> cells(2, cell);
-    cable1d_recipe rec(cells, coalesce_synapses);
 
-    rec.add_probe(0, 0, cable_probe_point_state_cell{"expsyn", "g"});
-    rec.add_probe(1, 0, cable_probe_point_state_cell{"expsyn", "g"});
+    auto run_test = [&](bool coalesce_synapses) {
+        cable1d_recipe rec(cells, coalesce_synapses);
 
-    std::vector<target_handle> targets;
-    std::vector<fvm_index_type> cell_to_intdom;
-    probe_association_map<fvm_probe_info> probe_map;
+        rec.add_probe(0, 0, cable_probe_point_state_cell{"expsyn", "g"});
+        rec.add_probe(1, 0, cable_probe_point_state_cell{"expsyn", "g"});
 
-    fvm_cell lcell(*ctx);
-    lcell.initialize({0, 1}, rec, cell_to_intdom, targets, probe_map);
+        std::vector<target_handle> targets;
+        std::vector<fvm_index_type> cell_to_intdom;
+        probe_association_map<fvm_probe_info> probe_map;
 
-    // Send an event to each expsyn synapse with a weight = target+100*cell_gid, and
-    // integrate for a tiny time step.
+        fvm_cell lcell(*ctx);
+        lcell.initialize({0, 1}, rec, cell_to_intdom, targets, probe_map);
 
-    std::vector<deliverable_event> events;
-    for (unsigned i: {0u, 1u}) {
-        // Cells have the same number of targets, so the offset for cell 1 is exactly...
-        cell_local_size_type cell_offset = i==0? 0: targets.size()/2;
+        // Send an event to each expsyn synapse with a weight = target+100*cell_gid, and
+        // integrate for a tiny time step.
 
-        for (auto target_id: util::keys(expsyn_target_loc_map)) {
-            deliverable_event ev{0., targets.at(target_id+cell_offset), float(target_id+100*i)};
-            events.push_back(ev);
-        }
-    }
-    (void)lcell.integrate(1e-5, 1e-5, events, {});
+        std::vector<deliverable_event> events;
+        for (unsigned i: {0u, 1u}) {
+            // Cells have the same number of targets, so the offset for cell 1 is exactly...
+            cell_local_size_type cell_offset = i==0? 0: targets.size()/2;
 
-    // Independently get cv geometry to compute CV indices.
+            for (auto target_id: util::keys(expsyn_target_loc_map)) {
+                deliverable_event ev{0., targets.at(target_id+cell_offset), float(target_id+100*i)};
+                events.push_back(ev);
+            }
+        }
+        (void)lcell.integrate(1e-5, 1e-5, events, {});
 
-    cv_geometry geom = cv_geometry_from_ends(cells[0], policy.cv_boundary_points(cells[0]));
-    append(geom, cv_geometry_from_ends(cells[1], policy.cv_boundary_points(cells[1])));
+        // Independently get cv geometry to compute CV indices.
 
-    ASSERT_EQ(2u, probe_map.size());
-    for (unsigned i: {0u, 1u}) {
-        const auto* h_ptr = util::get_if<fvm_probe_multi>(probe_map.at({i, 0}).handle.info);
-        ASSERT_TRUE(h_ptr);
+        cv_geometry geom = cv_geometry_from_ends(cells[0], policy.cv_boundary_points(cells[0]));
+        append(geom, cv_geometry_from_ends(cells[1], policy.cv_boundary_points(cells[1])));
 
-        const auto* m_ptr = util::get_if<std::vector<cable_probe_point_info>>(h_ptr->metadata);
-        ASSERT_TRUE(m_ptr);
+        ASSERT_EQ(2u, probe_map.size());
+        for (unsigned i: {0u, 1u}) {
+            const auto* h_ptr = util::get_if<fvm_probe_multi>(probe_map.at({i, 0}).handle.info);
+            ASSERT_TRUE(h_ptr);
 
-        const fvm_probe_multi& h = *h_ptr;
-        const std::vector<cable_probe_point_info> m = *m_ptr;
+            const auto* m_ptr = util::get_if<std::vector<cable_probe_point_info>>(h_ptr->metadata);
+            ASSERT_TRUE(m_ptr);
 
-        ASSERT_EQ(h.raw_handles.size(), m.size());
-        ASSERT_EQ(n_expsyn, m.size());
+            const fvm_probe_multi& h = *h_ptr;
+            const std::vector<cable_probe_point_info> m = *m_ptr;
 
-        std::vector<double> expected_coalesced_cv_value(geom.size());
-        std::vector<double> expected_uncoalesced_value(targets.size());
+            ASSERT_EQ(h.raw_handles.size(), m.size());
+            ASSERT_EQ(n_expsyn, m.size());
 
-        std::vector<double> target_cv(targets.size(), (unsigned)-1);
-        std::unordered_map<fvm_size_type, unsigned> cv_expsyn_count;
+            std::vector<double> expected_coalesced_cv_value(geom.size());
+            std::vector<double> expected_uncoalesced_value(targets.size());
 
-        for (unsigned j = 0; j<n_expsyn; ++j) {
-            ASSERT_EQ(1u, expsyn_target_loc_map.count(m[j].target));
-            EXPECT_EQ(expsyn_target_loc_map.at(m[j].target), m[j].loc);
+            std::vector<double> target_cv(targets.size(), (unsigned)-1);
+            std::unordered_map<fvm_size_type, unsigned> cv_expsyn_count;
 
-            auto cv = geom.location_cv(i, m[j].loc, cv_prefer::cv_nonempty);
-            target_cv[j] = cv;
-            ++cv_expsyn_count[cv];
+            for (unsigned j = 0; j<n_expsyn; ++j) {
+                ASSERT_EQ(1u, expsyn_target_loc_map.count(m[j].target));
+                EXPECT_EQ(expsyn_target_loc_map.at(m[j].target), m[j].loc);
 
-            double event_weight = m[j].target+100*i;
-            expected_uncoalesced_value[j] = event_weight;
-            expected_coalesced_cv_value[cv] += event_weight;
-        }
+                auto cv = geom.location_cv(i, m[j].loc, cv_prefer::cv_nonempty);
+                target_cv[j] = cv;
+                ++cv_expsyn_count[cv];
 
-        for (unsigned j = 0; j<n_expsyn; ++j) {
-            if (coalesce_synapses) {
-                EXPECT_EQ(cv_expsyn_count.at(target_cv[j]), m[j].multiplicity);
+                double event_weight = m[j].target+100*i;
+                expected_uncoalesced_value[j] = event_weight;
+                expected_coalesced_cv_value[cv] += event_weight;
             }
-            else {
-                EXPECT_EQ(1u, m[j].multiplicity);
+
+            for (unsigned j = 0; j<n_expsyn; ++j) {
+                if (coalesce_synapses) {
+                    EXPECT_EQ(cv_expsyn_count.at(target_cv[j]), m[j].multiplicity);
+                }
+                else {
+                    EXPECT_EQ(1u, m[j].multiplicity);
+                }
             }
-        }
 
-        for (unsigned j = 0; j<n_expsyn; ++j) {
-            double expected_value = coalesce_synapses?
-                expected_coalesced_cv_value[target_cv[j]]:
-                expected_uncoalesced_value[j];
+            for (unsigned j = 0; j<n_expsyn; ++j) {
+                double expected_value = coalesce_synapses?
+                    expected_coalesced_cv_value[target_cv[j]]:
+                    expected_uncoalesced_value[j];
 
-            double value = deref(h.raw_handles[j]);
+                double value = deref(h.raw_handles[j]);
 
-            EXPECT_NEAR(expected_value, value, 0.01); // g values will have decayed a little.
+                EXPECT_NEAR(expected_value, value, 0.01); // g values will have decayed a little.
+            }
         }
+    };
+
+    {
+        SCOPED_TRACE("uncoalesced synapses");
+        run_test(false);
+    }
+
+    {
+        SCOPED_TRACE("coalesced synapses");
+        run_test(true);
     }
 }
 
@@ -726,7 +748,7 @@ void run_partial_density_probe_test(const context& ctx) {
 }
 
 template <typename Backend>
-void run_axial_and_ion_current_probe_sampled_test(const context& ctx) {
+void run_axial_and_ion_current_sampled_probe_test(const context& ctx) {
     // On a passive cable in steady-state, the capacitive membrane current will be zero,
     // and the axial currents should balance the ionic membrane currents in any CV.
     //
@@ -891,7 +913,7 @@ auto run_simple_sampler(
 }
 
 template <typename Backend>
-void run_v_sampled_test(const context& ctx) {
+void run_v_sampled_probe_test(const context& ctx) {
     cable_cell bs = make_cell_ball_and_stick(false);
     bs.default_parameters.discretization = cv_policy_fixed_per_branch(1);
 
@@ -924,7 +946,7 @@ void run_v_sampled_test(const context& ctx) {
 }
 
 template <typename Backend>
-void run_total_current_test(const context& ctx) {
+void run_total_current_probe_test(const context& ctx) {
     // Model two passive Y-shaped cells with a similar but not identical
     // time constant Ï„.
     //
@@ -1044,114 +1066,149 @@ void run_total_current_test(const context& ctx) {
     }
 }
 
-TEST(probe, multicore_v_i) {
-    context ctx = make_context();
-    run_v_i_probe_test<multicore::backend>(ctx);
-}
+template <typename Backend>
+void run_exact_sampling_probe_test(const context& ctx) {
+    // As the exact sampling implementation interacts with the event delivery
+    // implementation within cable cell groups, construct a somewhat
+    // elaborate model with 4 cells and a gap junction between cell 1 and 3.
 
-TEST(probe, multicore_v_cell) {
-    context ctx = make_context();
-    run_v_cable_probe_test<multicore::backend>(ctx);
-}
+    struct adhoc_recipe: recipe {
+        std::vector<cable_cell> cells_;
+        cable_cell_global_properties gprop_;
 
-TEST(probe, multicore_v_sampled) {
-    context ctx = make_context();
-    run_v_sampled_test<multicore::backend>(ctx);
-}
+        adhoc_recipe() {
+            gprop_.default_parameters = neuron_parameter_defaults;
 
-TEST(probe, multicore_expsyn_g) {
-    context ctx = make_context();
-    run_expsyn_g_probe_test<multicore::backend>(ctx, false);
-    run_expsyn_g_probe_test<multicore::backend>(ctx, true);
-}
+            cells_.assign(4, make_cell_ball_and_stick(false));
+            cells_[0].place(mlocation{1, 0.1}, "expsyn");
+            cells_[1].place(mlocation{1, 0.1}, "exp2syn");
+            cells_[2].place(mlocation{1, 0.9}, "expsyn");
+            cells_[3].place(mlocation{1, 0.9}, "exp2syn");
 
-TEST(probe, multicore_expsyn_g_cell) {
-    context ctx = make_context();
-    run_expsyn_g_cable_probe_test<multicore::backend>(ctx, false);
-    run_expsyn_g_cable_probe_test<multicore::backend>(ctx, true);
-}
+            cells_[1].place(mlocation{1, 0.2}, gap_junction_site{});
+            cells_[3].place(mlocation{1, 0.2}, gap_junction_site{});
 
-TEST(probe, multicore_ion_conc) {
-    context ctx = make_context();
-    run_ion_density_probe_test<multicore::backend>(ctx);
-}
+        }
 
-TEST(probe, multicore_axial_and_ion_current) {
-    context ctx = make_context();
-    run_axial_and_ion_current_probe_sampled_test<multicore::backend>(ctx);
-}
+        cell_size_type num_cells() const override { return cells_.size(); }
 
-TEST(probe, multicore_partial_density) {
-    context ctx = make_context();
-    run_partial_density_probe_test<multicore::backend>(ctx);
-}
+        util::unique_any get_cell_description(cell_gid_type gid) const override {
+            return cells_.at(gid);
+        }
 
-TEST(probe, multicore_total_current) {
-    context ctx = make_context();
-    run_total_current_test<multicore::backend>(ctx);
-}
+        cell_kind get_cell_kind(cell_gid_type) const override {
+            return cell_kind::cable;
+        }
 
-#ifdef ARB_GPU_ENABLED
-TEST(probe, gpu_v_i) {
-    context ctx = make_context(proc_allocation{1, arbenv::default_gpu()});
-    if (has_gpu(ctx)) {
-        run_v_i_probe_test<gpu::backend>(ctx);
-    }
-}
+        cell_size_type num_probes(cell_gid_type) const override { return 1; }
 
-TEST(probe, gpu_v_cell) {
-    context ctx = make_context(proc_allocation{1, arbenv::default_gpu()});
-    if (has_gpu(ctx)) {
-        run_v_cable_probe_test<gpu::backend>(ctx);
-    }
-}
+        probe_info get_probe(cell_member_type pid) const override {
+            return {pid, 0, cable_probe_membrane_voltage{mlocation{1, 0.5}}};
+        }
 
-TEST(probe, gpu_v_sampled) {
-    context ctx = make_context(proc_allocation{1, arbenv::default_gpu()});
-    run_v_sampled_test<multicore::backend>(ctx);
-}
+        cell_size_type num_targets(cell_gid_type) const override { return 1; }
+
+        cell_size_type num_gap_junction_sites(cell_gid_type gid) const override {
+            return gid==1 || gid==3;
+        }
+
+        std::vector<gap_junction_connection> gap_junctions_on(cell_gid_type gid) const override {
+            switch (gid) {
+            case 1:
+                return {gap_junction_connection({gid, 0}, {3, 0}, 1.)};
+            case 3:
+                return {gap_junction_connection({gid, 0}, {1, 0}, 1.)};
+            default:
+                return {};
+            }
+        }
+
+        std::vector<event_generator> event_generators(cell_gid_type gid) const override {
+            // Send a single event to cell gid at 0.1*gid milliseconds.
+            pse_vector spikes = {spike_event{{gid, 0}, 0.1*gid, 1.f}};
+            return {explicit_generator(spikes)};
+        }
 
-TEST(probe, gpu_expsyn_g) {
-    context ctx = make_context(proc_allocation{1, arbenv::default_gpu()});
-    if (has_gpu(ctx)) {
-        run_expsyn_g_probe_test<gpu::backend>(ctx, false);
-        run_expsyn_g_probe_test<gpu::backend>(ctx, true);
+        util::any get_global_properties(cell_kind k) const override {
+            return k==cell_kind::cable? gprop_: util::any{};
+        }
+    };
+
+    // Check two things:
+    // 1. Membrane voltage is similar with and without exact sampling.
+    // 2. Sample times are in fact exact with exact sampling.
+
+    std::vector<trace_data<double>> lax_traces(4), exact_traces(4);
+
+    const double max_dt = 0.001;
+    const double t_end = 1.;
+    std::vector<time_type> sched_times{1./7., 3./7., 4./7., 6./7.};
+    schedule sample_sched = explicit_schedule(sched_times);
+
+    adhoc_recipe rec;
+    unsigned n_cell = rec.num_cells();
+    unsigned n_sample_time = sched_times.size();
+
+    partition_hint_map phints = {
+       {cell_kind::cable, {partition_hint::max_size, partition_hint::max_size, true}}
+    };
+    domain_decomposition one_cell_group = partition_load_balance(rec, ctx, phints);
+
+    simulation lax_sim(rec, one_cell_group, ctx);
+    for (unsigned i = 0; i<n_cell; ++i) {
+        lax_sim.add_sampler(one_probe({i, 0}), sample_sched, make_simple_sampler(lax_traces.at(i)), sampling_policy::lax);
     }
-}
+    lax_sim.run(t_end, max_dt);
 
-TEST(probe, gpu_expsyn_g_cell) {
-    context ctx = make_context(proc_allocation{1, arbenv::default_gpu()});
-    if (has_gpu(ctx)) {
-        run_expsyn_g_cable_probe_test<gpu::backend>(ctx, false);
-        run_expsyn_g_cable_probe_test<gpu::backend>(ctx, true);
+    simulation exact_sim(rec, one_cell_group, ctx);
+    for (unsigned i = 0; i<n_cell; ++i) {
+        exact_sim.add_sampler(one_probe({i, 0}), sample_sched, make_simple_sampler(exact_traces.at(i)), sampling_policy::exact);
     }
-}
+    exact_sim.run(t_end, max_dt);
 
-TEST(probe, gpu_ion_conc) {
-    context ctx = make_context(proc_allocation{1, arbenv::default_gpu()});
-    if (has_gpu(ctx)) {
-        run_ion_density_probe_test<gpu::backend>(ctx);
+    for (unsigned i = 0; i<n_cell; ++i) {
+        ASSERT_EQ(n_sample_time, lax_traces.at(i).size());
+        ASSERT_EQ(n_sample_time, exact_traces.at(i).size());
     }
-}
 
-TEST(probe, gpu_axial_and_ion_current) {
-    context ctx = make_context(proc_allocation{1, arbenv::default_gpu()});
-    if (has_gpu(ctx)) {
-        run_axial_and_ion_current_probe_sampled_test<gpu::backend>(ctx);
+    for (unsigned i = 0; i<n_cell; ++i) {
+        for (unsigned j = 0; j<n_sample_time; ++j) {
+            EXPECT_NE(sched_times.at(j), lax_traces.at(i).at(j).t);
+            EXPECT_EQ(sched_times.at(j), exact_traces.at(i).at(j).t);
+
+            EXPECT_TRUE(testing::near_relative(lax_traces.at(i).at(j).v, exact_traces.at(i).at(j).v, 0.01));
+        }
     }
 }
 
-TEST(probe, gpu_partial_density) {
-    context ctx = make_context(proc_allocation{1, arbenv::default_gpu()});
-    if (has_gpu(ctx)) {
-        run_partial_density_probe_test<multicore::backend>(ctx);
-    }
+// Generate unit tests multicore_X and gpu_X for each entry X in PROBE_TESTS,
+// which establish the appropriate arbor context and then call run_X_probe_test.
+
+#undef PROBE_TESTS
+#define PROBE_TESTS \
+    v_i, v_cell, v_sampled, expsyn_g, expsyn_g_cell, \
+    ion_density, axial_and_ion_current_sampled, partial_density, total_current, exact_sampling
+
+#undef RUN_MULTICORE
+#define RUN_MULTICORE(x) \
+TEST(probe, multicore_##x) { \
+    context ctx = make_context(); \
+    run_##x##_probe_test<multicore::backend>(ctx); \
 }
 
-TEST(probe, gpu_total_current) {
-    context ctx = make_context(proc_allocation{1, arbenv::default_gpu()});
-    if (has_gpu(ctx)) {
-        run_total_current_test<gpu::backend>(ctx);
-    }
+ARB_PP_FOREACH(RUN_MULTICORE, PROBE_TESTS)
+
+#ifdef ARB_GPU_ENABLED
+
+#undef RUN_GPU
+#define RUN_GPU(x) \
+TEST(probe, gpu_##x) { \
+    context ctx = make_context(proc_allocation{1, arbenv::default_gpu()}); \
+    if (has_gpu(ctx)) { \
+        run_##x##_probe_test<gpu::backend>(ctx); \
+    } \
 }
-#endif
+
+ARB_PP_FOREACH(RUN_GPU, PROBE_TESTS)
+
+#endif // def ARB_GPU_ENABLED