From 6beb1ce0a066f771249697b0af4b0080cd2d2ea7 Mon Sep 17 00:00:00 2001 From: Ben Cumming <louncharf@gmail.com> Date: Fri, 21 Apr 2017 15:01:45 +0200 Subject: [PATCH] Virtualize cell_group (#236) Virtualization of the `cell_group` interface is necessary for support of other (i.e. non-multicompartment) cell models, including e.g. Poisson spike generators. * Make `cell_group` an abstract base class; the previous `cell_group` class that is parameterized by the back-end FVM implementation is renamed `mc_cell_group` ('mc' stands for 'multi-compartment') and derives from the abstract `cell_group`. * Remove template parameter `Cell` from `model` type: a `model` can in principle now manage multiple types of concrete objects derived from `cell_group`. * Extend `model` constructor to take a hint about which back-end to use when constructing cell groups: `use_multicore` or `prefer_gpu`. This is a placeholder for a more sophisticated implementation once we have the requirements for a richer "ecosystem" of cell types. * Simplified some generic types to remove template dependencies between front and back ends: * Define a global `using time_type=float` in `common_types.hpp`. * Define a concrete `sampler_function` type alias for the `std::function<...>` type used for samplers. * Use a `null` back-end fallback for GPU if support is not there at compile time. --- .ycm_extra_conf.py | 4 +- miniapp/miniapp.cpp | 18 +- src/backends.hpp | 22 ++ src/backends/fvm.hpp | 37 +++ src/backends/gpu/fvm.hpp | 4 + src/backends/multicore/fvm.hpp | 4 + src/cell_group.hpp | 272 ++---------------- src/fvm_multicell.hpp | 1 - src/mc_cell_group.hpp | 272 ++++++++++++++++++ src/memory/allocator.hpp | 10 +- src/memory/gpu.hpp | 2 +- src/memory/host_coordinator.hpp | 6 +- src/memory/memory.hpp | 4 +- src/memory/wrappers.hpp | 6 +- src/model.hpp | 74 +++-- src/sampler_function.hpp | 14 + src/simple_sampler.hpp | 20 +- src/util/config.hpp | 2 +- tests/performance/io/disk_io.cpp | 2 - tests/unit/CMakeLists.txt | 5 +- tests/unit/test_backend.cpp | 23 ++ tests/unit/test_fvm_multi.cpp | 3 +- ..._cell_group.cpp => test_mc_cell_group.cpp} | 23 +- ...st_cell_group.cu => test_mc_cell_group.cu} | 5 +- tests/unit/test_probe.cpp | 1 + tests/validation/convergence_test.hpp | 2 +- tests/validation/validate_ball_and_stick.cpp | 12 +- tests/validation/validate_ball_and_stick.cu | 12 +- tests/validation/validate_ball_and_stick.hpp | 40 ++- tests/validation/validate_kinetic.cpp | 6 +- tests/validation/validate_kinetic.cu | 6 +- tests/validation/validate_kinetic.hpp | 23 +- tests/validation/validate_soma.cpp | 5 +- tests/validation/validate_soma.cu | 4 +- tests/validation/validate_soma.hpp | 7 +- tests/validation/validate_synapses.cpp | 6 +- tests/validation/validate_synapses.cu | 6 +- tests/validation/validate_synapses.hpp | 6 +- 38 files changed, 560 insertions(+), 409 deletions(-) create mode 100644 src/backends.hpp create mode 100644 src/mc_cell_group.hpp create mode 100644 src/sampler_function.hpp create mode 100644 tests/unit/test_backend.cpp rename tests/unit/{test_cell_group.cpp => test_mc_cell_group.cpp} (76%) rename tests/unit/{test_cell_group.cu => test_mc_cell_group.cu} (86%) diff --git a/.ycm_extra_conf.py b/.ycm_extra_conf.py index 035b3ed1..a48a781b 100644 --- a/.ycm_extra_conf.py +++ b/.ycm_extra_conf.py @@ -36,7 +36,7 @@ import ycm_core # CHANGE THIS LIST OF FLAGS. YES, THIS IS THE DROID YOU HAVE BEEN LOOKING FOR. 
flags = [ '-DNDEBUG', - '-DNMC_HAVE_TBB', + '-DNMC_HAVE_CTHREAD', '-std=c++11', '-x', 'c++', @@ -54,7 +54,7 @@ flags = [ 'modcc', '-I', '/cm/shared/apps/cuda/8.0.44/include', - '-DNMC_HAVE_CUDA' + '-DNMC_HAVE_GPU' ] # Set this to the absolute path to the folder (NOT the file!) containing the diff --git a/miniapp/miniapp.cpp b/miniapp/miniapp.cpp index c9fb6b7c..474c57f3 100644 --- a/miniapp/miniapp.cpp +++ b/miniapp/miniapp.cpp @@ -17,10 +17,10 @@ #include <profiling/profiler.hpp> #include <profiling/meter_manager.hpp> #include <threading/threading.hpp> +#include <util/config.hpp> #include <util/debug.hpp> #include <util/ioutil.hpp> #include <util/nop.hpp> -#include <util/optional.hpp> #include "io.hpp" #include "miniapp_recipes.hpp" @@ -29,12 +29,6 @@ using namespace nest::mc; using global_policy = communication::global_policy; -#ifdef NMC_HAVE_CUDA -using lowered_cell = fvm::fvm_multicell<gpu::backend>; -#else -using lowered_cell = fvm::fvm_multicell<multicore::backend>; -#endif -using model_type = model<lowered_cell>; using sample_trace_type = sample_trace<time_type, double>; using file_export_type = io::exporter_spike_file<global_policy>; void banner(); @@ -101,7 +95,9 @@ int main(int argc, char** argv) { options.file_extension, options.over_write); }; - model_type m(*recipe, util::partition_view(group_divisions)); + model m(*recipe, + util::partition_view(group_divisions), + config::has_cuda? backend_policy::prefer_gpu: backend_policy::use_multicore); if (options.report_compartments) { report_compartment_stats(*recipe); } @@ -202,11 +198,7 @@ void banner() { std::cout << " starting miniapp\n"; std::cout << " - " << threading::description() << " threading support\n"; std::cout << " - communication policy: " << std::to_string(global_policy::kind()) << " (" << global_policy::size() << ")\n"; -#ifdef NMC_HAVE_CUDA - std::cout << " - gpu support: on\n"; -#else - std::cout << " - gpu support: off\n"; -#endif + std::cout << " - gpu support: " << (config::has_cuda? "on": "off") << "\n"; std::cout << "====================\n"; } diff --git a/src/backends.hpp b/src/backends.hpp new file mode 100644 index 00000000..0941d592 --- /dev/null +++ b/src/backends.hpp @@ -0,0 +1,22 @@ +#pragma once + +#include <string> +#include <backends/fvm.hpp> + +namespace nest { +namespace mc { + +enum class backend_policy { + use_multicore, // use multicore backend for all computation + prefer_gpu // use gpu back end when supported by cell_group type +}; + +inline std::string to_string(backend_policy p) { + if (p==backend_policy::use_multicore) { + return "use_multicore"; + } + return "prefer_gpu"; +} + +} // namespace mc +} // namespace nest diff --git a/src/backends/fvm.hpp b/src/backends/fvm.hpp index a8124da0..adb9838b 100644 --- a/src/backends/fvm.hpp +++ b/src/backends/fvm.hpp @@ -2,6 +2,43 @@ #include <backends/multicore/fvm.hpp> +namespace nest { +namespace mc { + +// A null back end used as a placeholder for back ends that are not supported +// on the target platform. 
+struct null_backend: public multicore::backend { + static bool is_supported() { + return false; + } + + static mechanism make_mechanism( + const std::string&, view, view, const std::vector<value_type>&, const std::vector<size_type>&) + { + throw std::runtime_error("attempt to use an unsupported back end"); + } + + static bool has_mechanism(const std::string& name) { + return false; + } + + static std::string name() { + return "null"; + } +}; + +} // namespace mc +} // namespace nest + +// FIXME: This include is where cuda-specific code leaks into the main application. +// e.g.: CUDA kernels, functions marked __host__ __device__, etc. +// This is why it is guarded with NMC_HAVE_CUDA and not NMC_HAVE_GPU, like elsewhere in +// the code. When we implement separate compilation of CUDA, this should be guarded with +// NMC_HAVE_GPU, and the NMC_HAVE_CUDA flag deprecated. #ifdef NMC_HAVE_CUDA #include <backends/gpu/fvm.hpp> +#else +namespace nest { namespace mc { namespace gpu { + using backend = null_backend; +}}} // namespace nest::mc::gpu #endif diff --git a/src/backends/gpu/fvm.hpp b/src/backends/gpu/fvm.hpp index 80a8de85..1d932a07 100644 --- a/src/backends/gpu/fvm.hpp +++ b/src/backends/gpu/fvm.hpp @@ -17,6 +17,10 @@ namespace mc { namespace gpu { struct backend { + static bool is_supported() { + return true; + } + /// define the real and index types using value_type = double; using size_type = nest::mc::cell_lid_type; diff --git a/src/backends/multicore/fvm.hpp b/src/backends/multicore/fvm.hpp index d04928cf..08e919de 100644 --- a/src/backends/multicore/fvm.hpp +++ b/src/backends/multicore/fvm.hpp @@ -18,6 +18,10 @@ namespace mc { namespace multicore { struct backend { + static bool is_supported() { + return true; + } + /// define the real and index types using value_type = double; using size_type = nest::mc::cell_lid_type; diff --git a/src/cell_group.hpp b/src/cell_group.hpp index f6e4a500..e6153024 100644 --- a/src/cell_group.hpp +++ b/src/cell_group.hpp @@ -1,272 +1,38 @@ #pragma once -#include <cstdint> -#include <functional> -#include <iterator> +#include <memory> #include <vector> -#include <algorithms.hpp> -#include <cell.hpp> #include <common_types.hpp> #include <event_binner.hpp> #include <event_queue.hpp> +#include <sampler_function.hpp> #include <spike.hpp> -#include <util/debug.hpp> -#include <util/partition.hpp> -#include <util/range.hpp> - -#include <profiling/profiler.hpp> +#include <util/optional.hpp> +#include <util/make_unique.hpp> namespace nest { namespace mc { -template <typename LoweredCell> class cell_group { public: - using lowered_cell_type = LoweredCell; - using value_type = typename lowered_cell_type::value_type; - using size_type = typename lowered_cell_type::value_type; - using source_id_type = cell_member_type; - - using sampler_function = std::function<util::optional<time_type>(time_type, double)>; - - cell_group() = default; - - template <typename Cells> - cell_group(cell_gid_type first_gid, const Cells& cells): - gid_base_{first_gid} - { - // Create lookup structure for probe and target ids. - build_handle_partitions(cells); - std::size_t n_probes = probe_handle_divisions_.back(); - std::size_t n_targets = target_handle_divisions_.back(); - std::size_t n_detectors = algorithms::sum(util::transform_view( - cells, [](const cell& c) { return c.detectors().size(); })); - - // Allocate space to store handles.
- target_handles_.resize(n_targets); - probe_handles_.resize(n_probes); - - lowered_.initialize(cells, target_handles_, probe_handles_); - - // Create a list of the global identifiers for the spike sources - auto source_gid = cell_gid_type{gid_base_}; - for (const auto& cell: cells) { - for (cell_lid_type lid=0u; lid<cell.detectors().size(); ++lid) { - spike_sources_.push_back(source_id_type{source_gid, lid}); - } - ++source_gid; - } - EXPECTS(spike_sources_.size()==n_detectors); - } - - void reset() { - spikes_.clear(); - events_.clear(); - reset_samplers(); - binner_.reset(); - lowered_.reset(); - } - - void set_binning_policy(binning_kind policy, time_type bin_interval) { - binner_ = event_binner(policy, bin_interval); - } - - void advance(time_type tfinal, time_type dt) { - EXPECTS(lowered_.state_synchronized()); - - // Bin pending events and enqueue on lowered state. - time_type ev_min_time = lowered_.max_time(); // (but we're synchronized here) - while (auto ev = events_.pop_if_before(tfinal)) { - auto handle = get_target_handle(ev->target); - auto binned_ev_time = binner_.bin(ev->target.gid, ev->time, ev_min_time); - lowered_.add_event(binned_ev_time, handle, ev->weight); - } - - lowered_.setup_integration(tfinal, dt); - - std::vector<sample_event> requeue_sample_events; - while (!lowered_.integration_complete()) { - // Take any pending samples. - // TODO: Placeholder: this will be replaced by a backend polling implementation. - - PE("sampling"); - time_type cell_max_time = lowered_.max_time(); - - requeue_sample_events.clear(); - while (auto m = sample_events_.pop_if_before(cell_max_time)) { - auto& s = samplers_[m->sampler_index]; - EXPECTS((bool)s.sampler); - - time_type cell_time = lowered_.time(s.cell_gid-gid_base_); - if (cell_time<m->time) { - // This cell hasn't reached this sample time yet. - requeue_sample_events.push_back(*m); - } - else { - auto next = s.sampler(cell_time, lowered_.probe(s.handle)); - if (next) { - m->time = std::max(*next, cell_time); - requeue_sample_events.push_back(*m); - } - } - } - for (auto& ev: requeue_sample_events) { - sample_events_.push(std::move(ev)); - } - PL(); - - // Ask lowered_ cell to integrate 'one step', delivering any - // events accordingly. - // TODO: Placeholder: with backend polling for samplers, we will - // request that the lowered cell perform the integration all the - // way to tfinal. - - lowered_.step_integration(); - - if (util::is_debug_mode() && !lowered_.is_physical_solution()) { - std::cerr << "warning: solution out of bounds for cell " - << gid_base_ << " at (max) t " << lowered_.max_time() << " ms\n"; - } - } - - // Copy out spike voltage threshold crossings from the back end, then - // generate spikes with global spike source ids. The threshold crossings - // record the local spike source index, which must be converted to a - // global index for spike communication. - PE("events"); - for (auto c: lowered_.get_spikes()) { - spikes_.push_back({spike_sources_[c.index], time_type(c.time)}); - } - // Now that the spikes have been generated, clear the old crossings - // to get ready to record spikes from the next integration period. 
- lowered_.clear_spikes(); - PL(); - } - - void enqueue_events(const std::vector<postsynaptic_spike_event>& events) { - for (auto& e: events) { - events_.push(e); - } - } - - const std::vector<spike>& spikes() const { - return spikes_; - } - - void clear_spikes() { - spikes_.clear(); - } - - const std::vector<source_id_type>& spike_sources() const { - return spike_sources_; - } - - void add_sampler(cell_member_type probe_id, sampler_function s, time_type start_time = 0) { - auto handle = get_probe_handle(probe_id); - - using size_type = sample_event::size_type; - auto sampler_index = size_type(samplers_.size()); - samplers_.push_back({handle, probe_id.gid, s}); - sampler_start_times_.push_back(start_time); - sample_events_.push({sampler_index, start_time}); - } - -private: - - // gid of first cell in group. - cell_gid_type gid_base_; - - // The lowered cell state (e.g. FVM) of the cell. - lowered_cell_type lowered_; - - // Spike detectors attached to the cell. - std::vector<source_id_type> spike_sources_; - - // Spikes that are generated. - std::vector<spike> spikes_; - - // Event time binning manager. - event_binner binner_; - - // Pending events to be delivered. - event_queue<postsynaptic_spike_event> events_; - - // Pending samples to be taken. - event_queue<sample_event> sample_events_; - std::vector<time_type> sampler_start_times_; - - // Handles for accessing lowered cell. - using target_handle = typename lowered_cell_type::target_handle; - std::vector<target_handle> target_handles_; - - using probe_handle = typename lowered_cell_type::probe_handle; - std::vector<probe_handle> probe_handles_; - - struct sampler_entry { - typename lowered_cell_type::probe_handle handle; - cell_gid_type cell_gid; - sampler_function sampler; - }; - - // Collection of samplers to be run against probes in this group. - std::vector<sampler_entry> samplers_; - - // Lookup table for probe ids -> local probe handle indices. - std::vector<std::size_t> probe_handle_divisions_; - - // Lookup table for target ids -> local target handle indices. - std::vector<std::size_t> target_handle_divisions_; - - // Build handle index lookup tables. - template <typename Cells> - void build_handle_partitions(const Cells& cells) { - auto probe_counts = - util::transform_view(cells, [](const cell& c) { return c.probes().size(); }); - auto target_counts = - util::transform_view(cells, [](const cell& c) { return c.synapses().size(); }); - - make_partition(probe_handle_divisions_, probe_counts); - make_partition(target_handle_divisions_, target_counts); - } - - // Use handle partition to get index from id. - template <typename Divisions> - std::size_t handle_partition_lookup(const Divisions& divisions, cell_member_type id) const { - // NB: without any assertion checking, this would just be: - // return divisions[id.gid-gid_base_]+id.index; - - EXPECTS(id.gid>=gid_base_); - - auto handle_partition = util::partition_view(divisions); - EXPECTS(id.gid-gid_base_<handle_partition.size()); - - auto ival = handle_partition[id.gid-gid_base_]; - std::size_t i = ival.first + id.index; - EXPECTS(i<ival.second); - - return i; - } - - // Get probe handle from probe id. 
- probe_handle get_probe_handle(cell_member_type probe_id) const { - return probe_handles_[handle_partition_lookup(probe_handle_divisions_, probe_id)]; - } + virtual ~cell_group() = default; + + virtual void reset() = 0; + virtual void set_binning_policy(binning_kind policy, time_type bin_interval) = 0; + virtual void advance(time_type tfinal, time_type dt) = 0; + virtual void enqueue_events(const std::vector<postsynaptic_spike_event>& events) = 0; + virtual const std::vector<spike>& spikes() const = 0; + virtual void clear_spikes() = 0; + virtual void add_sampler(cell_member_type probe_id, sampler_function s, time_type start_time = 0) = 0; +}; - // Get target handle from target id. - target_handle get_target_handle(cell_member_type target_id) const { - return target_handles_[handle_partition_lookup(target_handle_divisions_, target_id)]; - } +using cell_group_ptr = std::unique_ptr<cell_group>; - void reset_samplers() { - // clear all pending sample events and reset to start at time 0 - sample_events_.clear(); - using size_type = sample_event::size_type; - for(size_type i=0; i<samplers_.size(); ++i) { - sample_events_.push({i, sampler_start_times_[i]}); - } - } -}; +template <typename T, typename... Args> +cell_group_ptr make_cell_group(Args&&... args) { + return cell_group_ptr(new T(std::forward<Args>(args)...)); +} } // namespace mc } // namespace nest diff --git a/src/fvm_multicell.hpp b/src/fvm_multicell.hpp index 78cb8428..ba2cb5c3 100644 --- a/src/fvm_multicell.hpp +++ b/src/fvm_multicell.hpp @@ -8,7 +8,6 @@ #include <vector> #include <algorithms.hpp> -#include <backends/fvm.hpp> #include <cell.hpp> #include <compartment.hpp> #include <event_queue.hpp> diff --git a/src/mc_cell_group.hpp b/src/mc_cell_group.hpp new file mode 100644 index 00000000..ec3cbc72 --- /dev/null +++ b/src/mc_cell_group.hpp @@ -0,0 +1,272 @@ +#pragma once + +#include <cstdint> +#include <functional> +#include <iterator> +#include <vector> + +#include <algorithms.hpp> +#include <cell_group.hpp> +#include <cell.hpp> +#include <common_types.hpp> +#include <event_binner.hpp> +#include <event_queue.hpp> +#include <sampler_function.hpp> +#include <spike.hpp> +#include <util/debug.hpp> +#include <util/partition.hpp> +#include <util/range.hpp> + +#include <profiling/profiler.hpp> + +namespace nest { +namespace mc { + +template <typename LoweredCell> +class mc_cell_group: public cell_group { +public: + using lowered_cell_type = LoweredCell; + using value_type = typename lowered_cell_type::value_type; + using size_type = typename lowered_cell_type::value_type; + using source_id_type = cell_member_type; + + mc_cell_group() = default; + + template <typename Cells> + mc_cell_group(cell_gid_type first_gid, const Cells& cells): + gid_base_{first_gid} + { + // Create lookup structure for probe and target ids. + build_handle_partitions(cells); + std::size_t n_probes = probe_handle_divisions_.back(); + std::size_t n_targets = target_handle_divisions_.back(); + std::size_t n_detectors = algorithms::sum(util::transform_view( + cells, [](const cell& c) { return c.detectors().size(); })); + + // Allocate space to store handles. 
+ target_handles_.resize(n_targets); + probe_handles_.resize(n_probes); + + lowered_.initialize(cells, target_handles_, probe_handles_); + + // Create a list of the global identifiers for the spike sources + auto source_gid = cell_gid_type{gid_base_}; + for (const auto& cell: cells) { + for (cell_lid_type lid=0u; lid<cell.detectors().size(); ++lid) { + spike_sources_.push_back(source_id_type{source_gid, lid}); + } + ++source_gid; + } + EXPECTS(spike_sources_.size()==n_detectors); + } + + void reset() override { + spikes_.clear(); + events_.clear(); + reset_samplers(); + binner_.reset(); + lowered_.reset(); + } + + void set_binning_policy(binning_kind policy, time_type bin_interval) override { + binner_ = event_binner(policy, bin_interval); + } + + void advance(time_type tfinal, time_type dt) override { + EXPECTS(lowered_.state_synchronized()); + + // Bin pending events and enqueue on lowered state. + time_type ev_min_time = lowered_.max_time(); // (but we're synchronized here) + while (auto ev = events_.pop_if_before(tfinal)) { + auto handle = get_target_handle(ev->target); + auto binned_ev_time = binner_.bin(ev->target.gid, ev->time, ev_min_time); + lowered_.add_event(binned_ev_time, handle, ev->weight); + } + + lowered_.setup_integration(tfinal, dt); + + std::vector<sample_event> requeue_sample_events; + while (!lowered_.integration_complete()) { + // Take any pending samples. + // TODO: Placeholder: this will be replaced by a backend polling implementation. + + PE("sampling"); + time_type cell_max_time = lowered_.max_time(); + + requeue_sample_events.clear(); + while (auto m = sample_events_.pop_if_before(cell_max_time)) { + auto& s = samplers_[m->sampler_index]; + EXPECTS((bool)s.sampler); + + time_type cell_time = lowered_.time(s.cell_gid-gid_base_); + if (cell_time<m->time) { + // This cell hasn't reached this sample time yet. + requeue_sample_events.push_back(*m); + } + else { + auto next = s.sampler(cell_time, lowered_.probe(s.handle)); + if (next) { + m->time = std::max(*next, cell_time); + requeue_sample_events.push_back(*m); + } + } + } + for (auto& ev: requeue_sample_events) { + sample_events_.push(std::move(ev)); + } + PL(); + + // Ask lowered_ cell to integrate 'one step', delivering any + // events accordingly. + // TODO: Placeholder: with backend polling for samplers, we will + // request that the lowered cell perform the integration all the + // way to tfinal. + + lowered_.step_integration(); + + if (util::is_debug_mode() && !lowered_.is_physical_solution()) { + std::cerr << "warning: solution out of bounds for cell " + << gid_base_ << " at (max) t " << lowered_.max_time() << " ms\n"; + } + } + + // Copy out spike voltage threshold crossings from the back end, then + // generate spikes with global spike source ids. The threshold crossings + // record the local spike source index, which must be converted to a + // global index for spike communication. + PE("events"); + for (auto c: lowered_.get_spikes()) { + spikes_.push_back({spike_sources_[c.index], time_type(c.time)}); + } + // Now that the spikes have been generated, clear the old crossings + // to get ready to record spikes from the next integration period. 
+ lowered_.clear_spikes(); + PL(); + } + + void enqueue_events(const std::vector<postsynaptic_spike_event>& events) override { + for (auto& e: events) { + events_.push(e); + } + } + + const std::vector<spike>& spikes() const override { + return spikes_; + } + + void clear_spikes() override { + spikes_.clear(); + } + + const std::vector<source_id_type>& spike_sources() const { + return spike_sources_; + } + + void add_sampler(cell_member_type probe_id, sampler_function s, time_type start_time) override { + auto handle = get_probe_handle(probe_id); + + using size_type = sample_event::size_type; + auto sampler_index = size_type(samplers_.size()); + samplers_.push_back({handle, probe_id.gid, s}); + sampler_start_times_.push_back(start_time); + sample_events_.push({sampler_index, start_time}); + } + +private: + + // gid of first cell in group. + cell_gid_type gid_base_; + + // The lowered cell state (e.g. FVM) of the cell. + lowered_cell_type lowered_; + + // Spike detectors attached to the cell. + std::vector<source_id_type> spike_sources_; + + // Spikes that are generated. + std::vector<spike> spikes_; + + // Event time binning manager. + event_binner binner_; + + // Pending events to be delivered. + event_queue<postsynaptic_spike_event> events_; + + // Pending samples to be taken. + event_queue<sample_event> sample_events_; + std::vector<time_type> sampler_start_times_; + + // Handles for accessing lowered cell. + using target_handle = typename lowered_cell_type::target_handle; + std::vector<target_handle> target_handles_; + + using probe_handle = typename lowered_cell_type::probe_handle; + std::vector<probe_handle> probe_handles_; + + struct sampler_entry { + typename lowered_cell_type::probe_handle handle; + cell_gid_type cell_gid; + sampler_function sampler; + }; + + // Collection of samplers to be run against probes in this group. + std::vector<sampler_entry> samplers_; + + // Lookup table for probe ids -> local probe handle indices. + std::vector<std::size_t> probe_handle_divisions_; + + // Lookup table for target ids -> local target handle indices. + std::vector<std::size_t> target_handle_divisions_; + + // Build handle index lookup tables. + template <typename Cells> + void build_handle_partitions(const Cells& cells) { + auto probe_counts = + util::transform_view(cells, [](const cell& c) { return c.probes().size(); }); + auto target_counts = + util::transform_view(cells, [](const cell& c) { return c.synapses().size(); }); + + make_partition(probe_handle_divisions_, probe_counts); + make_partition(target_handle_divisions_, target_counts); + } + + // Use handle partition to get index from id. + template <typename Divisions> + std::size_t handle_partition_lookup(const Divisions& divisions, cell_member_type id) const { + // NB: without any assertion checking, this would just be: + // return divisions[id.gid-gid_base_]+id.index; + + EXPECTS(id.gid>=gid_base_); + + auto handle_partition = util::partition_view(divisions); + EXPECTS(id.gid-gid_base_<handle_partition.size()); + + auto ival = handle_partition[id.gid-gid_base_]; + std::size_t i = ival.first + id.index; + EXPECTS(i<ival.second); + + return i; + } + + // Get probe handle from probe id. + probe_handle get_probe_handle(cell_member_type probe_id) const { + return probe_handles_[handle_partition_lookup(probe_handle_divisions_, probe_id)]; + } + + // Get target handle from target id. 
+ target_handle get_target_handle(cell_member_type target_id) const { + return target_handles_[handle_partition_lookup(target_handle_divisions_, target_id)]; + } + + void reset_samplers() { + // clear all pending sample events and reset to start at time 0 + sample_events_.clear(); + using size_type = sample_event::size_type; + for(size_type i=0; i<samplers_.size(); ++i) { + sample_events_.push({i, sampler_start_times_[i]}); + } + } +}; + +} // namespace mc +} // namespace nest diff --git a/src/memory/allocator.hpp b/src/memory/allocator.hpp index 3755e293..9add1a67 100644 --- a/src/memory/allocator.hpp +++ b/src/memory/allocator.hpp @@ -2,7 +2,7 @@ #include <limits> -#ifdef NMC_HAVE_CUDA +#ifdef NMC_HAVE_GPU #include <cuda.h> #include <cuda_runtime.h> #endif @@ -138,7 +138,7 @@ namespace impl { } #endif -#ifdef NMC_HAVE_CUDA +#ifdef NMC_HAVE_GPU namespace cuda { template <size_type Alignment> class pinned_policy { @@ -243,7 +243,7 @@ namespace impl { } }; } // namespace cuda -#endif // #ifdef NMC_HAVE_CUDA +#endif // #ifdef NMC_HAVE_GPU } // namespace impl template<typename T, typename Policy > @@ -318,7 +318,7 @@ namespace util { } }; -#ifdef NMC_HAVE_CUDA +#ifdef NMC_HAVE_GPU template <size_t Alignment> struct type_printer<impl::cuda::pinned_policy<Alignment>>{ static std::string print() { @@ -357,7 +357,7 @@ template <class T, size_t alignment=(512/8)> using hbw_allocator = allocator<T, impl::knl::hbw_policy<alignment>>; #endif -#ifdef NMC_HAVE_CUDA +#ifdef NMC_HAVE_GPU // For pinned and allocation set the default alignment to correspond to // the alignment of 1024 bytes, because pinned memory is allocated at // page boundaries. It is allocated at page boundaries (typically 4k), diff --git a/src/memory/gpu.hpp b/src/memory/gpu.hpp index 8e950d56..c5955eac 100644 --- a/src/memory/gpu.hpp +++ b/src/memory/gpu.hpp @@ -1,6 +1,6 @@ #pragma once -#ifdef NMC_HAVE_CUDA +#ifdef NMC_HAVE_GPU #include <string> #include <cstdint> diff --git a/src/memory/host_coordinator.hpp b/src/memory/host_coordinator.hpp index e9cb0698..1f45b336 100644 --- a/src/memory/host_coordinator.hpp +++ b/src/memory/host_coordinator.hpp @@ -11,7 +11,7 @@ #include "allocator.hpp" #include "util.hpp" -#ifdef NMC_HAVE_CUDA +#ifdef NMC_HAVE_GPU #include "gpu.hpp" #endif @@ -23,7 +23,7 @@ namespace memory { template <typename T, class Allocator> class host_coordinator; -#ifdef NMC_HAVE_CUDA +#ifdef NMC_HAVE_GPU template <typename T, class Allocator> class device_coordinator; #endif @@ -124,7 +124,7 @@ public: std::copy(from.begin(), from.end(), to.begin()); } -#ifdef NMC_HAVE_CUDA +#ifdef NMC_HAVE_GPU // copy memory from device to host template <class Alloc> void copy( diff --git a/src/memory/memory.hpp b/src/memory/memory.hpp index e5261958..50bbdf14 100644 --- a/src/memory/memory.hpp +++ b/src/memory/memory.hpp @@ -6,7 +6,7 @@ #include "definitions.hpp" #include "host_coordinator.hpp" -#ifdef NMC_HAVE_CUDA +#ifdef NMC_HAVE_GPU #include "device_coordinator.hpp" #endif @@ -29,7 +29,7 @@ std::ostream& operator<< (std::ostream& o, host_view<T> const& v) { return o; } -#ifdef NMC_HAVE_CUDA +#ifdef NMC_HAVE_GPU // specialization for pinned vectors. 
Use a host_coordinator, because memory is // in the host memory space, and all of the helpers (copy, set, etc) are the // same with and without page locked memory diff --git a/src/memory/wrappers.hpp b/src/memory/wrappers.hpp index ab463a23..50393017 100644 --- a/src/memory/wrappers.hpp +++ b/src/memory/wrappers.hpp @@ -5,7 +5,7 @@ #include <memory/memory.hpp> -#ifdef NMC_HAVE_CUDA +#ifdef NMC_HAVE_GPU #include <cuda.h> #include <cuda_runtime.h> #endif @@ -96,7 +96,7 @@ namespace util { return is_on_host<typename std::decay<T>::type>::value; } - #ifdef NMC_HAVE_CUDA + #ifdef NMC_HAVE_GPU template <typename T> struct is_on_gpu : std::false_type {}; @@ -132,7 +132,7 @@ auto on_host(const C& c) -> decltype(make_const_view(c)) { return make_const_view(c); } -#ifdef NMC_HAVE_CUDA +#ifdef NMC_HAVE_GPU template < typename C, typename = typename std::enable_if<util::is_on_gpu_v<C>()>::type diff --git a/src/model.hpp b/src/model.hpp index 837ddc8d..91d0cd4e 100644 --- a/src/model.hpp +++ b/src/model.hpp @@ -5,29 +5,36 @@ #include <cstdlib> +#include <backends.hpp> +#include <fvm_multicell.hpp> + #include <common_types.hpp> #include <cell.hpp> #include <cell_group.hpp> #include <communication/communicator.hpp> #include <communication/global_policy.hpp> +#include <mc_cell_group.hpp> #include <profiling/profiler.hpp> #include <recipe.hpp> +#include <sampler_function.hpp> #include <thread_private_spike_store.hpp> +#include <threading/threading.hpp> +#include <trace_sampler.hpp> #include <util/nop.hpp> #include <util/partition.hpp> #include <util/range.hpp> -#include "trace_sampler.hpp" - namespace nest { namespace mc { -// FIXME: this is going to go -template <typename Cell> +using gpu_lowered_cell = + mc_cell_group<fvm::fvm_multicell<gpu::backend>>; + +using multicore_lowered_cell = + mc_cell_group<fvm::fvm_multicell<multicore::backend>>; + class model { public: - // FIXME: this is an intermediate step to remove the template parameter from model - using cell_group_type = cell_group<Cell>; // FIXME using communicator_type = communication::communicator<communication::global_policy>; using spike_export_function = std::function<void(const std::vector<spike>&)>; @@ -37,15 +44,19 @@ public: }; template <typename Iter> - model(const recipe& rec, const util::partition_range<Iter>& groups): - cell_group_divisions_(groups.divisions().begin(), groups.divisions().end()) + model( const recipe& rec, + const util::partition_range<Iter>& groups, + backend_policy policy): + cell_group_divisions_(groups.divisions().begin(), groups.divisions().end()), + backend_policy_(policy) { // set up communicator based on partition - communicator_ = communicator_type{gid_partition()}; + communicator_ = communicator_type(gid_partition()); // generate the cell groups in parallel, with one task per cell group - cell_groups_ = std::vector<cell_group_type>{gid_partition().size()}; - threading::parallel_vector<probe_record> probes; + cell_groups_.resize(gid_partition().size()); + // thread safe vector for constructing the list of probes in parallel + threading::parallel_vector<probe_record> probe_tmp; threading::parallel_for::apply(0, cell_groups_.size(), [&](cell_gid_type i) { @@ -61,16 +72,21 @@ public: cell_lid_type j = 0; for (const auto& probe: cells[i].probes()) { cell_member_type probe_id{gid, j++}; - probes.push_back({probe_id, probe}); + probe_tmp.push_back({probe_id, probe}); } } - cell_groups_[i] = cell_group_type(gids.first, cells); + if (backend_policy_==backend_policy::use_multicore) { + cell_groups_[i] = 
make_cell_group<multicore_lowered_cell>(gids.first, cells); + } + else { + cell_groups_[i] = make_cell_group<gpu_lowered_cell>(gids.first, cells); + } PL(2); }); - // insert probes - probes_.assign(probes.begin(), probes.end()); + // store probes + probes_.assign(probe_tmp.begin(), probe_tmp.end()); // generate the network connections for (cell_gid_type i: util::make_span(gid_partition().bounds())) { @@ -89,14 +105,14 @@ public: } // one cell per group: - model(const recipe& rec): - model(rec, util::partition_view(util::make_span(0, rec.num_cells()+1))) + model(const recipe& rec, backend_policy policy): + model(rec, util::partition_view(util::make_span(0, rec.num_cells()+1)), policy) {} void reset() { t_ = 0.; for (auto& group: cell_groups_) { - group.reset(); + group->reset(); } communicator_.reset(); @@ -132,14 +148,14 @@ public: auto &group = cell_groups_[i]; PE("stepping","events"); - group.enqueue_events(current_events()[i]); + group->enqueue_events(current_events()[i]); PL(); - group.advance(tuntil, dt); + group->advance(tuntil, dt); PE("events"); - current_spikes().insert(group.spikes()); - group.clear_spikes(); + current_spikes().insert(group->spikes()); + group->clear_spikes(); PL(2); }); }; @@ -207,14 +223,13 @@ public: current_spikes().get().push_back({source, tspike}); } - template <typename F> - void attach_sampler(cell_member_type probe_id, F&& f, time_type tfrom = 0) { + void attach_sampler(cell_member_type probe_id, sampler_function f, time_type tfrom = 0) { if (!algorithms::in_interval(probe_id.gid, gid_partition().bounds())) { return; } const auto idx = gid_partition().index(probe_id.gid); - cell_groups_[idx].add_sampler(probe_id, std::forward<F>(f), tfrom); + cell_groups_[idx]->add_sampler(probe_id, f, tfrom); } const std::vector<probe_record>& probes() const { return probes_; } @@ -235,13 +250,13 @@ public: // Set event binning policy on all our groups. 
void set_binning_policy(binning_kind policy, time_type bin_interval) { for (auto& group: cell_groups_) { - group.set_binning_policy(policy, bin_interval); + group->set_binning_policy(policy, bin_interval); } } // access cell_group directly - cell_group_type& group(int i) { - return cell_groups_[i]; + cell_group& group(int i) { + return *cell_groups_[i]; } // register a callback that will perform a export of the global @@ -258,13 +273,14 @@ public: private: std::vector<cell_gid_type> cell_group_divisions_; + backend_policy backend_policy_; auto gid_partition() const -> decltype(util::partition_view(cell_group_divisions_)) { return util::partition_view(cell_group_divisions_); } time_type t_ = 0.; - std::vector<cell_group_type> cell_groups_; + std::vector<std::unique_ptr<cell_group>> cell_groups_; communicator_type communicator_; std::vector<probe_record> probes_; diff --git a/src/sampler_function.hpp b/src/sampler_function.hpp new file mode 100644 index 00000000..6a042b58 --- /dev/null +++ b/src/sampler_function.hpp @@ -0,0 +1,14 @@ +#pragma once + +#include <functional> + +#include <common_types.hpp> +#include <util/optional.hpp> + +namespace nest { +namespace mc { + +using sampler_function = std::function<util::optional<time_type>(time_type, double)>; + +} // namespace mc +} // namespace nest diff --git a/src/simple_sampler.hpp b/src/simple_sampler.hpp index cb055734..5ffc41d2 100644 --- a/src/simple_sampler.hpp +++ b/src/simple_sampler.hpp @@ -9,6 +9,7 @@ #include <vector> #include <common_types.hpp> +#include <sampler_function.hpp> #include <util/optional.hpp> #include <util/deduce_return.hpp> #include <util/transform.hpp> @@ -43,7 +44,7 @@ class simple_sampler { public: trace_data trace; - simple_sampler(float dt, float t0=0): + simple_sampler(time_type dt, time_type t0=0): t0_(t0), sample_dt_(dt), t_next_sample_(t0) @@ -54,23 +55,22 @@ public: t_next_sample_ = t0_; } - template <typename Time = float, typename Value = double> - std::function<util::optional<Time> (Time, Value)> sampler() { - return [&](Time t, Value v) -> util::optional<Time> { - if (t<(Time)t_next_sample_) { - return (Time)t_next_sample_; + sampler_function sampler() { + return [&](time_type t, double v) -> util::optional<time_type> { + if (t<t_next_sample_) { + return t_next_sample_; } else { - trace.push_back({float(t), double(v)}); + trace.push_back({t, v}); return t_next_sample_+=sample_dt_; } }; } private: - float t0_ = 0; - float sample_dt_ = 0; - float t_next_sample_ = 0; + time_type t0_ = 0; + time_type sample_dt_ = 0; + time_type t_next_sample_ = 0; }; } // namespace mc diff --git a/src/util/config.hpp b/src/util/config.hpp index 4bd3adf3..2e113bf4 100644 --- a/src/util/config.hpp +++ b/src/util/config.hpp @@ -30,7 +30,7 @@ constexpr bool has_power_measurement = true; constexpr bool has_power_measurement = false; #endif -#ifdef NMC_HAVE_CUDA +#ifdef NMC_HAVE_GPU constexpr bool has_cuda = true; #else constexpr bool has_cuda = false; diff --git a/tests/performance/io/disk_io.cpp b/tests/performance/io/disk_io.cpp index 05b64666..c671e907 100644 --- a/tests/performance/io/disk_io.cpp +++ b/tests/performance/io/disk_io.cpp @@ -17,8 +17,6 @@ using namespace nest::mc; using global_policy = communication::global_policy; -using lowered_cell = fvm::fvm_multicell<multicore::backend>; -using cell_group_type = cell_group<lowered_cell>; using timer = util::timer_type; int main(int argc, char** argv) { diff --git a/tests/unit/CMakeLists.txt b/tests/unit/CMakeLists.txt index 3ac4c1ff..0bae169c 100644 --- 
a/tests/unit/CMakeLists.txt +++ b/tests/unit/CMakeLists.txt @@ -18,7 +18,7 @@ build_modules( set(TEST_CUDA_SOURCES test_atomics.cu - test_cell_group.cu + test_mc_cell_group.cu test_gpu_stack.cu test_matrix.cu test_spikes.cu @@ -31,6 +31,7 @@ set(TEST_CUDA_SOURCES set(TEST_SOURCES # unit tests test_algorithms.cpp + test_backend.cpp test_double_buffer.cpp test_cell.cpp test_compartments.cpp @@ -41,7 +42,7 @@ set(TEST_SOURCES test_event_binner.cpp test_filter.cpp test_fvm_multi.cpp - test_cell_group.cpp + test_mc_cell_group.cpp test_lexcmp.cpp test_mask_stream.cpp test_math.cpp diff --git a/tests/unit/test_backend.cpp b/tests/unit/test_backend.cpp new file mode 100644 index 00000000..08679d0d --- /dev/null +++ b/tests/unit/test_backend.cpp @@ -0,0 +1,23 @@ +#include <type_traits> + +#include <backends/fvm.hpp> +#include <memory/memory.hpp> +#include <util/config.hpp> + +#include "../gtest.h" + +TEST(backends, gpu_is_null) { + using backend = nest::mc::gpu::backend; + + static_assert(std::is_same<backend, nest::mc::null_backend>::value || nest::mc::config::has_cuda, + "gpu back should be defined as null when compiling without gpu support."); + + if (not nest::mc::config::has_cuda) { + EXPECT_FALSE(backend::is_supported()); + + EXPECT_FALSE(backend::has_mechanism("hh")); + EXPECT_THROW( + backend::make_mechanism("hh", backend::view(), backend::view(), {}, {}), + std::runtime_error); + } +} diff --git a/tests/unit/test_fvm_multi.cpp b/tests/unit/test_fvm_multi.cpp index 66226d9e..86777590 100644 --- a/tests/unit/test_fvm_multi.cpp +++ b/tests/unit/test_fvm_multi.cpp @@ -2,8 +2,9 @@ #include "../gtest.h" -#include <common_types.hpp> +#include <backends/multicore/fvm.hpp> #include <cell.hpp> +#include <common_types.hpp> #include <fvm_multicell.hpp> #include <util/rangeutil.hpp> diff --git a/tests/unit/test_cell_group.cpp b/tests/unit/test_mc_cell_group.cpp similarity index 76% rename from tests/unit/test_cell_group.cpp rename to tests/unit/test_mc_cell_group.cpp index 0016b24e..00491537 100644 --- a/tests/unit/test_cell_group.cpp +++ b/tests/unit/test_mc_cell_group.cpp @@ -1,8 +1,9 @@ #include "../gtest.h" -#include <cell_group.hpp> +#include <backends/multicore/fvm.hpp> #include <common_types.hpp> #include <fvm_multicell.hpp> +#include <mc_cell_group.hpp> #include <util/rangeutil.hpp> #include "common.hpp" @@ -11,19 +12,17 @@ using namespace nest::mc; using fvm_cell = fvm::fvm_multicell<nest::mc::multicore::backend>; -nest::mc::cell make_cell() { - using namespace nest::mc; +cell make_cell() { + auto c = make_cell_ball_and_stick(); - nest::mc::cell cell = make_cell_ball_and_stick(); + c.add_detector({0, 0}, 0); + c.segment(1)->set_compartments(101); - cell.add_detector({0, 0}, 0); - cell.segment(1)->set_compartments(101); - - return cell; + return c; } -TEST(cell_group, test) { - cell_group<fvm_cell> group{0, util::singleton_view(make_cell())}; +TEST(mc_cell_group, test) { + mc_cell_group<fvm_cell> group{0, util::singleton_view(make_cell())}; group.advance(50, 0.01); @@ -32,8 +31,8 @@ TEST(cell_group, test) { EXPECT_EQ(4u, group.spikes().size()); } -TEST(cell_group, sources) { - using cell_group_type = cell_group<fvm_cell>; +TEST(mc_cell_group, sources) { + using cell_group_type = mc_cell_group<fvm_cell>; auto cell = make_cell(); EXPECT_EQ(cell.detectors().size(), 1u); diff --git a/tests/unit/test_cell_group.cu b/tests/unit/test_mc_cell_group.cu similarity index 86% rename from tests/unit/test_cell_group.cu rename to tests/unit/test_mc_cell_group.cu index 16bc124d..92f7af50 100644 --- 
a/tests/unit/test_cell_group.cu +++ b/tests/unit/test_mc_cell_group.cu @@ -1,6 +1,7 @@ #include "../gtest.h" -#include <cell_group.hpp> +#include <backends/gpu/fvm.hpp> +#include <mc_cell_group.hpp> #include <common_types.hpp> #include <fvm_multicell.hpp> #include <util/rangeutil.hpp> @@ -25,7 +26,7 @@ nest::mc::cell make_cell() { TEST(cell_group, test) { - using cell_group_type = cell_group<fvm_cell>; + using cell_group_type = mc_cell_group<fvm_cell>; auto group = cell_group_type{0, util::singleton_view(make_cell())}; group.advance(50, 0.01); diff --git a/tests/unit/test_probe.cpp b/tests/unit/test_probe.cpp index 005eead2..8ea355ea 100644 --- a/tests/unit/test_probe.cpp +++ b/tests/unit/test_probe.cpp @@ -1,5 +1,6 @@ #include "../gtest.h" +#include <backends/multicore/fvm.hpp> #include <common_types.hpp> #include <cell.hpp> #include <fvm_multicell.hpp> diff --git a/tests/validation/convergence_test.hpp b/tests/validation/convergence_test.hpp index 4d8ea28d..7c9f5936 100644 --- a/tests/validation/convergence_test.hpp +++ b/tests/validation/convergence_test.hpp @@ -75,7 +75,7 @@ public: // reset samplers and attach to probe locations for (auto& se: cell_samplers_) { se.sampler.reset(); - m.attach_sampler(se.probe, se.sampler.template sampler<>()); + m.attach_sampler(se.probe, se.sampler.sampler()); } m.run(t_end, dt); diff --git a/tests/validation/validate_ball_and_stick.cpp b/tests/validation/validate_ball_and_stick.cpp index 109155a6..c28530a1 100644 --- a/tests/validation/validate_ball_and_stick.cpp +++ b/tests/validation/validate_ball_and_stick.cpp @@ -3,24 +3,24 @@ #include <fvm_multicell.hpp> -using lowered_cell = nest::mc::fvm::fvm_multicell<nest::mc::multicore::backend>; +const auto backend = nest::mc::backend_policy::use_multicore; TEST(ball_and_stick, neuron_ref) { - validate_ball_and_stick<lowered_cell>(); + validate_ball_and_stick(backend); } TEST(ball_and_taper, neuron_ref) { - validate_ball_and_taper<lowered_cell>(); + validate_ball_and_taper(backend); } TEST(ball_and_3stick, neuron_ref) { - validate_ball_and_3stick<lowered_cell>(); + validate_ball_and_3stick(backend); } TEST(rallpack1, numeric_ref) { - validate_rallpack1<lowered_cell>(); + validate_rallpack1(backend); } TEST(ball_and_squiggle, neuron_ref) { - validate_ball_and_squiggle<lowered_cell>(); + validate_ball_and_squiggle(backend); } diff --git a/tests/validation/validate_ball_and_stick.cu b/tests/validation/validate_ball_and_stick.cu index 52d1bf0d..3f8e4427 100644 --- a/tests/validation/validate_ball_and_stick.cu +++ b/tests/validation/validate_ball_and_stick.cu @@ -3,24 +3,24 @@ #include <fvm_multicell.hpp> -using lowered_cell = nest::mc::fvm::fvm_multicell<nest::mc::gpu::backend>; +const auto backend = nest::mc::backend_policy::prefer_gpu; TEST(ball_and_stick, neuron_ref) { - validate_ball_and_stick<lowered_cell>(); + validate_ball_and_stick(backend); } TEST(ball_and_taper, neuron_ref) { - validate_ball_and_taper<lowered_cell>(); + validate_ball_and_taper(backend); } TEST(ball_and_3stick, neuron_ref) { - validate_ball_and_3stick<lowered_cell>(); + validate_ball_and_3stick(backend); } TEST(rallpack1, numeric_ref) { - validate_rallpack1<lowered_cell>(); + validate_rallpack1(backend); } TEST(ball_and_squiggle, neuron_ref) { - validate_ball_and_squiggle<lowered_cell>(); + validate_ball_and_squiggle(backend); } diff --git a/tests/validation/validate_ball_and_stick.hpp b/tests/validation/validate_ball_and_stick.hpp index 0a438f9d..da1652e6 100644 --- a/tests/validation/validate_ball_and_stick.hpp +++ 
b/tests/validation/validate_ball_and_stick.hpp @@ -15,13 +15,11 @@ #include "trace_analysis.hpp" #include "validation_data.hpp" -template < - typename LoweredCell, - typename SamplerInfoSeq -> +template <typename SamplerInfoSeq> void run_ncomp_convergence_test( const char* model_name, const nest::mc::util::path& ref_data_path, + nest::mc::backend_policy backend, const nest::mc::cell& c, SamplerInfoSeq& samplers, float t_end=100.f) @@ -37,7 +35,7 @@ void run_ncomp_convergence_test( {"dt", dt}, {"sim", "nestmc"}, {"units", "mV"}, - {"backend", LoweredCell::backend::name()} + {"backend_policy", to_string(backend)} }; auto exclude = stimulus_ends(c); @@ -51,7 +49,7 @@ void run_ncomp_convergence_test( seg->set_compartments(ncomp); } } - model<LoweredCell> m(singleton_recipe{c}); + model m(singleton_recipe{c}, backend); runner.run(m, ncomp, t_end, dt, exclude); } @@ -59,8 +57,7 @@ void run_ncomp_convergence_test( runner.assert_all_convergence(); } -template <typename LoweredCell> -void validate_ball_and_stick() { +void validate_ball_and_stick(nest::mc::backend_policy backend) { using namespace nest::mc; cell c = make_cell_ball_and_stick(); @@ -73,15 +70,15 @@ void validate_ball_and_stick() { {"dend.end", {0u, 2u}, simple_sampler(sample_dt)} }; - run_ncomp_convergence_test<LoweredCell>( + run_ncomp_convergence_test( "ball_and_stick", "neuron_ball_and_stick.json", + backend, c, samplers); } -template <typename LoweredCell> -void validate_ball_and_taper() { +void validate_ball_and_taper(nest::mc::backend_policy backend) { using namespace nest::mc; cell c = make_cell_ball_and_taper(); @@ -94,15 +91,15 @@ void validate_ball_and_taper() { {"taper.end", {0u, 2u}, simple_sampler(sample_dt)} }; - run_ncomp_convergence_test<LoweredCell>( + run_ncomp_convergence_test( "ball_and_taper", "neuron_ball_and_taper.json", + backend, c, samplers); } -template <typename LoweredCell> -void validate_ball_and_3stick() { +void validate_ball_and_3stick(nest::mc::backend_policy backend) { using namespace nest::mc; cell c = make_cell_ball_and_3stick(); @@ -119,15 +116,15 @@ void validate_ball_and_3stick() { {"dend3.end", {0u, 6u}, simple_sampler(sample_dt)} }; - run_ncomp_convergence_test<LoweredCell>( + run_ncomp_convergence_test( "ball_and_3stick", "neuron_ball_and_3stick.json", + backend, c, samplers); } -template <typename LoweredCell> -void validate_rallpack1() { +void validate_rallpack1(nest::mc::backend_policy backend) { using namespace nest::mc; cell c = make_cell_simple_cable(); @@ -144,16 +141,16 @@ void validate_rallpack1() { {"cable.x1.0", {0u, 2u}, simple_sampler(sample_dt)}, }; - run_ncomp_convergence_test<LoweredCell>( + run_ncomp_convergence_test( "rallpack1", "numeric_rallpack1.json", + backend, c, samplers, 250.f); } -template <typename LoweredCell> -void validate_ball_and_squiggle() { +void validate_ball_and_squiggle(nest::mc::backend_policy backend) { using namespace nest::mc; cell c = make_cell_ball_and_squiggle(); @@ -177,9 +174,10 @@ void validate_ball_and_squiggle() { samplers); #endif - run_ncomp_convergence_test<LoweredCell>( + run_ncomp_convergence_test( "ball_and_squiggle_integrator", "neuron_ball_and_squiggle.json", + backend, c, samplers); } diff --git a/tests/validation/validate_kinetic.cpp b/tests/validation/validate_kinetic.cpp index 1c200516..b5d97b23 100644 --- a/tests/validation/validate_kinetic.cpp +++ b/tests/validation/validate_kinetic.cpp @@ -2,12 +2,12 @@ #include "../gtest.h" -using lowered_cell = nest::mc::fvm::fvm_multicell<nest::mc::multicore::backend>; +const auto backend = 
nest::mc::backend_policy::use_multicore; TEST(kinetic, kin1_numeric_ref) { - validate_kinetic_kin1<lowered_cell>(); + validate_kinetic_kin1(backend); } TEST(kinetic, kinlva_numeric_ref) { - validate_kinetic_kinlva<lowered_cell>(); + validate_kinetic_kinlva(backend); } diff --git a/tests/validation/validate_kinetic.cu b/tests/validation/validate_kinetic.cu index 4c32cd93..a86847f8 100644 --- a/tests/validation/validate_kinetic.cu +++ b/tests/validation/validate_kinetic.cu @@ -2,12 +2,12 @@ #include "../gtest.h" -using lowered_cell = nest::mc::fvm::fvm_multicell<nest::mc::gpu::backend>; +const auto backend = nest::mc::backend_policy::prefer_gpu; TEST(kinetic, kin1_numeric_ref) { - validate_kinetic_kin1<lowered_cell>(); + validate_kinetic_kin1(backend); } TEST(kinetic, kinlva_numeric_ref) { - validate_kinetic_kinlva<lowered_cell>(); + validate_kinetic_kinlva(backend); } diff --git a/tests/validation/validate_kinetic.hpp b/tests/validation/validate_kinetic.hpp index 7ccde7c9..535ed869 100644 --- a/tests/validation/validate_kinetic.hpp +++ b/tests/validation/validate_kinetic.hpp @@ -13,8 +13,13 @@ #include "trace_analysis.hpp" #include "validation_data.hpp" -template <typename LoweredCell> -void run_kinetic_dt(nest::mc::cell& c, float t_end, nlohmann::json meta, const std::string& ref_file) { +void run_kinetic_dt( + nest::mc::backend_policy backend, + nest::mc::cell& c, + float t_end, + nlohmann::json meta, + const std::string& ref_file) +{ using namespace nest::mc; float sample_dt = .025f; @@ -23,11 +28,11 @@ void run_kinetic_dt(nest::mc::cell& c, float t_end, nlohmann::json meta, const s }; meta["sim"] = "nestmc"; - meta["backend"] = LoweredCell::backend::name(); + meta["backend_policy"] = to_string(backend); convergence_test_runner<float> runner("dt", samplers, meta); runner.load_reference_data(ref_file); - model<LoweredCell> model(singleton_recipe{c}); + model model(singleton_recipe{c}, backend); auto exclude = stimulus_ends(c); @@ -49,8 +54,7 @@ end: runner.assert_all_convergence(); } -template <typename LoweredCell> -void validate_kinetic_kin1() { +void validate_kinetic_kin1(nest::mc::backend_policy backend) { using namespace nest::mc; // 20 µm diameter soma with single mechanism, current probe @@ -65,11 +69,10 @@ void validate_kinetic_kin1() { {"units", "nA"} }; - run_kinetic_dt<LoweredCell>(c, 100.f, meta, "numeric_kin1.json"); + run_kinetic_dt(backend, c, 100.f, meta, "numeric_kin1.json"); } -template <typename LoweredCell> -void validate_kinetic_kinlva() { +void validate_kinetic_kinlva(nest::mc::backend_policy backend) { using namespace nest::mc; // 20 µm diameter soma with single mechanism, current probe @@ -85,6 +88,6 @@ void validate_kinetic_kinlva() { {"units", "mV"} }; - run_kinetic_dt<LoweredCell>(c, 300.f, meta, "numeric_kinlva.json"); + run_kinetic_dt(backend, c, 300.f, meta, "numeric_kinlva.json"); } diff --git a/tests/validation/validate_soma.cpp b/tests/validation/validate_soma.cpp index e4e58e7b..57b094b3 100644 --- a/tests/validation/validate_soma.cpp +++ b/tests/validation/validate_soma.cpp @@ -2,8 +2,9 @@ #include "../gtest.h" -using lowered_cell = nest::mc::fvm::fvm_multicell<nest::mc::multicore::backend>; + +const auto backend = nest::mc::backend_policy::use_multicore; TEST(soma, numeric_ref) { - validate_soma<lowered_cell>(); + validate_soma(backend); } diff --git a/tests/validation/validate_soma.cu b/tests/validation/validate_soma.cu index 35355ab9..b31b91a4 100644 --- a/tests/validation/validate_soma.cu +++ b/tests/validation/validate_soma.cu @@ -2,8 +2,8 @@ 
#include "../gtest.h" -using lowered_cell = nest::mc::fvm::fvm_multicell<nest::mc::gpu::backend>; +const auto backend = nest::mc::backend_policy::prefer_gpu; TEST(soma, numeric_ref) { - validate_soma<lowered_cell>(); + validate_soma(backend); } diff --git a/tests/validation/validate_soma.hpp b/tests/validation/validate_soma.hpp index eee44e6b..0c533faa 100644 --- a/tests/validation/validate_soma.hpp +++ b/tests/validation/validate_soma.hpp @@ -13,13 +13,12 @@ #include "trace_analysis.hpp" #include "validation_data.hpp" -template <typename LoweredCell> -void validate_soma() { +void validate_soma(nest::mc::backend_policy backend) { using namespace nest::mc; cell c = make_cell_soma_only(); add_common_voltage_probes(c); - model<LoweredCell> model(singleton_recipe{c}); + model model(singleton_recipe{c}, backend); float sample_dt = .025f; sampler_info samplers[] = {{"soma.mid", {0u, 0u}, simple_sampler(sample_dt)}}; @@ -29,7 +28,7 @@ void validate_soma() { {"model", "soma"}, {"sim", "nestmc"}, {"units", "mV"}, - {"backend", LoweredCell::backend::name()} + {"backend_policy", to_string(backend)} }; convergence_test_runner<float> runner("dt", samplers, meta); diff --git a/tests/validation/validate_synapses.cpp b/tests/validation/validate_synapses.cpp index 23413cc9..d1331b70 100644 --- a/tests/validation/validate_synapses.cpp +++ b/tests/validation/validate_synapses.cpp @@ -3,14 +3,14 @@ #include "../gtest.h" #include "validate_synapses.hpp" -using lowered_cell = nest::mc::fvm::fvm_multicell<nest::mc::multicore::backend>; +const auto backend = nest::mc::backend_policy::use_multicore; TEST(simple_synapse, expsyn_neuron_ref) { SCOPED_TRACE("expsyn"); - run_synapse_test<lowered_cell>("expsyn", "neuron_simple_exp_synapse.json"); + run_synapse_test("expsyn", "neuron_simple_exp_synapse.json", backend); } TEST(simple_synapse, exp2syn_neuron_ref) { SCOPED_TRACE("exp2syn"); - run_synapse_test<lowered_cell>("exp2syn", "neuron_simple_exp2_synapse.json"); + run_synapse_test("exp2syn", "neuron_simple_exp2_synapse.json", backend); } diff --git a/tests/validation/validate_synapses.cu b/tests/validation/validate_synapses.cu index 0dedd584..47f0cc67 100644 --- a/tests/validation/validate_synapses.cu +++ b/tests/validation/validate_synapses.cu @@ -3,14 +3,14 @@ #include "../gtest.h" #include "validate_synapses.hpp" -using lowered_cell = nest::mc::fvm::fvm_multicell<nest::mc::gpu::backend>; +const auto backend = nest::mc::backend_policy::prefer_gpu; TEST(simple_synapse, expsyn_neuron_ref) { SCOPED_TRACE("expsyn"); - run_synapse_test<lowered_cell>("expsyn", "neuron_simple_exp_synapse.json"); + run_synapse_test("expsyn", "neuron_simple_exp_synapse.json", backend); } TEST(simple_synapse, exp2syn_neuron_ref) { SCOPED_TRACE("exp2syn"); - run_synapse_test<lowered_cell>("exp2syn", "neuron_simple_exp2_synapse.json"); + run_synapse_test("exp2syn", "neuron_simple_exp2_synapse.json", backend); } diff --git a/tests/validation/validate_synapses.hpp b/tests/validation/validate_synapses.hpp index cee90062..c3f7f294 100644 --- a/tests/validation/validate_synapses.hpp +++ b/tests/validation/validate_synapses.hpp @@ -15,10 +15,10 @@ #include "trace_analysis.hpp" #include "validation_data.hpp" -template <typename LoweredCell> void run_synapse_test( const char* syn_type, const nest::mc::util::path& ref_data_path, + nest::mc::backend_policy backend, float t_end=70.f, float dt=0.001) { @@ -30,7 +30,7 @@ void run_synapse_test( {"model", syn_type}, {"sim", "nestmc"}, {"units", "mV"}, - {"backend", LoweredCell::backend::name()} + 
{"backend_policy", to_string(backend)} }; cell c = make_cell_ball_and_stick(false); // no stimuli @@ -60,7 +60,7 @@ void run_synapse_test( for (int ncomp = 10; ncomp<max_ncomp; ncomp*=2) { c.cable(1)->set_compartments(ncomp); - model<LoweredCell> m(singleton_recipe{c}); + model m(singleton_recipe{c}, backend); m.group(0).enqueue_events(synthetic_events); runner.run(m, ncomp, t_end, dt, exclude); -- GitLab