Skip to content
Snippets Groups Projects
Unverified Commit 22bd848d authored by Brent Huisman's avatar Brent Huisman Committed by GitHub
Browse files

Have option to set thread count to local maximum (#1716)

* Pyarb specific.
* `proc_allocation_shim()` throws error if user sets threads to zero.
* `arbor.context` constructor accepts `threads` set to `"avail_threads"`, which sets the number of threads to `arbenv::thread_concurrency()`
  * This introduces a dependency on arbenv for Pyarb.
* Docs and tests added.

Fixes #1692 
parent 546e2ad4
No related branches found
No related tags found
No related merge requests found
......@@ -37,6 +37,11 @@ bad_global_property::bad_global_property(cell_kind kind):
kind(kind)
{}
// Thrown when a caller requests a context/proc_allocation with zero threads;
// thread counts must be strictly positive.
// Fix: the requested count `nbt` was stored but never reported — include it
// in the message so the error is actionable (the leading text is unchanged,
// so substring/regex matches on "threads must be a positive integer" still pass).
zero_thread_requested_error::zero_thread_requested_error(unsigned nbt):
    arbor_exception(pprintf("threads must be a positive integer, but {} was requested", nbt)),
    nbt(nbt)
{}
bad_probe_id::bad_probe_id(cell_member_type probe_id):
arbor_exception(pprintf("bad probe id {}", probe_id)),
probe_id(probe_id)
......
......@@ -75,6 +75,20 @@ struct gj_unsupported_lid_selection_policy: arbor_exception {
cell_tag_type label;
};
// Context errors:
// Thrown when a context or proc_allocation is requested with zero threads;
// thread counts must be >= 1 (see proc_allocation_shim::set_num_threads).
struct zero_thread_requested_error: arbor_exception {
zero_thread_requested_error(unsigned nbt);
// The offending requested thread count.
unsigned nbt;
};
// Domain decomposition errors:
// NOTE(review): 'gj' presumably refers to gap junctions — thrown when a domain
// decomposition is unsupported for the connected cell pair; confirm against
// the throwing site.
struct gj_unsupported_domain_decomposition: arbor_exception {
gj_unsupported_domain_decomposition(cell_gid_type gid_0, cell_gid_type gid_1);
// The gids of the two cells involved in the offending connection.
cell_gid_type gid_0, gid_1;
};
// Simulation errors:
struct bad_event_time: arbor_exception {
......
......@@ -87,8 +87,7 @@ The Python wrapper provides an API for:
.. attribute:: threads
The number of CPU threads available, 1 by default.
The number of CPU threads available, 1 by default. Must be set to 1 at minimum.
.. attribute:: gpu_id
The identifier of the GPU to use.
......@@ -137,7 +136,8 @@ The Python wrapper provides an API for:
.. attribute:: threads
The number of threads available locally for execution, 1 by default.
The number of threads available locally for execution. Must be at least 1; 1 by default.
Passing ``"avail_threads"`` (as string) will query and use the maximum number of threads the system makes available.
.. attribute:: gpu_id
......
......@@ -45,7 +45,7 @@ set(pyarb_source
# unit tests of the C++ components in the Python wrapper.
add_library(pyarb_obj OBJECT ${pyarb_source})
set_target_properties(pyarb_obj PROPERTIES CXX_VISIBILITY_PRESET hidden)
target_link_libraries(pyarb_obj PRIVATE arbor arborio pybind11::module)
target_link_libraries(pyarb_obj PRIVATE arbor arborenv arborio pybind11::module)
# The Python library. MODULE will make a Python-exclusive model.
add_library(pyarb MODULE $<TARGET_OBJECTS:pyarb_obj>)
......@@ -58,7 +58,7 @@ set_target_properties(pyarb PROPERTIES PREFIX "${PYTHON_MODULE_PREFIX}" SUFFIX "
# This dependency has to be spelt out again, despite being added to
# pyarb_obj because CMake.
target_link_libraries(pyarb PRIVATE arbor arborio pybind11::module)
target_link_libraries(pyarb PRIVATE arbor arborenv arborio pybind11::module)
# Add support for mpi4py if available.
if (ARB_WITH_MPI)
......
......@@ -7,6 +7,8 @@
#include <arbor/context.hpp>
#include <arbor/version.hpp>
#include <arbor/arbexcept.hpp>
#include <arborenv/concurrency.hpp>
#include "context.hpp"
#include "conversion.hpp"
......@@ -34,9 +36,9 @@ std::ostream& operator<<(std::ostream& o, const context_shim& ctx) {
// A Python shim that holds the information that describes an arb::proc_allocation.
struct proc_allocation_shim {
std::optional<int> gpu_id = {};
int num_threads = 1;
unsigned long num_threads = 1;
proc_allocation_shim(int threads, pybind11::object gpu) {
proc_allocation_shim(unsigned threads, pybind11::object gpu) {
set_num_threads(threads);
set_gpu_id(gpu);
}
......@@ -48,13 +50,15 @@ struct proc_allocation_shim {
gpu_id = py2optional<int>(gpu, "gpu_id must be None, or a non-negative integer", is_nonneg());
};
void set_num_threads(int threads) {
pyarb::assert_throw([](int n) { return n>0; }(threads), "threads must be a positive integer");
void set_num_threads(unsigned threads) {
if (0==threads) {
throw arb::zero_thread_requested_error(threads);
}
num_threads = threads;
};
std::optional<int> get_gpu_id() const { return gpu_id; }
int get_num_threads() const { return num_threads; }
unsigned get_num_threads() const { return num_threads; }
bool has_gpu() const { return bool(gpu_id); }
// helper function to use arb::make_context(arb::proc_allocation)
......@@ -63,6 +67,34 @@ struct proc_allocation_shim {
}
};
// Build a context_shim from explicit resource choices.
//
// threads: number of local worker threads (assumed already validated > 0).
// gpu:     None, or a non-negative GPU id; rejected at runtime when Arbor is
//          built without GPU support.
// mpi:     None, a raw MPI communicator, or an mpi_comm_shim; rejected at
//          runtime when Arbor is built without MPI support.
//
// Fix: the no-GPU error message read "Attempt to set an GPU communicator" —
// a copy-paste of the MPI message with a grammar error; the gpu argument is
// an id, not a communicator.
context_shim create_context(unsigned threads, pybind11::object gpu, pybind11::object mpi) {
    const char* gpu_err_str = "gpu_id must be None, or a non-negative integer";
#ifndef ARB_GPU_ENABLED
    // GPU support was compiled out: any non-None gpu_id is an error.
    if (!gpu.is_none()) {
        throw pyarb_error("Attempt to set a GPU id but Arbor is not configured with GPU support.");
    }
#endif
    auto gpu_id = py2optional<int>(gpu, gpu_err_str, is_nonneg());
    // -1 signals "no GPU" to arb::proc_allocation.
    arb::proc_allocation alloc(threads, gpu_id.value_or(-1));
#ifndef ARB_MPI_ENABLED
    if (!mpi.is_none()) {
        throw pyarb_error("Attempt to set an MPI communicator but Arbor is not configured with MPI support.");
    }
#else
    const char* mpi_err_str = "mpi must be None, or an MPI communicator";
    // Accept either a raw mpi4py communicator or a wrapped mpi_comm_shim;
    // py2optional throws mpi_err_str for anything else that is not None.
    if (can_convert_to_mpi_comm(mpi)) {
        return context_shim(arb::make_context(alloc, convert_to_mpi_comm(mpi)));
    }
    if (auto c = py2optional<mpi_comm_shim>(mpi, mpi_err_str)) {
        return context_shim(arb::make_context(alloc, c->comm));
    }
#endif
    return context_shim(arb::make_context(alloc));
}
// Render a proc_allocation_shim as
// "<arbor.proc_allocation: threads N, gpu_id X>" for Python repr/printing.
std::ostream& operator<<(std::ostream& o, const proc_allocation_shim& alloc) {
    o << "<arbor.proc_allocation: threads " << alloc.num_threads
      << ", gpu_id " << util::to_string(alloc.gpu_id) << ">";
    return o;
}
......@@ -75,10 +107,10 @@ void register_contexts(pybind11::module& m) {
pybind11::class_<proc_allocation_shim> proc_allocation(m, "proc_allocation",
"Enumerates the computational resources on a node to be used for simulation.");
proc_allocation
.def(pybind11::init<int, pybind11::object>(),
.def(pybind11::init<unsigned, pybind11::object>(),
"threads"_a=1, "gpu_id"_a=pybind11::none(),
"Construct an allocation with arguments:\n"
" threads: The number of threads available locally for execution, 1 by default.\n"
" threads: The number of threads available locally for execution. Must be set to 1 at minimum. 1 by default.\n"
" gpu_id: The identifier of the GPU to use, None by default.\n")
.def_property("threads", &proc_allocation_shim::get_num_threads, &proc_allocation_shim::set_num_threads,
"The number of threads available locally for execution.")
......@@ -94,37 +126,26 @@ void register_contexts(pybind11::module& m) {
pybind11::class_<context_shim> context(m, "context", "An opaque handle for the hardware resources used in a simulation.");
context
.def(pybind11::init(
[](int threads, pybind11::object gpu, pybind11::object mpi){
const char* gpu_err_str = "gpu_id must be None, or a non-negative integer";
#ifndef ARB_GPU_ENABLED
if (!gpu.is_none()) {
throw pyarb_error("Attempt to set an GPU communicator but Arbor is not configured with GPU support.");
}
#endif
auto gpu_id = py2optional<int>(gpu, gpu_err_str, is_nonneg());
arb::proc_allocation alloc(threads, gpu_id.value_or(-1));
#ifndef ARB_MPI_ENABLED
if (!mpi.is_none()) {
throw pyarb_error("Attempt to set an MPI communicator but Arbor is not configured with MPI support.");
}
#else
const char* mpi_err_str = "mpi must be None, or an MPI communicator";
if (can_convert_to_mpi_comm(mpi)) {
return context_shim(arb::make_context(alloc, convert_to_mpi_comm(mpi)));
}
if (auto c = py2optional<mpi_comm_shim>(mpi, mpi_err_str)) {
return context_shim(arb::make_context(alloc, c->comm));
[](unsigned threads, pybind11::object gpu, pybind11::object mpi){
return create_context(threads, gpu, mpi);
}),
"threads"_a=1, "gpu_id"_a=pybind11::none(), "mpi"_a=pybind11::none(),
"Construct a distributed context with arguments:\n"
" threads: The number of threads available locally for execution. Must be set to 1 at minimum. 1 by default.\n"
" gpu_id: The identifier of the GPU to use, None by default. Only available if arbor.__config__['gpu']==True.\n"
" mpi: The MPI communicator, None by default. Only available if arbor.__config__['mpi']==True.\n")
.def(pybind11::init(
[](std::string threads, pybind11::object gpu, pybind11::object mpi){
if ("avail_threads" == threads) {
return create_context(arbenv::thread_concurrency(), gpu, mpi);
}
#endif
throw pyarb_error(
util::pprintf("Attempt to set threads to {}. The only valid thread options are a positive integer, or 'avail_threads'.", threads));
return context_shim(arb::make_context(alloc));
}),
"threads"_a=1, "gpu_id"_a=pybind11::none(), "mpi"_a=pybind11::none(),
"threads"_a, "gpu_id"_a=pybind11::none(), "mpi"_a=pybind11::none(),
"Construct a distributed context with arguments:\n"
" threads: The number of threads available locally for execution, 1 by default.\n"
" threads: A string option describing the number of threads. Currently, only \"avail_threads\" is supported.\n"
" gpu_id: The identifier of the GPU to use, None by default. Only available if arbor.__config__['gpu']==True.\n"
" mpi: The MPI communicator, None by default. Only available if arbor.__config__['mpi']==True.\n")
.def(pybind11::init(
......
......@@ -46,6 +46,7 @@ PYBIND11_MODULE(_arbor, m) {
// Translate Arbor errors -> Python exceptions.
pybind11::register_exception<arb::file_not_found_error>(m, "FileNotFoundError", PyExc_FileNotFoundError);
pybind11::register_exception<arb::zero_thread_requested_error>(m, "ValueError", PyExc_ValueError);
pyarb::register_cable_loader(m);
pyarb::register_cable_probes(m, global_ptr);
......
......@@ -44,7 +44,7 @@ class TestContexts(unittest.TestCase):
arb.proc_allocation(gpu_id = 'gpu_id')
with self.assertRaises(TypeError):
arb.proc_allocation(threads = 1.)
with self.assertRaisesRegex(RuntimeError,
with self.assertRaisesRegex(ValueError,
"threads must be a positive integer"):
arb.proc_allocation(threads = 0)
with self.assertRaises(TypeError):
......@@ -69,6 +69,16 @@ class TestContexts(unittest.TestCase):
self.assertEqual(ctx.ranks, 1)
self.assertEqual(ctx.rank, 0)
def test_context_avail_threads(self):
    # 'avail_threads' asks Arbor for the local maximum thread count,
    # which must always be at least 1 on any machine.
    ctx = arb.context(threads = 'avail_threads', gpu_id = None)
    self.assertFalse(ctx.has_mpi)
    self.assertFalse(ctx.has_gpu)
    # assertGreaterEqual (vs. assertTrue(x >= 1)) reports the actual
    # thread count on failure.
    self.assertGreaterEqual(ctx.threads, 1)
    self.assertEqual(ctx.ranks, 1)
    self.assertEqual(ctx.rank, 0)
def test_context_allocation(self):
alloc = arb.proc_allocation()
......
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment