diff --git a/doc/python/hardware.rst b/doc/python/hardware.rst
index f1eb88fc7f4368a74c2b74f15aa4295871b4ccac..33c403ba10301eea6089e630de5287924e0da87e 100644
--- a/doc/python/hardware.rst
+++ b/doc/python/hardware.rst
@@ -8,6 +8,9 @@ Arbor provides two ways for working with hardware resources:
 * *Prescribe* the hardware resources and their contexts for use in Arbor simulations.
 * *Query* available hardware resources (e.g. the number of available GPUs), and initializing MPI.
 
+Note that to use some hardware features, for example MPI or a GPU, Arbor must be built and installed with support for that feature enabled.
+Please refer to the :ref:`installation guide <in_build_install>` for information on how to enable hardware support.
+
 Available resources
 -------------------
 
@@ -127,10 +130,6 @@ The Python wrapper provides an API for:
     provided by :class:`context`, instead they configure contexts, which are passed to
     Arbor interfaces for domain decomposition and simulation.
 
-    .. function:: context()
-
-        Construct a local context with one thread, no GPU, no MPI.
-
     .. function:: context(threads, gpu_id, mpi)
        :noindex:
 
@@ -144,20 +143,13 @@ The Python wrapper provides an API for:
 
            The identifier of the GPU to use, ``None`` by default.
            Must be ``None``, or a non-negative integer.
+           Can only be set when Arbor was built with GPU support.
 
        .. attribute:: mpi
 
-           The MPI communicator (see :class:`mpi_comm`).
-           mpi must be ``None``, or an MPI communicator.
-
-    .. function:: context(alloc)
-       :noindex:
-
-       Create a local context, with no distributed/MPI, that uses the local resources described by :class:`proc_allocation`.
-
-       .. attribute:: alloc
-
-           The computational resources, one thread and no GPU by default.
+           The MPI communicator (see :class:`mpi_comm`), ``None`` by default.
+           Must be ``None``, or an MPI communicator.
+           Can only be set when Arbor was built with MPI support.
 
     .. function:: context(alloc, mpi)
        :noindex:
@@ -171,22 +163,9 @@ The Python wrapper provides an API for:
 
        .. attribute:: mpi
 
-           The MPI communicator (see :class:`mpi_comm`).
+           The MPI communicator (see :class:`mpi_comm`), ``None`` by default.
           mpi must be ``None``, or an MPI communicator.
-
-    .. function:: context(threads, gpu_id)
-       :noindex:
-
-       Create a context that uses a set number of :attr:`threads` and the GPU with id :attr:`gpu_id`.
-
-       .. attribute:: threads
-
-           The number of threads available locally for execution, 1 by default.
-
-       .. attribute:: gpu_id
-
-           The identifier of the GPU to use, ``None`` by default.
-           Must be ``None``, or a non-negative integer.
+           Can only be set when Arbor was built with MPI support.
 
 Contexts can be queried for information about which features a context has enabled,
 whether it has a GPU, how many threads are in its thread pool.
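[Review note] For reference, a minimal sketch of the unified constructor the revised docs describe. It assumes an installed arbor package; has_gpu/has_mpi are the query properties bound in python/context.cpp below, arbor.__config__ is the build-configuration mapping referenced in the new docstrings, and the proc_allocation keywords are assumed to mirror those of context:

    import arbor

    # Defaults: one thread, no GPU, no MPI.
    ctx = arbor.context()

    # Request four threads; pass a GPU id only if this build enables GPU support.
    gpu = 0 if arbor.__config__['gpu'] else None
    ctx = arbor.context(threads=4, gpu_id=gpu)

    # Equivalent, via an explicit resource description.
    ctx = arbor.context(arbor.proc_allocation(threads=4, gpu_id=gpu))

    print(ctx.has_gpu, ctx.has_mpi)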
diff --git a/python/context.cpp b/python/context.cpp
index e8cd4dbcb61a0cd5365f5d69c2c1f45f13ce234b..4bf1e462239621d2b65f2dfaec8f636f94209de4 100644
--- a/python/context.cpp
+++ b/python/context.cpp
@@ -93,66 +93,67 @@ void register_contexts(pybind11::module& m) {
     // context
     pybind11::class_<context_shim> context(m, "context", "An opaque handle for the hardware resources used in a simulation.");
     context
-        .def(pybind11::init<>(
-            [](){return context_shim(arb::make_context());}),
-            "Construct a local context with one thread, no GPU, no MPI by default.\n"
-            )
-        .def(pybind11::init(
-            [](const proc_allocation_shim& alloc){
-                return context_shim(arb::make_context(alloc.allocation())); }),
-            "alloc"_a,
-            "Construct a local context with argument:\n"
-            " alloc: The computational resources to be used for the simulation.\n")
-#ifdef ARB_MPI_ENABLED
-        .def(pybind11::init(
-            [](proc_allocation_shim alloc, pybind11::object mpi){
-                const char* mpi_err_str = "mpi must be None, or an MPI communicator";
-                auto a = alloc.allocation(); // unwrap the C++ resource_allocation description
-                if (can_convert_to_mpi_comm(mpi)) {
-                    return context_shim(arb::make_context(a, convert_to_mpi_comm(mpi)));
-                }
-                if (auto c = py2optional<mpi_comm_shim>(mpi, mpi_err_str)) {
-                    return context_shim(arb::make_context(a, c->comm));
-                }
-                return context_shim(arb::make_context(a));
-            }),
-            "alloc"_a, "mpi"_a=pybind11::none(),
-            "Construct a distributed context with arguments:\n"
-            " alloc: The computational resources to be used for the simulation.\n"
-            " mpi: The MPI communicator, None by default.\n")
         .def(pybind11::init(
             [](int threads, pybind11::object gpu, pybind11::object mpi){
                 const char* gpu_err_str = "gpu_id must be None, or a non-negative integer";
-                const char* mpi_err_str = "mpi must be None, or an MPI communicator";
+
+#ifndef ARB_GPU_ENABLED
+                if (!gpu.is_none()) {
+                    throw pyarb_error("Attempt to set a GPU id but Arbor is not configured with GPU support.");
+                }
+#endif
                 auto gpu_id = py2optional<int>(gpu, gpu_err_str, is_nonneg());
                 arb::proc_allocation alloc(threads, gpu_id.value_or(-1));
+#ifndef ARB_MPI_ENABLED
+                if (!mpi.is_none()) {
+                    throw pyarb_error("Attempt to set an MPI communicator but Arbor is not configured with MPI support.");
+                }
+#else
+                const char* mpi_err_str = "mpi must be None, or an MPI communicator";
                 if (can_convert_to_mpi_comm(mpi)) {
                     return context_shim(arb::make_context(alloc, convert_to_mpi_comm(mpi)));
                 }
                 if (auto c = py2optional<mpi_comm_shim>(mpi, mpi_err_str)) {
                     return context_shim(arb::make_context(alloc, c->comm));
                 }
+#endif
                 return context_shim(arb::make_context(alloc));
             }),
             "threads"_a=1, "gpu_id"_a=pybind11::none(), "mpi"_a=pybind11::none(),
             "Construct a distributed context with arguments:\n"
             " threads: The number of threads available locally for execution, 1 by default.\n"
-            " gpu_id: The identifier of the GPU to use, None by default.\n"
-            " mpi: The MPI communicator, None by default.\n")
-#else
+            " gpu_id: The identifier of the GPU to use, None by default. Only available if arbor.__config__['gpu']==True.\n"
+            " mpi: The MPI communicator, None by default. Only available if arbor.__config__['mpi']==True.\n")
        .def(pybind11::init(
-            [](int threads, pybind11::object gpu){
-                auto gpu_id = py2optional<int>(gpu, "gpu_id must be None, or a non-negative integer", is_nonneg());
-                return context_shim(arb::make_context(arb::proc_allocation(threads, gpu_id.value_or(-1))));
-            }),
-            "threads"_a=1, "gpu_id"_a=pybind11::none(),
-            "Construct a local context with arguments:\n"
-            " threads: The number of threads available locally for execution, 1 by default.\n"
-            " gpu_id: The identifier of the GPU to use, None by default.\n")
+            [](proc_allocation_shim alloc, pybind11::object mpi){
+                auto a = alloc.allocation(); // unwrap the C++ resource_allocation description
+#ifndef ARB_GPU_ENABLED
+                if (a.has_gpu()) {
+                    throw pyarb_error("Attempt to set a GPU id but Arbor is not configured with GPU support.");
+                }
+#endif
+#ifndef ARB_MPI_ENABLED
+                if (!mpi.is_none()) {
+                    throw pyarb_error("Attempt to set an MPI communicator but Arbor is not configured with MPI support.");
+                }
+#else
+                const char* mpi_err_str = "mpi must be None, or an MPI communicator";
+                if (can_convert_to_mpi_comm(mpi)) {
+                    return context_shim(arb::make_context(a, convert_to_mpi_comm(mpi)));
+                }
+                if (auto c = py2optional<mpi_comm_shim>(mpi, mpi_err_str)) {
+                    return context_shim(arb::make_context(a, c->comm));
+                }
 #endif
+                return context_shim(arb::make_context(a));
+            }),
+            "alloc"_a, "mpi"_a=pybind11::none(),
+            "Construct a distributed context with arguments:\n"
+            " alloc: The computational resources to be used for the simulation.\n"
+            " mpi: The MPI communicator, None by default. Only available if arbor.__config__['mpi']==True.\n")
         .def_property_readonly("has_mpi", [](const context_shim& ctx){return arb::has_mpi(ctx.context);},
             "Whether the context uses MPI for distributed communication.")
         .def_property_readonly("has_gpu", [](const context_shim& ctx){return arb::has_gpu(ctx.context);},
diff --git a/python/mpi.cpp b/python/mpi.cpp
index d3de709f86c1116d7fb41300be4a744b625f75e0..1661a42dcf811af0a6c96db142b9d611303813d0 100644
--- a/python/mpi.cpp
+++ b/python/mpi.cpp
@@ -34,7 +34,7 @@ MPI_Comm convert_to_mpi_comm(pybind11::object o) {
         return *PyMPIComm_Get(o.ptr());
     }
 #endif
-    throw arb::mpi_error(MPI_ERR_OTHER, "Unable to convert to an MPI Communicatior");
+    throw arb::mpi_error(MPI_ERR_OTHER, "Invalid MPI communicator");
 }
 
 mpi_comm_shim::mpi_comm_shim(pybind11::object o) {
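[Review note] A companion sketch of the MPI path exercised by the new checks. It assumes an Arbor build with MPI (and mpi4py conversion) support enabled, and the optional mpi4py package; the guard on arbor.__config__ mirrors the new docstrings:

    import arbor

    if arbor.__config__['mpi']:
        # mpi4py initializes MPI on import; its communicators are
        # accepted by the bindings via convert_to_mpi_comm.
        from mpi4py import MPI
        ctx = arbor.context(threads=4, mpi=MPI.COMM_WORLD)
    else:
        # With this patch, passing a non-None mpi (or a gpu_id) to a build
        # without the corresponding support raises an error instead of
        # being silently ignored.
        ctx = arbor.context(threads=4)

    print(ctx.has_mpi)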