From 7ec2088aa9a05cdcd05c0b2c8375c46645086049 Mon Sep 17 00:00:00 2001
From: akuesters <42005107+akuesters@users.noreply.github.com>
Date: Thu, 23 May 2019 09:25:09 +0200
Subject: [PATCH] Py feature context (#744)

+ Included conversion from `pybind11::object` to `arb::util::optional` for `arbor.proc_allocation().gpu_id`, and exception handling for `arbor.context(mpi=comm)`.

+ Cleaned up `test_event_generators.py`.

+ For discussion: how do we want to handle the strings? Include them in the appropriate `.cpp` files, or collect them in `strings.cpp/hpp`?
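
A sketch of the resulting Python-side behaviour (assuming an MPI-enabled build; error messages as introduced by this patch):

    import arbor

    # An allocation with four threads and no GPU.
    alloc = arbor.proc_allocation(threads=4, gpu_id=None)
    assert not alloc.has_gpu

    # gpu_id must be None or a non-negative integer.
    try:
        arbor.proc_allocation(gpu_id=-1)
    except RuntimeError as e:
        print(e)  # gpu id must be None, or a non-negative integer.

    # mpi must be None or an MPI communicator.
    comm = arbor.mpi_comm()
    ctx = arbor.context(alloc, mpi=comm)
    assert ctx.has_mpi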
---
 python/context.cpp                            | 156 ++++++++++++++----
 python/strings.cpp                            |  15 +-
 python/strings.hpp                            |   1 -
 python/test/unit/test_contexts.py             |  80 +++++----
 python/test/unit/test_event_generators.py     |  28 ++--
 .../unit_distributed/test_contexts_arbmpi.py  |  22 ++-
 .../unit_distributed/test_contexts_mpi4py.py  |  23 ++-
 7 files changed, 231 insertions(+), 94 deletions(-)

diff --git a/python/context.cpp b/python/context.cpp
index 61492e59..be043ec0 100644
--- a/python/context.cpp
+++ b/python/context.cpp
@@ -8,6 +8,8 @@
 #include <pybind11/pybind11.h>
 
 #include "context.hpp"
+#include "conversion.hpp"
+#include "error.hpp"
 #include "strings.hpp"
 
 #ifdef ARB_MPI_ENABLED
@@ -16,67 +18,155 @@
 
 namespace pyarb {
 
+namespace {
+auto is_nonneg_int = [](int n){ return n>=0; };
+}
+
+// A Python shim that holds the information that describes an arb::proc_allocation.
+struct proc_allocation_shim {
+    using opt_int = arb::util::optional<int>;
+
+    opt_int gpu_id = {};
+    int num_threads = 1;
+
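+    // Default construction: one thread, no GPU.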
+    proc_allocation_shim(): proc_allocation_shim(1, pybind11::none()) {}
+
+    proc_allocation_shim(int threads, pybind11::object gpu) {
+        set_num_threads(threads);
+        set_gpu_id(gpu);
+    }
+
+    // getters and setters (values are validated on assignment)
+    void set_gpu_id(pybind11::object gpu) {
+        gpu_id = py2optional<int>(gpu, "gpu id must be None, or a non-negative integer.", is_nonneg_int);
+    }
+
+    void set_num_threads(int threads) {
+        pyarb::assert_throw(threads>0, "threads must be a positive integer.");
+        num_threads = threads;
+    }
+
+    opt_int get_gpu_id()      const { return gpu_id; }
+    int     get_num_threads() const { return num_threads; }
+    bool    has_gpu()         const { return gpu_id.value_or(-1) >= 0; }
+
+    // helper function to use arb::make_context(arb::proc_allocation)
+    arb::proc_allocation allocation() const {
+        return arb::proc_allocation(num_threads, gpu_id.value_or(-1));
+    }
+};
+
+// Helper template for printing C++ optional types in Python.
+// Prints either the value, or None if optional value is not set.
+template <typename T>
+std::string to_string(const arb::util::optional<T>& o) {
+    if (!o) return "None";
+
+    std::stringstream s;
+    s << *o;
+    return s.str();
+}
+
+std::string proc_alloc_string(const proc_allocation_shim& a) {
+    std::stringstream s;
+    s << "<hardware resource allocation: threads " << a.num_threads
+      << ", gpu id " << to_string(a.gpu_id);
+    s << ">";
+    return s.str();
+}
+
+// helper functions to wrap arb::make_context
+arb::context make_context_shim(const proc_allocation_shim& alloc) {
+    return arb::make_context(alloc.allocation());
+}
+
+template <typename Comm>
+arb::context make_context_shim(const proc_allocation_shim& alloc, Comm comm) {
+    return arb::make_context(alloc.allocation(), comm);
+}
+
 void register_contexts(pybind11::module& m) {
     using namespace std::string_literals;
     using namespace pybind11::literals;
+    using opt_int = arb::util::optional<int>;
 
-    pybind11::class_<arb::proc_allocation> proc_allocation(m, "proc_allocation");
+    // proc_allocation
+    pybind11::class_<proc_allocation_shim> proc_allocation(m, "proc_allocation",
+        "Enumerates the computational resources on a node to be used for simulation.");
     proc_allocation
-        .def(pybind11::init<>())
-        .def(pybind11::init<int, int>(), "threads"_a, "gpu"_a=-1,
-             "Arguments:\n"
-             "  threads: The number of threads available locally for execution.\n"
-             "  gpu:     The index of the GPU to use, defaults to -1 for no GPU.\n")
-        .def_readwrite("threads", &arb::proc_allocation::num_threads,
+        .def(pybind11::init<int, pybind11::object>(),
+            "threads"_a=1, "gpu_id"_a=pybind11::none(),
+            "Construct an allocation with arguments:\n"
+            "  threads: The number of threads available locally for execution (default 1).\n"
+            "  gpu_id:  The index of the GPU to use (default None).\n")
+        .def_property("threads", &proc_allocation_shim::get_num_threads, &proc_allocation_shim::set_num_threads,
             "The number of threads available locally for execution.")
-        .def_readwrite("gpu_id", &arb::proc_allocation::gpu_id,
+        .def_property("gpu_id", &proc_allocation_shim::get_gpu_id, &proc_allocation_shim::set_gpu_id,
             "The identifier of the GPU to use.\n"
             "Corresponds to the integer index used to identify GPUs in CUDA API calls.")
-        .def_property_readonly("has_gpu", &arb::proc_allocation::has_gpu,
+        .def_property_readonly("has_gpu", &proc_allocation_shim::has_gpu,
             "Whether a GPU is being used (True/False).")
-        .def("__str__", &proc_allocation_string)
-        .def("__repr__", &proc_allocation_string);
+        .def("__str__", &proc_alloc_string)
+        .def("__repr__", &proc_alloc_string);
 
-    pybind11::class_<context_shim> context(m, "context");
+    // context
+    pybind11::class_<context_shim> context(m, "context", "An opaque handle for the hardware resources used in a simulation.");
     context
         .def(pybind11::init<>(
-            [](){return context_shim(arb::make_context());}))
+            [](){return context_shim(arb::make_context());}),
+            "Construct a local context with one thread, no GPU, no MPI by default.\n"
+            )
         .def(pybind11::init(
-            [](const arb::proc_allocation& alloc){return context_shim(arb::make_context(alloc));}),
+            [](const proc_allocation_shim& alloc){
+                return context_shim(make_context_shim(alloc));
+            }),
              "alloc"_a,
-             "Argument:\n"
+             "Construct a local context with argument:\n"
              "  alloc:   The computational resources to be used for the simulation.\n")
 #ifdef ARB_MPI_ENABLED
         .def(pybind11::init(
-            [](const arb::proc_allocation& alloc, mpi_comm_shim c){return context_shim(arb::make_context(alloc, c.comm));}),
-             "alloc"_a, "c"_a,
-             "Arguments:\n"
+            [](const proc_allocation_shim& alloc, pybind11::object mpi){
+                if (mpi.is_none()) {
+                    return context_shim(make_context_shim(alloc));
+                }
+                auto c = py2optional<mpi_comm_shim>(mpi,
+                        "mpi must be None, or an MPI communicator.");
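+                // mpi is not None here, so c is guaranteed to hold a value;
+                // value_or(MPI_COMM_WORLD) is purely defensive.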
+                auto comm = c.value_or(MPI_COMM_WORLD).comm;
+                return context_shim(make_context_shim(alloc, comm));
+            }),
+             "alloc"_a, "mpi"_a=pybind11::none(),
+             "Construct a distributed context with arguments:\n"
              "  alloc:   The computational resources to be used for the simulation.\n"
-             "  c:       The MPI communicator.\n")
+             "  mpi:     The MPI communicator (default None).\n")
         .def(pybind11::init(
             [](int threads, pybind11::object gpu, pybind11::object mpi){
-                arb::proc_allocation alloc(threads, gpu.is_none()? -1: pybind11::cast<int>(gpu));
+                opt_int gpu_id = py2optional<int>(gpu,
+                        "gpu id must be None, or a non-negative integer.", is_nonneg_int);
+                arb::proc_allocation alloc(threads, gpu_id.value_or(-1));
                 if (mpi.is_none()) {
                     return context_shim(arb::make_context(alloc));
                 }
-                auto& c = pybind11::cast<mpi_comm_shim&>(mpi);
-                return context_shim(arb::make_context(alloc, c.comm));
+                auto c = py2optional<mpi_comm_shim>(mpi,
+                        "mpi must be None, or an MPI communicator.");
+                auto comm = c.value_or(MPI_COMM_WORLD).comm;
+                return context_shim(arb::make_context(alloc, comm));
             }),
-             "threads"_a=1, "gpu"_a=pybind11::none(), "mpi"_a=pybind11::none(),
-             "Arguments:\n"
-             "  threads: The number of threads available locally for execution.\n"
-             "  gpu:     The index of the GPU to use, defaults to None for no GPU.\n"
-             "  mpi:     The MPI communicator, defaults to None for no MPI.\n")
+             "threads"_a=1, "gpu_id"_a=pybind11::none(), "mpi"_a=pybind11::none(),
+             "Construct a distributed context with arguments:\n"
+             "  threads: The number of threads available locally for execution (default 1).\n"
+             "  gpu_id:  The index of the GPU to use (default None).\n"
+             "  mpi:     The MPI communicator (default None).\n")
 #else
         .def(pybind11::init(
             [](int threads, pybind11::object gpu){
-                int gpu_id = gpu.is_none()? -1: pybind11::cast<int>(gpu);
-                return context_shim(arb::make_context(arb::proc_allocation(threads, gpu_id)));
+                opt_int gpu_id = py2optional<int>(gpu,
+                        "gpu id must be None, or a non-negative integer.", is_nonneg_int);
+                return context_shim(arb::make_context(arb::proc_allocation(threads, gpu_id.value_or(-1))));
             }),
-             "threads"_a=1, "gpu"_a=pybind11::none(),
-             "Arguments:\n"
-             "  threads: The number of threads available locally for execution.\n"
-             "  gpu:     The index of the GPU to use, defaults to None for no GPU.\n")
+             "threads"_a=1, "gpu_id"_a=pybind11::none(),
+             "Construct a local context with arguments:\n"
+             "  threads: The number of threads available locally for execution (default 1).\n"
+             "  gpu_id:  The index of the GPU to use (default None).\n")
 #endif
         .def_property_readonly("has_mpi", [](const context_shim& ctx){return arb::has_mpi(ctx.context);},
             "Whether the context uses MPI for distributed communication.")
diff --git a/python/strings.cpp b/python/strings.cpp
index 17887666..a2eb0e4e 100644
--- a/python/strings.cpp
+++ b/python/strings.cpp
@@ -21,23 +21,10 @@ std::string context_string(const arb::context& c) {
     const bool mpi = arb::has_mpi(c);
     s << "<context: threads " << arb::num_threads(c)
       << ", gpu " << (gpu? "yes": "None")
-      << ", distributed " << (mpi? "MPI": "Local")
+      << ", distributed " << (mpi? "MPI": "local")
       << " ranks " << arb::num_ranks(c)
       << ">";
     return s.str();
 }
 
-std::string proc_allocation_string(const arb::proc_allocation& a) {
-    std::stringstream s;
-    s << "<hardware resource allocation: threads " << a.num_threads << ", gpu ";
-    if (a.has_gpu()) {
-        s << a.gpu_id;
-    }
-    else {
-        s << "None";
-    }
-    s << ">";
-    return s.str();
-}
-
 } // namespace pyarb
diff --git a/python/strings.hpp b/python/strings.hpp
index dba70801..0e378af4 100644
--- a/python/strings.hpp
+++ b/python/strings.hpp
@@ -10,6 +10,5 @@ namespace pyarb {
 
 std::string cell_member_string(const arb::cell_member_type&);
 std::string context_string(const arb::context&);
-std::string proc_allocation_string(const arb::proc_allocation&);
 
 } // namespace pyarb
diff --git a/python/test/unit/test_contexts.py b/python/test/unit/test_contexts.py
index 6ec70a62..e007207f 100644
--- a/python/test/unit/test_contexts.py
+++ b/python/test/unit/test_contexts.py
@@ -20,48 +20,72 @@ all tests for non-distributed arb.context
 """
 
 class Contexts(unittest.TestCase):
-    def test_default_context(self):
-        ctx = arb.context()
-
-    def test_resources(self):
+    def test_default_allocation(self):
         alloc = arb.proc_allocation()
 
-        # test that by default proc_allocation has 1 thread and no GPU, no MPI
+        # test that by default proc_allocation has 1 thread and no GPU
         self.assertEqual(alloc.threads, 1)
+        self.assertEqual(alloc.gpu_id, None)
         self.assertFalse(alloc.has_gpu)
-        self.assertEqual(alloc.gpu_id, -1)
 
+    def test_set_allocation(self):
+        alloc = arb.proc_allocation()
+
+        # test changing allocation
         alloc.threads = 20
         self.assertEqual(alloc.threads, 20)
+        alloc.gpu_id = 0
+        self.assertEqual(alloc.gpu_id, 0)
+        self.assertTrue(alloc.has_gpu)
+        alloc.gpu_id = None
+        self.assertFalse(alloc.has_gpu)
 
-    def test_context(self):
-        alloc = arb.proc_allocation()
+    def test_exceptions_allocation(self):
+        with self.assertRaisesRegex(RuntimeError,
+            "gpu id must be None, or a non-negative integer."):
+            arb.proc_allocation(gpu_id = 1.)
+        with self.assertRaisesRegex(RuntimeError,
+            "gpu id must be None, or a non-negative integer."):
+            arb.proc_allocation(gpu_id = -1)
+        with self.assertRaisesRegex(RuntimeError,
+            "gpu id must be None, or a non-negative integer."):
+            arb.proc_allocation(gpu_id = 'gpu_id')
+        with self.assertRaises(TypeError):
+            arb.proc_allocation(threads = 1.)
+        with self.assertRaisesRegex(RuntimeError,
+            "threads must be a positive integer."):
+            arb.proc_allocation(threads = 0)
+        with self.assertRaises(TypeError):
+            arb.proc_allocation(threads = None)
 
-        ctx1 = arb.context()
+    def test_default_context(self):
+        ctx = arb.context()
 
-        self.assertEqual(ctx1.threads, alloc.threads)
-        self.assertEqual(ctx1.has_gpu, alloc.has_gpu)
+        # test that by default context has 1 thread and no GPU, no MPI
+        self.assertFalse(ctx.has_mpi)
+        self.assertFalse(ctx.has_gpu)
+        self.assertEqual(ctx.threads, 1)
+        self.assertEqual(ctx.ranks, 1)
+        self.assertEqual(ctx.rank, 0)
 
-        # default construction does not use GPU or MPI
-        self.assertEqual(ctx1.threads, 1)
-        self.assertFalse(ctx1.has_gpu)
-        self.assertFalse(ctx1.has_mpi)
-        self.assertEqual(ctx1.ranks, 1)
-        self.assertEqual(ctx1.rank, 0)
+    def test_context(self):
+        ctx = arb.context(threads = 42, gpu_id = None)
 
-        # change allocation
-        alloc.threads = 23
-        self.assertEqual(alloc.threads, 23)
-        alloc.gpu_id = -1
-        self.assertEqual(alloc.gpu_id, -1)
+        self.assertFalse(ctx.has_mpi)
+        self.assertFalse(ctx.has_gpu)
+        self.assertEqual(ctx.threads, 42)
+        self.assertEqual(ctx.ranks, 1)
+        self.assertEqual(ctx.rank, 0)
 
-        # test context construction with proc_allocation()
-        ctx2 = arb.context(alloc)
-        self.assertEqual(ctx2.threads, alloc.threads)
-        self.assertEqual(ctx2.has_gpu, alloc.has_gpu)
-        self.assertEqual(ctx2.ranks, 1)
-        self.assertEqual(ctx2.rank, 0)
+    def test_context_allocation(self):
+        alloc = arb.proc_allocation()
 
+        # test context construction with proc_allocation()
+        ctx = arb.context(alloc)
+        self.assertEqual(ctx.threads, alloc.threads)
+        self.assertEqual(ctx.has_gpu, alloc.has_gpu)
+        self.assertEqual(ctx.ranks, 1)
+        self.assertEqual(ctx.rank, 0)
 
 def suite():
     # specify class and test functions in tuple (here: all tests starting with 'test' from class Contexts
diff --git a/python/test/unit/test_event_generators.py b/python/test/unit/test_event_generators.py
index 9c7ef472..5febad17 100644
--- a/python/test/unit/test_event_generators.py
+++ b/python/test/unit/test_event_generators.py
@@ -52,17 +52,17 @@ class RegularSchedule(unittest.TestCase):
     def test_exceptions_regular_schedule(self):
         with self.assertRaisesRegex(RuntimeError,
             "tstart must a non-negative number, or None."):
-            arb.regular_schedule(tstart=-1.)
+            arb.regular_schedule(tstart = -1.)
         with self.assertRaisesRegex(RuntimeError,
             "dt must be a non-negative number."):
-            arb.regular_schedule(dt=-0.1)
+            arb.regular_schedule(dt = -0.1)
         with self.assertRaises(TypeError):
-            arb.regular_schedule(dt=None)
+            arb.regular_schedule(dt = None)
         with self.assertRaises(TypeError):
-            arb.regular_schedule(dt="dt")
+            arb.regular_schedule(dt = 'dt')
         with self.assertRaisesRegex(RuntimeError,
             "tstop must a non-negative number, or None."):
-            arb.regular_schedule(tstop='tstop')
+            arb.regular_schedule(tstop = 'tstop')
 
 class ExplicitSchedule(unittest.TestCase):
     def test_times_contor_explicit_schedule(self):
@@ -129,24 +129,24 @@ class PoissonSchedule(unittest.TestCase):
     def test_exceptions_poisson_schedule(self):
         with self.assertRaisesRegex(RuntimeError,
             "tstart must be a non-negative number."):
-            arb.poisson_schedule(tstart=-10.)
+            arb.poisson_schedule(tstart = -10.)
         with self.assertRaises(TypeError):
-            arb.poisson_schedule(tstart=None)
+            arb.poisson_schedule(tstart = None)
         with self.assertRaises(TypeError):
-            arb.poisson_schedule(tstart="tstart")
+            arb.poisson_schedule(tstart = 'tstart')
         with self.assertRaisesRegex(RuntimeError,
             "frequency must be a non-negative number."):
-            arb.poisson_schedule(freq=-100.)
+            arb.poisson_schedule(freq = -100.)
         with self.assertRaises(TypeError):
-            arb.poisson_schedule(freq="freq")
+            arb.poisson_schedule(freq = 'freq')
         with self.assertRaises(TypeError):
-            arb.poisson_schedule(seed=-1)
+            arb.poisson_schedule(seed = -1)
         with self.assertRaises(TypeError):
-            arb.poisson_schedule(seed=10.)
+            arb.poisson_schedule(seed = 10.)
         with self.assertRaises(TypeError):
-            arb.poisson_schedule(seed="seed")
+            arb.poisson_schedule(seed = 'seed')
         with self.assertRaises(TypeError):
-            arb.poisson_schedule(seed=None)
+            arb.poisson_schedule(seed = None)
 
 def suite():
     # specify class and test functions in tuple (here: all tests starting with 'test' from classes RegularSchedule, ExplicitSchedule and PoissonSchedule
diff --git a/python/test/unit_distributed/test_contexts_arbmpi.py b/python/test/unit_distributed/test_contexts_arbmpi.py
index 7698f8b1..05183078 100644
--- a/python/test/unit_distributed/test_contexts_arbmpi.py
+++ b/python/test/unit_distributed/test_contexts_arbmpi.py
@@ -40,19 +40,39 @@ class Contexts_arbmpi(unittest.TestCase):
     def test_initialized_arbmpi(self):
         self.assertTrue(arb.mpi_is_initialized())
 
-    def test_context_arbmpi(self):
+    def test_communicator_arbmpi(self):
         comm = arb.mpi_comm()
 
         # test that by default communicator is MPI_COMM_WORLD
         self.assertEqual(str(comm), '<mpi_comm: MPI_COMM_WORLD>')
 
+    def test_context_arbmpi(self):
+        comm = arb.mpi_comm()
+
         # test context with mpi
+        ctx = arb.context(mpi=comm)
+        self.assertTrue(ctx.has_mpi)
+
+    def test_context_allocation_arbmpi(self):
+        comm = arb.mpi_comm()
+
+        # test context with alloc and mpi
         alloc = arb.proc_allocation()
         ctx = arb.context(alloc, comm)
 
         self.assertEqual(ctx.threads, alloc.threads)
         self.assertTrue(ctx.has_mpi)
 
+    def test_exceptions_context_arbmpi(self):
+        alloc = arb.proc_allocation()
+
+        with self.assertRaisesRegex(RuntimeError,
+            "mpi must be None, or an MPI communicator."):
+            arb.context(mpi='MPI_COMM_WORLD')
+        with self.assertRaisesRegex(RuntimeError,
+            "mpi must be None, or an MPI communicator."):
+            arb.context(alloc, mpi=0)
+
     def test_finalized_arbmpi(self):
         self.assertFalse(arb.mpi_is_finalized())
 
diff --git a/python/test/unit_distributed/test_contexts_mpi4py.py b/python/test/unit_distributed/test_contexts_mpi4py.py
index be1dc248..44af8cb8 100644
--- a/python/test/unit_distributed/test_contexts_mpi4py.py
+++ b/python/test/unit_distributed/test_contexts_mpi4py.py
@@ -29,7 +29,7 @@ all tests for distributed arb.context using mpi4py
 # Only test class if env var ARB_WITH_MPI4PY=ON
 @unittest.skipIf(mpi_enabled == False or mpi4py_enabled == False, "MPI/mpi4py not enabled")
 class Contexts_mpi4py(unittest.TestCase):
-    def test_initialize_mpi4py(self):
+    def test_initialized_mpi4py(self):
         # test mpi initialization (automatically when including mpi4py: https://mpi4py.readthedocs.io/en/stable/mpi4py.run.html)
         self.assertTrue(mpi.Is_initialized())
 
@@ -42,14 +42,31 @@ class Contexts_mpi4py(unittest.TestCase):
     def test_context_mpi4py(self):
         comm = arb.mpi_comm_from_mpi4py(mpi.COMM_WORLD)
 
-        # test context with mpi usage
+        # test context with mpi
+        ctx = arb.context(mpi=comm)
+        self.assertTrue(ctx.has_mpi)
+
+    def test_context_allocation_mpi4py(self):
+        comm = arb.mpi_comm_from_mpi4py(mpi.COMM_WORLD)
+
+        # test context with alloc and mpi
         alloc = arb.proc_allocation()
         ctx = arb.context(alloc, comm)
 
         self.assertEqual(ctx.threads, alloc.threads)
         self.assertTrue(ctx.has_mpi)
 
-    def test_finalize_mpi4py(self):
+    def test_exceptions_context_mpi4py(self):
+        alloc = arb.proc_allocation()
+
+        with self.assertRaisesRegex(RuntimeError,
+            "mpi must be None, or an MPI communicator."):
+            arb.context(mpi='MPI_COMM_WORLD')
+        with self.assertRaisesRegex(RuntimeError,
+            "mpi must be None, or an MPI communicator."):
+            arb.context(alloc, mpi=0)
+
+    def test_finalized_mpi4py(self):
         # test mpi finalization (automatically when including mpi4py, but only just before the Python process terminates)
         self.assertFalse(mpi.Is_finalized())
 
-- 
GitLab