diff --git a/.gitmodules b/.gitmodules
index 177f10db7189693a304db5fb457aa022afb1c76f..5c44019564397a2a554e435bbe222fc45358b94f 100644
--- a/.gitmodules
+++ b/.gitmodules
@@ -4,3 +4,6 @@
 [submodule "sphinx_rtd_theme"]
 	path = ext/sphinx_rtd_theme
 	url = https://github.com/rtfd/sphinx_rtd_theme.git
+[submodule "python/pybind11"]
+	path = python/pybind11
+	url = https://github.com/pybind/pybind11.git
diff --git a/.ycm_extra_conf.py b/.ycm_extra_conf.py
index 7f04ca2ea73382b5ad271071dc06bfb25a3f580f..f3ea3705c154a4978ab0d580ac9df58f502ebf71 100644
--- a/.ycm_extra_conf.py
+++ b/.ycm_extra_conf.py
@@ -55,6 +55,12 @@ flags = [
     '-I',
     'build/include',
     '-I',
+    '/cm/shared/apps/cuda/8.0.44/include', # TODO: run a command to find this on "any" system
+    '-I',
+    'python/pybind11/include',
+    '-I',
+    '/usr/include/python3.6m', # TODO: run a command to find this on "any" system
+    '-I',
     'sup/include',
 ]
 
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 629661359dab1e04aa48c7b1409e7960df6af0c2..2cb8256bb7ac2b3c969f14dc37dd36acdad49a52 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -53,6 +53,12 @@ option(ARB_WITH_PROFILING "use built-in profiling" OFF)
 option(ARB_WITH_ASSERTIONS "enable arb_assert() assertions in code" OFF)
 
 
+#----------------------------------------------------------
+# Python front end for Arbor:
+#----------------------------------------------------------
+
+option(ARB_WITH_PYTHON "enable Python front end" OFF)
+
 #----------------------------------------------------------
 # Global CMake configuration
 #----------------------------------------------------------
@@ -179,6 +185,15 @@ if(ARB_WITH_ASSERTIONS)
     target_compile_definitions(arbor-config-defs INTERFACE ARB_HAVE_ASSERTIONS)
 endif()
 
+# Python bindings
+#----------------------------------------------------------
+option(ARB_WITH_PYTHON "enable python front end" OFF) # NOTE(review): duplicate of the ARB_WITH_PYTHON option declared in the options section above; remove one and unify the description strings.
+if(ARB_WITH_PYTHON)
+    # Required to link the dynamic libraries for python modules.
+    # Effectively adds '-fpic' flag to CXX_FLAGS.
+    set(CMAKE_POSITION_INDEPENDENT_CODE ON)
+endif()
+
 # Threading model
 #-----------------
 
@@ -354,7 +369,7 @@ add_subdirectory(arborenv)
 # unit, unit-mpi, unit-local, unit-modcc, validate
 add_subdirectory(test)
 
-# miniapp, brunel-minapp, event-gen 
+# miniapp, brunel-minapp, event-gen:
 add_subdirectory(example)
 
 # lmorpho:
@@ -365,9 +380,14 @@ add_subdirectory(doc)
 
 # validation-data:
 if(ARB_BUILD_VALIDATION_DATA)
-    add_subdirectory(validation) # validation-data
+    add_subdirectory(validation)
 endif()
 
+# python interface:
+if (ARB_WITH_PYTHON)
+    add_subdirectory(python)
+endif ()
+
 #----------------------------------------------------------
 # Generate CMake config/version files for install.
 #----------------------------------------------------------
diff --git a/arbor/include/arbor/domain_decomposition.hpp b/arbor/include/arbor/domain_decomposition.hpp
index 4b5d6e9d7624c8ab623cf84e5eab9ebb30abd3d1..ebc7e34bb3dbabf5a134664c3832ca28eca1a51f 100644
--- a/arbor/include/arbor/domain_decomposition.hpp
+++ b/arbor/include/arbor/domain_decomposition.hpp
@@ -13,13 +13,13 @@ namespace arb {
 /// Metadata for a local cell group.
 struct group_description {
     /// The kind of cell in the group. All cells in a cell_group have the same type.
-    const cell_kind kind;
+    cell_kind kind;
 
     /// The gids of the cells in the cell_group, sorted in ascending order.
-    const std::vector<cell_gid_type> gids;
+    std::vector<cell_gid_type> gids;
 
     /// The back end on which the cell_group is to run.
-    const backend_kind backend;
+    backend_kind backend;
 
     group_description(cell_kind k, std::vector<cell_gid_type> g, backend_kind b):
         kind(k), gids(std::move(g)), backend(b)
diff --git a/arbor/include/arbor/event_generator.hpp b/arbor/include/arbor/event_generator.hpp
index 247c53a3a0c448afcac44f7aa9b287617d988234..ecd0e476a9dff5bb6367b3b25f1bd5aca59af301 100644
--- a/arbor/include/arbor/event_generator.hpp
+++ b/arbor/include/arbor/event_generator.hpp
@@ -164,14 +164,22 @@ private:
 // Convenience routines for making schedule_generator:
 
 inline event_generator regular_generator(
-    cell_member_type target, float weight, time_type tstart, time_type dt, time_type tstop=terminal_time)
+    cell_member_type target,
+    float weight,
+    time_type tstart,
+    time_type dt,
+    time_type tstop=terminal_time)
 {
     return schedule_generator(target, weight, regular_schedule(tstart, dt, tstop));
 }
 
 template <typename RNG>
 inline event_generator poisson_generator(
-    cell_member_type target, float weight, time_type tstart, time_type rate_kHz, const RNG& rng)
+    cell_member_type target,
+    float weight,
+    time_type tstart,
+    time_type rate_kHz,
+    const RNG& rng)
 {
     return schedule_generator(target, weight, poisson_schedule(tstart, rate_kHz, rng));
 }
diff --git a/arbor/include/arbor/profile/meter_manager.hpp b/arbor/include/arbor/profile/meter_manager.hpp
index c264660e7f306cc94c5c342796f1ddbe3b1687d3..a2da58b0d44f81a363ad5545175c6f13d8ed6bf0 100644
--- a/arbor/include/arbor/profile/meter_manager.hpp
+++ b/arbor/include/arbor/profile/meter_manager.hpp
@@ -47,7 +47,6 @@ public:
     const std::vector<std::string>& checkpoint_names() const;
     const std::vector<double>& times() const;
 
-    const context& ctx() const;
 };
 
 // Simple type for gathering distributed meter information
diff --git a/arbor/include/arbor/schedule.hpp b/arbor/include/arbor/schedule.hpp
index 4a9213e0be60fb0b935b689017618a3092e80951..15e8d62b3152f1c1a86a6eff0280f39103d9e420 100644
--- a/arbor/include/arbor/schedule.hpp
+++ b/arbor/include/arbor/schedule.hpp
@@ -121,7 +121,9 @@ private:
 };
 
 inline schedule regular_schedule(
-    time_type t0, time_type dt, time_type t1 = std::numeric_limits<time_type>::max())
+    time_type t0,
+    time_type dt,
+    time_type t1 = std::numeric_limits<time_type>::max())
 {
     return schedule(regular_schedule_impl(t0, dt, t1));
 }
diff --git a/cmake/FindPythonModule.cmake b/cmake/FindPythonModule.cmake
new file mode 100644
index 0000000000000000000000000000000000000000..90484e4305d369613c22b9a819e904b59c75c939
--- /dev/null
+++ b/cmake/FindPythonModule.cmake
@@ -0,0 +1,29 @@
+# https://cmake.org/pipermail/cmake/2011-January/041666.html
+include(FindPackageHandleStandardArgs)
+function(find_python_module module)
+    string(TOUPPER ${module} module_upper)
+
+    if(NOT PY_${module_upper})
+        if(ARGC GREATER 1 AND ARGV1 STREQUAL "REQUIRED")
+            set(${module}_FIND_REQUIRED TRUE)
+        endif()
+
+        # A module's location is usually a directory, but for binary modules
+        # it's a .so file.
+        execute_process(COMMAND "${PYTHON_EXECUTABLE}" "-c"
+            "import re, ${module}; print(re.compile('/__init__.py.*').sub('',${module}.__file__))"
+            RESULT_VARIABLE _${module}_status
+            OUTPUT_VARIABLE _${module}_location
+            ERROR_QUIET OUTPUT_STRIP_TRAILING_WHITESPACE)
+
+        if(NOT _${module}_status)
+            set(HAVE_${module_upper} ON CACHE INTERNAL "Python module available")
+            set(PY_${module_upper} ${_${module}_location} CACHE STRING "Location of Python module ${module}")
+        else()
+            set(HAVE_${module_upper} OFF CACHE INTERNAL "Python module available")
+        endif()
+
+    endif(NOT PY_${module_upper})
+
+    find_package_handle_standard_args(PY_${module} DEFAULT_MSG PY_${module_upper})
+endfunction(find_python_module)
diff --git a/doc/index.rst b/doc/index.rst
index a025536d0667f947964e1e5e58afb603e05532d2..b068806899a2d4c4a745591fc30a7279d6ebfa17 100644
--- a/doc/index.rst
+++ b/doc/index.rst
@@ -42,11 +42,6 @@ Some key features include:
 
    install
 
-.. toctree::
-   :caption: Users:
-
-   users
-
 .. toctree::
    :caption: C++ API:
 
diff --git a/doc/users.rst b/doc/users.rst
deleted file mode 100644
index d344fd237659f260ff94f3d6bba070ae71724f1c..0000000000000000000000000000000000000000
--- a/doc/users.rst
+++ /dev/null
@@ -1,5 +0,0 @@
-Using Arbor
-##############
-
-
-Introduction to the user guide, with examples and detailed descriptions of features goes here.
diff --git a/python/CMakeLists.txt b/python/CMakeLists.txt
new file mode 100644
index 0000000000000000000000000000000000000000..1ab9e95c09d022f31be16bf8fc7eda5099536456
--- /dev/null
+++ b/python/CMakeLists.txt
@@ -0,0 +1,46 @@
+include(FindPythonModule) # required for find_python_module
+
+# Set up pybind11 as an external project.
+set(pb11_src_dir "${PROJECT_SOURCE_DIR}/python/pybind11")
+check_git_submodule(pybind11 "${pb11_src_dir}")
+
+if(NOT pybind11_avail)
+    message(FATAL_ERROR "The git submodule for pybind11 is not available, required for python support")
+endif()
+
+# Set up pybind11, which is used to generate Python bindings.
+# Pybind11 has good cmake support, so just add the pybind11 directory,
+# instead of using find_package.
+set(PYBIND11_CPP_STANDARD -std=c++14)
+add_subdirectory(pybind11)
+
+# The Python library. MODULE will make a Python-exclusive model.
+add_library(pyarb MODULE
+    context.cpp
+    exception.cpp
+    mpi.cpp
+    pyarb.cpp
+    strings.cpp
+)
+
+target_link_libraries(pyarb PRIVATE arbor pybind11::module)
+
+# The output name of the pyarb .so file is "arbor", to facilitate "import arbor"
+set_target_properties(pyarb PROPERTIES OUTPUT_NAME arbor)
+# With this, the full name of the library will be something like:
+#   arbor.cpython-37m-x86_64-linux-gnu.so
+set_target_properties(pyarb PROPERTIES PREFIX "${PYTHON_MODULE_PREFIX}" SUFFIX "${PYTHON_MODULE_EXTENSION}")
+
+# Add support for mpi4py if available.
+if (ARB_WITH_MPI)
+    find_python_module(mpi4py)
+    if (HAVE_MPI4PY)
+        target_include_directories(pyarb PRIVATE "${PY_MPI4PY}/include")
+        target_compile_definitions(pyarb PRIVATE -DARB_WITH_MPI4PY)
+    endif()
+endif()
+
+# Determine the installation path, according to the Python version.
+find_package(PythonInterp REQUIRED)
+set(ARB_PYEXECDIR "${CMAKE_INSTALL_LIBDIR}/python${PYTHON_VERSION_MAJOR}.${PYTHON_VERSION_MINOR}/site-packages")
+install(TARGETS pyarb LIBRARY DESTINATION ${ARB_PYEXECDIR})
diff --git a/python/context.cpp b/python/context.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..b3ddb8c8ba63f9003b3b4b00f26078a2b2163988
--- /dev/null
+++ b/python/context.cpp
@@ -0,0 +1,96 @@
+#include <iostream>
+
+#include <sstream>
+#include <string>
+
+#include <arbor/context.hpp>
+#include <arbor/version.hpp>
+
+#include "context.hpp"
+#include "strings.hpp"
+
+#include <pybind11/pybind11.h>
+
+#ifdef ARB_MPI_ENABLED
+#include "mpi.hpp"
+#endif
+
+namespace pyarb {
+
+void register_contexts(pybind11::module& m) {
+    using namespace std::string_literals;
+    using namespace pybind11::literals;
+
+    pybind11::class_<arb::proc_allocation> proc_allocation(m, "proc_allocation");
+    proc_allocation
+        .def(pybind11::init<>())
+        .def(pybind11::init<int, int>(), "threads"_a, "gpu"_a=-1,
+             "Arguments:\n"
+             "  threads: The number of threads available locally for execution.\n"
+             "  gpu:     The index of the GPU to use, defaults to -1 for no GPU.\n")
+        .def_readwrite("threads", &arb::proc_allocation::num_threads,
+            "The number of threads available locally for execution.")
+        .def_readwrite("gpu_id", &arb::proc_allocation::gpu_id,
+            "The identifier of the GPU to use.\n"
+            "Corresponds to the integer index used to identify GPUs in CUDA API calls.")
+        .def_property_readonly("has_gpu", &arb::proc_allocation::has_gpu,
+            "Whether a GPU is being used (True/False).")
+        .def("__str__", &proc_allocation_string)
+        .def("__repr__", &proc_allocation_string);
+
+    pybind11::class_<context_shim> context(m, "context");
+    context
+        .def(pybind11::init<>(
+            [](){return context_shim(arb::make_context());}))
+        .def(pybind11::init(
+            [](const arb::proc_allocation& alloc){return context_shim(arb::make_context(alloc));}),
+             "alloc"_a,
+             "Argument:\n"
+             "  alloc:   The computational resources to be used for the simulation.\n")
+#ifdef ARB_MPI_ENABLED
+        .def(pybind11::init(
+            [](const arb::proc_allocation& alloc, mpi_comm_shim c){return context_shim(arb::make_context(alloc, c.comm));}),
+             "alloc"_a, "c"_a,
+             "Arguments:\n"
+             "  alloc:   The computational resources to be used for the simulation.\n"
+             "  c:       The MPI communicator.\n")
+        .def(pybind11::init(
+            [](int threads, pybind11::object gpu, pybind11::object mpi){
+                arb::proc_allocation alloc(threads, gpu.is_none()? -1: pybind11::cast<int>(gpu));
+                if (mpi.is_none()) {
+                    return context_shim(arb::make_context(alloc));
+                }
+                auto& c = pybind11::cast<mpi_comm_shim&>(mpi);
+                return context_shim(arb::make_context(alloc, c.comm));
+            }),
+             "threads"_a=1, "gpu"_a=pybind11::none(), "mpi"_a=pybind11::none(),
+             "Arguments:\n"
+             "  threads: The number of threads available locally for execution.\n"
+             "  gpu:     The index of the GPU to use, defaults to None for no GPU.\n"
+             "  mpi:     The MPI communicator, defaults to None for no MPI.\n")
+#else
+        .def(pybind11::init(
+            [](int threads, pybind11::object gpu){
+                int gpu_id = gpu.is_none()? -1: pybind11::cast<int>(gpu);
+                return context_shim(arb::make_context(arb::proc_allocation(threads, gpu_id)));
+            }),
+             "threads"_a=1, "gpu"_a=pybind11::none(),
+             "Arguments:\n"
+             "  threads: The number of threads available locally for execution.\n"
+             "  gpu:     The index of the GPU to use, defaults to None for no GPU.\n")
+#endif
+        .def_property_readonly("has_mpi", [](const context_shim& ctx){return arb::has_mpi(ctx.context);},
+            "Whether the context uses MPI for distributed communication.")
+        .def_property_readonly("has_gpu", [](const context_shim& ctx){return arb::has_gpu(ctx.context);},
+            "Whether the context has a GPU.")
+        .def_property_readonly("threads", [](const context_shim& ctx){return arb::num_threads(ctx.context);},
+            "The number of threads in the context's thread pool.")
+        .def_property_readonly("ranks", [](const context_shim& ctx){return arb::num_ranks(ctx.context);},
+            "The number of distributed domains (equivalent to the number of MPI ranks).")
+        .def_property_readonly("rank", [](const context_shim& ctx){return arb::rank(ctx.context);},
+            "The numeric id of the local domain (equivalent to MPI rank).")
+        .def("__str__", [](const context_shim& c){return context_string(c.context);})
+        .def("__repr__", [](const context_shim& c){return context_string(c.context);});
+}
+
+} // namespace pyarb
diff --git a/python/context.hpp b/python/context.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..8fff4bc3b2a002148f5cc65de85f00d12158982c
--- /dev/null
+++ b/python/context.hpp
@@ -0,0 +1,12 @@
+#pragma once
+
+#include <arbor/context.hpp>
+
+namespace pyarb {
+
+struct context_shim {
+    arb::context context;
+    context_shim(arb::context&& c): context(std::move(c)) {}
+};
+
+} // namespace pyarb
diff --git a/python/exception.cpp b/python/exception.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..3be0e787a3ee78294aedd7e90242e500f413c8c0
--- /dev/null
+++ b/python/exception.cpp
@@ -0,0 +1,11 @@
+#include <string>
+
+#include "exception.hpp"
+
+namespace pyarb {
+
+python_error::python_error(const std::string& message):
+    arbor_exception("arbor python wrapper error: " + message + "\n")
+{}
+
+} // namespace pyarb
diff --git a/python/exception.hpp b/python/exception.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..5f352154b8597ecbb4e8e8a245a76891ee4928da
--- /dev/null
+++ b/python/exception.hpp
@@ -0,0 +1,17 @@
+#pragma once
+
+#include <string>
+
+#include <arbor/arbexcept.hpp>
+
+namespace pyarb {
+
+using arb::arbor_exception;
+
+// Python wrapper errors
+
+struct python_error: arbor_exception {
+    explicit python_error(const std::string& message);
+};
+
+} // namespace pyarb
diff --git a/python/mpi.cpp b/python/mpi.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..f075575758fb2d963a87be1fee037e6c46639681
--- /dev/null
+++ b/python/mpi.cpp
@@ -0,0 +1,123 @@
+#include <sstream>
+#include <string>
+
+#include <arbor/version.hpp>
+
+#include <pybind11/pybind11.h>
+
+#ifdef ARB_MPI_ENABLED
+#include <arbor/communication/mpi_error.hpp>
+
+#include <mpi.h>
+
+#include "mpi.hpp"
+
+#ifdef ARB_WITH_MPI4PY
+#include <mpi4py/mpi4py.h>
+#endif
+#endif
+
+namespace pyarb {
+
+// Some helper functions to determine how Arbor was compiled
+bool mpi_compiled() {
+    #ifdef ARB_MPI_ENABLED
+    return true;
+    #else
+    return false;
+    #endif
+}
+
+bool mpi4py_compiled() {
+    #ifdef ARB_WITH_MPI4PY
+    return true;
+    #else
+    return false;
+    #endif
+}
+
+void register_queryenvvars(pybind11::module& m) {
+    using namespace std::string_literals;
+    
+    m.def("mpi_compiled", &mpi_compiled, "Check if Arbor was compiled with MPI.");
+    m.def("mpi4py_compiled", &mpi4py_compiled, "Check if Arbor was compiled with MPI4PY.");
+}
+    
+#ifdef ARB_MPI_ENABLED
+#ifdef ARB_WITH_MPI4PY
+
+mpi_comm_shim comm_from_mpi4py(pybind11::object& o) {
+    import_mpi4py();
+
+    // If object is already a mpi4py communicator, return
+    if (PyObject_TypeCheck(o.ptr(), &PyMPIComm_Type)) {
+        return mpi_comm_shim(*PyMPIComm_Get(o.ptr()));
+    }
+    throw arb::mpi_error(MPI_ERR_OTHER, "The argument is not an mpi4py communicator");
+}
+
+#endif
+
+// Some helper functions for initializing and finalizing MPI.
+// Arbor requires at least MPI_THREAD_SERIALIZED, because the communication task
+// that uses MPI can be run on any thread, and there will never be more than one
+// concurrent communication task.
+
+void mpi_init() {
+    int provided = MPI_THREAD_SINGLE;
+    int ev = MPI_Init_thread(nullptr, nullptr, MPI_THREAD_SERIALIZED, &provided);
+    if (ev) {
+        throw arb::mpi_error(ev, "MPI_Init_thread");
+    }
+    else if (provided<MPI_THREAD_SERIALIZED) {
+        throw arb::mpi_error(MPI_ERR_OTHER, "MPI_Init_thread: MPI_THREAD_SERIALIZED unsupported");
+    }
+}
+
+void mpi_finalize() {
+    MPI_Finalize();
+}
+
+int mpi_is_initialized() {
+    int initialized;
+    MPI_Initialized(&initialized);
+    return initialized; 
+}
+
+int mpi_is_finalized() {
+    int finalized;
+    MPI_Finalized(&finalized);
+    return finalized;
+}
+// Define the stringifier for mpi_comm_shim here, to minimise the ifdefication
+// elsewhere in this wrapper code.
+
+std::string mpi_comm_string(const mpi_comm_shim& c) {
+    std::stringstream s;
+
+    s << "<mpi_comm: ";
+    if (c.comm==MPI_COMM_WORLD) s << "MPI_COMM_WORLD>";
+    else s << c.comm << ">";
+    return s.str();
+}
+
+void register_mpi(pybind11::module& m) {
+    using namespace std::string_literals;
+
+    pybind11::class_<mpi_comm_shim> mpi_comm(m, "mpi_comm");
+    mpi_comm
+        .def(pybind11::init<>())
+        .def("__str__", &mpi_comm_string)
+        .def("__repr__", &mpi_comm_string);
+
+    m.def("mpi_init", &mpi_init, "Initialize MPI with MPI_THREAD_SERIALIZED, as required by Arbor.");
+    m.def("mpi_finalize", &mpi_finalize, "Finalize MPI (calls MPI_Finalize)");
+    m.def("mpi_is_initialized", &mpi_is_initialized, "Check if MPI is initialized.");
+    m.def("mpi_is_finalized", &mpi_is_finalized, "Check if MPI is finalized.");
+
+    #ifdef ARB_WITH_MPI4PY
+    m.def("mpi_comm_from_mpi4py", comm_from_mpi4py);
+    #endif
+}
+#endif
+} // namespace pyarb
diff --git a/python/mpi.hpp b/python/mpi.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..61ba646c34461742c760ba7418a6d78e1e40bd00
--- /dev/null
+++ b/python/mpi.hpp
@@ -0,0 +1,20 @@
+#pragma once
+
+#ifdef ARB_MPI_ENABLED
+#include <mpi.h>
+
+namespace pyarb {
+// A shim is required for MPI_Comm, because OpenMPI defines it as a pointer to
+// a forward-declared type, which pybind11 won't allow as an argument.
+// MPICH and its derivatives use an integer.
+
+struct mpi_comm_shim {
+    MPI_Comm comm = MPI_COMM_WORLD;
+
+    mpi_comm_shim() = default;
+    mpi_comm_shim(MPI_Comm c): comm(c) {}
+};
+
+} // namespace pyarb
+#endif
+
diff --git a/python/pyarb.cpp b/python/pyarb.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..8f3be8f09b89bb44d06816b212392dc051c9ef74
--- /dev/null
+++ b/python/pyarb.cpp
@@ -0,0 +1,26 @@
+#include <arbor/version.hpp>
+
+#include <pybind11/pybind11.h>
+
+// Forward declarations of functions used to register API
+// types and functions to be exposed to Python.
+namespace pyarb {
+
+void register_contexts(pybind11::module& m);
+void register_queryenvvars(pybind11::module& m);
+#ifdef ARB_MPI_ENABLED
+void register_mpi(pybind11::module& m);
+#endif
+
+}
+
+PYBIND11_MODULE(arbor, m) {
+    m.doc() = "arbor: Python bindings for Arbor.";
+    m.attr("__version__") = ARB_VERSION;
+
+    pyarb::register_contexts(m);
+    pyarb::register_queryenvvars(m);
+    #ifdef ARB_MPI_ENABLED
+    pyarb::register_mpi(m);
+    #endif
+}
diff --git a/python/pybind11 b/python/pybind11
new file mode 160000
index 0000000000000000000000000000000000000000..e2b884c33bcde70b2ea562ffa52dd7ebee276d50
--- /dev/null
+++ b/python/pybind11
@@ -0,0 +1 @@
+Subproject commit e2b884c33bcde70b2ea562ffa52dd7ebee276d50
diff --git a/python/strings.cpp b/python/strings.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..abcb06499d4930996452874c355e4e3a603c32a7
--- /dev/null
+++ b/python/strings.cpp
@@ -0,0 +1,34 @@
+#include <string>
+#include <sstream>
+
+#include <arbor/context.hpp>
+
+#include "strings.hpp"
+
+namespace pyarb {
+
+std::string proc_allocation_string(const arb::proc_allocation& a) {
+    std::stringstream s;
+    s << "<hardware resource allocation: threads " << a.num_threads << ", gpu ";
+    if (a.has_gpu()) {
+        s << a.gpu_id;
+    }
+    else {
+        s << "None";
+    }
+    s << ">";
+    return s.str();
+}
+
+std::string context_string(const arb::context& c) {
+    std::stringstream s;
+    const bool gpu = arb::has_gpu(c);
+    const bool mpi = arb::has_mpi(c);
+    s << "<context: threads " << arb::num_threads(c)
+      << ", gpu " << (gpu? "yes": "None")
+      << ", distributed " << (mpi? "MPI": "Local")
+      << " ranks " << arb::num_ranks(c)
+      << ">";
+    return s.str();
+}
+} // namespace pyarb
diff --git a/python/strings.hpp b/python/strings.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..c0e9bc042bd1e22c1fbb0aae8b5bf5e56c27ff59
--- /dev/null
+++ b/python/strings.hpp
@@ -0,0 +1,16 @@
+#pragma once
+
+/*
+ * Utilities for generating string representations of types.
+ */
+
+#include <string>
+
+#include <arbor/context.hpp>
+
+namespace pyarb {
+
+std::string context_string(const arb::context&);
+std::string proc_allocation_string(const arb::proc_allocation&);
+
+} // namespace pyarb
diff --git a/python/test/__init__.py b/python/test/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e08cb8eb903201ac28cee30f1249428d5f86388e
--- /dev/null
+++ b/python/test/__init__.py
@@ -0,0 +1,3 @@
+# -*- coding: utf-8 -*-
+#
+# __init__.py
diff --git a/python/test/options.py b/python/test/options.py
new file mode 100644
index 0000000000000000000000000000000000000000..9e191c1de16349ed15cee63ea3eb1222271cafbf
--- /dev/null
+++ b/python/test/options.py
@@ -0,0 +1,18 @@
+# -*- coding: utf-8 -*-
+#
+# options.py
+
+import argparse
+
+import os
+
+import arbor as arb
+
+def parse_arguments(args=None, namespace=None):
+    parser = argparse.ArgumentParser()
+
+    # add arguments as needed (e.g. -d, --dryrun Number of dry run ranks)
+    parser.add_argument("-v", "--verbosity", nargs='?', const=0, type=int, choices=[0, 1, 2], default=0, help="increase output verbosity")
+    #parser.add_argument("-d", "--dryrun", type=int, default=100 , help="number of dry run ranks")
+    args = parser.parse_args()
+    return args
diff --git a/python/test/readme.md b/python/test/readme.md
new file mode 100644
index 0000000000000000000000000000000000000000..dee3e090f10230f5abfa4e1bcb81319f5bb36a77
--- /dev/null
+++ b/python/test/readme.md
@@ -0,0 +1,87 @@
+## Directory Structure
+```
+|- test\
+    |- options.py
+    |- unit\
+        |- runner.py
+        |- test_contexts.py
+        |- ...
+    |- unit_distributed\
+        |- runner.py
+        |- test_contexts_arbmpi.py
+        |- test_contexts_mpi4py.py
+        |- ...
+```
+
+In parent folder `test`: 
+- `options.py`: set global options (define arg parser)
+
+In subfolders `unit`/`unit_distributed`: 
+- `test_xxxs.py`: define unittest class with test methods and own test suite (named: test module)
+- `runner.py`: run all tests in this subfolder (which are defined as suite in test modules) 
+
+## Usage
+### with `unittest` from SUBFOLDER: 
+
+* to run all tests in subfolder:  
+```             
+python -m unittest [-v]
+```
+* to run module: 
+```  
+python -m unittest module [-v]
+```  
+, e.g. in `test/unit` use `python -m unittest test_contexts -v`
+* to run class in module: 
+```
+python -m unittest module.class [-v]
+```  
+, eg. in `test/unit` use `python -m unittest test_contexts.Contexts -v`
+* to run method in class in module: 
+```  
+python -m unittest module.class.method [-v]
+```  
+, eg. in `test/unit` use `python -m unittest test_contexts.Contexts.test_context -v`
+
+### with `runner.py` and argument(s) `-v {0,1,2}` from SUBFOLDER: 
+
+* to run all tests in subfolder:   
+```  
+python -m runner [-v2]
+```   
+or `python runner.py [-v2]`
+* to run module: 
+```  
+python -m test_xxxs [-v2]
+```   
+or `python test_xxxs.py [-v2]`
+* running classes or methods not possible this way
+
+### from any other folder: 
+
+* to run all tests:   
+```
+python path/to/runner.py [-v2]
+```
+* to run module: 
+```  
+python path/to/test_xxxs.py [-v2]
+```   
+
+## Adding new tests
+
+1. In suitable folder `test/unit` (no MPI) or `test/unit_distributed` (MPI), create `test_xxxs.py` file
+2. In  `test_xxxs.py` file, define 
+  a) a unittest `class Xxxs(unittest.TestCase)` with test methods `test_yyy` 
+  b) a suite function `suite()` consisting of all desired tests returning a unittest suite `unittest.makeSuite(Xxxs, ('test'))` (for all defined tests, tuple of selected tests possible); steering of which tests to include happens here!
+  c) a run function `run()` with a unittest runner `unittest.TextTestRunner` running the `suite()` via `runner.run(suite())`
+  d) a `if __name__ == "__main__":` calling `run()`
+3. Add module to `runner.py` in subfolder by adding `test_xxxs`
+  a) to import: in `try` add `import test_xxxs`, in `except` add `from test.subfolder import test_xxxs`
+  b) to `test_modules` list
+
+## Naming convention
+
+- modules: `test_xxxs` (all lower case, ending with `s` since module can consist of multiple classes)
+- class(es): `Xxxs` (first letter upper case, ending with `s` since class can consist of multiple test functions)
+- functions: `test_yyy` (always starting with `test` since the suite is built from all methods starting with `test`)
diff --git a/python/test/unit/__init__.py b/python/test/unit/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e08cb8eb903201ac28cee30f1249428d5f86388e
--- /dev/null
+++ b/python/test/unit/__init__.py
@@ -0,0 +1,3 @@
+# -*- coding: utf-8 -*-
+#
+# __init__.py
diff --git a/python/test/unit/runner.py b/python/test/unit/runner.py
new file mode 100644
index 0000000000000000000000000000000000000000..56cd8a67c99ee4b12bc3625a2e27b137417db420
--- /dev/null
+++ b/python/test/unit/runner.py
@@ -0,0 +1,39 @@
+# -*- coding: utf-8 -*-
+#
+# runner.py
+
+import unittest
+
+# to be able to run .py file from child directory
+import sys, os
+sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '../../')))
+
+try:
+    import options
+    import test_contexts
+    # add more if needed
+except ModuleNotFoundError:
+    from test import options
+    from test.unit import test_contexts
+    # add more if needed
+
+test_modules = [\
+    test_contexts\
+] # add more if needed
+
+def suite():
+    loader = unittest.TestLoader()
+
+    suites = []
+    for test_module in test_modules:
+        test_module_suite = test_module.suite()
+        suites.append(test_module_suite)
+
+    suite = unittest.TestSuite(suites)
+
+    return suite
+
+if __name__ == "__main__":
+    v = options.parse_arguments().verbosity
+    runner = unittest.TextTestRunner(verbosity = v)
+    runner.run(suite())
diff --git a/python/test/unit/test_contexts.py b/python/test/unit/test_contexts.py
new file mode 100644
index 0000000000000000000000000000000000000000..b8c6feff4a7702629ea72866fcd6d87f51daa2ee
--- /dev/null
+++ b/python/test/unit/test_contexts.py
@@ -0,0 +1,77 @@
+# -*- coding: utf-8 -*-
+#
+# test_contexts.py
+
+import unittest
+
+import arbor as arb
+
+# to be able to run .py file from child directory
+import sys, os
+sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '../../')))
+
+try:
+    import options
+except ModuleNotFoundError:
+    from test import options 
+
+"""
+all tests for non-distributed arb.context
+"""
+
+class Contexts(unittest.TestCase):
+    def test_default(self):
+        ctx = arb.context()
+
+    def test_resources(self):
+        alloc = arb.proc_allocation()
+
+        # test that by default proc_allocation has 1 thread and no GPU
+        self.assertEqual(alloc.threads, 1)
+        self.assertFalse(alloc.has_gpu)
+        self.assertEqual(alloc.gpu_id, -1)
+
+        alloc.threads = 20
+        self.assertEqual(alloc.threads, 20)
+
+    def test_context(self):
+        alloc = arb.proc_allocation()
+
+        ctx1 = arb.context()
+
+        self.assertEqual(ctx1.threads, alloc.threads)
+        self.assertEqual(ctx1.has_gpu, alloc.has_gpu)
+
+        # default construction does not use GPU or MPI
+        self.assertEqual(ctx1.threads, 1)
+        self.assertFalse(ctx1.has_gpu)
+        self.assertFalse(ctx1.has_mpi)
+        self.assertEqual(ctx1.ranks, 1)
+        self.assertEqual(ctx1.rank, 0)
+
+        # change allocation
+        alloc.threads = 23
+        self.assertEqual(alloc.threads, 23)
+        alloc.gpu_id = -1
+        self.assertEqual(alloc.gpu_id, -1)
+
+        # test context construction with proc_allocation()
+        ctx2 = arb.context(alloc)
+        self.assertEqual(ctx2.threads, alloc.threads)
+        self.assertEqual(ctx2.has_gpu, alloc.has_gpu)
+        self.assertEqual(ctx2.ranks, 1)
+        self.assertEqual(ctx2.rank, 0)
+
+
+def suite():
+    # specify class and test functions in tuple (here: all tests starting with 'test' from class Contexts)
+    suite = unittest.makeSuite(Contexts, ('test'))
+    return suite
+
+def run():
+    v = options.parse_arguments().verbosity
+    runner = unittest.TextTestRunner(verbosity = v)
+    runner.run(suite())
+
+if __name__ == "__main__":
+    run()
diff --git a/python/test/unit_distributed/__init__.py b/python/test/unit_distributed/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e08cb8eb903201ac28cee30f1249428d5f86388e
--- /dev/null
+++ b/python/test/unit_distributed/__init__.py
@@ -0,0 +1,3 @@
+# -*- coding: utf-8 -*-
+#
+# __init__.py
diff --git a/python/test/unit_distributed/runner.py b/python/test/unit_distributed/runner.py
new file mode 100644
index 0000000000000000000000000000000000000000..afd1276c8cebfc3d2042fa9b1b139e85a6b2d4de
--- /dev/null
+++ b/python/test/unit_distributed/runner.py
@@ -0,0 +1,71 @@
+# -*- coding: utf-8 -*-
+#
+# runner.py
+
+import unittest
+import arbor as arb
+if (arb.mpi4py_compiled() and arb.mpi_compiled()):
+    import mpi4py.MPI as mpi
+
+# to be able to run .py file from child directory
+import sys, os
+sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '../../')))
+
+try:
+    import options
+    import test_contexts_arbmpi
+    import test_contexts_mpi4py
+    # add more if needed
+except ModuleNotFoundError:
+    from test import options
+    from test.unit_distributed import test_contexts_arbmpi
+    from test.unit_distributed import test_contexts_mpi4py
+    # add more if needed
+
+test_modules = [\
+    test_contexts_arbmpi,\
+    test_contexts_mpi4py\
+] # add more if needed
+
+def suite():
+    loader = unittest.TestLoader()
+
+    suites = []
+    for test_module in test_modules:
+        test_module_suite = test_module.suite()
+        suites.append(test_module_suite)
+
+    suite = unittest.TestSuite(suites)
+
+    return suite
+
+
+if __name__ == "__main__":
+    v = options.parse_arguments().verbosity 
+    
+    if not arb.mpi_is_initialized():
+        print(" Runner initializing mpi")
+        arb.mpi_init()
+
+    if arb.mpi4py_compiled():
+        comm = arb.mpi_comm_from_mpi4py(mpi.COMM_WORLD)
+    elif arb.mpi_compiled():
+        comm = arb.mpi_comm()
+
+    alloc = arb.proc_allocation()
+    ctx = arb.context(alloc, comm)
+    rank = ctx.rank
+
+    if rank == 0:
+        runner = unittest.TextTestRunner(verbosity = v)
+    else:
+        sys.stdout = open(os.devnull, 'w')
+        runner = unittest.TextTestRunner(stream=sys.stdout)
+
+    runner.run(suite())
+
+    if not arb.mpi_is_finalized():
+        #print(" Runner finalizing mpi")
+       arb.mpi_finalize()
+    #else:
+       #print(" mpi already finalized!")
diff --git a/python/test/unit_distributed/test_contexts_arbmpi.py b/python/test/unit_distributed/test_contexts_arbmpi.py
new file mode 100644
index 0000000000000000000000000000000000000000..e4fca97024671ff77772852b06bef57ae2bbb9cd
--- /dev/null
+++ b/python/test/unit_distributed/test_contexts_arbmpi.py
@@ -0,0 +1,95 @@
+# -*- coding: utf-8 -*-
+#
+# test_contexts_arbmpi.py
+
+import unittest
+
+import arbor as arb
+
+# to be able to run .py file from child directory
+import sys, os
+sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '../../')))
+
+try:
+    import options
+except ModuleNotFoundError:
+    from test import options
+
+"""
+all tests for distributed arb.context using arbor mpi wrappers
+"""
+@unittest.skipIf(arb.mpi_compiled() == False, "MPI not enabled!")
+class Contexts_arbmpi(unittest.TestCase):
+    # Initialize mpi only once in this class (when adding classes move initialization to setUpModule())
+    @classmethod
+    def setUpClass(self):
+        #print("setUp --- TestContextMPI class")
+        self.local_mpi = False
+        if not arb.mpi_is_initialized():
+            #print("    Initializing mpi")
+            arb.mpi_init()
+            self.local_mpi = True
+        #else:
+            #print("    mpi already initialized")
+    # Finalize mpi only once in this class (when adding classes move finalization to tearDownModule())
+    @classmethod
+    def tearDownClass(self):
+        #print("tearDown --- TestContextMPI class")
+        #print("    Finalizing mpi")
+        #if (arb.mpi4py_compiled() == False and arb.mpi_is_finalized() == False):
+        if self.local_mpi: 
+            #print("    Finalizing mpi")
+            arb.mpi_finalize()
+        #else:
+            #print("    No finalizing due to further testing with mpi4py")
+    
+    def test_initialized_arbmpi(self):
+        self.assertTrue(arb.mpi_is_initialized())
+
+    def test_context_arbmpi(self):
+        comm = arb.mpi_comm()
+
+        # test that by default communicator is MPI_COMM_WORLD
+        self.assertEqual(str(comm), '<mpi_comm: MPI_COMM_WORLD>')
+        #print(comm)
+
+        # test context with mpi
+        alloc = arb.proc_allocation()
+        ctx = arb.context(alloc, comm)
+
+        self.assertEqual(ctx.threads, alloc.threads)
+        self.assertTrue(ctx.has_mpi)
+        #print(ctx)
+
+    def test_finalized_arbmpi(self):
+        self.assertFalse(arb.mpi_is_finalized())
+
+def suite():
+    # specify class and test functions as tuple (here: all tests starting with 'test' from class Contexts_arbmpi)
+    suite = unittest.makeSuite(Contexts_arbmpi, ('test'))
+    return suite
+
+def run():
+    v = options.parse_arguments().verbosity
+    
+    if not arb.mpi_is_initialized():
+        arb.mpi_init()
+
+    comm = arb.mpi_comm()
+    alloc = arb.proc_allocation()
+    ctx = arb.context(alloc, comm)
+    rank = ctx.rank
+    
+    if rank == 0:
+        runner = unittest.TextTestRunner(verbosity = v)
+    else:
+        sys.stdout = open(os.devnull, 'w')
+        runner = unittest.TextTestRunner(stream=sys.stdout)
+
+    runner.run(suite())
+
+    if not arb.mpi_is_finalized():
+        arb.mpi_finalize()
+
+if __name__ == "__main__":
+    run()
diff --git a/python/test/unit_distributed/test_contexts_mpi4py.py b/python/test/unit_distributed/test_contexts_mpi4py.py
new file mode 100644
index 0000000000000000000000000000000000000000..0d13c8cd2f5471c8c8eefd2584514b99f471e337
--- /dev/null
+++ b/python/test/unit_distributed/test_contexts_mpi4py.py
@@ -0,0 +1,75 @@
+# -*- coding: utf-8 -*-
+#
+# test_contexts_mpi4py.py
+
+import unittest
+
+import arbor as arb
+
+# to be able to run .py file from child directory
+import sys, os
+sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '../../')))
+
+try:
+    import options
+except ModuleNotFoundError:
+    from test import options
+
+if (arb.mpi4py_compiled() and arb.mpi_compiled()):
+    import mpi4py.MPI as mpi
+
+"""
+all tests for distributed arb.context using mpi4py
+"""
+# Only test class if env var ARB_WITH_MPI4PY=ON
+@unittest.skipIf(arb.mpi_compiled() == False or arb.mpi4py_compiled() == False, "MPI/mpi4py not enabled!")
+class Contexts_mpi4py(unittest.TestCase):
+    def test_initialize_mpi4py(self):
+        # test mpi initialization (automatically when including mpi4py: https://mpi4py.readthedocs.io/en/stable/mpi4py.run.html)
+        self.assertTrue(mpi.Is_initialized())
+
+    def test_communicator_mpi4py(self):
+        comm = arb.mpi_comm_from_mpi4py(mpi.COMM_WORLD)
+
+        # test that set communicator is MPI_COMM_WORLD
+        self.assertEqual(str(comm), '<mpi_comm: MPI_COMM_WORLD>')
+        #print(comm)
+
+    def test_context_mpi4py(self):
+        comm = arb.mpi_comm_from_mpi4py(mpi.COMM_WORLD)
+
+        # test context with mpi usage
+        alloc = arb.proc_allocation()
+        ctx = arb.context(alloc, comm)
+
+        self.assertEqual(ctx.threads, alloc.threads)
+        self.assertTrue(ctx.has_mpi)
+        #print(ctx)
+
+    def test_finalize_mpi4py(self):
+        # test mpi finalization (automatically when including mpi4py, but only just before the Python process terminates)
+        self.assertFalse(mpi.Is_finalized())
+
+def suite():
+    # specify class and test functions as tuple (here: all tests starting with 'test' from class Contexts_mpi4py)
+    suite = unittest.makeSuite(Contexts_mpi4py, ('test'))
+    return suite
+
+def run():
+    v = options.parse_arguments().verbosity
+
+    comm = arb.mpi_comm_from_mpi4py(mpi.COMM_WORLD)
+    alloc = arb.proc_allocation()
+    ctx = arb.context(alloc, comm)
+    rank = ctx.rank
+    
+    if rank == 0:
+        runner = unittest.TextTestRunner(verbosity = v)
+    else:
+        sys.stdout = open(os.devnull, 'w') 
+        runner = unittest.TextTestRunner(stream=sys.stdout)
+
+    runner.run(suite())
+
+if __name__ == "__main__":
+    run()