diff --git a/arbor/arbexcept.cpp b/arbor/arbexcept.cpp
index 36ec2a6397031219b4185df60e74e970fd321fdb..50ac0968c07222a30514d4cfcaedf4c624263b0f 100644
--- a/arbor/arbexcept.cpp
+++ b/arbor/arbexcept.cpp
@@ -37,6 +37,11 @@ bad_global_property::bad_global_property(cell_kind kind):
     kind(kind)
 {}
 
+zero_thread_requested_error::zero_thread_requested_error(unsigned nbt):
+    arbor_exception(pprintf("threads must be a positive integer")),
+    nbt(nbt)
+{}
+
 bad_probe_id::bad_probe_id(cell_member_type probe_id):
     arbor_exception(pprintf("bad probe id {}", probe_id)),
     probe_id(probe_id)
diff --git a/arbor/include/arbor/arbexcept.hpp b/arbor/include/arbor/arbexcept.hpp
index 9851823d3cfd2c34b1a57da6bee92b9e9ca7f324..4a06571b3fa07b10b292034a424c4920cae358b9 100644
--- a/arbor/include/arbor/arbexcept.hpp
+++ b/arbor/include/arbor/arbexcept.hpp
@@ -75,6 +75,20 @@ struct gj_unsupported_lid_selection_policy: arbor_exception {
     cell_tag_type label;
 };
 
+// Context errors:
+
+struct zero_thread_requested_error: arbor_exception {
+    zero_thread_requested_error(unsigned nbt);
+    unsigned nbt;
+};
+
+// Domain decomposition errors:
+
+struct gj_unsupported_domain_decomposition: arbor_exception {
+    gj_unsupported_domain_decomposition(cell_gid_type gid_0, cell_gid_type gid_1);
+    cell_gid_type gid_0, gid_1;
+};
+
 // Simulation errors:
 
 struct bad_event_time: arbor_exception {
diff --git a/doc/python/hardware.rst b/doc/python/hardware.rst
index 33c403ba10301eea6089e630de5287924e0da87e..4cbfe56a3d612b562e312f1728a9b360d1b3ac62 100644
--- a/doc/python/hardware.rst
+++ b/doc/python/hardware.rst
@@ -87,8 +87,8 @@ The Python wrapper provides an API for:
 
     .. attribute:: threads
 
-        The number of CPU threads available, 1 by default.
+        The number of CPU threads available, 1 by default. Must be set to at least 1.
 
     .. attribute:: gpu_id
 
         The identifier of the GPU to use.
@@ -137,7 +137,8 @@ The Python wrapper provides an API for:
 
         .. attribute:: threads
 
-            The number of threads available locally for execution, 1 by default.
+            The number of threads available locally for execution, 1 by default. Must be set to at least 1.
+            Passing ``"avail_threads"`` (as a string) will query and use the maximum number of threads the system makes available.
 
         .. attribute:: gpu_id
 
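
For orientation, a minimal usage sketch of the two documented forms, assuming the usual arbor module import (the reported thread count is machine dependent):

    import arbor

    # Fixed thread count: any positive integer.
    ctx = arbor.context(threads=4)

    # Let Arbor query and use every thread the system makes available.
    ctx = arbor.context(threads='avail_threads')
    print(ctx.threads)  # >= 1, machine dependent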
diff --git a/python/CMakeLists.txt b/python/CMakeLists.txt
index dd2028f062bd34a5ddc57dc1d361e864e3be229c..10220bf93a36431e415b0b03ffb019d1ba1d4f4d 100644
--- a/python/CMakeLists.txt
+++ b/python/CMakeLists.txt
@@ -45,7 +45,7 @@ set(pyarb_source
 # unit tests of the C++ components in the Python wrapper.
 add_library(pyarb_obj OBJECT ${pyarb_source})
 set_target_properties(pyarb_obj PROPERTIES CXX_VISIBILITY_PRESET hidden)
-target_link_libraries(pyarb_obj PRIVATE arbor arborio pybind11::module)
+target_link_libraries(pyarb_obj PRIVATE arbor arborenv arborio pybind11::module)
 
 # The Python library. MODULE will make a Python-exclusive model.
 add_library(pyarb MODULE $<TARGET_OBJECTS:pyarb_obj>)
@@ -58,7 +58,7 @@ set_target_properties(pyarb PROPERTIES PREFIX "${PYTHON_MODULE_PREFIX}" SUFFIX "
 
 # This dependency has to be spelt out again, despite being added to
 # pyarb_obj because CMake.
-target_link_libraries(pyarb PRIVATE arbor arborio pybind11::module)
+target_link_libraries(pyarb PRIVATE arbor arborenv arborio pybind11::module)
 
 # Add support for mpi4py if available.
 if (ARB_WITH_MPI)
diff --git a/python/context.cpp b/python/context.cpp
index 4bf1e462239621d2b65f2dfaec8f636f94209de4..80fcbc61c93fe01e90b92a8c052054a43708e8f5 100644
--- a/python/context.cpp
+++ b/python/context.cpp
@@ -7,6 +7,8 @@
 
 #include <arbor/context.hpp>
 #include <arbor/version.hpp>
+#include <arbor/arbexcept.hpp>
+#include <arborenv/concurrency.hpp>
 
 #include "context.hpp"
 #include "conversion.hpp"
@@ -34,9 +36,9 @@ std::ostream& operator<<(std::ostream& o, const context_shim& ctx) {
 // A Python shim that holds the information that describes an arb::proc_allocation.
 struct proc_allocation_shim {
     std::optional<int> gpu_id = {};
-    int num_threads = 1;
+    unsigned num_threads = 1;
 
-    proc_allocation_shim(int threads, pybind11::object gpu) {
+    proc_allocation_shim(unsigned threads, pybind11::object gpu) {
         set_num_threads(threads);
         set_gpu_id(gpu);
     }
@@ -48,13 +50,15 @@ struct proc_allocation_shim {
         gpu_id = py2optional<int>(gpu, "gpu_id must be None, or a non-negative integer", is_nonneg());
     };
 
-    void set_num_threads(int threads) {
-        pyarb::assert_throw([](int n) { return n>0; }(threads), "threads must be a positive integer");
+    void set_num_threads(unsigned threads) {
+        if (0==threads) {
+            throw arb::zero_thread_requested_error(threads);
+        }
         num_threads = threads;
     };
 
     std::optional<int> get_gpu_id() const { return gpu_id; }
-    int get_num_threads() const { return num_threads; }
+    unsigned get_num_threads() const { return num_threads; }
     bool has_gpu() const { return bool(gpu_id); }
 
     // helper function to use arb::make_context(arb::proc_allocation)
@@ -63,6 +67,34 @@ struct proc_allocation_shim {
     }
 };
 
+context_shim create_context(unsigned threads, pybind11::object gpu, pybind11::object mpi) {
+    const char* gpu_err_str = "gpu_id must be None, or a non-negative integer";
+
+#ifndef ARB_GPU_ENABLED
+    if (!gpu.is_none()) {
+        throw pyarb_error("Attempt to set an GPU communicator but Arbor is not configured with GPU support.");
+    }
+#endif
+
+    auto gpu_id = py2optional<int>(gpu, gpu_err_str, is_nonneg());
+    arb::proc_allocation alloc(threads, gpu_id.value_or(-1));
+
+#ifndef ARB_MPI_ENABLED
+    if (!mpi.is_none()) {
+        throw pyarb_error("Attempt to set an MPI communicator but Arbor is not configured with MPI support.");
+    }
+#else
+    const char* mpi_err_str = "mpi must be None, or an MPI communicator";
+    if (can_convert_to_mpi_comm(mpi)) {
+        return context_shim(arb::make_context(alloc, convert_to_mpi_comm(mpi)));
+    }
+    if (auto c = py2optional<mpi_comm_shim>(mpi, mpi_err_str)) {
+        return context_shim(arb::make_context(alloc, c->comm));
+    }
+#endif
+    return context_shim(arb::make_context(alloc));
+}
+
 std::ostream& operator<<(std::ostream& o, const proc_allocation_shim& alloc) {
     return o << "<arbor.proc_allocation: threads " << alloc.num_threads << ", gpu_id " << util::to_string(alloc.gpu_id) << ">";
 }
@@ -75,10 +107,10 @@ void register_contexts(pybind11::module& m) {
     pybind11::class_<proc_allocation_shim> proc_allocation(m, "proc_allocation",
         "Enumerates the computational resources on a node to be used for simulation.");
     proc_allocation
-        .def(pybind11::init<int, pybind11::object>(),
+        .def(pybind11::init<unsigned, pybind11::object>(),
             "threads"_a=1, "gpu_id"_a=pybind11::none(),
             "Construct an allocation with arguments:\n"
-            "  threads: The number of threads available locally for execution, 1 by default.\n"
+            "  threads: The number of threads available locally for execution. Must be set to 1 at minimum. 1 by default.\n"
             "  gpu_id:  The identifier of the GPU to use, None by default.\n")
         .def_property("threads", &proc_allocation_shim::get_num_threads, &proc_allocation_shim::set_num_threads,
             "The number of threads available locally for execution.")
@@ -94,37 +126,26 @@ void register_contexts(pybind11::module& m) {
     pybind11::class_<context_shim> context(m, "context", "An opaque handle for the hardware resources used in a simulation.");
     context
         .def(pybind11::init(
-            [](int threads, pybind11::object gpu, pybind11::object mpi){
-                const char* gpu_err_str = "gpu_id must be None, or a non-negative integer";
-
-#ifndef ARB_GPU_ENABLED
-                if (!gpu.is_none()) {
-                    throw pyarb_error("Attempt to set an GPU communicator but Arbor is not configured with GPU support.");
-                }
-#endif
-
-                auto gpu_id = py2optional<int>(gpu, gpu_err_str, is_nonneg());
-                arb::proc_allocation alloc(threads, gpu_id.value_or(-1));
-
-#ifndef ARB_MPI_ENABLED
-                if (!mpi.is_none()) {
-                    throw pyarb_error("Attempt to set an MPI communicator but Arbor is not configured with MPI support.");
-                }
-#else
-                const char* mpi_err_str = "mpi must be None, or an MPI communicator";
-                if (can_convert_to_mpi_comm(mpi)) {
-                    return context_shim(arb::make_context(alloc, convert_to_mpi_comm(mpi)));
-                }
-                if (auto c = py2optional<mpi_comm_shim>(mpi, mpi_err_str)) {
-                    return context_shim(arb::make_context(alloc, c->comm));
+            [](unsigned threads, pybind11::object gpu, pybind11::object mpi){
+                return create_context(threads, gpu, mpi);
+            }),
+            "threads"_a=1, "gpu_id"_a=pybind11::none(), "mpi"_a=pybind11::none(),
+            "Construct a distributed context with arguments:\n"
+            "  threads: The number of threads available locally for execution. Must be set to 1 at minimum. 1 by default.\n"
+            "  gpu_id:  The identifier of the GPU to use, None by default. Only available if arbor.__config__['gpu']==True.\n"
+            "  mpi:     The MPI communicator, None by default. Only available if arbor.__config__['mpi']==True.\n")
+        .def(pybind11::init(
+            [](std::string threads, pybind11::object gpu, pybind11::object mpi){
+                if ("avail_threads" == threads) {
+                    return create_context(arbenv::thread_concurrency(), gpu, mpi);
                 }
-#endif
+                throw pyarb_error(
+                        util::pprintf("Attempt to set threads to {}. The only valid thread options are a positive integer greater than 0, or 'avial_threads'.", threads));
 
-                return context_shim(arb::make_context(alloc));
             }),
-            "threads"_a=1, "gpu_id"_a=pybind11::none(), "mpi"_a=pybind11::none(),
+            "threads"_a, "gpu_id"_a=pybind11::none(), "mpi"_a=pybind11::none(),
             "Construct a distributed context with arguments:\n"
-            "  threads: The number of threads available locally for execution, 1 by default.\n"
+            "  threads: A string option describing the number of threads. Currently, only \"avail_threads\" is supported.\n"
             "  gpu_id:  The identifier of the GPU to use, None by default. Only available if arbor.__config__['gpu']==True.\n"
             "  mpi:     The MPI communicator, None by default. Only available if arbor.__config__['mpi']==True.\n")
         .def(pybind11::init(
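
The two pybind11::init overloads above are resolved by pybind11's usual dispatch: an integral threads argument binds the unsigned constructor, a string binds the second. A sketch of the resulting Python-side behavior, assuming pyarb_error keeps its usual mapping to RuntimeError:

    import arbor

    arbor.context(threads=2)                 # unsigned overload
    arbor.context(threads='avail_threads')   # string overload; uses arbenv::thread_concurrency()
    arbor.context(threads='all')             # string overload; raises, only 'avail_threads' is valid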
diff --git a/python/pyarb.cpp b/python/pyarb.cpp
index 85871de6fc0c2216758bf490c05ad0c0de705740..c168d8be4f48c9715bb92edf2c7f30217f2d7ce1 100644
--- a/python/pyarb.cpp
+++ b/python/pyarb.cpp
@@ -46,6 +46,7 @@ PYBIND11_MODULE(_arbor, m) {
 
     // Translate Arbor errors -> Python exceptions.
     pybind11::register_exception<arb::file_not_found_error>(m, "FileNotFoundError", PyExc_FileNotFoundError);
+    pybind11::register_exception<arb::zero_thread_requested_error>(m, "ValueError", PyExc_ValueError);
 
     pyarb::register_cable_loader(m);
     pyarb::register_cable_probes(m, global_ptr);
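
With this registration in place, the C++ zero_thread_requested_error surfaces in Python as a ValueError rather than the generic RuntimeError it previously produced. A minimal sketch:

    import arbor

    try:
        arbor.proc_allocation(threads=0)
    except ValueError as e:
        print(e)  # "threads must be a positive integer"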
diff --git a/python/test/unit/test_contexts.py b/python/test/unit/test_contexts.py
index d5728d820f1fe4dbe3d7eb3e5db45bbcf13866ea..7ed3a7d3cc093411e188954e748e54d6c00736c6 100644
--- a/python/test/unit/test_contexts.py
+++ b/python/test/unit/test_contexts.py
@@ -44,7 +44,7 @@ class TestContexts(unittest.TestCase):
             arb.proc_allocation(gpu_id = 'gpu_id')
         with self.assertRaises(TypeError):
             arb.proc_allocation(threads = 1.)
-        with self.assertRaisesRegex(RuntimeError,
+        with self.assertRaisesRegex(ValueError,
             "threads must be a positive integer"):
              arb.proc_allocation(threads = 0)
         with self.assertRaises(TypeError):
@@ -69,6 +69,16 @@ class TestContexts(unittest.TestCase):
         self.assertEqual(ctx.ranks, 1)
         self.assertEqual(ctx.rank, 0)
 
+    def test_context_avail_threads(self):
+        # test that a context created with 'avail_threads' reports at least 1 thread.
+        ctx = arb.context(threads = 'avail_threads', gpu_id = None)
+
+        self.assertFalse(ctx.has_mpi)
+        self.assertFalse(ctx.has_gpu)
+        self.assertTrue(ctx.threads >= 1)
+        self.assertEqual(ctx.ranks, 1)
+        self.assertEqual(ctx.rank, 0)
+
     def test_context_allocation(self):
         alloc = arb.proc_allocation()
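
A further check one might add alongside these tests (hypothetical, not part of this diff): the invalid-string path in the new context overload raises through pyarb_error, which pybind11 maps to RuntimeError by default.

    def test_context_bad_thread_string(self):
        with self.assertRaisesRegex(RuntimeError, "avail_threads"):
            arb.context(threads = 'every_thread')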