diff --git a/.github/workflows/ciwheel.yml b/.github/workflows/ciwheel.yml
new file mode 100644
index 0000000000000000000000000000000000000000..44dd204b18454345ff0f74b5c8dc208e13b73afb
--- /dev/null
+++ b/.github/workflows/ciwheel.yml
@@ -0,0 +1,89 @@
+name: Arbor on Wheels
+
+on:
+  push:
+    branches: [ ciwheel ]
+    tags:
+      - v*
+
+jobs:
+  build_binary_wheels:
+    name: Build wheels on ${{ matrix.os }}
+    runs-on: ${{ matrix.os }}
+    strategy:
+      matrix:
+        os: [ubuntu-latest, macos-latest]
+
+    steps:
+      - uses: actions/checkout@v2
+        with:
+          fetch-depth: 0
+          submodules: recursive
+
+      - name: Build wheels Linux
+        if: ${{ startsWith(matrix.os, 'ubuntu') }}
+        uses: joerick/cibuildwheel@v1.9.0
+        with:
+          output-dir: dist
+        env:
+          CIBW_BEFORE_BUILD: python -m pip install numpy setuptools
+          CIBW_BUILD: "cp3?-manylinux_x86_64"
+          CIBW_SKIP: "cp35-*"
+          CIBW_MANYLINUX_X86_64_IMAGE: manylinux2014
+          # CIBW_TEST_COMMAND: TODO
+
+      - name: Build wheels macos
+        if: ${{ startsWith(matrix.os, 'macos') }}
+        uses: joerick/cibuildwheel@v1.9.0
+        with:
+          output-dir: dist
+        env:
+          MACOSX_DEPLOYMENT_TARGET: "10.15" # needed to override cibuildwheel's default deployment target
+          CIBW_BEFORE_BUILD: python -m pip install numpy setuptools
+          CIBW_BUILD: "cp3?-macosx_x86_64"
+          CIBW_SKIP: "cp35-*"
+          CIBW_ARCHS_MACOS: x86_64 universal2
+          # CIBW_TEST_COMMAND: TODO
+
+      # the cibuildwheel action runs auditwheel automatically; its default repair command is documented at:
+      # https://cibuildwheel.readthedocs.io/en/stable/options/#repair-wheel-command
+
+      - uses: actions/upload-artifact@v2
+        with:
+          name: dist
+          path: dist/*.whl
+
+  build_sdist:
+    name: Build sdist
+    runs-on: ubuntu-latest
+
+    steps:
+      - name: Set up Python
+        uses: actions/setup-python@v2
+      - uses: actions/checkout@v2
+        with:
+          fetch-depth: 0
+          submodules: recursive
+      - name: Make sdist
+        run:  python setup.py sdist
+      - uses: actions/upload-artifact@v2
+        with:
+          name: dist
+          path: dist/*.tar.gz
+
+# TODO
+  # upload_test_pypi:
+  #   name: upload to test pypi
+  #   runs-on: ubuntu-latest
+  #   needs: [build_binary_wheels, build_sdist]
+  #   steps:
+  #     - uses: actions/download-artifact@v2
+  #       with:
+  #         name: dist
+  #     - name: Publish distribution 📦 to Test PyPI
+  #       run: |
+  #         pip install twine
+  #         twine upload -r testpypi dist/*
+  #       env:
+  #         TWINE_USERNAME: __token__
+  #         TWINE_PASSWORD: ${{ secrets.ciwheeltest }}
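For local debugging, the Linux wheel build above can be reproduced without GitHub Actions. A minimal sketch, assuming cibuildwheel 1.9 and a working Docker installation are available locally, and mirroring the CIBW_* settings of the workflow:

    # Sketch only: drive cibuildwheel from Python with the workflow's settings.
    # Assumes `pip install cibuildwheel==1.9.0` and a running Docker daemon.
    import os
    import subprocess

    env = dict(os.environ,
               CIBW_BEFORE_BUILD='python -m pip install numpy setuptools',
               CIBW_BUILD='cp3?-manylinux_x86_64',
               CIBW_SKIP='cp35-*',
               CIBW_MANYLINUX_X86_64_IMAGE='manylinux2014')
    # Equivalent to running `cibuildwheel --platform linux --output-dir dist` in the repository root.
    subprocess.run(['cibuildwheel', '--platform', 'linux', '--output-dir', 'dist'],
                   env=env, check=True)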
diff --git a/.github/workflows/ebrains.yml b/.github/workflows/ebrains.yml
index 7a527e91f95624162de35e1c2e3ac14613c46323..60932fa6b11dc6a97c87af3d01954ffd0c826e46 100644
--- a/.github/workflows/ebrains.yml
+++ b/.github/workflows/ebrains.yml
@@ -1,6 +1,8 @@
 name: Mirror to Ebrains
 
-on: push
+on:
+  push:
+    branches: [ master ]
 
 jobs:
   to_ebrains:
diff --git a/CMakeLists.txt b/CMakeLists.txt
index a71711d952a7708e796bc249bdf1ad0f50a39216..b3b0618e1fcf668cbe07b93d11a8444fed62678b 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -259,7 +259,11 @@ endif()
 if(ARB_WITH_PYTHON)
     cmake_dependent_option(ARB_USE_BUNDLED_PYBIND11 "Use bundled pybind11" ON "ARB_WITH_PYTHON;ARB_USE_BUNDLED_LIBS" OFF)
 
-    find_package(Python3 ${arb_py_version} COMPONENTS Interpreter Development REQUIRED)
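+    # When building wheels with cibuildwheel, the Python installations used for the build
+    # do not provide the embedding library (libpython), so only the Development.Module
+    # component is requested there instead of the full Development component.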
+    if(DEFINED ENV{CIBUILDWHEEL})
+        find_package(Python3 ${arb_py_version} COMPONENTS Interpreter Development.Module REQUIRED)
+    else()
+        find_package(Python3 ${arb_py_version} COMPONENTS Interpreter Development REQUIRED)
+    endif()
 
     # Required to link the dynamic libraries for python modules.
     # Effectively adds '-fpic' flag to CXX_FLAGS.
diff --git a/doc/install/build_install.rst b/doc/install/build_install.rst
index 7181512c400ba5c03c833a99852637511510947c..9de38d00ed9a9870844dfbe5f77b37102e948d03 100644
--- a/doc/install/build_install.rst
+++ b/doc/install/build_install.rst
@@ -359,6 +359,9 @@ to implement these kernels. Arbor currently has vectorization support for x86 ar
 with AVX, AVX2 or AVX512 ISA extensions; and for AArch64 ARM architectures with NEON and SVE
 (first available on ARMv8-A).
 
+.. note::
+  On x86-64 platforms, compilation will fail if vectorization is enabled but the CPU or the ``-DARB_ARCH`` target does not support any form of AVX.
+
 .. _install-gpu:
 
 GPU backend
diff --git a/doc/install/python.rst b/doc/install/python.rst
index ad2066a62b9925bdd3ffeb7c2b895ac497716746..0ecfafd76eb9d832d3f8dbec62a1f2e1f006cfc6 100644
--- a/doc/install/python.rst
+++ b/doc/install/python.rst
@@ -21,9 +21,9 @@ Every point release of Arbor is pushed to the Python Package Index. The easiest
 .. note::
     You will need to have some development packages installed in order to build Arbor this way.
 
-    * Ubuntu/Debian: `sudo apt install git build-essential python3-dev python3-pip`
-    * Fedora/CentOS/Red Hat: `sudo yum install git @development-tools python3-devel python3-pip`
-    * macOS: get `brew` `here <https://brew.sh>`_ and run `brew install cmake clang python3`
+    * Ubuntu/Debian: `sudo apt install git build-essential python3-dev python3-pip libxml2-dev`
+    * Fedora/CentOS/Red Hat: `sudo yum install git @development-tools python3-devel python3-pip libxml2-devel`
+    * macOS: get `brew` `here <https://brew.sh>`_ and run `brew install cmake clang python3 libxml2`
     * Windows: the simplest way is to use `WSL <https://docs.microsoft.com/en-us/windows/wsl/install-win10>`_ and then follow the instructions for Ubuntu.
 
 If you wish to get the latest Arbor straight from
@@ -73,7 +73,7 @@ be used to configure the installation:
 * ``--mpi``: Enable MPI support (requires MPI library).
 * ``--gpu``: Enable GPU support for NVIDIA GPUs with nvcc using ``cuda``, or with clang using ``cuda-clang`` (both require cudaruntime).
   Enable GPU support for AMD GPUs with hipcc using ``hip``. By default set to ``none``, which disables gpu support.
-* ``--vec``: Enable vectorization. This might require choosing an appropriate architecture using ``--arch``.
+* ``--vec``: Enable vectorization. This might require choosing an appropriate architecture using ``--arch``. On x86-64 platforms, compilation will fail if vectorization is enabled but the CPU or the ``--arch`` target does not support any form of AVX.
 * ``--arch``: CPU micro-architecture to target. By default this is set to ``native``.
 
 If calling ``setup.py`` the flags must come after ``install`` on the command line,
diff --git a/setup.py b/setup.py
index 48f1108766ecd93d154a58cb10a4dc9581cead5a..ac92bf81513d9190e5f41277a52dc4ae5ea05a50 100644
--- a/setup.py
+++ b/setup.py
@@ -6,6 +6,13 @@ from setuptools import Extension
 from setuptools.command.build_ext import build_ext
 from setuptools.command.install import install
 import subprocess
+try:
+    from wheel.bdist_wheel import bdist_wheel
+    WHEEL_INSTALLED = True
+except ImportError:
+    # The wheel package is not installed: fall back to installing
+    # without a custom bdist_wheel command.
+    WHEEL_INSTALLED = False
 
 # Singleton class that holds the settings configured using command line
 # options. This information has to be stored in a singleton so that it
@@ -19,7 +26,7 @@ class CL_opt:
                                'gpu': 'none',
                                'vec': False,
                                'arch': 'native',
-                               'neuroml': False,
+                               'neuroml': True,
                                'bundled': True}
 
     def settings(self):
@@ -28,6 +35,17 @@ class CL_opt:
 def cl_opt():
     return CL_opt().settings()
 
+# extend user_options the same way for all Command()s
+user_options_ = [
+        ('mpi',   None, 'enable mpi support (requires MPI library)'),
+        ('gpu=',  None, 'enable nvidia cuda support (requires cudaruntime and nvcc) or amd hip support. Supported values: '
+                        'none, cuda, cuda-clang, hip'),
+        ('vec',   None, 'enable vectorization'),
+        ('arch=', None, 'cpu architecture, e.g. haswell, skylake, armv8.2-a+sve, znver2 (default native).'),
+        ('neuroml', None, 'enable parsing neuroml morphologies in Arbor (requires libxml)'),
+        ('sysdeps', None, 'don\'t use bundled 3rd party C++ dependencies (pybind11 and json). This flag forces use of dependencies installed on the system.')
+    ]
+
 # VERSION is in the same path as setup.py
 here = os.path.abspath(os.path.dirname(__file__))
 with open(os.path.join(here, 'VERSION')) as version_file:
@@ -49,15 +67,7 @@ def check_cmake():
 #    python3 setup.py install --mpi --arch=skylake
 #    pip3 install --install-option '--mpi' --install-option '--arch=skylake' .
 class install_command(install):
-    user_options = install.user_options + [
-        ('mpi',   None, 'enable mpi support (requires MPI library)'),
-        ('gpu=',  None, 'enable nvidia cuda support (requires cudaruntime and nvcc) or amd hip support. Supported values: '
-                        'none, cuda, cuda-clang, hip'),
-        ('vec',   None, 'enable vectorization'),
-        ('arch=', None, 'cpu architecture, e.g. haswell, skylake, armv8.2-a+sve, znver2 (default native).'),
-        ('neuroml', None, 'enable parsing neuroml morphologies in Arbor (requires libxml)'),
-        ('sysdeps', None, 'don\'t use bundled 3rd party C++ dependencies (pybind11 and json). This flag forces use of dependencies installed on the system.')
-    ]
+    user_options = install.user_options + user_options_
 
     def initialize_options(self):
         install.initialize_options(self)
@@ -90,6 +100,41 @@ class install_command(install):
 
         install.run(self)
 
+if WHEEL_INSTALLED:
+    class bdist_wheel_command(bdist_wheel):
+        user_options = bdist_wheel.user_options + user_options_
+
+        def initialize_options(self):
+            bdist_wheel.initialize_options(self)
+            self.mpi  = None
+            self.gpu  = None
+            self.arch = None
+            self.vec  = None
+            self.neuroml = None
+            self.sysdeps = None
+
+        def finalize_options(self):
+            bdist_wheel.finalize_options(self)
+
+        def run(self):
+            # The options are stored in the CL_opt singleton:
+            opt = cl_opt()
+            #   mpi  : build with MPI support (boolean).
+            opt['mpi']  = self.mpi is not None
+            #   gpu  : compile for AMD/NVIDIA GPUs and choose compiler (string).
+            opt['gpu']  = "none" if self.gpu is None else self.gpu
+            #   vec  : generate SIMD vectorized kernels for CPU micro-architecture (boolean).
+            opt['vec']  = self.vec is not None
+            #   arch : target CPU micro-architecture (string).
+            opt['arch'] = "native" if self.arch is None else self.arch
+            #   neuroml : compile with neuroml support for morphologies.
+            opt['neuroml'] = self.neuroml is not None
+            #   bundled : use bundled/git-submoduled 3rd party libraries.
+            #             By default use bundled libs.
+            opt['bundled'] = self.sysdeps is None
+
+            bdist_wheel.run(self)
+
 class cmake_extension(Extension):
     def __init__(self, name):
         Extension.__init__(self, name, sources=[])
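With this change, python3 setup.py bdist_wheel accepts the same build flags as install, for example: python3 setup.py bdist_wheel --vec --arch=skylake

The options recorded in the CL_opt singleton are consumed later when the extension is configured. A rough, illustrative sketch of that mapping follows; the actual logic lives in the existing cmake_build command, and the ARB_* flag names are taken from Arbor's CMake options as an assumption rather than from this patch:

    # Illustrative only: approximately how cl_opt() settings could become CMake flags.
    # The ARB_* names mirror Arbor's CMake options and are assumptions here, not part of this patch.
    def cmake_args_from_options(opt):
        return [
            '-DARB_WITH_PYTHON=ON',
            '-DARB_WITH_MPI={}'.format('ON' if opt['mpi'] else 'OFF'),
            '-DARB_GPU={}'.format(opt['gpu']),
            '-DARB_VECTORIZE={}'.format('ON' if opt['vec'] else 'OFF'),
            '-DARB_ARCH={}'.format(opt['arch']),
            '-DARB_WITH_NEUROML={}'.format('ON' if opt['neuroml'] else 'OFF'),
            '-DARB_USE_BUNDLED_LIBS={}'.format('ON' if opt['bundled'] else 'OFF'),
        ]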
@@ -163,8 +208,12 @@ setuptools.setup(
     zip_safe=False,
     ext_modules=[cmake_extension('arbor')],
     cmdclass={
-        'build_ext': cmake_build,
-        'install':   install_command,
+        'build_ext':   cmake_build,
+        'install':     install_command,
+        'bdist_wheel': bdist_wheel_command,
+    } if WHEEL_INSTALLED else {
+        'build_ext':   cmake_build,
+        'install':     install_command,
     },
 
     author='The Arbor dev team.',