From f685f0e31c33f06e8c7deff518df23a8e2ce37fd Mon Sep 17 00:00:00 2001
From: Thorsten Hater <24411438+thorstenhater@users.noreply.github.com>
Date: Tue, 21 Jun 2022 14:49:53 +0200
Subject: [PATCH] Adopt flake8 (#1908)

---
 .flake8                                       | 24 ++++++++++++
 .github/workflows/lint.yml                    | 13 ++++++-
 doc/conf.py                                   |  7 ++--
 doc/contrib/pr.rst                            | 11 ++++++
 doc/scripts/gen-labels.py                     |  8 ++--
 example/lfp/neuron_lfp_example.py             |  6 +--
 python/__init__.py                            |  5 ++-
 python/example/brunel.py                      | 20 +++++-----
 python/example/gap_junctions.py               |  5 ++-
 python/example/network_ring.py                | 13 ++++---
 python/example/network_ring_mpi.py            | 11 +++---
 python/example/network_ring_mpi_plot.py       |  3 +-
 python/example/single_cell_allen.py           |  1 +
 python/example/single_cell_detailed_recipe.py |  1 +
 python/example/single_cell_model.py           |  3 +-
 python/example/single_cell_nml.py             |  4 +-
 python/example/single_cell_recipe.py          |  3 +-
 python/example/single_cell_swc.py             |  4 +-
 python/test/fixtures.py                       |  2 -
 python/test/unit/test_cable_probes.py         |  1 -
 python/test/unit/test_catalogues.py           |  2 +-
 python/test/unit/test_clear_samplers.py       |  5 +--
 python/test/unit/test_contexts.py             |  1 -
 python/test/unit/test_decor.py                |  2 -
 .../test/unit/test_domain_decompositions.py   | 10 ++---
 python/test/unit/test_event_generators.py     |  1 -
 python/test/unit/test_identifiers.py          |  2 +-
 python/test/unit/test_morphology.py           |  2 -
 python/test/unit/test_multiple_connections.py | 21 +++++++----
 python/test/unit/test_profiling.py            |  1 -
 python/test/unit/test_schedules.py            |  1 -
 .../unit_distributed/test_contexts_arbmpi.py  |  2 +-
 .../unit_distributed/test_contexts_mpi4py.py  |  4 +-
 .../test_domain_decompositions.py             | 37 ++++++++++---------
 .../test/unit_distributed/test_simulator.py   |  5 +--
 scripts/build-catalogue.in                    |  7 ++--
 scripts/patchwheel.py                         |  4 +-
 scripts/where.py                              |  3 +-
 setup.py                                      |  1 -
 validation/ref/neuron/ball_and_squiggle.py    |  7 +++-
 validation/ref/neuron/nrn_validation.py       |  2 -
 41 files changed, 161 insertions(+), 104 deletions(-)
 create mode 100644 .flake8

diff --git a/.flake8 b/.flake8
new file mode 100644
index 00000000..5269b25b
--- /dev/null
+++ b/.flake8
@@ -0,0 +1,24 @@
+[flake8]
+max-line-length = 88
+extend-ignore =
+              # for black
+              E203,
+              # zealous line lengths
+              E501,
+              # ambiguous variable names such as l, I, O
+              E741,
+select = C,E,F,W,B,B950
+max_complexity = 15
+extend-exclude =
+               # 3rd party
+               ext,
+               python/pybind11,
+               # auto-generated
+               doc/scripts/inputs.py,
+               doc/scripts/make_images.py,
+               # hidden
+               .*,
+               # artifacts
+               build,
+               # nah, don't care
+               spack/package.py
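
For reference, a small hypothetical Python module illustrating what this configuration still reports and what it now ignores (assuming flake8 is installed and picks up the .flake8 above; the snippet is illustrative only):

import os            # F401 (unused import) is still reported, since 'F' is selected
l = list(range(3))   # E741 (ambiguous name 'l') would normally fire, but is ignored here
text = "this line may run well past 88 characters without complaint, because E501 is ignored as well"
print(len(l), len(text))
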
diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml
index b1c643f4..21ab7e3b 100644
--- a/.github/workflows/lint.yml
+++ b/.github/workflows/lint.yml
@@ -11,6 +11,14 @@ jobs:
     strategy:
       fail-fast: false
     steps:
+      - name: Set up Python
+        uses: actions/setup-python@v2
+        with:
+          python-version: 3.6
+      - name: Get packages
+        run: |
+          python -m pip install --upgrade pip
+          pip install flake8
       - name: Clone w/ submodules
         uses: actions/checkout@v2
         with:
@@ -18,5 +26,8 @@ jobs:
       - name: Python Formatting
         uses: psf/black@stable
         with:
-          options: --check --extend-exclude '/(ext|python/pybind11|doc/scripts/.*_theme)'
+          options: --check --extend-exclude '/(ext|python/pybind11|doc/scripts/.*_theme|doc/scripts/inputs.py)'
           src: scripts/build-catalogue.in .
+      - name: Python analysis
+        run: |
+          flake8 scripts/build-catalogue.in .
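
The CI step above installs flake8 and runs it alongside the black check. A hedged local equivalent, assuming black and flake8 are available in the current Python environment (options may differ slightly from CI):

import subprocess
import sys

# Run the same two checks the workflow runs; stop at the first failure.
for cmd in (["black", "--check", "."], ["flake8", "scripts/build-catalogue.in", "."]):
    print("running:", " ".join(cmd))
    if subprocess.run(cmd).returncode != 0:
        sys.exit(1)
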
diff --git a/doc/conf.py b/doc/conf.py
index e6c2dd34..424686cf 100644
--- a/doc/conf.py
+++ b/doc/conf.py
@@ -1,8 +1,7 @@
 #!/usr/bin/env python3
 # -*- coding: utf-8 -*-
-import sys, os
-import subprocess as sp
-from tempfile import TemporaryDirectory
+import sys
+import os
 
 # Add /scripts to path. Used for Furo theme and to generate images
 this_path = os.path.split(os.path.abspath(__file__))[0]
@@ -58,7 +57,7 @@ img_path = this_path + "/gen-images"
 if not os.path.exists(img_path):
     os.mkdir(img_path)
 
-import make_images
+import make_images  # noqa:E402
 
 make_images.generate(img_path)
 
diff --git a/doc/contrib/pr.rst b/doc/contrib/pr.rst
index c22c59cd..a47539ec 100644
--- a/doc/contrib/pr.rst
+++ b/doc/contrib/pr.rst
@@ -129,6 +129,17 @@ Each pull request is reviewed according to these guidelines:
    summary as commit message.
 -  Consider using Gitpod to review larger PRs, see under checks on the Github PR page.
 
+.. _contribpr-lint:
+
+Pull requests will also be subject to a series of automated checks:
+
+- Python formatting will be checked using the `black <https://black.readthedocs.io/en/stable/index.html>`__ formatter
+- Python files will be checked for common errors and code smells using `flake8 <https://flake8.pycqa.org/en/latest/>`__
+- C++ code will be run against a suite of sanitizers under the `clang <https://clang.llvm.org/docs/index.html>`__ umbrella. The following checks are enabled:
+  - `undefined behavior <https://clang.llvm.org/docs/UndefinedBehaviorSanitizer.html>`__: under/overflow, null-deref, ...
+  - `threads <https://clang.llvm.org/docs/ThreadSanitizer.html>`__: data races and other threading related issues
+  - `memory <https://clang.llvm.org/docs/AddressSanitizer.html>`__: illegal accesses, use-after-free, double free, ...
+
 .. _contribpr-merge:
 
 Merging a PR
diff --git a/doc/scripts/gen-labels.py b/doc/scripts/gen-labels.py
index 8747e327..69dd8e36 100644
--- a/doc/scripts/gen-labels.py
+++ b/doc/scripts/gen-labels.py
@@ -10,18 +10,16 @@ def is_collocated(l, r):
 
 
 def write_morphology(name, morph):
-    string = "tmp = [".format(name)
+    string = "tmp = ["
     for i in range(morph.num_branches):
-        first = True
+        last_dist = None
         sections = "["
         for seg in morph.branch_segments(i):
-            if not first:
+            if last_dist is not None:
                 if is_collocated((seg.prox.x, seg.prox.y), (last_dist.x, last_dist.y)):
                     sections += ", "
                 else:
                     sections += "], ["
-
-            first = False
             p = seg.prox
             d = seg.dist
             sections += "Segment(({}, {}, {}), ({}, {}, {}), {})".format(
diff --git a/example/lfp/neuron_lfp_example.py b/example/lfp/neuron_lfp_example.py
index d0953e8a..4b8b9315 100755
--- a/example/lfp/neuron_lfp_example.py
+++ b/example/lfp/neuron_lfp_example.py
@@ -260,8 +260,8 @@ def plot_results(cell, electrode):
         xlim=[-150, 150],
         ylim=[-100, 600],
         title="morphology",
-        xlabel="x ($\mu$m)",
-        ylabel="y ($\mu$m)",
+        xlabel=r"x ($\mu$m)",
+        ylabel=r"y ($\mu$m)",
     )
     ax_syn = fig.add_subplot(
         332, ylabel="nA", title="synaptic current", xlabel="time (ms)"
@@ -273,7 +273,7 @@ def plot_results(cell, electrode):
         338, ylabel="nA", xlabel="time (ms)", title="membrane current"
     )
     ax_ep = fig.add_subplot(
-        133, ylabel="$\mu$V", xlabel="time (ms)", title="Extracellular potential"
+        133, ylabel=r"$\mu$V", xlabel="time (ms)", title="Extracellular potential"
     )
 
     plot_comp_idx = 0
diff --git a/python/__init__.py b/python/__init__.py
index 17ccfc65..cd72d942 100644
--- a/python/__init__.py
+++ b/python/__init__.py
@@ -4,7 +4,8 @@
 # The library will be installed in the same path as this file, which will imports
 # the compiled part of the wrapper from the _arbor namespace.
 
-from ._arbor import *
+from ._arbor import *  # noqa: F403
+
 
 # Parse VERSION file for the Arbor version string.
 def get_version():
@@ -16,7 +17,7 @@ def get_version():
 
 
 __version__ = get_version()
-__config__ = config()
+__config__ = config()  # noqa:F405
 
 # Remove get_version from arbor module.
 del get_version
diff --git a/python/example/brunel.py b/python/example/brunel.py
index 5455ca69..a147e737 100755
--- a/python/example/brunel.py
+++ b/python/example/brunel.py
@@ -6,20 +6,22 @@ import numpy as np
 from numpy.random import RandomState
 
 """
-A Brunel network consists of nexc excitatory LIF neurons and ninh inhibitory LIF neurons.
-Each neuron in the network receives in_degree_prop * nexc excitatory connections
-chosen randomly, in_degree_prop * ninh inhibitory connections and next (external) Poisson connections.
-All the connections have the same delay. The strenght of excitatory and Poisson connections is given by
-parameter weight, whereas the strength of inhibitory connections is rel_inh_strength * weight.
-Poisson neurons all spike independently with expected number of spikes given by parameter poiss_lambda.
-Because of the refractory period, the activity is mostly driven by Poisson neurons and
-recurrent connections have a small effect.
+A Brunel network consists of nexc excitatory LIF neurons and ninh inhibitory
+LIF neurons. Each neuron in the network receives in_degree_prop * nexc
+excitatory connections chosen randomly, in_degree_prop * ninh inhibitory
+connections and next (external) Poisson connections. All the connections have
+the same delay. The strength of excitatory and Poisson connections is given by
+parameter weight, whereas the strength of inhibitory connections is
+rel_inh_strength * weight. Poisson neurons all spike independently with expected
+number of spikes given by parameter poiss_lambda. Because of the refractory
+period, the activity is mostly driven by Poisson neurons and recurrent
+connections have a small effect.
 
 Call with parameters, for example:
 ./brunel.py -n 400 -m 100 -e 20 -p 0.1 -w 1.2 -d 1 -g 0.5 -l 5 -t 100 -s 1 -G 50 -S 123 -f spikes.txt
-
 """
 
+
 # Samples m unique values in interval [start, end) - gid.
 # We exclude gid because we don't want self-loops.
 def sample_subset(gen, gid, start, end, m):
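
The body of sample_subset lies outside this hunk; the following is only a hedged sketch of how such a sampler could look, given the comment above (m unique values drawn from [start, end), excluding gid):

from numpy.random import RandomState

def sample_subset_sketch(gen, gid, start, end, m):
    # Draw m unique values from [start, end) while skipping gid (no self-loops).
    candidates = [i for i in range(start, end) if i != gid]
    picked = gen.choice(len(candidates), size=m, replace=False)
    return [candidates[i] for i in picked]

# e.g. sample_subset_sketch(RandomState(123), gid=0, start=0, end=400, m=40)
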
diff --git a/python/example/gap_junctions.py b/python/example/gap_junctions.py
index 8d5ce084..2076f02a 100644
--- a/python/example/gap_junctions.py
+++ b/python/example/gap_junctions.py
@@ -1,7 +1,8 @@
 #!/usr/bin/env python3
 
 import arbor
-import pandas, seaborn
+import pandas
+import seaborn
 import matplotlib.pyplot as plt
 
 # Construct chains of cells linked with gap junctions,
@@ -28,7 +29,7 @@ def make_cable_cell(gid):
     )
 
     # Single dendrite with radius 2 μm and length 40 μm, (tag = 2)
-    b = tree.append(s, arbor.mpoint(0, 0, 0, 2), arbor.mpoint(40, 0, 0, 2), tag=2)
+    tree.append(s, arbor.mpoint(0, 0, 0, 2), arbor.mpoint(40, 0, 0, 2), tag=2)
 
     # Label dictionary for cell components
     labels = arbor.label_dict()
diff --git a/python/example/network_ring.py b/python/example/network_ring.py
index 9cb97a3a..3de7bfb7 100755
--- a/python/example/network_ring.py
+++ b/python/example/network_ring.py
@@ -2,7 +2,8 @@
 # This script is included in documentation. Adapt line numbers if touched.
 
 import arbor
-import pandas, seaborn
+import pandas
+import seaborn
 from math import sqrt
 
 # Construct a cell with the following morphology.
@@ -25,19 +26,19 @@ def make_cable_cell(gid):
         arbor.mnpos, arbor.mpoint(-12, 0, 0, 6), arbor.mpoint(0, 0, 0, 6), tag=1
     )
 
-    # Single dendrite (tag=3) of length 50 μm and radius 2 μm attached to soma.
+    # (b0) Single dendrite (tag=3) of length 50 μm and radius 2 μm attached to soma.
     b0 = tree.append(s, arbor.mpoint(0, 0, 0, 2), arbor.mpoint(50, 0, 0, 2), tag=3)
 
     # Attach two dendrites (tag=3) of length 50 μm to the end of the first dendrite.
-    # Radius tapers from 2 to 0.5 μm over the length of the dendrite.
-    b1 = tree.append(
+    # (b1) Radius tapers from 2 to 0.5 μm over the length of the dendrite.
+    tree.append(
         b0,
         arbor.mpoint(50, 0, 0, 2),
         arbor.mpoint(50 + 50 / sqrt(2), 50 / sqrt(2), 0, 0.5),
         tag=3,
     )
-    # Constant radius of 1 μm over the length of the dendrite.
-    b2 = tree.append(
+    # (b2) Constant radius of 1 μm over the length of the dendrite.
+    tree.append(
         b0,
         arbor.mpoint(50, 0, 0, 1),
         arbor.mpoint(50 + 50 / sqrt(2), -50 / sqrt(2), 0, 1),
diff --git a/python/example/network_ring_mpi.py b/python/example/network_ring_mpi.py
index 7ccd2d53..b56e81e8 100644
--- a/python/example/network_ring_mpi.py
+++ b/python/example/network_ring_mpi.py
@@ -2,7 +2,7 @@
 # This script is included in documentation. Adapt line numbers if touched.
 
 import arbor
-import pandas, seaborn
+import pandas
 from math import sqrt
 
 # Run with srun -n NJOBS python network_ring_mpi.py
@@ -31,15 +31,16 @@ def make_cable_cell(gid):
     b0 = tree.append(s, arbor.mpoint(0, 0, 0, 2), arbor.mpoint(50, 0, 0, 2), tag=3)
 
     # Attach two dendrites (tag=3) of length 50 μm to the end of the first dendrite.
-    # Radius tapers from 2 to 0.5 μm over the length of the dendrite.
-    b1 = tree.append(
+    # As there's no further use for them, we discard the returned handles.
+    # (b1) Radius tapers from 2 to 0.5 μm over the length of the dendrite.
+    _ = tree.append(
         b0,
         arbor.mpoint(50, 0, 0, 2),
         arbor.mpoint(50 + 50 / sqrt(2), 50 / sqrt(2), 0, 0.5),
         tag=3,
     )
-    # Constant radius of 1 μm over the length of the dendrite.
-    b2 = tree.append(
+    # (b2) Constant radius of 1 μm over the length of the dendrite.
+    _ = tree.append(
         b0,
         arbor.mpoint(50, 0, 0, 1),
         arbor.mpoint(50 + 50 / sqrt(2), -50 / sqrt(2), 0, 1),
diff --git a/python/example/network_ring_mpi_plot.py b/python/example/network_ring_mpi_plot.py
index 594cbd65..d5ea5d32 100644
--- a/python/example/network_ring_mpi_plot.py
+++ b/python/example/network_ring_mpi_plot.py
@@ -2,7 +2,8 @@
 # This script is included in documentation. Adapt line numbers if touched.
 
 import glob
-import pandas, seaborn
+import pandas
+import seaborn
 
 results = glob.glob("result_mpi_*.csv")
 
diff --git a/python/example/single_cell_allen.py b/python/example/single_cell_allen.py
index 511fd33b..11276199 100644
--- a/python/example/single_cell_allen.py
+++ b/python/example/single_cell_allen.py
@@ -7,6 +7,7 @@ import seaborn
 import pandas
 import matplotlib.pyplot as plt
 
+
 # (3) A function that parses the Allen parameter fit file into components for an arbor.decor
 # NB. Needs to be adjusted when using a different model
 def load_allen_fit(fit):
diff --git a/python/example/single_cell_detailed_recipe.py b/python/example/single_cell_detailed_recipe.py
index dd658f66..ef1a4497 100644
--- a/python/example/single_cell_detailed_recipe.py
+++ b/python/example/single_cell_detailed_recipe.py
@@ -88,6 +88,7 @@ cell = arbor.cable_cell(morph, labels, decor)
 
 probe = arbor.cable_probe_membrane_voltage('"custom_terminal"')
 
+
 # (6) Create a class that inherits from arbor.recipe
 class single_recipe(arbor.recipe):
 
diff --git a/python/example/single_cell_model.py b/python/example/single_cell_model.py
index 74df6778..95101782 100755
--- a/python/example/single_cell_model.py
+++ b/python/example/single_cell_model.py
@@ -2,7 +2,8 @@
 # This script is included in documentation. Adapt line numbers if touched.
 
 import arbor
-import pandas, seaborn  # You may have to pip install these.
+import pandas  # You may have to pip install these.
+import seaborn  # You may have to pip install these.
 
 # (1) Create a morphology with a single (cylindrical) segment of length=diameter=6 μm
 tree = arbor.segment_tree()
diff --git a/python/example/single_cell_nml.py b/python/example/single_cell_nml.py
index 395f1f73..7082cd6a 100755
--- a/python/example/single_cell_nml.py
+++ b/python/example/single_cell_nml.py
@@ -1,8 +1,8 @@
 #!/usr/bin/env python3
 import arbor
 from arbor import mechanism as mech
-from arbor import location as loc
-import pandas, seaborn
+import pandas
+import seaborn
 import sys
 
 # Load a cell morphology from an nml file.
diff --git a/python/example/single_cell_recipe.py b/python/example/single_cell_recipe.py
index facd5c8d..cace98de 100644
--- a/python/example/single_cell_recipe.py
+++ b/python/example/single_cell_recipe.py
@@ -2,7 +2,8 @@
 # This script is included in documentation. Adapt line numbers if touched.
 
 import arbor
-import pandas, seaborn  # You may have to pip install these.
+import pandas  # You may have to pip install these.
+import seaborn  # You may have to pip install these.
 
 # The corresponding generic recipe version of `single_cell_model.py`.
 
diff --git a/python/example/single_cell_swc.py b/python/example/single_cell_swc.py
index 0a808b85..06bdbf58 100755
--- a/python/example/single_cell_swc.py
+++ b/python/example/single_cell_swc.py
@@ -12,8 +12,8 @@
 
 import arbor
 from arbor import mechanism as mech
-from arbor import location as loc
-import pandas, seaborn
+import pandas
+import seaborn
 import sys
 
 # Load a cell morphology from an swc file.
diff --git a/python/test/fixtures.py b/python/test/fixtures.py
index 7885b12a..1e82a8b0 100644
--- a/python/test/fixtures.py
+++ b/python/test/fixtures.py
@@ -1,10 +1,8 @@
 import arbor
 import functools
 from functools import lru_cache as cache
-import unittest
 from pathlib import Path
 import subprocess
-import warnings
 import atexit
 
 _mpi_enabled = arbor.__config__["mpi"]
diff --git a/python/test/unit/test_cable_probes.py b/python/test/unit/test_cable_probes.py
index d2a66057..e49b3930 100644
--- a/python/test/unit/test_cable_probes.py
+++ b/python/test/unit/test_cable_probes.py
@@ -2,7 +2,6 @@
 
 import unittest
 import arbor as A
-from .. import fixtures
 
 """
 tests for cable probe wrappers
diff --git a/python/test/unit/test_catalogues.py b/python/test/unit/test_catalogues.py
index 6ef96f91..7e9608df 100644
--- a/python/test/unit/test_catalogues.py
+++ b/python/test/unit/test_catalogues.py
@@ -15,7 +15,7 @@ class recipe(arb.recipe):
         self.props = arb.neuron_cable_properties()
         try:
             self.props.catalogue = arb.load_catalogue("dummy-catalogue.so")
-        except:
+        except Exception:
             print("Catalogue not found. Are you running from build directory?")
             raise
         self.props.catalogue = arb.default_catalogue()
diff --git a/python/test/unit/test_clear_samplers.py b/python/test/unit/test_clear_samplers.py
index 062affb7..f294c69f 100644
--- a/python/test/unit/test_clear_samplers.py
+++ b/python/test/unit/test_clear_samplers.py
@@ -6,9 +6,8 @@ import unittest
 import arbor as A
 import numpy as np
 
-# to be able to run .py file from child directory
-import sys, os
-from .. import fixtures, cases
+from .. import fixtures
+from .. import cases
 
 """
 all tests for the simulator wrapper
diff --git a/python/test/unit/test_contexts.py b/python/test/unit/test_contexts.py
index c5aecc78..2f550c55 100644
--- a/python/test/unit/test_contexts.py
+++ b/python/test/unit/test_contexts.py
@@ -5,7 +5,6 @@
 import unittest
 
 import arbor as arb
-from .. import fixtures
 
 """
 all tests for non-distributed arb.context
diff --git a/python/test/unit/test_decor.py b/python/test/unit/test_decor.py
index e51aec51..59dcf0d9 100644
--- a/python/test/unit/test_decor.py
+++ b/python/test/unit/test_decor.py
@@ -3,8 +3,6 @@
 import unittest
 import arbor as A
 
-from .. import fixtures
-
 """
 Tests for decor and decoration wrappers.
 TODO: Coverage for more than just iclamp.
diff --git a/python/test/unit/test_domain_decompositions.py b/python/test/unit/test_domain_decompositions.py
index a7aa303e..b08ef2b1 100644
--- a/python/test/unit/test_domain_decompositions.py
+++ b/python/test/unit/test_domain_decompositions.py
@@ -5,7 +5,6 @@
 import unittest
 
 import arbor as arb
-from .. import fixtures
 
 # check Arbor's configuration of mpi and gpu
 gpu_enabled = arb.__config__["gpu"]
@@ -14,6 +13,7 @@ gpu_enabled = arb.__config__["gpu"]
 all tests for non-distributed arb.domain_decomposition
 """
 
+
 # Dummy recipe
 class homo_recipe(arb.recipe):
     def __init__(self, n=4):
@@ -76,7 +76,7 @@ class TestDomain_Decompositions(unittest.TestCase):
             self.assertEqual(grp.kind, arb.cell_kind.cable)
 
     # 1 cpu core, 1 gpu; assumes all cells will be placed on gpu in a single cell group
-    @unittest.skipIf(gpu_enabled == False, "GPU not enabled")
+    @unittest.skipIf(gpu_enabled is False, "GPU not enabled")
     def test_domain_decomposition_homogenous_GPU(self):
         n_cells = 10
         recipe = homo_recipe(n_cells)
@@ -139,7 +139,7 @@ class TestDomain_Decompositions(unittest.TestCase):
                 self.assertEqual(k, recipe.cell_kind(gid))
 
     # 1 cpu core, 1 gpu; assumes cable cells will be placed on gpu in a single cell group; spike cells are on cpu in cell groups of size 1
-    @unittest.skipIf(gpu_enabled == False, "GPU not enabled")
+    @unittest.skipIf(gpu_enabled is False, "GPU not enabled")
     def test_domain_decomposition_heterogenous_GPU(self):
         n_cells = 10
         recipe = hetero_recipe(n_cells)
@@ -237,7 +237,7 @@ class TestDomain_Decompositions(unittest.TestCase):
             RuntimeError,
             "unable to perform load balancing because cell_kind::cable has invalid suggested cpu_cell_group size of 0",
         ):
-            decomp = arb.partition_load_balance(recipe, context, hints)
+            arb.partition_load_balance(recipe, context, hints)
 
         cable_hint = arb.partition_hint()
         cable_hint.prefer_gpu = False
@@ -256,4 +256,4 @@ class TestDomain_Decompositions(unittest.TestCase):
             RuntimeError,
             "unable to perform load balancing because cell_kind::spike_source has invalid suggested gpu_cell_group size of 0",
         ):
-            decomp = arb.partition_load_balance(recipe, context, hints)
+            arb.partition_load_balance(recipe, context, hints)
diff --git a/python/test/unit/test_event_generators.py b/python/test/unit/test_event_generators.py
index b992e9e8..b4338702 100644
--- a/python/test/unit/test_event_generators.py
+++ b/python/test/unit/test_event_generators.py
@@ -5,7 +5,6 @@
 import unittest
 
 import arbor as arb
-from .. import fixtures
 
 """
 all tests for event generators (regular, explicit, poisson)
diff --git a/python/test/unit/test_identifiers.py b/python/test/unit/test_identifiers.py
index f682e569..69c2948e 100644
--- a/python/test/unit/test_identifiers.py
+++ b/python/test/unit/test_identifiers.py
@@ -5,7 +5,7 @@
 import unittest
 
 import arbor as arb
-from .. import fixtures
+
 
 """
 all tests for identifiers, indexes, kinds
diff --git a/python/test/unit/test_morphology.py b/python/test/unit/test_morphology.py
index 90454ada..4d0eb7a5 100644
--- a/python/test/unit/test_morphology.py
+++ b/python/test/unit/test_morphology.py
@@ -6,7 +6,6 @@ import unittest
 import arbor as A
 import numpy as N
 import math
-from .. import fixtures
 
 """
 tests for morphology-related classes
@@ -115,7 +114,6 @@ class TestPlacePwlin(unittest.TestCase):
         x0p = iso(s0p)
         x0d = iso(s0d)
         x1p = iso(s1p)
-        x1d = iso(s1d)
 
         L0 = place.at(A.location(0, 0))
         L0s = place.all_at(A.location(0, 0))
diff --git a/python/test/unit/test_multiple_connections.py b/python/test/unit/test_multiple_connections.py
index 37b0f16e..b23326c4 100644
--- a/python/test/unit/test_multiple_connections.py
+++ b/python/test/unit/test_multiple_connections.py
@@ -10,13 +10,20 @@ import arbor as arb
 from .. import fixtures
 
 """
-tests for multiple connections onto the same postsynaptic label and for one connection that has the same net impact as the multiple-connection paradigm,
-thereby testing the selection policies 'round_robin', 'round_robin_halt', and 'univalent'
-
-NOTE: In principle, a plasticity (STDP) mechanism is employed here to test if a selected connection uses the correct instance of the mechanism.
-      Thus, the scenario in Test #1 is intentionally "a wrong one", as opposed to the scenario in Test #2. In Test #1, one presynaptic neuron effectively connects _via one synapse_ to two postsynaptic neurons,
-      and the spike at t=0.8ms in presynaptic neuron 0 will enhance potentiation in both the first and the second synapse mechanism. In Test #2, this is prevented by the 'round_robin_halt' policy, whereby the 
-      potentiation in the second synapse mechanism is only enhanced by spikes of presynaptic neuron 1.
+Tests for multiple connections onto the same postsynaptic label and for one
+connection that has the same net impact as the multiple-connection paradigm,
+thereby testing the selection policies 'round_robin', 'round_robin_halt', and
+'univalent'
+
+NOTE: In principle, a plasticity (STDP) mechanism is employed here to test if a
+      selected connection uses the correct instance of the mechanism. Thus, the
+      scenario in Test #1 is intentionally "a wrong one", as opposed to the
+      scenario in Test #2. In Test #1, one presynaptic neuron effectively
+      connects _via one synapse_ to two postsynaptic neurons, and the spike at
+      t=0.8ms in presynaptic neuron 0 will enhance potentiation in both the
+      first and the second synapse mechanism. In Test #2, this is prevented by
+      the 'round_robin_halt' policy, whereby the potentiation in the second
+      synapse mechanism is only enhanced by spikes of presynaptic neuron 1.
 """
 
 
diff --git a/python/test/unit/test_profiling.py b/python/test/unit/test_profiling.py
index 0c0d484d..9249573c 100644
--- a/python/test/unit/test_profiling.py
+++ b/python/test/unit/test_profiling.py
@@ -6,7 +6,6 @@ import unittest
 
 import arbor as arb
 import functools
-from .. import fixtures
 
 """
 all tests for profiling
diff --git a/python/test/unit/test_schedules.py b/python/test/unit/test_schedules.py
index c3394bf4..0a7c747e 100644
--- a/python/test/unit/test_schedules.py
+++ b/python/test/unit/test_schedules.py
@@ -5,7 +5,6 @@
 import unittest
 
 import arbor as arb
-from .. import fixtures
 
 """
 all tests for schedules (regular, explicit, poisson)
diff --git a/python/test/unit_distributed/test_contexts_arbmpi.py b/python/test/unit_distributed/test_contexts_arbmpi.py
index d1b98005..fca089bf 100644
--- a/python/test/unit_distributed/test_contexts_arbmpi.py
+++ b/python/test/unit_distributed/test_contexts_arbmpi.py
@@ -5,7 +5,7 @@
 import unittest
 
 import arbor as arb
-from .. import fixtures, cases
+from .. import cases
 
 """
 all tests for distributed arb.context using arbor mpi wrappers
diff --git a/python/test/unit_distributed/test_contexts_mpi4py.py b/python/test/unit_distributed/test_contexts_mpi4py.py
index 0d7a3554..335907f7 100644
--- a/python/test/unit_distributed/test_contexts_mpi4py.py
+++ b/python/test/unit_distributed/test_contexts_mpi4py.py
@@ -5,7 +5,7 @@
 import unittest
 
 import arbor as arb
-from .. import fixtures, cases
+from .. import cases
 
 # check Arbor's configuration of mpi
 mpi_enabled = arb.__config__["mpi"]
@@ -17,6 +17,8 @@ if mpi_enabled and mpi4py_enabled:
 """
 all tests for distributed arb.context using mpi4py
 """
+
+
 # Only test class if env var ARB_WITH_MPI4PY=ON
 @cases.skipIfNotDistributed()
 class TestContexts_mpi4py(unittest.TestCase):
diff --git a/python/test/unit_distributed/test_domain_decompositions.py b/python/test/unit_distributed/test_domain_decompositions.py
index 29df3496..e19bd55c 100644
--- a/python/test/unit_distributed/test_domain_decompositions.py
+++ b/python/test/unit_distributed/test_domain_decompositions.py
@@ -5,7 +5,7 @@
 import unittest
 
 import arbor as arb
-from .. import fixtures, cases
+from .. import cases
 
 # check Arbor's configuration of mpi and gpu
 mpi_enabled = arb.__config__["mpi"]
@@ -15,6 +15,7 @@ gpu_enabled = arb.__config__["gpu"]
 all tests for distributed arb.domain_decomposition
 """
 
+
 # Dummy recipe
 class homo_recipe(arb.recipe):
     def __init__(self, n=4):
@@ -173,7 +174,7 @@ class TestDomain_Decompositions_Distributed(unittest.TestCase):
             context = arb.context(threads=1, gpu_id=None)
 
         N = context.ranks
-        I = context.rank
+        R = context.rank
 
         # 10 cells per domain
         n_local = 10
@@ -186,12 +187,12 @@ class TestDomain_Decompositions_Distributed(unittest.TestCase):
         self.assertEqual(decomp.num_global_cells, n_global)
         self.assertEqual(len(decomp.groups), n_local)
 
-        b = I * n_local
-        e = (I + 1) * n_local
+        b = R * n_local
+        e = (R + 1) * n_local
         gids = list(range(b, e))
 
         for gid in gids:
-            self.assertEqual(I, decomp.gid_domain(gid))
+            self.assertEqual(R, decomp.gid_domain(gid))
 
         # Each cell group contains 1 cell of kind cable
         # Each group should also be tagged for cpu execution
@@ -206,7 +207,7 @@ class TestDomain_Decompositions_Distributed(unittest.TestCase):
             self.assertEqual(grp.kind, arb.cell_kind.cable)
 
     # 1 node with 1 cpu core, 1 gpu; assumes all cells will be placed on gpu in a single cell group
-    @unittest.skipIf(gpu_enabled == False, "GPU not enabled")
+    @unittest.skipIf(gpu_enabled is False, "GPU not enabled")
     def test_domain_decomposition_homogenous_GPU(self):
 
         if mpi_enabled:
@@ -216,7 +217,7 @@ class TestDomain_Decompositions_Distributed(unittest.TestCase):
             context = arb.context(threads=1, gpu_id=0)
 
         N = context.ranks
-        I = context.rank
+        R = context.rank
 
         # 10 cells per domain
         n_local = 10
@@ -229,12 +230,13 @@ class TestDomain_Decompositions_Distributed(unittest.TestCase):
         self.assertEqual(decomp.num_global_cells, n_global)
         self.assertEqual(len(decomp.groups), 1)
 
-        b = I * n_local
-        e = (I + 1) * n_local
+        b = R * n_local
+        e = (R + 1) * n_local
+
         gids = list(range(b, e))
 
         for gid in gids:
-            self.assertEqual(I, decomp.gid_domain(gid))
+            self.assertEqual(R, decomp.gid_domain(gid))
 
         # Each cell group contains 1 cell of kind cable
         # Each group should also be tagged for gpu execution
@@ -256,7 +258,7 @@ class TestDomain_Decompositions_Distributed(unittest.TestCase):
             context = arb.context(threads=1, gpu_id=None)
 
         N = context.ranks
-        I = context.rank
+        R = context.rank
 
         # 10 cells per domain
         n_local = 10
@@ -270,12 +272,13 @@ class TestDomain_Decompositions_Distributed(unittest.TestCase):
         self.assertEqual(decomp.num_global_cells, n_global)
         self.assertEqual(len(decomp.groups), n_local)
 
-        b = I * n_local
-        e = (I + 1) * n_local
+        b = R * n_local
+        e = (R + 1) * n_local
+
         gids = list(range(b, e))
 
         for gid in gids:
-            self.assertEqual(I, decomp.gid_domain(gid))
+            self.assertEqual(R, decomp.gid_domain(gid))
 
         # Each cell group contains 1 cell of kind cable
         # Each group should also be tagged for cpu execution
@@ -422,12 +425,10 @@ class TestDomain_Decompositions_Distributed(unittest.TestCase):
 
     def test_domain_decomposition_exceptions(self):
         nranks = 1
-        rank = 0
         if mpi_enabled:
             comm = arb.mpi_comm()
             context = arb.context(threads=1, gpu_id=None, mpi=comm)
             nranks = context.ranks
-            rank = context.rank
         else:
             context = arb.context(threads=1, gpu_id=None)
 
@@ -442,7 +443,7 @@ class TestDomain_Decompositions_Distributed(unittest.TestCase):
             RuntimeError,
             "unable to perform load balancing because cell_kind::cable has invalid suggested cpu_cell_group size of 0",
         ):
-            decomp1 = arb.partition_load_balance(recipe, context, hints1)
+            arb.partition_load_balance(recipe, context, hints1)
 
         hint2 = arb.partition_hint()
         hint2.prefer_gpu = True
@@ -453,4 +454,4 @@ class TestDomain_Decompositions_Distributed(unittest.TestCase):
             RuntimeError,
             "unable to perform load balancing because cell_kind::cable has invalid suggested gpu_cell_group size of 0",
         ):
-            decomp2 = arb.partition_load_balance(recipe, context, hints2)
+            arb.partition_load_balance(recipe, context, hints2)
diff --git a/python/test/unit_distributed/test_simulator.py b/python/test/unit_distributed/test_simulator.py
index ea657a95..50f3ca7b 100644
--- a/python/test/unit_distributed/test_simulator.py
+++ b/python/test/unit_distributed/test_simulator.py
@@ -3,9 +3,8 @@
 # test_simulator.py
 
 import unittest
-import numpy as np
 import arbor as A
-from .. import fixtures, cases
+from .. import cases
 
 mpi_enabled = A.__config__["mpi"]
 
@@ -56,7 +55,7 @@ class lifN_recipe(A.recipe):
 class TestSimulator(unittest.TestCase):
     def init_sim(self):
         comm = A.mpi_comm()
-        context = A.context(threads=1, gpu_id=None, mpi=A.mpi_comm())
+        context = A.context(threads=1, gpu_id=None, mpi=comm)
         self.rank = context.rank
         self.ranks = context.ranks
 
diff --git a/scripts/build-catalogue.in b/scripts/build-catalogue.in
index 9e3384eb..0dc73ce7 100755
--- a/scripts/build-catalogue.in
+++ b/scripts/build-catalogue.in
@@ -2,11 +2,10 @@
 
 import subprocess as sp
 import sys
-from tempfile import mkdtemp, TemporaryDirectory
+from tempfile import mkdtemp
 import os
 from pathlib import Path
 import shutil
-import string
 import argparse
 import re
 
@@ -205,6 +204,8 @@ if debug:
         def __exit__(*args, **kwargs):
             pass
 
+else:
+    from tempfile import TemporaryDirectory
 
 with TemporaryDirectory() as tmp:
     tmp = Path(tmp)
@@ -246,7 +247,7 @@ with TemporaryDirectory() as tmp:
         sp.run(make_cmd, shell=True, check=True, stdout=out, stderr=err)
         shutil.copy2(f"{name}-catalogue.so", pwd)
     except sp.CalledProcessError as e:
-        import sys, traceback as tb
+        import traceback as tb
 
         if not verbose:
             # Not in verbose mode, so we have captured the
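
Together with the dummy context manager kept under if debug: above, the added else: import forms a keep-the-temporary-directory-when-debugging switch. A self-contained sketch of the same idea, using a hypothetical KEEP_TMP environment flag instead of the script's own debug variable:

import os
from tempfile import mkdtemp

debug = bool(os.environ.get("KEEP_TMP"))

if debug:
    class TemporaryDirectory:
        # Hand out a real directory but never delete it, so build artifacts survive.
        def __enter__(self):
            return mkdtemp()

        def __exit__(self, *args, **kwargs):
            pass
else:
    from tempfile import TemporaryDirectory

with TemporaryDirectory() as tmp:
    print("working in", tmp)
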
diff --git a/scripts/patchwheel.py b/scripts/patchwheel.py
index ef7262c4..c1c6ed46 100644
--- a/scripts/patchwheel.py
+++ b/scripts/patchwheel.py
@@ -1,4 +1,6 @@
-import shutil, subprocess, argparse
+import shutil
+import subprocess
+import argparse
 from pathlib import Path
 
 
diff --git a/scripts/where.py b/scripts/where.py
index 08307a66..24790ab6 100644
--- a/scripts/where.py
+++ b/scripts/where.py
@@ -1,4 +1,5 @@
-import sys, sysconfig
+import sys
+import sysconfig
 
 pfx = sys.stdin.read()
 try:
diff --git a/setup.py b/setup.py
index ecf8dbcc..7511cd70 100644
--- a/setup.py
+++ b/setup.py
@@ -1,7 +1,6 @@
 from pathlib import Path
 from sys import executable as python
 from skbuild import setup
-import os, platform
 
 # Hard coded options, because scikit-build does not do build options.
 # Override by instructing CMAKE, e.g.:
diff --git a/validation/ref/neuron/ball_and_squiggle.py b/validation/ref/neuron/ball_and_squiggle.py
index 63d01589..ae3aab67 100644
--- a/validation/ref/neuron/ball_and_squiggle.py
+++ b/validation/ref/neuron/ball_and_squiggle.py
@@ -7,10 +7,15 @@ import nrn_validation as V
 
 V.override_defaults_from_args()
 
+
 # dendrite geometry: 100 µm long, varying diameter.
 length = 100.0
 npoints = 200
-radius = lambda x: math.exp(-x) * (math.sin(40 * x) * 0.05 + 0.1) + 0.1
+
+
+def radius(x):
+    return math.exp(-x) * (math.sin(40 * x) * 0.05 + 0.1) + 0.1
+
 
 xs = [float(i) / (npoints - 1) for i in range(npoints)]
 geom = [(length * x, 2.0 * radius(x)) for x in xs]
diff --git a/validation/ref/neuron/nrn_validation.py b/validation/ref/neuron/nrn_validation.py
index 5952b978..72df64e7 100644
--- a/validation/ref/neuron/nrn_validation.py
+++ b/validation/ref/neuron/nrn_validation.py
@@ -5,7 +5,6 @@ import sys
 import os
 import re
 import numpy as np
-import neuron
 from neuron import h
 
 # This is super annoying: without neuron.gui, need
@@ -277,7 +276,6 @@ def run_nrn_sim(tend, sample_dt=0.025, report_t=None, report_dt=None, dt=None, *
     )
 
     # and section reports too
-    vreport_t = list(vreport_t_hoc)
     for name, length, nseg, ps, vs in vreports:
         obs = np.column_stack([np.array(v) for v in vs])
         xs = [length * p for p in ps]
-- 
GitLab