diff --git a/src/pyhxtorch/hxtorch/examples/spiking/calib_neuron.py b/src/pyhxtorch/hxtorch/examples/spiking/calib_neuron.py
new file mode 100644
index 0000000000000000000000000000000000000000..6878eba81f7bccb1845623ee14a56079fcba8ec4
--- /dev/null
+++ b/src/pyhxtorch/hxtorch/examples/spiking/calib_neuron.py
@@ -0,0 +1,79 @@
+""" """
+import torch
+import hxtorch
+import hxtorch.spiking as hxsnn
+
+hxtorch.logger.default_config(level=hxtorch.logger.LogLevel.TRACE)
+
+
+def main():
+    log = hxtorch.logger.get("hxtorch.examples.spiking.calib_neuron")
+
+    # Initialize hardware
+    hxtorch.init_hardware()
+
+    # Experiment
+    experiment = hxsnn.Experiment(mock=False)
+
+    # Neuron- and calibration-parameters
+    lif_params = hxsnn.functional.CalibratedCUBALIFParams(
+        tau_mem=20e-6,
+        tau_syn=6e-6,
+        refractory_time=1e-6,
+        leak=80.,
+        reset=80.,
+        threshold=torch.tensor([80., 90., 100., 120.]))
+
+    # Modules
+    synapse = hxsnn.Synapse(
+        in_features=2, out_features=4, experiment=experiment)
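+    # Passing a calibrated parameter object (CalibratedCUBALIFParams) lets
+    # the experiment calibrate the chip towards these targets at run time.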
+    neuron = hxsnn.Neuron(size=4, experiment=experiment, params=lif_params)
+
+    # Weights
+    torch.nn.init.normal_(synapse.weight, mean=63., std=0.)
+
+    # Input
+    spikes = torch.zeros((20, 1, synapse.in_features))
+    spikes[5] = 1.
+
+    # Forward
+    spike_handle = hxsnn.NeuronHandle(spikes)
+    output = neuron(synapse(spike_handle))
+
+    # Execute
+    hxsnn.run(experiment=experiment, runtime=spikes.shape[0])
+
+    # Print spike output. Neuron 0 should spike more often than neuron 1.
+    # Neurons 2 and 3 should emit no spikes.
+    log.INFO("Spikes: ", output.spikes.to_sparse())
+
+    # Calibration results are stored to be accessed by user
+    log.INFO("Neuron params: ", neuron.params)
+
+    # Modify parameters, e.g. leak over threshold
+    neuron.params.leak = 1022
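+    # The parameter change is detected via the params' hash and triggers a
+    # re-calibration at the next run.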
+
+    # Zero input
+    spikes[:, :, :] = 0.
+
+    # Forward
+    spike_handle = hxsnn.NeuronHandle(spikes)
+    output = neuron(synapse(spike_handle))
+    # Execute
+    hxsnn.run(experiment=experiment, runtime=spikes.shape[0])
+
+    log.INFO("Spikes: ", output.spikes.to_sparse())
+
+    # Release
+    hxtorch.release_hardware()
+
+    # Return observables
+    return output.spikes, output.v_cadc
+
+
+if __name__ == "__main__":
+    hxtorch.logger.default_config(level=hxtorch.logger.LogLevel.TRACE)
+    for key in ["hxcomm", "grenade", "stadls", "calix"]:
+        other_logger = hxtorch.logger.get(key)
+        hxtorch.logger.set_loglevel(other_logger, hxtorch.logger.LogLevel.WARN)
+    spiketrains, traces = main()
diff --git a/src/pyhxtorch/hxtorch/examples/spiking/yinyang_model.py b/src/pyhxtorch/hxtorch/examples/spiking/yinyang_model.py
index 1dd8ef6010414be7e4095c86e438c29b8aaa2978..88281c790ac85ab4476c108ec61e89f5ba3f4d96 100644
--- a/src/pyhxtorch/hxtorch/examples/spiking/yinyang_model.py
+++ b/src/pyhxtorch/hxtorch/examples/spiking/yinyang_model.py
@@ -57,17 +57,16 @@ class SNN(torch.nn.Module):
 
         # Neuron parameters
         lif_params = F.CUBALIFParams(
-            tau_mem_inv=1. / tau_mem, tau_syn_inv=1. / tau_syn, alpha=alpha)
-        li_params = F.CUBALIParams(
-            tau_mem_inv=1. / tau_mem, tau_syn_inv=1. / tau_syn)
+            tau_mem=tau_mem, tau_syn=tau_syn, alpha=alpha)
+        li_params = F.CUBALIParams(tau_mem=tau_mem, tau_syn=tau_syn)
 
         # Experiment instance to work on
         self.exp = hxsnn.Experiment(
             mock=mock, dt=dt)
         if not mock:
             self.exp.default_execution_instance.load_calib(
-                calib_path if calib_path else calib_helper.nightly_calib_path(
-                    "spiking2"))
+                calib_path if calib_path
+                else calib_helper.nightly_calix_native_path("spiking2"))
 
         # Repeat input
         self.input_repetitions = input_repetitions
diff --git a/src/pyhxtorch/hxtorch/spiking/__init__.py b/src/pyhxtorch/hxtorch/spiking/__init__.py
index afe2f8923448b9b4352fca83a97a0fdc9815ec88..eb5ed1fbe91c6e59935529fae852f3b6ea4dcd9f 100644
--- a/src/pyhxtorch/hxtorch/spiking/__init__.py
+++ b/src/pyhxtorch/hxtorch/spiking/__init__.py
@@ -5,6 +5,7 @@ from hxtorch.spiking.modules import (
     ReadoutNeuron, IAFNeuron, Synapse, SparseSynapse, BatchDropout)
 from hxtorch.spiking.handle import (
     TensorHandle, SynapseHandle, NeuronHandle, ReadoutNeuronHandle)
+from hxtorch.spiking.calibrated_params import CalibratedParams
 from hxtorch.spiking.experiment import Experiment
 from hxtorch.spiking.execution_instance import ExecutionInstance
 from hxtorch.spiking.run import run
diff --git a/src/pyhxtorch/hxtorch/spiking/calibrated_params.py b/src/pyhxtorch/hxtorch/spiking/calibrated_params.py
new file mode 100644
index 0000000000000000000000000000000000000000..6852d6779adfbd27f649758eea78fbb33a265c17
--- /dev/null
+++ b/src/pyhxtorch/hxtorch/spiking/calibrated_params.py
@@ -0,0 +1,226 @@
+"""
+Generic parameter object holding hardware configurable neuron parameters.
+"""
+from __future__ import annotations
+from typing import TYPE_CHECKING, List, Union, Optional
+import dataclasses
+import pylogging as logger
+import numpy as np
+import quantities as pq
+import torch
+from dlens_vx_v3 import halco
+
+if TYPE_CHECKING:
+    from calix.spiking.neuron import NeuronCalibTarget
+
+
+@dataclasses.dataclass(unsafe_hash=True)
+class CalibratedParams:
+    """
+    Parameters for any of the currently available forward and backward paths.
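+
+    Example (a minimal sketch; fields not given keep the defaults below):
+
+        params = CalibratedParams(
+            tau_mem=20e-6, tau_syn=6e-6,
+            threshold=torch.tensor([80., 90., 100., 120.]))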
+    """
+    # calix's neuron-calibrateable parameters
+    leak: torch.Tensor = 80
+    reset: torch.Tensor = 80
+    threshold: torch.Tensor = 125
+    tau_mem: torch.Tensor = 1e-5
+    tau_syn: torch.Tensor = 1e-5
+    i_synin_gm: Union[int, torch.Tensor] = 500
+    e_coba_reversal: Optional[torch.Tensor] = None
+    e_coba_reference: Optional[torch.Tensor] = None
+    membrane_capacitance: torch.Tensor = 63
+    refractory_time: torch.Tensor = 2e-6
+    synapse_dac_bias: Union[int, torch.Tensor] = 600
+    holdoff_time: torch.Tensor = 0
+
+    log = logger.get("hxtorch.GenericParams")
+    logger.set_loglevel(log, logger.LogLevel.TRACE)
+
+    def from_calix_targets(
+            self, targets: NeuronCalibTarget,
+            neurons: List[halco.LogicalNeuronOnDLS]) -> None:
+        """
+        Load the params from a calix calibration target.
+
+        :param targets: The calix calibration targets to read the params from.
+        :param neurons: The neuron coordinates this params object is
+            assigned to.
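+
+        Example (sketch; `calib_target` is a calix `NeuronCalibTarget`,
+        `neurons` the population's logical neuron coordinates):
+
+            params = CalibratedParams()
+            params.from_calix_targets(calib_target, neurons)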
+        """
+        size = len(neurons)
+        coords = self._get_an_indices(neurons)
+        mapping = self._get_an_to_in_pop_indices(neurons)
+        selector = self._get_in_pop_indices(neurons)
+        assert len(coords) == len(mapping)
+        assert len(selector) == size
+
+        # NOTE: We demand that all AtomicNeurons of the same LogicalNeuron
+        #       have the same values. This might change in the future if
+        #       multi-compartment neurons are introduced.
+        def assert_same_values_per_neuron(entity: torch.Tensor):
+            entity = torch.as_tensor(entity)
+            if len(entity.shape) > 1:
+                for row_values in entity:
+                    assert_same_values_per_neuron(row_values)
+                return
+            values = entity[coords]
+            mapping_tensor = torch.tensor(mapping)
+            assert all(
+                torch.all(nrn_values[0] == nrn_values)
+                for nrn_values in [
+                    values[mapping_tensor == i] for i in range(size)]) \
+                or bool(torch.all(torch.isnan(values.double())))
+
+        # i_synin_gm
+        self.i_synin_gm = torch.tensor(targets.i_synin_gm) \
+            * torch.ones(2, dtype=torch.int64)
+
+        # synapse_dac_bias
+        self.synapse_dac_bias = torch.tensor(targets.synapse_dac_bias)
+
+        # tau_mem, refractory_time, holdoff_time
+        # leak, reset, threshold, membrane_capacitance
+        for key in ["tau_mem", "refractory_time", "holdoff_time",
+                    "leak", "reset", "threshold", "membrane_capacitance"]:
+            value = torch.tensor(getattr(targets, key))
+            if not value.shape:  # Single number
+                setattr(self, key, torch.full(
+                    (halco.AtomicNeuronOnDLS.size,), value)[selector])
+            if len(value.shape) == 1:  # 1D
+                assert_same_values_per_neuron(value)
+                setattr(self, key, value[selector])
+
+        # tau_syn
+        if (not isinstance(targets.tau_syn, np.ndarray)
+                or not targets.tau_syn.shape):
+            self.tau_syn = torch.full(
+                (halco.AtomicNeuronOnDLS.size,),
+                torch.tensor(targets.tau_syn))[selector]
+        elif targets.tau_syn.shape == (halco.AtomicNeuronOnDLS.size,):
+            assert_same_values_per_neuron(targets.tau_syn)
+            self.tau_syn = torch.tensor(
+                targets.tau_syn).repeat(2, 1)[:, selector]
+        elif targets.tau_syn.shape == (halco.SynapticInputOnNeuron.size,):
+            self.tau_syn = torch.tensor(targets.tau_syn).repeat(
+                1, halco.AtomicNeuronOnDLS.size)[:, selector]
+        elif targets.tau_syn.shape == (halco.SynapticInputOnNeuron.size,
+                                       halco.AtomicNeuronOnDLS.size):
+            assert_same_values_per_neuron(torch.tensor(targets.tau_syn))
+            self.tau_syn = torch.tensor(targets.tau_syn)[:, selector]
+
+        # e_coba_reversal, e_coba_reference
+        for key in ["e_coba_reversal", "e_coba_reference"]:
+            value = getattr(targets, key)
+            if value is None:
+                setattr(self, key, value)
+            elif value.shape == (halco.SynapticInputOnNeuron.size,):
+                setattr(self, key, torch.tensor(value).repeat(
+                        (1, halco.AtomicNeuronOnDLS.size))[:, selector])
+            elif value.shape == (halco.SynapticInputOnNeuron.size,
+                                 halco.AtomicNeuronOnDLS.size):
+                assert_same_values_per_neuron(value)
+                setattr(self, key, torch.tensor(value)[:, selector])
+
+    def to_calix_targets(
+            self, targets: NeuronCalibTarget,
+            neurons: List[halco.LogicalNeuronOnDLS]) -> NeuronCalibTarget:
+        """
+        Add the params to a calix calibration target.
+
+        :param targets: The calix calibration targets to add the
+            configuration of this params object to.
+        :param neurons: The neuron coordinates this params object is
+            assigned to.
+
+        :return: Returns the calibration target with desired target parameters
+            at the neuron coordinates associated with this params object.
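+
+        Example (sketch; `spiking_target` is an experiment-wide calix
+        `SpikingCalibTarget` whose neuron target this population's
+        parameters are added to):
+
+            params.to_calix_targets(spiking_target.neuron_target, neurons)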
+        """
+        size = len(neurons)
+        coords = self._get_an_indices(neurons)
+        mapping = self._get_an_to_in_pop_indices(neurons)
+        assert len(coords) == len(mapping)
+
+        # i_synin_gm
+        i_synin_gm = np.array(self.i_synin_gm)
+        if not i_synin_gm.shape:
+            i_synin_gm = i_synin_gm * np.ones(2, dtype=np.int64)
+        for i in range(2):
+            if (targets.i_synin_gm[i] is not None
+                    and targets.i_synin_gm[i] != i_synin_gm[i]):
+                raise AttributeError(
+                    f"'i_synin_gm[{i}]' requires same value for all neurons")
+        targets.i_synin_gm = i_synin_gm
+
+        # synapse_dac_bias
+        if (targets.synapse_dac_bias is not None
+                and self.synapse_dac_bias != targets.synapse_dac_bias):
+            raise AttributeError(
+                "'synapse_dac_bias' requires same value for all neurons")
+        targets.synapse_dac_bias = int(self.synapse_dac_bias)
+
+        # tau_syn
+        targets.tau_syn[:, coords] = pq.Quantity(
+            self._resize(self.tau_syn, size, rows=2).numpy() * 1e6,
+            "us")[:, mapping]
+
+        # e_coba_reversal
+        if self.e_coba_reversal is None:
+            targets.e_coba_reversal[:, coords] = np.repeat(
+                np.array([np.inf, -np.inf])[:, np.newaxis],
+                size, axis=1)[:, mapping]
+        else:
+            targets.e_coba_reversal[:, coords] = self._resize(
+                self.e_coba_reversal, size, rows=2).numpy()[:, mapping]
+
+        # e_coba_reference
+        if self.e_coba_reference is None:
+            targets.e_coba_reference[:, coords] = np.ones((
+                halco.SynapticInputOnNeuron.size, size))[:, mapping] * np.nan
+        else:
+            targets.e_coba_reference[:, coords] = self._resize(
+                self.e_coba_reference, size, rows=2).numpy()[:, mapping]
+
+        # tau_mem, refractory_time, holdoff_time
+        for key in ["tau_mem", "refractory_time", "holdoff_time"]:
+            getattr(targets, key)[coords] = pq.Quantity(
+                self._resize(
+                    getattr(self, key), size).numpy() * 1e6, "us")[mapping]
+
+        # leak, reset, threshold, membrane_capacitance
+        for key in ["leak", "reset", "threshold", "membrane_capacitance"]:
+            getattr(targets, key)[coords] = self._resize(
+                getattr(self, key), size).numpy()[mapping]
+        return targets
+
+    # pylint: disable=too-many-return-statements
+    @staticmethod
+    def _resize(entity: torch.tensor, size: int, rows: int = 1):
+        if not isinstance(entity, torch.Tensor):
+            entity = torch.tensor(entity)
+        if rows == 1:
+            if not entity.shape:
+                return torch.full((size,), entity)
+            if entity.shape[0] == 1:
+                return entity.clone().detach().repeat(size)
+            return entity.clone().detach()
+        assert rows == 2
+        if not entity.shape:
+            return torch.full((2, size), entity)
+        if len(entity.shape) == 1:
+            if entity.shape[0] == 2:
+                return entity.clone().detach().unsqueeze(1).repeat(1, size)
+            if entity.shape[0] == halco.AtomicNeuronOnDLS.size:
+                return entity.clone().detach().repeat(2, 1)
+        return entity.clone().detach()
+
+    @staticmethod
+    def _get_an_indices(neurons: List[halco.LogicalNeuronOnDLS]):
+        return [an.toEnum().value() for neuron in neurons
+                for an in neuron.get_atomic_neurons()]
+
+    @staticmethod
+    def _get_in_pop_indices(neurons: List[halco.LogicalNeuronOnDLS]):
+        return [neuron.get_atomic_neurons()[0].toEnum().value()
+                for neuron in neurons]
+
+    @staticmethod
+    def _get_an_to_in_pop_indices(neurons: List[halco.LogicalNeuronOnDLS]):
+        return [in_pop_id for in_pop_id, neuron in enumerate(neurons)
+                for _ in neuron.get_atomic_neurons()]
diff --git a/src/pyhxtorch/hxtorch/spiking/execution_instance.py b/src/pyhxtorch/hxtorch/spiking/execution_instance.py
index ede6c0c49106b9ab65243e5d2e4dba18eb0b8f15..441f76fc3f07f32ff34abe599219ba1e0668987a 100644
--- a/src/pyhxtorch/hxtorch/spiking/execution_instance.py
+++ b/src/pyhxtorch/hxtorch/spiking/execution_instance.py
@@ -2,16 +2,26 @@
 Definition of ExecutionInstance, wrapping grenade.common.ExecutionInstanceID,
 and providing functionality for chip instance configuration
 """
-from typing import Dict, List, Optional, Union
+from __future__ import annotations
+from typing import TYPE_CHECKING, Dict, List, Optional, Union
 from abc import ABC, abstractmethod
 from pathlib import Path
+import numpy as np
 
 from dlens_vx_v3 import lola, sta
 import pygrenade_vx as grenade
 import pylogging as logger
+from _hxtorch_core import init_hardware, release_hardware
+
+# pylint: disable=import-error, no-name-in-module
+from calix import calibrate
+from calix.spiking import SpikingCalibTarget, SpikingCalibOptions
+from calix.spiking.neuron import NeuronCalibTarget
 
 from hxtorch.spiking.neuron_placement import NeuronPlacement
 from hxtorch.spiking.utils import calib_helper
+if TYPE_CHECKING:
+    from hxtorch.spiking.modules import HXModule
 
 
 class ExecutionInstances(set):
@@ -61,6 +71,7 @@ class BaseExecutionInstance(ABC):
         self._id = grenade.common.ExecutionInstanceID(id(self))
         self.cadc_neurons: Optional[
             Dict[int, grenade.network.CADCRecording.Neuron]] = {}
+        self.modules: Optional[List[HXModule]] = None
 
     def __hash__(self) -> int:
         return hash(self._id)
@@ -76,8 +87,8 @@ class BaseExecutionInstance(ABC):
         return self._id
 
     @abstractmethod
-    def prepare_static_config(self) -> None:
-        """ Prepare the static configuration of the instance """
+    def calibrate(self) -> None:
+        """ Handle the calibration of the instance """
 
     @abstractmethod
     def cadc_recordings(self) -> grenade.network.CADCRecording:
@@ -104,6 +115,7 @@ class ExecutionInstance(BaseExecutionInstance):
     def __init__(
             self,
             calib_path: Optional[Union[Path, str]] = None,
+            calib_cache_dir: Optional[Union[Path, str]] = None,
             input_loopback: bool = False) -> None:
         """
         :param input_loopback: Record input spikes and use them for gradient
@@ -114,9 +126,13 @@ class ExecutionInstance(BaseExecutionInstance):
 
         self.log = logger.get(f"hxtorch.spiking.execution_instance.{self}")
 
-        self._calib_path = calib_path
+        # Load chip objects
+        self.calib_path = calib_path
+        self.calib = None
+        self.chip = None
         if calib_path is not None:
-            self.chip = self.load_calib(calib_path)
+            self.chip = self.load_calib(self.calib_path)
+        self.calib_cache_dir = calib_cache_dir
 
         self.input_loopback = input_loopback
         self.record_cadc_into_dram: Optional[bool] = None
@@ -144,20 +160,83 @@ class ExecutionInstance(BaseExecutionInstance):
             calib is loaded.
         :return: Returns the chip object for the given calibration.
         """
-        # If no calib path is given we load spiking nightly calib
+        assert calib_path is not None
         self.log.INFO(f"Loading calibration from {calib_path}")
-        self.chip = calib_helper.chip_from_file(calib_path)
+        if str(calib_path).endswith(".pkl"):
+            self.calib = calib_helper.calib_from_calix_native(calib_path)
+            self.chip = self.calib.to_chip()
+        else:
+            self.chip = calib_helper.chip_from_file(calib_path)
+        self.calib_path = calib_path
         return self.chip
 
-    def prepare_static_config(self):
-        """ Prepare the static configuration of the instance """
-        # If chip is still None we load default nightly calib
-        if self.chip is None:
+    def calibrate(self):
+        """
+        Manage calibration of the chip instance. If a calibration path is
+        provided, the modules' parameters are loaded from it where possible.
+        If no calibration path is given, calibration targets are gathered
+        from the modules' parameter objects and the chip is calibrated
+        accordingly. If no parameter changes are detected since the last
+        run, the chip is not re-calibrated.
+        """
+        if not any(m.calib_changed_since_last_run() for m in self.modules
+                   if hasattr(m, "calib_changed_since_last_run")):
+            return
+
+        execute_calib = False
+        if not self.calib_path:
+            self.log.TRACE("No calibration path present. Try to infer "
+                           + "parameters for calibrations")
+            # gather calibration information
+            target = \
+                SpikingCalibTarget(
+                    neuron_target=NeuronCalibTarget.DenseDefault)
+            # initialize `synapse_dac_bias` and `i_synin_gm` as `None` to
+            # allow checking for different values in different populations
+            target.neuron_target.synapse_dac_bias = None
+            target.neuron_target.i_synin_gm = np.array([None, None])
+            # if any neuron module has params, use for calibration
+            for module in self.modules:
+                if hasattr(module, "calibration_from_params") and hasattr(
+                        module, "params") and module.params is not None \
+                        and hasattr(module.params, "to_calix_targets"):
+                    self.log.INFO(f"Add calib params of '{module}'.")
+                    module.calibration_from_params(target)
+                    execute_calib = True
+
+        # otherwise use nightly calibration
+        if not self.calib_path and not execute_calib:
             self.log.INFO(
-                "No chip object present. Using chip object with default "
-                + "nightly calib.")
-            self.chip = self.load_calib(calib_helper.nightly_calib_path())
-
+                "No chip object present and no parameters for calibration "
+                + "provided. Using chip object with default nightly calib.")
+            self.chip = self.load_calib(
+                calib_helper.nightly_calix_native_path())
+
+        if not execute_calib:
+            # Make sure experiment holds chip config
+            assert self.chip is not None
+            # EA: TODO: Probably we should do this in each case
+            if self.calib is None:
+                self.log.WARN(
+                    "Tried to infer params from calib but no readable "
+                    "calibration present. This might be because a coco binary "
+                    "was indicated as calibration file. Skipped.")
+            else:
+                self.log.INFO(
+                    "Try to infer params from loaded calibration file...")
+                for module in self.modules:
+                    if hasattr(module, "params_from_calibration"):
+                        module.params_from_calibration(self.calib.target)
+        else:
+            release_hardware()
+            self.log.INFO("Calibrating...")
+            self.calib = calibrate(
+                target, SpikingCalibOptions(), self.calib_cache_dir)
+            dumper = sta.PlaybackProgramBuilderDumper()
+            self.calib.apply(dumper)
+            self.chip = sta.convert_to_chip(dumper.done())
+            init_hardware()
+            self.log.INFO("Calibration finished... ")
         self.log.TRACE(f"Prepared static config of {self}.")
 
     def cadc_recordings(self) -> grenade.network.CADCRecording:
diff --git a/src/pyhxtorch/hxtorch/spiking/experiment.py b/src/pyhxtorch/hxtorch/spiking/experiment.py
index b71da4ca6fdf156bb7e9bc0409834edcb6e97b0a..15a17adbcd8987977a551514130e12fd33291b13 100644
--- a/src/pyhxtorch/hxtorch/spiking/experiment.py
+++ b/src/pyhxtorch/hxtorch/spiking/experiment.py
@@ -142,10 +142,14 @@ class Experiment(BaseExperiment):
         configurations to. Additionally this method defines the
         pre_static_config builder injected to grenade at run.
         """
+        self._execution_instances.update([
+            module.execution_instance for module in self.modules.nodes])
         if self._static_config_prepared:  # Only do this once
             return
         for execution_instance in self._execution_instances:
-            execution_instance.prepare_static_config()
+            modules = [m for m in self.modules.nodes
+                       if m.execution_instance == execution_instance]
+            execution_instance.modules = modules
         self._static_config_prepared = True
         log.TRACE("Preparation of static config done.")
 
@@ -360,6 +364,11 @@ class Experiment(BaseExperiment):
         """
         self._projections.append(module)
 
+    def _calibrate(self):
+        """ """
+        for execution_instance in self._execution_instances:
+            execution_instance.calibrate()
+
     def get_hw_results(self, runtime: Optional[int]) \
             -> Dict[grenade.network.PopulationOnNetwork,
                     Tuple[Optional[torch.Tensor], ...]]:
@@ -375,8 +384,6 @@ class Experiment(BaseExperiment):
             the corresponding module's `post_process` method.
         """
         if not self.mock:
-            self._execution_instances.update([
-                module.execution_instance for module in self.modules.nodes])
             self._prepare_static_config()
 
         # Preprocess layer
@@ -393,6 +400,9 @@ class Experiment(BaseExperiment):
                         self._projections, self._populations):
                 module.register_hw_entity()
 
+        # Calibration
+        self._calibrate()
+
         # Generate network graph
         network = self._generate_network_graphs()
 
diff --git a/src/pyhxtorch/hxtorch/spiking/functional/__init__.py b/src/pyhxtorch/hxtorch/spiking/functional/__init__.py
index c4ffd90ddcbaf762b53829bc371507880dcc1e76..62ac8fe0f078cb867221855abc8482918fc0341c 100644
--- a/src/pyhxtorch/hxtorch/spiking/functional/__init__.py
+++ b/src/pyhxtorch/hxtorch/spiking/functional/__init__.py
@@ -1,5 +1,7 @@
-from hxtorch.spiking.functional.lif import CUBALIFParams, cuba_lif_integration
-from hxtorch.spiking.functional.li import CUBALIParams, cuba_li_integration
+from hxtorch.spiking.functional.lif import (
+    CalibratedCUBALIFParams, CUBALIFParams, cuba_lif_integration)
+from hxtorch.spiking.functional.li import (
+    CalibratedCUBALIParams, CUBALIParams, cuba_li_integration)
 from hxtorch.spiking.functional.iaf import (
     CUBAIAFParams, cuba_iaf_integration, cuba_refractory_iaf_integration)
 from hxtorch.spiking.functional.linear import linear, linear_sparse
diff --git a/src/pyhxtorch/hxtorch/spiking/functional/iaf.py b/src/pyhxtorch/hxtorch/spiking/functional/iaf.py
index dbf163223b26cb214d2e4afccaf94c4a9b167ad7..5af62b21bf6a4f9854f57f29778ba3d24d9c6199 100644
--- a/src/pyhxtorch/hxtorch/spiking/functional/iaf.py
+++ b/src/pyhxtorch/hxtorch/spiking/functional/iaf.py
@@ -3,21 +3,30 @@ Integrate and fire neurons
 """
 # Allow redefining builtin for PyTorch consistency
 # pylint: disable=redefined-builtin, invalid-name, too-many-locals
-from typing import NamedTuple, Tuple, Optional
+from typing import NamedTuple, Tuple, Optional, Union
+import dataclasses
 import torch
 
-from hxtorch.spiking.functional.threshold import threshold
+from hxtorch.spiking.calibrated_params import CalibratedParams
+from hxtorch.spiking.functional.threshold import threshold as spiking_threshold
 from hxtorch.spiking.functional.refractory import refractory_update
 from hxtorch.spiking.functional.unterjubel import Unterjubel
 
 
 class CUBAIAFParams(NamedTuple):
     """ Parameters for IAF integration and backward path """
-    tau_mem_inv: torch.Tensor
-    tau_syn_inv: torch.Tensor
-    tau_ref: torch.Tensor = torch.tensor(0.)
-    v_th: torch.Tensor = torch.tensor(1.)
-    v_reset: torch.Tensor = torch.tensor(0.)
+    tau_mem: torch.Tensor
+    tau_syn: torch.Tensor
+    refractory_time: torch.Tensor = torch.tensor(0.)
+    threshold: torch.Tensor = torch.tensor(1.)
+    reset: torch.Tensor = torch.tensor(0.)
+    alpha: float = 50.0
+    method: str = "superspike"
+
+
+@dataclasses.dataclass(unsafe_hash=True)
+class CalibratedCUBAIAFParams(CalibratedParams):
+    """ Parameters for CUBA LIF integration and backward path """
     alpha: float = 50.0
     method: str = "superspike"
 
@@ -42,21 +51,23 @@ def iaf_step(z: torch.Tensor, v: torch.Tensor, i: torch.Tensor,
     :returns: Returns a tuple (z, v, i) holding the tensors of time step t + 1.
     """
     # Membrane increment
-    dv = dt * params.tau_mem_inv * i
+    dv = dt / params.tau_mem * i
     v = Unterjubel.apply(dv + v, v_hw) if z_hw is not None else dv + v
     # Current
-    di = -dt * params.tau_syn_inv * i
+    di = -dt / params.tau_syn * i
     i = i + di + input
     # Spikes
-    spike = threshold(v - params.v_th, params.method, params.alpha)
+    spike = spiking_threshold(
+        v - params.threshold, params.method, params.alpha)
     z = Unterjubel.apply(spike, z_hw) if z_hw is not None else spike
     # Reset
     if z_hw is None:
-        v = (1 - z.detach()) * v + z.detach() * params.v_reset
+        v = (1 - z.detach()) * v + z.detach() * params.reset
     return z, v, i
 
 
-def cuba_iaf_integration(input: torch.Tensor, params: NamedTuple,
+def cuba_iaf_integration(input: torch.Tensor,
+                         params: Union[CalibratedCUBAIAFParams, CUBAIAFParams],
                          hw_data: Optional[torch.Tensor] = None,
                          dt: float = 1e-6) \
         -> Tuple[torch.Tensor, torch.Tensor]:
@@ -66,9 +77,9 @@ def cuba_iaf_integration(input: torch.Tensor, params: NamedTuple,
     Integrates according to:
         v^{t+1} = dt / \tau_{men} * (v_l - v^t + i^t) + v^t
         i^{t+1} = i^t * (1 - dt / \tau_{syn}) + x^t
-        z^{t+1} = 1 if v^{t+1} > params.v_th
+        z^{t+1} = 1 if v^{t+1} > params.threshold
         v^{t+1} = v_reset if z^{t+1} == 1
-    Assumes i^0, v^0 = 0., v_reset
+    Assumes i^0, v^0 = 0., params.reset
     :note: One `dt` synaptic delay between input and output
     :param input: Input spikes in shape (batch, time, neurons).
     :param params: LIFParams object holding neuron parameters.
@@ -78,7 +89,7 @@ def cuba_iaf_integration(input: torch.Tensor, params: NamedTuple,
     dev = input.device
     T, bs, ps = input.shape
     z, i, v = torch.zeros(bs, ps).to(dev), torch.tensor(0.).to(dev), \
-        torch.empty(bs, ps).fill_(params.v_reset).to(dev)
+        torch.empty(bs, ps).fill_(params.reset).to(dev)
     z_hw, v_cadc, v_madc = None, None, None
 
     if hw_data is not None:
@@ -130,7 +141,7 @@ def cuba_refractory_iaf_integration(input: torch.Tensor, params: NamedTuple,
     dev = input.device
     T, bs, ps = input.shape
     z, i, v = torch.zeros(bs, ps).to(dev), torch.tensor(0.).to(dev), \
-        torch.empty(bs, ps).fill_(params.v_reset).to(dev)
+        torch.empty(bs, ps).fill_(params.reset).to(dev)
     z_hw, v_cadc, v_madc = None, None, None
 
     if hw_data is not None:
diff --git a/src/pyhxtorch/hxtorch/spiking/functional/li.py b/src/pyhxtorch/hxtorch/spiking/functional/li.py
index 75e826d98f149d2319014fb37c80dd8e85f5b5c3..945cee0a486bdd35b056f4480f7a8fa8895870f1 100644
--- a/src/pyhxtorch/hxtorch/spiking/functional/li.py
+++ b/src/pyhxtorch/hxtorch/spiking/functional/li.py
@@ -1,8 +1,11 @@
 """
 Leaky-integrate neurons
 """
-from typing import NamedTuple, Optional
+from typing import NamedTuple, Optional, Union
+import dataclasses
 import torch
+
+from hxtorch.spiking.calibrated_params import CalibratedParams
 from hxtorch.spiking.functional.unterjubel import Unterjubel
 
 
@@ -10,14 +13,20 @@ class CUBALIParams(NamedTuple):
 
     """ Parameters for CUBA LI integration and backward path """
 
-    tau_mem_inv: torch.Tensor
-    tau_syn_inv: torch.Tensor
-    v_leak: torch.Tensor = torch.tensor(0.)
+    tau_mem: torch.Tensor
+    tau_syn: torch.Tensor
+    leak: torch.Tensor = torch.tensor(0.)
+
+
+@dataclasses.dataclass(unsafe_hash=True)
+class CalibratedCUBALIParams(CalibratedParams):
+    """ Parameters for CUBA LI integration and backward path """
 
 
 # Allow redefining builtin for PyTorch consistency
 # pylint: disable=redefined-builtin, invalid-name, too-many-locals
-def cuba_li_integration(input: torch.Tensor, params: CUBALIParams,
+def cuba_li_integration(input: torch.Tensor,
+                        params: Union[CalibratedCUBALIParams, CUBALIParams],
                         hw_data: Optional[torch.Tensor] = None,
                         dt: float = 1e-6) -> torch.Tensor:
     """
@@ -38,7 +47,7 @@ def cuba_li_integration(input: torch.Tensor, params: CUBALIParams,
     dev = input.device
     T, bs, ps = input.shape
     i, v = torch.tensor(0.).to(dev), \
-        torch.empty(bs, ps).fill_(params.v_leak).to(dev)
+        torch.empty(bs, ps).fill_(params.leak).to(dev)
     v_cadc, v_madc = None, None
 
     if hw_data is not None:
@@ -49,12 +58,12 @@ def cuba_li_integration(input: torch.Tensor, params: CUBALIParams,
 
     for ts in range(T):
         # Membrane
-        dv = dt * params.tau_mem_inv * (params.v_leak - v + i)
+        dv = dt / params.tau_mem * (params.leak - v + i)
         v = Unterjubel.apply(v + dv, v_cadc[ts]) \
             if v_cadc is not None else v + dv
 
         # Current
-        di = -dt * params.tau_syn_inv * i
+        di = -dt / params.tau_syn * i
         i = i + di + input[ts]
 
         # Save data
diff --git a/src/pyhxtorch/hxtorch/spiking/functional/lif.py b/src/pyhxtorch/hxtorch/spiking/functional/lif.py
index d802457dad87ddaa0003c55ed9b048336ea630e6..5a1f01e1640f21f2d1a0252b834ba980ab50870b 100644
--- a/src/pyhxtorch/hxtorch/spiking/functional/lif.py
+++ b/src/pyhxtorch/hxtorch/spiking/functional/lif.py
@@ -1,10 +1,12 @@
 """
 Leaky-integrate and fire neurons
 """
-from typing import NamedTuple, Tuple, Optional
+from typing import NamedTuple, Tuple, Optional, Union
+import dataclasses
 import torch
 
-from hxtorch.spiking.functional.threshold import threshold
+from hxtorch.spiking.calibrated_params import CalibratedParams
+from hxtorch.spiking.functional.threshold import threshold as spiking_threshold
 from hxtorch.spiking.functional.unterjubel import Unterjubel
 
 
@@ -12,12 +14,19 @@ class CUBALIFParams(NamedTuple):
 
     """ Parameters for CUBA LIF integration and backward path """
 
-    tau_mem_inv: torch.Tensor
-    tau_syn_inv: torch.Tensor
-    tau_ref: torch.Tensor = torch.tensor(0.)
-    v_leak: torch.Tensor = torch.tensor(0.)
-    v_th: torch.Tensor = torch.tensor(1.)
-    v_reset: torch.Tensor = torch.tensor(0.)
+    tau_mem: torch.Tensor
+    tau_syn: torch.Tensor
+    refractory_time: torch.Tensor = torch.tensor(0.)
+    leak: torch.Tensor = torch.tensor(0.)
+    threshold: torch.Tensor = torch.tensor(1.)
+    reset: torch.Tensor = torch.tensor(0.)
+    alpha: float = 50.0
+    method: str = "superspike"
+
+
+@dataclasses.dataclass(unsafe_hash=True)
+class CalibratedCUBALIFParams(CalibratedParams):
+    """ Parameters for CUBA LIF integration and backward path """
     alpha: float = 50.0
     method: str = "superspike"
 
@@ -25,7 +34,7 @@ class CUBALIFParams(NamedTuple):
 # Allow redefining builtin for PyTorch consistency
 # pylint: disable=redefined-builtin, invalid-name, too-many-locals
 def cuba_lif_integration(input: torch.Tensor,
-                         params: CUBALIFParams,
+                         params: Union[CalibratedCUBALIFParams, CUBALIFParams],
                          hw_data: Optional[torch.Tensor] = None,
                          dt: float = 1e-6) \
         -> Tuple[torch.Tensor, torch.Tensor]:
@@ -35,8 +44,8 @@ def cuba_lif_integration(input: torch.Tensor,
     Integrates according to:
         i^{t+1} = i^t * (1 - dt / \tau_{syn}) + x^t
         v^{t+1} = dt / \tau_{men} * (v_l - v^t + i^t) + v^t
-        z^{t+1} = 1 if v^{t+1} > params.v_th
-        v^{t+1} = params.v_reset if z^{t+1} == 1
+        z^{t+1} = 1 if v^{t+1} > params.threshold
+        v^{t+1} = params.reset if z^{t+1} == 1
 
     Assumes i^0, v^0 = 0, v_leak
     :note: One `dt` synaptic delay between input and output
@@ -53,7 +62,7 @@ def cuba_lif_integration(input: torch.Tensor,
     dev = input.device
     T, bs, ps = input.shape
     z, i, v = torch.zeros(bs, ps).to(dev), torch.tensor(0.).to(dev), \
-        torch.empty(bs, ps).fill_(params.v_leak).to(dev)
+        torch.empty(bs, ps).fill_(params.leak).to(dev)
     z_hw, v_cadc, v_madc = None, None, None
 
     if hw_data is not None:
@@ -65,21 +74,22 @@ def cuba_lif_integration(input: torch.Tensor,
 
     for ts in range(T):
         # Membrane decay
-        dv = dt * params.tau_mem_inv * ((params.v_leak - v) + i)
+        dv = dt / params.tau_mem * ((params.leak - v) + i)
         v = Unterjubel.apply(v + dv, v_cadc[ts]) \
             if v_cadc is not None else v + dv
 
         # Current
-        di = -dt * params.tau_syn_inv * i
+        di = -dt / params.tau_syn * i
         i = i + di + input[ts]
 
         # Spikes
-        spike = threshold(v - params.v_th, params.method, params.alpha)
+        spike = spiking_threshold(
+            v - params.threshold, params.method, params.alpha)
         z = Unterjubel.apply(spike, z_hw[ts]) if z_hw is not None else spike
 
         # Reset
         if v_cadc is None:
-            v = (1 - z.detach()) * v + z.detach() * params.v_reset
+            v = (1 - z.detach()) * v + z.detach() * params.reset
 
         # Save data
         current.append(i)
diff --git a/src/pyhxtorch/hxtorch/spiking/functional/refractory.py b/src/pyhxtorch/hxtorch/spiking/functional/refractory.py
index a747953358f1b1906ed7027faff920e0fab77acd..8fc1d8ea82970ef28510786ea49bf477686a3299 100644
--- a/src/pyhxtorch/hxtorch/spiking/functional/refractory.py
+++ b/src/pyhxtorch/hxtorch/spiking/functional/refractory.py
@@ -24,9 +24,9 @@ def refractory_update(z: torch.Tensor, v: torch.Tensor,
     # Refractory mask
     ref_mask = (ref_state > 0).long()
     # Update neuron states
-    v = (1 - ref_mask) * v + ref_mask * params.v_reset
+    v = (1 - ref_mask) * v + ref_mask * params.reset
     z = (1 - ref_mask) * z
     # Update refractory state
     ref_state = (1 - z) * torch.nn.functional.relu(ref_state - ref_mask) \
-        + z * params.tau_ref / dt
+        + z * params.refractory_time / dt
     return z, v, ref_state
diff --git a/src/pyhxtorch/hxtorch/spiking/modules/batch_dropout.py b/src/pyhxtorch/hxtorch/spiking/modules/batch_dropout.py
index 2597065c4cedacc3dbb8aa6488ed72c1fba76efd..072f26849f2f473e1654be2a3ff1452120fe3ba7 100644
--- a/src/pyhxtorch/hxtorch/spiking/modules/batch_dropout.py
+++ b/src/pyhxtorch/hxtorch/spiking/modules/batch_dropout.py
@@ -11,6 +11,7 @@ import hxtorch.spiking.functional as F
 from hxtorch.spiking.handle import NeuronHandle
 from hxtorch.spiking.modules.hx_module import HXFunctionalModule
 if TYPE_CHECKING:
+    from hxtorch.spiking.modules.hx_module import HXBaseModule
     from hxtorch.spiking.experiment import Experiment
 
 log = logger.get("hxtorch.spiking.modules")
@@ -46,7 +47,6 @@ class BatchDropout(HXFunctionalModule):  # pylint: disable=abstract-method
             functionality or a torch.autograd.Function implementing the
             module's forward and backward operation. Defaults to
             `batch_dropout`.
-        :param execution_instance: Execution instance to place to.
         """
         super().__init__(experiment=experiment, func=func)
 
diff --git a/src/pyhxtorch/hxtorch/spiking/modules/types.py b/src/pyhxtorch/hxtorch/spiking/modules/types.py
index 1def57bd7f015ae87a9855b09e6475615fdc0229..5437935a4746b243aa632a80fa6348fd6c111561 100644
--- a/src/pyhxtorch/hxtorch/spiking/modules/types.py
+++ b/src/pyhxtorch/hxtorch/spiking/modules/types.py
@@ -2,10 +2,11 @@
 Define module types
 """
 from __future__ import annotations
-from typing import TYPE_CHECKING, Union, Optional, Callable
+from typing import TYPE_CHECKING, Callable, Union, Dict, Optional
 import torch
 from hxtorch.spiking.modules.hx_module import HXModule
 if TYPE_CHECKING:
+    from calix.spiking import SpikingCalibTarget
     from hxtorch.spiking.experiment import Experiment
     from hxtorch.spiking.execution_instance import ExecutionInstance
 
@@ -32,11 +33,56 @@ class Population(HXModule):
         """
         super().__init__(experiment, func, execution_instance)
         self.size = size
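+        # Calibration-related state: a parameter object (set by derived
+        # neuron modules) and a hash used to detect parameter changes
+        # between runs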
+        self.read_params_from_calibration = True
+        self.params = None
+        self._params_hash = None
 
     def extra_repr(self) -> str:
         """ Add additional information """
         return f"size={self.size}, {super().extra_repr()}"
 
+    def calib_changed_since_last_run(self) -> bool:
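+        """
+        Check whether the parameters of this population changed since the
+        last hardware run and therefore require a re-calibration.
+
+        :return: True if the parameters changed since the last check (or no
+            check happened yet), otherwise False.
+        """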
+        if not hasattr(self, "params"):
+            return False
+        new_params_hash = hash(self.params)
+        calibrate = self._params_hash != new_params_hash
+        self._params_hash = new_params_hash
+        return calibrate
+
+    def params_from_calibration(
+            self, spiking_calib_target: SpikingCalibTarget) -> None:
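+        """
+        Load the parameters of this population from the given calibration
+        target, if `read_params_from_calibration` is enabled and the params
+        object supports it.
+
+        :param spiking_calib_target: Calibration target parameters of all
+            neuron populations registered in the self.experiment instance.
+        """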
+        if hasattr(self, "params"):
+            # Create a hash in each case, otherwise
+            # calib_changed_since_last_run gets triggered
+            self._params_hash = hash(self.params)
+        if (not self.read_params_from_calibration
+                or not hasattr(self, "params")
+                or not hasattr(self.params, "from_calix_targets")):
+            return
+        # get populations HW neurons
+        neurons = self.execution_instance.neuron_placement.id2logicalneuron(
+            self.unit_ids)
+        # `from_calix_targets` updates the params object in place
+        self.params.from_calix_targets(
+            spiking_calib_target.neuron_target, neurons)
+        self._params_hash = hash(self.params)
+        # get params from calib target
+        self.extra_kwargs.update({"params": self.params})
+
+    def calibration_from_params(
+            self, spiking_calib_target: SpikingCalibTarget) -> Dict:
+        """
+        Add population specific calibration targets to the experiment-wide
+        calibration target, which holds information for all populations.
+
+        :param spiking_calib_target: Calibration target parameters of all
+            neuron populations registered in the self.experiment instance.
+        :returns: The neuron calibration target with this population's
+            parameters applied.
+        """
+        neurons = self.execution_instance.neuron_placement.id2logicalneuron(
+            self.unit_ids)
+        return self.params.to_calix_targets(
+            spiking_calib_target.neuron_target, neurons)
+
 
 # c.f.: https://github.com/pytorch/pytorch/issues/42305
 # pylint: disable=abstract-method
diff --git a/src/pyhxtorch/hxtorch/spiking/utils/calib_helper.py b/src/pyhxtorch/hxtorch/spiking/utils/calib_helper.py
index c6d06edc7f64bc11eac23812c3c31a92fb25c11f..db5b990dd6db0693a230507ed7e521124384a1fb 100644
--- a/src/pyhxtorch/hxtorch/spiking/utils/calib_helper.py
+++ b/src/pyhxtorch/hxtorch/spiking/utils/calib_helper.py
@@ -1,6 +1,9 @@
 """
 Helpers to handle calibrations
 """
+from __future__ import annotations
+from typing import Union
+import pickle
 from pathlib import Path
 from dlens_vx_v3 import sta
 
@@ -30,6 +33,29 @@ def chip_from_file(path: str) -> dict:
     return chip_from_portable_binary(data)
 
 
+def calib_from_calix_native(path: Union[str, Path]):
+    """
+    Load the calibration result from a calix-native pickle dump.
+
+    :param path: Path to file containing the pickled calix result and target.
+    :return: The unpickled calix calibration result.
+    """
+    with open(path, "rb") as calibfile:
+        result = pickle.load(calibfile)
+    return result
+
+
+def target_from_calix_native(path: Union[str, Path]):
+    """
+    Load the calibration target from a calix-native pickle dump.
+
+    :param path: Path to file containing the pickled calix result and target.
+    :return: The calibration target stored with the calix result.
+    """
+    with open(path, "rb") as calibfile:
+        result = pickle.load(calibfile)
+    return result.target
+
+
 def nightly_calib_path(name: str = "spiking") -> Path:
     """
     Find path for nightly calibration.
@@ -38,3 +64,15 @@ def nightly_calib_path(name: str = "spiking") -> Path:
     path = f"/wang/data/calibration/hicann-dls-sr-hx/{identifier}/stable/"\
         f"latest/{name}_cocolist.pbin"
     return Path(path)
+
+
+def nightly_calix_native_path(name: str = "spiking") -> Path:
+    """
+    Find path for nightly calibration of calix-native format
+
+    :param name: calibration name prefix.
+    """
+    identifier = _hxtorch_core.get_unique_identifier()
+    path = f"/wang/data/calibration/hicann-dls-sr-hx/{identifier}/stable/"\
+        f"latest/{name}_calix-native.pkl"
+    return Path(path)
diff --git a/tests/hw/test_spiking_calib.py b/tests/hw/test_spiking_calib.py
new file mode 100644
index 0000000000000000000000000000000000000000..c593b754c128556f955fcf6f9ccdd12eac838b47
--- /dev/null
+++ b/tests/hw/test_spiking_calib.py
@@ -0,0 +1,375 @@
+"""
+Test SNN examples
+"""
+import unittest
+from functools import partial
+import torch
+import numpy as np
+import quantities as pq
+
+from dlens_vx_v3 import halco
+from calix.spiking.neuron import NeuronCalibTarget
+
+from hxtorch.spiking.calibrated_params import CalibratedParams
+
+
+class TestCalibratedParams(unittest.TestCase):
+    """ Tests implicit neuron calibration """
+
+    def test_init(self) -> None:
+        """ Test construction without errors """
+        # Default
+        CalibratedParams()
+
+        # Fill
+        CalibratedParams(
+            leak=torch.as_tensor(80),
+            reset=torch.as_tensor(80),
+            threshold=torch.as_tensor(125),
+            tau_mem=torch.as_tensor(10.),
+            tau_syn=torch.as_tensor(10.),
+            i_synin_gm=torch.as_tensor(500),
+            e_coba_reversal=torch.as_tensor(500),
+            e_coba_reference=torch.as_tensor(500),
+            membrane_capacitance=torch.as_tensor(63),
+            refractory_time=torch.as_tensor(2.),
+            synapse_dac_bias=torch.as_tensor(600),
+            holdoff_time=torch.as_tensor(0))
+
+    def test_from_calix_targets(self) -> None:
+        # Some logical neurons
+        neurons = [
+            halco.LogicalNeuronOnDLS(
+                halco.LogicalNeuronCompartments(
+                    {halco.CompartmentOnLogicalNeuron():
+                     [halco.AtomicNeuronOnLogicalNeuron()]}),
+                halco.AtomicNeuronOnDLS(halco.EnumRanged_512_(i)))
+            for i in range(23, 48)]
+        coords = [
+            an.toEnum().value() for neuron in neurons
+            for an in neuron.get_atomic_neurons()]
+        selector = [
+            neuron.get_atomic_neurons()[0].toEnum().value()
+            for neuron in neurons]
+
+        # Default
+        target = NeuronCalibTarget.DenseDefault
+        params = CalibratedParams()
+        params.from_calix_targets(target, neurons)
+        self.check_params(target, params, selector, coords)
+
+        # All numbers
+        target = NeuronCalibTarget(
+            leak=80,
+            reset=70,
+            threshold=125,
+            tau_mem=10. * pq.us,
+            tau_syn=10. * pq.us,
+            i_synin_gm=500,
+            e_coba_reversal=None,
+            e_coba_reference=None,
+            membrane_capacitance=63,
+            refractory_time=2. * pq.us,
+            synapse_dac_bias=600,
+            holdoff_time=0 * pq.us)
+        params = CalibratedParams()
+        params.from_calix_targets(target, neurons)
+        self.check_params(target, params, selector, coords)
+
+        # Some larger logical neurons
+        neurons = [
+            halco.LogicalNeuronOnDLS(
+                halco.LogicalNeuronCompartments(
+                    {halco.CompartmentOnLogicalNeuron():
+                     [halco.AtomicNeuronOnLogicalNeuron(
+                         halco.EnumRanged_256_(0)),
+                      halco.AtomicNeuronOnLogicalNeuron(
+                          halco.EnumRanged_256_(1))]}),
+                halco.AtomicNeuronOnDLS(halco.EnumRanged_512_(2 * i)))
+            for i in range(25)]
+        coords = [
+            an.toEnum().value() for neuron in neurons
+            for an in neuron.get_atomic_neurons()]
+        selector = [
+            neuron.get_atomic_neurons()[0].toEnum().value()
+            for neuron in neurons]
+
+        params = CalibratedParams()
+        params.from_calix_targets(target, neurons)
+        self.check_params(target, params, selector, coords)
+
+        target = NeuronCalibTarget(
+            leak=80,
+            reset=70,
+            threshold=125,
+            tau_mem=10. * pq.us,
+            tau_syn=10. * pq.us,
+            i_synin_gm=500,
+            e_coba_reversal=None,
+            e_coba_reference=None,
+            membrane_capacitance=63,
+            refractory_time=2. * pq.us,
+            synapse_dac_bias=600,
+            holdoff_time=0 * pq.us)
+        params = CalibratedParams()
+        params.from_calix_targets(target, neurons)
+        self.check_params(target, params, selector, coords)
+
+    def test_to_calix_targets(self) -> None:
+        # Default target as ExecutionInstance does
+        # Some logical neurons
+        neurons = [
+            halco.LogicalNeuronOnDLS(
+                halco.LogicalNeuronCompartments(
+                    {halco.CompartmentOnLogicalNeuron():
+                     [halco.AtomicNeuronOnLogicalNeuron()]}),
+                halco.AtomicNeuronOnDLS(halco.EnumRanged_512_(i)))
+            for i in range(23, 48)]
+        coords = [
+            an.toEnum().value() for neuron in neurons
+            for an in neuron.get_atomic_neurons()]
+        size = len(neurons)
+
+        # All numbers, non-default
+        params = CalibratedParams(
+            leak=110,
+            reset=120,
+            threshold=130,
+            tau_mem=2e-5,
+            tau_syn=3e-5,
+            i_synin_gm=600,
+            e_coba_reversal=None,
+            e_coba_reference=None,
+            membrane_capacitance=55,
+            refractory_time=3e-6,
+            synapse_dac_bias=700,
+            holdoff_time=1)
+        target = NeuronCalibTarget.DenseDefault
+        target.synapse_dac_bias = None
+        target.i_synin_gm = np.array([None, None])
+        params.to_calix_targets(target, neurons)
+        self.check_targets(target, params, coords)
+
+        # All tensors, non-default
+        params = CalibratedParams(
+            leak=torch.tensor(110),
+            reset=torch.tensor(120),
+            threshold=torch.tensor(130),
+            tau_mem=torch.tensor(2e-5),
+            tau_syn=torch.tensor(3e-5),
+            i_synin_gm=torch.tensor(600),
+            e_coba_reversal=None,
+            e_coba_reference=None,
+            membrane_capacitance=torch.tensor(55),
+            refractory_time=torch.tensor(3e-6),
+            synapse_dac_bias=torch.tensor(700),
+            holdoff_time=torch.tensor(1))
+        target = NeuronCalibTarget.DenseDefault
+        target.synapse_dac_bias = None
+        target.i_synin_gm = np.array([None, None])
+        params.to_calix_targets(target, neurons)
+        self.check_targets(target, params, coords)
+
+        # All tensors of size, non-default
+        params = CalibratedParams(
+            leak=torch.full((size,), 110),
+            reset=torch.full((size,), 120),
+            threshold=torch.full((size,), 130),
+            tau_mem=torch.full((size,), 2e-5),
+            tau_syn=torch.full((2, size,), 3e-5),
+            i_synin_gm=torch.full((2,), 600),
+            e_coba_reversal=None,
+            e_coba_reference=None,
+            membrane_capacitance=torch.full((size,), 55),
+            refractory_time=torch.full((size,), 3e-6),
+            synapse_dac_bias=torch.tensor(700),
+            holdoff_time=torch.full((size,), 1))
+        target = NeuronCalibTarget.DenseDefault
+        target.synapse_dac_bias = None
+        target.i_synin_gm = np.array([None, None])
+        params.to_calix_targets(target, neurons)
+        self.check_targets(target, params, coords)
+
+        target.i_synin_gm = np.array([400, None])
+        self.assertRaises(
+            AttributeError, partial(params.to_calix_targets, target, neurons))
+
+        target.synapse_dac_bias = 600
+        self.assertRaises(
+            AttributeError, partial(params.to_calix_targets, target, neurons))
+
+    def check_params(self, target, params, selector, coords):
+        for key in ["tau_mem", "refractory_time", "holdoff_time",
+                    "leak", "reset", "threshold", "membrane_capacitance"]:
+            self.assertTrue(
+                torch.equal(
+                    torch.tensor([len(selector)]),
+                    torch.tensor(getattr(params, key).shape)))
+            if (not isinstance(getattr(target, key), np.ndarray)
+                or not getattr(target, key).shape):
+                self.assertTrue(
+                    torch.all(
+                        torch.tensor(getattr(target, key))
+                        == getattr(params, key)))
+            elif getattr(target, key).shape == (halco.AtomicNeuronOnDLS.size,):
+                self.assertTrue(
+                    torch.equal(
+                        torch.tensor(
+                            getattr(target, key)[selector]),
+                            getattr(params, key)))
+
+        # tau_syn
+        if (not isinstance(target.tau_syn, np.ndarray)
+            or not target.tau_syn.shape):
+            self.assertTrue(
+                torch.equal(
+                    torch.tensor([len(selector)]),
+                    torch.tensor(params.tau_syn.shape)))
+            self.assertTrue(
+                torch.all(
+                    torch.tensor(target.tau_syn) == params.tau_syn))
+        elif target.tau_syn.shape == (
+            halco.SynapticInputOnNeuron.size, halco.AtomicNeuronOnDLS.size):
+            self.assertTrue(
+                torch.equal(
+                    torch.tensor(
+                        [halco.SynapticInputOnNeuron.size, len(selector)]),
+                    torch.tensor(params.tau_syn.shape)))
+            self.assertTrue(
+                torch.equal(
+                    torch.tensor(target.tau_syn[:, selector]), params.tau_syn))
+        elif target.tau_syn.shape == (halco.SynapticInputOnNeuron.size,):
+            self.assertTrue(
+                torch.equal(
+                    torch.tensor([len(selector)]),
+                    torch.tensor(params.tau_syn.shape)))
+            self.assertTrue(
+                torch.equal(
+                    torch.tensor(target.tau_syn), params.tau_syn))
+        elif target.tau_syn.shape == (halco.AtomicNeuronOnDLS.size,):
+            self.assertTrue(
+                torch.equal(
+                    torch.tensor([len(selector)]),
+                    torch.tensor(params.tau_syn.shape)))
+            self.assertTrue(
+                torch.equal(
+                    torch.tensor(target.tau_syn)[selector], params.tau_syn))
+
+        for key in ["e_coba_reversal", "e_coba_reference"]:
+            if getattr(target, key) is None:
+                self.assertIsNone(getattr(params, key))
+            else:
+                self.assertTrue(
+                    torch.equal(
+                        torch.tensor(
+                            [halco.SynapticInputOnNeuron.size, len(selector)]),
+                        torch.tensor(getattr(params, key).shape)))
+                if getattr(target, key).shape == (
+                    halco.SynapticInputOnNeuron.size,):
+                    if all(torch.isnan(torch.tensor(getattr(target, key)))):
+                        self.assertTrue(all(torch.isnan(getattr(params, key))))
+                    else:
+                        self.assertTrue(
+                            torch.all(
+                                torch.tensor(getattr(target, key))
+                                == getattr(params, key)))
+                else:
+                    if torch.all(
+                        torch.isnan(
+                            torch.tensor(getattr(target, key))[:, coords])):
+                        self.assertTrue(
+                            torch.all(torch.isnan(getattr(params, key))))
+                    else:
+                        self.assertTrue(
+                            torch.all(
+                                torch.tensor(
+                                    getattr(target, key))[:, selector]
+                                    == getattr(params, key)))
+
+    def check_targets(self, target, params, coords):
+        # Leak
+        for key in ["leak",
+                    "reset",
+                    "threshold",
+                    "membrane_capacitance"]:
+            this_target = getattr(target, key)
+            this_param = getattr(params, key)
+            self.assertTrue(
+                torch.equal(
+                    torch.tensor(this_target.shape),
+                    torch.tensor([halco.AtomicNeuronOnDLS.size])))
+            self.assertTrue(
+                torch.all(torch.tensor(this_target[coords]) == this_param))
+
+        for key in ["tau_mem",
+                    "refractory_time",
+                    "holdoff_time"]:
+            this_target = getattr(target, key)
+            this_param = getattr(params, key)
+            self.assertTrue(
+                torch.equal(
+                    torch.tensor(this_target.shape),
+                    torch.tensor([halco.AtomicNeuronOnDLS.size])))
+            self.assertTrue(
+                torch.all(
+                    torch.tensor(this_target[coords]) == this_param * 1e6))
+
+        # e_coba_reversal
+        self.assertTrue(
+            torch.equal(
+                torch.tensor(target.e_coba_reversal.shape),
+                torch.tensor([2, halco.AtomicNeuronOnDLS.size])))
+        if params.e_coba_reversal is not None:
+            self.assertTrue(
+                torch.all(
+                    torch.tensor(target.e_coba_reversal[:, coords])
+                    == params.e_coba_reversal))
+        else:
+            self.assertTrue(
+                torch.all(
+                    torch.tensor(target.e_coba_reversal[:, coords])
+                    == torch.tensor([[torch.inf], [-torch.inf]])))
+
+        # e_coba_reference
+        self.assertTrue(
+            torch.equal(
+                torch.tensor(target.e_coba_reference.shape),
+                torch.tensor(
+                    [halco.SynapticInputOnNeuron.size,
+                     halco.AtomicNeuronOnDLS.size])))
+        if params.e_coba_reference is not None:
+            self.assertTrue(
+                torch.all(
+                    torch.tensor(target.e_coba_reference[:, coords])
+                    == params.e_coba_reference))
+        else:
+            self.assertTrue(
+                torch.all(torch.isnan(torch.tensor(
+                    target.e_coba_reference[:, coords]))))
+
+        # tau syn
+        self.assertTrue(
+            torch.equal(
+                torch.tensor(target.tau_syn.shape),
+                torch.tensor([halco.SynapticInputOnNeuron.size,
+                              halco.AtomicNeuronOnDLS.size])))
+        self.assertTrue(
+            torch.all(
+                torch.tensor(
+                    target.tau_syn[:, coords]) == params.tau_syn * 1e6))
+
+        # i_synin_gm
+        self.assertTrue(
+            torch.equal(
+                torch.tensor(target.i_synin_gm.shape),
+                torch.tensor([halco.SynapticInputOnNeuron.size])))
+        self.assertTrue(
+            torch.all(
+                torch.tensor(target.i_synin_gm) == params.i_synin_gm))
+
+        # synapse_dac_bias
+        self.assertEqual(target.synapse_dac_bias, params.synapse_dac_bias)
+
+
+if __name__ == "__main__":
+    unittest.main()
diff --git a/tests/hw/test_spiking_examples.py b/tests/hw/test_spiking_examples.py
index d749bb04d780f82ee89b7061bdaee0825d3e7601..da91fe1e5c160319cb8d8acd62021d534bd41c06 100644
--- a/tests/hw/test_spiking_examples.py
+++ b/tests/hw/test_spiking_examples.py
@@ -4,6 +4,7 @@ Test SNN examples
 import unittest
 
 from hxtorch.examples.spiking import yinyang
+from hxtorch.examples.spiking import calib_neuron
 
 
 class YinYangExampleTest(unittest.TestCase):
@@ -59,5 +60,15 @@ class YinYangExampleTest(unittest.TestCase):
         self.test_training(mock=True)
 
 
+class CalibNeuronExampleTest(unittest.TestCase):
+    """ Test the calib_neuron example """
+
+    def test_calib_neuron(self) -> None:
+        spikes, _ = calib_neuron.main()
+
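+        # The example should produce at least one output spike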
+        self.assertGreater(spikes.to_sparse().shape[0], 0)
+
+
 if __name__ == "__main__":
     unittest.main()
diff --git a/tests/hw/test_spiking_execution_instance.py b/tests/hw/test_spiking_execution_instance.py
index eddf3becbdede2a54658611cff070413a9403440..aea88b1774aded23b589d5f4bb7988b2592f1f44 100644
--- a/tests/hw/test_spiking_execution_instance.py
+++ b/tests/hw/test_spiking_execution_instance.py
@@ -3,10 +3,13 @@ import unittest
 import hxtorch
 import pylogging as logger
 import pygrenade_vx as grenade
+from copy import deepcopy
 
 from hxtorch.spiking.execution_instance import (
     ExecutionInstance, ExecutionInstances)
 from hxtorch.spiking.utils import calib_helper
+from hxtorch.spiking.calibrated_params import CalibratedParams
+import hxtorch.spiking as hxsnn
 
 logger.default_config(level=logger.LogLevel.INFO)
 logger = logger.get("hxtorch.test.hw.test_spiking_execution_instance")
@@ -45,7 +48,7 @@ class TestExecutionInstances(HXTestCase):
 
         # load chips
         for inst in instances:
-            inst.prepare_static_config()
+            inst.load_calib(calib_helper.nightly_calib_path())
         chips = instances.chips
         self.assertEqual(len(chips), 3)
         self.assertTrue(
@@ -90,8 +93,6 @@ class TestExecutionInstances(HXTestCase):
         inst2 = ExecutionInstance()
         inst3 = ExecutionInstance()
         instances = ExecutionInstances([inst1, inst2, inst3])
-        for inst in instances:
-            inst.prepare_static_config()
 
         hooks = instances.playback_hooks
         self.assertIsInstance(hooks, dict)
@@ -127,23 +128,77 @@ class TestExecutionInstance(HXTestCase):
         inst.load_calib(calib_path)
         self.assertIsNotNone(inst.chip)
 
-    def test_prepare_static_config(self):
+    def test_calibrate(self):
         """ Test prepare static config """
-        # Test calib
-        # Test chip is None
+        # Calib path assigned, pointing to a native calix calibration result
+        # -> params should be loadable from it and no calibration is executed
+        inst = ExecutionInstance(
+            calib_path=calib_helper.nightly_calix_native_path())
+        exp = hxsnn.Experiment()
+        syn = hxsnn.Synapse(10, 10, exp, execution_instance=inst)
+        params = CalibratedParams(leak=0)
+        old_params = deepcopy(params)
+        nrn = hxsnn.Neuron(
+            10, exp, execution_instance=inst, params=params)
+        # Ensure modules get placed
+        syn.register_hw_entity()
+        nrn.register_hw_entity()
+        inst.modules = [syn, nrn]
+        self.assertNotEqual(0, len(inst.modules))
+        inst.calibrate()
+        self.assertIsNotNone(inst.calib)
+        self.assertNotEqual(params.leak[0], old_params.leak)
+
+        # Calib path assigned, but not a native calix calibration result
+        # -> params cannot be loaded from it and no calibration is executed
+        inst = ExecutionInstance(
+            calib_path=calib_helper.nightly_calib_path())
+        exp = hxsnn.Experiment()
+        syn = hxsnn.Synapse(10, 10, exp, execution_instance=inst)
+        params = CalibratedParams(leak=0)
+        old_params = deepcopy(params)
+        nrn = hxsnn.Neuron(
+            10, exp, execution_instance=inst, params=params)
+        # Ensure modules get placed
+        syn.register_hw_entity()
+        nrn.register_hw_entity()
+        inst.modules = [syn, nrn]
+        self.assertNotEqual(0, len(inst.modules))
+        inst.calibrate()
+        self.assertIsNone(inst.calib)
+        self.assertEqual(params, old_params)
+
+        # No calib path assigned
+        # -> calibrate from the params object if it has `to_calix_targets`
+        # Here it does not -> at least the nightly calib is loaded
         inst = ExecutionInstance()
-        self.assertIsNone(inst.chip)
-        inst.prepare_static_config()
-        self.assertIsNotNone(inst.chip)
+        exp = hxsnn.Experiment()
+        syn = hxsnn.Synapse(10, 10, exp, execution_instance=inst)
+        nrn = hxsnn.Neuron(10, exp, execution_instance=inst)
+        inst.modules = [syn, nrn]
+        self.assertNotEqual(0, len(inst.modules))
 
-        # Test chip is not None -> No new chip
-        calib_path = calib_helper.nightly_calib_path()
-        inst = ExecutionInstance(calib_path=calib_path)
-        chip1 = inst.chip
+        inst.calibrate()
+        self.assertIsNotNone(inst.chip)
+        self.assertEqual(
+            inst.calib_path, calib_helper.nightly_calix_native_path())
+
+        # Now we assign CalibratedParams, which provides `to_calix_targets`
+        # -> calibration should be executed
+        inst = ExecutionInstance()
+        exp = hxsnn.Experiment()
+        syn = hxsnn.Synapse(10, 10, exp, execution_instance=inst)
+        nrn = hxsnn.Neuron(
+            10, exp, execution_instance=inst, params=CalibratedParams())
+        # Ensure modules get placed
+        syn.register_hw_entity()
+        nrn.register_hw_entity()
+        inst.modules = [syn, nrn]
+        self.assertNotEqual(0, len(inst.modules))
+        inst.calibrate()
+        self.assertIsNotNone(inst.calib)
         self.assertIsNotNone(inst.chip)
-        inst.prepare_static_config()
-        chip2 = inst.chip
-        self.assertEqual(chip1, chip2)
 
     def test_cadc_recordings(self):
         """ Test CADC recordings """
diff --git a/tests/hw/test_spiking_modules.py b/tests/hw/test_spiking_modules.py
index b28db6e47d817458ca57686bd473d51b4681e44b..eea35ad2b0274372e0d97575b98472cd24fc5ef6 100644
--- a/tests/hw/test_spiking_modules.py
+++ b/tests/hw/test_spiking_modules.py
@@ -11,6 +11,7 @@ import matplotlib.pyplot as plt
 from dlens_vx_v3 import lola, halco
 import hxtorch
 from hxtorch import spiking as hxsnn
+from hxtorch.spiking.utils import calib_helper
 from hxtorch.spiking.execution_instance import ExecutionInstance
 
 hxtorch.logger.default_config(level=hxtorch.logger.LogLevel.ERROR)
@@ -407,7 +408,9 @@ class TestNeuron(HWTestCase):
         """
         # Enable bypass
         experiment = hxsnn.Experiment(dt=self.dt)
-        execution_instance = ExecutionInstance()
+        execution_instance = ExecutionInstance(
+            # Hack: set a calib path so the chip below is not overwritten
+            calib_path=calib_helper.nightly_calib_path())
         execution_instance.chip = lola.Chip.default_neuron_bypass
         experiment.default_execution_instance = execution_instance
 
@@ -718,7 +721,9 @@ class TestIAFNeuron(HWTestCase):
         """
         # Enable bypass
         experiment = hxsnn.Experiment(dt=self.dt)
-        execution_instance = ExecutionInstance()
+        execution_instance = ExecutionInstance(
+            # Hack: set a calib path so the chip below is not overwritten
+            calib_path=calib_helper.nightly_calib_path())
         execution_instance.chip = lola.Chip.default_neuron_bypass
         experiment.default_execution_instance = execution_instance
         # Modules
diff --git a/tests/sw/test_spiking_calibrated_params.py b/tests/sw/test_spiking_calibrated_params.py
new file mode 100644
index 0000000000000000000000000000000000000000..96ca9a2d1a1ca1a2f363b5343bd8aa05c88f4fe0
--- /dev/null
+++ b/tests/sw/test_spiking_calibrated_params.py
@@ -0,0 +1,379 @@
+"""
+Test CalibratedParams and its conversion from and to calix targets
+"""
+import unittest
+from functools import partial
+import torch
+import numpy as np
+import quantities as pq
+
+from dlens_vx_v3 import halco
+from calix.spiking.neuron import NeuronCalibTarget
+
+from hxtorch.spiking.calibrated_params import CalibratedParams
+
+
+class TestCalibratedParams(unittest.TestCase):
+    """ Tests the CalibratedParams parameter container """
+
+    def test_init(self) -> None:
+        """ Test construction without errors """
+        # Default
+        CalibratedParams()
+
+        # Fill
+        CalibratedParams(
+            leak=torch.as_tensor(80),
+            reset=torch.as_tensor(80),
+            threshold=torch.as_tensor(125),
+            tau_mem=torch.as_tensor(10.),
+            tau_syn=torch.as_tensor(10.),
+            i_synin_gm=torch.as_tensor(500),
+            e_coba_reversal=torch.as_tensor(500),
+            e_coba_reference=torch.as_tensor(500),
+            membrane_capacitance=torch.as_tensor(63),
+            refractory_time=torch.as_tensor(2.),
+            synapse_dac_bias=torch.as_tensor(600),
+            holdoff_time=torch.as_tensor(0))
+
+    def test_from_calix_targets(self) -> None:
+        """ Test conversion from calix targets to params """
+        # Some logical neurons
+        neurons = [
+            halco.LogicalNeuronOnDLS(
+                halco.LogicalNeuronCompartments(
+                    {halco.CompartmentOnLogicalNeuron():
+                     [halco.AtomicNeuronOnLogicalNeuron()]}),
+                     halco.AtomicNeuronOnDLS(halco.EnumRanged_512_(i)))
+                     for i in range(23, 48)]
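+        # coords: indices of all atomic neurons of the logical neurons
+        # selector: index of each logical neuron's first atomic neuron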
+        coords = [
+            an.toEnum().value() for neuron in neurons
+            for an in neuron.get_atomic_neurons()]
+        selector = [
+            neuron.get_atomic_neurons()[0].toEnum().value()
+            for neuron in neurons]
+
+        # Default
+        target = NeuronCalibTarget.DenseDefault
+        params = CalibratedParams()
+        params.from_calix_targets(target, neurons)
+        self.check_params(target, params, selector, coords)
+
+        # All numbers
+        target = NeuronCalibTarget(
+            leak=80,
+            reset=70,
+            threshold=125,
+            tau_mem=10. * pq.us,
+            tau_syn=10. * pq.us,
+            i_synin_gm=500,
+            e_coba_reversal=None,
+            e_coba_reference=None,
+            membrane_capacitance=63,
+            refractory_time=2. * pq.us,
+            synapse_dac_bias=600,
+            holdoff_time=0 * pq.us)
+        params = CalibratedParams()
+        params.from_calix_targets(target, neurons)
+        self.check_params(target, params, selector, coords)
+
+        # Some larger logical neurons
+        neurons = [
+            halco.LogicalNeuronOnDLS(
+                halco.LogicalNeuronCompartments(
+                    {halco.CompartmentOnLogicalNeuron():
+                     [halco.AtomicNeuronOnLogicalNeuron(
+                         halco.EnumRanged_256_(0)),
+                      halco.AtomicNeuronOnLogicalNeuron(
+                          halco.EnumRanged_256_(1))]}),
+                     halco.AtomicNeuronOnDLS(halco.EnumRanged_512_(2 * i)))
+                     for i in range(25)]
+        coords = [
+            an.toEnum().value() for neuron in neurons
+            for an in neuron.get_atomic_neurons()]
+        selector = [
+            neuron.get_atomic_neurons()[0].toEnum().value()
+            for neuron in neurons]
+
+        params = CalibratedParams()
+        params.from_calix_targets(target, neurons)
+        self.check_params(target, params, selector, coords)
+
+        target = NeuronCalibTarget(
+            leak=80,
+            reset=70,
+            threshold=125,
+            tau_mem=10. * pq.us,
+            tau_syn=10. * pq.us,
+            i_synin_gm=500,
+            e_coba_reversal=None,
+            e_coba_reference=None,
+            membrane_capacitance=63,
+            refractory_time=2. * pq.us,
+            synapse_dac_bias=600,
+            holdoff_time=0 * pq.us)
+        params = CalibratedParams()
+        params.from_calix_targets(target, neurons)
+        self.check_params(target, params, selector, coords)
+
+    def test_to_calix_targets(self) -> None:
+        """ Test conversion from params to calix targets """
+        # Use the default target, as ExecutionInstance does
+        # Some logical neurons
+        neurons = [
+            halco.LogicalNeuronOnDLS(
+                halco.LogicalNeuronCompartments(
+                    {halco.CompartmentOnLogicalNeuron():
+                     [halco.AtomicNeuronOnLogicalNeuron()]}),
+                     halco.AtomicNeuronOnDLS(halco.EnumRanged_512_(i)))
+                     for i in range(23, 48)]
+        coords = [
+            an.toEnum().value() for neuron in neurons
+            for an in neuron.get_atomic_neurons()]
+        size = len(neurons)
+
+        # All numbers, non-default
+        params = CalibratedParams(
+            leak=110,
+            reset=120,
+            threshold=130,
+            tau_mem=2e-5,
+            tau_syn=3e-5,
+            i_synin_gm=600,
+            e_coba_reversal=None,
+            e_coba_reference=None,
+            membrane_capacitance=55,
+            refractory_time=3e-6,
+            synapse_dac_bias=700,
+            holdoff_time=1)
+        target = NeuronCalibTarget.DenseDefault
+        target.synapse_dac_bias = None
+        target.i_synin_gm = np.array([None, None])
+        params.to_calix_targets(target, neurons)
+        self.check_targets(target, params, coords)
+
+        # All tensors, non-default
+        params = CalibratedParams(
+            leak=torch.tensor(110),
+            reset=torch.tensor(120),
+            threshold=torch.tensor(130),
+            tau_mem=torch.tensor(2e-5),
+            tau_syn=torch.tensor(3e-5),
+            i_synin_gm=torch.tensor(600),
+            e_coba_reversal=None,
+            e_coba_reference=None,
+            membrane_capacitance=torch.tensor(55),
+            refractory_time=torch.tensor(3e-6),
+            synapse_dac_bias=torch.tensor(700),
+            holdoff_time=torch.tensor(1))
+        target = NeuronCalibTarget.DenseDefault
+        target.synapse_dac_bias = None
+        target.i_synin_gm = np.array([None, None])
+        params.to_calix_targets(target, neurons)
+        self.check_targets(target, params, coords)
+
+        # All tensors of size, non-default
+        params = CalibratedParams(
+            leak=torch.full((size,), 110),
+            reset=torch.full((size,), 120),
+            threshold=torch.full((size,), 130),
+            tau_mem=torch.full((size,), 2e-5),
+            tau_syn=torch.full((2, size,), 3e-5),
+            i_synin_gm=torch.full((2,), 600),
+            e_coba_reversal=None,
+            e_coba_reference=None,
+            membrane_capacitance=torch.full((size,), 55),
+            refractory_time=torch.full((size,), 3e-6),
+            synapse_dac_bias=torch.tensor(700),
+            holdoff_time=torch.full((size,), 1))
+        target = NeuronCalibTarget.DenseDefault
+        target.synapse_dac_bias = None
+        target.i_synin_gm = np.array([None, None])
+        params.to_calix_targets(target, neurons)
+        self.check_targets(target, params, coords)
+
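+        # Conflicting values already set in the target are expected to raise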
+        target.i_synin_gm = np.array([400, None])
+        self.assertRaises(
+            AttributeError, partial(params.to_calix_targets, target, neurons))
+
+        target.synapse_dac_bias = 600
+        self.assertRaises(
+            AttributeError, partial(params.to_calix_targets, target, neurons))
+
+    def check_params(self, target, params, selector, coords):
+        """ Checks if params have expected shapes and values """
+        for key in ["tau_mem", "refractory_time", "holdoff_time",
+                    "leak", "reset", "threshold", "membrane_capacitance"]:
+            self.assertTrue(
+                torch.equal(
+                    torch.tensor([len(selector)]),
+                    torch.tensor(getattr(params, key).shape)))
+            if (not isinstance(getattr(target, key), np.ndarray)
+                or not getattr(target, key).shape):
+                self.assertTrue(
+                    torch.all(
+                        torch.tensor(getattr(target, key))
+                        == getattr(params, key)))
+            elif getattr(target, key).shape == (halco.AtomicNeuronOnDLS.size,):
+                self.assertTrue(
+                    torch.equal(
+                        torch.tensor(getattr(target, key)[selector]),
+                        getattr(params, key)))
+
+        # tau_syn
+        if (not isinstance(target.tau_syn, np.ndarray)
+            or not target.tau_syn.shape):
+            self.assertTrue(
+                torch.equal(
+                    torch.tensor([len(selector)]),
+                    torch.tensor(params.tau_syn.shape)))
+            self.assertTrue(
+                torch.all(
+                    torch.tensor(target.tau_syn) == params.tau_syn))
+        elif target.tau_syn.shape == (
+            halco.SynapticInputOnNeuron.size, halco.AtomicNeuronOnDLS.size):
+            self.assertTrue(
+                torch.equal(
+                    torch.tensor(
+                        [halco.SynapticInputOnNeuron.size, len(selector)]),
+                    torch.tensor(params.tau_syn.shape)))
+            self.assertTrue(
+                torch.equal(
+                    torch.tensor(target.tau_syn[:, selector]), params.tau_syn))
+        elif target.tau_syn.shape == (halco.SynapticInputOnNeuron.size,):
+            self.assertTrue(
+                torch.equal(
+                    torch.tensor([len(selector)]),
+                    torch.tensor(params.tau_syn.shape)))
+            self.assertTrue(
+                torch.equal(
+                    torch.tensor(target.tau_syn), params.tau_syn))
+        elif target.tau_syn.shape == (halco.AtomicNeuronOnDLS.size,):
+            self.assertTrue(
+                torch.equal(
+                    torch.tensor([len(selector)]),
+                    torch.tensor(params.tau_syn.shape)))
+            self.assertTrue(
+                torch.equal(
+                    torch.tensor(target.tau_syn)[selector], params.tau_syn))
+
+        for key in ["e_coba_reversal", "e_coba_reference"]:
+            if getattr(target, key) is None:
+                self.assertIsNone(getattr(params, key))
+            else:
+                self.assertTrue(
+                    torch.equal(
+                        torch.tensor(
+                            [halco.SynapticInputOnNeuron.size, len(selector)]),
+                        torch.tensor(getattr(params, key).shape)))
+                if getattr(target, key).shape == (
+                    halco.SynapticInputOnNeuron.size,):
+                    if torch.all(torch.isnan(
+                            torch.tensor(getattr(target, key)))):
+                        self.assertTrue(torch.all(
+                            torch.isnan(getattr(params, key))))
+                    else:
+                        self.assertTrue(
+                            torch.all(
+                                torch.tensor(
+                                    getattr(target, key)).unsqueeze(-1)
+                                == getattr(params, key)))
+                else:
+                    if torch.all(
+                        torch.isnan(
+                            torch.tensor(getattr(target, key))[:, coords])):
+                        self.assertTrue(
+                            torch.all(torch.isnan(getattr(params, key))))
+                    else:
+                        self.assertTrue(
+                            torch.all(
+                                torch.tensor(
+                                    getattr(target, key))[:, selector]
+                                    == getattr(params, key)))
+
+    def check_targets(self, target, params, coords):
+        """ Checks if targets have expected shapes and values """
+        # Leak, reset, threshold and membrane capacitance
+        for key in ["leak",
+                    "reset",
+                    "threshold",
+                    "membrane_capacitance"]:
+            this_target = getattr(target, key)
+            this_param = getattr(params, key)
+            self.assertTrue(
+                torch.equal(
+                    torch.tensor(this_target.shape),
+                    torch.tensor([halco.AtomicNeuronOnDLS.size])))
+            self.assertTrue(
+                torch.all(torch.tensor(this_target[coords]) == this_param))
+
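+        # Time-like parameters: params are in seconds, calix targets in us,
+        # hence the factor 1e6 below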
+        for key in ["tau_mem",
+                    "refractory_time",
+                    "holdoff_time"]:
+            this_target = getattr(target, key)
+            this_param = getattr(params, key)
+            self.assertTrue(
+                torch.equal(
+                    torch.tensor(this_target.shape),
+                    torch.tensor([halco.AtomicNeuronOnDLS.size])))
+            self.assertTrue(
+                torch.all(
+                    torch.tensor(this_target[coords]) == this_param * 1e6))
+
+        # e_coba_reversal
+        self.assertTrue(
+            torch.equal(
+                torch.tensor(target.e_coba_reversal.shape),
+                torch.tensor([halco.SynapticInputOnNeuron.size,
+                              halco.AtomicNeuronOnDLS.size])))
+        if params.e_coba_reversal is not None:
+            self.assertTrue(
+                torch.all(
+                    torch.tensor(target.e_coba_reversal[:, coords])
+                    == params.e_coba_reversal))
+        else:
+            self.assertTrue(
+                torch.all(
+                    torch.tensor(target.e_coba_reversal[:, coords])
+                    == torch.tensor([[torch.inf], [-torch.inf]])))
+
+        # e_coba_reference
+        self.assertTrue(
+            torch.equal(
+                torch.tensor(target.e_coba_reference.shape),
+                torch.tensor(
+                    [halco.SynapticInputOnNeuron.size,
+                     halco.AtomicNeuronOnDLS.size])))
+        if params.e_coba_reference is not None:
+            self.assertTrue(
+                torch.all(
+                    torch.tensor(target.e_coba_reference[:, coords])
+                    == params.e_coba_reference))
+        else:
+            self.assertTrue(
+                torch.all(torch.isnan(torch.tensor(
+                    target.e_coba_reference[:, coords]))))
+
+        # tau syn
+        self.assertTrue(
+            torch.equal(
+                torch.tensor(target.tau_syn.shape),
+                torch.tensor([halco.SynapticInputOnNeuron.size,
+                              halco.AtomicNeuronOnDLS.size])))
+        self.assertTrue(
+            torch.all(
+                torch.tensor(
+                    target.tau_syn[:, coords]) == params.tau_syn * 1e6))
+
+        # i_synin_gm
+        self.assertTrue(
+            torch.equal(
+                torch.tensor(target.i_synin_gm.shape),
+                torch.tensor([halco.SynapticInputOnNeuron.size])))
+        self.assertTrue(
+            torch.all(
+                torch.tensor(target.i_synin_gm) == params.i_synin_gm))
+
+        # synapse_dac_bias
+        self.assertEqual(target.synapse_dac_bias, params.synapse_dac_bias)
+
+
+if __name__ == "__main__":
+    unittest.main()
diff --git a/tests/sw/test_spiking_cuba_iaf_integration.py b/tests/sw/test_spiking_cuba_iaf_integration.py
index a0fc9233a88f2daea648933672513cfb58c8516e..4e60a64a2ca9a7ce5887c099a6dfd8b17c36e4e7 100644
--- a/tests/sw/test_spiking_cuba_iaf_integration.py
+++ b/tests/sw/test_spiking_cuba_iaf_integration.py
@@ -25,11 +25,11 @@ class TestIAFIntegration(unittest.TestCase):
         """ Test IAF integration """
         # Params
         params = CUBAIAFParams(
-            tau_mem_inv=1. / 6e-6,
-            tau_syn_inv=1. / 6e-6,
-            tau_ref=1e-6,
-            v_th=1.,
-            v_reset=-0.1)
+            tau_mem=6e-6,
+            tau_syn=6e-6,
+            refractory_time=1e-6,
+            threshold=1.,
+            reset=-0.1)
 
         # Inputs
         inputs = torch.zeros(100, 10, 5)
@@ -71,11 +71,11 @@ class TestIAFIntegration(unittest.TestCase):
         """ Test IAF integration with hardware data """
         # Params
         params = CUBAIAFParams(
-            tau_mem_inv=1. / 6e-6,
-            tau_syn_inv=1. / 6e-6,
-            tau_ref=0e-6,
-            v_th=1.,
-            v_reset=-0.1)
+            tau_mem=6e-6,
+            tau_syn=6e-6,
+            refractory_time=0e-6,
+            threshold=1.,
+            reset=-0.1)
 
         # Inputs
         inputs = torch.zeros(100, 10, 5)
@@ -136,11 +136,11 @@ class TestIAFIntegration(unittest.TestCase):
         """ Test refractory IAF integration """
         # Params
         params = CUBAIAFParams(
-            tau_mem_inv=1. / 6e-6,
-            tau_syn_inv=1. / 6e-6,
-            tau_ref=1e-6,
-            v_th=1.,
-            v_reset=-0.1)
+            tau_mem=6e-6,
+            tau_syn=6e-6,
+            refractory_time=1e-6,
+            threshold=1.,
+            reset=-0.1)
 
         # Inputs
         inputs = torch.zeros(100, 10, 5)
@@ -183,11 +183,11 @@ class TestIAFIntegration(unittest.TestCase):
         """ Test refractory IAF integration with hardware data """
         # Params
         params = CUBAIAFParams(
-            tau_mem_inv=1. / 6e-6,
-            tau_syn_inv=1. / 6e-6,
-            tau_ref=0e-6,
-            v_th=1.,
-            v_reset=-0.1)
+            tau_mem=6e-6,
+            tau_syn=6e-6,
+            refractory_time=0e-6,
+            threshold=1.,
+            reset=-0.1)
 
         # Inputs
         inputs = torch.zeros(100, 10, 5)
diff --git a/tests/sw/test_spiking_cuba_li_integration.py b/tests/sw/test_spiking_cuba_li_integration.py
index dd1b0f1113189719c173e52f71ddcbfe66aba035..96b8cd41c7d397eb9307e1d45c3785939e8ff099 100644
--- a/tests/sw/test_spiking_cuba_li_integration.py
+++ b/tests/sw/test_spiking_cuba_li_integration.py
@@ -22,7 +22,7 @@ class TestLIIntegration(unittest.TestCase):
     def test_cuba_li_integration(self):
         """ Test CUBA LI integration """
         # Params
-        params = CUBALIParams(tau_mem_inv=1./6e-6, tau_syn_inv=1./6e-6)
+        params = CUBALIParams(tau_mem=6e-6, tau_syn=6e-6)
 
         # Inputs
         inputs = torch.zeros(100, 10, 5)
@@ -61,7 +61,7 @@ class TestLIIntegration(unittest.TestCase):
     def test_cuba_li_integration_hw_data(self):
         """ Test CUBA LI integration with hardware data """
         # Params
-        params = CUBALIParams(tau_mem_inv=1./6e-6, tau_syn_inv=1./6e-6)
+        params = CUBALIParams(tau_mem=6e-6, tau_syn=6e-6)
 
         # Inputs
         inputs = torch.zeros(100, 10, 5)
diff --git a/tests/sw/test_spiking_cuba_lif_integration.py b/tests/sw/test_spiking_cuba_lif_integration.py
index ea72925456c29dec426afc1fa9cb3ea7fcd90f2d..574427a69fbef0e4363c74b654e9a52effbb469c 100644
--- a/tests/sw/test_spiking_cuba_lif_integration.py
+++ b/tests/sw/test_spiking_cuba_lif_integration.py
@@ -23,11 +23,11 @@ class TestLIFIntegration(unittest.TestCase):
         """ Test CUBA LIF integration """
         # Params
         params = CUBALIFParams(
-            tau_mem_inv=1./6e-6,
-            tau_syn_inv=1./6e-6,
-            tau_ref=1e-6,
-            v_th=0.7,
-            v_reset=-0.1)
+            tau_mem=6e-6,
+            tau_syn=6e-6,
+            refractory_time=1e-6,
+            threshold=0.7,
+            reset=-0.1)
 
         # Inputs
         inputs = torch.zeros(100, 10, 5)
@@ -70,11 +70,11 @@ class TestLIFIntegration(unittest.TestCase):
         """ Test CUBA LIF integration with hardware data """
         # Params
         params = CUBALIFParams(
-            tau_mem_inv=1./6e-6,
-            tau_syn_inv=1./6e-6,
-            tau_ref=0e-6,
-            v_th=1.,
-            v_reset=-0.1)
+            tau_mem=6e-6,
+            tau_syn=6e-6,
+            refractory_time=0e-6,
+            threshold=1.,
+            reset=-0.1)
 
         # Inputs
         inputs = torch.zeros(100, 10, 5)