diff --git a/CITATION.cff b/CITATION.cff
index e7edd3cb2124d43caf9d846afca16f238c82c86e..2634c060c0b9c7e1a8968b05cc57d6b17630ab41 100644
--- a/CITATION.cff
+++ b/CITATION.cff
@@ -15,9 +15,9 @@ license: Apache-2.0
 
 repository-code: https://github.com/FZJ-INM1-BDA/siibra-python
 
-version: v1.0.1-alpha.2
+version: v1.0.1-alpha.4
 
-date-released: 2025-03-12
+date-released: 2025-04-10
 
 doi: 10.5281/zenodo.7885728
 
diff --git a/examples/tutorials/2025-paper-fig3.py b/examples/tutorials/2025-paper-fig3.py
index fc7a63dea847b53365c6a007b1a079118723a346..8504cba4ca28eaf32ace15010afd5e1301056b92 100644
--- a/examples/tutorials/2025-paper-fig3.py
+++ b/examples/tutorials/2025-paper-fig3.py
@@ -27,7 +27,6 @@ reveal additional multimodal data features characterizing the ROIs.
 """
 
 # %%
-import re
 import numpy as np
 import pandas as pd
 import matplotlib.pyplot as plt
@@ -39,6 +38,7 @@ assert siibra.__version__ >= "1.0.1"
 
 sns.set_style("dark")
 
+
 # %%
 # Input: Activation map or other feature distribution as image volume in MNI space
 # ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -72,7 +72,7 @@ plotting.plot_glass_brain(
 
 # %%
 # Split input volume into cluster components
-# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 #
 # There are many ways to get components out of a feature map. Here we use siibra to
 # - draw random points from the distribution encoded by the input volume, then
@@ -105,11 +105,11 @@ view.add_markers(
 )
 
 # %%
-# Assign peaks and clusters to cytoarchitectonic regions
-# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+# Assign clusters to cytoarchitectonic regions
+# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 #
 # To assign the clusters to brain regions, we build feature maps from each cluster
-# and assign them to the Julich-Brain probabilistic maps. The assignemint is one-to-many
+# and assign them to the Julich-Brain probabilistic maps. The assignment is one-to-many
 # since the structures in the image and parcellation are continuous. Assignments
 # report correlation, intersection over union, and some other measures which we can use
 # to filter and sort them. The result is an assignment table from cluster components
@@ -120,24 +120,8 @@ min_map_value = 0.5
 pmaps = siibra.get_map(
     parcellation="julich 3.0.3", space="mni152", maptype="statistical"
 )
-assignments = []
-
-# assign peaks to regions
-peaks = input_volume.find_peaks(mindist=5, sigma_mm=0)
-with siibra.QUIET:
-    df = pmaps.assign(peaks)
-df.query(f"`map value` >= {min_map_value}", engine="python", inplace=True)
-df["type"] = "peak"
-df["id"] = df["input structure"]
-assignments.append(df[["type", "id", "region", "map value"]])
-
-view = plotting.plot_glass_brain(
-    input_volume.fetch(), alpha=1, cmap="RdBu", symmetric_cbar=True
-)
-view.add_markers(peaks.as_list(), marker_size=30)
 
-# %%
-# assign clusters to regions
+assignments = []
 for cl in clusterlabels:
     clustermap = siibra.volumes.from_pointcloud(samples, label=cl, target=input_volume)
     plotting.plot_glass_brain(
@@ -150,12 +134,11 @@ for cl in clusterlabels:
     with siibra.QUIET:
         df = pmaps.assign(clustermap)
     df.query(f"correlation >= {min_correlation}", engine="python", inplace=True)
-    df["type"] = "cluster"
-    df["id"] = cl
-    assignments.append(df[["type", "id", "region", "correlation", "map value"]])
+    df["id"] = cl
+    assignments.append(df[["id", "region", "correlation", "map value"]])
 
 all_assignments = pd.concat(assignments).sort_values(by="correlation", ascending=False)
-all_assignments
+all_assignments[:9].round(2)
 
 # %%
 # plot the three primary assigned probability maps
@@ -174,64 +157,58 @@ for n, a in all_assignments.iterrows():
 # Find features
 # ^^^^^^^^^^^^^
 #
-# To demonstrate multimodal feature profiling, we only choose the first connected region.
-
-
-def shortname(region):
-    return (
-        re.sub("\s*\(.*\)\s*|\s*Areaa\s*", " ", region.name)
-        .replace("left", "L")
-        .replace("right", "R")
-        .strip()
-    )
-
-
-def filter_features(feats, region):
-    return [f for f in feats if any(r.assign(region) for r in f.anchor.regions)]
+# To demonstrate multimodal feature profiling, we choose one of the connected regions.
+selected_region = siibra.get_region("julich 3.0.3", "Area hOc1 (V1, 17, CalcS) left")
 
+# %%
+# Let us first query the receptor density fingerprint for this region
+receptor_fingerprints = siibra.features.get(selected_region, "receptor density fingerprint")
+fp = receptor_fingerprints[0]
+print("\n".join(fp.urls))
+fig, axs = plt.subplots(1, 1, figsize=(4, 3.2))
+fp.plot(ax=axs, capsize=4)
 
-def plot_receptors(region, ax):
-    fts = filter_features(siibra.features.get(region, "ReceptorDensityFingerprint"), region)
-    fts[0].plot(ax=ax)
 
+# %%
+# Now, query for gene expressions for the same region
+genes = ["gabarapl1", "gabarapl2", "maoa", "tac1"]
+gene_expressions = siibra.features.get(selected_region, "gene expressions", gene=genes)
+print("\n".join(gene_expressions[0].urls))
+fig, axs = plt.subplots(1, 1, figsize=(4, 3.5))
+gene_expressions[0].plot(ax=axs)
 
-def plot_celldensities(region, ax):
-    fts = filter_features(siibra.features.get(region, "LayerwiseCellDensity"), region)
-    fts[0].plot(ax=ax)
 
+# %%
+# We can next obtain cell body density values for this region
+cell_densities = siibra.features.get(selected_region, "LayerwiseCellDensity")
+print("\n".join(cell_densities[0].urls))
+fig, axs = plt.subplots(1, 1, figsize=(4, 2.8))
+cell_densities[0].plot(ax=axs, textwrap=30)
 
-def plot_gene_expressions(region, ax, genes):
-    fts = siibra.features.get(region, "GeneExpressions", gene=genes)
-    fts[0].plot(ax=ax)
+# %%
+# Lastly, we can obtain the regional profile from streamline-count
+# parcellation-based connectivity matrices
+connectivity_matrices = siibra.features.get(selected_region, "StreamlineCounts")
+conn = connectivity_matrices[0]  # select the first set of matrices
+print("\n".join(conn.urls))
 
 
-def plot_connectivity(region, ax):
-    fts = siibra.features.get(region, "StreamlineCounts")
-    conndata = fts[0][0].get_profile(region).data
-    conndata.rename(index={r: shortname(r) for r in conndata.index}, inplace=True)
-    conndata[:15].plot(kind="bar", ax=ax)
-    plt.xticks(ha="right")
-    plt.tight_layout()
-    plt.grid(True)
-    plt.title(f"Connectivity for {region.name}")
+def shorten_name(region):
+    # shorten region names for readability
+    return (
+        region.replace("Area ", "")
+        .replace(" (GapMap)", "")
+        .replace("left", "L")
+        .replace("right", "R")
+    )
 
 
-# %%
-selected_region = siibra.get_region("julich 3.0.3", "Area hOc1 (V1, 17, CalcS) left")
-plot_funcs = [
-    lambda r, a: plot_receptors(r, a),
-    lambda r, a: plot_celldensities(r, a),
-    lambda r, a: plot_connectivity(r, a),
-    lambda r, a: plot_gene_expressions(
-        r, a, ["gabarapl1", "gabarapl2", "maoa", "tac1"]
-    ),
-]
-
-fig, axs = plt.subplots(1, len(plot_funcs), figsize=(3.5 * len(plot_funcs), 4))
-for ax, func in zip(axs.ravel(), plot_funcs):
-    func(r=selected_region, a=ax)
-    ax.grid(True)
-    xtl = ax.get_xticklabels()
-    ax.set_xticklabels(xtl, rotation=55, ha="right")
-plt.suptitle("")
-plt.tight_layout()
+fig, axs = plt.subplots(1, 1, figsize=(4, 4.5))
+conn.plot(selected_region, ax=axs, max_rows=15, kind="bar", rot=50, width=0.7)
+axs.set_ylabel(
+    f"Mean of {conn.modality} \u00b1 std \n in {len(conn.elements)} {conn.indexing_attributes[0]}s"
+)
+axs.xaxis.set_ticklabels([shorten_name(t.get_text()) for t in axs.xaxis.get_majorticklabels()])
+plt.grid(True, 'minor')
+plt.title(f"Connectivity for {selected_region.name}")
diff --git a/examples/tutorials/2025-paper-fig4.py b/examples/tutorials/2025-paper-fig4.py
index 15a902cc1715a7d7010f6470e6443f2a2dff51fe..e9a083b327538387fee7fc3aff3e5b8472f7229f 100644
--- a/examples/tutorials/2025-paper-fig4.py
+++ b/examples/tutorials/2025-paper-fig4.py
@@ -23,8 +23,6 @@ The tutorial shows how maps and mutimodal regional measurements are obtained usi
 # %%
 import matplotlib.pyplot as plt
 from nilearn import plotting
-import pandas as pd
-import re
 import seaborn as sns
 import siibra
 
@@ -38,6 +36,7 @@ sns.set_style("dark")
 
 jubrain = siibra.parcellations.JULICH_BRAIN_CYTOARCHITECTONIC_ATLAS_V3_0_3
 pmaps = jubrain.get_map(space="mni152", maptype="statistical")
+print(jubrain.publications[0]['citation'])
 
 # %%
 # Obtain definitions and probabilistic maps in MNI asymmetric space of area IFG 44
@@ -53,73 +52,59 @@ for r in regions:
         colorbar=False,
         annotate=False,
         symmetric_cbar=True,
+        title=r.name,
     )
-    plt.savefig(f"{r.key}.png")
 
 # %%
-# For each of the two brain areas, retrieve average densities of a selection of monogenetic
-# neurotransmitter receptors, layer-specific distributions of cell bodies, as well as expressions
-# of a selection of genes coding for these receptors.
+# For each of the two brain areas, query for layer-specific distributions of cell bodies.
+fig, axs = plt.subplots(1, len(regions), sharey=True, figsize=(8, 2.7))
+for i, region in enumerate(regions):
+    layerwise_cellbody_densities = siibra.features.get(region, "layerwise cell density")
+    layerwise_cellbody_densities[0].plot(ax=axs[i], textwrap=25)
+    print(layerwise_cellbody_densities[0].urls)
+    axs[i].set_ylim(25, 115)
 
-receptors = ["M1", "M2", "M3", "D1", "5-HT1A", "5-HT2"]
-genes = ["CHRM1", "CHRM2", "CHRM3", "HTR1A", "HTR2A", "DRD1"]
-modalities = [
-    ("receptor density fingerprint", {}, {"receptors": receptors, "rot": 90}),
-    ("layerwise cell density", {}, {"rot": 0}),
-    ("gene expressions", {"gene": genes}, {"rot": 90}),
-]
-fig, axs = plt.subplots(
-    len(modalities) + 1, len(regions), figsize=(4 * len(regions), 11), sharey="row"
-)
-ymax = [1000, 150, None]
+# %%
+# Next, retrieve average densities of a selection of monogenetic
+# neurotransmitter receptors.
+receptors = ["M1", "M2", "M3", "5-HT1A", "5-HT2", "D1"]
+fig, axs = plt.subplots(1, len(regions), sharey=True, figsize=(8, 3.5))
+for i, region in enumerate(regions):
+    receptor_fingerprints = siibra.features.get(region, "receptor density fingerprint")
+    receptor_fingerprints[0].plot(receptors=receptors, ax=axs[i])
+    print(receptor_fingerprints[0].urls)
+    axs[i].set_ylim(0, 1000)
+    axs[i].title.set_size(12)
+    transmitters = [
+        siibra.vocabularies.RECEPTOR_SYMBOLS[r]['neurotransmitter']['name']
+        for r in receptors
+    ]
+    axs[i].set_xticklabels([f"{r}\n({n})" for r, n in zip(receptors, transmitters)])
 
+# %%
+# Now, for further insight, query for expressions of a selection of genes coding
+# for these receptors.
+genes = ["CHRM1", "CHRM2", "CHRM3", "HTR1A", "HTR2A", "DRD1"]
+fig, axs = plt.subplots(1, len(regions), sharey=True, figsize=(7, 3))
 for i, region in enumerate(regions):
-    axs[0, i].imshow(plt.imread(f"{region.key}.png"))
-    axs[0, i].set_title(f'{region.name.replace("Area ", "")}')
-    axs[0, i].axis("off")
-    for j, (modality, kwargs, plotargs) in enumerate(modalities):
-        fts = siibra.features.get(region, modality, **kwargs)
-        assert len(fts) > 0
-        if len(fts) > 1:
-            print(f"More than one feature found for {modality}, {region.name}")
-        f = fts[0]
-        f.plot(ax=axs[j + 1, i], **plotargs)
-        if modality == "receptor density fingerprint":
-            # add neurotransmitter names to  receptor names in xtick labels
-            transmitters = [re.sub(r"(^.*\()|(\))", "", n) for n in f.neurotransmitters]
-            axs[j + 1, i].set_xticklabels(
-                [f"{r}\n({n})" for r, n in zip(receptors, transmitters)]
-            )
-        if ymax[j] is not None:
-            axs[j + 1, i].set_ylim(0, ymax[j])
-        if "std" in axs[j + 1, i].yaxis.get_label_text():
-            axs[j + 1, i].set_ylabel(
-                axs[j + 1, i].yaxis.get_label_text().replace("std", "std\n")
-            )
-        axs[j + 1, i].set_title(f"{fts[0].modality}")
-fig.suptitle("")
-fig.tight_layout()
+    gene_expressions = siibra.features.get(region, "gene expressions", gene=genes)
+    assert len(gene_expressions) == 1
+    gene_expressions[0].plot(ax=axs[i])
+    axs[i].title.set_size(11)
+    print(gene_expressions[0].urls)
 
 # %%
 # For each of the two brain areas, collect functional connectivity profiles referring to
 # temporal correlation of fMRI timeseries of several hundred subjects from the Human Connectome
 # Project. We show the strongest connections per brain area for the average connectivity patterns
-fts = siibra.features.get(regions[0], "functional connectivity")
-conn = fts[1]
-
-# aggregate connectivity profiles for first region across subjects
-D1 = (
-    pd.concat([c.get_profile(regions[0]).data for c in conn], axis=1)
-    .agg(["mean", "std"], axis=1)
-    .sort_values(by="mean", ascending=False)
-)
-
-# aggregate connectivity profiles for second region across subjects
-D2 = (
-    pd.concat([c.get_profile(regions[1]).data for c in conn], axis=1)
-    .agg(["mean", "std"], axis=1)
-    .sort_values(by="mean", ascending=False)
-)
+fts = siibra.features.get(jubrain, "functional connectivity")
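+# select the HCP cohort matrices computed with the concatenated resting-state paradigm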
+for cf in fts:
+    if cf.cohort != "HCP":
+        continue
+    if cf.paradigm == "Resting state (EmpCorrFC concatenated)":
+        conn = cf
+        break
+print(conn.urls)
 
 
 # plot both average connectivity profiles to target regions
@@ -132,27 +117,24 @@ def shorten_name(n):
     )
 
 
-fig, (a1, a2) = plt.subplots(1, 2, sharey=True, figsize=(3.6 * len(regions), 4.1))
-kwargs = {"kind": "bar", "width": 0.85, "logy": True}
-D1.iloc[:17]["mean"].plot(
-    **kwargs, yerr=D1.iloc[:17]["std"], ax=a1, ylabel=shorten_name(regions[0].name)
-)
-D2.iloc[:17]["mean"].plot(
-    **kwargs, yerr=D2.iloc[:17]["std"], ax=a2, ylabel=shorten_name(regions[1].name)
-)
-a1.set_ylabel("temporal correlation")
-a1.xaxis.set_ticklabels(
-    [shorten_name(t.get_text()) for t in a1.xaxis.get_majorticklabels()]
-)
-a2.xaxis.set_ticklabels(
-    [shorten_name(t.get_text()) for t in a2.xaxis.get_majorticklabels()]
-)
-a1.grid(True)
-a2.grid(True)
-a1.set_title(regions[0].name.replace("Area ", ""))
-a2.set_title(regions[1].name.replace("Area ", ""))
+plotkwargs = {
+    "kind": "bar",
+    "width": 0.85,
+    "logscale": True,
+    "xlabel": "",
+    "ylabel": "temporal correlation",
+    "rot": 90,
+    "ylim": (0.3, 1.1),
+    "capsize": 2,
+}
+fig, axs = plt.subplots(1, len(regions), sharey=True, figsize=(7, 4.5))
+for i, region in enumerate(regions):
+    plotkwargs["ax"] = axs[i]
+    conn.plot(region, max_rows=17, **plotkwargs)
+    axs[i].xaxis.set_ticklabels([shorten_name(t.get_text()) for t in axs[i].xaxis.get_majorticklabels()])
+    axs[i].set_title(region.name.replace("Area ", ""), fontsize=8)
+    axs[i].grid(True, 'minor')
 plt.suptitle("Functional Connectivity")
-plt.tight_layout()
 
 # %%
 # For both brain areas, sample representative cortical image patches at 1µm
@@ -160,7 +142,7 @@ plt.tight_layout()
 # The image patches display clearly the differences in laminar structure of the two regions
 
 selected_sections = [4950, 1345]
-fig, axs = plt.subplots(1, len(regions))
+fig, axs = plt.subplots(1, len(regions), sharey=True)
 for r, sn, ax in zip(regions, selected_sections, axs):
     # find 1 micron sections intersecting the region
     pmap = pmaps.get_volume(r)
@@ -176,6 +158,4 @@ for r, sn, ax in zip(regions, selected_sections, axs):
     ax.axis("off")
     ax.set_title(r.name)
 
-plt.tight_layout()
-
 # sphinx_gallery_thumbnail_number = 2
diff --git a/examples/tutorials/2025-paper-fig5.py b/examples/tutorials/2025-paper-fig5.py
index 4506cd475c56319fc2f6959e3419f2e459bd8376..100629e2388f65d1de2b7fac7c36b087b9ed14d4 100644
--- a/examples/tutorials/2025-paper-fig5.py
+++ b/examples/tutorials/2025-paper-fig5.py
@@ -37,7 +37,6 @@ the full resolution image data for the identified cortical patch from the underl
 # %%
 from nilearn import plotting
 import matplotlib.pyplot as plt
-import numpy as np
 import siibra
 
 assert siibra.__version__ >= "1.0.1"
@@ -67,9 +66,7 @@ patch = patches[0]
 # siibra samples the patch in upright position, but keeps its
 # original orientation in the affine transformation encoded in the NIfTI.
 # Let's first plot the pure voxel data of the patch to see that.
-plt.imshow(
-    patch.fetch().get_fdata().squeeze(), cmap='gray'
-)
+plt.imshow(patch.fetch().get_fdata().squeeze(), cmap='gray')
 
 # %%
 # To understand where and how siibra actually sampled this patch,
@@ -88,15 +85,18 @@ roi = patch_locations.boundingbox.zoom(1.5)
 
 # fetch the section at reduced resolution for plotting.
 section_img = patch.section.fetch(resolution_mm=0.2)
+fig = plt.figure(figsize=(6, 5))
 display = plotting.plot_img(
-    section_img, display_mode="y", cmap='gray', annotate=False,
+    section_img,
+    display_mode="y",
+    cmap='gray',
+    annotate=False,
+    title=f"BigBrain section #{patch.bigbrain_section}",
+    figure=fig,
 )
-display.title(f"BigBrain section #{patch.bigbrain_section}", size=8)
-
 # Annotate the region of interest bounding box
-ax = list(display.axes.values())[0].ax
 (x, w), _, (z, d) = zip(roi.minpoint, roi.shape)
-ax.add_patch(plt.Rectangle((x, z), w, d, ec='k', fc='none', lw=1))
+fig.axes[1].add_patch(plt.Rectangle((x, z), w, d, ec='k', fc='none', lw=1))
 
 
 # %%
@@ -111,9 +111,11 @@ for dim, size in enumerate(roi.shape):
         roi.maxpoint[dim] = patch.get_boundingbox().maxpoint[dim]
 
 # Fetch the region of interest from the section, and plot it.
+fig = plt.figure(figsize=(6, 5))
 roi_img = patch.section.fetch(voi=roi, resolution_mm=-1)
-display = plotting.plot_img(roi_img, display_mode="y", cmap='gray', annotate=False)
-ax = list(display.axes.values())[0].ax
+display = plotting.plot_img(
+    roi_img, display_mode="y", cmap='gray', annotate=False, figure=fig
+)
 
 # Intersect cortical layer surfaces with the image plane
 plane = siibra.Plane.from_image(patch)
@@ -129,26 +131,27 @@ for layername, contours in layer_contours.items():
     layercolor = layermap.get_colormap().colors[layermap.get_index(layername).label]
     for contour in contours:
         for segment in contour.crop(roi):
-            X, _, Z = segment.coordinates.T
-            ax.plot(X, Z, "-", ms=4, color=layercolor)
+            fig.axes[1].plot(
+                segment.coordinates.T[0, :], segment.coordinates.T[2, :],
+                "-", ms=4, color=layercolor
+            )
 
 # %%
 # Plot the region of interest again, this time with the cortical profile that
 # defined the patch, as well as other candidate patch's locations
 # with their relevance scores, ie. probabilities.
-display = plotting.plot_img(roi_img, display_mode="y", cmap='gray', annotate=False)
-ax = list(display.axes.values())[0].ax
+fig = plt.figure(figsize=(6, 5))
+display = plotting.plot_img(
+    roi_img, display_mode="y", cmap='gray', annotate=False, figure=fig
+)
 
 # Concatenate all coordinates of the layer 4 intersected contours.
 layer = "cortical layer 4 right"
-XYZ = np.vstack([c.coordinates for c in layer_contours[layer]])
-layerpoints = siibra.PointCloud(XYZ, space='bigbrain')
+layerpoints = siibra.PointCloud.union(*layer_contours[layer])
 patch_probs = region_map.evaluate_points(layerpoints)
-X, _, Z = layerpoints.coordinates.T
-ax.scatter(X, Z, c=patch_probs, s=10)
+fig.axes[1].scatter(layerpoints.coordinates.T[0, :], layerpoints.coordinates.T[2, :], c=patch_probs, s=10)
 
 # plot the cortical profile in red
-X, _, Z = patch.profile.coordinates.T
-ax.plot(X, Z, "r-", lw=2)
+fig.axes[1].plot(patch.profile.coordinates.T[0, :], patch.profile.coordinates.T[2, :], "r-", lw=2)
 
-# sphinx_gallery_thumbnail_number = -2
+# sphinx_gallery_thumbnail_number = -1
diff --git a/siibra/VERSION b/siibra/VERSION
index 9bce71cc1363aa22d1a7f42da5dc9cd345ec3b9f..db8f562647ad92277dc09fc59dfd5647e64b56bd 100644
--- a/siibra/VERSION
+++ b/siibra/VERSION
@@ -1 +1 @@
-1.0.1-alpha.2
+1.0.1-alpha.4
diff --git a/siibra/commons.py b/siibra/commons.py
index 254f9a8c72b90ae6664d892e032e5ba5cad9d45e..ed5dc255e20ebbf296d94b3b8f65bc27e0c834ea 100644
--- a/siibra/commons.py
+++ b/siibra/commons.py
@@ -534,13 +534,21 @@ def resample_img_to_img(
     -------
     Nifti1Image
     """
+    from nilearn._version import version as nilearn_version
+    from packaging.version import Version
+
     interpolation = "nearest" if np.array_equal(np.unique(source_img.dataobj), [0, 1]) else "linear"
-    resampled_img = resample_to_img(
+    kwargs = dict(
         source_img=source_img,
         target_img=target_img,
         interpolation=interpolation,
         force_resample=True,  # False is intended for testing. see nilearn docs
     )
+    if Version(nilearn_version) >= Version("0.11.0"):
+        # nilearn versions below 0.11.0 (the latest available for Python <= 3.8) do not accept "copy_header"
+        kwargs["copy_header"] = True  # use new default in nilearn >= 0.11.0
+
+    resampled_img = resample_to_img(**kwargs)
     return resampled_img
 
 
diff --git a/siibra/core/assignment.py b/siibra/core/assignment.py
index 8707fb1ca8c21866812dfa3cdd761d97f46bde94..f392ca7b998aa2565d3cdd758cfb8d6d48a5a60f 100644
--- a/siibra/core/assignment.py
+++ b/siibra/core/assignment.py
@@ -26,12 +26,12 @@ T = TypeVar("T")
 
 class Qualification(Enum):
     EXACT = 1
-    OVERLAPS = 2
-    CONTAINED = 3
-    CONTAINS = 4
-    APPROXIMATE = 5
-    HOMOLOGOUS = 6
-    OTHER_VERSION = 7
+    APPROXIMATE = 2
+    OTHER_VERSION = 3
+    CONTAINED = 4
+    CONTAINS = 5
+    OVERLAPS = 6
+    HOMOLOGOUS = 7
 
     @property
     def verb(self):
@@ -67,6 +67,17 @@ class Qualification(Enum):
         assert self in inverses, f"{str(self)} inverses cannot be found."
         return inverses[self]
 
+    def __lt__(self, other: "Qualification"):
+        """
+        Compare qualifications by their enum value. This is used to sort
+        feature query results. Since there is no natural well-ordering of
+        qualifications, the enum values define the order: not every
+        comparison has a semantic basis, but the ordering is well-defined
+        and therefore reproducible, and it ranks the most important
+        qualifications first.
+        """
+        return self.value < other.value
+
     def __str__(self):
         return f"{self.__class__.__name__}={self.name.lower()}"
 
@@ -108,4 +119,4 @@ class AnatomicalAssignment(Generic[T]):
     def __lt__(self, other: 'AnatomicalAssignment'):
         if not isinstance(other, AnatomicalAssignment):
             raise ValueError(f"Cannot compare AnatomicalAssignment with instances of '{type(other)}'")
-        return self.qualification.value < other.qualification.value
+        return self.qualification < other.qualification
diff --git a/siibra/features/anchor.py b/siibra/features/anchor.py
index 85bba60a07d787f9d68b62d66ed0301b90d951d0..92642bd27e2d1d73239b29f989f3fb068705a8ca 100644
--- a/siibra/features/anchor.py
+++ b/siibra/features/anchor.py
@@ -176,9 +176,7 @@ class AnatomicalAnchor:
                 assignments.append(region.assign(concept))
             self._assignments[concept] = sorted(a for a in assignments if a is not None)
 
-        self._last_matched_concept = concept \
-            if len(self._assignments[concept]) > 0 \
-            else None
+        self._last_matched_concept = concept if len(self._assignments[concept]) > 0 else None
         return self._assignments[concept]
 
     def matches(self, concept: Union[BrainStructure, Space]) -> bool:
diff --git a/siibra/features/connectivity/regional_connectivity.py b/siibra/features/connectivity/regional_connectivity.py
index 540d84558901b5034e3201e6127d4961f2a0ae35..fd5a9f945ba8f0bdcbb358ba69f794fa4f7fac85 100644
--- a/siibra/features/connectivity/regional_connectivity.py
+++ b/siibra/features/connectivity/regional_connectivity.py
@@ -104,6 +104,7 @@ class RegionalConnectivity(Feature, Compoundable):
         self._matrix = None
         self._subject = subject
         self._feature = feature
+        self._matrix_std = None  # only used for compound feature
 
     @property
     def subject(self):
@@ -170,6 +171,9 @@ class RegionalConnectivity(Feature, Compoundable):
         merged._matrix = elements[0]._arraylike_to_dataframe(
             np.stack(all_arrays).mean(0)
         )
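+        # also store the across-element standard deviation; get_profile() reports it alongside the mean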
+        merged._matrix_std = elements[0]._arraylike_to_dataframe(
+            np.stack(all_arrays).std(0)
+        )
         return merged
 
     def _plot_matrix(
@@ -258,10 +262,14 @@ class RegionalConnectivity(Feature, Compoundable):
             non-symmetric matrices. ('column' or 'row')
         """
         matrix = self.data
+        assert isinstance(matrix, pd.DataFrame)
+        matrix_std = self._matrix_std
         if direction.lower() not in ['column', 'row']:
             raise ValueError("Direction can only be 'column' or 'row'")
         if direction.lower() == 'row':
             matrix = matrix.transpose()
+            if matrix_std is not None:
+                matrix_std = matrix_std.transpose()
 
         def matches(r1, r2):
             if isinstance(r1, tuple):
@@ -270,32 +278,38 @@ class RegionalConnectivity(Feature, Compoundable):
                 assert isinstance(r1, _region.Region)
                 return r1.matches(r2)
 
-        regions = [r for r in matrix.index if matches(r, region)]
-        if len(regions) == 0:
+        # decode region spec
+        region_candidates = [r for r in matrix.index if matches(r, region)]
+        if len(region_candidates) == 0:
             raise ValueError(f"Invalid region specification: {region}")
-        elif len(regions) > 1:
-            raise ValueError(f"Region specification {region} matched more than one profile: {regions}")
-        else:
-            name = self.modality
-            series = matrix[regions[0]]
-            last_index = len(series) - 1 if max_rows is None \
-                else min(max_rows, len(series) - 1)
-            return Tabular(
-                description=self.description,
-                modality=f"{self.modality} {self.cohort}",
-                anchor=_anchor.AnatomicalAnchor(
-                    species=list(self.anchor.species)[0],
-                    region=regions[0]
-                ),
-                data=(
-                    series[:last_index]
-                    .to_frame(name=name)
-                    .query(f'`{name}` > {min_connectivity}')
-                    .sort_values(by=name, ascending=False)
-                    .rename_axis('Target regions')
-                ),
-                datasets=self.datasets
-            )
+        if len(region_candidates) > 1:
+            raise ValueError(f"Region specification {region} matched more than one profile: {region_candidates}")
+        region = region_candidates[0]
+
+        # build the profile data frame with mean (and, if available, std) connectivity values
+        data = matrix[region].to_frame('mean')
+        if matrix_std is not None:
+            data = pd.concat([data, matrix_std[region].rename('std')], axis=1)
+
+        last_index = len(data) if max_rows is None else min(max_rows, len(data))
+
+        data = (
+            data
+            .query(f'`mean` > {min_connectivity}')
+            .sort_values(by="mean", ascending=False)
+            .rename_axis('Target regions')
+        )[:last_index]
+
+        return Tabular(
+            description=self.description,
+            modality=f"{self.modality} {self.cohort}",
+            anchor=_anchor.AnatomicalAnchor(
+                species=list(self.anchor.species)[0],
+                region=region
+            ),
+            data=data,
+            datasets=self.datasets
+        )
 
     def plot(
         self,
@@ -336,8 +350,8 @@ class RegionalConnectivity(Feature, Compoundable):
         profile = self.get_profile(regions, min_connectivity, max_rows, direction)
         kwargs["kind"] = kwargs.get("kind", "barh")
         if backend == "matplotlib":
-            kwargs["logx"] = kwargs.get("logx", logscale)
-            return profile.data.plot(*args, backend=backend, **kwargs)
+            kwargs["logy"] = kwargs.get("logy", logscale)
+            return profile.plot(*args, backend=backend, **kwargs)
         elif backend == "plotly":
             kwargs.update({
                 "color": kwargs.get("color", profile.data.columns[0]),
diff --git a/siibra/features/feature.py b/siibra/features/feature.py
index 1b207e65cb5a6dd4bad53798d2ec08eb4ae33fba..969049ec751fdd9bc5aa3c0ccd5d4721a144cfbf 100644
--- a/siibra/features/feature.py
+++ b/siibra/features/feature.py
@@ -583,7 +583,10 @@ class Feature:
         # with the query concept.
         live_instances = feature_type._livequery(concept, **kwargs)
 
-        results = list(dict.fromkeys(preconfigured_instances + live_instances))
+        results = sorted(
+            dict.fromkeys(preconfigured_instances + live_instances),  # to remove duplicates
+            key=lambda f: min(f.last_match_result) if f.last_match_result else False,  # to order according to assignment ranking
+        )
         return CompoundFeature._compound(results, concept)
 
     @classmethod
diff --git a/siibra/features/tabular/gene_expression.py b/siibra/features/tabular/gene_expression.py
index e3ad893db2c0ce1ad2d76abff0dc69900ee30137..1db8a5f92f85cae1b6076c4ec6910b39f1a26445 100644
--- a/siibra/features/tabular/gene_expression.py
+++ b/siibra/features/tabular/gene_expression.py
@@ -225,6 +225,7 @@ class GeneExpressions(
             datasets=datasets
         )
         self.unit = "expression level"
+        self._genes = list(set(genes))
 
     def plot(self, *args, backend="matplotlib", **kwargs):
         """
@@ -239,18 +240,34 @@ class GeneExpressions(
             Keyword arguments are passed on to the plot command.
         """
         wrapwidth = kwargs.pop("textwrap") if "textwrap" in kwargs else 40
-        kwargs["title"] = kwargs.pop("title", None) \
-            or "\n".join(wrap(f"{self.modality} measured in {self.anchor._regionspec or self.anchor.location}", wrapwidth))
-        kwargs["kind"] = "box"
+        kwargs["title"] = kwargs.pop(
+            "title",
+            "\n".join(wrap(
+                f"{self.modality}\n{self.anchor._regionspec or self.anchor.location}",
+                wrapwidth
+            ))
+        )
+        kwargs["kind"] = kwargs.get("kind", "box")
         if backend == "matplotlib":
-            for arg in ['yerr', 'y', 'ylabel', 'xlabel', 'width']:
-                assert arg not in kwargs
-            default_kwargs = {
-                "grid": True, "legend": False, 'by': "gene",
-                'column': ['level'], 'showfliers': False, 'ax': None,
-                'ylabel': 'expression level'
-            }
-            return self.data.plot(*args, **{**default_kwargs, **kwargs}, backend=backend)
+            if kwargs["kind"] == "box":
+                from matplotlib.pyplot import tight_layout
+
+                title = kwargs.pop("title")
+                default_kwargs = {
+                    "grid": True,
+                    'by': "gene",
+                    'column': ['level'],
+                    'showfliers': False,
+                    'ylabel': 'expression level',
+                    'xlabel': 'gene',
+                    'color': 'dimgray',
+                    'rot': 90 if len(self._genes) > 1 else 0,
+                }
+                ax, *_ = self.data.plot(*args, backend=backend, **{**default_kwargs, **kwargs})
+                ax.set_title(title)
+                tight_layout()
+                return ax
+            return self.data.plot(*args, backend=backend, **kwargs)
         elif backend == "plotly":
             kwargs["title"] = kwargs["title"].replace('\n', "<br>")
             return self.data.plot(y='level', x='gene', backend=backend, **kwargs)
diff --git a/siibra/features/tabular/layerwise_cell_density.py b/siibra/features/tabular/layerwise_cell_density.py
index 6f5af0f0265e7223bc570362d2409a015e64233c..ade11706096b17109b9cc96032ded68fff33c6f3 100644
--- a/siibra/features/tabular/layerwise_cell_density.py
+++ b/siibra/features/tabular/layerwise_cell_density.py
@@ -15,8 +15,8 @@
 
 import numpy as np
 import pandas as pd
+from textwrap import wrap
 
-from . import cortical_profile
 from . import tabular, cell_reader, layer_reader
 from .. import anchor as _anchor
 from ... import commons
@@ -37,6 +37,7 @@ class LayerwiseCellDensity(
         "detected cells in that layer with the area covered by the layer. Therefore, each profile contains 6 measurement points. "
         "The cortical depth is estimated from the measured layer thicknesses."
     )
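+    # volumetric tissue shrinkage factor of BigBrain, used when converting measured layer areas to volumes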
+    BIGBRAIN_VOLUMETRIC_SHRINKAGE_FACTOR = 1.931
 
     def __init__(
         self,
@@ -57,13 +58,13 @@ class LayerwiseCellDensity(
             id=id,
             prerelease=prerelease,
         )
-        self.unit = "# detected cells/0.1mm3"
+        self.unit = "# detected cells / $0.1mm^3$"
         self._filepairs = list(zip(segmentfiles, layerfiles))
         self._densities = None
 
     def _load_densities(self):
-        density_dict = {}
-        for i, (cellfile, layerfile) in enumerate(self._filepairs):
+        data = []
+        for cellfile, layerfile in self._filepairs:
             try:
                 cells = requests.HttpRequest(cellfile, func=cell_reader).data
                 layers = requests.HttpRequest(layerfile, func=layer_reader).data
@@ -72,22 +73,82 @@ class LayerwiseCellDensity(
                 commons.logger.error(f"Skipping to bootstrap a {self.__class__.__name__} feature, cannot access file resource.")
                 continue
             counts = cells.layer.value_counts()
-            areas = layers["Area(micron**2)"]
-            indices = np.intersect1d(areas.index, counts.index)
-            density_dict[i] = counts[indices] / areas * 100 ** 2 * 5
-        return pd.DataFrame(density_dict)
+            # compute the volumetric shrinkage correction in the same way as it was done
+            # for the PDF reports in the underlying dataset
+            shrinkage_volumetric = self.BIGBRAIN_VOLUMETRIC_SHRINKAGE_FACTOR
+            layer_volumes = (
+                layers["Area(micron**2)"]  # this is the number of pixels, shrinkage corrected from the dataset
+                * 20  # go to cube micrometer in one patch with 20 micron thickness
+                * np.cbrt(shrinkage_volumetric)  # compensate linear shrinkage for 3rd dimension
+                / 100 ** 3  # go to 0.1 cube millimeter
+            )
+            fields = cellfile.split("/")
+            for layer in layer_volumes.index:
+                data.append({
+                    'layer': layer,
+                    'layername': layers["Name"].loc[layer],
+                    'counts': counts.loc[layer],
+                    'area_mu2': layers["Area(micron**2)"].loc[layer],
+                    'volume': layer_volumes.loc[layer],
+                    'density': counts.loc[layer] / layer_volumes.loc[layer],
+                    'regionspec': fields[-5],
+                    'section': int(fields[-3]),
+                    'patch': int(fields[-2]),
+                })
+        return pd.DataFrame(data)
 
     @property
     def data(self):
         if self._data_cached is None:
-            densities = self._load_densities()
-            self._data_cached = pd.DataFrame(
-                np.array([
-                    list(densities.mean(axis=1)),
-                    list(densities.std(axis=1))
-                ]).T,
-                columns=['mean', 'std'],
-                index=[cortical_profile.CorticalProfile.LAYERS[_] for _ in densities.index]
-            )
-            self._data_cached.index.name = 'layer'
+            self._data_cached = self._load_densities()
         return self._data_cached
+
+    def plot(self, *args, backend="matplotlib", **kwargs):
+        wrapwidth = kwargs.pop("textwrap") if "textwrap" in kwargs else 40
+        kwargs["title"] = kwargs.pop(
+            "title",
+            "\n".join(wrap(
+                f"{self.modality} in {self.anchor._regionspec or self.anchor.location}",
+                wrapwidth
+            ))
+        )
+        kwargs["kind"] = kwargs.get("kind", "box")
+        kwargs["ylabel"] = kwargs.get(
+            "ylabel",
+            f"\n{self.unit}" if hasattr(self, 'unit') else ""
+        )
+        if backend == "matplotlib":
+            if kwargs["kind"] == "box":
+                from matplotlib.pyplot import tight_layout
+
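+                # seed the jitter of the scatter overlay deterministically from the data for reproducible plots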
+                np.random.seed(int(self.data["density"].mean()))
+
+                title = kwargs.pop("title")
+                default_kwargs = {
+                    "grid": True,
+                    'by': "layername",
+                    'column': ['density'],
+                    'showfliers': False,
+                    'xlabel': 'layer',
+                    'color': 'dimgray',
+                }
+                ax, *_ = self.data.plot(*args, backend=backend, **{**default_kwargs, **kwargs})
+                for i, (layer, d) in enumerate(self.data.groupby('layername')):
+                    ax.scatter(
+                        np.random.normal(i + 1, 0.05, len(d.density)),
+                        d.density,
+                        c='b', s=3
+                    )
+                ax.set_title(title)
+                tight_layout()
+                return ax
+            return self.data.plot(*args, backend=backend, **kwargs)
+        elif backend == "plotly":
+            kwargs["title"] = kwargs["title"].replace('\n', "<br>")
+            yaxis_title = kwargs.pop("ylabel")
+            fig = self.data.plot(y='density', x='layer', points="all", backend=backend, **kwargs)
+            fig.update_layout(yaxis_title=yaxis_title)
+            return fig
+        else:
+            return self.data.plot(*args, backend=backend, **kwargs)
diff --git a/siibra/features/tabular/tabular.py b/siibra/features/tabular/tabular.py
index a50cfb8787a0007d8568772e6c80ed7b49cab4bc..8447856848d6fc477bd385446c3f6dcfb65ff09f 100644
--- a/siibra/features/tabular/tabular.py
+++ b/siibra/features/tabular/tabular.py
@@ -99,14 +99,20 @@ class Tabular(feature.Feature):
             if kwargs.get("error_y") is None:
                 kwargs["yerr"] = kwargs.get("yerr", 'std' if 'std' in self.data.columns else None)
                 yerr_label = f" \u00b1 {kwargs.get('yerr')}" if kwargs.get('yerr') else ''
-            kwargs["width"] = kwargs.get("width", 0.95)
+            if kwargs.get('kind') == 'bar':
+                kwargs["width"] = kwargs.get("width", 0.8)
+                kwargs["edgecolor"] = kwargs.get('edgecolor', 'black')
+                kwargs["linewidth"] = kwargs.get('linewidth', 1.0)
+                kwargs["capsize"] = kwargs.get('capsize', 4)
             kwargs["ylabel"] = kwargs.get(
                 "ylabel",
                 f"{kwargs['y']}{yerr_label}" + f"\n{self.unit}" if hasattr(self, 'unit') else ""
             )
             kwargs["grid"] = kwargs.get("grid", True)
             kwargs["legend"] = kwargs.get("legend", False)
-            xticklabel_rotation = kwargs.get("xticklabel_rotation", 60)
+            kwargs["color"] = kwargs.get('color', 'darkgrey')
+
+            xticklabel_rotation = kwargs.get("rot", 60)
             ax = self.data.plot(*args, backend=backend, **kwargs)
             ax.set_title(ax.get_title(), fontsize="medium")
             ax.set_xticklabels(
@@ -114,6 +120,8 @@ class Tabular(feature.Feature):
                 rotation=xticklabel_rotation,
                 ha='center' if xticklabel_rotation % 90 == 0 else 'right'
             )
+            ax.spines['top'].set_visible(False)
+            ax.spines['right'].set_visible(False)
             plt.tight_layout()
             return ax
         elif backend == "plotly":
diff --git a/siibra/livequeries/allen.py b/siibra/livequeries/allen.py
index aefb1809b4b2e647ad4644c125fb96b4e4b19c99..c25412d85077fc6b6d51578529593db84d3cdaa0 100644
--- a/siibra/livequeries/allen.py
+++ b/siibra/livequeries/allen.py
@@ -22,6 +22,7 @@ import numpy as np
 
 from . import query as _query
 from ..core import structure
+from ..core.region import Region
 from ..features import anchor as _anchor
 from ..features.tabular.gene_expression import GeneExpressions
 from ..commons import logger, Species
@@ -173,6 +174,8 @@ class AllenBrainAtlasQuery(_query.LiveQuery, args=['gene'], FeatureType=GeneExpr
             explanation=explanation
         )]
         anchor._last_matched_concept = concept
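+        # remember the region name so that feature plots can refer to it in their titles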
+        if isinstance(concept, Region):
+            anchor._regionspec = concept.name
 
         return [GeneExpressions(
             anchor=anchor,
diff --git a/siibra/locations/pointcloud.py b/siibra/locations/pointcloud.py
index 721d8a08eafc4e8c7b1464f11ab912dc2d7d7de9..8f3704ac852e567d988b0e15a8b801d37c272266 100644
--- a/siibra/locations/pointcloud.py
+++ b/siibra/locations/pointcloud.py
@@ -358,7 +358,7 @@ class Contour(PointCloud):
     def __init__(self, coordinates, space=None, sigma_mm=0, labels: list = None):
         PointCloud.__init__(self, coordinates, space, sigma_mm, labels)
 
-    def crop(self, voi: "_boundingbox.BoundingBox"):
+    def crop(self, voi: "_boundingbox.BoundingBox") -> List["Contour"]:
         """
         Crop the contour with a volume of interest.
         Since the contour might be split from the cropping,
diff --git a/siibra/volumes/sparsemap.py b/siibra/volumes/sparsemap.py
index 558285fa4e7f422b1b78574787124902feebfea0..82775a1e3223f4846e801ff3177f19f67cfb6171 100644
--- a/siibra/volumes/sparsemap.py
+++ b/siibra/volumes/sparsemap.py
@@ -18,11 +18,10 @@ from os import path, makedirs
 from typing import Dict, List
 
 import numpy as np
-from nilearn import image
 
 from . import parcellationmap, volume as _volume
 from .providers import provider
-from ..commons import MapIndex, logger, connected_components, siibra_tqdm
+from ..commons import MapIndex, logger, connected_components, siibra_tqdm, resample_img_to_img
 from ..locations import boundingbox
 from ..retrieval.cache import CACHE
 from ..retrieval.requests import HttpRequest, FileLoader
@@ -353,30 +352,23 @@ class SparseMap(parcellationmap.Map):
         split_components: bool, default: True
             Whether to split the query volume into disjoint components.
         """
+        from nibabel import Nifti1Image
+
         queryimg = queryvolume.fetch()
-        imgdata = np.asanyarray(queryimg.dataobj)
-        imgaffine = queryimg.affine
+        assert isinstance(queryimg, Nifti1Image)
         assignments = []
 
-        # resample query image into this image's voxel space, if required
-        if (imgaffine - self.affine).sum() == 0:
-            querydata = imgdata.squeeze()
-        else:
-            if issubclass(imgdata.dtype.type, np.integer):
-                interp = "nearest"
-            else:
-                interp = "linear"
-            from nibabel import Nifti1Image
-            queryimg = image.resample_img(
-                Nifti1Image(imgdata, imgaffine),
-                target_affine=self.affine,
-                target_shape=self.shape,
-                interpolation=interp,
+        # resample query image into this image's voxel space, if required (nilearn checks)
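+        # a zero-valued image on this map's voxel grid serves as the resampling target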
+        queryimg = resample_img_to_img(
+            source_img=queryimg,
+            target_img=Nifti1Image(
+                np.zeros(self.shape), affine=self.affine, dtype=queryimg.dataobj.dtype
             )
-            querydata = np.asanyarray(queryimg.dataobj).squeeze()
+        )
+        self.space.get_template()
+        querydata = np.asanyarray(queryimg.dataobj).squeeze()
 
-        iter_func = connected_components if split_components \
-            else lambda img: [(1, img)]
+        iter_func = connected_components if split_components else lambda img: [(1, img)]
 
         for mode, modemask in iter_func(querydata):
 
diff --git a/test/features/test_cells.py b/test/features/test_cells.py
index 15949246bf9640367fd3e2567cf6191f1ccf31e6..d27459c9e9cbb0b6701dfc25d262f872de215cc7 100644
--- a/test/features/test_cells.py
+++ b/test/features/test_cells.py
@@ -19,10 +19,8 @@ class TestCorticalCellDistribution(unittest.TestCase):
         )
         assert len(features) > 0
         feature = features[0]
-        assert (feature.data["mean"].mean() > 90) and (
-            feature.data["mean"].mean() < 100
-        )
-        assert feature.data.shape == (6, 2)
+        assert feature.data["density"].mean() == 78.86556888985415
+        assert feature.data.shape == (60, 9)
 
     def test_check_layer_density_with_valid_range(self):
         features = siibra.features.get(
@@ -30,9 +28,9 @@ class TestCorticalCellDistribution(unittest.TestCase):
         )
         assert len(features) > 0
         feature = features[0]
-        layer_density = feature.data["mean"]["I"]
+        layer_density = feature.data[feature.data["layername"] == "I"]["density"]
         assert layer_density is not None
-        layer_density = feature.data["mean"]["VI"]
+        layer_density = feature.data[feature.data["layername"] == "VI"]["density"]
         assert layer_density is not None