diff --git a/PyNutil/io/read_and_write.py b/PyNutil/io/read_and_write.py
index c4c5e228fd8d1177908cf8d72b573e5c065c7d6f..133fc58db5f6be309291a618303b94fa16df3e4d 100644
--- a/PyNutil/io/read_and_write.py
+++ b/PyNutil/io/read_and_write.py
@@ -208,79 +208,6 @@ def write_points_to_meshview(points, point_names, filename, info_file):
     write_points(region_dict, filename, info_file)
 
 
-# I think this might not need to be its own function :)
-def save_dataframe_as_csv(df_to_save, output_csv):
-    """Function for saving a df as a CSV file"""
-    df_to_save.to_csv(output_csv, sep=";", na_rep="", index=False)
-
-
-def flat_to_array(file, labelfile):
-    """Read flat file, write into an np array, assign label file values, return array"""
-    if file.endswith(".flat"):
-        with open(file, "rb") as f:
-            # I don't know what b is, w and h are the width and height that we get from the
-            # flat file header
-            b, w, h = struct.unpack(">BII", f.read(9))
-            # Data is a one dimensional list of values
-            # It has the shape width times height
-            data = struct.unpack(">" + ("xBH"[b] * (w * h)), f.read(b * w * h))
-    elif file.endswith(".seg"):
-        with open(file, "rb") as f:
-
-            def byte():
-                return f.read(1)[0]
-
-            def code():
-                c = byte()
-                if c < 0:
-                    raise "!"
-                return c if c < 128 else (c & 127) | (code() << 7)
-
-            if "SegRLEv1" != f.read(8).decode():
-                raise "Header mismatch"
-            atlas = f.read(code()).decode()
-            codes = [code() for x in range(code())]
-            w = code()
-            h = code()
-            data = []
-            while len(data) < w * h:
-                data += [codes[byte() if len(codes) <= 256 else code()]] * (code() + 1)
-
-    # convert flat file data into an array, previously data was a tuple
-    imagedata = np.array(data)
-
-    # create an empty image array in the right shape, write imagedata into image_array
-    image = np.zeros((h, w))
-    for x in range(w):
-        for y in range(h):
-            image[y, x] = imagedata[x + y * w]
-
-    image_arr = np.array(image)
-    # return image_arr
-
-    """assign label file values into image array"""
-    labelfile = pd.read_csv(labelfile)
-    allen_id_image = np.zeros((h, w))  # create an empty image array
-    coordsy, coordsx = np.meshgrid(list(range(w)), list(range(h)))
-    values = image_arr[
-        coordsx, coordsy
-    ]  # assign x,y coords from image_array into values
-    lbidx = labelfile["idx"].values
-    allen_id_image = lbidx[values.astype(int)]
-    return allen_id_image
-
-
-def files_in_directory(directory):
-    """return list of flat file names in a directory"""
-    list_of_files = []
-    for file in os.scandir(directory):
-        if file.path.endswith(".flat") and file.is_file:
-            filename = os.path.basename(file)
-            newfilename, file_ext = os.path.splitext(filename)
-            list_of_files.append(newfilename)
-    return list_of_files
-
-
 def read_atlas_volume(atlas_volume_path):
     """return data from atlas volume"""
     data, header = nrrd.read(atlas_volume_path)
diff --git a/PyNutil/main.py b/PyNutil/main.py
index c28a63c54c679be458e581b2e7bc7cf1e20a2909..3642a72d5a1e98f84e6dfc1828dd4ebe65ec667d 100644
--- a/PyNutil/main.py
+++ b/PyNutil/main.py
@@ -1,13 +1,15 @@
 import json
 from .io.atlas_loader import load_atlas_data, load_custom_atlas
-from .processing.data_analysis import quantify_labeled_points, map_to_custom_regions, apply_custom_regions
+from .processing.data_analysis import (
+    quantify_labeled_points,
+    map_to_custom_regions,
+    apply_custom_regions,
+)
 from .io.file_operations import save_analysis_output
 from .io.read_and_write import open_custom_region_file
 from .processing.coordinate_extraction import folder_to_atlas_space
 
 
-
-
 class PyNutil:
     """
     A class used to perform brain-wide quantification and spatial analysis of features in serial section images.
@@ -153,7 +155,7 @@ class PyNutil:
                 self.centroids_len,
                 self.segmentation_filenames,
                 self.per_point_undamaged,
-                self.per_centroid_undamaged
+                self.per_centroid_undamaged,
             ) = folder_to_atlas_space(
                 self.segmentation_folder,
                 self.alignment_json,
@@ -204,7 +206,7 @@ class PyNutil:
                 self.centroids_labels,
                 self.atlas_labels,
                 self.per_point_undamaged,
-                self.per_centroid_undamaged
+                self.per_centroid_undamaged,
             )
             if self.custom_regions_dict is not None:
                 self.custom_label_df, self.label_df = apply_custom_regions(
diff --git a/PyNutil/processing/coordinate_extraction.py b/PyNutil/processing/coordinate_extraction.py
index 210be15890bbb1ed3dcd08fa67956fb8ca5f6c69..f6d26c2bfdd6f8aa7773f6df3f7b2519d3effd63 100644
--- a/PyNutil/processing/coordinate_extraction.py
+++ b/PyNutil/processing/coordinate_extraction.py
@@ -3,7 +3,6 @@ import pandas as pd
 from ..io.read_and_write import load_visualign_json
 from .counting_and_load import flat_to_dataframe, rescale_image, load_image
 from .visualign_deformations import triangulate
-from glob import glob
 import cv2
 from skimage import measure
 import threading
@@ -44,15 +43,19 @@ def get_centroids_and_area(segmentation, pixel_cut_off=0):
     coords = np.array([label.coords for label in labels_info], dtype=object)
     return centroids, area, coords
 
+
 def update_spacing(anchoring, width, height, grid_spacing):
     if len(anchoring) != 9:
         print("Anchoring does not have 9 elements.")
-    ow = np.sqrt(sum([anchoring[i+3] ** 2 for i in range(3)]))
-    oh = np.sqrt(sum([anchoring[i+6] ** 2 for i in range(3)]))
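+    # ow and oh are the norms of anchoring elements 3-5 and 6-8 (the u and v
+    # axis vectors), so width / ow scales atlas units to section pixels.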
+    ow = np.sqrt(sum([anchoring[i + 3] ** 2 for i in range(3)]))
+    oh = np.sqrt(sum([anchoring[i + 6] ** 2 for i in range(3)]))
     xspacing = int(width * grid_spacing / ow)
     yspacing = int(height * grid_spacing / oh)
     return xspacing, yspacing
 
+
 def create_damage_mask(section, grid_spacing):
     width = section["width"]
     height = section["height"]
@@ -66,7 +67,12 @@ def create_damage_mask(section, grid_spacing):
     y_coords = np.arange(gridy, height, yspacing)
 
     num_markers = len(grid_values)
-    markers = [(x_coords[i % len(x_coords)], y_coords[i // len(x_coords)]) for i in range(num_markers)]
+    markers = [
+        (x_coords[i % len(x_coords)], y_coords[i // len(x_coords)])
+        for i in range(num_markers)
+    ]
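+    # The markers fill the grid row-major: i % len(x_coords) gives the column,
+    # i // len(x_coords) the row.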
 
     binary_image = np.ones((len(y_coords), len(x_coords)), dtype=int)
 
@@ -76,6 +80,7 @@ def create_damage_mask(section, grid_spacing):
 
     return binary_image
 
+
 def folder_to_atlas_space(
     folder,
     quint_alignment,
@@ -105,9 +110,15 @@ def folder_to_atlas_space(
     slices, gridspacing = load_visualign_json(quint_alignment)
     segmentations = get_segmentations(folder)
     flat_files, flat_file_nrs = get_flat_files(folder, use_flat)
-    points_list, centroids_list, region_areas_list, centroids_labels, points_labels,per_point_undamaged_list,per_centroid_undamaged_list  = (
-        initialize_lists(len(segmentations))
-    )
+    (
+        points_list,
+        centroids_list,
+        region_areas_list,
+        centroids_labels,
+        points_labels,
+        per_point_undamaged_list,
+        per_centroid_undamaged_list,
+    ) = initialize_lists(len(segmentations))
     threads = create_threads(
         segmentations,
         slices,
@@ -126,11 +137,25 @@ def folder_to_atlas_space(
         object_cutoff,
         atlas_volume,
         use_flat,
-        gridspacing
+        gridspacing,
     )
     start_and_join_threads(threads)
-    points, centroids, points_labels, centroids_labels, points_len, centroids_len, per_point_undamaged_list, per_centroid_undamaged_list = (
-        process_results(points_list, centroids_list, points_labels, centroids_labels, per_point_undamaged_list, per_centroid_undamaged_list)
+    (
+        points,
+        centroids,
+        points_labels,
+        centroids_labels,
+        points_len,
+        centroids_len,
+        per_point_undamaged_list,
+        per_centroid_undamaged_list,
+    ) = process_results(
+        points_list,
+        centroids_list,
+        points_labels,
+        centroids_labels,
+        per_point_undamaged_list,
+        per_centroid_undamaged_list,
     )
     return (
         points,
@@ -142,7 +167,7 @@ def folder_to_atlas_space(
         centroids_len,
         segmentations,
         per_point_undamaged_list,
-        per_centroid_undamaged_list
+        per_centroid_undamaged_list,
     )
 
 
@@ -184,7 +209,7 @@ def initialize_lists(length):
         centroids_labels,
         points_labels,
         per_point_undamaged_list,
-        per_centroid_undamaged_list
+        per_centroid_undamaged_list,
     )
 
 
@@ -206,7 +231,7 @@ def create_threads(
     object_cutoff,
     atlas_volume,
     use_flat,
-    gridspacing
+    gridspacing,
 ):
     """
     Creates threads for processing segmentations.
@@ -262,7 +287,7 @@ def create_threads(
                 object_cutoff,
                 atlas_volume,
                 use_flat,
-                gridspacing
+                gridspacing,
             ),
         )
         threads.append(x)
@@ -310,7 +335,7 @@ def get_region_areas(
     slice_dict,
     atlas_volume,
     triangulation,
-    damage_mask
+    damage_mask,
 ):
     """
     Gets the region areas.
@@ -328,11 +353,16 @@ def get_region_areas(
     Returns:
         DataFrame: DataFrame with region areas.
     """
-    atlas_map = load_image(flat_file_atlas,slice_dict["anchoring"], atlas_volume, triangulation, (seg_width, seg_height), atlas_labels)
-
-    region_areas = flat_to_dataframe(
-        atlas_map, damage_mask, (seg_width, seg_height)
+    atlas_map = load_image(
+        flat_file_atlas,
+        slice_dict["anchoring"],
+        atlas_volume,
+        triangulation,
+        (seg_width, seg_height),
+        atlas_labels,
     )
+
+    region_areas = flat_to_dataframe(atlas_map, damage_mask, (seg_width, seg_height))
     return region_areas, atlas_map
 
 
@@ -393,7 +423,7 @@ def segmentation_to_atlas_space(
         slice_dict,
         atlas_volume,
         triangulation,
-        damage_mask
+        damage_mask,
     )
     atlas_map = rescale_image(atlas_map, (reg_width, reg_height))
     y_scale, x_scale = transform_to_registration(
@@ -412,16 +442,23 @@ def segmentation_to_atlas_space(
         np.round(scaled_centroidsX).astype(int), np.round(scaled_centroidsY).astype(int)
     ]
     if damage_mask is not None:
-        damage_mask = cv2.resize(damage_mask.astype(np.uint8), (atlas_map.shape[::-1]), interpolation=cv2.INTER_NEAREST).astype(bool)
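+        # cv2.resize expects dsize as (width, height) while numpy shapes are
+        # (height, width), hence the reversed atlas_map.shape below.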
+        damage_mask = cv2.resize(
+            damage_mask.astype(np.uint8),
+            (atlas_map.shape[::-1]),
+            interpolation=cv2.INTER_NEAREST,
+        ).astype(bool)
         per_point_undamaged = damage_mask[
-                np.round(scaled_x).astype(int), np.round(scaled_y).astype(int)
-            ]
+            np.round(scaled_x).astype(int), np.round(scaled_y).astype(int)
+        ]
         per_centroid_undamaged = damage_mask[
-            np.round(scaled_centroidsX).astype(int), np.round(scaled_centroidsY).astype(int)
-            ]
+            np.round(scaled_centroidsX).astype(int),
+            np.round(scaled_centroidsY).astype(int),
+        ]
     else:
         per_point_undamaged = np.ones(scaled_x.shape, dtype=bool)
-        per_centroid_undamaged =  np.ones(scaled_centroidsX.shape, dtype=bool)
+        per_centroid_undamaged = np.ones(scaled_centroidsX.shape, dtype=bool)
     per_point_labels = per_point_labels[per_point_undamaged]
     per_centroid_labels = per_centroid_labels[per_centroid_undamaged]
     new_x, new_y, centroids_new_x, centroids_new_y = get_transformed_coordinates(
@@ -452,7 +487,9 @@ def segmentation_to_atlas_space(
         per_centroid_undamaged if centroids is not None else []
     )
     points_labels[index] = np.array(per_point_labels if points is not None else [])
-    per_point_undamaged_list[index] = np.array(per_point_undamaged if points is not None else [])
+    per_point_undamaged_list[index] = np.array(
+        per_point_undamaged if points is not None else []
+    )
 
 
 def get_triangulation(slice_dict, reg_width, reg_height, non_linear):
diff --git a/PyNutil/processing/counting_and_load.py b/PyNutil/processing/counting_and_load.py
index c52c45abf25e26be800770216b3b45e9e33fd9d7..f86af434af781377675c661f093c5b5444851730 100644
--- a/PyNutil/processing/counting_and_load.py
+++ b/PyNutil/processing/counting_and_load.py
@@ -6,51 +6,13 @@ from .generate_target_slice import generate_target_slice
 from .visualign_deformations import transform_vec
 
 
-# related to counting and load
-def label_points(points, label_volume, scale_factor=1):
-    """
-    Assigns points to regions based on the label_volume.
-
-    Args:
-        points (list): List of points.
-        label_volume (ndarray): Volume with region labels.
-        scale_factor (int, optional): Scaling factor for points. Defaults to 1.
-
-    Returns:
-        ndarray: Labels for each point.
-    """
-    # First convert the points to 3 columns
-    points = np.reshape(points, (-1, 3))
-    # Scale the points
-    points = points * scale_factor
-    # Round the points to the nearest whole number
-    points = np.round(points).astype(int)
-    x = points[:, 0]
-    y = points[:, 1]
-    z = points[:, 2]
-
-    # make sure the points are within the volume
-    x[x < 0] = 0
-    y[y < 0] = 0
-    z[z < 0] = 0
-    mask = (
-        (x > label_volume.shape[0] - 1)
-        | (y > label_volume.shape[1] - 1)
-        | (z > label_volume.shape[2] - 1)
-    )
-    x[mask] = 0
-    y[mask] = 0
-    z[mask] = 0
-
-    # Get the label value for each point
-    labels = label_volume[x, y, z]
-
-    return labels
-
-
 # related to counting_and_load
 def pixel_count_per_region(
-    labels_dict_points, labeled_dict_centroids, current_points_undamaged, current_centroids_undamaged, df_label_colours
+    labels_dict_points,
+    labeled_dict_centroids,
+    current_points_undamaged,
+    current_centroids_undamaged,
+    df_label_colours,
 ):
     """
     Counts the number of pixels per region and writes to a DataFrame.
@@ -78,75 +40,61 @@
     # Which regions have pixels, and how many pixels are there per region
     # Create a list of unique regions and pixel counts per region
     counts_per_label = {
-        "idx" : [],
-        "name" : [],
-        "r" : [],
-        "g" : [],
-        "b" : [],
-        "pixel_count" : [],
-        "undamaged_pixel_count" : [],
-        "damaged_pixel_counts" : [],
-        "object_count" : [],
-        "undamaged_object_count" : [],
-        "damaged_object_count" : [],
-        }
+        "idx": [],
+        "name": [],
+        "r": [],
+        "g": [],
+        "b": [],
+        "pixel_count": [],
+        "undamaged_pixel_count": [],
+        "damaged_pixel_counts": [],
+        "object_count": [],
+        "undamaged_object_count": [],
+        "damaged_object_count": [],
+    }
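+    # Regions where every pixel and object count is zero are skipped below, so
+    # the report only lists regions that were actually hit.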
     for index, row in df_label_colours.iterrows():
         if row["idx"] in counted_labels_points_undamaged:
-            clpu = label_counts_points_undamaged[counted_labels_points_undamaged == row["idx"]][0]
+            clpu = label_counts_points_undamaged[
+                counted_labels_points_undamaged == row["idx"]
+            ][0]
         else:
             clpu = 0
         if row["idx"] in counted_labels_points_damaged:
-            clpd = label_counts_points_damaged[counted_labels_points_damaged == row["idx"]][0]
+            clpd = label_counts_points_damaged[
+                counted_labels_points_damaged == row["idx"]
+            ][0]
         else:
             clpd = 0
         if row["idx"] in counted_labels_centroids_undamaged:
-            clcu = counted_labels_centroids_undamaged[counted_labels_centroids_undamaged == row["idx"]][0]
+            clcu = counted_labels_centroids_undamaged[
+                counted_labels_centroids_undamaged == row["idx"]
+            ][0]
         else:
             clcu = 0
         if row["idx"] in counted_labels_centroids_damaged:
-            clcd = counted_labels_centroids_damaged[counted_labels_centroids_damaged == row["idx"]][0]
+            clcd = counted_labels_centroids_damaged[
+                counted_labels_centroids_damaged == row["idx"]
+            ][0]
         else:
             clcd = 0
-        if clcd==clcu==clpd==clpu==0:
+        if clcd == clcu == clpd == clpu == 0:
             continue
 
-        counts_per_label["idx"].append(
-            row["idx"]
-        )
-        counts_per_label["name"].append(
-            row["name"]
-        )
-        counts_per_label["r"].append(
-            int(row["r"])
-        )
-        counts_per_label["g"].append(
-            int(row["g"])
-        )
-        counts_per_label["b"].append(
-            int(row["b"])
-        )
-        counts_per_label["pixel_count"].append(
-            clpu + clpd
-        )
-        counts_per_label["undamaged_pixel_count"].append(
-            clpu
-        )
-        counts_per_label["damaged_pixel_counts"].append(
-            clpd
-        )
-        counts_per_label["object_count"].append(
-            clcu + clcd
-        )
-        counts_per_label["undamaged_object_count"].append(
-            clcu
-        )
-        counts_per_label["damaged_object_count"].append(
-            clcd
-        )
-
-    df_counts_per_label = pd.DataFrame(
-        counts_per_label
-    )
+        counts_per_label["idx"].append(row["idx"])
+        counts_per_label["name"].append(row["name"])
+        counts_per_label["r"].append(int(row["r"]))
+        counts_per_label["g"].append(int(row["g"]))
+        counts_per_label["b"].append(int(row["b"]))
+        counts_per_label["pixel_count"].append(clpu + clpd)
+        counts_per_label["undamaged_pixel_count"].append(clpu)
+        counts_per_label["damaged_pixel_counts"].append(clpd)
+        counts_per_label["object_count"].append(clcu + clcd)
+        counts_per_label["undamaged_object_count"].append(clcu)
+        counts_per_label["damaged_object_count"].append(clcd)
+
+    df_counts_per_label = pd.DataFrame(counts_per_label)
     return df_counts_per_label
 
 
@@ -312,10 +258,7 @@ def warp_image(image, triangulation, rescaleXY):
     return new_image
 
 
-def flat_to_dataframe(
-    image,
-    damage_mask,
-    rescaleXY=None):
+def flat_to_dataframe(image, damage_mask, rescaleXY=None):
     """
     Converts a flat file to a DataFrame.
 
@@ -333,13 +276,35 @@
     """
     scale_factor = calculate_scale_factor(image, rescaleXY)
     if damage_mask is not None:
-        damage_mask = cv2.resize(damage_mask.astype(np.uint8), (image.shape[::-1]), interpolation=cv2.INTER_NEAREST).astype(bool)
-        undamaged_df_area_per_label = count_pixels_per_label(image[damage_mask], scale_factor)
-        damaged_df_area_per_label = count_pixels_per_label(image[~damage_mask], scale_factor)
-        undamaged_df_area_per_label = undamaged_df_area_per_label.rename(columns={"region_area": "undamaged_region_area"})
-        damaged_df_area_per_label = damaged_df_area_per_label.rename(columns={"region_area": "damaged_region_area"})
-        df_area_per_label = pd.merge(undamaged_df_area_per_label, damaged_df_area_per_label, on='idx', how='outer').fillna(0)
-        df_area_per_label["region_area"] = df_area_per_label["undamaged_region_area"] + df_area_per_label["damaged_region_area"]
+        damage_mask = cv2.resize(
+            damage_mask.astype(np.uint8),
+            (image.shape[::-1]),
+            interpolation=cv2.INTER_NEAREST,
+        ).astype(bool)
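+        # True marks undamaged pixels: areas are counted separately inside and
+        # outside the mask, then merged per region idx below.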
+        undamaged_df_area_per_label = count_pixels_per_label(
+            image[damage_mask], scale_factor
+        )
+        damaged_df_area_per_label = count_pixels_per_label(
+            image[~damage_mask], scale_factor
+        )
+        undamaged_df_area_per_label = undamaged_df_area_per_label.rename(
+            columns={"region_area": "undamaged_region_area"}
+        )
+        damaged_df_area_per_label = damaged_df_area_per_label.rename(
+            columns={"region_area": "damaged_region_area"}
+        )
+        df_area_per_label = pd.merge(
+            undamaged_df_area_per_label,
+            damaged_df_area_per_label,
+            on="idx",
+            how="outer",
+        ).fillna(0)
+        df_area_per_label["region_area"] = (
+            df_area_per_label["undamaged_region_area"]
+            + df_area_per_label["damaged_region_area"]
+        )
     else:
         df_area_per_label = count_pixels_per_label(image, scale_factor)
         df_area_per_label["undamaged_region_area"] = df_area_per_label["region_area"]
diff --git a/PyNutil/processing/data_analysis.py b/PyNutil/processing/data_analysis.py
index 70ab405ee56294199e2cbc47a2fb54301e7bb451..d2cfccc62df69a19b9ea2dd4889614ad08c092e2 100644
--- a/PyNutil/processing/data_analysis.py
+++ b/PyNutil/processing/data_analysis.py
@@ -1,7 +1,8 @@
 import pandas as pd
-from .counting_and_load import pixel_count_per_region, label_points
+from .counting_and_load import pixel_count_per_region
 import numpy as np
 
+
 def map_to_custom_regions(custom_regions_dict, points_labels):
     custom_points_labels = np.zeros_like(points_labels)
     for i in np.unique(points_labels):
@@ -74,7 +75,9 @@ def apply_custom_regions(df, custom_regions_dict):
     grouped_df = grouped_df.rename(columns={"custom_region_name": "name"})
 
     grouped_df["area_fraction"] = grouped_df["pixel_count"] / grouped_df["region_area"]
-    grouped_df["undamaged_area_fraction"] = grouped_df["undamaged_pixel_count"] / grouped_df["undamaged_region_area"]
+    grouped_df["undamaged_area_fraction"] = (
+        grouped_df["undamaged_pixel_count"] / grouped_df["undamaged_region_area"]
+    )
     common_columns = [col for col in df.columns if col in grouped_df.columns]
     grouped_df = grouped_df.reindex(
         columns=common_columns
@@ -82,6 +85,7 @@ def apply_custom_regions(df, custom_regions_dict):
     )
     return grouped_df, df
 
+
 def quantify_labeled_points(
     points_len,
     centroids_len,
@@ -90,7 +94,7 @@ def quantify_labeled_points(
     labeled_points_centroids,
     atlas_labels,
     per_point_undamaged,
-    per_centroid_undamaged
+    per_centroid_undamaged,
 ):
     """
     Quantifies labeled points and returns various DataFrames.
@@ -107,8 +111,6 @@ def quantify_labeled_points(
     Returns:
         tuple: Labeled points, labeled centroids, label DataFrame, per section DataFrame.
     """
-    # labeled_points_centroids = label_points(centroids, atlas_volume)
-    # labeled_points = label_points(pixel_points, atlas_volume, scale_factor=1)
 
     per_section_df = _quantify_per_section(
         labeled_points,
@@ -118,7 +120,7 @@ def quantify_labeled_points(
         region_areas_list,
         atlas_labels,
         per_point_undamaged,
-        per_centroid_undamaged
+        per_centroid_undamaged,
     )
     label_df = _combine_slice_reports(per_section_df, atlas_labels)
 
@@ -133,7 +135,7 @@ def _quantify_per_section(
     region_areas_list,
     atlas_labels,
     per_point_undamaged,
-    per_centroid_undamaged
+    per_centroid_undamaged,
 ):
     """
     Quantifies labeled points per section.
@@ -159,7 +161,11 @@ def _quantify_per_section(
         current_points_undamaged = per_point_undamaged[prev_pl : prev_pl + pl]
         current_centroids_undamaged = per_centroid_undamaged[prev_cl : prev_cl + cl]
         current_df = pixel_count_per_region(
-            current_points, current_centroids, current_points_undamaged, current_centroids_undamaged, atlas_labels
+            current_points,
+            current_centroids,
+            current_points_undamaged,
+            current_centroids_undamaged,
+            atlas_labels,
         )
         current_df_new = _merge_dataframes(current_df, ra, atlas_labels)
         per_section_df.append(current_df_new)
@@ -211,7 +217,11 @@ def _combine_slice_reports(per_section_df, atlas_labels):
         .drop(columns=["area_fraction"])
     )
     label_df["area_fraction"] = label_df["pixel_count"] / label_df["region_area"]
-    label_df["undamaged_area_fraction"] = label_df["undamaged_pixel_count"] / label_df["undamaged_region_area"]
+    label_df["undamaged_area_fraction"] = (
+        label_df["undamaged_pixel_count"] / label_df["undamaged_region_area"]
+    )
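+    # area_fraction is recomputed from the combined counts rather than carried
+    # over from the per-section values.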
     label_df.fillna(0, inplace=True)
 
     label_df = label_df.set_index("idx")
diff --git a/PyNutil/processing/utils.py b/PyNutil/processing/utils.py
index 4f3e80065f575f7f115e3786bff4524ea9ea7d5e..ed9b64c554e733fd9675420a69cfc66aa46ced61 100644
--- a/PyNutil/processing/utils.py
+++ b/PyNutil/processing/utils.py
@@ -1,7 +1,6 @@
 import numpy as np
 import pandas as pd
 import re
-import threading
 from glob import glob
 
 
@@ -198,7 +197,14 @@ def start_and_join_threads(threads):
     [t.join() for t in threads]
 
 
-def process_results(points_list, centroids_list, points_labels, centroids_labels, points_undamaged_list, centroids_undamaged_list):
+def process_results(
+    points_list,
+    centroids_list,
+    points_labels,
+    centroids_labels,
+    points_undamaged_list,
+    centroids_undamaged_list,
+):
     """
     Processes the results from the threads.
 
@@ -220,7 +226,11 @@ def process_results(points_list, centroids_list, points_labels, centroids_labels
     points_labels = [pl for pl in points_labels if None not in pl]
     centroids_labels = [cl for cl in centroids_labels if None not in cl]
     points_undamaged_list = [pul for pul in points_undamaged_list if None not in pul]
-    centroids_undamaged_list = [cul for cul in centroids_undamaged_list if None not in cul]
+    centroids_undamaged_list = [
+        cul for cul in centroids_undamaged_list if None not in cul
+    ]
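+    # Entries that still contain None (sections that yielded no results) were
+    # dropped above, so the remaining arrays can be concatenated safely.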
     if len(points_list) == 0:
         points = np.array([])
         points_labels = np.array([])
@@ -239,4 +247,13 @@ def process_results(points_list, centroids_list, points_labels, centroids_labels
         centroids_labels = np.concatenate(centroids_labels)
         centroids_undamaged = np.concatenate(centroids_undamaged_list)
 
-    return points, centroids, points_labels, centroids_labels, points_len, centroids_len, points_undamaged, centroids_undamaged
+    return (
+        points,
+        centroids,
+        points_labels,
+        centroids_labels,
+        points_len,
+        centroids_len,
+        points_undamaged,
+        centroids_undamaged,
+    )
diff --git a/binary.png b/binary.png
deleted file mode 100644
index d39efaca6a843b463955cb33b1a5313732d8d2da..0000000000000000000000000000000000000000
Binary files a/binary.png and /dev/null differ
diff --git a/demos/basic_example_custom_atlas.py b/demos/basic_example_custom_atlas.py
index d586e12e89840f7a75d53d71243ca5bbdbf715ad..d0f4978d1beb77a768ec7557abd5ddf0390a02e2 100644
--- a/demos/basic_example_custom_atlas.py
+++ b/demos/basic_example_custom_atlas.py
@@ -1,5 +1,5 @@
 import sys
-import os
+from pathlib import Path
 
-sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
+sys.path.append(str(Path(__file__).resolve().parent.parent))
 from PyNutil import PyNutil
diff --git a/gui/Logo_PyNutil - Copy.ico:Zone.Identifier b/gui/Logo_PyNutil - Copy.ico:Zone.Identifier
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000