Commit 38c95ee2 authored by polarbean

we no longer need flat files

parent a7d03625
@@ -121,6 +121,8 @@ def folder_to_atlas_space(
     non_linear=True,
     method="all",
     object_cutoff=0,
+    atlas_volume=None,
+    use_flat=False,
 ):
     """Apply Segmentation to atlas space to all segmentations in a folder."""
     """Return pixel_points, centroids, points_len, centroids_len, segmentation_filenames, """
@@ -153,8 +155,12 @@ def folder_to_atlas_space(
         seg_nr = int(number_sections([segmentation_path])[0])
         current_slice_index = np.where([s["nr"] == seg_nr for s in slices])
         current_slice = slices[current_slice_index[0][0]]
-        current_flat_file_index = np.where([f == seg_nr for f in flat_file_nrs])
-        current_flat = flat_files[current_flat_file_index[0][0]]
+        if use_flat == True:
+            current_flat_file_index = np.where([f == seg_nr for f in flat_file_nrs])
+            current_flat = flat_files[current_flat_file_index[0][0]]
+        else:
+            current_flat = None
         x = threading.Thread(
             target=segmentation_to_atlas_space,
             args=(
@@ -170,6 +176,8 @@ def folder_to_atlas_space(
                 index,
                 method,
                 object_cutoff,
+                atlas_volume,
+                use_flat
             ),
         )
         threads.append(x)
@@ -199,7 +207,7 @@ def segmentation_to_atlas_space(
     slice,
     segmentation_path,
     atlas_labels,
-    flat_file_atlas,
+    flat_file_atlas=None,
     pixel_id="auto",
     non_linear=True,
     points_list=None,
@@ -208,11 +216,13 @@ def segmentation_to_atlas_space(
     index=None,
     method="per_pixel",
     object_cutoff=0,
+    atlas_volume=None,
+    use_flat=False
 ):
     """Combines many functions to convert a segmentation to atlas space. It takes care
     of deformations."""
-    print(f"workissssssssssng on {segmentation_path}")
+    print(f"working on {segmentation_path}")
     if segmentation_path.endswith(".dzip"):
         print("Reconstructing dzi")
         segmentation = reconstruct_dzi(segmentation_path)
@@ -229,14 +239,17 @@ def segmentation_to_atlas_space(
     # Currently only works for a single label
     print("length of non background pixels: ", len(segmentation_no_background))
     pixel_id = segmentation_no_background[0]
-    print('detected pissssssxel_id: ', pixel_id)
+    print('detected pixel_id: ', pixel_id)
     # Transform pixels to registration space (the registered image and segmentation have different dimensions)
     seg_height = segmentation.shape[0]
     seg_width = segmentation.shape[1]
     reg_height = slice["height"]
     reg_width = slice["width"]
-    region_areas = flat_to_dataframe(flat_file_atlas, atlas_labels, (seg_width,seg_height))
+    if use_flat == True:
+        region_areas = flat_to_dataframe(atlas_labels, flat_file_atlas, (seg_width,seg_height))
+    else:
+        region_areas = flat_to_dataframe(atlas_labels, flat_file_atlas, (seg_width,seg_height), slice["anchoring"], atlas_volume)
     # This calculates reg/seg
     y_scale, x_scale = transform_to_registration(
         seg_height, seg_width, reg_height, reg_width
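Editor's note: for orientation, a hedged sketch of the two region-area paths introduced above. When use_flat is True the legacy precomputed flat/seg file is read; when it is False the per-region areas are derived from the atlas annotation volume sliced with the section's anchoring vector. The names below (atlas_labels, atlas_volume, slice, flat_file_atlas) refer to the objects already in scope in segmentation_to_atlas_space; this is a sketch, not a verbatim copy of the source.

# Sketch only, mirroring the conditional in the hunk above.
if use_flat:
    # legacy path: region areas come from a precomputed .flat/.seg atlas file
    region_areas = flat_to_dataframe(atlas_labels, flat_file_atlas, (seg_width, seg_height))
else:
    # new path: no flat file needed; the annotation volume is sliced with the
    # section's VisuAlign anchoring ("ouv") vector to obtain the same information
    region_areas = flat_to_dataframe(
        atlas_labels,
        flat_file_atlas,            # None in this mode
        (seg_width, seg_height),    # rescale target = segmentation dimensions
        slice["anchoring"],         # anchoring vector from the alignment JSON
        atlas_volume,               # annotation volume loaded by PyNutil
    )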
...
@@ -5,7 +5,7 @@ import matplotlib.pyplot as plt
 import os
 import nrrd
 import cv2
-# from generate_target_slice import generate_target_slice
+from .generate_target_slice import generate_target_slice

 # related to counting and load
@@ -119,92 +119,82 @@ def pixel_count_per_region(
 """Read flat file and write into an np array"""
 """Read flat file, write into an np array, assign label file values, return array"""
-import struct
-import cv2
-import numpy as np
-import pandas as pd
-def flat_to_dataframe(file, labelfile, rescaleXY=False):
-    if rescaleXY:
-        rescaleX, rescaleY = rescaleXY
-    if file.endswith(".flat"):
-        with open(file, "rb") as f:
-            # I don't know what b is, w and h are the width and height that we get from the
-            # flat file header
-            b, w, h = struct.unpack(">BII", f.read(9))
-            # Data is a one dimensional list of values
-            # It has the shape width times height
-            data = struct.unpack(">" + ("xBH"[b] * (w * h)), f.read(b * w * h))
-    elif file.endswith(".seg"):
-        with open(file,"rb") as f:
-            def byte():
-                return f.read(1)[0]
-            def code():
-                c=byte()
-                if(c<0):
-                    raise "!"
-                return c if c<128 else (c&127)|(code()<<7)
-            if "SegRLEv1" != f.read(8).decode():
-                raise "Header mismatch"
-            atlas=f.read(code()).decode()
-            print(f"Target atlas: {atlas}")
-            codes=[code() for x in range(code())]
-            w=code()
-            h=code()
-            data=[]
-            while len(data)<w*h:
-                data += [codes[byte() if len(codes)<=256 else code()]]*(code()+1)
-    # Convert flat file data into an array, previously data was a tuple
-    image_data = np.array(data)
-    # Create an empty image array in the right shape, write image_data into image_array
-    image = np.zeros((h, w))
-    for x in range(w):
-        for y in range(h):
-            image[y, x] = image_data[x + y * w]
-    image_arr = np.array(image)
-    #return image_arr
-    if rescaleXY:
-        w,h= rescaleXY
-        image_arr = cv2.resize(image_arr, (h, w), interpolation=cv2.INTER_NEAREST)
-    print("max data value",np.max(image_arr))
-    if file.endswith(".flat"):
-        allen_id_image = np.zeros((h, w)) # create an empty image array
-        coordsy, coordsx = np.meshgrid(list(range(w)), list(range(h)))
-        values = image_arr[coordsy,coordsx] # assign x,y coords from image_array into values
-        lbidx = labelfile["idx"].values
-        allen_id_image = lbidx[values.astype(int)]
-    elif file.endswith(".seg"):
-        allen_id_image = image_arr
-    """assign label file values into image array"""
-    #return allen_id_image
-    #def count_per_uniqueidx()
-    """count pixels for unique idx"""
-    unique_ids, counts = np.unique(allen_id_image, return_counts=True)
-    area_per_label = list(zip(unique_ids, counts))
-    # create a list of unique regions and pixel counts per region
-    df_area_per_label = pd.DataFrame(area_per_label, columns=["idx", "region_area"])
-    print("df_area_per_label")
-    print(df_area_per_label)
-    # create a pandas df with regions and pixel counts
-    return(df_area_per_label)
-    # Import flat files, count pixels per label, np.unique... etc. nitrc.org/plugins/mwiki/index.php?title=visualign:Deformation
-    """
-    base=slice["filename"][:-4]
-    import struct
-    with open(base+".flat","rb") as f:
-        b,w,h=struct.unpack(">BII",f.read(9))
-        data=struct.unpack(">"+("xBH"[b]*(w*h)),f.read(b*w*h))
-    """
+def read_flat_file(file):
+    with open(file, "rb") as f:
+        b, w, h = struct.unpack(">BII", f.read(9))
+        data = struct.unpack(">" + ("xBH"[b] * (w * h)), f.read(b * w * h))
+    image_data = np.array(data)
+    image = np.zeros((h, w))
+    for x in range(w):
+        for y in range(h):
+            image[y, x] = image_data[x + y * w]
+    return image
+
+def read_seg_file(file):
+    with open(file,"rb") as f:
+        def byte():
+            return f.read(1)[0]
+        def code():
+            c=byte()
+            if(c<0):
+                raise "!"
+            return c if c<128 else (c&127)|(code()<<7)
+        if "SegRLEv1" != f.read(8).decode():
+            raise "Header mismatch"
+        atlas=f.read(code()).decode()
+        print(f"Target atlas: {atlas}")
+        codes=[code() for x in range(code())]
+        w=code()
+        h=code()
+        data=[]
+        while len(data)<w*h:
+            data += [codes[byte() if len(codes)<=256 else code()]]*(code()+1)
+    image_data = np.array(data)
+    image = np.reshape(image_data, (h, w))
+    return image
+
+def rescale_image(image, rescaleXY):
+    w, h = rescaleXY
+    return cv2.resize(image, (h, w), interpolation=cv2.INTER_NEAREST)
+
+def assign_labels_to_image(image, labelfile):
+    w,h = image.shape
+    allen_id_image = np.zeros((h, w))  # create an empty image array
+    coordsy, coordsx = np.meshgrid(list(range(w)), list(range(h)))
+    values = image[coordsy, coordsx]
+    lbidx = labelfile["idx"].values
+    allen_id_image = lbidx[values.astype(int)]
+    return allen_id_image
+
+def count_pixels_per_label(image):
+    unique_ids, counts = np.unique(image, return_counts=True)
+    area_per_label = list(zip(unique_ids, counts))
+    df_area_per_label = pd.DataFrame(area_per_label, columns=["idx", "region_area"])
+    return df_area_per_label
+
+def flat_to_dataframe(labelfile, file=None, rescaleXY=None, image_vector=None, volume=None):
+    if (image_vector is not None) and (volume is not None):
+        image = generate_target_slice(image_vector, volume)
+        image = np.float64(image)
+    elif file.endswith(".flat"):
+        image = read_flat_file(file)
+    elif file.endswith(".seg"):
+        image = read_seg_file(file)
+        print('datatype', image.dtype)
+        print('image shape open', image.shape)
+    if rescaleXY:
+        image = rescale_image(image, rescaleXY)
+    if (image_vector is None) or (volume is None):
+        allen_id_image = assign_labels_to_image(image, labelfile)
+    else:
+        allen_id_image = image
+    df_area_per_label = count_pixels_per_label(allen_id_image)
+    return df_area_per_label
\ No newline at end of file
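Editor's note: a hedged usage sketch of the refactored flat_to_dataframe, with both call patterns mirroring the use_flat branches in segmentation_to_atlas_space above. The file path, label table, anchoring vector and dimensions are placeholders, and the functions are assumed to be importable from counting_and_load.

# Placeholders throughout; not a test from the repository.
import numpy as np
import pandas as pd

atlas_labels = pd.DataFrame({"idx": [0, 1, 2]})             # minimal stand-in label table
atlas_volume = np.zeros((456, 528, 320), dtype=np.uint32)   # stand-in annotation volume
anchoring = [9.5, 423.2, 225.9, -485.3, 9.7, 4.6, 0.0, 2.3, -342.7]  # invented "ouv" vector

# 1) legacy flat-file path (use_flat=True): read a precomputed .flat/.seg file
region_areas = flat_to_dataframe(atlas_labels, "sections/example_s001.flat", (1024, 768))

# 2) new volume path (use_flat=False): no flat file, slice the volume instead
region_areas = flat_to_dataframe(atlas_labels, None, (1024, 768), anchoring, atlas_volume)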
@@ -134,7 +134,7 @@ class PyNutil:
         return atlas_volume, atlas_labels

-    def get_coordinates(self, non_linear=True, method="all", object_cutoff=0):
+    def get_coordinates(self, non_linear=True, method="all", object_cutoff=0, use_flat=False):
         """Extracts pixel coordinates from the segmentation data.

         Parameters
@@ -177,6 +177,8 @@ class PyNutil:
             non_linear=non_linear,
             method=method,
             object_cutoff=object_cutoff,
+            atlas_volume=self.atlas_volume,
+            use_flat=use_flat
         )
         self.pixel_points = pixel_points
         self.centroids = centroids
@@ -241,43 +243,6 @@ class PyNutil:
             current_df_new = all_region_df.merge(current_df, on= 'idx', how= 'left', suffixes= (None,"_y")).drop(columns=["a","VIS", "MSH", "name_y","r_y","g_y","b_y"])
             current_df_new["area_fraction"] = current_df_new["pixel_count"] / current_df_new["region_area"]
             current_df_new.fillna(0, inplace=True)
-            # Several alternatives for the merge code above
-            """
-            new_rows = []
-            for index, row in all_region_df.iterrows():
-                mask = current_df["idx"] == row ["idx"]
-                pnt.save_analysis("outputs/test8_PyNutil")
-                object_count = current_region_row["object_count"].values
-                row["pixel_count"] = region_area[0]
-                row["object_count"] = object_count[0]
-                new_rows.append[row]
-            current_df_new = pd.DataFrame(new_rows)
-            """
-            """
-            new_rows = []
-            for index, row in current_df.iterrows():
-                mask = self.atlas_labels["idx"] == row["idx"]
-                current_region_row = self.atlas_labels[mask]
-                current_region_name = current_region_row["name"].values
-                current_region_red = current_region_row["r"].values
-                current_region_green = current_region_row["g"].values
-                current_region_blue = current_region_row["b"].values
-                row["name"] = current_region_name[0]
-                row["r"] = current_region_red[0]
-                row["g"] = current_region_green[0]
-                row["b"] = current_region_blue[0]
-                new_rows.append(row)
-            current_df_new = pd.DataFrame(new_rows)"""
-            #per_section_df.append(current_df_new)
             per_section_df.append(current_df_new)
             prev_pl += pl
             prev_cl += cl
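Editor's note: the deleted blocks were commented-out alternatives to the merge kept above. As a self-contained illustration of why the single left-merge is enough, here is a toy example of the merge-plus-area_fraction pattern; the column names and values are invented, and the real frames carry more columns (name, r, g, b, object_count, ...).

# Toy data only; illustrates the pattern used in quantify_coordinates above.
import pandas as pd

all_region_df = pd.DataFrame({"idx": [1, 2, 3], "name": ["regA", "regB", "regC"]})
current_df = pd.DataFrame({"idx": [1, 3], "pixel_count": [40, 10], "region_area": [100, 50]})

current_df_new = all_region_df.merge(current_df, on="idx", how="left")
current_df_new["area_fraction"] = current_df_new["pixel_count"] / current_df_new["region_area"]
current_df_new.fillna(0, inplace=True)
# regB had no counts in this section, so its pixel_count, region_area and
# area_fraction are all filled with 0; regA ends up with area_fraction 0.4.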
...
@@ -20,29 +20,27 @@ def load_visualign_json(filename):
             print(slice)
             slice["nr"] = int(re.search(r"_s(\d+)", slice["filename"]).group(1))
             slice["anchoring"] = slice["ouv"]
-        lz_compat_file = {
-            "name":name,
-            "target":vafile["atlas"],
-            "target-resolution":[ 456, 528, 320],
-            "slices":slices,
-        }
-        # save with .json extension need to see if i can remove this
-        with open(filename.replace(".waln", ".json").replace(".wwrp", ".json"), "w") as f:
-            json.dump(lz_compat_file, f, indent=4)
     else:
         slices = vafile["slices"]
     # overwrite the existing file
     name = os.path.basename(filename)
+    slices = [{"filename":slice['filename'],
+               "nr":slice["nr"],
+               "width":slice["width"],
+               "height":slice["height"],
+               "anchoring":slice["anchoring"]} for slice in slices]
+    lz_compat_file = {
+        "name":name,
+        "target":vafile["atlas"],
+        "target-resolution":[ 456, 528, 320],
+        "slices":slices,
+    }
+    # save with .json extension
+    with open(filename.replace(".waln", ".json").replace(".wwrp", ".json"), "w") as f:
+        json.dump(lz_compat_file, f, indent=4)
     return slices
-
-def load_visualign_json(filename):
-    with open(filename) as f:
-        vafile = json.load(f)
-    slices = vafile["slices"]
-    return slices

 # related to read_and_write, used in write_points_to_meshview
 # this function returns a dictionary of region names
 def create_region_dict(points, regions):
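Editor's note: for reference, a hedged sketch of the normalized structure load_visualign_json now writes back for .waln/.wwrp input. Only the keys are taken from the diff above; every value shown is invented.

# Invented example values; keys mirror the list comprehension and lz_compat_file above.
slices = [
    {
        "filename": "example_s001.png",   # hypothetical section image name
        "nr": 1,                          # parsed from the "_s001" suffix
        "width": 1024,
        "height": 768,
        "anchoring": [9.5, 423.2, 225.9,  # "ouv" vector: origin, u and v axes
                      -485.3, 9.7, 4.6,
                      0.0, 2.3, -342.7],
    },
]
lz_compat_file = {
    "name": "example.waln",               # basename of the input file
    "target": "example_atlas",            # copied from vafile["atlas"]
    "target-resolution": [456, 528, 320],
    "slices": slices,
}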
...
{ "volume_path": "allen2017", { "volume_path": "allen2017",
"label_path": "annotation_volumes/allen2017_colours.csv", "label_path": "PyNutil/annotation_volumes/allen2017_colours.csv",
"segmentation_folder": "test_data/PyTest_seg", "segmentation_folder": "PyNutil/test_data/PyTest_seg",
"alignment_json": "test_data/PyNutil_testdataset_Nonlin_SY_fixed.json", "alignment_json": "PyNutil/test_data/PyNutil_testdataset_Nonlin_SY_fixed.json",
"nonlinear": true, "nonlinear": true,
"colour": [0, 0, 0], "colour": [0, 0, 0],
"points_json_path": "outputs/test8_PyNutil.json" "points_json_path": "PyNutil/outputs/test8_PyNutil.json"
} }
\ No newline at end of file
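Editor's note: a short, hedged sketch of how this settings file is consumed, inferred only from the keys above and the test script below; the PyNutil constructor internals are not part of this diff.

# Assumption: PyNutil reads these keys from the settings JSON passed to its constructor.
import json

with open("PyNutil/test/test8_PyNutil_fixed.json") as f:
    settings = json.load(f)

settings["volume_path"]           # atlas volume identifier ("allen2017")
settings["label_path"]            # colour/label table CSV for that atlas
settings["segmentation_folder"]   # folder containing the segmentation images
settings["alignment_json"]        # VisuAlign alignment JSON for the sections
settings["points_json_path"]      # output path for the points JSON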
 from PyNutil import PyNutil
-pnt = PyNutil(settings_file=r"test/test8_PyNutil_fixed.json")
-# pnt = PyNutil(settings_file=r"test/test7_PyNutil.json")
-# pnt.build_quantifier()
-pnt.get_coordinates(object_cutoff=0)
+pnt = PyNutil(settings_file=r"PyNutil/test/test8_PyNutil_fixed.json")
+##Use flat can be set to True if you want to use the flat file
+# instead of the visualign json (this is only useful for testing and will be removed)
+pnt.get_coordinates(object_cutoff=0, use_flat=False)
 pnt.quantify_coordinates()
-pnt.save_analysis("outputs/test8_PyNutil")
+pnt.save_analysis("PyNutil/outputs/test8_PyNutil")
 # remove name, r, g, b, from pixel_
 # add to region_areas df
\ No newline at end of file