diff --git a/PyNutil/coordinate_extraction.py b/PyNutil/coordinate_extraction.py
index 357103592489c27cc823837dea5a5ff9f560ebbb..277c3b75ea0abce1183d44cfa6d94ee172df9a0b 100644
--- a/PyNutil/coordinate_extraction.py
+++ b/PyNutil/coordinate_extraction.py
@@ -121,6 +121,8 @@ def folder_to_atlas_space(
     non_linear=True,
     method="all",
     object_cutoff=0,
+    atlas_volume=None,
+    use_flat=False,
 ):
     """Apply Segmentation to atlas space to all segmentations in a folder."""
     """Return pixel_points, centroids, points_len, centroids_len, segmentation_filenames, """
@@ -153,8 +155,12 @@ def folder_to_atlas_space(
         seg_nr = int(number_sections([segmentation_path])[0])
         current_slice_index = np.where([s["nr"] == seg_nr for s in slices])
         current_slice = slices[current_slice_index[0][0]]
-        current_flat_file_index = np.where([f == seg_nr for f in flat_file_nrs])
-        current_flat = flat_files[current_flat_file_index[0][0]]
+        if use_flat:
+            current_flat_file_index = np.where([f == seg_nr for f in flat_file_nrs])
+            current_flat = flat_files[current_flat_file_index[0][0]]
+        else:
+            current_flat = None
+
         x = threading.Thread(
             target=segmentation_to_atlas_space,
             args=(
@@ -170,6 +176,8 @@ def folder_to_atlas_space(
                 index,
                 method,
                 object_cutoff,
+                atlas_volume,
+                use_flat,
             ),
         )
         threads.append(x)
@@ -199,7 +207,7 @@ def segmentation_to_atlas_space(
     slice,
     segmentation_path,
     atlas_labels,
-    flat_file_atlas,
+    flat_file_atlas=None,
     pixel_id="auto",
     non_linear=True,
     points_list=None,
@@ -208,11 +216,13 @@ def segmentation_to_atlas_space(
     index=None,
     method="per_pixel",
     object_cutoff=0,
+    atlas_volume=None,
+    use_flat=False,
 ):
     """Combines many functions to convert a segmentation to atlas space.
     It takes care of deformations."""
 
-    print(f"workissssssssssng on {segmentation_path}")
+    print(f"working on {segmentation_path}")
     if segmentation_path.endswith(".dzip"):
         print("Reconstructing dzi")
         segmentation = reconstruct_dzi(segmentation_path)
@@ -229,14 +239,17 @@ def segmentation_to_atlas_space(
         # Currently only works for a single label
         print("length of non background pixels: ", len(segmentation_no_background))
         pixel_id = segmentation_no_background[0]
-        print('detected pissssssxel_id: ', pixel_id)
+        print('detected pixel_id: ', pixel_id)
 
     # Transform pixels to registration space (the registered image and segmentation have different dimensions)
    seg_height = segmentation.shape[0]
     seg_width = segmentation.shape[1]
     reg_height = slice["height"]
     reg_width = slice["width"]
-    region_areas = flat_to_dataframe(flat_file_atlas, atlas_labels, (seg_width,seg_height))
+    if use_flat:
+        region_areas = flat_to_dataframe(atlas_labels, flat_file_atlas, (seg_width,seg_height))
+    else:
+        region_areas = flat_to_dataframe(atlas_labels, flat_file_atlas, (seg_width,seg_height), slice["anchoring"], atlas_volume)
     # This calculates reg/seg
     y_scale, x_scale = transform_to_registration(
         seg_height, seg_width, reg_height, reg_width
diff --git a/PyNutil/counting_and_load.py b/PyNutil/counting_and_load.py
index 99d470a1c62badb1d7b6ddd766f30fc3ca75c446..87adbe2d683a7565e1e45ed818e791a41dedc280 100644
--- a/PyNutil/counting_and_load.py
+++ b/PyNutil/counting_and_load.py
@@ -5,7 +5,7 @@ import matplotlib.pyplot as plt
 import os
 import nrrd
 import cv2
-# from generate_target_slice import generate_target_slice
+from .generate_target_slice import generate_target_slice
 
 
 # related to counting and load
@@ -119,92 +119,82 @@ def pixel_count_per_region(
 
 
 """Read flat file and write into an np array"""
 """Read flat file, write into an np array, assign label
 file values, return array"""
+import struct
+import cv2
+import numpy as np
+import pandas as pd
 
-
-
-
-def flat_to_dataframe(file, labelfile, rescaleXY=False):
-    if rescaleXY:
-        rescaleX, rescaleY = rescaleXY
-    if file.endswith(".flat"):
-        with open(file, "rb") as f:
-            # I don't know what b is, w and h are the width and height that we get from the
-            # flat file header
-            b, w, h = struct.unpack(">BII", f.read(9))
-            # Data is a one dimensional list of values
-            # It has the shape width times height
-            data = struct.unpack(">" + ("xBH"[b] * (w * h)), f.read(b * w * h))
-    elif file.endswith(".seg"):
-        with open(file,"rb") as f:
-            def byte():
-                return f.read(1)[0]
-            def code():
-                c=byte()
-                if(c<0):
-                    raise "!"
-                return c if c<128 else (c&127)|(code()<<7)
-            if "SegRLEv1" != f.read(8).decode():
-                raise "Header mismatch"
-            atlas=f.read(code()).decode()
-            print(f"Target atlas: {atlas}")
-            codes=[code() for x in range(code())]
-            w=code()
-            h=code()
-            data=[]
-            while len(data)<w*h:
-                data += [codes[byte() if len(codes)<=256 else code()]]*(code()+1)
-    # Convert flat file data into an array, previously data was a tuple
+def read_flat_file(file):
+    with open(file, "rb") as f:
+        b, w, h = struct.unpack(">BII", f.read(9))
+        data = struct.unpack(">" + ("xBH"[b] * (w * h)), f.read(b * w * h))
     image_data = np.array(data)
-
-    # Create an empty image array in the right shape, write image_data into image_array
     image = np.zeros((h, w))
     for x in range(w):
         for y in range(h):
             image[y, x] = image_data[x + y * w]
-
-    image_arr = np.array(image)
-    #return image_arr
-    if rescaleXY:
-        w,h= rescaleXY
-        image_arr = cv2.resize(image_arr, (h, w), interpolation=cv2.INTER_NEAREST)
-
-    print("max data value",np.max(image_arr))
-
-    if file.endswith(".flat"):
-        allen_id_image = np.zeros((h, w))  # create an empty image array
-        coordsy, coordsx = np.meshgrid(list(range(w)), list(range(h)))
-        values = image_arr[coordsy,coordsx]  # assign x,y coords from image_array into values
-
-        lbidx = labelfile["idx"].values
-        allen_id_image = lbidx[values.astype(int)]
-    elif file.endswith(".seg"):
-        allen_id_image = image_arr
-    """assign label file values into image array"""
-
-    #return allen_id_image
-
-    #def count_per_uniqueidx()
-
-    """count pixels for unique idx"""
-    unique_ids, counts = np.unique(allen_id_image, return_counts=True)
-
+    return image
+
+def read_seg_file(file):
+    with open(file,"rb") as f:
+        def byte():
+            return f.read(1)[0]
+        def code():
+            c=byte()
+            if(c<0):
+                raise ValueError("!")
+            return c if c<128 else (c&127)|(code()<<7)
+        if "SegRLEv1" != f.read(8).decode():
+            raise ValueError("Header mismatch")
+        atlas=f.read(code()).decode()
+        print(f"Target atlas: {atlas}")
+        codes=[code() for x in range(code())]
+        w=code()
+        h=code()
+        data=[]
+        while len(data)<w*h:
+            data += [codes[byte() if len(codes)<=256 else code()]]*(code()+1)
+    image_data = np.array(data)
+    image = np.reshape(image_data, (h, w))
+    return image
+
+def rescale_image(image, rescaleXY):
+    w, h = rescaleXY
+    return cv2.resize(image, (h, w), interpolation=cv2.INTER_NEAREST)
+
+def assign_labels_to_image(image, labelfile):
+    w, h = image.shape
+    allen_id_image = np.zeros((h, w))  # create an empty image array
+    coordsy, coordsx = np.meshgrid(list(range(w)), list(range(h)))
+
+    values = image[coordsy, coordsx]
+    lbidx = labelfile["idx"].values
+
+    allen_id_image = lbidx[values.astype(int)]
+    return allen_id_image
+
+def count_pixels_per_label(image):
+    unique_ids, counts = np.unique(image, return_counts=True)
     area_per_label = list(zip(unique_ids, counts))
-    # create a list of unique regions and pixel counts per region
-
     df_area_per_label = pd.DataFrame(area_per_label, columns=["idx", "region_area"])
-    print("df_area_per_label")
-    print(df_area_per_label)
-    # create a pandas df with regions and pixel counts
-    return(df_area_per_label)
-
-# Import flat files, count pixels per label, np.unique... etc. nitrc.org/plugins/mwiki/index.php?title=visualign:Deformation
-
-"""
-    base=slice["filename"][:-4]
-
-    import struct
-    with open(base+".flat","rb") as f:
-        b,w,h=struct.unpack(">BII",f.read(9))
-        data=struct.unpack(">"+("xBH"[b]*(w*h)),f.read(b*w*h))
-"""
+    return df_area_per_label
+
+def flat_to_dataframe(labelfile, file=None, rescaleXY=None, image_vector=None, volume=None):
+    if (image_vector is not None) and (volume is not None):
+        image = generate_target_slice(image_vector, volume)
+        image = np.float64(image)
+    elif file.endswith(".flat"):
+        image = read_flat_file(file)
+    elif file.endswith(".seg"):
+        image = read_seg_file(file)
+    print('datatype', image.dtype)
+    print('image shape open', image.shape)
+    if rescaleXY:
+        image = rescale_image(image, rescaleXY)
+    if (image_vector is None) or (volume is None):
+        allen_id_image = assign_labels_to_image(image, labelfile)
+    else:
+        allen_id_image = image
+    df_area_per_label = count_pixels_per_label(allen_id_image)
+    return df_area_per_label
\ No newline at end of file
diff --git a/PyNutil/main.py b/PyNutil/main.py
index ce537d7b767f9eeb6738debdf944a24a2fa86f3e..4d19a3b7a6fbc1e1f0dc212aee9b75b3841c97e5 100644
--- a/PyNutil/main.py
+++ b/PyNutil/main.py
@@ -134,7 +134,7 @@ class PyNutil:
 
         return atlas_volume, atlas_labels
 
-    def get_coordinates(self, non_linear=True, method="all", object_cutoff=0):
+    def get_coordinates(self, non_linear=True, method="all", object_cutoff=0, use_flat=False):
         """Extracts pixel coordinates from the segmentation data.
 
         Parameters
@@ -177,6 +177,8 @@ class PyNutil:
             non_linear=non_linear,
             method=method,
             object_cutoff=object_cutoff,
+            atlas_volume=self.atlas_volume,
+            use_flat=use_flat
         )
         self.pixel_points = pixel_points
         self.centroids = centroids
@@ -241,43 +243,6 @@ class PyNutil:
             current_df_new = all_region_df.merge(current_df, on= 'idx', how= 'left', suffixes= (None,"_y")).drop(columns=["a","VIS", "MSH", "name_y","r_y","g_y","b_y"])
             current_df_new["area_fraction"] = current_df_new["pixel_count"] / current_df_new["region_area"]
             current_df_new.fillna(0, inplace=True)
-
-            # Several alternatives for the merge code above
-            """
-            new_rows = []
-            for index, row in all_region_df.iterrows():
-                mask = current_df["idx"] == row ["idx"]
-                pnt.save_analysis("outputs/test8_PyNutil")
-                object_count = current_region_row["object_count"].values
-
-                row["pixel_count"] = region_area[0]
-                row["object_count"] = object_count[0]
-
-                new_rows.append[row]
-
-            current_df_new = pd.DataFrame(new_rows)
-            """
-
-            """
-            new_rows = []
-            for index, row in current_df.iterrows():
-                mask = self.atlas_labels["idx"] == row["idx"]
-                current_region_row = self.atlas_labels[mask]
-                current_region_name = current_region_row["name"].values
-                current_region_red = current_region_row["r"].values
-                current_region_green = current_region_row["g"].values
-                current_region_blue = current_region_row["b"].values
-
-                row["name"] = current_region_name[0]
-                row["r"] = current_region_red[0]
-                row["g"] = current_region_green[0]
-                row["b"] = current_region_blue[0]
-
-                new_rows.append(row)
-
-            current_df_new = pd.DataFrame(new_rows)"""
-
-            #per_section_df.append(current_df_new)
             per_section_df.append(current_df_new)
             prev_pl += pl
             prev_cl += cl
diff --git a/PyNutil/read_and_write.py b/PyNutil/read_and_write.py
index 4b3d0c70ced4878dda2314ddcfdabefef5d7c5a8..5a7e26f8503caf60dd3495c3c57b83d464e8f7a9 100644
--- a/PyNutil/read_and_write.py
+++ b/PyNutil/read_and_write.py
@@ -20,29 +20,27 @@ def load_visualign_json(filename):
                 print(slice)
                 slice["nr"] = int(re.search(r"_s(\d+)", slice["filename"]).group(1))
                 slice["anchoring"] = slice["ouv"]
+        lz_compat_file = {
+            "name":name,
+            "target":vafile["atlas"],
+            "target-resolution":[ 456, 528, 320],
+            "slices":slices,
+        }
+        # save with .json extension need to see if i can remove this
+        with open(filename.replace(".waln", ".json").replace(".wwrp", ".json"), "w") as f:
+            json.dump(lz_compat_file, f, indent=4)
     else:
         slices = vafile["slices"]
     # overwrite the existing file
-    name = os.path.basename(filename)
-    slices = [{"filename":slice['filename'],
-               "nr":slice["nr"],
-               "width":slice["width"],
-               "height":slice["height"],
-               "anchoring":slice["anchoring"]} for slice in slices]
-    lz_compat_file = {
-        "name":name,
-        "target":vafile["atlas"],
-        "target-resolution":[ 456, 528, 320],
-        "slices":slices,
-
-    }
-    # save with .json extension
-    with open(filename.replace(".waln", ".json").replace(".wwrp", ".json"), "w") as f:
-        json.dump(lz_compat_file, f, indent=4)
+    return slices
-
+def load_visualign_json(filename):
+    with open(filename) as f:
+        vafile = json.load(f)
+    slices = vafile["slices"]
+    return slices
 
 
 # related to read_and_write, used in write_points_to_meshview
 # this function returns a dictionary of region names
 def create_region_dict(points, regions):
diff --git a/test/test8_PyNutil_fixed.json b/test/test8_PyNutil_fixed.json
index 842b226f80dec6358d62fbd09e8a5b2ca11e08aa..8969423c8ec9a40d07910b7cfe82a296ebc0a7b0 100644
--- a/test/test8_PyNutil_fixed.json
+++ b/test/test8_PyNutil_fixed.json
@@ -1,8 +1,8 @@
 {
     "volume_path": "allen2017",
-    "label_path": "annotation_volumes/allen2017_colours.csv",
-    "segmentation_folder": "test_data/PyTest_seg",
-    "alignment_json": "test_data/PyNutil_testdataset_Nonlin_SY_fixed.json",
+    "label_path": "PyNutil/annotation_volumes/allen2017_colours.csv",
+    "segmentation_folder": "PyNutil/test_data/PyTest_seg",
+    "alignment_json": "PyNutil/test_data/PyNutil_testdataset_Nonlin_SY_fixed.json",
     "nonlinear": true,
     "colour": [0, 0, 0],
-    "points_json_path": "outputs/test8_PyNutil.json"
+    "points_json_path": "PyNutil/outputs/test8_PyNutil.json"
 }
\ No newline at end of file
diff --git a/testOOP.py b/testOOP.py
index c5e24774619e051678bac2ca544dc37ee2438b90..8075fb85e148db53c28a0ad30607c77930f33537 100644
--- a/testOOP.py
+++ b/testOOP.py
@@ -1,14 +1,13 @@
 from PyNutil import PyNutil
 
-pnt = PyNutil(settings_file=r"test/test8_PyNutil_fixed.json")
-# pnt = PyNutil(settings_file=r"test/test7_PyNutil.json")
-# pnt.build_quantifier()
-
-pnt.get_coordinates(object_cutoff=0)
+pnt = PyNutil(settings_file=r"PyNutil/test/test8_PyNutil_fixed.json")
+##Use flat can be set to True if you want to use the flat file
+# instead of the visualign json (this is only useful for testing and will be removed)
+pnt.get_coordinates(object_cutoff=0, use_flat=False)
 
 pnt.quantify_coordinates()
 
-pnt.save_analysis("outputs/test8_PyNutil")
+pnt.save_analysis("PyNutil/outputs/test8_PyNutil")
 
 # remove name, r, g, b, from pixel_
 # add to region_areas df
\ No newline at end of file
diff --git a/test_data/PyTest_seg/test_s001.png b/test_data/PyTest_seg/segmentations/test_s001.png
similarity index 100%
rename from test_data/PyTest_seg/test_s001.png
rename to test_data/PyTest_seg/segmentations/test_s001.png
diff --git a/test_data/PyTest_seg/test_s002.png b/test_data/PyTest_seg/segmentations/test_s002.png
similarity index 100%
rename from test_data/PyTest_seg/test_s002.png
rename to test_data/PyTest_seg/segmentations/test_s002.png
diff --git a/test_data/PyTest_seg/test_s003.png b/test_data/PyTest_seg/segmentations/test_s003.png
similarity index 100%
rename from test_data/PyTest_seg/test_s003.png
rename to test_data/PyTest_seg/segmentations/test_s003.png
diff --git a/test_data/PyTest_seg/test_s004.png b/test_data/PyTest_seg/segmentations/test_s004.png
similarity index 100%
rename from test_data/PyTest_seg/test_s004.png
rename to test_data/PyTest_seg/segmentations/test_s004.png
diff --git a/test_data/PyTest_seg/test_s005.png b/test_data/PyTest_seg/segmentations/test_s005.png
similarity index 100%
rename from test_data/PyTest_seg/test_s005.png
rename to test_data/PyTest_seg/segmentations/test_s005.png