Commit: setup.py
KibromBerihu committed Jun 7, 2022
1 parent db68f74 commit 1af9412
Showing 27 changed files with 116 additions and 75 deletions.
Binary file added images/dissemination_profile.png
Binary file added images/mip.png
2 changes: 1 addition & 1 deletion run_docker_image.bat
@@ -21,7 +21,7 @@ set CONTAINERID=%5
echo '[8] Running the docker with container id: %CONTAINERID% ....'

docker run -it --rm --name %CONTAINERID%^
- -v %input_dir%:/input_data ^
+ -v %input_dir%:/input ^
-v %output_dir%:/output ^
%docker_image_name%:%docker_tag% ^
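Note: after this change the host input directory is mounted at /input rather than /input_data inside the container. A minimal sketch of an equivalent invocation driven from Python, where the host paths, image name, and container name are hypothetical:

```python
import subprocess

# Hypothetical equivalent of the batch script's docker run call after this
# commit: the input directory now maps to /input (it was /input_data).
subprocess.run([
    "docker", "run", "-it", "--rm", "--name", "my_container",
    "-v", r"C:\data\input:/input",
    "-v", r"C:\data\output:/output",
    "my_image:latest",
], check=True)
```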

14 changes: 14 additions & 0 deletions setup.py
@@ -0,0 +1,14 @@
from setuptools import setup, find_namespace_packages

setup(name='ai4elife',
packages=find_namespace_packages(include=["ai4elife", "ai4elife.*"]),
version='1.0.0',
description='ai4elife, Data-centric AI framework for tumor segmentation.',
url="https://github.com/KibromBerihu/ai4elife",
author="LITO laboratory, Institut Curie",
author_email='[email protected]',
license="MIT",
#install_requires=[],
keywords=['artificial intelligence', 'data-centric ai', 'medical image analysis',
'lfbnet', 'FDG-PET', 'tumor segmentation', 'biomarkers']
)
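With this file in place, the package can presumably be installed from the repository root with `pip install -e .`. A minimal sketch of what the find_namespace_packages filter selects; the directory layout in the comment is an assumption, not taken from the repository:

```python
from setuptools import find_namespace_packages

# Lists package directories below the current one that match the filter,
# e.g. ['ai4elife', 'ai4elife.models', ...] for a hypothetical layout.
print(find_namespace_packages(include=["ai4elife", "ai4elife.*"]))
```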
Binary file added src/LFBNet/__pycache__/__init__.cpython-36.pyc
Binary file not shown.
Binary file added src/LFBNet/__pycache__/data_loader.cpython-36.pyc
Binary file not shown.
8 changes: 4 additions & 4 deletions src/LFBNet/data_loader.py
@@ -146,19 +146,19 @@ def get_nii_files_path(data_directory: str) -> List[ndarray]:
for path in list(nii_paths):
# get the base name: means the file name
identifier_base_name = str(os.path.basename(path)).split('.')[0]
if "pet_0" == str(identifier_base_name):
if "pet_sagittal" == str(identifier_base_name):
pet_saggital = np.asanyarray(nib.load(path).dataobj)
pet_saggital = np.expand_dims(pet_saggital, axis=0)

elif "pet_1" == str(identifier_base_name):
elif "pet_coronal" == str(identifier_base_name):
pet_coronal = np.asanyarray(nib.load(path).dataobj)
pet_coronal = np.expand_dims(pet_coronal, axis=0)

if "gt_0" == str(identifier_base_name):
if "ground_truth_sagittal" == str(identifier_base_name):
gt_saggital = np.asanyarray(nib.load(path).dataobj)
gt_saggital = np.expand_dims(gt_saggital, axis=0)

elif "gt_1" == str(identifier_base_name):
elif "ground_truth_coronal" == str(identifier_base_name):
gt_coronal = np.asanyarray(nib.load(path).dataobj)
gt_coronal = np.expand_dims(gt_coronal, axis=0)
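A minimal sketch of the renamed matching scheme, with a hypothetical case folder standing in for real NIfTI files:

```python
import os
import numpy as np

# Hypothetical paths following the new naming convention of this commit.
nii_paths = ["case_01/pet_sagittal.nii", "case_01/pet_coronal.nii",
             "case_01/ground_truth_sagittal.nii", "case_01/ground_truth_coronal.nii"]
arrays = {}
for path in nii_paths:
    identifier = os.path.basename(path).split('.')[0]
    image = np.zeros((128, 256))          # stand-in for nib.load(path).dataobj
    arrays[identifier] = np.expand_dims(image, axis=0)  # add leading axis

print(sorted(arrays))  # ['ground_truth_coronal', 'ground_truth_sagittal', ...]
```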

5 binary files not shown.
2 changes: 2 additions & 0 deletions src/LFBNet/network_architecture/lfbnet.py
@@ -10,6 +10,8 @@
from numpy import ndarray
from copy import deepcopy

+ import logging
+ logging.getLogger('tensorflow').disabled = True
import tensorflow as tf
from keras.models import Model
from keras.layers import Conv2D
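The added lines take effect only because they run before the first tensorflow import. A related knob, shown here purely as an assumption and not part of this commit, is the TF_CPP_MIN_LOG_LEVEL environment variable, which also silences the C++ backend:

```python
import os

# Assumption: additionally suppress TensorFlow's C++ backend messages
# (3 = errors only). Must be set before tensorflow is first imported.
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'

import logging
logging.getLogger('tensorflow').disabled = True

import tensorflow as tf  # imports without the usual log chatter
```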
2 binary files not shown.
92 changes: 57 additions & 35 deletions src/LFBNet/preprocessing/preprocessing.py
@@ -118,7 +118,7 @@ def get_nii_files(nii_path):

# Get pet image
# if "pet" in str()
- pet_path = str(current_dir) + "/PET/"
+ pet_path = str(current_dir) + "/pet/"
pet = get_nii_files(pet_path)

# Get gt image
@@ -133,8 +133,10 @@ def get_nii_files(nii_path):
return [pet, gt]


- def resize_nii_to_desired_spacing(data: int = None, data_spacing: Tuple[float] = None, desired_spacing: ndarray = None,
-     interpolation_order_value: int = None):
+ def resize_nii_to_desired_spacing(
+     data: int = None, data_spacing: Tuple[float] = None, desired_spacing: ndarray = None,
+     interpolation_order_value: int = None
+ ):
""" resizes a given input data into the desired spacing using the specified interpolation order.
Args:
@@ -169,13 +171,17 @@ def resize_nii_to_desired_spacing(data: int = None, data_spacing: Tuple[float] =
if len(data_spacing) == 3 and len(np.squeeze(data).shape) == 3: # 3D input image
new_z_resolution = np.ceil(data.shape[2] * (data_spacing[2] / desired_spacing_z))

- # resize to new iamge resolution
- image_resized = resize(data, (new_x_resolution, new_y_resolution, new_z_resolution),
-     order=interpolation_order_value, preserve_range=True, anti_aliasing=False)
+ # resize to new image resolution
+ image_resized = resize(
+     data, (new_x_resolution, new_y_resolution, new_z_resolution), order=interpolation_order_value,
+     preserve_range=True, anti_aliasing=False
+ )

else: # if the given input image is 2D
- image_resized = resize(data, (new_x_resolution, new_y_resolution), order=interpolation_order_value,
-     preserve_range=True, anti_aliasing=False)
+ image_resized = resize(
+     data, (new_x_resolution, new_y_resolution), order=interpolation_order_value, preserve_range=True,
+     anti_aliasing=False
+ )

return image_resized
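A usage sketch of the resampling rule above, assuming a 3D volume at 4 mm isotropic spacing resampled to 2 mm; the shapes and spacings are illustrative only:

```python
import numpy as np
from skimage.transform import resize

data = np.random.rand(64, 64, 128)                 # hypothetical volume
data_spacing, desired = (4.0, 4.0, 4.0), (2.0, 2.0, 2.0)
new_shape = [int(np.ceil(n * old / new))
             for n, old, new in zip(data.shape, data_spacing, desired)]
resized = resize(data, new_shape, order=3, preserve_range=True,
                 anti_aliasing=False)
print(resized.shape)                               # (128, 128, 256)
```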

@@ -272,8 +278,10 @@ def crop_nii_to_desired_resolution(data: ndarray = None, cropped_resolution: Lis
return data


- def save_nii_images(image: List[ndarray] = None, affine: ndarray = None, path_save: str =None, identifier: str = None,
-     name: List[str] = None):
+ def save_nii_images(
+     image: List[ndarray] = None, affine: ndarray = None, path_save: str = None, identifier: str = None,
+     name: List[str] = None
+ ):
""" Save given images into the given directory. If no saving directory is given it will save into
./data/predicted/' directory.
@@ -380,9 +388,10 @@ def transform_coordinate_space(modality_1, modality_2, mode='nearest'):


# read PET and GT images
- def read_pet_gt_resize_crop_save_as_3d_andor_mip(data_path: str = None, data_name: str = None, saving_dir: str = None,
-     save_3D: bool = False, crop: bool = True, output_resolution: List[int] = None,
-     desired_spacing: List[float] = None, generate_mip: bool = False):
+ def read_pet_gt_resize_crop_save_as_3d_andor_mip(
+     data_path: str = None, data_name: str = None, saving_dir: str = None, save_3D: bool = False, crop: bool = True,
+     output_resolution: List[int] = None, desired_spacing: List[float] = None, generate_mip: bool = False
+ ):
""" Read pet and ground truth images from teh input data path. It also apply resize, and cropping operations.
Args:
@@ -402,7 +411,7 @@ def read_pet_gt_resize_crop_save_as_3d_andor_mip(data_path: str = None, data_nam
if output_resolution is not None:
# output resized and cropped image resolution
rows, columns, depths = output_resolution
- else: # defualt values
+ else: # default values
# output resized and cropped image resolution=
output_resolution = [128, 128, 256]

Expand All @@ -422,7 +431,7 @@ def read_pet_gt_resize_crop_save_as_3d_andor_mip(data_path: str = None, data_nam
# check if the directory exists
directory_exist(data_path)

- # by default the processed 3d and 2D MIP will be saved into the 'data' subdirectory, respectively with name tages
+ # by default the processed 3d and 2D MIP will be saved into the 'data' subdirectory, respectively with name tags
# as '_default_3d_dir' and '_default_MIP_dir'

def create_directory(directory_to_create: list):
@@ -511,8 +520,9 @@ def create_directory(directory_to_create: list):
if data_name == "remarc":
gt = np.flip(gt, axis=-1)

- gt = resize_nii_to_desired_spacing(gt, data_spacing=res_pet, desired_spacing=desired_spacing,
-     interpolation_order_value=0)
+ gt = resize_nii_to_desired_spacing(
+     gt, data_spacing=res_pet, desired_spacing=desired_spacing, interpolation_order_value=0
+ )

pet = np.asanyarray(pet.dataobj)
# if the given image is stacked as channels, e.g., one image in remarc: 175x175x274x2
@@ -521,16 +531,17 @@ def create_directory(directory_to_create: list):

# generate_mip_show(pet, gt, identifier=str(image_name))

- pet = resize_nii_to_desired_spacing(pet, data_spacing=res_pet, desired_spacing=desired_spacing,
-     interpolation_order_value=3)
+ pet = resize_nii_to_desired_spacing(
+     pet, data_spacing=res_pet, desired_spacing=desired_spacing, interpolation_order_value=3
+ )

'''
If most data have brain images at the very top, avoid cropping the brain images; instead crop from the bottom
or the leg part
'''
- if gt is not None:
+ if gt is not None:
if str(data_name).lower() == 'lnh':
- # flip left to right to match lnhdata to remarc
+ # flip left to right to match lnh data to remarc
gt = np.flip(gt, axis=-1)

crop_zero_above_brain = True
@@ -564,25 +575,37 @@ def create_directory(directory_to_create: list):
# output image affine
affine = np.diag([desired_spacing[0], desired_spacing[1], desired_spacing[2], 1])
if gt is not None:
- save_nii_images([pet, gt], affine=affine, path_save=saving_dir_3d, identifier=str(image_name),
-     name=['pet', 'gt'])
+ save_nii_images(
+     [pet, gt], affine=affine, path_save=saving_dir_3d, identifier=str(image_name),
+     name=['pet', 'ground_truth']
+ )
else:
save_nii_images([pet], affine=affine, path_save=saving_dir_3d, identifier=str(image_name), name=['pet'])

# generate Sagittal and coronal MIPs
if generate_mip:
- for sagittal_coronal in range(2): # assuming sagittal is on axis 0, and coronal is on axis 1
+ for sagittal_coronal in range(2):
pet_mip = generate_mip_from_3D(pet, mip_axis=int(sagittal_coronal)) # pet mip

+ # assuming sagittal is on axis 0, and coronal is on axis 1
+ if sagittal_coronal == 0: # sagittal
+     naming_ = "sagittal"
+ elif sagittal_coronal == 1:
+     naming_ = "coronal"

if gt is not None:
gt_mip = generate_mip_from_3D(gt, mip_axis=int(sagittal_coronal)) # gt mip
# save the generated MIP
- save_nii_images([pet_mip, gt_mip], affine, path_save=saving_dir_mip, identifier=str(image_name),
-     name=['pet_' + str(sagittal_coronal), 'gt_' + str(sagittal_coronal)])
+ save_nii_images(
+     [pet_mip, gt_mip], affine, path_save=saving_dir_mip, identifier=str(image_name),
+     name=['pet_' + str(naming_), 'ground_truth_' + str(naming_)]
+ )
else:
# save the generated MIP
- save_nii_images([pet_mip], affine, path_save=saving_dir_mip, identifier=str(image_name),
-     name=['pet_' + str(sagittal_coronal)])
+ save_nii_images(
+     [pet_mip], affine, path_save=saving_dir_mip, identifier=str(image_name),
+     name=['pet_' + str(naming_)]
+ )
return saving_dir_mip
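The naming_ variable now labels the projections explicitly. A minimal sketch of the projection itself, assuming generate_mip_from_3D reduces the volume with a maximum along the chosen axis (the np.max reduction is an assumption, not taken from the repository):

```python
import numpy as np

volume = np.random.rand(128, 128, 256)     # hypothetical x, y, z volume
mip_sagittal = np.max(volume, axis=0)      # project along x -> (128, 256)
mip_coronal = np.max(volume, axis=1)       # project along y -> (128, 256)
print(mip_sagittal.shape, mip_coronal.shape)
```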


@@ -592,10 +615,9 @@ def create_directory(directory_to_create: list):
# input_path = r"F:\Data\Remarc\REMARC/"
# data_ = "remarc"
#
# input_path = r"F:\Data\Vienna\No_ground_truth/"
# data_ = "vienna"
saving_dir_mip = read_pet_gt_resize_crop_save_as_3d_andor_mip(data_path=input_path, data_name=data_,
saving_dir=None, save_3D=True, crop=True,
output_resolution=[128, 128, 256],
desired_spacing=None, generate_mip=True)

input_path = r"F:\Data\Vienna\No_ground_truth/"
data_ = "LNH"
saving_dir_mip = read_pet_gt_resize_crop_save_as_3d_andor_mip(
data_path=input_path, data_name=data_, saving_dir=None, save_3D=True, crop=True,
output_resolution=[128, 128, 256], desired_spacing=None, generate_mip=True
)
3 binary files not shown.
34 changes: 17 additions & 17 deletions src/LFBNet/utilities/compute_surrogate_features.py
@@ -16,7 +16,7 @@
random.seed(7)


- class ComputesTMTVsDmaxFromnii:
+ class ComputesTMTVsDmaxFromNii:
""" computes the surrogate features from given 2D coronal and sagittal masks (grounnd truths).
Args:
@@ -26,7 +26,7 @@ class ComputesTMTVsDmaxFromnii:
Returns:
Returns a saved csv file with the computed surrogate biomarkers.
"""
- def __init__(self, data_path: str = None, get_identifier: str = "prd"):
+ def __init__(self, data_path: str = None, get_identifier: str = "predicted"):

self.data_path = data_path

@@ -54,14 +54,14 @@ def get_features(mask_to_compute_feature_on: ndarray = None):
Returns the computed surrogate MTV and dissemination along the height (z) and width (xy) of the image.
"""
- prof_xy, prof_z, stmtv = ComputesTMTVsDmaxFromnii.num_white_pixels(mask_to_compute_feature_on.copy())
- sdmax_xy = ComputesTMTVsDmaxFromnii.compute_surrogate_dissemination(prof_xy, percentile=[2, 98])
- sdmax_z = ComputesTMTVsDmaxFromnii.compute_surrogate_dissemination(prof_z, percentile=[2, 98])
+ prof_xy, prof_z, stmtv = ComputesTMTVsDmaxFromNii.num_white_pixels(mask_to_compute_feature_on.copy())
+ sdmax_xy = ComputesTMTVsDmaxFromNii.compute_surrogate_dissemination(prof_xy, percentile=[2, 98])
+ sdmax_z = ComputesTMTVsDmaxFromNii.compute_surrogate_dissemination(prof_z, percentile=[2, 98])
return stmtv, sdmax_xy, sdmax_z

# store all calculated features:
case_name_sagittal_coronal_axial_x_y_z_features = [
- ['PID', 'sTMTV_sagittal', 'sTMTV_coronal', "sTMTV_(mm)", 'Sagittal_xy', 'Sagittal_z', 'Coronal_xy',
+ ['PID', 'sTMTV_sagittal', 'sTMTV_coronal', "sTMTV_(mm\u00b2)", 'Sagittal_xy', 'Sagittal_z', 'Coronal_xy',
'Coronal_z', "sDmax_(mm)", "sDmax_(mm)_euclidean", 'X', 'Y', 'Z']]

for n, id in tqdm(enumerate(case_ids), total=(len(case_ids))):
@@ -70,7 +70,7 @@ def get_features(mask_to_compute_feature_on: ndarray = None):

# if any id ends with _sagittal or _coronal, the coronal and sagittal images are saved separately,
# otherwise not.
- saved_sagittal_coronal_seprately = any([True for case_id in case_ids_img_name if "_0" in case_id])
+ saved_sagittal_coronal_separately = any([True for case_id in case_ids_img_name if "_sagittal" in case_id])

# dictionary to store the values for sagittal and coronal features
sagittal = dict(smtv=0, sdmax_xy=0, sdmax_z=0)
@@ -80,13 +80,13 @@ def get_features(mask_to_compute_feature_on: ndarray = None):
# get number of files ending with the identifier, i.e., predicted (prd) or ground truth (gt).
if str(self.get_identifier) in str(read_image):
read_image_path = os.path.join(img_folder, read_image)
- mask, _ = ComputesTMTVsDmaxFromnii.get_image(read_image_path)
+ mask, _ = ComputesTMTVsDmaxFromNii.get_image(read_image_path)

- if saved_sagittal_coronal_seprately:
+ if saved_sagittal_coronal_separately:
# We have sagittal and coronal images saved separately.
if "_0" in str(read_image): # sagittal
if "_sagittal" in str(read_image): # sagittal
sagittal['smtv'], sagittal['sdmax_xy'], sagittal['sdmax_z'] = get_features(mask)
elif "_1" in str(read_image): # coronal
elif "_coronal" in str(read_image): # coronal
coronal['smtv'], coronal['sdmax_xy'], coronal['sdmax_z'] = get_features(mask)
else:
# sagittal and coronal given as one nifti image.
@@ -98,7 +98,7 @@ def get_features(mask_to_compute_feature_on: ndarray = None):
coronal['smtv'], coronal['sdmax_xy'], coronal['sdmax_z'] = get_features(mask_)

# combine the sagittal and coronal features, and compute them in physical space.
- sTMTV, sDmax_abs, sDmax_sqrt = ComputesTMTVsDmaxFromnii.compute_features_in_physical_space(
+ sTMTV, sDmax_abs, sDmax_sqrt = ComputesTMTVsDmaxFromNii.compute_features_in_physical_space(
sagittal, coronal
)
# add the given patient's features into all dataset.
@@ -109,7 +109,7 @@ def get_features(mask_to_compute_feature_on: ndarray = None):
)

# save the computed features into csv file
- ComputesTMTVsDmaxFromnii.write_it_to_csv(
+ ComputesTMTVsDmaxFromNii.write_it_to_csv(
data=case_name_sagittal_coronal_axial_x_y_z_features, dir_name=self.data_path,
identifier=self.get_identifier
)
@@ -176,7 +176,7 @@ def num_white_pixels(input_image):
Returns the number of pixels across each row of the image in profile_axis_z
and the number of pixels across each column of the image in profile_axis_xy.
"""
- input_image = ComputesTMTVsDmaxFromnii.threshold(input_image)
+ input_image = ComputesTMTVsDmaxFromNii.threshold(input_image)

profile_axis_Z, profile_axis_xy = [], []
for index in range(input_image.shape[0]):
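Per the docstring, num_white_pixels reduces a binary mask to per-row and per-column counts plus a total. A hedged sketch of that reduction; the vectorized sums are an assumption, while the method itself loops over rows:

```python
import numpy as np

mask = (np.random.rand(64, 128) > 0.9).astype(np.uint8)  # hypothetical mask
profile_z = mask.sum(axis=1)    # white pixels across each row
profile_xy = mask.sum(axis=0)   # white pixels across each column
stmtv = mask.sum()              # surrogate MTV: total white pixels
print(stmtv, profile_z.shape, profile_xy.shape)
```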
@@ -248,11 +248,11 @@ def get_image(img_mask_pt):
voxel_size = mask.header.get_zooms()

mask = np.asanyarray(mask.dataobj)
- mask = ComputesTMTVsDmaxFromnii.threshold(mask)
+ mask = ComputesTMTVsDmaxFromNii.threshold(mask)
return mask, voxel_size

@staticmethod
- def write_it_to_csv(data: ndarray = None, dir_name: str = None, identifier: str = "prd"):
+ def write_it_to_csv(data: ndarray = None, dir_name: str = None, identifier: str = "predicted"):
""" Write the surrogate feature in xls files
Args:
@@ -290,5 +290,5 @@ def write_it_to_csv(data: ndarray = None, dir_name: str = None, identifier: str

if __name__ == '__main__':
data_pth = r"E:\ai4elife\data\predicted/"
- cls = ComputesTMTVsDmaxFromnii(data_path=data_pth, get_identifier="prd")
+ cls = ComputesTMTVsDmaxFromNii(data_path=data_pth, get_identifier="predicted")
cls.compute_and_save_surrogate_features()
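For reference, a hedged sketch of a percentile-spread dissemination measure consistent with the percentile=[2, 98] argument used above; the repository's exact definition of compute_surrogate_dissemination may differ:

```python
import numpy as np

# Distance between the 2nd and 98th percentile positions of the
# nonzero entries of a profile (an assumed reading of sDmax).
profile = np.array([0, 0, 3, 5, 8, 9, 4, 2, 0, 0])
positions = np.flatnonzero(profile)
low, high = np.percentile(positions, [2, 98])
print(high - low)  # surrogate dissemination in pixel units
```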