diff --git a/images/dissemination_profile.png b/images/dissemination_profile.png new file mode 100644 index 0000000..5b6c5be Binary files /dev/null and b/images/dissemination_profile.png differ diff --git a/images/mip.png b/images/mip.png new file mode 100644 index 0000000..a55c491 Binary files /dev/null and b/images/mip.png differ diff --git a/run_docker_image.bat b/run_docker_image.bat index 5fdf408..a63e722 100644 --- a/run_docker_image.bat +++ b/run_docker_image.bat @@ -21,7 +21,7 @@ set CONTAINERID=%5 echo '[8] Running the docker with container id: %CONTAINERID% ....' docker run -it --rm --name %CONTAINERID%^ - -v %input_dir%:/input_data ^ + -v %input_dir%:/input ^ -v %output_dir%:/output ^ %docker_image_name%:%docker_tag% ^ diff --git a/setup.py b/setup.py new file mode 100644 index 0000000..59bce64 --- /dev/null +++ b/setup.py @@ -0,0 +1,14 @@ +from setuptools import setup, find_namespace_packages + +setup(name='ai4elife', + packages=find_namespace_packages(include=["ai4elife", "ai4elife.*"]), + version='1.0.0', + description='ai4elife, Data-centric AI framework for tumor segmentation.', + url="https://github.com/KibromBerihu/ai4elife", + author="LITO laboratory, Institut Curie", + author_email='kibrom.girum@curie.fr', + license="MIT", + #install_requires=[], + keywords=['artificial intelligence', 'data-centric ai', 'medical image analysis', 'lfbnet', 'FDG-PET', 'tumor segmentation', 'biomarkers'] + ) diff --git a/src/LFBNet/__pycache__/__init__.cpython-36.pyc b/src/LFBNet/__pycache__/__init__.cpython-36.pyc new file mode 100644 index 0000000..b7b011a Binary files /dev/null and b/src/LFBNet/__pycache__/__init__.cpython-36.pyc differ diff --git a/src/LFBNet/__pycache__/data_loader.cpython-36.pyc b/src/LFBNet/__pycache__/data_loader.cpython-36.pyc new file mode 100644 index 0000000..985bdd6 Binary files /dev/null and b/src/LFBNet/__pycache__/data_loader.cpython-36.pyc differ diff --git a/src/LFBNet/data_loader.py b/src/LFBNet/data_loader.py index e847780..087823f 100644 --- a/src/LFBNet/data_loader.py +++ b/src/LFBNet/data_loader.py @@ -146,19 +146,19 @@ def get_nii_files_path(data_directory: str) -> List[ndarray]: for path in list(nii_paths): # get the base name, i.e., the file name identifier_base_name = str(os.path.basename(path)).split('.')[0] - if "pet_0" == str(identifier_base_name): + if "pet_sagittal" == str(identifier_base_name): pet_saggital = np.asanyarray(nib.load(path).dataobj) pet_saggital = np.expand_dims(pet_saggital, axis=0) - elif "pet_1" == str(identifier_base_name): + elif "pet_coronal" == str(identifier_base_name): pet_coronal = np.asanyarray(nib.load(path).dataobj) pet_coronal = np.expand_dims(pet_coronal, axis=0) - if "gt_0" == str(identifier_base_name): + if "ground_truth_sagittal" == str(identifier_base_name): gt_saggital = np.asanyarray(nib.load(path).dataobj) gt_saggital = np.expand_dims(gt_saggital, axis=0) - elif "gt_1" == str(identifier_base_name): + elif "ground_truth_coronal" == str(identifier_base_name): gt_coronal = np.asanyarray(nib.load(path).dataobj) gt_coronal = np.expand_dims(gt_coronal, axis=0) diff --git a/src/LFBNet/losses/__pycache__/__init__.cpython-36.pyc b/src/LFBNet/losses/__pycache__/__init__.cpython-36.pyc new file mode 100644 index 0000000..570bdd7 Binary files /dev/null and b/src/LFBNet/losses/__pycache__/__init__.cpython-36.pyc differ diff --git a/src/LFBNet/losses/__pycache__/losses.cpython-36.pyc b/src/LFBNet/losses/__pycache__/losses.cpython-36.pyc new file mode 100644 index 0000000..faba8c2 Binary files /dev/null and
b/src/LFBNet/losses/__pycache__/losses.cpython-36.pyc differ diff --git a/src/LFBNet/network_architecture/__pycache__/__init__.cpython-36.pyc b/src/LFBNet/network_architecture/__pycache__/__init__.cpython-36.pyc new file mode 100644 index 0000000..d2e85ac Binary files /dev/null and b/src/LFBNet/network_architecture/__pycache__/__init__.cpython-36.pyc differ diff --git a/src/LFBNet/network_architecture/__pycache__/get_conv_blocks.cpython-36.pyc b/src/LFBNet/network_architecture/__pycache__/get_conv_blocks.cpython-36.pyc new file mode 100644 index 0000000..548c98a Binary files /dev/null and b/src/LFBNet/network_architecture/__pycache__/get_conv_blocks.cpython-36.pyc differ diff --git a/src/LFBNet/network_architecture/__pycache__/lfbnet.cpython-36.pyc b/src/LFBNet/network_architecture/__pycache__/lfbnet.cpython-36.pyc new file mode 100644 index 0000000..d27037c Binary files /dev/null and b/src/LFBNet/network_architecture/__pycache__/lfbnet.cpython-36.pyc differ diff --git a/src/LFBNet/network_architecture/lfbnet.py b/src/LFBNet/network_architecture/lfbnet.py index 33f5f67..5036a31 100644 --- a/src/LFBNet/network_architecture/lfbnet.py +++ b/src/LFBNet/network_architecture/lfbnet.py @@ -10,6 +10,8 @@ from numpy import ndarray from copy import deepcopy +import logging +logging.getLogger('tensorflow').disabled = True import tensorflow as tf from keras.models import Model from keras.layers import Conv2D diff --git a/src/LFBNet/preprocessing/__pycache__/__init__.cpython-36.pyc b/src/LFBNet/preprocessing/__pycache__/__init__.cpython-36.pyc new file mode 100644 index 0000000..6fb3397 Binary files /dev/null and b/src/LFBNet/preprocessing/__pycache__/__init__.cpython-36.pyc differ diff --git a/src/LFBNet/preprocessing/__pycache__/preprocessing.cpython-36.pyc b/src/LFBNet/preprocessing/__pycache__/preprocessing.cpython-36.pyc new file mode 100644 index 0000000..6b761be Binary files /dev/null and b/src/LFBNet/preprocessing/__pycache__/preprocessing.cpython-36.pyc differ diff --git a/src/LFBNet/preprocessing/preprocessing.py b/src/LFBNet/preprocessing/preprocessing.py index e783c75..5214362 100644 --- a/src/LFBNet/preprocessing/preprocessing.py +++ b/src/LFBNet/preprocessing/preprocessing.py @@ -118,7 +118,7 @@ def get_nii_files(nii_path): # Get pet image # if "pet" in str() - pet_path = str(current_dir) + "/PET/" + pet_path = str(current_dir) + "/pet/" pet = get_nii_files(pet_path) # Get gt image @@ -133,8 +133,10 @@ def get_nii_files(nii_path): return [pet, gt] -def resize_nii_to_desired_spacing(data: int = None, data_spacing: Tuple[float] = None, desired_spacing: ndarray = None, - interpolation_order_value: int = None): +def resize_nii_to_desired_spacing( + data: ndarray = None, data_spacing: Tuple[float] = None, desired_spacing: ndarray = None, + interpolation_order_value: int = None + ): """ resizes the given input data to the desired spacing using the specified interpolation order.
Args: @@ -169,13 +171,17 @@ def resize_nii_to_desired_spacing(data: int = None, data_spacing: Tuple[float] = if len(data_spacing) == 3 and len(np.squeeze(data).shape) == 3: # 3D input image new_z_resolution = np.ceil(data.shape[2] * (data_spacing[2] / desired_spacing_z)) - # resize to new iamge resolution - image_resized = resize(data, (new_x_resolution, new_y_resolution, new_z_resolution), - order=interpolation_order_value, preserve_range=True, anti_aliasing=False) + # resize to new image resolution + image_resized = resize( + data, (new_x_resolution, new_y_resolution, new_z_resolution), order=interpolation_order_value, + preserve_range=True, anti_aliasing=False + ) else: # if the given input image is 2D - image_resized = resize(data, (new_x_resolution, new_y_resolution), order=interpolation_order_value, - preserve_range=True, anti_aliasing=False) + image_resized = resize( + data, (new_x_resolution, new_y_resolution), order=interpolation_order_value, preserve_range=True, + anti_aliasing=False + ) return image_resized @@ -272,8 +278,10 @@ def crop_nii_to_desired_resolution(data: ndarray = None, cropped_resolution: Lis return data -def save_nii_images(image: List[ndarray] = None, affine: ndarray = None, path_save: str =None, identifier: str = None, - name: List[str] = None): +def save_nii_images( + image: List[ndarray] = None, affine: ndarray = None, path_save: str = None, identifier: str = None, + name: List[str] = None + ): """ Save given images into the given directory. If no saving directory is given it will save into the './data/predicted/' directory. @@ -380,9 +388,10 @@ def transform_coordinate_space(modality_1, modality_2, mode='nearest'): # read PET and GT images -def read_pet_gt_resize_crop_save_as_3d_andor_mip(data_path: str = None, data_name: str = None, saving_dir: str = None, - save_3D: bool = False, crop: bool = True, output_resolution: List[int] = None, - desired_spacing: List[float] = None, generate_mip: bool = False): +def read_pet_gt_resize_crop_save_as_3d_andor_mip( + data_path: str = None, data_name: str = None, saving_dir: str = None, save_3D: bool = False, crop: bool = True, + output_resolution: List[int] = None, desired_spacing: List[float] = None, generate_mip: bool = False + ): """ Read pet and ground truth images from the input data path. It also applies resize and cropping operations.
Args: @@ -402,7 +411,7 @@ def read_pet_gt_resize_crop_save_as_3d_andor_mip(data_path: str = None, data_nam if output_resolution is not None: # output resized and cropped image resolution rows, columns, depths = output_resolution - else: # defualt values + else: # default values # output resized and cropped image resolution= output_resolution = [128, 128, 256] @@ -422,7 +431,7 @@ def read_pet_gt_resize_crop_save_as_3d_andor_mip(data_path: str = None, data_nam # check if the directory exists directory_exist(data_path) - # by default the processed 3d and 2D MIP will be saved into the 'data' subdirectory, respectively with name tages + # by default the processed 3d and 2D MIP will be saved into the 'data' subdirectory, respectively with name tags # as '_default_3d_dir' and '_default_MIP_dir' def create_directory(directory_to_create: list): @@ -511,8 +520,9 @@ def create_directory(directory_to_create: list): if data_name == "remarc": gt = np.flip(gt, axis=-1) - gt = resize_nii_to_desired_spacing(gt, data_spacing=res_pet, desired_spacing=desired_spacing, - interpolation_order_value=0) + gt = resize_nii_to_desired_spacing( + gt, data_spacing=res_pet, desired_spacing=desired_spacing, interpolation_order_value=0 + ) pet = np.asanyarray(pet.dataobj) # if the given image is stacked as channels, e.g., one image in remarc: 175x175x274x2 @@ -521,16 +531,17 @@ def create_directory(directory_to_create: list): # generate_mip_show(pet, gt, identifier=str(image_name)) - pet = resize_nii_to_desired_spacing(pet, data_spacing=res_pet, desired_spacing=desired_spacing, - interpolation_order_value=3) + pet = resize_nii_to_desired_spacing( + pet, data_spacing=res_pet, desired_spacing=desired_spacing, interpolation_order_value=3 + ) ''' if most data are with brain images at the very top, avoid cropping the brain images, instead crop from the bottom or the leg part ''' - if gt is not None: + if gt is not None: if str(data_name).lower() == 'lnh': - # flip left to right to mach lnhdata to remarc + # flip left to right to match lnh data to remarc gt = np.flip(gt, axis=-1) crop_zero_above_brain = True @@ -564,25 +575,37 @@ def create_directory(directory_to_create: list): # output image affine affine = np.diag([desired_spacing[0], desired_spacing[1], desired_spacing[2], 1]) if gt is not None: - save_nii_images([pet, gt], affine=affine, path_save=saving_dir_3d, identifier=str(image_name), - name=['pet', 'gt']) + save_nii_images( + [pet, gt], affine=affine, path_save=saving_dir_3d, identifier=str(image_name), + name=['pet', 'ground_truth'] + ) else: save_nii_images([pet], affine=affine, path_save=saving_dir_3d, identifier=str(image_name), name=['pet']) # generate sagittal and coronal MIPs if generate_mip: - for sagittal_coronal in range(2): # assuming sagittal is on axis 0, and coronal is on axis 1 + for sagittal_coronal in range(2): pet_mip = generate_mip_from_3D(pet, mip_axis=int(sagittal_coronal)) # pet mip + # assuming sagittal is on axis 0, and coronal is on axis 1 + if sagittal_coronal == 0: # sagittal + naming_ = "sagittal" + elif sagittal_coronal == 1: + naming_ = "coronal" + if gt is not None: gt_mip = generate_mip_from_3D(gt, mip_axis=int(sagittal_coronal)) # gt mip # save the generated MIP - save_nii_images([pet_mip, gt_mip], affine, path_save=saving_dir_mip, identifier=str(image_name), - name=['pet_' + str(sagittal_coronal), 'gt_' + str(sagittal_coronal)]) + save_nii_images( + [pet_mip, gt_mip], affine, path_save=saving_dir_mip, identifier=str(image_name), + name=['pet_' + str(naming_), 'ground_truth_' + str(naming_)]
+ ) else: # save the generated MIP - save_nii_images([pet_mip], affine, path_save=saving_dir_mip, identifier=str(image_name), - name=['pet_' + str(sagittal_coronal)]) + save_nii_images( + [pet_mip], affine, path_save=saving_dir_mip, identifier=str(image_name), + name=['pet_' + str(naming_)] + ) return saving_dir_mip @@ -592,10 +615,9 @@ def create_directory(directory_to_create: list): # input_path = r"F:\Data\Remarc\REMARC/" # data_ = "remarc" # - # input_path = r"F:\Data\Vienna\No_ground_truth/" - # data_ = "vienna" - saving_dir_mip = read_pet_gt_resize_crop_save_as_3d_andor_mip(data_path=input_path, data_name=data_, - saving_dir=None, save_3D=True, crop=True, - output_resolution=[128, 128, 256], - desired_spacing=None, generate_mip=True) - + input_path = r"F:\Data\Vienna\No_ground_truth/" + data_ = "LNH" + saving_dir_mip = read_pet_gt_resize_crop_save_as_3d_andor_mip( + data_path=input_path, data_name=data_, saving_dir=None, save_3D=True, crop=True, + output_resolution=[128, 128, 256], desired_spacing=None, generate_mip=True + ) diff --git a/src/LFBNet/utilities/__pycache__/__init__.cpython-36.pyc b/src/LFBNet/utilities/__pycache__/__init__.cpython-36.pyc new file mode 100644 index 0000000..2481eee Binary files /dev/null and b/src/LFBNet/utilities/__pycache__/__init__.cpython-36.pyc differ diff --git a/src/LFBNet/utilities/__pycache__/compute_surrogate_features.cpython-36.pyc b/src/LFBNet/utilities/__pycache__/compute_surrogate_features.cpython-36.pyc new file mode 100644 index 0000000..54ff497 Binary files /dev/null and b/src/LFBNet/utilities/__pycache__/compute_surrogate_features.cpython-36.pyc differ diff --git a/src/LFBNet/utilities/__pycache__/train_valid_paths.cpython-36.pyc b/src/LFBNet/utilities/__pycache__/train_valid_paths.cpython-36.pyc new file mode 100644 index 0000000..b6d96be Binary files /dev/null and b/src/LFBNet/utilities/__pycache__/train_valid_paths.cpython-36.pyc differ diff --git a/src/LFBNet/utilities/compute_surrogate_features.py b/src/LFBNet/utilities/compute_surrogate_features.py index 57952ed..177a9dd 100644 --- a/src/LFBNet/utilities/compute_surrogate_features.py +++ b/src/LFBNet/utilities/compute_surrogate_features.py @@ -16,7 +16,7 @@ random.seed(7) -class ComputesTMTVsDmaxFromnii: +class ComputesTMTVsDmaxFromNii: """ computes the surrogate features from given 2D coronal and sagittal masks (ground truths). Args: @@ -26,7 +26,7 @@ class ComputesTMTVsDmaxFromnii: Returns: Returns a saved csv file with the computed surrogate biomarkers. """ - def __init__(self, data_path: str = None, get_identifier: str = "prd"): + def __init__(self, data_path: str = None, get_identifier: str = "predicted"): self.data_path = data_path @@ -54,14 +54,14 @@ def get_features(mask_to_compute_feature_on: ndarray = None): Returns the computed surrogate MTV and dissemination along the height (z) and width (xy) of the image.
""" - prof_xy, prof_z, stmtv = ComputesTMTVsDmaxFromnii.num_white_pixels(mask_to_compute_feature_on.copy()) - sdmax_xy = ComputesTMTVsDmaxFromnii.compute_surrogate_dissemination(prof_xy, percentile=[2, 98]) - sdmax_z = ComputesTMTVsDmaxFromnii.compute_surrogate_dissemination(prof_z, percentile=[2, 98]) + prof_xy, prof_z, stmtv = ComputesTMTVsDmaxFromNii.num_white_pixels(mask_to_compute_feature_on.copy()) + sdmax_xy = ComputesTMTVsDmaxFromNii.compute_surrogate_dissemination(prof_xy, percentile=[2, 98]) + sdmax_z = ComputesTMTVsDmaxFromNii.compute_surrogate_dissemination(prof_z, percentile=[2, 98]) return stmtv, sdmax_xy, sdmax_z # store all calculated features: case_name_sagittal_coronal_axial_x_y_z_features = [ - ['PID', 'sTMTV_sagittal', 'sTMTV_coronal', "sTMTV_(mm)", 'Sagittal_xy', 'Sagittal_z', 'Coronal_xy', + ['PID', 'sTMTV_sagittal', 'sTMTV_coronal', "sTMTV_(mm\u00b2)", 'Sagittal_xy', 'Sagittal_z', 'Coronal_xy', 'Coronal_z', "sDmax_(mm)", "sDmax_(mm)_euclidean", 'X', 'Y', 'Z']] for n, id in tqdm(enumerate(case_ids), total=(len(case_ids))): @@ -70,7 +70,7 @@ def get_features(mask_to_compute_feature_on: ndarray = None): # if there is any ids that ends with _0 or _1, the coronal and sagittal images are saved separately, # otherwise not. sagittal with '_o' and coronal with '_1'. - saved_sagittal_coronal_seprately = any([True for case_id in case_ids_img_name if "_0" in case_id]) + saved_sagittal_coronal_separately = any([True for case_id in case_ids_img_name if "_sagittal" in case_id]) # dictionary to store the values for sagittal and coronal features sagittal = dict(smtv=0, sdmax_xy=0, sdmax_z=0) @@ -80,13 +80,13 @@ def get_features(mask_to_compute_feature_on: ndarray = None): # get number of files ending with the identifier, i.e., predicted (prd) or ground truth (gt). if str(self.get_identifier) in str(read_image): read_image_path = os.path.join(img_folder, read_image) - mask, _ = ComputesTMTVsDmaxFromnii.get_image(read_image_path) + mask, _ = ComputesTMTVsDmaxFromNii.get_image(read_image_path) - if saved_sagittal_coronal_seprately: + if saved_sagittal_coronal_separately: # We have sagittal and coronal images saved separately. - if "_0" in str(read_image): # sagittal + if "_sagittal" in str(read_image): # sagittal sagittal['smtv'], sagittal['sdmax_xy'], sagittal['sdmax_z'] = get_features(mask) - elif "_1" in str(read_image): # coronal + elif "_coronal" in str(read_image): # coronal coronal['smtv'], coronal['sdmax_xy'], coronal['sdmax_z'] = get_features(mask) else: # sagittal and coronal given as one nifti image. @@ -98,7 +98,7 @@ def get_features(mask_to_compute_feature_on: ndarray = None): coronal['smtv'], coronal['sdmax_xy'], coronal['sdmax_z'] = get_features(mask_) # combine the sagittal and coronal features, and compute them in physical space. - sTMTV, sDmax_abs, sDmax_sqrt = ComputesTMTVsDmaxFromnii.compute_features_in_physical_space( + sTMTV, sDmax_abs, sDmax_sqrt = ComputesTMTVsDmaxFromNii.compute_features_in_physical_space( sagittal, coronal ) # add the given patient's features into all dataset. 
@@ -109,7 +109,7 @@ def get_features(mask_to_compute_feature_on: ndarray = None): ) # save the computed features into csv file - ComputesTMTVsDmaxFromnii.write_it_to_csv( + ComputesTMTVsDmaxFromNii.write_it_to_csv( data=case_name_sagittal_coronal_axial_x_y_z_features, dir_name=self.data_path, identifier=self.get_identifier ) @@ -176,7 +176,7 @@ def num_white_pixels(input_image): Returns the number of pixels across each row of the image in profile_axis_z and number of pixels across each column of the image in profile_axis_xy. """ - input_image = ComputesTMTVsDmaxFromnii.threshold(input_image) + input_image = ComputesTMTVsDmaxFromNii.threshold(input_image) profile_axis_Z, profile_axis_xy = [], [] for index in range(input_image.shape[0]): @@ -248,11 +248,11 @@ def get_image(img_mask_pt): voxel_size = mask.header.get_zooms() mask = np.asanyarray(mask.dataobj) - mask = ComputesTMTVsDmaxFromnii.threshold(mask) + mask = ComputesTMTVsDmaxFromNii.threshold(mask) return mask, voxel_size @staticmethod - def write_it_to_csv(data: ndarray = None, dir_name: str = None, identifier: str = "prd"): + def write_it_to_csv(data: ndarray = None, dir_name: str = None, identifier: str = "predicted"): """ Write the surrogate features to a csv file Args: @@ -290,5 +290,5 @@ def write_it_to_csv(data: ndarray = None, dir_name: str = None, identifier: str if __name__ == '__main__': data_pth = r"E:\ai4elife\data\predicted/" - cls = ComputesTMTVsDmaxFromnii(data_path=data_pth, get_identifier="prd") + cls = ComputesTMTVsDmaxFromNii(data_path=data_pth, get_identifier="predicted") cls.compute_and_save_surrogate_features() diff --git a/src/LFBNet/utilities/train_valid_paths.py b/src/LFBNet/utilities/train_valid_paths.py index 549d3d4..e02c57f 100644 --- a/src/LFBNet/utilities/train_valid_paths.py +++ b/src/LFBNet/utilities/train_valid_paths.py @@ -16,7 +16,7 @@ random.seed(7) -def write_it_to_csv(data, name='file', dir_name=None, columns_ = None): +def write_it_to_csv(data, name='file', dir_name=None, columns_=None): """ Args: @@ -42,7 +42,7 @@ def write_it_to_csv(data, name='file', dir_name=None, columns_ = None): if columns_ is None: columns = ['column_' + str(i) for i in range(data.shape[1])] else: - columns =[clm for clm in columns_] + columns = [clm for clm in columns_] if len(columns) < data.shape[1]: add_column_name = ['column_' + str(i) for i in range(abs(data.shape[1] - len(columns)))] @@ -101,7 +101,8 @@ def get_training_and_validation_ids_from_csv(path): def get_train_valid_ids_from_folder(path_train_valid: dict = None, ratio_valid_data: int = 0.25): """ gets the path to the train and validation data as dictionary, with dictionary name "train" and "valid". - if only the train or valid is given it considers random separation of training and validation ids with ratio_valid_data. + if only the train or valid is given it considers random separation of training and validation ids with + ratio_valid_data.
The default value is 25% :param path_train_valid: dictionary of path to training data, with key word "train" @@ -153,14 +154,16 @@ def get_train_valid_ids_from_folder(path_train_valid: dict = None, ratio_valid_d # save it for later reference print(train_test) - write_it_to_csv(train_test, name='train_valid_ids', - columns_ =['train','valid']) + write_it_to_csv( + train_test, name='train_valid_ids', columns_=['train', 'valid'] + ) return train_id, valid_id -def get_output_or_create_folder_name(model: str, task: str = None, trainer: str = None, pans: str = None, - fold: int = None, - processed_data_directory: str = None): +def get_output_or_create_folder_name( + model: str, task: str = None, trainer: str = None, pans: str = None, fold: int = None, + processed_data_directory: str = None + ): """ Retrieve the output directory for the LFB-net model given in the input parameters :param processed_data_directory: @@ -270,6 +273,5 @@ def read_csv_train_valid_index(path_, csv_identifier: str = None): if __name__ == '__main__': # print("train_valid_path_finder ") - # path_ = r"F:\LFB_Net\data\csv\training_validation_indexs\remarc/" + path_ = r"F:\LFB_Net\data\csv\training_validation_indexs\remarc/" xy = read_csv_train_valid_index(path_) - diff --git a/src/__init__.py b/src/__init__.py index d359053..6b203ea 100644 --- a/src/__init__.py +++ b/src/__init__.py @@ -1,6 +1,6 @@ from __future__ import absolute_import -print("\n\n Please cite the following paper when using LFB-Net: \n\n" - "K. B. Girum, G. Créhange and A. Lalande, \"Learning With Context Feedback Loop for Robust Medical Image Segmentation,\" " - "in IEEE Transactions on Medical Imaging, vol. 40, no. 6, pp. 1542-1554, June 2021, doi: 10.1109/TMI.2021.3060497. \n") +print("\n\n Please cite the following paper: \n\n" + "K. B. Girum, L. Rebaud, A. S. Cottereau, et al., 18F-FDG PET maximum intensity projections and artificial intelligence: " + "a win-win combination to easily measure prognostic biomarkers in DLBCL patients, in The Journal of Nuclear Medicine. \n") from . import *
diff --git a/src/__pycache__/__init__.cpython-36.pyc b/src/__pycache__/__init__.cpython-36.pyc new file mode 100644 index 0000000..af3f7d9 Binary files /dev/null and b/src/__pycache__/__init__.cpython-36.pyc differ diff --git a/src/run/__pycache__/__init__.cpython-36.pyc b/src/run/__pycache__/__init__.cpython-36.pyc new file mode 100644 index 0000000..bdd7def Binary files /dev/null and b/src/run/__pycache__/__init__.cpython-36.pyc differ diff --git a/src/run/__pycache__/parse_argument.cpython-36.pyc b/src/run/__pycache__/parse_argument.cpython-36.pyc new file mode 100644 index 0000000..9e7a2c5 Binary files /dev/null and b/src/run/__pycache__/parse_argument.cpython-36.pyc differ diff --git a/src/run/__pycache__/trainer.cpython-36.pyc b/src/run/__pycache__/trainer.cpython-36.pyc new file mode 100644 index 0000000..edc9ee5 Binary files /dev/null and b/src/run/__pycache__/trainer.cpython-36.pyc differ diff --git a/src/run/trainer.py b/src/run/trainer.py index 355f154..a926015 100644 --- a/src/run/trainer.py +++ b/src/run/trainer.py @@ -561,7 +561,7 @@ def evaluation( predicted = self.model.combine_and_train.predict([input_image, feedback_latent]) save_nii_images( [predicted, ground_truth, input_image], identifier=str(case_name), - name=[case_name + "_prd", case_name + "_gt", case_name + "_img"], path_save=self.predicted_directory + name=[case_name + "_predicted", case_name + "_ground_truth", case_name + "_image"], path_save=self.predicted_directory ) else: @@ -570,7 +570,7 @@ def evaluation( predicted = self.model.combine_and_train.predict([input_image, feedback_latent]) save_nii_images( [predicted, ground_truth, input_image], identifier=str(case_name), - name=[case_name + "_prd", case_name + "_gt", case_name + "_img"], path_save=self.predicted_directory + name=[case_name + "_predicted", case_name + "_ground_truth", case_name + "_image"], path_save=self.predicted_directory ) return all_loss_dice, dice_sen_sp @@ -806,8 +806,8 @@ def evaluation_test( predicted = self.model.combine_and_train.predict([input_image, feedback_latent]) save_nii_images( [predicted, ground_truth, input_image], identifier=str(case_name), - name=[case_name + "_prd", case_name + "_gt", case_name + "_img"], - path_save= os.path.join(str(self.predicted_dir) , 'predicted_data') + name=[case_name + "_predicted", case_name + "_ground_truth", case_name + "_pet"], + path_save=os.path.join(str(self.predicted_dir), 'predicted_data') ) return all_loss_dice @@ -832,7 +832,8 @@ def prediction(self, input_image: ndarray = None, case_name: str = None): predicted = self.model.combine_and_train.predict([input_image, feedback_latent]) save_nii_images( - image=[predicted, input_image], identifier=str(case_name), name=[case_name + "_prd", case_name + "_img"], + image=[predicted, input_image], identifier=str(case_name), + name=[case_name + "_predicted", case_name + "_pet"], path_save= os.path.join(str(self.predicted_dir), 'predicted_data') )
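Taken together, these renames define the pipeline's on-disk naming contract: preprocessing writes pet_sagittal/pet_coronal and ground_truth_sagittal/ground_truth_coronal MIPs, and the trainer saves predictions with a "_predicted" suffix that ComputesTMTVsDmaxFromNii now matches through get_identifier="predicted". A minimal end-to-end usage sketch, mirroring the __main__ blocks in this diff; the import paths and directory values are illustrative assumptions, not part of the change:

from src.LFBNet.preprocessing.preprocessing import read_pet_gt_resize_crop_save_as_3d_andor_mip
from src.LFBNet.utilities.compute_surrogate_features import ComputesTMTVsDmaxFromNii

# preprocess raw PET (and ground truth, if present) into resized/cropped 3D
# volumes plus sagittal and coronal MIPs (pet_sagittal, pet_coronal, ...)
mip_dir = read_pet_gt_resize_crop_save_as_3d_andor_mip(
    data_path="/input",  # hypothetical mount point, cf. run_docker_image.bat
    data_name="my_dataset",  # illustrative dataset tag
    saving_dir=None, save_3D=True, crop=True,
    output_resolution=[128, 128, 256], desired_spacing=None, generate_mip=True,
)

# after inference, masks saved with the "_predicted" suffix are picked up by
# the renamed feature extractor, which writes the sTMTV/sDmax csv report
features = ComputesTMTVsDmaxFromNii(data_path="/output/predicted", get_identifier="predicted")
features.compute_and_save_surrogate_features()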