Updating to version 2.2.1
MohammedSunoqrot committed Sep 16, 2024
1 parent ff42089 commit 4797273
Showing 10 changed files with 431 additions and 307 deletions.
2 binary files changed (contents not shown).
2 changes: 1 addition & 1 deletion setup.cfg
@@ -1,6 +1,6 @@
[metadata]
name = pyAutoRef
version = 2.1.0
version = 2.2.1
author = Mohammed R. S. Sunoqrot
author_email = [email protected]
description = Python package to perform AutoRef (prostate T2w MRI dual reference tissue [fat and muscle] normalization).
2 changes: 1 addition & 1 deletion src/pyAutoRef.egg-info/PKG-INFO
@@ -1,6 +1,6 @@
Metadata-Version: 2.1
Name: pyAutoRef
Version: 2.1.0
Version: 2.2.1
Summary: Python package to perform AutoRef (prostate T2w MRI dual reference tissue [fat and muscle] normalization).
Home-page: https://github.com/MohammedSunoqrot/pyAutoRef
Author: Mohammed R. S. Sunoqrot
49 changes: 30 additions & 19 deletions src/pyAutoRef/autoref.py
@@ -2,13 +2,15 @@
import time
import shutil
import pkg_resources
import logging

from pyAutoRef.pre_processing import pre_processing
from pyAutoRef.object_detection import object_detection
from pyAutoRef.post_processing import post_process_predictions
from pyAutoRef.normalization import normalize_image
from pyAutoRef.utils import save_image, check_predictions, check_input_image, get_intensities_without_detection


"""
This is the python version of the:
"Automated reference tissue normalization of T2-weighted MR images of the prostate using object recognition".
@@ -22,6 +24,10 @@
GitHub: https://github.com/MohammedSunoqrot/pyAutoRef
"""

# Set up logging
logging.basicConfig(level=logging.INFO,
format='%(asctime)s - %(levelname)s - %(message)s')


def autoref(input_image, output_image_path=None):
"""
@@ -38,23 +44,24 @@ def autoref(input_image, output_image_path=None):
Note:
The normalized 3D image is saved to the specified output_image_path if provided.
"""
# Measure the time taken for processing
start_time = time.time()

# Check if the input image is valid
try:
input_image_type = check_input_image(input_image)
print(f"Input image type: {input_image_type}")
logging.info(f"Input image type: {input_image_type}")
except ValueError as e:
print(e)
logging.error(f"Invalid input image: {e}")
return None

# Log that the method started processing
if input_image_type == 'Path':
print(f"=> Started AutoRef (fat and muscle) normalizing: {input_image}")
logging.info(
f"=> Started AutoRef (fat and muscle) normalizing: {input_image}")
else:
print(f"=> Started AutoRef (fat and muscle) normalizing...")
logging.info(f"=> Started AutoRef (fat and muscle) normalizing...")

# Get the current script file path
current_file_path = os.path.abspath(__file__)
@@ -69,47 +76,51 @@ def autoref(input_image, output_image_path=None):
# Perform object detection on the preprocessed image
model_path = pkg_resources.resource_filename(__name__, "model.onnx")
top_predictions = object_detection(temp_images_dir, model_path)

# Initial check for classes with zero predictions
class_with_zero_predictions = check_predictions(top_predictions)

# If any class had zero predictions, recalculate with new parameters
if class_with_zero_predictions:
print(f"No detected objects for {class_with_zero_predictions}. Recalculating predictions...")

logging.info(
f"No detected objects for {class_with_zero_predictions}. Recalculating predictions...")

# Perform object detection again with new parameters
top_predictions = object_detection(temp_images_dir, model_path, yolo_classes=["fat", "muscle"], slice_percent=[0, 1])

top_predictions = object_detection(temp_images_dir, model_path, yolo_classes=[
"fat", "muscle"], slice_percent=[0, 1])

# Check again after recalculating
class_with_zero_predictions = check_predictions(top_predictions)

# If still any class has zero predictions, normalize without detection
if class_with_zero_predictions:
print(f"Objects still not detected. Recalculating intensities without detection...")
processed_images_intensities = get_intensities_without_detection(resized_corrected_image)
logging.info(
"Objects still not detected. Recalculating intensities without detection...")
processed_images_intensities = get_intensities_without_detection(
resized_corrected_image)
else:
# Perform post-processing to the detected objects
processed_images_intensities = post_process_predictions(
resized_corrected_image, top_predictions)
resized_corrected_image, top_predictions)

# Perform normalization
normalized_image = normalize_image(processed_images_intensities, corrected_image)
normalized_image = normalize_image(
processed_images_intensities, corrected_image)

# Write the normalized image to the output path if provided
if output_image_path:
save_image(normalized_image, input_image,
is_dicom, output_image_path)
save_image(normalized_image, input_image, is_dicom, output_image_path)

# Delete the temp folder
shutil.rmtree(temp_images_dir)

# Measure the time taken for processing
end_time = time.time()
processing_time = end_time - start_time
print("==> Done with AutoRef (fat and muscle) normalizing It took: {:.2f} seconds.".format(
logging.info("==> Done with AutoRef (fat and muscle) normalizing. It took: {:.2f} seconds.".format(
processing_time))
if output_image_path:
print(f" Output saved in: {output_image_path}")
logging.info(f" Output saved in: {output_image_path}")

# Return the AutoRef normalized image
return normalized_image
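
A minimal usage sketch of the updated entry point, for context: the input and output paths below are placeholders (not files shipped with the package), and the import assumes the src/pyAutoRef/autoref.py layout shown in this commit.

# Hypothetical usage of autoref(); the paths are placeholders.
from pyAutoRef.autoref import autoref

normalized = autoref(
    input_image="/data/case_001/t2w.nii.gz",               # T2w image file (or a DICOM folder path)
    output_image_path="/data/case_001/t2w_autoref.nii.gz"  # optional; omit to skip saving
)

if normalized is None:
    # autoref() returns None when check_input_image() rejects the input
    print("Normalization failed: invalid input image.")
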
52 changes: 34 additions & 18 deletions src/pyAutoRef/normalization.py
@@ -1,32 +1,48 @@
import numpy as np
import SimpleITK as sitk


def normalize_image(processed_images_intensities, corrected_image, fat_reference_value=121, muscle_reference_value=40,
fat_intensity_percentile=95, muscle_intensity_percentile=5):
"""
Normalize the corrected 3D image using linear scaling based on the 90th percentile for fat and
the 10th percentile for muscle.
Normalize the corrected 3D image using linear scaling based on the specified percentiles for fat and muscle intensities.
This function performs normalization of the 3D image by calculating the intensity values for fat and muscle
based on specified percentiles and then scaling the corrected image accordingly.
Parameters:
processed_images_intensities (numpy.ndarray): An array contains all the intensites
under the post-processed area under bounding box for all images each class separtly.
corrected_image (SimpleITK.Image): The 3D image after correction and post-processing.
fat_reference_value (float, optional): The reference value for fat after normalization.
Default is 121.
muscle_reference_value (float, optional): The reference value for muscle after normalization.
Default is 40.
fat_intensity_percentile (int, optional): The percentile for fat intensity to represent detected fat. Default is 95.
muscle_intensity_percentile (int, optional): The percentile for muscle intensity to represent detected muscle. Default is 5.
processed_images_intensities (dict): A dictionary containing arrays of intensities extracted from post-processed areas.
Keys are class names ('fat', 'muscle') and values are arrays of intensities.
corrected_image (SimpleITK.Image): The 3D image that has been corrected and post-processed.
fat_reference_value (float, optional): The target reference value for fat after normalization. Default is 121.
muscle_reference_value (float, optional): The target reference value for muscle after normalization. Default is 40.
fat_intensity_percentile (int, optional): The percentile value to determine fat intensity. Default is 95.
muscle_intensity_percentile (int, optional): The percentile value to determine muscle intensity. Default is 5.
Returns:
normalized_image (SimpleITK.Image): The normalized 3D image.
SimpleITK.Image: The normalized 3D image.
"""
# Calculate the 95th/defined percentile for fat and the 5th/defined percentile for muscle
fat_intensity = np.percentile(processed_images_intensities['fat'], fat_intensity_percentile)
muscle_intensity = np.percentile(processed_images_intensities['muscle'], muscle_intensity_percentile)
# Extract the intensity values for fat and muscle
fat_intensities = processed_images_intensities.get('fat', np.array([]))
muscle_intensities = processed_images_intensities.get(
'muscle', np.array([]))

# Calculate intensity percentiles
fat_intensity = np.percentile(
fat_intensities, fat_intensity_percentile) if fat_intensities.size > 0 else 1
muscle_intensity = np.percentile(
muscle_intensities, muscle_intensity_percentile) if muscle_intensities.size > 0 else 0

# Convert SimpleITK image to NumPy array for processing
corrected_image_np = sitk.GetArrayFromImage(corrected_image)

# Apply linear normalization
normalized_image_np = ((corrected_image_np - muscle_intensity) / (fat_intensity - muscle_intensity)
) * (fat_reference_value - muscle_reference_value) + muscle_reference_value

# Linearly scale the corrected image to the fat and muscle reference values
normalized_image = ((corrected_image - muscle_intensity) / (fat_intensity - muscle_intensity)
) * (fat_reference_value - muscle_reference_value) + muscle_reference_value
# Convert the NumPy array back to SimpleITK image
normalized_image = sitk.GetImageFromArray(normalized_image_np)
normalized_image.CopyInformation(corrected_image)

# Return the normalized image
return normalized_image
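
To make the linear scaling above concrete, here is a small numeric sketch with invented percentile values: muscle-level voxels map to the muscle reference (40) and fat-level voxels map to the fat reference (121).

# Worked example of the linear scaling with fabricated intensities.
import numpy as np

fat_intensity, muscle_intensity = 850.0, 210.0   # e.g. 95th percentile (fat) and 5th percentile (muscle)
fat_ref, muscle_ref = 121, 40                    # reference values used by normalize_image()

voxels = np.array([210.0, 530.0, 850.0])         # muscle-like, intermediate, fat-like intensities
normalized = ((voxels - muscle_intensity) / (fat_intensity - muscle_intensity)) \
    * (fat_ref - muscle_ref) + muscle_ref
print(normalized)                                # [ 40.   80.5 121. ]

Note that when a class has no extracted intensities, the new code falls back to fat_intensity = 1 and muscle_intensity = 0, so the formula degenerates to scaling the raw intensities by (121 - 40) and adding 40 rather than failing on an empty array.
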
63 changes: 44 additions & 19 deletions src/pyAutoRef/object_detection.py
@@ -1,61 +1,86 @@
import os

import logging
from pyAutoRef.utils import detect_objects_on_image

# Set up logging
logging.basicConfig(level=logging.INFO,
format='%(asctime)s - %(levelname)s - %(message)s')


def object_detection(input_folder, model_path, yolo_classes=["fat", "muscle"], slice_percent=[0.15, 0.85]):
def object_detection(input_folder, model_path, yolo_classes=None, slice_percent=None):
"""
Perform prediction on multiple images in an input folder and select the top 3 images
with the highest prediction score for each class.
Perform object detection on multiple images in an input folder and select the top 3 images
with the highest prediction scores for each class.
Parameters:
input_folder (str): The path to the folder containing the input images.
model_path (str): The path to the YOLO v8 ONNX model file.
yolo_classes (list): The classes of the trained model. Default is ["fat", "muscle"].
slice_percent (list): The percentage range of slices to be detected. Default is [0.15, 0.85]. Remove the first 15% and the last 15% of slices.
yolo_classes (list, optional): The classes of the trained model. Default is ["fat", "muscle"].
slice_percent (list, optional): The percentage range of slices to be detected. Default is [0.15, 0.85].
Removes the first 15% and the last 15% of slices.
Returns:
top_predictions (dict): A dictionary containing the top 3 predictions for each class.
The keys are class names, and the values are lists of dictionaries.
Each dictionary contains the 'slice' name (without the ".jpg"),
the bounding box coordinates, and the probability score.
"""
if yolo_classes is None:
yolo_classes = ["fat", "muscle"]
if slice_percent is None:
slice_percent = [0.15, 0.85]

# Initialize a dictionary to store the top 3 predictions for each class.
top_predictions = {class_name: [] for class_name in yolo_classes}

# Get a list of image filenames in the input folder.
image_filenames = [filename for filename in os.listdir(
input_folder) if filename.endswith(".jpg")]
try:
image_filenames = [filename for filename in os.listdir(
input_folder) if filename.endswith(".jpg")]
except FileNotFoundError as e:
logging.error(f"Input folder not found: {e}")
raise

# Sort the image filenames (assuming filenames are in the format '00.jpg', '01.jpg', etc.)
# Sort the image filenames
image_filenames.sort()

# Calculate the number of images
num_images = len(image_filenames)

# Calculate the middle part range
if num_images == 0:
logging.warning("No images found in the input folder.")
return top_predictions

# Calculate the range of slices to process
start_index = int(num_images * slice_percent[0])
end_index = int(num_images * slice_percent[1])

# Loop through the images in the input folder and perform object detection on them.
# Perform object detection on the selected range of images
for filename in image_filenames[start_index:end_index]:
image_path = os.path.join(input_folder, filename)
result = detect_objects_on_image(image_path, model_path)
try:
result = detect_objects_on_image(image_path, model_path)
except Exception as e:
logging.error(f"Error detecting objects in {filename}: {e}")
continue

# For each detected object, store the information in the top_predictions dictionary.
# Store the prediction results
for prediction in result:
class_name = prediction[4]
top_predictions[class_name].append({
'slice': os.path.splitext(filename)[0],
'bbox': prediction[:4],
'probability': prediction[5]
})
if class_name in top_predictions:
top_predictions[class_name].append({
'slice': os.path.splitext(filename)[0],
'bbox': prediction[:4],
'probability': prediction[5]
})

# Sort the predictions by probability score in descending order for each class.
# Sort and select top 3 predictions for each class
for class_name in yolo_classes:
top_predictions[class_name].sort(
key=lambda x: x['probability'], reverse=True)
top_predictions[class_name] = top_predictions[class_name][:3]
logging.info(
f"Top predictions for class {class_name}: {top_predictions[class_name]}")

# Return the final dictionary containing the top 3 predictions for each class.
return top_predictions
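
A small self-contained illustration of the per-class sort-and-keep-top-3 step; the slice names, boxes, and scores are fabricated for the example.

# Illustration of the top-3 selection with fabricated predictions.
fabricated = {
    "fat": [
        {"slice": "07", "bbox": [12, 30, 80, 95], "probability": 0.62},
        {"slice": "09", "bbox": [10, 28, 82, 97], "probability": 0.88},
        {"slice": "08", "bbox": [11, 29, 81, 96], "probability": 0.74},
        {"slice": "10", "bbox": [14, 31, 79, 94], "probability": 0.51},
    ],
    "muscle": [],   # a class with no detections stays an empty list
}

for class_name, preds in fabricated.items():
    preds.sort(key=lambda x: x["probability"], reverse=True)
    fabricated[class_name] = preds[:3]

print([p["slice"] for p in fabricated["fat"]])   # ['09', '08', '07']

An empty list for a class is what check_predictions() later flags in autoref(), triggering the recalculation with slice_percent=[0, 1].
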
(Additional changed files not loaded.)
