diff --git a/asltk/aux_methods.py b/asltk/aux_methods.py index 21d3d56..e3987a1 100644 --- a/asltk/aux_methods.py +++ b/asltk/aux_methods.py @@ -191,3 +191,25 @@ def get_optimal_core_count( # Return the smaller of: cores based on memory or total available cores return min(cores_by_memory, cpu_count()) + + +def estimate_memory_usage(data: np.ndarray) -> float: + """Estimate memory usage of a numpy array in MB. + + This function calculates the memory footprint of a given numpy array + by determining its size in bytes and converting it to megabytes (MB). + + Args: + data (np.ndarray): The numpy array for which to estimate memory usage. + + Returns: + float: Estimated memory usage in megabytes (MB). + """ + if not isinstance(data, np.ndarray): + raise TypeError( + f'Input must be a numpy array, got {type(data)} instead.' + ) + + total_bytes = data.nbytes + total_mb = total_bytes / (1024**2) # Convert bytes to MB + return total_mb diff --git a/asltk/reconstruction/cbf_mapping.py b/asltk/reconstruction/cbf_mapping.py index c7c1ee0..22d72be 100644 --- a/asltk/reconstruction/cbf_mapping.py +++ b/asltk/reconstruction/cbf_mapping.py @@ -1,12 +1,16 @@ from multiprocessing import Array, Pool, cpu_count import numpy as np -import SimpleITK as sitk from rich.progress import Progress from scipy.optimize import curve_fit from asltk.asldata import ASLData -from asltk.aux_methods import _apply_smoothing_to_maps, _check_mask_values +from asltk.aux_methods import ( + _apply_smoothing_to_maps, + _check_mask_values, + estimate_memory_usage, + get_optimal_core_count, +) from asltk.logging_config import get_logger, log_processing_step from asltk.models.signal_dynamic import asl_model_buxton from asltk.mri_parameters import MRIParameters @@ -180,7 +184,7 @@ def create_map( ub=[1.0, 5000.0], lb=[0.0, 0.0], par0=[1e-5, 1000], - cores: int = int(cpu_count() / 2), + cores='auto', smoothing=None, smoothing_params=None, ): @@ -280,12 +284,28 @@ def create_map( logger = get_logger('cbf_mapping') 
logger.info('Starting CBF map creation') - if (cores < 0) or (cores > cpu_count()) or not isinstance(cores, int): - error_msg = 'Number of proecess must be at least 1 and less than maximum cores availble.' - logger.error( - f'{error_msg} Requested: {cores}, Available: {cpu_count()}' + if not isinstance(cores, str): + if ( + (cores < 0) + or (cores > cpu_count()) + or not isinstance(cores, int) + ): + error_msg = 'Number of CPU cores must be at least 1 and less than maximum cores available.' + logger.error( + f'{error_msg} Requested: {cores}, Available: {cpu_count()}' + ) + raise ValueError(error_msg) + elif isinstance(cores, str): + if cores not in ['auto']: + error_msg = ( + 'Cores parameter must be either "auto" or an integer.' + ) + logger.error(error_msg) + raise ValueError(error_msg) + else: + raise ValueError( + 'Cores parameter must be either "auto" or an integer.' ) - raise ValueError(error_msg) if ( len(self._asl_data.get_ld()) == 0 @@ -314,14 +334,31 @@ def create_map( f'Processing volume dimensions: {z_axis}x{y_axis}x{x_axis}' ) - cbf_map_shared = Array('d', z_axis * y_axis * x_axis, lock=False) - att_map_shared = Array('d', z_axis * y_axis * x_axis, lock=False) + cbf_map_shared = Array('f', z_axis * y_axis * x_axis, lock=False) + att_map_shared = Array('f', z_axis * y_axis * x_axis, lock=False) + + # Estimate all the memory usage needed for each core processing + asldata_memory = estimate_memory_usage( + self._asl_data('pcasl').get_as_numpy() + ) + brain_mask_memory = estimate_memory_usage(self._brain_mask) + cbf_memory = estimate_memory_usage(self._cbf_map) + att_memory = estimate_memory_usage(self._att_map) + + actual_cores = get_optimal_core_count( + cores, + sum([asldata_memory, brain_mask_memory, cbf_memory, att_memory]), + ) + + # Make a copy of base information + m0_array = asl_data('m0').get_as_numpy() + pcasl_array = asl_data('pcasl').get_as_numpy() log_processing_step( 'Running voxel-wise CBF fitting', 'this may take several minutes' ) with
Pool( - processes=cores, + processes=actual_cores, initializer=_cbf_init_globals, initargs=(cbf_map_shared, att_map_shared, brain_mask, asl_data), ) as pool: @@ -339,6 +376,8 @@ def create_map( par0, lb, ub, + m0_array, + pcasl_array, ), callback=lambda _: progress.update(task, advance=1), ) @@ -347,12 +386,12 @@ def create_map( for result in results: result.wait() - self._cbf_map = np.frombuffer(cbf_map_shared).reshape( - z_axis, y_axis, x_axis - ) - self._att_map = np.frombuffer(att_map_shared).reshape( - z_axis, y_axis, x_axis - ) + self._cbf_map = np.frombuffer( + cbf_map_shared, dtype=np.float32 + ).reshape(z_axis, y_axis, x_axis) + self._att_map = np.frombuffer( + att_map_shared, dtype=np.float32 + ).reshape(z_axis, y_axis, x_axis) # Log completion statistics cbf_values = self._cbf_map[brain_mask > 0] @@ -400,20 +439,20 @@ def _cbf_init_globals( def _cbf_process_slice( - i, x_axis, y_axis, z_axis, BuxtonX, par0, lb, ub + i, x_axis, y_axis, z_axis, BuxtonX, par0, lb, ub, m0, pcasl ): # pragma: no cover # indirect call method by CBFMapping().create_map() for j in range(y_axis): for k in range(z_axis): if brain_mask[k, j, i] != 0: - m0_px = asl_data('m0').get_as_numpy()[k, j, i] + m0_px = m0[k, j, i] def mod_buxton(Xdata, par1, par2): return asl_model_buxton( Xdata[0], Xdata[1], m0_px, par1, par2 ) - Ydata = asl_data('pcasl').get_as_numpy()[0, :, k, j, i] + Ydata = pcasl[0, :, k, j, i] # Calculate the processing index for the 3D space index = k * (y_axis * x_axis) + j * x_axis + i @@ -422,8 +461,8 @@ def mod_buxton(Xdata, par1, par2): par_fit, _ = curve_fit( mod_buxton, BuxtonX, Ydata, p0=par0, bounds=(lb, ub) ) - cbf_map[index] = par_fit[0] - att_map[index] = par_fit[1] + cbf_map[index] = np.float32(par_fit[0]) + att_map[index] = np.float32(par_fit[1]) except RuntimeError: cbf_map[index] = 0.0 att_map[index] = 0.0 diff --git a/asltk/reconstruction/ultralong_te_mapping.py b/asltk/reconstruction/ultralong_te_mapping.py index c33cc4b..2b41ee9 100644 --- 
a/asltk/reconstruction/ultralong_te_mapping.py +++ b/asltk/reconstruction/ultralong_te_mapping.py @@ -57,7 +57,12 @@ def __init__(self, asl_data: ASLData) -> None: Marianne A.A. van Walderveen, Mark A. van Buchem, Matthias J.P. van Osch, "Ultra-long-TE arterial spin labeling reveals rapid and brain-wide blood-to-CSF water transport in humans", NeuroImage, ISSN 1053-8119, - https://doi.org/10.1016/j.neuroimage.2021.118755. + doi: [10.1016/j.neuroimage.2021.118755](http://doi.org/10.1016/j.neuroimage.2021.118755). + + Important: + Although this method applies parallel CPU processing, it is still a + highly time- and memory-consuming procedure. Take this into account + when working with large datasets or high-resolution images. Examples: Basic Ultralong-TE ASL mapping setup: @@ -218,10 +223,11 @@ def create_map( T1 relaxation maps that characterize blood-to-gray matter water exchange. The analysis uses multiple echo times to separate blood and tissue signal contributions. - The method implements the multi-compartment TE ASL model described in: - "Ultra-long-TE arterial spin labeling reveals rapid and brain-wide - blood-to-CSF water transport in humans", NeuroImage, 2022. - doi: 10.1016/j.neuroimage.2021.118755 + Note: + The method implements the multi-compartment TE ASL model described in: + "Ultra-long-TE arterial spin labeling reveals rapid and brain-wide + blood-to-CSF water transport in humans", NeuroImage, 2022. 
+ doi: [10.1016/j.neuroimage.2021.118755](http://doi.org/10.1016/j.neuroimage.2021.118755) Note: The CBF and ATT maps can be provided before calling this method, @@ -369,6 +375,10 @@ def create_map( 'd', z_axis * y_axis * x_axis, lock=False ) + # Make a copy of base information + m0_array = asl_data('m0').get_as_numpy() + pcasl_array = asl_data('pcasl').get_as_numpy() + with Pool( processes=actual_cores, initializer=_multite_init_globals, @@ -392,7 +402,17 @@ def create_map( results = [ pool.apply_async( _tcsfgm_multite_process_slice, - args=(i, x_axis, y_axis, z_axis, par0, lb, ub), + args=( + i, + x_axis, + y_axis, + z_axis, + par0, + lb, + ub, + m0_array, + pcasl_array, + ), callback=lambda _: progress.update( task, advance=1 ), @@ -478,13 +498,13 @@ def _multite_init_globals( def _tcsfgm_multite_process_slice( - i, x_axis, y_axis, z_axis, par0, lb, ub + i, x_axis, y_axis, z_axis, par0, lb, ub, m0, pcasl ): # pragma: no cover # indirect call method by CBFMapping().create_map() for j in range(y_axis): for k in range(z_axis): if brain_mask[k, j, i] != 0: - m0_px = asl_data('m0').get_as_numpy()[k, j, i] + m0_px = m0[k, j, i] def mod_2comp(Xdata, par1): return asl_model_multi_te( @@ -500,8 +520,7 @@ def mod_2comp(Xdata, par1): ) Ydata = ( - asl_data('pcasl') - .get_as_numpy()[:, :, k, j, i] + pcasl[:, :, k, j, i] .reshape( ( len(ld_arr) * len(te_arr), diff --git a/docs/examples/workflow_examples.md b/docs/examples/workflow_examples.md index a0cfa4c..46ba0dc 100644 --- a/docs/examples/workflow_examples.md +++ b/docs/examples/workflow_examples.md @@ -4,11 +4,21 @@ This document provides comprehensive examples of using the asltk library for var ## Table of Contents -1. [Basic CBF Mapping Workflow](#basic-cbf-mapping-workflow) -2. [Multi-Echo ASL Analysis](#multi-echo-asl-analysis) -3. [Diffusion-Weighted ASL Analysis](#diffusion-weighted-asl-analysis) -4. [Complete Analysis Pipeline](#complete-analysis-pipeline) -5. 
[Best Practices and Tips](#best-practices-and-tips) +- [ASL Processing Workflow Examples](#asl-processing-workflow-examples) + - [Table of Contents](#table-of-contents) + - [Basic CBF Mapping Workflow](#basic-cbf-mapping-workflow) + - [Expected Results](#expected-results) + - [Example CBF and ATT Maps](#example-cbf-and-att-maps) + - [Multi-Echo ASL Analysis](#multi-echo-asl-analysis) + - [Example Multi-TE Results](#example-multi-te-results) + - [Ultra-Long TE ASL Mapping](#ultra-long-te-asl-mapping) + - [Diffusion-Weighted ASL Analysis](#diffusion-weighted-asl-analysis) + - [Complete Analysis Pipeline](#complete-analysis-pipeline) + - [Best Practices and Tips](#best-practices-and-tips) + - [Performance Optimization](#performance-optimization) + - [Parameter Selection](#parameter-selection) + - [Quality Assessment](#quality-assessment) + - [Troubleshooting](#troubleshooting) ## Basic CBF Mapping Workflow @@ -17,7 +27,7 @@ This example shows the standard ASL processing workflow for generating CBF and A ```python from asltk.asldata import ASLData from asltk.reconstruction import CBFMapping -from asltk.utils import load_image, save_image +from asltk.utils.io import ImageIO import numpy as np # Step 1: Load ASL data @@ -32,18 +42,13 @@ asl_data = ASLData( cbf_mapper = CBFMapping(asl_data) # Step 3: Set brain mask (recommended for speed and quality) -brain_mask = load_image('./data/brain_mask.nii.gz') +brain_mask = ImageIO(image_array='./data/brain_mask.nii.gz') cbf_mapper.set_brain_mask(brain_mask) # Step 4: Generate CBF and ATT maps # Note: All parameters (ub, lb, par0, cores) have default values and are optional # You can simply call: results = cbf_mapper.create_map() for default behavior -results = cbf_mapper.create_map( - ub=[1.0, 5000.0], # Upper bounds: [CBF, ATT] - lb=[0.0, 0.0], # Lower bounds: [CBF, ATT] - par0=[1e-5, 1000], # Initial guess: [CBF, ATT] - cores=4 # Use 4 CPU cores -) +results = cbf_mapper.create_map() # Step 5: Access and save results cbf_map = 
results['cbf_norm'] # CBF in mL/100g/min @@ -53,14 +58,14 @@ cbf_raw = results['cbf'] # Raw CBF values # Save results # Important: Image saving formats are limited to asltk API settings # Available formats: .nii, .nii.gz, .mha, .nrrd (see save_image documentation) -save_image(cbf_map, './results/cbf_map.nii.gz') -save_image(att_map, './results/att_map.nii.gz') +cbf_map.save('./results/cbf_map.nii.gz') +att_map.save('./results/att_map.nii.gz') # Step 6: Quality assessment -print(f"CBF range: {cbf_map.min():.1f} - {cbf_map.max():.1f} mL/100g/min") -print(f"Mean CBF: {np.mean(cbf_map[brain_mask > 0]):.1f} mL/100g/min") -print(f"ATT range: {att_map.min():.0f} - {att_map.max():.0f} ms") -print(f"Mean ATT: {np.mean(att_map[brain_mask > 0]):.0f} ms") +print(f"CBF range: {cbf_map.get_as_numpy().min():.1f} - {cbf_map.get_as_numpy().max():.1f} mL/100g/min") +print(f"Mean CBF: {np.mean(cbf_map.get_as_numpy()[brain_mask > 0]):.1f} mL/100g/min") +print(f"ATT range: {att_map.get_as_numpy().min():.0f} - {att_map.get_as_numpy().max():.0f} ms") +print(f"Mean ATT: {np.mean(att_map.get_as_numpy()[brain_mask > 0]):.0f} ms") ``` ### Expected Results @@ -85,7 +90,7 @@ This example demonstrates multi-TE ASL processing for T1 relaxometry and enhance ```python from asltk.asldata import ASLData from asltk.reconstruction import MultiTE_ASLMapping -from asltk.utils import save_image +from asltk.utils.io import ImageIO import numpy as np # Step 1: Load multi-TE ASL data @@ -100,22 +105,12 @@ asl_data = ASLData( # Step 2: Create multi-TE mapper mte_mapper = MultiTE_ASLMapping(asl_data) -# Step 3: Adjust MRI parameters for specific field strength (3T example) -mte_mapper.set_constant(1600.0, 'T1csf') # CSF T1 at 3T -mte_mapper.set_constant(1300.0, 'T1gm') # GM T1 at 3T -mte_mapper.set_constant(800.0, 'T1wm') # WM T1 at 3T - -# Step 4: Set brain mask -brain_mask = load_image('./data/brain_mask.nii.gz') +# Step 3: Set brain mask +brain_mask = ImageIO(image_array='./data/brain_mask.nii.gz') 
mte_mapper.set_brain_mask(brain_mask) # Step 5: Generate multi-TE maps -results = mte_mapper.create_map( - ub=[800.0], # Upper bound for T1blGM (ms) - lb=[50.0], # Lower bound for T1blGM (ms) - par0=[400.0], # Initial guess for T1blGM (ms) - cores=4 -) +results = mte_mapper.create_map() # Step 6: Access results cbf_norm = results['cbf_norm'] # Normalized CBF @@ -123,13 +118,13 @@ att_map = results['att'] # Arterial transit time t1blgm_map = results['t1blgm'] # T1 blood-grey matter exchange # Step 7: Save results -save_image(cbf_norm, './results/mte_cbf.nii.gz') -save_image(att_map, './results/mte_att.nii.gz') -save_image(t1blgm_map, './results/t1blgm.nii.gz') +cbf_norm.save('./results/mte_cbf.nii.gz') +att_map.save('./results/mte_att.nii.gz') +t1blgm_map.save('./results/t1blgm.nii.gz') # Step 8: Analysis -print(f"T1blGM range: {t1blgm_map.min():.0f} - {t1blgm_map.max():.0f} ms") -print(f"Mean T1blGM: {np.mean(t1blgm_map[brain_mask > 0]):.0f} ms") +print(f"T1blGM range: {t1blgm_map.get_as_numpy().min():.0f} - {t1blgm_map.get_as_numpy().max():.0f} ms") +print(f"Mean T1blGM: {np.mean(t1blgm_map.get_as_numpy()[brain_mask > 0]):.0f} ms") # Typical T1blGM values: # Grey matter: 200-600 ms (faster exchange) @@ -142,6 +137,50 @@ print(f"Mean T1blGM: {np.mean(t1blgm_map[brain_mask > 0]):.0f} ms") A T1 blood-GM map example, using the Multi TE ASL imaging acquisition +## Ultra-Long TE ASL Mapping + +This example demonstrates ultra-long TE ASL processing for improved tissue characterization. 
+ +```python +from asltk.asldata import ASLData +from asltk.reconstruction import UltraLongTE_ASLMapping +from asltk.utils.io import ImageIO +import numpy as np + +# Step 1: Load ultra-long TE ASL data +asl_data = ASLData( + pcasl='./data/pcasl_ultra_long_te.nii.gz', + m0='./data/m0.nii.gz', + te_values=[100, 200, 300], # Echo times (ms) + ld_values=[1.8, 1.8, 1.8], + pld_values=[0.8, 1.8, 2.8] +) + +# Step 2: Create ultra-long TE mapper +ulte_mapper = UltraLongTE_ASLMapping(asl_data) + +# Step 3: Set brain mask (optional but recommended) +brain_mask = ImageIO(image_array='./data/brain_mask.nii.gz') +ulte_mapper.set_brain_mask(brain_mask) + +# Step 4: Generate ultra-long TE maps +results = ulte_mapper.create_map() + +# Step 5: Access results +cbf_norm = results['cbf_norm'] # Normalized CBF +att_map = results['att'] # Arterial transit time +t1csfgm_map = results['t1csfgm'] # T1 CSF-grey matter exchange + +# Step 6: Save results +cbf_norm.save('./results/ulte_cbf.nii.gz') +att_map.save('./results/ulte_att.nii.gz') +t1csfgm_map.save('./results/t1csfgm.nii.gz') + +# Step 7: Analysis +print(f"T1csfGM range: {t1csfgm_map.get_as_numpy().min():.0f} - {t1csfgm_map.get_as_numpy().max():.0f} ms") +print(f"Mean T1csfGM: {np.mean(t1csfgm_map.get_as_numpy()[brain_mask.get_as_numpy() > 0]):.0f} ms") +``` + ## Diffusion-Weighted ASL Analysis This example shows multi-DW ASL processing for compartment analysis and diffusion characterization. 
@@ -149,7 +188,7 @@ This example shows multi-DW ASL processing for compartment analysis and diffusio ```python from asltk.asldata import ASLData from asltk.reconstruction import MultiDW_ASLMapping -from asltk.utils import save_image +from asltk.utils.io import ImageIO, clone_image import numpy as np # Step 1: Load multi-DW ASL data @@ -165,9 +204,9 @@ asl_data = ASLData( mdw_mapper = MultiDW_ASLMapping(asl_data) # Step 3: Set conservative brain mask (important for processing time) -brain_mask = load_image('./data/brain_mask.nii.gz') +brain_mask = ImageIO(image_array='./data/brain_mask.nii.gz') # Create more conservative mask for initial testing -conservative_mask = brain_mask.copy() +conservative_mask = clone_image(brain_mask).get_as_numpy() conservative_mask[0:2, :, :] = 0 # Remove edge slices conservative_mask[-2:, :, :] = 0 # Remove edge slices mdw_mapper.set_brain_mask(conservative_mask) @@ -176,11 +215,7 @@ print(f"Processing {np.sum(conservative_mask)} voxels (reduced from {np.sum(brai # Step 4: Generate multi-DW maps (this may take 30+ minutes) print("Starting multi-DW processing... 
(this may take a while)") -results = mdw_mapper.create_map( - lb=[0.1, 1e-6, 0.1, 1e-7], # Lower bounds: [A1, D1, A2, D2] - ub=[2.0, 1e-3, 2.0, 1e-4], # Upper bounds: [A1, D1, A2, D2] - par0=[0.8, 5e-5, 0.3, 1e-5] # Initial guess: [A1, D1, A2, D2] -) +results = mdw_mapper.create_map() # Step 5: Access compartment results cbf_norm = results['cbf_norm'] # Normalized CBF @@ -192,19 +227,19 @@ D2_map = results['D2'] # Slow compartment diffusion kw_map = results['kw'] # Water exchange parameter # Step 6: Save all results -save_image(cbf_norm, './results/mdw_cbf.nii.gz') -save_image(att_map, './results/mdw_att.nii.gz') -save_image(A1_map, './results/A1_fast_compartment.nii.gz') -save_image(D1_map, './results/D1_fast_diffusion.nii.gz') -save_image(A2_map, './results/A2_slow_compartment.nii.gz') -save_image(D2_map, './results/D2_slow_diffusion.nii.gz') -save_image(kw_map, './results/water_exchange.nii.gz') +cbf_norm.save('./results/mdw_cbf.nii.gz') +att_map.save('./results/mdw_att.nii.gz') +A1_map.save('./results/A1_fast_compartment.nii.gz') +D1_map.save('./results/D1_fast_diffusion.nii.gz') +A2_map.save('./results/A2_slow_compartment.nii.gz') +D2_map.save('./results/D2_slow_diffusion.nii.gz') +kw_map.save('./results/water_exchange.nii.gz') # Step 7: Analysis and interpretation mask_indices = conservative_mask > 0 -print(f"Fast diffusion (D1) range: {D1_map[mask_indices].min():.2e} - {D1_map[mask_indices].max():.2e} mm²/s") -print(f"Slow diffusion (D2) range: {D2_map[mask_indices].min():.2e} - {D2_map[mask_indices].max():.2e} mm²/s") -print(f"Amplitude ratio (A1/A2): {np.mean(A1_map[mask_indices]/A2_map[mask_indices]):.2f}") +print(f"Fast diffusion (D1) range: {D1_map.get_as_numpy()[mask_indices].min():.2e} - {D1_map.get_as_numpy()[mask_indices].max():.2e} mm²/s") +print(f"Slow diffusion (D2) range: {D2_map.get_as_numpy()[mask_indices].min():.2e} - {D2_map.get_as_numpy()[mask_indices].max():.2e} mm²/s") +print(f"Amplitude ratio (A1/A2): 
{np.mean(A1_map.get_as_numpy()[mask_indices]/A2_map.get_as_numpy()[mask_indices]):.2f}") # Typical interpretations: # D1 > D2: Fast compartment likely represents intravascular component @@ -219,7 +254,8 @@ This example demonstrates a complete ASL analysis pipeline comparing different r ```python from asltk.asldata import ASLData from asltk.reconstruction import CBFMapping, MultiTE_ASLMapping -from asltk.utils import load_image, save_image, collect_data_volumes +from asltk.utils.io import ImageIO +from asltk.utils.image_manipulation import collect_data_volumes import numpy as np # Step 1: Load comprehensive ASL dataset @@ -232,8 +268,8 @@ asl_data = ASLData( ) # Step 2: Data exploration -print(f"ASL data shape: {asl_data('pcasl').shape}") -print(f"M0 data shape: {asl_data('m0').shape}") +print(f"ASL data shape: {asl_data('pcasl').get_as_numpy().shape}") +print(f"M0 data shape: {asl_data('m0').get_as_numpy().shape}") print(f"LD values: {asl_data.get_ld()} s") print(f"PLD values: {asl_data.get_pld()} s") print(f"TE values: {asl_data.get_te()} ms") @@ -245,12 +281,12 @@ print(f"Extracted {len(volumes)} individual volumes") # Step 3: Basic CBF mapping print("\n=== Basic CBF Mapping ===") cbf_mapper = CBFMapping(asl_data) -brain_mask = load_image('./data/brain_mask.nii.gz') +brain_mask = ImageIO(image_array='./data/brain_mask.nii.gz') cbf_mapper.set_brain_mask(brain_mask) basic_results = cbf_mapper.create_map(cores=4) -basic_cbf = basic_results['cbf_norm'] -basic_att = basic_results['att'] +basic_cbf = basic_results['cbf_norm'].get_as_numpy() +basic_att = basic_results['att'].get_as_numpy() print(f"Basic CBF - Mean: {np.mean(basic_cbf[brain_mask > 0]):.1f} mL/100g/min") print(f"Basic ATT - Mean: {np.mean(basic_att[brain_mask > 0]):.0f} ms") @@ -269,33 +305,33 @@ mte_results = mte_mapper.create_map( cores=4 ) -mte_cbf = mte_results['cbf_norm'] -mte_t1blgm = mte_results['t1blgm'] +mte_cbf = mte_results['cbf_norm'].get_as_numpy() +mte_t1blgm = 
mte_results['t1blgm'].get_as_numpy() print(f"Multi-TE CBF - Mean: {np.mean(mte_cbf[brain_mask > 0]):.1f} mL/100g/min") print(f"T1blGM - Mean: {np.mean(mte_t1blgm[brain_mask > 0]):.0f} ms") # Step 5: Save all results with organized naming -save_image(basic_cbf, './results/basic_cbf.nii.gz') -save_image(basic_att, './results/basic_att.nii.gz') -save_image(mte_cbf, './results/multite_cbf.nii.gz') -save_image(mte_t1blgm, './results/t1blgm_map.nii.gz') +basic_results['cbf_norm'].save('./results/basic_cbf.nii.gz') +basic_results['att'].save('./results/basic_att.nii.gz') +mte_results['cbf_norm'].save('./results/multite_cbf.nii.gz') +mte_results['t1blgm'].save('./results/t1blgm_map.nii.gz') # Step 6: Comparison analysis print("\n=== Method Comparison ===") -cbf_difference = mte_cbf - basic_cbf +cbf_difference = mte_cbf - basic_cbf print(f"CBF difference (Multi-TE - Basic): {np.mean(cbf_difference[brain_mask > 0]):.1f} ± {np.std(cbf_difference[brain_mask > 0]):.1f} mL/100g/min") # Step 7: Regional analysis (example with simple ROIs) # Create simple GM/WM masks based on CBF thresholds -gm_mask = (basic_cbf > 40) & (brain_mask > 0) # High CBF regions (GM) -wm_mask = (basic_cbf <= 40) & (basic_cbf > 10) & (brain_mask > 0) # Lower CBF regions (WM) +gm_mask = (basic_cbf > 40) & (brain_mask.get_as_numpy() > 0) # High CBF regions (GM) +wm_mask = (basic_cbf <= 40) & (basic_cbf > 10) & (brain_mask.get_as_numpy() > 0) # Lower CBF regions (WM) print(f"\nRegional Analysis:") -print(f"GM CBF: {np.mean(basic_cbf[gm_mask]):.1f} ± {np.std(basic_cbf[gm_mask]):.1f} mL/100g/min") -print(f"WM CBF: {np.mean(basic_cbf[wm_mask]):.1f} ± {np.std(basic_cbf[wm_mask]):.1f} mL/100g/min") -print(f"GM T1blGM: {np.mean(mte_t1blgm[gm_mask]):.0f} ± {np.std(mte_t1blgm[gm_mask]):.0f} ms") -print(f"WM T1blGM: {np.mean(mte_t1blgm[wm_mask]):.0f} ± {np.std(mte_t1blgm[wm_mask]):.0f} ms") +print(f"GM CBF: {np.mean(basic_cbf[gm_mask]):.1f} ± 
{np.std(basic_cbf[gm_mask]):.1f} mL/100g/min") +print(f"WM CBF: {np.mean(basic_cbf[wm_mask]):.1f} ± {np.std(basic_cbf[wm_mask]):.1f} mL/100g/min") +print(f"GM T1blGM: {np.mean(mte_t1blgm[gm_mask]):.0f} ± {np.std(mte_t1blgm[gm_mask]):.0f} ms") +print(f"WM T1blGM: {np.mean(mte_t1blgm[wm_mask]):.0f} ± {np.std(mte_t1blgm[wm_mask]):.0f} ms") print(f"\nResults saved to ./results/ directory") ``` @@ -307,8 +343,9 @@ print(f"\nResults saved to ./results/ directory") 1. **Use Brain Masks**: Always use brain masks to reduce processing time and improve results quality. ```python # Conservative mask for initial testing - conservative_mask = brain_mask.copy() - conservative_mask[brain_mask < np.percentile(brain_mask[brain_mask > 0], 50)] = 0 + brain_mask_array = brain_mask.get_as_numpy() + conservative_mask = brain_mask_array.copy() + conservative_mask[brain_mask_array < np.percentile(brain_mask_array[brain_mask_array > 0], 50)] = 0 ``` 2. **Parallel Processing**: Adjust CPU cores based on your system: @@ -318,6 +355,10 @@ print(f"\nResults saved to ./results/ directory") use_cores = max(1, max_cores - 2) # Leave 2 cores for system ``` +!!! tip + The `asltk` library is optimized for multi-core processing. Use the `cores` parameter in mapping functions to speed up computations. If you do not want to select the number of `cores` to be used in the reconstruction processing, then choose the `'auto'` option. On the other hand, if there is a clear necessity to maintain single-core processing, set `cores=1`. + + 3. **Memory Management**: For large datasets, process in chunks or use conservative masks first. ### Parameter Selection @@ -337,8 +378,16 @@ print(f"\nResults saved to ./results/ directory") - `A1, A2`: Amplitude parameters (0.1-2.0) - `D1, D2`: Diffusion parameters (1e-7 to 1e-3 mm²/s) +4. 
**Ultra-Long TE Parameters**: + - `T2csf=1500` (ms): Assumed for healthy subjects at 3T + - `T2bl=100` (ms): Assumed for healthy subjects at 3T + ### Quality Assessment +!!! note + The following example checks use the NumPy data representation + obtained from the `ImageIO` module. + 1. **CBF Range Check**: ```python # Typical ranges for healthy adults @@ -362,8 +411,8 @@ print(f"\nResults saved to ./results/ directory") ### Troubleshooting 1. **Low SNR**: Increase spatial smoothing or use larger brain masks -2. **Long Processing Times**: Use smaller brain masks or reduce CPU cores +2. **Long Processing Times**: Use smaller brain masks or increase CPU cores 3. **Unrealistic Values**: Check input parameters and data quality 4. **Convergence Issues**: Adjust initial guess parameters or bounds -This documentation provides a comprehensive guide to using asltk for various ASL processing workflows. Each example is designed to be practical and adaptable to different research needs and datasets. \ No newline at end of file +In general, these tips and best practices provide a comprehensive guide to using asltk for various ASL processing workflows. Each example is designed to be practical and adaptable to different research needs and datasets. \ No newline at end of file diff --git a/tests/reconstruction/test_cbf_mapping.py b/tests/reconstruction/test_cbf_mapping.py index da69dff..ea7a253 100644 --- a/tests/reconstruction/test_cbf_mapping.py +++ b/tests/reconstruction/test_cbf_mapping.py @@ -183,7 +183,7 @@ def test_cbf_raise_error_cores_not_valid(core_value): assert ( e.value.args[0] - == 'Number of proecess must be at least 1 and less than maximum cores availble.' + == 'Number of CPU cores must be at least 1 and less than maximum cores available.' )