diff --git a/.circleci/config.yml b/.circleci/config.yml
index b5521a3..5384ed2 100644
--- a/.circleci/config.yml
+++ b/.circleci/config.yml
@@ -32,7 +32,7 @@ jobs:
           paths:
             - src/pySPFM
       - restore_cache: # ensure this step occurs *before* installing dependencies
-          key: deps1-{{ checksum "setup.cfg" }}-{{ checksum "setup.py" }}-{{ "<< parameters.PYTHON >>" }}
+          key: conda-{{ checksum "setup.cfg" }}-{{ checksum "setup.py" }}-{{ "<< parameters.PYTHON >>" }}
       - run: # will overwrite pySPFM installation each time
           name: Generate environment
           command: |
@@ -46,7 +46,7 @@ jobs:
             fi
             python setup.py install --user
       - save_cache: # environment cache tied to requirements
-          key: deps1-{{ checksum "setup.cfg" }}-{{ checksum "setup.py" }}-{{ "<< parameters.PYTHON >>" }}
+          key: conda-{{ checksum "setup.cfg" }}-{{ checksum "setup.py" }}-{{ "<< parameters.PYTHON >>" }}
           paths:
             - "/opt/conda/envs/py<< parameters.PYTHON >>_env"
 
@@ -61,7 +61,7 @@ jobs:
       - attach_workspace: # get pySPFM
          at: /tmp
       - restore_cache: # load environment
-          key: deps1-{{ checksum "setup.cfg" }}-{{ checksum "setup.py" }}-{{ "<< parameters.PYTHON >>" }}
+          key: conda-{{ checksum "setup.cfg" }}-{{ checksum "setup.py" }}-{{ "<< parameters.PYTHON >>" }}
       - run:
          name: Run tests and compile coverage
          command: |
@@ -86,7 +86,7 @@ jobs:
      - attach_workspace: # get pySPFM
          at: /tmp
      - restore_cache: # load environment
-          key: deps1-{{ checksum "setup.cfg" }}-{{ checksum "setup.py" }}-{{ "<< parameters.PYTHON >>" }}
+          key: conda-{{ checksum "setup.cfg" }}-{{ checksum "setup.py" }}-{{ "<< parameters.PYTHON >>" }}
      - run:
          name: Run tests and compile coverage
          command: |
@@ -108,7 +108,7 @@ jobs:
      - attach_workspace: # get pySPFM
          at: /tmp
      - restore_cache: # load environment
-          key: deps1-{{ checksum "setup.cfg" }}-{{ checksum "setup.py" }}-{{ "3.9" }}
+          key: conda-{{ checksum "setup.cfg" }}-{{ checksum "setup.py" }}-{{ "3.9" }}
      - run:
          name: Build documentation
          command: |
@@ -128,7 +128,7 @@ jobs:
      - attach_workspace: # get pySPFM
          at: /tmp
      - restore_cache: # load environment
-          key: deps1-{{ checksum "setup.cfg" }}-{{ checksum "setup.py" }}-{{ "3.9" }}
+          key: conda-{{ checksum "setup.cfg" }}-{{ checksum "setup.py" }}-{{ "3.9" }}
      - run:
          name: Linting
          command: |
diff --git a/pySPFM/deconvolution/hrf_generator.py b/pySPFM/deconvolution/hrf_generator.py
index c7c29b9..6e6d29e 100644
--- a/pySPFM/deconvolution/hrf_generator.py
+++ b/pySPFM/deconvolution/hrf_generator.py
@@ -3,211 +3,168 @@
 import subprocess
 
 import numpy as np
-import scipy.io
-import scipy.stats
+from nilearn.glm.first_level import glover_hrf, spm_hrf
 
 LGR = logging.getLogger("GENERAL")
 RefLGR = logging.getLogger("REFERENCES")
 
 
-def hrf_linear(TR, p):
-    """Generate HRF.
-
-    Parameters
-    ----------
-    TR : float
-        TR of the acquisition.
-    p : list
-        Input parameters of the response function (two gamma functions).
-                                                              defaults
-                                                             (seconds)
-        p[1] - delay of response (relative to onset)             6
-        p[2] - delay of undershoot (relative to onset)          16
-        p[3] - dispersion of response                            1
-        p[4] - dispersion of undershoot                          1
-        p[5] - ratio of response to undershoot                   6
-        p[6] - onset (seconds)                                   0
-        p[7] - length of kernel (seconds)                       32
-
-    Returns
-    -------
-    hrf : ndarray
-        A hemodynamic response function (HRF).
-
-    Notes
-    -----
-    Based on the spm_hrf function in SPM8
-    Written in R by Cesar Caballero Gaudes
-    Translated into Python by Eneko Urunuela
-    """
-    # global parameter
-    # --------------------------------------------------------------------------
-    fMRI_T = 16
-
-    # modelled hemodynamic response function - {mixture of Gammas}
-    # --------------------------------------------------------------------------
-    dt = TR / fMRI_T
-    u = np.arange(0, p[6] / dt + 1, 1) - p[5] / dt
-    a1 = p[0] / p[2]
-    b1 = 1 / p[3]
-    a2 = p[1] / p[3]
-    b2 = 1 / p[3]
-
-    hrf = (
-        scipy.stats.gamma.pdf(u * dt, a1, scale=b1)
-        - scipy.stats.gamma.pdf(u * dt, a2, scale=b2) / p[4]
-    ) / dt
-    time_axis = np.arange(0, int(p[6] / TR + 1), 1) * fMRI_T
-    hrf = hrf[time_axis]
-    min_hrf = 1e-9 * min(hrf[hrf > 10 * np.finfo(float).eps])
-
-    if min_hrf < 10 * np.finfo(float).eps:
-        min_hrf = 10 * np.finfo(float).eps
-
-    hrf[hrf == 0] = min_hrf
-
-    return hrf
-
-
-def hrf_afni(TR, lop_hrf="SPMG1"):
-    """Generate HRF with AFNI's 3dDeconvolve.
-
-    Parameters
-    ----------
-    TR : float
-        TR of the acquisition.
-    lop_hrf : str
-        3dDeconvolve option to select HRF shape, by default "SPMG1"
-
-    Returns
-    -------
-    hrf : ndarray
-        A hemodynamic response function (HRF).
-
-    Notes
-    -----
-    AFNI installation is needed as it runs 3dDeconvolve on the terminal wtih subprocess.
-    """
-    dur_hrf = 8
-    last_hrf_sample = 1
-    # Increases duration until last HRF sample is zero
-    while last_hrf_sample != 0:
-        dur_hrf = 2 * dur_hrf
-        hrf_command = (
-            "3dDeconvolve -x1D_stop -nodata %d %f -polort -1 -num_stimts 1 -stim_times 1 "
-            "'1D:0' '%s' -quiet -x1D stdout: | 1deval -a stdin: -expr 'a'"
-        ) % (dur_hrf, TR, lop_hrf)
-        hrf_tr_str = subprocess.check_output(
-            hrf_command, shell=True, universal_newlines=True
-        ).splitlines()
-        hrf = np.array([float(i) for i in hrf_tr_str])
-        last_hrf_sample = hrf[len(hrf) - 1]
-        if last_hrf_sample != 0:
-            LGR.info(
-                "Duration of HRF was not sufficient for specified model. Doubling duration "
-                "and computing again."
-            )
-
-    # Removes tail of zero samples
-    while last_hrf_sample == 0:
-        hrf = hrf[0 : len(hrf) - 1]
-        last_hrf_sample = hrf[len(hrf) - 1]
-
-    return hrf
-
-
 class HRFMatrix:
     """A class for generating an HRF matrix.
 
     Parameters
     ----------
-    TR : float
-        TR of the acquisition, by default 2
-    TE : list
+    te : list
         Values of TE in ms, by default None
-    n_scans : int
-        Number of volumes in acquisition, by default 200
-    r2only : bool
-        Whether to only consider R2* in the signal model, by default True
-    is_afni : bool
-        Whether to use AFNI's 3dDeconvolve to generate HRF matrix, by default True
-    lop_hrf : str
-        3dDeconvolve option to select HRF shape, by default "SPMG1"
+    model : str
+        Model to use for the HRF. It can be "spm", "glover", or the path to a custom
+        HRF file with the ".1D" or ".txt" extension, by default "spm"
     block : bool
         Whether to use the block model in favor of the spike model, by default false
     """
 
     def __init__(
         self,
-        TR=2,
-        TE=None,
-        n_scans=200,
-        r2only=True,
-        is_afni=True,
-        lop_hrf="SPMG1",
+        te=None,
+        model="spm",
         block=True,
     ):
-        self.TR = TR
-        self.TE = TE
-        self.n_scans = n_scans
-        self.r2only = r2only
-        self.lop_hrf = lop_hrf
-        self.is_afni = is_afni
+        # If te values are higher than 1, then assume they are in ms and convert to s.
+        # If te values are lower than 1, then assume they are already in s.
+        if te is not None:
+            if max(te) > 1:
+                te = [i / 1000 for i in te]
+        self.te = te
+
+        self.model = model
         self.block = block
 
-    def generate_hrf(self):
+    def generate_hrf(self, tr, n_scans):
         """Generate HRF matrix.
 
+        Parameters
+        ----------
+        tr : float
+            TR of the acquisition.
+        n_scans : int
+            Number of scans.
+
         Returns
         -------
-        self
+        self.hrf_ : array_like
+            A hemodynamic response function (HRF).
         """
-        if self.is_afni:
-            hrf_SPM = hrf_afni(self.TR, self.lop_hrf)
-        else:
-            p = [6, 16, 1, 1, 6, 0, 32]
-            hrf_SPM = hrf_linear(self.TR, p)
-
-        self.L_hrf = len(hrf_SPM)  # Length
-        mahrf = max(abs(hrf_SPM))  # Max value
-        filler = np.zeros(self.n_scans - hrf_SPM.shape[0], dtype=int)
-        hrf_SPM = np.append(hrf_SPM, filler)  # Fill up array with zeros until n_scans
-
-        temp = hrf_SPM
-        for i in range(self.n_scans - 1):
-            foo = np.append(np.zeros(i + 1), hrf_SPM[0 : (len(hrf_SPM) - i - 1)])
-            temp = np.column_stack((temp, foo))
+        # Read custom HRF from file if self.model ends in .1D or .txt
+        if self.model.endswith(".1D") or self.model.endswith(".txt"):
+            hrf = np.loadtxt(self.model)
+
+            # Make sure that the HRF is not longer than the number of scans
+            # If it is, then raise an error
+            if len(hrf) > n_scans:
+                raise ValueError(
+                    "HRF is longer than the number of scans. "
+                    "Please make sure that your custom HRF is not longer than the "
+                    "number of scans."
+                )
+        else:
+            # Get HRF from nilearn
+            if self.model == "spm":
+                hrf = spm_hrf(tr, oversampling=1, time_length=n_scans * tr)
+            elif self.model == "glover":
+                hrf = glover_hrf(tr, oversampling=1, time_length=n_scans * tr)
+            else:
+                raise ValueError(
+                    "Model must be either 'spm', 'glover' or a custom '.1D' or '.txt' "
+                    "file, not %s" % self.model
+                )
+
+        # Calculate maximum HRF value
+        max_val = max(abs(hrf))
+
+        # Generate HRF matrix
+        hrf_mtx = hrf
+        for i in range(n_scans - 1):
+            foo = np.append(np.zeros(i + 1), hrf[0 : (len(hrf) - i - 1)])
+            hrf_mtx = np.column_stack((hrf_mtx, foo))
+
+        # Normalize HRF matrix
+        hrf_mtx = hrf_mtx / max_val
+
+        # Concatenate and scale HRFs for multi-echo,
+        # leave it as it is for single-echo.
+        if len(self.te) > 1:
+            # Add integrator if necessary
+            if self.block:
+                hrf_mtx_te = -self.te[0] * np.dot(hrf_mtx, np.tril(np.ones(n_scans)))
+            else:
+                hrf_mtx_te = -self.te[0] * hrf_mtx
+
+            # Concatenate and scale HRFs for multi-echo
+            for teidx in range(len(self.te) - 1):
+                # Add integrator if necessary
+                if self.block:
+                    hrf_mtx_te = np.vstack(
+                        (
+                            hrf_mtx_te,
+                            -self.te[teidx + 1] * np.dot(hrf_mtx, np.tril(np.ones(n_scans))),
+                        )
+                    )
+                else:
+                    hrf_mtx_te = np.vstack((hrf_mtx_te, -self.te[teidx + 1] * hrf_mtx))
 
-        if len(self.TE) > 1:
-            tempTE = -self.TE[0] * temp
-            for teidx in range(len(self.TE) - 1):
-                tempTE = np.vstack((tempTE, -self.TE[teidx + 1] * temp))
+            self.hrf_ = hrf_mtx_te
         else:
-            tempTE = temp
+            if self.block:
+                self.hrf_ = np.dot(hrf_mtx, np.tril(np.ones(n_scans)))
+            else:
+                self.hrf_ = hrf_mtx
 
-        if self.r2only:
-            self.hrf = tempTE
+        return self
 
-        self.hrf_norm = self.hrf / mahrf
-
-        if self.block:
-            if len(self.TE) > 1:
-                for teidx in range(len(self.TE)):
-                    temp = self.hrf[
-                        teidx * self.n_scans : (teidx + 1) * self.n_scans - 1, :
-                    ].copy()
-                    self.hrf[teidx * self.n_scans : (teidx + 1) * self.n_scans - 1, :] = np.dot(
-                        temp, np.tril(np.ones(self.n_scans))
-                    )
-                    temp = self.hrf_norm[
-                        teidx * self.n_scans : (teidx + 1) * self.n_scans - 1, :
-                    ].copy()
-                    self.hrf_norm[
-                        teidx * self.n_scans : (teidx + 1) * self.n_scans - 1, :
-                    ] = np.dot(temp, np.tril(np.ones(self.n_scans)))
-            else:
-                self.hrf = np.dot(self.hrf, np.tril(np.ones(self.n_scans)))
-                self.hrf_norm = np.dot(self.hrf_norm, np.tril(np.ones(self.n_scans)))
-
-        return self
 
 
+def hrf_afni(tr, hrf_model="SPMG1"):
+    """Generate HRF with AFNI's 3dDeconvolve.
+
+    Parameters
+    ----------
+    tr : float
+        TR of the acquisition.
+    hrf_model : str
+        3dDeconvolve option to select HRF shape, by default "SPMG1"
+
+    Returns
+    -------
+    hrf : array_like
+        A hemodynamic response function (HRF).
+
+    Notes
+    -----
+    AFNI installation is needed as it runs 3dDeconvolve on the terminal with subprocess.
+    """
+    dur_hrf = 8
+    last_hrf_sample = 1
+    # Increases duration until last HRF sample is zero
+    while last_hrf_sample != 0:
+        dur_hrf = 2 * dur_hrf
+        hrf_command = (
+            "3dDeconvolve -x1D_stop -nodata %d %f -polort -1 -num_stimts 1 -stim_times 1 "
+            "'1D:0' '%s' -quiet -x1D stdout: | 1deval -a stdin: -expr 'a'"
+        ) % (dur_hrf, tr, hrf_model)
+        hrf_tr_str = subprocess.check_output(
+            hrf_command, shell=True, universal_newlines=True
+        ).splitlines()
+        hrf = np.array([float(i) for i in hrf_tr_str])
+        last_hrf_sample = hrf[len(hrf) - 1]
+        if last_hrf_sample != 0:
+            LGR.info(
+                "Duration of HRF was not sufficient for specified model. Doubling duration "
+                "and computing again."
+            )
+
+    # Removes tail of zero samples
+    while last_hrf_sample == 0:
+        hrf = hrf[0 : len(hrf) - 1]
+        last_hrf_sample = hrf[len(hrf) - 1]
+
+    return hrf
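As a sanity check on the API above, a minimal usage sketch mirroring the updated test_hrf_generator.py (the shape comments are my own annotations, not values asserted by the tests):

    import numpy as np
    from pySPFM.deconvolution.hrf_generator import HRFMatrix

    # Single-echo spike model: each column of hrf_ is the SPM HRF shifted by one scan.
    hrf = HRFMatrix(te=[0], block=False).generate_hrf(tr=1, n_scans=168).hrf_
    print(hrf.shape)  # (168, 168)

    # Multi-echo block model: TEs given in ms are converted to s in __init__,
    # and one scaled HRF matrix is stacked per echo.
    te = np.array([18.4, 32.0, 48.4])
    hrf_me = HRFMatrix(te=te, block=True, model="glover").generate_hrf(tr=1, n_scans=168).hrf_
    print(hrf_me.shape)  # (3 * 168, 168)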
diff --git a/pySPFM/io.py b/pySPFM/io.py
index 1c662e6..fa6ef62 100644
--- a/pySPFM/io.py
+++ b/pySPFM/io.py
@@ -1,10 +1,14 @@
 """I/O."""
+import json
+import os.path as op
 from subprocess import run
 
 import nibabel as nib
 from nilearn import masking
 from nilearn.input_data import NiftiLabelsMasker
 
+from pySPFM.utils import get_keyword_description
+
 
 def read_data(data_fn, mask_fn, is_atlas=False):
     """Read data from filename and apply mask.
@@ -73,7 +77,7 @@ def update_header(filename, command):
     run(f'3dNotes -h "{command}" {filename}', shell=True)
 
 
-def write_data(data, filename, mask, header, command, is_atlas=False):
+def write_data(data, filename, mask, header, command, is_atlas=False, use_bids=False):
     """Write data into NIFTI file.
 
     Parameters
@@ -95,4 +99,46 @@ def write_data(data, filename, mask, header, command, is_atlas=False):
     reshaped = reshape_data(data, mask)
     out_img = nib.Nifti1Image(reshaped.get_fdata(), None, header=header)
     out_img.to_filename(filename)
-    update_header(filename, command)
+
+    # Update header with AFNI if BIDS is not required
+    if not use_bids:
+        update_header(filename, command)
+
+
+def write_json(keywords, out_dir):
+    """Write dataset description into JSON file.
+
+    Parameters
+    ----------
+    keywords : list
+        List of keywords to be added to the JSON file.
+    out_dir : str or path
+        Path to the output directory.
+    """
+    # Create dictionary with all the information
+    out_dict = {}
+
+    # Iterate over all the keywords and add their description and method to the dictionary
+    for keyword in keywords:
+        out_dict[keyword] = {}
+        out_dict[keyword]["description"] = get_keyword_description(keyword)
+        out_dict[keyword]["method"] = "pySPFM"
+
+        # Add units
+        if "bold" in keyword:
+            out_dict[keyword]["units"] = "percent"
+        elif "activityInducing" in keyword:
+            # Count how many keywords have "bold" in their name; more than one
+            # "bold" output means multi-echo data, where the activity-inducing
+            # signal reflects R2* changes and thus has units of s-1
+            bold_count = 0
+            for k in keywords:
+                if "bold" in k:
+                    bold_count += 1
+            if bold_count > 1:
+                out_dict[keyword]["units"] = "s-1"
+
+    # Create output filename
+    outname = "dataset_description.json"
+
+    # Write json file
+    with open(op.join(out_dir, outname), "w") as f:
+        f.write(json.dumps(out_dict, indent=4))
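For reference, a short sketch of what the new sidecar looks like; the keyword list and output path here are hypothetical and simply mimic what the workflow collects in out_bids_keywords:

    from pySPFM.io import write_json

    # Hypothetical single-echo keyword list
    keywords = ["innovation", "activityInducing", "denoised_bold", "MAD"]
    write_json(keywords, "/tmp/out")

    # /tmp/out/dataset_description.json then holds one entry per keyword, e.g.:
    # "denoised_bold": {
    #     "description": "Deconvolution-denoised activity-related signal; ...",
    #     "method": "pySPFM",
    #     "units": "percent"
    # }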
+ """ + # Create dictionary with all the information + out_dict = {} + + # Iterate over all the keywords and add their description and method to the dictionary + for keyword in keywords: + out_dict[keyword] = {} + out_dict[keyword]["description"] = get_keyword_description(keyword) + out_dict[keyword]["method"] = "pySPFM" + + # Add units + if "bold" in keyword: + out_dict[keyword]["units"] = "percent" + elif "activityInducing" in keyword: + # Count how many keywords have "bold" in their name + bold_count = 0 + for k in keywords: + if "bold" in k: + bold_count += 1 + if bold_count > 1: + out_dict[keyword]["units"] = "s-1" + + # Create output filename + outname = "dataset_description.json" + + # Write json file + with open(op.join(out_dir, outname), "w") as f: + f.write(json.dumps(out_dict, indent=4)) diff --git a/pySPFM/tests/conftest.py b/pySPFM/tests/conftest.py index 8383e2c..4ea578f 100644 --- a/pySPFM/tests/conftest.py +++ b/pySPFM/tests/conftest.py @@ -79,13 +79,18 @@ def mask_five_echo(testpath): @pytest.fixture -def hrf_file(testpath): - return fetch_file("gefu4", testpath, "hrf.txt") +def spm_single_echo_block(testpath): + return fetch_file("4k85j", testpath, "spm_single_echo_block.npy") @pytest.fixture -def hrf_linear_file(testpath): - return fetch_file("mkeu2", testpath, "hrf_linear.txt") +def spm_single_echo(testpath): + return fetch_file("zt3pm", testpath, "spm_single_echo.npy") + + +@pytest.fixture +def glover_multi_echo(testpath): + return fetch_file("xke79", testpath, "glover_multi_echo.npy") @pytest.fixture diff --git a/pySPFM/tests/data/lars_integration_outputs.txt b/pySPFM/tests/data/lars_integration_outputs.txt index 80092a3..982be66 100644 --- a/pySPFM/tests/data/lars_integration_outputs.txt +++ b/pySPFM/tests/data/lars_integration_outputs.txt @@ -1,6 +1,6 @@ _references.txt call.sh -test_lars.pySPFM_fitts.nii.gz -test_lars.pySPFM_beta.nii.gz -test_lars.pySPFM_lambda.nii.gz -test_lars.pySPFM_MAD.nii.gz \ No newline at end of file +test_lars_pySPFM_denoised_bold.nii.gz +test_lars_pySPFM_activityInducing.nii.gz +test_lars_pySPFM_lambda.nii.gz +test_lars_pySPFM_MAD.nii.gz \ No newline at end of file diff --git a/pySPFM/tests/data/nih_five_echo_outputs_verbose.txt b/pySPFM/tests/data/nih_five_echo_outputs_verbose.txt index 0be1bc7..b7b38e6 100644 --- a/pySPFM/tests/data/nih_five_echo_outputs_verbose.txt +++ b/pySPFM/tests/data/nih_five_echo_outputs_verbose.txt @@ -1,15 +1,16 @@ _references.txt call.sh -test_me.pySPFM_dr2HRF_E01.nii.gz -test_me.pySPFM_dr2HRF_E02.nii.gz -test_me.pySPFM_dr2HRF_E03.nii.gz -test_me.pySPFM_dr2HRF_E04.nii.gz -test_me.pySPFM_dr2HRF_E05.nii.gz -test_me.pySPFM_DR2.nii.gz -test_me.pySPFM_innovation.nii.gz -test_me.pySPFM_lambda.nii.gz -test_me.pySPFM_MAD_E01.nii.gz -test_me.pySPFM_MAD_E02.nii.gz -test_me.pySPFM_MAD_E03.nii.gz -test_me.pySPFM_MAD_E04.nii.gz -test_me.pySPFM_MAD_E05.nii.gz \ No newline at end of file +test-me_echo-1_desc-denoised_bold.nii.gz +test-me_echo-2_desc-denoised_bold.nii.gz +test-me_echo-3_desc-denoised_bold.nii.gz +test-me_echo-4_desc-denoised_bold.nii.gz +test-me_echo-5_desc-denoised_bold.nii.gz +test-me_desc-activityInducing.nii.gz +test-me_desc-innovation.nii.gz +test-me_desc-stat-lambda_statmap.nii.gz +test-me_desc-echo-1_MAD.nii.gz +test-me_desc-echo-2_MAD.nii.gz +test-me_desc-echo-3_MAD.nii.gz +test-me_desc-echo-4_MAD.nii.gz +test-me_desc-echo-5_MAD.nii.gz +dataset_description.json \ No newline at end of file diff --git a/pySPFM/tests/data/stability_integration_outputs.txt 
diff --git a/pySPFM/tests/data/stability_integration_outputs.txt b/pySPFM/tests/data/stability_integration_outputs.txt
index f53c458..9104a2c 100644
--- a/pySPFM/tests/data/stability_integration_outputs.txt
+++ b/pySPFM/tests/data/stability_integration_outputs.txt
@@ -1,3 +1,3 @@
 _references.txt
 call.sh
-test_stability.pySPFM_AUC.nii.gz
\ No newline at end of file
+test_stability_pySPFM_AUC.nii.gz
\ No newline at end of file
diff --git a/pySPFM/tests/test_hrf_generator.py b/pySPFM/tests/test_hrf_generator.py
index 3294d04..010c45d 100644
--- a/pySPFM/tests/test_hrf_generator.py
+++ b/pySPFM/tests/test_hrf_generator.py
@@ -3,14 +3,20 @@
 from pySPFM.deconvolution import hrf_generator
 
 
-def test_HRF_matrix(hrf_file, hrf_linear_file):
-    hrf_object = hrf_generator.HRFMatrix(TR=1, TE=[0], n_scans=168, block=False)
-    hrf_object.generate_hrf()
-    hrf_loaded = np.loadtxt(hrf_file)
-    assert np.allclose(hrf_object.hrf, hrf_loaded)
-    hrf_linear = hrf_generator.HRFMatrix(TR=1, TE=[0], is_afni=False, n_scans=168, block=False)
-    hrf_linear.generate_hrf()
-    assert np.allclose(hrf_linear.hrf, np.loadtxt(hrf_linear_file))
-    hrf_block = hrf_generator.HRFMatrix(TR=1, TE=[0], n_scans=168, block=True)
-    hrf_block.generate_hrf()
-    assert np.allclose(hrf_block.hrf, np.matmul(hrf_loaded, np.tril(np.ones(hrf_loaded.shape[0]))))
+def test_HRF_matrix(spm_single_echo, spm_single_echo_block, glover_multi_echo):
+    hrf_object = hrf_generator.HRFMatrix(te=[0], block=False)
+    hrf = hrf_object.generate_hrf(tr=1, n_scans=168).hrf_
+    hrf_loaded = np.load(spm_single_echo)
+    assert np.allclose(hrf, hrf_loaded)
+
+    hrf_object = hrf_generator.HRFMatrix(te=[0], block=True)
+    hrf = hrf_object.generate_hrf(tr=1, n_scans=168).hrf_
+    hrf_loaded = np.load(spm_single_echo_block)
+    assert np.allclose(hrf, hrf_loaded)
+
+    te = np.array([18.4, 32.0, 48.4])
+    hrf_object = hrf_generator.HRFMatrix(te=te, block=False, model="glover")
+    hrf = hrf_object.generate_hrf(tr=1, n_scans=168).hrf_
+    hrf_loaded = np.load(glover_multi_echo)
+    assert np.allclose(hrf, hrf_loaded)
diff --git a/pySPFM/tests/test_integration.py b/pySPFM/tests/test_integration.py
index 3ba5f6d..bc17e76 100644
--- a/pySPFM/tests/test_integration.py
+++ b/pySPFM/tests/test_integration.py
@@ -94,7 +94,7 @@ def test_integration_five_echo(skip_integration, mask_five_echo):
         + ["-m"]
         + [mask_five_echo]
         + ["-o"]
-        + ["test_me.pySPFM"]
+        + ["test-me"]
         + ["-tr"]
         + ["2"]
         + ["-d"]
@@ -111,6 +111,7 @@ def test_integration_five_echo(skip_integration, mask_five_echo):
             "--debug",
             "--debias",
             "--block",
+            "--bids",
         ]
     )
     pySPFM_cli._main(args)
@@ -144,7 +145,7 @@ def test_integration_lars(skip_integration, mask_five_echo):
         + ["-m"]
         + [mask_five_echo]
         + ["-o"]
-        + ["test_lars.pySPFM"]
+        + ["test_lars"]
         + ["-tr"]
         + ["2"]
         + ["-d"]
@@ -194,7 +195,7 @@ def test_integration_stability_selection(skip_integration, mask_five_echo):
         + ["-m"]
         + [mask_five_echo]
         + ["-o"]
-        + ["test_stability.pySPFM"]
+        + ["test_stability"]
         + ["-tr"]
         + ["2"]
         + ["-d"]
diff --git a/pySPFM/utils.py b/pySPFM/utils.py
index 850fc8f..8fe60b1 100644
--- a/pySPFM/utils.py
+++ b/pySPFM/utils.py
@@ -69,6 +69,79 @@ def teardown_loggers():
         local_logger.removeHandler(handler)
 
 
+def get_outname(outname, keyword, ext, use_bids=False):
+    """Get the output name.
+
+    Parameters
+    ----------
+    outname : str
+        Name of the output file.
+    keyword : str
+        Keyword added by pySPFM.
+    ext : str
+        Extension of the output file.
+    use_bids : bool, optional
+        Whether the output file is in BIDS format, by default False
+
+    Returns
+    -------
+    outname : str
+        Name of the output file.
+    """
+    if use_bids:
+        outname = f"{outname}_desc-{keyword}.{ext}"
+    else:
+        outname = f"{outname}_pySPFM_{keyword}.{ext}"
+    return outname
+
+
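A quick check of the two naming schemes; the filenames below match the updated integration-test output lists:

    from pySPFM.utils import get_outname

    get_outname("test_lars", "activityInducing", "nii.gz", use_bids=False)
    # -> 'test_lars_pySPFM_activityInducing.nii.gz'

    get_outname("test-me_echo-1", "denoised_bold", "nii.gz", use_bids=True)
    # -> 'test-me_echo-1_desc-denoised_bold.nii.gz'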
+ """ + if use_bids: + outname = f"{outname}_desc-{keyword}.{ext}" + else: + outname = f"{outname}_pySPFM_{keyword}.{ext}" + return outname + + +def get_keyword_description(keyword): + """ + Get the description of the keyword for BIDS sidecar + + + Parameters + ---------- + keyword : str + Keyword added by pySPFM. + + Returns + ------- + keyword_description : str + Description of the keyword. + """ + + if "innovation" in keyword: + keyword_description = ( + "Deconvolution-estimated innovation signal; i.e., the derivative" + "of the activity-inducing signal." + ) + elif "beta" in keyword: + keyword_description = ( + "Deconvolution-estimated activity-inducing signal; i.e., induces BOLD response." + ) + elif "activityInducing" in keyword: + keyword_description = ( + "Deconvolution-estimated activity-inducing signal that represents" + "changes in the R2* component of the multi-echo signal; i.e., induces BOLD response." + ) + elif "bold" in keyword: + keyword_description = ( + "Deconvolution-denoised activity-related signal; i.e., denoised BOLD signal." + ) + elif "lambda" in keyword: + keyword_description = ( + "Map of the regularization parameter lambda used to solve the deconvolution problem." + ) + elif "MAD" in keyword: + keyword_description = ( + "Estimated mean absolute deviation of the noise; i.e., noise level" + "of the signal to be deconvolved." + ) + + return keyword_description + + def dask_scheduler(jobs): """ Checks if the user has a dask_jobqueue configuration file, and if so, diff --git a/pySPFM/workflows/pySPFM.py b/pySPFM/workflows/pySPFM.py index 846b836..4b099f4 100644 --- a/pySPFM/workflows/pySPFM.py +++ b/pySPFM/workflows/pySPFM.py @@ -17,8 +17,8 @@ from pySPFM.deconvolution.select_lambda import select_lambda from pySPFM.deconvolution.spatial_regularization import spatial_tikhonov from pySPFM.deconvolution.stability_selection import stability_selection -from pySPFM.io import read_data, write_data -from pySPFM.utils import dask_scheduler +from pySPFM.io import read_data, write_data, write_json +from pySPFM.utils import dask_scheduler, get_outname LGR = logging.getLogger("GENERAL") RefLGR = logging.getLogger("REFERENCES") @@ -88,6 +88,17 @@ def _get_parser(): help="List with TE of the fMRI data acquisition.", default=[0], ) + optional.add_argument( + "-hrf", + "--hrf", + dest="hrf_model", + type=str, + help=( + "HRF model to use. Default is 'spm'. Options are 'spm', 'glover', or a custom HRF " + "file with the '.1D' or '.txt' extension." + ), + default="spm", + ) optional.add_argument( "-block", "--block", @@ -264,6 +275,18 @@ def _get_parser(): help="Use provided mask as an atlas (default = False).", default=False, ) + optional.add_argument( + "-bids", + "--bids", + dest="use_bids", + action="store_true", + help=( + "Use BIDS-style suffix on the given `output` (default = False). pySPFM assumes that " + "`output` follows the BIDS convention. Not using this option will default to using " + "AFNI to update the header of the output." + ), + default=False, + ) optional.add_argument( "-nsur", "--nsurrogates", @@ -304,6 +327,7 @@ def pySPFM( tr, out_dir, te=[0], + hrf_model="spm", block_model=False, debias=True, group=0.2, @@ -323,6 +347,7 @@ def pySPFM( mu=0.01, tolerance=1e-6, is_atlas=False, + use_bids=False, n_surrogates=50, debug=False, quiet=False, @@ -343,6 +368,9 @@ def pySPFM( Output directory te : list, optional TE values in ms, by default [0], i.e. single-echo + hrf_model : str, optional + HRF model to use, by default 'spm', i.e. SPM's canonical HRF. 
@@ -480,6 +512,9 @@ def pySPFM(
     estimates_spatial = np.empty((n_scans, n_voxels))
     final_estimates = np.empty((n_scans, n_voxels))
 
+    # Initialize list to save keywords used for BIDS compatible outputs
+    out_bids_keywords = []
+
     # Iterate between temporal and spatial regularizations
     client, _ = dask_scheduler(n_jobs)
     for iter_idx in range(max_iter):
@@ -493,9 +528,9 @@ def pySPFM(
 
         # Scatter data to workers if client is not None
         if client is not None:
-            hrf_norm_fut = client.scatter(hrf_norm)
+            hrf_fut = client.scatter(hrf)
         else:
-            hrf_norm_fut = hrf_norm
+            hrf_fut = hrf
 
         if criterion in lars_criteria:
             LGR.info("Solving inverse problem with LARS...")
@@ -504,7 +539,7 @@ def pySPFM(
             futures = []
             for vox_idx in range(n_voxels):
                 fut = delayed_dask(solve_regularization_path, pure=False)(
-                    hrf_norm_fut, data_temp_reg[:, vox_idx], n_lambdas, criterion
+                    hrf_fut, data_temp_reg[:, vox_idx], n_lambdas, criterion
                 )
                 futures.append(fut)
 
@@ -524,7 +559,7 @@ def pySPFM(
             futures = []
             for vox_idx in range(n_voxels):
                 fut = delayed_dask(fista, pure=False)(
-                    hrf_norm_fut,
+                    hrf_fut,
                     data_temp_reg[:, vox_idx],
                     criterion,
                     max_iter_fista,
@@ -555,7 +590,7 @@ def pySPFM(
             # Solve stability regularization
             futures = [
                 delayed_dask(stability_selection)(
-                    hrf_norm_fut,
+                    hrf_fut,
                     data_temp_reg[:, vox_idx],
                     n_lambdas,
                     n_surrogates,
@@ -575,7 +610,8 @@ def pySPFM(
 
             LGR.info("Stability selection finished.")
             LGR.info("Saving AUCs to %s..." % out_dir)
-            output_name = f"{output_filename}_AUC.nii.gz"
+            out_bids_keywords.append("AUC")
+            output_name = get_outname(output_filename, "AUC", "nii.gz", use_bids)
             write_data(
                 auc,
                 os.path.join(out_dir, output_name),
@@ -594,13 +630,13 @@ def pySPFM(
 
         # Convolve with HRF
         if block_model:
             estimates_block = estimates
-            hrf_obj = HRFMatrix(TR=tr, n_scans=n_scans, TE=te, block=False)
-            hrf_norm_fitting = hrf_obj.generate_hrf().hrf_norm
+            hrf_obj = HRFMatrix(te=te, block=False, model=hrf_model)
+            hrf_fitting = hrf_obj.generate_hrf(tr=tr, n_scans=n_scans).hrf_
             estimates_spike = np.dot(np.tril(np.ones(n_scans)), estimates_block)
-            fitts = np.dot(hrf_norm_fitting, estimates_spike)
+            fitts = np.dot(hrf_fitting, estimates_spike)
         else:
             estimates_spike = estimates
-            fitts = np.dot(hrf_norm, estimates_spike)
+            fitts = np.dot(hrf, estimates_spike)
 
         # Perform spatial regularization if a weight is given
         if spatial_weight > 0:
@@ -630,24 +666,32 @@ def pySPFM(
 
     LGR.info("Inverse problem solved.")
 
+    # Update HRF for block model
+    if block_model:
+        hrf_obj = HRFMatrix(te=te, block=False, model=hrf_model)
+        hrf = hrf_obj.generate_hrf(tr=tr, n_scans=n_scans).hrf_
+
     # Perform debiasing step
     if debias:
         LGR.info("Debiasing estimates...")
         if block_model:
-            hrf_obj = HRFMatrix(TR=tr, n_scans=n_scans, TE=te, block=False)
-            hrf_norm = hrf_obj.generate_hrf().hrf_norm
-            estimates_spike = debiasing_block(hrf_norm, data_masked, final_estimates, n_jobs)
-            fitts = np.dot(hrf_norm, estimates_spike)
-        else:
-            estimates_spike, fitts = debiasing_spike(
-                hrf_norm, data_masked, final_estimates, n_jobs
+            estimates_spike = debiasing_block(
+                hrf=hrf, y=data_masked, estimates_matrix=final_estimates
             )
+            fitts = np.dot(hrf, estimates_spike)
+        else:
+            estimates_spike, fitts = debiasing_spike(hrf, data_masked, final_estimates)
+    elif block_model:
+        estimates_spike = np.dot(np.tril(np.ones(n_scans)), estimates_block)
+        fitts = np.dot(hrf, estimates_spike)
 
     LGR.info("Saving results...")
 
+    # Save innovation signal
     if block_model:
         estimates_block = final_estimates
-        output_name = f"{output_filename}_innovation.nii.gz"
+        output_name = get_outname(output_filename, "innovation", "nii.gz", use_bids)
+        out_bids_keywords.append("innovation")
         write_data(
             estimates_block,
             os.path.join(out_dir, output_name),
@@ -655,19 +699,22 @@ def pySPFM(
             data_header,
             command_str,
             is_atlas=is_atlas,
+            use_bids=use_bids,
         )
 
         if not debias:
-            hrf_obj = HRFMatrix(TR=tr, n_scans=n_scans, TE=te, block=False)
-            hrf_norm = hrf_obj.generate_hrf().hrf_norm
+            hrf_obj = HRFMatrix(te=te, block=False, model=hrf_model)
+            hrf = hrf_obj.generate_hrf(tr=tr, n_scans=n_scans).hrf_
             estimates_spike = np.dot(np.tril(np.ones(n_scans)), estimates_block)
-            fitts = np.dot(hrf_norm, estimates_spike)
+            fitts = np.dot(hrf, estimates_spike)
 
     # Save activity-inducing signal
     if n_te == 1:
-        output_name = f"{output_filename}_beta.nii.gz"
+        output_name = get_outname(output_filename, "activityInducing", "nii.gz", use_bids)
+        out_bids_keywords.append("activityInducing")
     elif n_te > 1:
-        output_name = f"{output_filename}_DR2.nii.gz"
+        output_name = get_outname(output_filename, "activityInducing", "nii.gz", use_bids)
+        out_bids_keywords.append("activityInducing")
     write_data(
         estimates_spike,
         os.path.join(out_dir, output_name),
@@ -675,11 +722,13 @@ def pySPFM(
         data_header,
         command_str,
         is_atlas=is_atlas,
+        use_bids=use_bids,
     )
 
     # Save fitts
     if n_te == 1:
-        output_name = f"{output_filename}_fitts.nii.gz"
+        output_name = get_outname(output_filename, "denoised_bold", "nii.gz", use_bids)
+        out_bids_keywords.append("denoised_bold")
         write_data(
             fitts,
             os.path.join(out_dir, output_name),
@@ -687,11 +736,15 @@ def pySPFM(
             data_header,
             command_str,
             is_atlas=is_atlas,
+            use_bids=use_bids,
         )
     elif n_te > 1:
         for te_idx in range(n_te):
             te_data = fitts[te_idx * n_scans : (te_idx + 1) * n_scans, :]
-            output_name = f"{output_filename}_dr2HRF_E0{te_idx + 1}.nii.gz"
+            output_name = get_outname(
+                f"{output_filename}_echo-{te_idx + 1}", "denoised_bold", "nii.gz", use_bids
+            )
+            out_bids_keywords.append(f"echo-{te_idx + 1}_desc-denoised_bold")
             write_data(
                 te_data,
                 os.path.join(out_dir, output_name),
@@ -699,13 +752,15 @@ def pySPFM(
                 data_header,
                 command_str,
                 is_atlas=is_atlas,
+                use_bids=use_bids,
             )
 
     # Save noise estimate
     if n_te == 1:
-        output_name = f"{output_filename}_MAD.nii.gz"
-        y = data_masked[:n_scans, :]
-        _, _, noise_estimate = select_lambda(hrf=hrf_norm, y=y)
+        output_name = get_outname(output_filename, "MAD", "nii.gz", use_bids)
+        out_bids_keywords.append("MAD")
+        out_data = data_masked[:n_scans, :]
+        _, _, noise_estimate = select_lambda(hrf=hrf, y=out_data)
         write_data(
             np.expand_dims(noise_estimate, axis=0),
             os.path.join(out_dir, output_name),
@@ -713,15 +768,19 @@ def pySPFM(
             data_header,
             command_str,
             is_atlas=is_atlas,
+            use_bids=use_bids,
         )
     else:
         for te_idx in range(n_te):
-            output_name = f"{output_filename}_MAD_E0{te_idx + 1}.nii.gz"
+            output_name = get_outname(
+                output_filename, f"echo-{te_idx + 1}_MAD", "nii.gz", use_bids
+            )
+            out_bids_keywords.append(f"echo-{te_idx + 1}_MAD")
             if te_idx == 0:
                 y_echo = data_masked[:n_scans, :]
             else:
                 y_echo = data_masked[te_idx * n_scans : (te_idx + 1) * n_scans, :]
-            _, _, noise_estimate = select_lambda(hrf=hrf_norm, y=y_echo)
+            _, _, noise_estimate = select_lambda(hrf=hrf, y=y_echo)
             write_data(
                 np.expand_dims(noise_estimate, axis=0),
                 os.path.join(out_dir, output_name),
@@ -729,10 +788,15 @@ def pySPFM(
                 data_header,
                 command_str,
                 is_atlas=is_atlas,
+                use_bids=use_bids,
             )
 
     # Save lambda
-    output_name = f"{output_filename}_lambda.nii.gz"
+    out_keyword = "lambda"
+    if use_bids:
+        out_keyword = f"stat-{out_keyword}_statmap"
+    output_name = get_outname(output_filename, out_keyword, "nii.gz", use_bids)
+    out_bids_keywords.append(out_keyword)
     write_data(
         np.expand_dims(lambda_map, axis=0),
         os.path.join(out_dir, output_name),
@@ -740,8 +804,13 @@ def pySPFM(
         data_header,
         command_str,
         is_atlas=is_atlas,
+        use_bids=use_bids,
    )
 
+    # Save BIDS compatible sidecar file
+    if use_bids:
+        write_json(out_bids_keywords, out_dir)
+
     LGR.info("Results saved.")
 
     LGR.info("pySPFM finished.")
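Putting the new options together, a BIDS-style run would look roughly like this, assuming the pySPFM console entry point; the input filenames are illustrative, while the flags come from _get_parser and the five-echo integration test:

    pySPFM -i sub-01_task-rest_bold.nii.gz -m sub-01_mask.nii.gz \
        -o sub-01_task-rest -tr 2 -d outdir -hrf glover --block --debias --bids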
diff --git a/setup.cfg b/setup.cfg
index 387301a..98490d0 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -27,7 +27,7 @@ install_requires =
     dask_jobqueue
     distributed
    nibabel
-    nilearn
+    nilearn==0.9.1
     numpy<=1.22
     pylops>=1.18.2
     pyproximal>=0.4.0