
Drop AFNI dependency #19

Merged: 38 commits, Oct 13, 2022
Commits (38)
8317348
First version of pythonic HRF generator
eurunuela Jun 22, 2022
5b5c6bc
Renamed all instances of hrf_norm to hrf
eurunuela Jun 22, 2022
ee43546
Fix bug on calling hrf_generator
eurunuela Jun 22, 2022
d5dfa2a
Fix bug
eurunuela Jun 22, 2022
d60c73c
Merge branch 'main' into afni
eurunuela Jun 23, 2022
a261c84
Added option for custom HRF
eurunuela Jun 23, 2022
9b2de76
LARS wrong naming fix
eurunuela Jun 23, 2022
0a472de
CLI bug fix
eurunuela Jun 23, 2022
183779d
Merge branch 'main' into afni
eurunuela Jul 21, 2022
c8d68eb
Add BIDS and custom HRF compatibility
eurunuela Jul 22, 2022
715d38d
Added error messages
eurunuela Jul 22, 2022
30297ae
Clarified arg help
eurunuela Jul 22, 2022
6274257
Actually update the arg help
eurunuela Jul 22, 2022
4718ebb
Merge branch 'main' into afni
eurunuela Jul 22, 2022
7e1e0e5
Make it clear AFNI is used if bids is not
eurunuela Jul 27, 2022
a05d58d
Add spaces to help strings and change is_bids for use_bids
eurunuela Jul 27, 2022
7ffe415
Remove leading zero from multi-echo outputs
eurunuela Jul 27, 2022
55a280c
Remove missing leading zeros
eurunuela Jul 27, 2022
d4973d7
Add suggestions by Taylor
eurunuela Aug 22, 2022
ac60811
Fix styling error
eurunuela Aug 22, 2022
bd35681
Higher scipy version to avoid issues with MAD naming
eurunuela Aug 22, 2022
dda6423
Fixes
eurunuela Aug 22, 2022
3797643
Fixed json keyword matching
eurunuela Aug 22, 2022
a595913
Update integration test output filenames
eurunuela Aug 22, 2022
80e3981
Revert MAD naming and constrain scipy version
eurunuela Aug 22, 2022
0613301
Add units
eurunuela Aug 22, 2022
3bf223c
Fix error
eurunuela Aug 22, 2022
d9ce0bb
Merge branch 'main' into afni
eurunuela Oct 5, 2022
4449428
Fixed an error from the previous merge
eurunuela Oct 5, 2022
a0f35b5
Fixed the debiasing n_jobs error from previous merge
eurunuela Oct 5, 2022
14981b5
Update naming of AUC file
eurunuela Oct 11, 2022
ba09fb1
Added TE values check and updated test
eurunuela Oct 11, 2022
43acbb4
Removed breakpoint
eurunuela Oct 11, 2022
352f507
Updated cache key
eurunuela Oct 13, 2022
6471998
print nilearn version
eurunuela Oct 13, 2022
57a40c4
Trigger tests
eurunuela Oct 13, 2022
7b6525c
Fix nilearn version
eurunuela Oct 13, 2022
6d58475
Removed print from test
eurunuela Oct 13, 2022
12 changes: 6 additions & 6 deletions .circleci/config.yml
@@ -32,7 +32,7 @@ jobs:
paths:
- src/pySPFM
- restore_cache: # ensure this step occurs *before* installing dependencies
- key: deps1-{{ checksum "setup.cfg" }}-{{ checksum "setup.py" }}-{{ "<< parameters.PYTHON >>" }}
+ key: conda-{{ checksum "setup.cfg" }}-{{ checksum "setup.py" }}-{{ "<< parameters.PYTHON >>" }}
- run: # will overwrite pySPFM installation each time
name: Generate environment
command: |
@@ -46,7 +46,7 @@ jobs:
fi
python setup.py install --user
- save_cache: # environment cache tied to requirements
- key: deps1-{{ checksum "setup.cfg" }}-{{ checksum "setup.py" }}-{{ "<< parameters.PYTHON >>" }}
+ key: conda-{{ checksum "setup.cfg" }}-{{ checksum "setup.py" }}-{{ "<< parameters.PYTHON >>" }}
paths:
- "/opt/conda/envs/py<< parameters.PYTHON >>_env"

@@ -61,7 +61,7 @@ jobs:
- attach_workspace: # get pySPFM
at: /tmp
- restore_cache: # load environment
- key: deps1-{{ checksum "setup.cfg" }}-{{ checksum "setup.py" }}-{{ "<< parameters.PYTHON >>" }}
+ key: conda-{{ checksum "setup.cfg" }}-{{ checksum "setup.py" }}-{{ "<< parameters.PYTHON >>" }}
- run:
name: Run tests and compile coverage
command: |
@@ -86,7 +86,7 @@ jobs:
- attach_workspace: # get pySPFM
at: /tmp
- restore_cache: # load environment
- key: deps1-{{ checksum "setup.cfg" }}-{{ checksum "setup.py" }}-{{ "<< parameters.PYTHON >>" }}
+ key: conda-{{ checksum "setup.cfg" }}-{{ checksum "setup.py" }}-{{ "<< parameters.PYTHON >>" }}
- run:
name: Run tests and compile coverage
command: |
@@ -108,7 +108,7 @@ jobs:
- attach_workspace: # get pySPFM
at: /tmp
- restore_cache: # load environment
- key: deps1-{{ checksum "setup.cfg" }}-{{ checksum "setup.py" }}-{{ "3.9" }}
+ key: conda-{{ checksum "setup.cfg" }}-{{ checksum "setup.py" }}-{{ "3.9" }}
- run:
name: Build documentation
command: |
@@ -128,7 +128,7 @@ jobs:
- attach_workspace: # get pySPFM
at: /tmp
- restore_cache: # load environment
- key: deps1-{{ checksum "setup.cfg" }}-{{ checksum "setup.py" }}-{{ "3.9" }}
+ key: conda-{{ checksum "setup.cfg" }}-{{ checksum "setup.py" }}-{{ "3.9" }}
- run:
name: Linting
command: |
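The six changed lines above all do the same thing: the dependency cache key prefix is renamed from deps1 to conda, which forces CircleCI to build a fresh conda environment instead of restoring one cached under the old key (the "Updated cache key" and nilearn-version commits above suggest this was needed so CI would pick up the intended nilearn build). Below is a rough Python sketch of how checksum-based cache keys behave; the cache_key helper and the stand-in file contents are illustrative and not part of the CI config.

import hashlib


def cache_key(prefix, file_contents, python_version):
    """Illustrative stand-in for CircleCI's checksum-based cache keys.

    The key changes whenever the prefix, any dependency file, or the Python
    version changes, so a stale environment is never restored.
    """
    digests = [hashlib.sha256(content).hexdigest()[:12] for content in file_contents]
    return "-".join([prefix] + digests + [python_version])


setup_cfg = b"install_requires = nilearn, nibabel, numpy"  # stand-in file content
setup_py = b"from setuptools import setup; setup()"  # stand-in file content

# Bumping the prefix from "deps1" to "conda" yields a different key, so the
# next CI run builds a fresh environment rather than restoring the old cache.
old_key = cache_key("deps1", [setup_cfg, setup_py], "3.9")
new_key = cache_key("conda", [setup_cfg, setup_py], "3.9")
assert old_key != new_key

In the real config, CircleCI computes the {{ checksum "setup.cfg" }} values itself, so only the hand-written prefix needs to change to invalidate every existing cache entry at once.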
305 changes: 131 additions & 174 deletions pySPFM/deconvolution/hrf_generator.py
@@ -3,211 +3,168 @@
import subprocess

import numpy as np
import scipy.io
import scipy.stats
from nilearn.glm.first_level import glover_hrf, spm_hrf

LGR = logging.getLogger("GENERAL")
RefLGR = logging.getLogger("REFERENCES")


def hrf_linear(TR, p):
"""Generate HRF.

Parameters
----------
TR : float
TR of the acquisition.
p : list
Input parameters of the response function (two gamma functions).
defaults
(seconds)
p[1] - delay of response (relative to onset) 6
p[2] - delay of undershoot (relative to onset) 16
p[3] - dispersion of response 1
p[4] - dispersion of undershoot 1
p[5] - ratio of response to undershoot 6
p[6] - onset (seconds) 0
p[7] - length of kernel (seconds) 32

Returns
-------
hrf : ndarray
A hemodynamic response function (HRF).

Notes
-----
Based on the spm_hrf function in SPM8
Written in R by Cesar Caballero Gaudes
Translated into Python by Eneko Urunuela
"""
# global parameter
# --------------------------------------------------------------------------
fMRI_T = 16

# modelled hemodynamic response function - {mixture of Gammas}
# --------------------------------------------------------------------------
dt = TR / fMRI_T
u = np.arange(0, p[6] / dt + 1, 1) - p[5] / dt
a1 = p[0] / p[2]
b1 = 1 / p[3]
a2 = p[1] / p[3]
b2 = 1 / p[3]

hrf = (
scipy.stats.gamma.pdf(u * dt, a1, scale=b1)
- scipy.stats.gamma.pdf(u * dt, a2, scale=b2) / p[4]
) / dt
time_axis = np.arange(0, int(p[6] / TR + 1), 1) * fMRI_T
hrf = hrf[time_axis]
min_hrf = 1e-9 * min(hrf[hrf > 10 * np.finfo(float).eps])

if min_hrf < 10 * np.finfo(float).eps:
min_hrf = 10 * np.finfo(float).eps

hrf[hrf == 0] = min_hrf

return hrf


def hrf_afni(TR, lop_hrf="SPMG1"):
"""Generate HRF with AFNI's 3dDeconvolve.

Parameters
----------
TR : float
TR of the acquisition.
lop_hrf : str
3dDeconvolve option to select HRF shape, by default "SPMG1"

Returns
-------
hrf : ndarray
A hemodynamic response function (HRF).

Notes
-----
AFNI installation is needed as it runs 3dDeconvolve on the terminal with subprocess.
"""
dur_hrf = 8
last_hrf_sample = 1
# Increases duration until last HRF sample is zero
while last_hrf_sample != 0:
dur_hrf = 2 * dur_hrf
hrf_command = (
"3dDeconvolve -x1D_stop -nodata %d %f -polort -1 -num_stimts 1 -stim_times 1 "
"'1D:0' '%s' -quiet -x1D stdout: | 1deval -a stdin: -expr 'a'"
) % (dur_hrf, TR, lop_hrf)
hrf_tr_str = subprocess.check_output(
hrf_command, shell=True, universal_newlines=True
).splitlines()
hrf = np.array([float(i) for i in hrf_tr_str])
last_hrf_sample = hrf[len(hrf) - 1]
if last_hrf_sample != 0:
LGR.info(
"Duration of HRF was not sufficient for specified model. Doubling duration "
"and computing again."
)

# Removes tail of zero samples
while last_hrf_sample == 0:
hrf = hrf[0 : len(hrf) - 1]
last_hrf_sample = hrf[len(hrf) - 1]

return hrf


class HRFMatrix:
"""A class for generating an HRF matrix.

Parameters
----------
TR : float
TR of the acquisition, by default 2
TE : list
Values of TE in ms, by default None
n_scans : int
Number of volumes in acquisition, by default 200
r2only : bool
Whether to only consider R2* in the signal model, by default True
is_afni : bool
Whether to use AFNI's 3dDeconvolve to generate HRF matrix, by default True
lop_hrf : str
3dDeconvolve option to select HRF shape, by default "SPMG1"
model : str
Model to use for HRF, by default "spm"
block : bool
Whether to use the block model in favor of the spike model, by default false
custom : str
Path to custom HRF file, by default None
"""

def __init__(
self,
TR=2,
TE=None,
n_scans=200,
r2only=True,
is_afni=True,
lop_hrf="SPMG1",
te=None,
model="spm",
block=True,
):
self.TR = TR
self.TE = TE
self.n_scans = n_scans
self.r2only = r2only
self.lop_hrf = lop_hrf
self.is_afni = is_afni
# If te values are higher than 1, then assume they are in ms and convert to s
# If te values are lower than 1, then assume they are in s

if te is not None:
if max(te) > 1:
te = [i / 1000 for i in te]
self.te = te

self.model = model
self.block = block

def generate_hrf(self):
def generate_hrf(self, tr, n_scans):
"""Generate HRF matrix.

Parameters
----------
tr : float
tr of the acquisition.
n_scans : int
Number of scans.

Returns
-------
self
self.hrf_ : array_like
A hemodynamic response function (HRF).
"""
if self.is_afni:
hrf_SPM = hrf_afni(self.TR, self.lop_hrf)
else:
p = [6, 16, 1, 1, 6, 0, 32]
hrf_SPM = hrf_linear(self.TR, p)

self.L_hrf = len(hrf_SPM) # Length
mahrf = max(abs(hrf_SPM)) # Max value
filler = np.zeros(self.n_scans - hrf_SPM.shape[0], dtype=int)
hrf_SPM = np.append(hrf_SPM, filler) # Fill up array with zeros until n_scans

temp = hrf_SPM

for i in range(self.n_scans - 1):
foo = np.append(np.zeros(i + 1), hrf_SPM[0 : (len(hrf_SPM) - i - 1)])
temp = np.column_stack((temp, foo))
# Read custom HRF from file if self.model ends in .1D or .txt
if self.model.endswith(".1D") or self.model.endswith(".txt"):
hrf = np.loadtxt(self.custom)

# Make sure that the HRF is not longer than the number of scans
# If it is, then raise an error
if len(hrf) > n_scans:
raise ValueError(
"HRF is longer than the number of scans. "
"Please make sure that your custom HRF is not longer than the number of scans."
)
else:
# Get HRF from nilearn
if self.model == "spm":
hrf = spm_hrf(tr, oversampling=1, time_length=n_scans * tr)
elif self.model == "glover":
hrf = glover_hrf(tr, oversampling=1, time_length=n_scans * tr)
else:
raise ValueError(
"Model must be either 'spm', 'glover' or a custom '.1D' or '.txt' file, not %s"
% self.model
)

# Calculate maximum HRF value
max_val = max(abs(hrf))

# Generate HRF matrix
hrf_mtx = hrf
for i in range(n_scans - 1):
foo = np.append(np.zeros(i + 1), hrf[0 : (len(hrf) - i - 1)])
hrf_mtx = np.column_stack((hrf_mtx, foo))

# Normalize HRF matrix
hrf_mtx = hrf_mtx / max_val

# Concatenate and scale HRFs for multi-echo,
# leave it as it is for single-echo.
if len(self.te) > 1:
# Add integrator if necessary
if self.block:
hrf_mtx_te = -self.te[0] * np.dot(hrf_mtx, np.tril(np.ones(n_scans)))
else:
hrf_mtx_te = -self.te[0] * hrf_mtx

# Concatenate and scale HRFs for multi-echo
for teidx in range(len(self.te) - 1):
# Add integrator if necessary
if self.block:
hrf_mtx_te = np.vstack(
(
hrf_mtx_te,
-self.te[teidx + 1] * np.dot(hrf_mtx, np.tril(np.ones(n_scans))),
)
)
else:
hrf_mtx_te = np.vstack((hrf_mtx_te, -self.te[teidx + 1] * hrf_mtx))

if len(self.TE) > 1:
tempTE = -self.TE[0] * temp
for teidx in range(len(self.TE) - 1):
tempTE = np.vstack((tempTE, -self.TE[teidx + 1] * temp))
self.hrf_ = hrf_mtx_te
else:
tempTE = temp
if self.block:
self.hrf_ = np.dot(hrf_mtx, np.tril(np.ones(n_scans)))
else:
self.hrf_ = hrf_mtx

if self.r2only:
self.hrf = tempTE
return self

self.hrf_norm = self.hrf / mahrf
def hrf_afni(tr, hrf_model="SPMG1"):
"""Generate HRF with AFNI's 3dDeconvolve.

if self.block:
if len(self.TE) > 1:
for teidx in range(len(self.TE)):
temp = self.hrf[
teidx * self.n_scans : (teidx + 1) * self.n_scans - 1, :
].copy()
self.hrf[teidx * self.n_scans : (teidx + 1) * self.n_scans - 1, :] = np.dot(
temp, np.tril(np.ones(self.n_scans))
)
temp = self.hrf_norm[
teidx * self.n_scans : (teidx + 1) * self.n_scans - 1, :
].copy()
self.hrf_norm[
teidx * self.n_scans : (teidx + 1) * self.n_scans - 1, :
] = np.dot(temp, np.tril(np.ones(self.n_scans)))
else:
self.hrf = np.dot(self.hrf, np.tril(np.ones(self.n_scans)))
self.hrf_norm = np.dot(self.hrf_norm, np.tril(np.ones(self.n_scans)))
Parameters
----------
tr : float
tr of the acquisition.
hrf_model : str
3dDeconvolve option to select HRF shape, by default "SPMG1"

return self
Returns
-------
hrf : array_like
A hemodynamic response function (HRF).

Notes
-----
AFNI installation is needed as it runs 3dDeconvolve on the terminal with subprocess.
"""
dur_hrf = 8
last_hrf_sample = 1
# Increases duration until last HRF sample is zero
while last_hrf_sample != 0:
dur_hrf = 2 * dur_hrf
hrf_command = (
"3dDeconvolve -x1D_stop -nodata %d %f -polort -1 -num_stimts 1 -stim_times 1 "
"'1D:0' '%s' -quiet -x1D stdout: | 1deval -a stdin: -expr 'a'"
) % (dur_hrf, tr, hrf_model)
hrf_tr_str = subprocess.check_output(
hrf_command, shell=True, universal_newlines=True
).splitlines()
hrf = np.array([float(i) for i in hrf_tr_str])
last_hrf_sample = hrf[len(hrf) - 1]
if last_hrf_sample != 0:
LGR.info(
"Duration of HRF was not sufficient for specified model. Doubling duration "
"and computing again."
)

# Removes tail of zero samples
while last_hrf_sample == 0:
hrf = hrf[0 : len(hrf) - 1]
last_hrf_sample = hrf[len(hrf) - 1]

return hrf
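In short, the new generate_hrf above replaces the 3dDeconvolve call with nilearn's spm_hrf and glover_hrf (or a user-supplied .1D/.txt file), builds the shifted-column convolution matrix, normalizes it by the HRF peak, optionally applies the block (integrator) model, and stacks -TE-scaled copies of the matrix for multi-echo data. hrf_afni stays in the module, but the default models no longer require an AFNI installation. The following is a condensed, self-contained sketch of that construction, assuming only NumPy and nilearn; the build_hrf_matrix helper, its argument names, and the choice to read a custom HRF from the model path are illustrative rather than the pySPFM API.

import numpy as np
from nilearn.glm.first_level import glover_hrf, spm_hrf


def build_hrf_matrix(tr, n_scans, model="spm", te=None, block=False):
    """Sketch of the nilearn-based HRF matrix construction in generate_hrf.

    Returns an (n_scans, n_scans) convolution matrix for single-echo data, or
    a (len(te) * n_scans, n_scans) stack scaled by -TE for multi-echo data.
    """
    # Sample the HRF on the TR grid, or load a custom one from a text file.
    if model.endswith((".1D", ".txt")):
        hrf = np.loadtxt(model)
        if len(hrf) > n_scans:
            raise ValueError("Custom HRF is longer than the number of scans.")
    elif model == "spm":
        hrf = spm_hrf(tr, oversampling=1, time_length=n_scans * tr)
    elif model == "glover":
        hrf = glover_hrf(tr, oversampling=1, time_length=n_scans * tr)
    else:
        raise ValueError(f"Unknown HRF model: {model}")

    # Shifted-column matrix: column i holds the HRF delayed by i samples.
    hrf_mtx = np.zeros((n_scans, n_scans))
    for i in range(n_scans):
        n_keep = min(len(hrf), n_scans - i)
        hrf_mtx[i : i + n_keep, i] = hrf[:n_keep]

    # Normalize by the HRF peak, as generate_hrf does before echo scaling.
    hrf_mtx = hrf_mtx / np.max(np.abs(hrf))

    # Block (integrator) model: convolve with a step via a lower-triangular matrix.
    if block:
        hrf_mtx = hrf_mtx @ np.tril(np.ones(n_scans))

    # Multi-echo R2* model: stack one -TE-scaled copy of the matrix per echo.
    if te is not None and len(te) > 1:
        te = [t / 1000 if t > 1 else t for t in te]  # values > 1 assumed to be in ms
        hrf_mtx = np.vstack([-t * hrf_mtx for t in te])

    return hrf_mtx


# Example: spike model, SPM HRF, three echoes given in milliseconds.
design = build_hrf_matrix(tr=2.0, n_scans=200, model="spm", te=[14.0, 28.0, 42.0])

Because nilearn ships these analytic HRF models, the SPM and Glover defaults work without shelling out to AFNI, which is the point of this PR.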