- MNE version: 1.2.1
- operating system: Ubuntu 20.04.05 LTS
Dear MNE community,
When I use "stc_all_cluster_vis.plot" to plot the results of a "spatio_temporal_cluster_test", the time axis doesn't seem to be correct, so I have two questions:
My time axis should range from 1.8 to 2 s, but it's ranging from 0 to 2 s.
In the tutorial I'm trying to replicate with my own data, "2 samples permutation test on source data with spatio-temporal clustering", it says: tstep = stc.tstep * 1000  # convert to milliseconds
1. My stc.tstep is 0.002, so should I just multiply it by 100 (instead of 1000) to convert it to milliseconds? (See the short sketch below these questions for what I mean.)
2. The tutorial says that "each cluster becomes a 'time point' in the SourceEstimate" and that the very first "time point" in the SourceEstimate shows all the clusters, weighted by duration. However, as you can see in my figure, all time points show the same cluster.
How could I solve this problem? Any hint would be really helpful.
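Just to make question 1 concrete, this is the conversion I am unsure about (a minimal sketch with my own value plugged in; tstep_s is only an illustrative name):

tstep_s = 0.002            # my stc.tstep
tstep_ms = tstep_s * 1000  # the tutorial's conversion, giving 2.0 ms per sample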
Here's part of my code, in case it helps:
# Download fsaverage files
fs_dir = fetch_fsaverage(verbose=True)
subjects_dir = op.dirname(fs_dir)
# The files live in:
subject = 'fsaverage'
trans = 'fsaverage' # MNE has a built-in fsaverage transformation
src_fname = op.join(fs_dir, 'bem', 'fsaverage-ico-5-src.fif')
bem = op.join(fs_dir, 'bem', 'fsaverage-5120-5120-5120-bem-sol.fif')
#%%
# Load the data
morph_alpha = []
morph_sham = []
exclude_outliers = ['10', '19', '20', '23', '24', '36', '40', '56', '58', '60']
excluded_subjs = []
# PM ERPs
print('1. PREPARE FOR LOOP WITH SUBJECTS')
studydir = "/home/.../Epoched"
# Get all the paths!
path_epochs = glob.glob("%s/epochs_PMC_Tfr_p[0-9][0-9][0-9]_[ATS]-epo.fif" % studydir)  # one file per subject & session
tmin = 1.8
tmax = 2
for fname in list(path_epochs):
    try:
        subj = fname[-12:-10]
        session = fname[-9]
        dann = fname[-14:-8]
        print(dann)
        if subj in exclude_outliers:
            print('This subject will not be included!')
            excluded_subjs.append(str(subj))
            continue
        src = mne.read_source_spaces(src_fname)
        fsave_vertices = [s['vertno'] for s in src]
        if session == 'A':
            stc_alpha = mne.read_source_estimate('%s/p0%s_%s-lh.stc' % (studydir, subj, session), dann)
            stc_alpha.crop(tmin, tmax)
            morph_mat = mne.compute_source_morph(stc_alpha, subject_to='fsaverage',
                                                 spacing=fsave_vertices, smooth=20,
                                                 subjects_dir=subjects_dir)
            # Apply the morph; stc_alpha is my stc object
            condition_alpha_stc = morph_mat.apply(stc_alpha)
            morph_alpha.append(condition_alpha_stc.data)
        else:
            stc_sham = mne.read_source_estimate('%s/p0%s_%s-lh.stc' % (studydir, subj, session), dann)
            stc_sham.crop(tmin, tmax)
            morph_mat = mne.compute_source_morph(stc_sham, subject_to='fsaverage',
                                                 spacing=fsave_vertices, smooth=20,
                                                 subjects_dir=subjects_dir)
            # Apply the morph; stc_sham is my stc object
            condition_sham_stc = morph_mat.apply(stc_sham)
            morph_sham.append(condition_sham_stc.data)
    except Exception:
        print('Repeat manually: p%s_%s \n' % (subj, session))
        continue
tstep = condition_alpha_stc.tstep * 1000 # convert to milliseconds
n_vertices_fsave, n_times = condition_alpha_stc.data.shape
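# Note: .tstep is stored in seconds in MNE, so the conversion above turns
# the sample period into milliseconds (0.002 s -> 2.0 ms for my data)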
#%%
n_subjects_X = len(morph_alpha)
n_subjects_X2 = len(morph_sham)
print('Loaded data for %d and %d subjects.' % (n_subjects_X, n_subjects_X2))
#%%
# Stack the per-subject lists into arrays of shape
# (subjects, vertices, times); morph_alpha and morph_sham are plain
# Python lists, so they cannot be sliced with [:, :, :] directly
X = np.stack(morph_alpha)
X2 = np.stack(morph_sham)
#%%
# We want to compare the overall activity levels for each subject
X = np.abs(X)  # only magnitude
X2 = np.abs(X2)  # only magnitude
#%%
# Find adjacency
fs_dir = fetch_fsaverage(verbose=True)
src_fname = op.join(fs_dir, 'bem', 'fsaverage-ico-5-src.fif')
src = mne.read_source_spaces(src_fname)
print('Computing adjacency.')
adjacency = mne.spatial_src_adjacency(src)
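# Note: spatial_src_adjacency() returns the *spatial* adjacency only;
# spatio_temporal_cluster_test() extends it across time points internally
# (with max_step controlling how far apart in time samples may be and
# still join the same cluster)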
#%%
# Note that X needs to be a list of multi-dimensional arrays of shape
# samples (subjects_k) × time × space, so we permute dimensions
X = np.transpose(X, [0, 2, 1])
X2 = np.transpose(X2, [0, 2, 1])
#%%
# Here we set a cluster-forming threshold based on a p-value for
# the cluster-based permutation test.
# We use a two-tailed threshold; the "1 - p_threshold / 2" is needed
# because for two-tailed tests we must specify a positive threshold.
p_threshold = 0.001
# Degrees of freedom for the two-sample F-test
df1, df2 = X.shape[0] - 1, X2.shape[0] - 1
f_threshold = stats.distributions.f.ppf(1. - p_threshold / 2., df1, df2)
#%%
n_permutations = 500  # should really be above 1000; kept lower here for speed
# Now let's actually do the clustering. This can take a long time...
print('Clustering.')
F_obs, clusters, cluster_p_values, H0 = clu = \
    spatio_temporal_cluster_test([X, X2],
                                 n_permutations=n_permutations,
                                 threshold=f_threshold,
                                 max_step=2,
                                 n_jobs=3,
                                 seed=49,
                                 buffer_size=None,
                                 adjacency=adjacency,
                                 verbose=True)
#%%
# Select the clusters that are statistically significant at p < 0.05
good_cluster_inds = np.where(cluster_p_values < 0.05)[0]
#%%
fsave_vertices = [np.arange(10242), np.arange(10242)]  # ico-5: 10242 vertices per hemisphere
stc_all_cluster_vis = summarize_clusters_stc(clu,
                                             tstep=tstep,
                                             vertices=fsave_vertices,
                                             subject='fsaverage')
#%%
# Let's actually plot the first "time point" in the SourceEstimate, which
# shows all the clusters, weighted by duration
# blue blobs are for condition A != condition B
brain = stc_all_cluster_vis.plot(
    'fsaverage',
    hemi='both',
    views='lateral',
    subjects_dir=subjects_dir,
    time_label='temporal extent (ms)',
    size=(800, 800),
    smoothing_steps=5,
    clim='auto')  # alternatively: dict(kind='value', pos_lims=[0, 1, 40])
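For completeness, this is a quick way to inspect what the clustering and the summary actually produced (a sketch using the variables defined in the code above):

print('smallest cluster p-value:', cluster_p_values.min())
print('number of significant clusters:', len(good_cluster_inds))
# summarize_clusters_stc() makes one "time point" per significant cluster,
# preceded by a first one that combines all clusters weighted by duration
print('summary "time" axis:', stc_all_cluster_vis.times)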