Thanks a lot for the answer, @richard. I can paste the code I’ve used below, but what would be the best way to share my data? Maybe a Google Drive link?
Here’s my code:
import mne
from numpy.random import randn
import numpy as np
from mne.stats import (permutation_cluster_test,
                       spatio_temporal_cluster_1samp_test,
                       summarize_clusters_stc)
from mne.minimum_norm import apply_inverse_epochs, read_inverse_operator
from mne.viz import circular_layout
from mne_connectivity import spectral_connectivity_epochs
from mne_connectivity.viz import plot_connectivity_circle
# I want to compare alpha vs. sham stimulation, so everything I do here for
# alpha, I repeat for sham.
# First, I load the concatenated epochs of 17 subjects and their inverse solution.
# Alpha
tmin, tmax = -0.2, 2
path_epochs_Tfr_alpha = '/home/.../epochs_PM_alpha-epo.fif'
epochs_Tfr_alpha = mne.read_epochs(path_epochs_Tfr_alpha, preload=True)
fname_inv_alpha = '/home/.../epochs_PM_alpha-inv.fif'
inverse_operator_alpha = read_inverse_operator(fname_inv_alpha)
subjects_dir = '/home/.../mne_data/MNE-fsaverage-data'
subj_alpha = 'epochs_PM_alpha'
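#%%
# (Not part of the analysis, just a quick sanity check on what was loaded;
# 'nsource' should be a standard key of the MNE inverse operator.)
print(epochs_Tfr_alpha)                    # epoch count, channels, tmin/tmax
print(inverse_operator_alpha['nsource'])   # number of source-space vertices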
#%%
# Compute the inverse solution for this data.
# This returns the source activity that I’ll use to compute connectivity.
# I can restrict the connectivity to particular frequencies with the fmin and
# fmax arguments. For each epoch, MNE-Python:
#   - reads the epoch data
#   - applies SSP and baseline correction
#   - computes the inverse to obtain a source estimate
#   - averages the source estimate to obtain a time series for each label
#   - includes the label time series in the connectivity computation
#   - moves on to the next epoch
# With return_generator=True, stcs is a generator of source estimates
# instead of a list.
snr = 1.0 # use lower SNR for single epochs
lambda2 = 1.0 / snr ** 2
method = "dSPM" # use dSPM method (could also be MNE or sLORETA)
stcs = apply_inverse_epochs(epochs_Tfr_alpha, inverse_operator_alpha, lambda2,
                            method, pick_ori="normal", return_generator=True)
# Get labels for FreeSurfer 'aparc' cortical parcellation with 34 labels/hemi
labels = mne.read_labels_from_annot(subj_alpha, parc='aparc',
                                    subjects_dir=subjects_dir)
#%%
labels_2 = []
#%%
# Drop the 'unknown' labels, which carry no useful cortical sources
for i in labels:
    if 'unknown' not in i.name:
        print(i)
        labels_2.append(i)
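#%%
# (For reference, an equivalent one-liner would be:
#  labels_2 = [lab for lab in labels if 'unknown' not in lab.name])
print(f'{len(labels_2)} labels kept for connectivity')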
#%%
label_colors = [label.color for label in labels_2]
# Average the source estimates within each label, using sign-flips to reduce
# signal cancellations; this also returns a generator.
src = inverse_operator_alpha['src']
label_ts = mne.extract_label_time_course(
    stcs, labels_2, src, mode='mean_flip', return_generator=True)
# Bands: theta 3-8 Hz; alpha 8-13 Hz; low beta 12-15 Hz; mid beta 15-20 Hz;
# high beta 20-40 Hz
fmin = 8.
fmax = 13.
sfreq = epochs_Tfr_alpha.info['sfreq'] # the sampling frequency
con_methods = ['pli', 'wpli2_debiased', 'ciplv']
con = spectral_connectivity_epochs(
    label_ts,
    method=con_methods,
    mode='multitaper',
    sfreq=sfreq,
    fmin=fmin,
    fmax=fmax,
    faverage=True,
    mt_adaptive=True,
    tmin=tmin,
    tmax=tmax,
    n_jobs=7)
# Each connectivity object holds a 3D array; get the connectivity for the
# first (and only) frequency band, for each method.
con_res = dict()
for method, c in zip(con_methods, con):
    con_res[method] = c.get_data(output='dense')[:, :, 0]
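#%%
# (Sanity check, not required: each matrix should be square, n_labels x
# n_labels; as far as I understand, the dense output fills only the lower
# triangle for all-to-all connectivity.)
for m in con_methods:
    print(m, con_res[m].shape)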
#%%
# Then I save the connectivity matrix for alpha, and do the same for sham.
epochs_PM_alpha_con_alpha_wpli = con_res['wpli2_debiased']
#%%
np.save('/home/.../epochs_PM_alpha_con_alpha_wpli', epochs_PM_alpha_con_alpha_wpli)
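# (The sham matrix is computed and saved the same way; the corresponding call
# would be something like:
#  np.save('/home/.../epochs_PM_sham_con_alpha_wpli', epochs_PM_sham_con_alpha_wpli))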
# So after obtaining a wPLI connectivity matrix for alpha and sham, I load both
# matrices and perform a cluster permutation test on them:
epochs_PM_alpha_con = np.load('/home/.../epochs_PM_alpha_con_alpha_wpli.npy')
epochs_PM_sham_con = np.load('/home/.../epochs_PM_sham_con_alpha_wpli.npy')
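#%%
# (Quick check that both matrices have the same shape before stacking:)
assert epochs_PM_alpha_con.shape == epochs_PM_sham_con.shape
print(epochs_PM_alpha_con.shape)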
#%%
# Build arrays for the cluster permutation test:
n_signals, n_freqs = epochs_PM_alpha_con.shape
n_subjects = 1
print(f'Building arrays for {n_subjects} subject(s).')
#%%
# The randn values below are zeroed out before use, so seeding is not needed
# here; the cluster test itself is seeded further down.
X = randn(n_signals, n_freqs, n_subjects)
X1 = randn(n_signals, n_freqs, n_subjects)
#%%
X = X - X    # transform all values into zeros to counterbalance the randn() above
X1 = X1 - X1
#%%
X[:, :, :] += epochs_PM_alpha_con[:, :, np.newaxis]
X1[:, :, :] += epochs_PM_sham_con[:, :, np.newaxis]
#%%
# Note that X needs to be a multi-dimensional array of shape
# observations (subjects) × time × space, so we permute dimensions
X = np.transpose(X, [2, 1, 0])
X1 = np.transpose(X1, [2, 1, 0])
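#%%
# (Check that both arrays now have the expected layout:)
print(X.shape, X1.shape)   # should both be (n_subjects, n_freqs, n_signals)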
#%%
# Compute the statistic
threshold = 0.05   # cluster-forming threshold (interpreted in units of the test statistic)
n_permutations = 1000
T_obs, clusters, cluster_p_values, H0 = \
    permutation_cluster_test([X, X1],
                             n_permutations=n_permutations,
                             threshold=threshold,
                             tail=0,
                             n_jobs=7,
                             seed=49,
                             out_type='mask',
                             verbose=True)
#%%
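# (Finally, a minimal sketch of how I look at the output; with out_type='mask',
# each entry of `clusters` should be a boolean array over the data grid.)
for i_c, p_val in enumerate(cluster_p_values):
    if p_val < 0.05:
        print(f'Cluster {i_c}: p = {p_val:.3f}, size = {clusters[i_c].sum()}')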
Thanks for your help!
Bruno