Mean Square Error and Standard Error the Same for All Channels

Hey Everyone,

I have been running individual-level (GLM) and group-level (LMM) analyses on my fNIRS data to look at group-level activations for a block-design task. I am running into a problem: the individual GLM output has the same mean square error for every channel, for every single participant, and the resulting group-level LMM output has the same standard error for every single channel. Has anyone else seen this, and what could the reason be? In the NIRS Toolbox there is a standard error for each individual channel, which lets you make inferences about each channel individually, but that doesn't seem to be the case in MNE. Any help clearing this up would be greatly appreciated, and I'll share my code chunks below. Thanks in advance!
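
To make the symptom concrete, here is the quick check I ran inside individual_analysis() right after the call to run_glm() (a minimal sketch; it assumes RegressionResults.MSE() returns one value per channel, which is my reading of the MNE-NIRS API):

# Sketch: confirm the per-channel MSE really is a single value
# (assumes glm_est.MSE() gives one MSE per channel)
mse_per_channel = np.array(glm_est.MSE()).ravel()
print(np.unique(mse_per_channel))  # prints exactly one value for every participant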

GLM CODE:
# Imports used below, gathered here so the snippet runs standalone
import os
from itertools import compress

import numpy as np
import pandas as pd
import statsmodels.formula.api as smf
from joblib import parallel_backend

import mne
from mne.preprocessing.nirs import optical_density, temporal_derivative_distribution_repair
from mne_nirs.channels import get_long_channels, get_short_channels, picks_pair_to_idx
from mne_nirs.experimental_design import make_first_level_design_matrix
from mne_nirs.statistics import run_glm, statsmodels_to_results

# 'root' (the directory holding the .snirf files) is defined earlier in my script


def individual_analysis(root, file_name):
    # Construct the full path to the file
    subject_file_path = os.path.join(root, file_name)

    # Check if the file exists
    if not os.path.exists(subject_file_path):
        print(f"File not found: {subject_file_path}")
        return None, None, None, None

    # Load the raw intensity data from the SNIRF file
    raw_intensity = mne.io.read_raw_snirf(subject_file_path, verbose=False)

    # Set the duration of the task annotations to the 36 s block length
    for i, desc in enumerate(raw_intensity.annotations.description):
        if desc in ['1', '3', '5']:
            raw_intensity.annotations.duration[i] = 36

    # Print the updated annotations to verify
    print(raw_intensity.annotations)

    # Drop the unwanted triggers and rename the remaining ones
    unwanted = np.nonzero((raw_intensity.annotations.description == "7") |
                          (raw_intensity.annotations.description == "2") |
                          (raw_intensity.annotations.description == "4") |
                          (raw_intensity.annotations.description == "6"))
    raw_intensity.annotations.delete(unwanted)
    raw_intensity.annotations.rename({"1": "1st Order", "3": "1st Order", "5": "2nd Order"})
    # NB: a dict literal cannot repeat a key, so the original
    # {'1st Order': 1, '1st Order': 3, ...} silently collapsed to one entry;
    # keep one trigger value per condition instead
    event_dict = {"1st Order": 1, "2nd Order": 5}

    # Convert to optical density and repair motion artifacts (TDDR)
    raw_od = optical_density(raw_intensity)
    raw_od = temporal_derivative_distribution_repair(raw_od)

    # Calculate the scalp coupling index
    sci = mne.preprocessing.nirs.scalp_coupling_index(raw_od)

    # Mark bad channels based on SCI
    raw_od.info["bads"] = list(compress(raw_od.ch_names, sci < 0.5))

    # Skip the file if more than 10 channels are marked as bad
    if len(raw_od.info["bads"]) > 10:
        print(f"File {file_name} has more than 10 bad channels, skipping.")
        return None, None, None, None

    # Convert to haemoglobin, filter, and resample the data
    raw_haemo = mne.preprocessing.nirs.beer_lambert_law(raw_od, ppf=0.1)
    raw_haemo_unfiltered = raw_haemo.copy()
    raw_haemo.filter(0.03, 0.7, h_trans_bandwidth=0.2, l_trans_bandwidth=0.01)
    raw_haemo.resample(1.4)

    # Extract short and long channels
    sht_chans = get_short_channels(raw_haemo)
    raw_haemo = get_long_channels(raw_haemo)

    # Create a design matrix
    design_matrix = make_first_level_design_matrix(
        raw_haemo,
        drift_model="cosine",
        high_pass=0.005,
        hrf_model="spm",
        stim_dur=36.0
    )

    # Append short-channel means to the design matrix
    design_matrix["ShortHbO"] = np.mean(
        sht_chans.copy().pick(picks="hbo").get_data(), axis=0
    )
    design_matrix["ShortHbR"] = np.mean(
        sht_chans.copy().pick(picks="hbr").get_data(), axis=0
    )

    # Pin the GLM to a single joblib worker
    with parallel_backend('loky', n_jobs=1):
        glm_est = run_glm(raw_haemo, design_matrix, noise_model="ar5")

    # Define regions of interest as source-detector pairs
    left = [[1, 1], [1, 2], [2, 1], [2, 2], [2, 3], [3, 1], [3, 3], [4, 3], [4, 2], [4, 4], [5, 3], [5, 4],
            [11, 9], [11, 8], [12, 8], [12, 10], [13, 8], [13, 9], [13, 10], [13, 11]]
    right = [[6, 5], [6, 6], [7, 5], [7, 6], [7, 7], [8, 5], [8, 7], [9, 7], [9, 6], [10, 7], [14, 12], [14, 13],
             [15, 12], [15, 14], [16, 12], [16, 13], [16, 14], [16, 15]]

    # Generate the correct indices for each pair
    groups = dict(
        Left_Hemisphere=picks_pair_to_idx(raw_haemo, left, on_missing="ignore"),
        Right_Hemisphere=picks_pair_to_idx(raw_haemo, right, on_missing="ignore")
    )

    # Compute region-of-interest results from channel data
    roi = glm_est.to_dataframe_region_of_interest(groups, design_matrix.columns)
    cha = glm_est.to_dataframe()

    # Define the 2nd Order vs 1st Order contrast
    contrast_matrix = np.eye(design_matrix.shape[1])
    basic_conts = dict(
        [(column, contrast_matrix[i]) for i, column in enumerate(design_matrix.columns)]
    )
    contrast_vec = basic_conts["2nd Order"] - basic_conts["1st Order"]

    # Compute the defined contrast
    contrast = glm_est.compute_contrast(contrast_vec)
    con = contrast.to_dataframe()

    # Add the participant ID to the dataframes
    roi["ID"] = cha["ID"] = con["ID"] = file_name  # use file_name as ID since the filenames carry no separate ID

    # Convert to uM for nicer plotting below
    cha["theta"] = [t * 1.0e6 for t in cha["theta"]]
    roi["theta"] = [t * 1.0e6 for t in roi["theta"]]
    con["effect"] = [t * 1.0e6 for t in con["effect"]]

    return raw_haemo, roi, cha, con

Initialize empty dataframes to store results

df_roi = pd.DataFrame() # To store region of interest results
df_cha = pd.DataFrame() # To store channel level results
df_con = pd.DataFrame() # To store channel level contrast results

Get the list of all file names in the directory

subjects = [f for f in os.listdir(root) if f.endswith('.snirf')]

Loop through each file in the directory and run the individual analysis on each

for file_name in subjects:
    # Analyse data and return both ROI and channel results
    raw_haemo, roi, channel, con = individual_analysis(root, file_name)

    if roi is not None:
        # Append individual results to all participants
        df_roi = pd.concat([df_roi, roi], ignore_index=True)
        df_cha = pd.concat([df_cha, channel], ignore_index=True)
        df_con = pd.concat([df_con, con], ignore_index=True)
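
As a sanity check on the pooled output, I also confirm the symptom across participants like this (assuming the to_dataframe() output carries an "mse" column, which is what I see locally):

# Sketch: count distinct MSE values per participant across channels
# (assumes df_cha has an "mse" column from glm_est.to_dataframe())
for pid, g in df_cha.groupby("ID"):
    print(pid, g["mse"].nunique())  # 1 -> a single MSE shared by all channels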

LMM CODE:

Cut down the dataframe just to the conditions we are interested in

ch_summary = df_cha.query("Condition in ['1st Order', '2nd Order']")
ch_summary = ch_summary.query("Chroma in ['hbo']")

Run the group-level model and convert the result to a dataframe

ch_model = smf.mixedlm(
    "theta ~ -1 + ch_name:Chroma:Condition", ch_summary, groups=ch_summary["ID"]
).fit(method="nm")
ch_model_df = statsmodels_to_results(ch_model)
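
And this is how I am pulling out the standard errors that all come back identical; ch_model.bse is the standard statsmodels attribute for coefficient standard errors, so nothing MNE-specific is involved:

# Sketch: inspect the per-coefficient standard errors from the LMM
print(ch_model.bse)  # one SE per ch_name:Chroma:Condition term
# Restrict to the channel terms and count distinct values
print(ch_model.bse.filter(like="ch_name").nunique())  # returns 1, i.e. identical SEs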