NaN error values - fNIRS Group Averaging

:red_question_mark: If you have a question or issue with MNE-Python, please include the following info:

  • MNE version: 1.11.0
  • operating system: macOS Sequoia 15.5

Hi guys !

I’m trying to reproduce the group averaging results of R.Luke in this article : Luke R, Larson E, Shader MJ, Innes-Brown H, Van Yper L, Lee AKC, et al. Analysis methods for measuring passive auditory fNIRS responses generated by a block-design paradigm. Neurophotonics. avr 2021;8(2):025008.

I’m having trouble with channel losses due to post-SCI exclusion: the plot_compare_evokeds function can’t work properly because subjects 7 and 13 lost too much data during the pipeline.

Here is the exact info :

=== Subject 7 ===
Total bad channels: 32 / 66
LIFG: 2/10 channels bad
Bad: [‘S3_D14 760’, ‘S3_D14 850’]
LSTG: 4/10 channels bad
Bad: [‘S6_D5 760’, ‘S7_D7 760’, ‘S6_D5 850’, ‘S7_D7 850’]
RSTG: 2/10 channels bad
Bad: [‘S11_D9 760’, ‘S11_D9 850’]

=== Subject 13 ===
Total bad channels: 38 / 66
LIFG: 4/10 channels bad
Bad: [‘S2_D1 760’, ‘S3_D14 760’, ‘S2_D1 850’, ‘S3_D14 850’]
LSTG: 6/10 channels bad
Bad: [‘S6_D5 760’, ‘S7_D6 760’, ‘S7_D7 760’, ‘S6_D5 850’, ‘S7_D6 850’, ‘S7_D7 850’]
RSTG: 8/10 channels bad
Bad: [‘S10_D7 760’, ‘S10_D8 760’, ‘S11_D9 760’, ‘S11_D12 760’, ‘S10_D7 850’, ‘S10_D8 850’, ‘S11_D9 850’, ‘S11_D12 850’]

I still get this error even after excluding subjects 7 and 13: ValueError: Some of the values to be plotted are NaN.

Here is where I’m at:

# Location of the dataset on disk
fnirs_data_folder = "/Users/kahim/mne_data/fNIRS-block-speech-noise"
fnirs_data_folder

# Region-of-interest definitions: ROI name -> list of [source, detector] pairs
rois_coord = {
    "LIFG": [[1, 1], [2, 1], [3, 1], [4, 1], [4, 2]],
    "LSTG": [[5, 3], [6, 2], [6, 5], [7, 6], [7, 7]],
    "RSTG": [[10, 7], [10, 8], [11, 9], [11, 12], [12, 10]],
}


# Single-subject analysis pipeline
def individual_analysis(bids_path, roi_definitions):
    """Run the single-subject fNIRS pipeline and return ROI-averaged evokeds.

    Parameters
    ----------
    bids_path : mne_bids.BIDSPath
        Path to the subject's SNIRF recording.
    roi_definitions : dict
        Mapping of ROI name -> list of ``[source, detector]`` pairs.

    Returns
    -------
    dict
        ``{condition: {'hbo': Evoked, 'hbr': Evoked}}`` where each Evoked
        holds one combined (mean) channel per ROI.
    """
    # Load the raw intensity data
    raw_intensity = mne.io.read_raw_snirf(bids_path, verbose=True)
    raw_intensity.load_data()
    print(f"\n=== Processing {bids_path.basename} ===")
    print(f"Initial channels: {len(raw_intensity.ch_names)}")
    print(f"Channel names: {raw_intensity.ch_names[:10]}...")

    # Rename trigger codes to condition labels and drop the "15.0"
    # (end-of-block) annotations
    raw_intensity.annotations.rename({"1.0": "Silence", "2.0": "Noise", "3.0": "Speech"})
    unwanted = np.nonzero(raw_intensity.annotations.description == "15.0")
    raw_intensity.annotations.delete(unwanted)

    # Convert to optical density
    raw_od = mne.preprocessing.nirs.optical_density(raw_intensity)

    # Mark channels with scalp coupling index < 0.8 as bad
    sci = mne.preprocessing.nirs.scalp_coupling_index(raw_od, l_freq=0.7, h_freq=1.45)
    raw_od.info["bads"] = list(compress(raw_od.ch_names, sci < 0.8))
    print(f"Bad channels marked: {len(raw_od.info['bads'])} out of {len(raw_od.ch_names)}")

    # Reduce physiological noise by regressing out the short channels
    corrected_od = mne_nirs.signal_enhancement.short_channel_regression(raw_od)

    # Keep only long channels (source-detector distance in (2 cm, 4 cm))
    picks = mne.pick_types(raw_intensity.info, meg=False, fnirs=True)
    dists = mne.preprocessing.nirs.source_detector_distances(raw_intensity.info, picks=picks)
    corrected_od.pick(picks[(dists > 0.02) & (dists < 0.04)])
    print(f"Channels after distance filter: {len(corrected_od.ch_names)}")
    print(f"Remaining channels: {corrected_od.ch_names}")

    # Resample to 3 Hz
    raw = corrected_od.copy()
    raw.resample(3)

    # Motion-artifact correction with TDDR
    corrected_tddr = mne.preprocessing.nirs.tddr(raw)

    # Convert to haemoglobin concentration via the modified Beer-Lambert
    # law (ppf=0.1, matching the MNE fNIRS group-analysis tutorial)
    corrected_haemo = mne.preprocessing.nirs.beer_lambert_law(corrected_tddr, ppf=0.1)

    print(f"After Beer-Lambert, channels: {len(corrected_haemo.ch_names)}")
    print(f"Channel types: {set([ch.split()[-1] for ch in corrected_haemo.ch_names])}")
    print(f"Sample channels: {corrected_haemo.ch_names[:10]}")
    print(corrected_haemo.ch_names)

    # Further motion correction using HbO/HbR negative correlation
    corrected_anti = mne_nirs.signal_enhancement.enhance_negative_correlation(corrected_haemo)

    # Band-pass filter to suppress residual physiological noise
    processed_haemo = corrected_anti.filter(0.01, 0.7, h_trans_bandwidth=0.3, l_trans_bandwidth=0.005)

    # Extract the events of interest
    events, event_dict = mne.events_from_annotations(processed_haemo)

    # Epoch the data (baseline-corrected, amplitude-rejected)
    reject_criteria = dict(hbo=100e-6)
    tmin, tmax = -3, 14

    corrected_epochs = mne.Epochs(
        processed_haemo,
        events,
        event_id=event_dict,
        tmin=tmin,
        tmax=tmax,
        reject=reject_criteria,
        reject_by_annotation=True,
        proj=True,
        baseline=(None, 0),
        preload=True,
        detrend=1,
        verbose=True,
    )

    print(f"Final epochs channels: {len(corrected_epochs.ch_names)}")
    print(f"Epochs shape: {corrected_epochs.get_data().shape}")

    # Map each ROI to channel indices in this subject's data.
    # BUG FIX: this previously read the module-level global `rois_coord`,
    # silently ignoring the `roi_definitions` argument.
    roi = {
        roi_name: picks_pair_to_idx(processed_haemo, coords)
        for roi_name, coords in roi_definitions.items()
    }

    subject_evokeds = {}

    for cond in event_dict:
        evoked = corrected_epochs[cond].average()

        # Split each ROI's indices by chromophore, since
        # picks_pair_to_idx returns both the hbo and the hbr channel
        # for every source-detector pair
        roi_hbo = {}
        roi_hbr = {}

        for roi_name, indices in roi.items():
            hbo_indices = [i for i in indices if 'hbo' in evoked.ch_names[i]]
            hbr_indices = [i for i in indices if 'hbr' in evoked.ch_names[i]]
            roi_hbo[roi_name] = hbo_indices
            roi_hbr[roi_name] = hbr_indices
            print(f"  ROI {roi_name}: HbO indices={hbo_indices}, HbR indices={hbr_indices}")

        print(f"  Available channels in evoked: {evoked.ch_names}")

        # Average the channels within each ROI.
        # NOTE(review): if every channel of a ROI is marked bad,
        # drop_bad=True leaves nothing to average and the combined
        # channel comes out as NaN, which later breaks
        # plot_compare_evokeds - consider skipping such subjects.
        roi_evoked_hbo = mne.channels.combine_channels(
            evoked, roi_hbo, method='mean', drop_bad=True, on_missing='raise'
        )
        roi_evoked_hbr = mne.channels.combine_channels(
            evoked, roi_hbr, method='mean', drop_bad=True, on_missing='raise'
        )

        print(f"Condition {cond} HbO ROI channels:", roi_evoked_hbo.ch_names)
        print(f"Condition {cond} HbR ROI channels:", roi_evoked_hbr.ch_names)

        subject_evokeds[cond] = {'hbo': roi_evoked_hbo, 'hbr': roi_evoked_hbr}

    return subject_evokeds


# Run the pipeline for every subject (skipping subject 2)
all_evokeds_hbo = defaultdict(list)
all_evokeds_hbr = defaultdict(list)

for sub in range(1, 18):
    # BUG FIX: the original condition was `sub == [2]`, which compares an
    # int to a list and is always False, so subject 2 was never skipped.
    if sub == 2:
        print(f"Skipping subject {sub} (data quality issues)")
        continue

    bids_path = BIDSPath(
        subject=f"{sub:02d}",
        session="01",
        task="AudioSpeechNoise",
        datatype="nirs",
        root=fnirs_data_folder,
        suffix="nirs",
        extension=".snirf",
    )

    print(f"Processing subject {sub}: {bids_path}")

    # Collect the per-condition ROI evokeds, split by chromophore
    subject_roi_data = individual_analysis(bids_path, rois_coord)
    for cond, evoked_dict in subject_roi_data.items():
        all_evokeds_hbo[cond].append(evoked_dict['hbo'])
        all_evokeds_hbr[cond].append(evoked_dict['hbr'])

pprint(all_evokeds_hbo)
pprint(all_evokeds_hbr)


# Sanity check: report subject counts, channel names, and any NaNs
print("\n=== Checking all_evokeds ===")
for cond_name in all_evokeds_hbo:
    hbo_list = all_evokeds_hbo[cond_name]
    hbr_list = all_evokeds_hbr[cond_name]
    print(f"{cond_name}: {len(hbo_list)} subjects")
    if hbo_list:
        print(f"  HbO channels: {hbo_list[0].ch_names}")
        print(f"  HbR channels: {hbr_list[0].ch_names}")

    # Flag any subject whose evoked data contains NaN values
    for idx, evoked in enumerate(hbo_list, start=1):
        if np.isnan(evoked.data).any():
            print(f"  WARNING: Subject {idx} HbO has NaN values in {cond_name}")
    for idx, evoked in enumerate(hbr_list, start=1):
        if np.isnan(evoked.data).any():
            print(f"  WARNING: Subject {idx} HbR has NaN values in {cond_name}")


# Group-level plot of the mean HRF per ROI (rows) and condition (columns)
conditions = ["Silence", "Noise", "Speech"]
roi_names = ["LIFG", "LSTG", "RSTG"]
lims = dict(hbo=[-2, 4], hbr=[-2, 4])

fig, axes = plt.subplots(len(roi_names), len(conditions), figsize=(15, 10))

for row, roi in enumerate(roi_names):
    for col, cond in enumerate(conditions):
        panel = axes[row, col]

        # One call draws HbO and HbR together; picks is the positional
        # index of the ROI channel in the combined evoked
        plot_compare_evokeds(
            {'HbO': all_evokeds_hbo[cond], 'HbR': all_evokeds_hbr[cond]},
            picks=row,
            combine='mean',
            axes=panel,
            show=False,
            colors={'HbO': 'r', 'HbR': 'b'},
            legend=False,
            ylim=lims,
            ci=0.95,
        )

        # Condition titles on the top row, ROI labels on the first column
        if row == 0:
            panel.set_title(cond, fontweight='bold', fontsize=14)
        if col == 0:
            panel.set_ylabel(f"{roi}\nΔμM", fontweight='bold')
        else:
            panel.set_ylabel("")

        # Reference lines at zero amplitude and stimulus onset
        panel.axhline(0, color='k', linewidth=0.5)
        panel.axvline(0, color='k', linewidth=0.5, linestyle='--')

# Single shared legend in the first panel
axes[0, 0].legend(['Oxyhaemoglobin', 'Deoxyhaemoglobin'], loc='upper left', fontsize=8)

plt.tight_layout()
plt.show()

Any ideas on how to get past this?
Thanks!

The NaNs are for subjects 8 and 14. They arise because all the channels in some of the ROIs are marked as bad.

Thanks, it works perfectly now.

1 Like

This topic was automatically closed 7 days after the last reply. New replies are no longer allowed.