File: linear_model_patterns.py

"""
.. _ex-linear-patterns:

===============================================================
Linear classifier on sensor data with plot patterns and filters
===============================================================

Here decoding, a.k.a. MVPA or supervised machine learning, is applied to M/EEG
data in sensor space. We fit a linear classifier wrapped in the LinearModel
object, which provides topographical patterns that are more
neurophysiologically interpretable :footcite:`HaufeEtAl2014` than the
classifier filters (weight vectors). The patterns explain how the MEG and EEG
data were generated from the discriminant neural sources, which are extracted
by the filters. Note that patterns and filters are more similar for MEG than
for EEG data because the noise is less spatially correlated in MEG than in
EEG.
"""
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
#          Romain Trachel <trachelr@gmail.com>
#          Jean-Rémi King <jeanremi.king@gmail.com>
#
# License: BSD-3-Clause
# Copyright the MNE-Python contributors.

# %%

from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler

import mne
from mne import EvokedArray, io
from mne.datasets import sample

# import the LinearModel wrapper and decoding helpers from mne.decoding
from mne.decoding import LinearModel, Vectorizer, get_coef

print(__doc__)

data_path = sample.data_path()
sample_path = data_path / "MEG" / "sample"

# %%
# Set parameters
raw_fname = sample_path / "sample_audvis_filt-0-40_raw.fif"
event_fname = sample_path / "sample_audvis_filt-0-40_raw-eve.fif"
tmin, tmax = -0.1, 0.4
event_id = dict(aud_l=1, vis_l=3)

# Setup for reading the raw data
raw = io.read_raw_fif(raw_fname, preload=True)
raw.filter(0.5, 25, fir_design="firwin")
events = mne.read_events(event_fname)

# Read epochs
epochs = mne.Epochs(
    raw, events, event_id, tmin, tmax, proj=True, decim=2, baseline=None, preload=True
)
del raw

labels = epochs.events[:, -1]

# get MEG data
meg_epochs = epochs.copy().pick(picks="meg", exclude="bads")
meg_data = meg_epochs.get_data(copy=False).reshape(len(labels), -1)
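
# ``meg_data`` now has shape (n_epochs, n_channels * n_times): each epoch is
# flattened into one long feature vector, which is what the scikit-learn
# estimators below expect
print(meg_data.shape)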

# %%
# Decoding in sensor space using a LogisticRegression classifier
# --------------------------------------------------------------

clf = LogisticRegression(solver="liblinear")  # liblinear is faster than lbfgs
scaler = StandardScaler()

# create a linear model with LogisticRegression
model = LinearModel(clf)

# fit the classifier on MEG data
X = scaler.fit_transform(meg_data)
model.fit(X, labels)

# Extract and plot spatial filters and spatial patterns
for name, coef in (("patterns", model.patterns_), ("filters", model.filters_)):
    # We fit the linear model on z-scored data. To make the patterns and
    # filters interpretable, we must undo this normalization step
    coef = scaler.inverse_transform([coef])[0]

    # The data was vectorized to fit a single model across all time points and
    # all channels. We thus reshape the coefficients back to
    # (n_channels, n_times):
    coef = coef.reshape(len(meg_epochs.ch_names), -1)

    # Plot
    evoked = EvokedArray(coef, meg_epochs.info, tmin=epochs.tmin)
    fig = evoked.plot_topomap()
    fig.suptitle(f"MEG {name}")
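
# %%
# A minimal sketch of the filter/pattern relation described above: following
# :footcite:`HaufeEtAl2014`, the patterns should correspond, up to a scalar,
# to the feature covariance applied to the filters. We recompute them from the
# z-scored data ``X`` and compare with what ``LinearModel`` stored (numpy is
# imported here only for this check).

import numpy as np  # noqa: E402

w = np.ravel(model.filters_)  # classifier weight vector, flattened
Xc = X - X.mean(axis=0)  # center the (already z-scored) features
# Cov(X) @ w, computed without materializing the full covariance matrix
patterns_manual = Xc.T @ (Xc @ w) / (len(Xc) - 1)
corr = np.corrcoef(patterns_manual, np.ravel(model.patterns_))[0, 1]
print(f"Correlation between manual and stored patterns: {corr:.3f}")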

# %%
# Let's do the same on EEG data using a scikit-learn pipeline
# -----------------------------------------------------------

X = epochs.pick(picks="eeg", exclude="bads")
y = epochs.events[:, 2]

# Define a single pipeline that will sequentially:
clf = make_pipeline(
    Vectorizer(),  # 1) vectorize across time and channels
    StandardScaler(),  # 2) normalize features across trials
    LinearModel(  # 3) fit a logistic regression
        LogisticRegression(solver="liblinear")
    ),
)
clf.fit(X, y)

# Extract and plot patterns and filters
for name in ("patterns_", "filters_"):
    # With ``inverse_transform=True``, ``get_coef`` passes the coefficients
    # back through the ``inverse_transform`` method of each transformer in the
    # pipeline, in reverse order, so they live in the original channel/time
    # space.
    coef = get_coef(clf, name, inverse_transform=True)
    evoked = EvokedArray(coef, epochs.info, tmin=epochs.tmin)
    fig = evoked.plot_topomap()
    fig.suptitle(f"EEG {name[:-1]}")
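
# %%
# As a rough sketch of decoding performance, the same pipeline can also be
# scored with cross-validation; the ``Vectorizer`` step takes care of turning
# the 3D epochs array into 2D features. The 5-fold accuracy used here is just
# an illustrative choice.

from sklearn.model_selection import cross_val_score  # noqa: E402

scores = cross_val_score(clf, X.get_data(copy=False), y, cv=5, scoring="accuracy")
print(f"Mean cross-validated accuracy (EEG): {scores.mean():.2f}")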

# %%
# References
# ----------
# .. footbibliography::