In [8]:
from mne import Epochs, find_events, set_eeg_reference, read_epochs, viz
from time import time, strftime, gmtime
from collections import OrderedDict
from glob import glob
from collections import OrderedDict
from mne import create_info, concatenate_raws
from mne.io import RawArray
from mne.channels import read_montage
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import sys, os
sys.path.append(os.path.join(os.path.dirname(os.getcwd()), 'app','utils','jupyter'))
import utils
%matplotlib


Using matplotlib backend: Qt5Agg

In [3]:
files = ['/home/dano/eeg-notebooks/data/visual/N170/subject1/session1/data_2017-09-13-15.30.01.csv',
'/home/dano/eeg-notebooks/data/visual/N170/subject1/session1/data_2017-09-13-15.32.50.csv',
'/home/dano/eeg-notebooks/data/visual/N170/subject1/session1/data_2017-09-13-15.35.26.csv',
'/home/dano/eeg-notebooks/data/visual/N170/subject1/session1/data_2017-09-13-15.40.17.csv',
'/home/dano/eeg-notebooks/data/visual/N170/subject1/session1/data_2017-09-13-15.42.33.csv',
'/home/dano/eeg-notebooks/data/visual/N170/subject1/session1/data_2017-09-13-15.45.08.csv']
sfreq=256
ch_ind = [0,1,2,3]
stim_ind = 4
replace_ch_names = None
#raw = utils.load_data(standard, replace_ch_names)
raw = utils.load_data(files, replace_ch_names).drop_channels(['Right AUX'])


['/home/dano/eeg-notebooks/data/visual/N170/subject1/session1/data_2017-09-13-15.30.01.csv', '/home/dano/eeg-notebooks/data/visual/N170/subject1/session1/data_2017-09-13-15.32.50.csv', '/home/dano/eeg-notebooks/data/visual/N170/subject1/session1/data_2017-09-13-15.35.26.csv', '/home/dano/eeg-notebooks/data/visual/N170/subject1/session1/data_2017-09-13-15.40.17.csv', '/home/dano/eeg-notebooks/data/visual/N170/subject1/session1/data_2017-09-13-15.42.33.csv', '/home/dano/eeg-notebooks/data/visual/N170/subject1/session1/data_2017-09-13-15.45.08.csv']
The following EEG sensors did not have a position specified in the selected montage: ['Right AUX']. Their position has been left untouched.
Creating RawArray with float64 data, n_channels=6, n_times=30732
    Range : 0 ... 30731 =      0.000 ...   120.043 secs
Ready.
The following EEG sensors did not have a position specified in the selected montage: ['Right AUX']. Their position has been left untouched.
Creating RawArray with float64 data, n_channels=6, n_times=30732
    Range : 0 ... 30731 =      0.000 ...   120.043 secs
Ready.
The following EEG sensors did not have a position specified in the selected montage: ['Right AUX']. Their position has been left untouched.
Creating RawArray with float64 data, n_channels=6, n_times=30732
    Range : 0 ... 30731 =      0.000 ...   120.043 secs
Ready.
The following EEG sensors did not have a position specified in the selected montage: ['Right AUX']. Their position has been left untouched.
Creating RawArray with float64 data, n_channels=6, n_times=30732
    Range : 0 ... 30731 =      0.000 ...   120.043 secs
Ready.
The following EEG sensors did not have a position specified in the selected montage: ['Right AUX']. Their position has been left untouched.
Creating RawArray with float64 data, n_channels=6, n_times=30720
    Range : 0 ... 30719 =      0.000 ...   119.996 secs
Ready.
/home/dano/BrainWaves/app/utils/jupyter/utils.py:67: RuntimeWarning: The following EEG sensors did not have a position specified in the selected montage: ['Right AUX']. Their position has been left untouched.
  sfreq=sfreq, montage=montage)
/home/dano/BrainWaves/app/utils/jupyter/utils.py:67: RuntimeWarning: The following EEG sensors did not have a position specified in the selected montage: ['Right AUX']. Their position has been left untouched.
  sfreq=sfreq, montage=montage)
/home/dano/BrainWaves/app/utils/jupyter/utils.py:67: RuntimeWarning: The following EEG sensors did not have a position specified in the selected montage: ['Right AUX']. Their position has been left untouched.
  sfreq=sfreq, montage=montage)
/home/dano/BrainWaves/app/utils/jupyter/utils.py:67: RuntimeWarning: The following EEG sensors did not have a position specified in the selected montage: ['Right AUX']. Their position has been left untouched.
  sfreq=sfreq, montage=montage)
/home/dano/BrainWaves/app/utils/jupyter/utils.py:67: RuntimeWarning: The following EEG sensors did not have a position specified in the selected montage: ['Right AUX']. Their position has been left untouched.
  sfreq=sfreq, montage=montage)
The following EEG sensors did not have a position specified in the selected montage: ['Right AUX']. Their position has been left untouched.
Creating RawArray with float64 data, n_channels=6, n_times=30732
    Range : 0 ... 30731 =      0.000 ...   120.043 secs
Ready.
/home/dano/BrainWaves/app/utils/jupyter/utils.py:67: RuntimeWarning: The following EEG sensors did not have a position specified in the selected montage: ['Right AUX']. Their position has been left untouched.
  sfreq=sfreq, montage=montage)

In [4]:
raw.filter(1,30, method='iir')
raw.plot_psd(fmin=1, fmax=30);


Setting up band-pass filter from 1 - 30 Hz
Setting up band-pass filter from 1 - 30 Hz
Setting up band-pass filter from 1 - 30 Hz
Setting up band-pass filter from 1 - 30 Hz
Setting up band-pass filter from 1 - 30 Hz
Setting up band-pass filter from 1 - 30 Hz
Effective window size : 8.000 (s)

In [47]:
raw.plot_psd()


Effective window size : 8.000 (s)
/home/dano/anaconda3/envs/brainwaves/lib/python3.5/site-packages/matplotlib/colors.py:251: RuntimeWarning: invalid value encountered in less
  if np.any((result < 0) | (result > 1)):
/home/dano/anaconda3/envs/brainwaves/lib/python3.5/site-packages/matplotlib/colors.py:251: RuntimeWarning: invalid value encountered in greater
  if np.any((result < 0) | (result > 1)):
Out[47]:

In [5]:
# Create an array containing the timestamps and type of each stimulus (i.e. face or house)
events = find_events(raw)
house_id = {'House': 1}

# Create an MNE Epochs object representing all the epochs around stimulus presentation
house_epochs = Epochs(raw, events=events, event_id=house_id, 
                tmin=-0.1, tmax=0.8, baseline=None,
                reject={'eeg': 75e-6}, preload=True, 

                verbose=False, picks=[0,1,2,3])
face_id = {'Face': 2}

# Create an MNE Epochs object representing all the epochs around stimulus presentation
face_epochs = Epochs(raw, events=events, event_id=face_id, 
                tmin=-0.1, tmax=0.8, baseline=None,
                reject={'eeg': 75e-6}, preload=True, 

                verbose=False, picks=[0,1,2,3])


1174 events found
Event IDs: [1 2]

In [6]:
face_epochs


Out[6]:
<Epochs  |   0 events (all good), -0.101562 - 0.800781 sec, baseline off, ~18 kB, data loaded,
 'Face': 0>

In [7]:
face_epochs.plot_image(title="Faces T7",picks=[3])
house_epochs.plot_image(title="Houses T7",picks=[3])


---------------------------------------------------------------------------
ValueError                                Traceback (most recent call last)
<ipython-input-7-46963ebd949e> in <module>()
----> 1 face_epochs.plot_image(title="Faces T7",picks=[3])
      2 house_epochs.plot_image(title="Houses T7",picks=[3])

~/anaconda3/envs/brainwaves/lib/python3.5/site-packages/mne/epochs.py in plot_image(self, picks, sigma, vmin, vmax, colorbar, order, show, units, scalings, cmap, fig, axes, overlay_times, combine, group_by, evoked, ts_args, title)
   1084                                  overlay_times=overlay_times, combine=combine,
   1085                                  group_by=group_by, evoked=evoked,
-> 1086                                  ts_args=ts_args, title=title)
   1087 
   1088     @verbose

~/anaconda3/envs/brainwaves/lib/python3.5/site-packages/mne/viz/epochs.py in plot_epochs_image(epochs, picks, sigma, vmin, vmax, colorbar, order, show, units, scalings, cmap, fig, axes, overlay_times, combine, group_by, evoked, ts_args, title)
    231         group.extend(_prepare_epochs_image_im_data(
    232             epochs, ch_type, overlay_times, order, sigma, vmin, vmax,
--> 233             scalings[ch_type], ts_args))
    234         if vmin is None or vmax is None:  # equalize across groups
    235             this_vmin, this_vmax, this_ylim = group[-3:]

~/anaconda3/envs/brainwaves/lib/python3.5/site-packages/mne/viz/epochs.py in _prepare_epochs_image_im_data(epochs, ch_type, overlay_times, order, sigma, vmin, vmax, scaling, ts_args)
    430     scale_vmax = True if (vmax is None or callable(vmax)) else False
    431     vmin, vmax = _setup_vmin_vmax(
--> 432         data, vmin, vmax, norm=(data.min() >= 0) and (vmin is None))
    433     if not scale_vmin:
    434         vmin /= scaling

~/anaconda3/envs/brainwaves/lib/python3.5/site-packages/numpy/core/_methods.py in _amin(a, axis, out, keepdims)
     27 
     28 def _amin(a, axis=None, out=None, keepdims=False):
---> 29     return umr_minimum(a, axis, None, out, keepdims)
     30 
     31 def _sum(a, axis=None, dtype=None, out=None, keepdims=False):

ValueError: zero-size array to reduction operation minimum which has no identity

In [102]:
face_epochs.plot_topo_image()


Out[102]:

In [69]:
%matplotlib tk
import numpy as np
import matplotlib.pyplot as plt
from mne.datasets import sample
from mne import read_evokeds

house_evoked = house_epochs.average()
face_evoked = face_epochs.average()
# set time instants in seconds (from 50 to 150ms in a step of 10ms)
times = np.arange(0.05, 0.30, 0.02)
# If times is set to None only 10 regularly spaced topographies will be shown

# plot eeg data as topomaps
#evoked.plot_topomap(times, ch_type='eeg', time_unit='s')

# compute a 50 ms bin to stabilize topographies
house_evoked.plot_topomap(times, title='Houses',  ch_type='eeg', average=0.05, time_unit='s')
face_evoked.plot_topomap(times, title='Faces',  ch_type='eeg', average=0.05, time_unit='s')


# plot eeg data as an animation
#house_evoked.animate_topomap(ch_type='eeg', times=times, frame_rate=10,
#                       time_unit='s')
# plot eeg data as an animation
#face_evoked.animate_topomap(ch_type='eeg', times=times, frame_rate=10,
#                       time_unit='s')


Warning: Cannot change to a different GUI toolkit: tk. Using qt5 instead.
Out[69]:

In [71]:
house_evoked.plot(window_title="House")
face_evoked.plot(window_title="Face")


time_unit defaults to "ms" in 0.16 but will change to "s" in 0.17, set it explicitly to avoid this warning
time_unit defaults to "ms" in 0.16 but will change to "s" in 0.17, set it explicitly to avoid this warning
<ipython-input-71-e621e3999a39>:1: DeprecationWarning: time_unit defaults to "ms" in 0.16 but will change to "s" in 0.17, set it explicitly to avoid this warning
  house_evoked.plot(window_title="House")
<ipython-input-71-e621e3999a39>:2: DeprecationWarning: time_unit defaults to "ms" in 0.16 but will change to "s" in 0.17, set it explicitly to avoid this warning
  face_evoked.plot(window_title="Face")
Out[71]:

In [76]:
house_evoked.plot_image()
face_evoked.plot_image()


time_unit defaults to "ms" in 0.16 but will change to "s" in 0.17, set it explicitly to avoid this warning
time_unit defaults to "ms" in 0.16 but will change to "s" in 0.17, set it explicitly to avoid this warning
<ipython-input-76-05ff36021d47>:1: DeprecationWarning: time_unit defaults to "ms" in 0.16 but will change to "s" in 0.17, set it explicitly to avoid this warning
  house_evoked.plot_image()
<ipython-input-76-05ff36021d47>:2: DeprecationWarning: time_unit defaults to "ms" in 0.16 but will change to "s" in 0.17, set it explicitly to avoid this warning
  face_evoked.plot_image()
Out[76]:

In [80]:
house_evoked.plot_joint(times="peaks", title="Houses")
face_evoked.plot_joint(times="peaks", title="Faces")


time_unit defaults to "ms" in 0.16 but will change to "s" in 0.17, set it explicitly to avoid this warning
<ipython-input-80-8cfad5bd48bd>:1: DeprecationWarning: time_unit defaults to "ms" in 0.16 but will change to "s" in 0.17, set it explicitly to avoid this warning
  house_evoked.plot_joint(times="peaks", title="Houses")
time_unit defaults to "ms" in 0.16 but will change to "s" in 0.17, set it explicitly to avoid this warning
<ipython-input-80-8cfad5bd48bd>:1: DeprecationWarning: time_unit defaults to "ms" in 0.16 but will change to "s" in 0.17, set it explicitly to avoid this warning
  house_evoked.plot_joint(times="peaks", title="Houses")
time_unit defaults to "ms" in 0.16 but will change to "s" in 0.17, set it explicitly to avoid this warning
<ipython-input-80-8cfad5bd48bd>:2: DeprecationWarning: time_unit defaults to "ms" in 0.16 but will change to "s" in 0.17, set it explicitly to avoid this warning
  face_evoked.plot_joint(times="peaks", title="Faces")
time_unit defaults to "ms" in 0.16 but will change to "s" in 0.17, set it explicitly to avoid this warning
<ipython-input-80-8cfad5bd48bd>:2: DeprecationWarning: time_unit defaults to "ms" in 0.16 but will change to "s" in 0.17, set it explicitly to avoid this warning
  face_evoked.plot_joint(times="peaks", title="Faces")
Out[80]:

In [87]:
house_evoked.plot_topo(title="Houses")
face_evoked.plot_topo(title="Houses")


Out[87]:

In [89]:



---------------------------------------------------------------------------
TypeError                                 Traceback (most recent call last)
<ipython-input-89-6e2834f072e9> in <module>()
----> 1 house_evoked.plot_white()

TypeError: plot_white() missing 1 required positional argument: 'noise_cov'

In [12]:
%matplotlib
clean_epochs_plot = face_epochs.plot(scalings='auto');
fig = plt.gcf()
fig.canvas.manager.window.activateWindow()
fig.canvas.manager.window.raise_()


Using matplotlib backend: Qt5Agg
---------------------------------------------------------------------------
IndexError                                Traceback (most recent call last)
<ipython-input-12-72180c287de0> in <module>()
      1 get_ipython().run_line_magic('matplotlib', '')
----> 2 clean_epochs_plot = face_epochs.plot(scalings='auto');
      3 fig = plt.gcf()
      4 fig.canvas.manager.window.activateWindow()
      5 fig.canvas.manager.window.raise_()

~/anaconda3/envs/brainwaves/lib/python3.5/site-packages/mne/epochs.py in plot(self, picks, scalings, n_epochs, n_channels, title, events, event_colors, show, block, decim, noise_cov)
    944                            title=title, events=events,
    945                            event_colors=event_colors, show=show, block=block,
--> 946                            decim=decim, noise_cov=noise_cov)
    947 
    948     @copy_function_doc_to_method_doc(plot_epochs_psd)

~/anaconda3/envs/brainwaves/lib/python3.5/site-packages/mne/viz/epochs.py in plot_epochs(epochs, picks, scalings, n_epochs, n_channels, title, events, event_colors, show, block, decim, noise_cov)
    798     """
    799     epochs.drop_bad()
--> 800     scalings = _compute_scalings(scalings, epochs)
    801     scalings = _handle_default('scalings_plot_raw', scalings)
    802     decim, data_picks = _handle_decim(epochs.info.copy(), decim, None)

~/anaconda3/envs/brainwaves/lib/python3.5/site-packages/mne/viz/utils.py in _compute_scalings(scalings, inst)
   1624             raise ValueError("Sensor {0} doesn't exist in data".format(key))
   1625         this_data = data[ch_types[key]]
-> 1626         scale_factor = np.percentile(this_data.ravel(), [0.5, 99.5])
   1627         scale_factor = np.max(np.abs(scale_factor))
   1628         scalings[key] = scale_factor

~/anaconda3/envs/brainwaves/lib/python3.5/site-packages/numpy/lib/function_base.py in percentile(a, q, axis, out, overwrite_input, interpolation, keepdims)
   4289     r, k = _ureduce(a, func=_percentile, q=q, axis=axis, out=out,
   4290                     overwrite_input=overwrite_input,
-> 4291                     interpolation=interpolation)
   4292     if keepdims:
   4293         return r.reshape(q.shape + k)

~/anaconda3/envs/brainwaves/lib/python3.5/site-packages/numpy/lib/function_base.py in _ureduce(a, func, **kwargs)
   4031         keepdim = (1,) * a.ndim
   4032 
-> 4033     r = func(a, **kwargs)
   4034     return r, keepdim
   4035 

~/anaconda3/envs/brainwaves/lib/python3.5/site-packages/numpy/lib/function_base.py in _percentile(a, q, axis, out, overwrite_input, interpolation, keepdims)
   4403             n = np.isnan(ap[-1:, ...])
   4404 
-> 4405         x1 = take(ap, indices_below, axis=axis) * weights_below
   4406         x2 = take(ap, indices_above, axis=axis) * weights_above
   4407 

~/anaconda3/envs/brainwaves/lib/python3.5/site-packages/numpy/core/fromnumeric.py in take(a, indices, axis, out, mode)
    157            [5, 7]])
    158     """
--> 159     return _wrapfunc(a, 'take', indices, axis=axis, out=out, mode=mode)
    160 
    161 

~/anaconda3/envs/brainwaves/lib/python3.5/site-packages/numpy/core/fromnumeric.py in _wrapfunc(obj, method, *args, **kwds)
     50 def _wrapfunc(obj, method, *args, **kwds):
     51     try:
---> 52         return getattr(obj, method)(*args, **kwds)
     53 
     54     # An AttributeError occurs if the object does not have

IndexError: cannot do a non-empty take from an empty axes.

In [ ]:
## Plot ERPs

# Input
ch_ind = 10

# Computation
conditions = OrderedDict({key: [value] for (key, value) in event_id.items()})

# Output
X, y = utils.plot_conditions(epochs, ch_ind=ch_ind, conditions=conditions, 
                                ci=97.5, n_boot=1000, title='',)

In [ ]:
{'House': 3, 'Face': 4}.items()

In [ ]:


In [ ]: