In [1]:
# Import necessary libraries
import os
import numpy as np
import scipy.io
import matplotlib
from matplotlib import pyplot as plt
import itertools
from mpl_toolkits.axes_grid1 import make_axes_locatable
from sklearn.decomposition import PCA
import scipy.stats as stats
from scipy.spatial import distance as Distance
# pretty charting
import seaborn as sns
sns.set_palette('muted')
sns.set_style('darkgrid')
%matplotlib inline
In [11]:
#### Extract vocalization data into a dictionary for a subject / target word
#### dictionary{'meta', channel: {timeZero, timeVocalization, powerMat}}
def extractSubjVocalizedData(subj, word):
    # file directory for this subject / target word
    filedir = '../../condensed_data_' + subj + '/summary_vocalization/' + word

    # initialize data dictionary with meta data
    data_dict = {}
    data_dict['meta'] = {'subject': subj,
                         'word': word}

    all_channel_mats = os.listdir(filedir)
    for channel in all_channel_mats:  # loop through all channel .mat files
        chan_file = filedir + '/' + channel

        ## 00: load in data
        data = scipy.io.loadmat(chan_file)
        data = data['data']

        ## 01: get the time point for probe word on
        timeZero = data['timeZero'][0][0][0]

        ## 02: get the time point of vocalization
        vocalization = data['vocalization'][0][0][0]

        ## 03: get power matrix
        power_matrix = data['powerMatZ'][0][0]

        chan = channel.split('_')[0]

        # store this channel's data in the dictionary
        data_dict[chan] = {'timeZero': timeZero,
                           'timeVocalization': vocalization,
                           'powerMat': power_matrix}

    return data_dict
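In [ ]:
# Illustrative sketch (not from the original notebook) of the dictionary layout
# that extractSubjVocalizedData returns. The channel label '1', the word 'JUICE',
# and the (events x frequency bands x time bins) power-matrix shape are assumptions,
# not taken from the actual data files.
example_structure = {
    'meta': {'subject': 'NIH034', 'word': 'JUICE'},   # hypothetical target word
    '1': {'timeZero': np.array([10]),                 # time index when probe word appears
          'timeVocalization': np.array([45]),         # time index of vocalization onset
          'powerMat': np.zeros((20, 7, 100))},        # assumed (events x freq x time) layout
}
print(example_structure['1']['powerMat'].shape)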
In [12]:
######## Get list of target-word directories (each holding channel .mat files) ########
subj = 'NIH034'  # change the directories if you want
filedir = '../../condensed_data_' + subj + '/summary_vocalization/'
targetWords = os.listdir(filedir)
print(targetWords)

for word in targetWords:
    wordDir = filedir + word

    ## 01: extract the data of every channel for this subject and target word
    wordData = extractSubjVocalizedData(subj, word)
    print(wordData.keys())
    break
In [ ]:
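# A minimal follow-up sketch, assuming the power matrix is laid out as
# (events x frequency bands x time bins): pull one channel from the wordData
# dictionary built above and look at a window around vocalization onset.
# The window size (+/- 10 time bins) and channel choice are illustrative only.
chan_keys = [k for k in wordData.keys() if k != 'meta']
first_chan = chan_keys[0]
power_mat = wordData[first_chan]['powerMat']
vocal_idx = int(wordData[first_chan]['timeVocalization'][0])
window = power_mat[:, :, max(vocal_idx - 10, 0):vocal_idx + 10]
print(first_chan, power_mat.shape, window.shape)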