In [6]:
import cPickle as pickle
import numpy as np
import pandas as pd
import h5py
import matplotlib.pyplot as plt
import matplotlib as mpl
import CAM_NWB as cn
%matplotlib inline
In [7]:
# path = '/Volumes/Brain2015/CAM/'
# baseline_path must contain the pkl meta files plus a "baselines" dir
# holding the pre-computed per-area means of the traces.
baseline_path = '/Users/etaralova/src/pa_2015/'
# specimen group to process
group = '185282'
# suffix identifying how the baseline mean was computed
baseline_method = '_mean_preStim_responses'
In [9]:
#the actual traces:
traces = np.load('/Volumes/Brain2015/Steph/' + 'traces_' + group + '.npy')
#traces is a 3d array: #rois, #trials, responses (extracted with Extract CAM data by area),
#where responses are computed with intersweep=30 before and after the trial, and sweeplength = 60
#meta:
pklfile = open(baseline_path + 'CAM_Meta.pkl', 'r') # importing the compiled CAM meta data
CAM_Meta = pickle.load(pklfile)
pklfile.close()
CAM_Meta_X = CAM_Meta[(CAM_Meta.specimen==group)]
group_ids = list(CAM_Meta_X.lims_id)
print group_ids
In [ ]:
#take the DFF traces and output them in csv format along with orient and temp_f info
#skeleton code taken from S. Seeman, modified by E. Taralova
for x, filename in enumerate(group_ids):
print x,filename
exp_traces = traces[x]
baseline = np.load(path + 'baselines' + '/' + filename + baseline_method + '.npy')
baseline = baseline.T
DFF_traces = np.nan*np.ones(exp_traces.shape)
for counter in range(DFF_traces.shape[2]): #DF/F calculation (exp_traces/baseline)-1
DFF_traces[:,:,counter] = (exp_traces[:,:,counter]/baseline)-1
rois_N = DFF_traces.shape[0]
response_frames_N = DFF_traces.shape[2]
stimparams = cn.getStimulusTable('/Volumes/Brain2015/CAM/' + filename + '/' + filename + '.nwb')
orientations = np.unique(stimparams['orientation'])
orientations = orientations[~np.isnan(orientations)]
print 'Orientations: ', orientations
TFs = np.unique(stimparams['temporal_frequency'])
TFs = TFs[~np.isnan(TFs)]
print 'TFs: ', TFs
temp_traces = np.zeros((rois_N,0))
temp_orient = []
temp_freq = []
temp_trial = []
for i, ori in enumerate(orientations):
print "orient: ", ori
for k, tf in enumerate(TFs):
#print "freq: ", tf
stimpairs= stimparams[(stimparams.orientation==ori) & (stimparams.temporal_frequency==tf)]
stimpairs_index = list(stimpairs.index)
t = DFF_traces[:,stimpairs_index,:]
t = t.squeeze()
t = np.reshape(t, (rois_N, len(stimpairs_index)*response_frames_N))
temp_traces = np.concatenate((temp_traces, t), axis=1)
temp_orient = np.concatenate((temp_orient, ori*np.ones(len(stimpairs_index)*response_frames_N)))
temp_freq = np.concatenate((temp_freq, tf*np.ones(len(stimpairs_index)*response_frames_N)))
for j, trial_id in enumerate(stimpairs_index):
temp_trial = np.concatenate((temp_trial, trial_id*np.ones((response_frames_N))))
print temp_traces.shape, temp_orient.shape
fsave = '/Users/etaralova/src/pa_2015/baselines/dff_data_' + filename + baseline_method + '.csv'
np.savetxt(fsave, temp_traces, fmt='%0.5f',delimiter=',')
fsave = '/Users/etaralova/src/pa_2015/baselines/dff_orient_' + filename + baseline_method + '.csv'
np.savetxt(fsave, temp_orient, fmt='%0d',delimiter=',')
fsave = '/Users/etaralova/src/pa_2015/baselines/dff_freq_' + filename + baseline_method + '.csv'
np.savetxt(fsave, temp_freq, fmt='%0d',delimiter=',')
fsave = '/Users/etaralova/src/pa_2015/baselines/dff_trials_' + filename + baseline_method + '.csv'
np.savetxt(fsave, temp_trial, fmt='%0d',delimiter=',')
print 'saved: ', fsave
In [148]:
#this code was written by S. Seeman
tuning={}
for x, filename in enumerate(group_ids):
print x,filename
exp_traces = traces[x]
baseline = np.load(path + 'baselines' + '/' + filename + baseline_method + '.npy')
baseline = baseline.T
DFF_traces = np.nan*np.ones(exp_traces.shape)
for counter in range(DFF_traces.shape[2]): #DF/F calculation (exp_traces/baseline)-1
DFF_traces[:,:,counter] = (exp_traces[:,:,counter]/baseline)-1
mean_response= np.mean(DFF_traces[:,:,30:90], axis=2)
stimparams = cn.getStimulusTable('/Volumes/Brain2015/CAM/' + filename + '/' + filename + '.nwb')
orientations = np.unique(stimparams['orientation'])
orientations = orientations[~np.isnan(orientations)]
print 'Orientations: ', orientations
TFs = np.unique(stimparams['temporal_frequency'])
TFs = TFs[~np.isnan(TFs)]
print 'TFs: ', TFs
summary_order = ['Orientation','Temporal_Frequency','# Trials']
stim_response = pd.DataFrame(index = range(len(orientations)*len(TFs)),
columns = summary_order + range(mean_response.shape[0]))
blank_index = stimparams[(stimparams.blank_sweep)==1].index
blank_trials = DFF_traces[:,blank_index,:]
blank_mean = np.mean(mean_response[:,blank_index], axis=1)
blank_sd = np.std(mean_response[:,blank_index], axis=1)
s=0
for i, ori in enumerate(orientations):
for k, tf in enumerate(TFs):
stimpairs= stimparams[(stimparams.orientation==ori) & (stimparams.temporal_frequency==tf)]
stimpairs_index = list(stimpairs.index)
response = np.mean(mean_response[:,stimpairs_index], axis=1)
std_response = np.std(mean_response[:,stimpairs_index], axis=1)
stim_response_dict = {'Orientation': ori, 'Temporal_Frequency': tf,'#Trials': len(stimpairs_index)}
response_dict = {counter: response for counter, response in enumerate(response)}
stim_response_dict.update(response_dict)
stim_response.loc[s] = pd.Series(stim_response_dict)
s=s+1
temptuning=pd.DataFrame(index=range(DFF_traces.shape[0]), columns = ['ROI'] + summary_order[:2])
for roi in range(DFF_traces.shape[0]):
peak = stim_response[roi].idxmax()
ori_tune = stim_response.Orientation[peak]
ori_orth = ori_tune-90
tf_tune = stim_response.Temporal_Frequency[peak]
#orth_response = stim_response
tuning_dict = {'ROI':roi,'Orientation':ori_tune,'Temporal_Frequency':tf_tune}
temptuning.loc[roi] = pd.Series(tuning_dict)
#tuning[filename] = temptuning
fsave = '/Users/etaralova/src/pa_2015/baselines/tuning_' + filename + baseline_method + '.csv'
np.savetxt(fsave, temptuning, fmt='%0d,%0d,%0d',delimiter=',')
print 'Saved: ', fsave