In [23]:
import numpy as np
import conceptor.util as util
import conceptor.reservoir as reservoir
import conceptor.logic as logic
import conceptor.recognition as recog
from importlib import reload
reload(reservoir)
reload(recog)
reload(util)


Out[23]:
<module 'conceptor.util' from '/Users/xuhe/Documents/GSoC/speaker-recognition/src/Preprocessing/conceptor/util.py'>

In [17]:
# Load the Japanese Vowels data into Python

train = np.loadtxt('./ae.train')
test = np.loadtxt('./ae.test')

num_train = 270
num_test = 370

# Each utterance block ends with a separator row whose first entry is 1.0;
# collect the rows before each separator and transpose them to shape (12, length).
train_inputs = []
read_index = 0
for c in range(num_train):
    l = 0
    while train[read_index, 0] != 1.0:
        l += 1
        read_index += 1
    train_inputs.append(train[read_index - l : read_index, :].T)
    read_index += 1

test_inputs = []
read_index = 0
for c in range(num_test):
    l = 0
    while test[read_index, 0] != 1.0:
        l += 1
        read_index += 1
    test_inputs.append(test[read_index - l : read_index, :].T)
    read_index += 1
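As a quick sanity check (not part of the original notebook, and assuming the cell above has run), the parsing should yield one (12, length) array per utterance, 270 for training and 370 for testing:

assert len(train_inputs) == num_train and len(test_inputs) == num_test
assert all(p.shape[0] == 12 for p in train_inputs + test_inputs)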

In [18]:
train_inputs, shifts, scales = util.normalize_data(train_inputs)
test_inputs = util.transform_data(test_inputs, shifts, scales)
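util.normalize_data presumably rescales each of the 12 channels using statistics of the training set and returns the shifts and scales so that transform_data can apply the identical affine map to the test set (fitting the normalization on training data only avoids test-set leakage). A minimal sketch of that idea, assuming per-channel min/max scaling to [0, 1] (an assumption about conceptor.util, not its documented behavior):

def normalize_data_sketch(inputs):
    # Hypothetical stand-in for util.normalize_data: per-channel min/max scaling.
    data = np.hstack(inputs)                        # (12, total_timesteps)
    shifts = data.min(axis=1, keepdims=True)
    scales = data.max(axis=1, keepdims=True) - shifts
    return [(p - shifts) / scales for p in inputs], shifts, scales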

In [19]:
from scipy import interpolate
virtualLength = 4
polyOrder = 3
num_train = len(train_inputs)

for i in range(num_train):
    p = train_inputs[i]
    pNew = np.zeros((12, virtualLength))
    l = p.shape[1]
    fitpts = np.asarray(range(l))
    intPts = np.linspace(0, l-1, num = 4)
    for s in range(12):
        polyCoeffs = np.polyfit(fitpts, p[s, :], polyOrder)
        newS = np.polyval(polyCoeffs, fitpts)
        interpfun = interpolate.interp1d(fitpts, newS)
        newSNormalLength = interpfun(intPts)
        pNew[s, :] = newSNormalLength
    train_inputs[i] = pNew

In [20]:
num_test = len(test_inputs)

for i in range(num_test):
    p = test_inputs[i]
    pNew = np.zeros((12, virtualLength))
    l = p.shape[1]
    fitpts = np.asarray(range(l))
    intPts = np.linspace(0, l - 1, num = 4)
    for s in range(12):
        polyCoeffs = np.polyfit(fitpts, p[s, :], polyOrder)
        newS = np.polyval(polyCoeffs, fitpts)
        interpfun = interpolate.interp1d(fitpts, newS)
        newSNormalLength = interpfun(intPts)
        pNew[s, :] = newSNormalLength
    test_inputs[i] = pNew
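The two resampling cells above share the same per-channel logic; a hedged refactoring into a helper (hypothetical name, equivalent to the loops above) makes the intent explicit: fit a cubic polynomial to each of the 12 channels, evaluate it on the original time grid, and linearly interpolate the smoothed curve down to virtualLength points.

def resample_utterance(p, virtual_length=4, poly_order=3):
    # Smooth each channel with a polynomial fit, then resample to a fixed length.
    n_channels, l = p.shape
    fitpts = np.arange(l)
    intpts = np.linspace(0, l - 1, num=virtual_length)
    p_new = np.zeros((n_channels, virtual_length))
    for s in range(n_channels):
        coeffs = np.polyfit(fitpts, p[s, :], poly_order)
        smoothed = np.polyval(coeffs, fitpts)
        p_new[s, :] = interpolate.interp1d(fitpts, smoothed)(intpts)
    return p_new

# Equivalent to the two loops above:
# train_inputs = [resample_utterance(p) for p in train_inputs]
# test_inputs = [resample_utterance(p) for p in test_inputs]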

In [16]:
def compute_conceptors(all_train_states,
                       apN):
    """Compute positive- and negative-evidence conceptors for every class at
    each candidate aperture 2**0, ..., 2**(apN - 1)."""
    CPoss = []
    RPoss = []
    ROthers = []
    CNegs = []
    statesAllClasses = np.hstack(all_train_states)
    Rall = statesAllClasses.dot(statesAllClasses.T)
    I = np.eye(Rall.shape[0])
    for i in range(len(all_train_states)):
        # Normalized state correlation matrix of class i ...
        R = all_train_states[i].dot(all_train_states[i].T)
        Rnorm = R / all_train_states[i].shape[1]
        RPoss.append(Rnorm)
        # ... and of all remaining classes pooled together.
        ROther = Rall - R
        ROthersNorm = ROther / (statesAllClasses.shape[1] - all_train_states[i].shape[1])
        ROthers.append(ROthersNorm)
        CPossi = []
        CNegsi = []
        for api in range(apN):
            # Conceptor C = R (R + aperture**-2 I)**-1 at aperture 2**api.
            C = Rnorm.dot(np.linalg.inv(Rnorm + (2 ** float(api)) ** (-2) * I))
            CPossi.append(C)
            COther = ROthersNorm.dot(np.linalg.inv(ROthersNorm + (2 ** float(api)) ** (-2) * I))
            CNegsi.append(I - COther)
        CPoss.append(CPossi)
        CNegs.append(CNegsi)
    return CPoss, RPoss, ROthers, CNegs
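For each class i and candidate aperture alpha = 2**api, compute_conceptors forms the positive conceptor C_i = R_i (R_i + alpha**-2 I)**-1 from that class's normalized state correlation matrix, and the negative conceptor I - R_other (R_other + alpha**-2 I)**-1 from the pooled states of all other classes. A toy check on random states (hypothetical data, unrelated to the vowel states) illustrates the defining property that a conceptor's eigenvalues lie in [0, 1):

Z = np.random.randn(5, 200)                      # 5-dimensional states, 200 samples
R = Z.dot(Z.T) / Z.shape[1]                      # normalized correlation matrix
alpha = 4.0
C = R.dot(np.linalg.inv(R + alpha ** (-2) * np.eye(5)))
eigs = np.linalg.eigvalsh((C + C.T) / 2)         # symmetrize before the eigensolver
print(eigs.min() >= 0.0, eigs.max() < 1.0)       # both should be True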

In [17]:
def compute_aperture(C_pos_list,
                     apN):
    """For each class, pick the aperture 2**gamma at which the squared Frobenius
    norm of C(2**gamma) changes fastest along the interpolated norm curve."""
    classnum = len(C_pos_list)
    best_aps_pos = []
    apsExploreExponents = np.asarray(range(apN))
    intPts = np.arange(apsExploreExponents[0], apsExploreExponents[-1] + 0.01, 0.01)
    for i in range(classnum):
        norm_pos = np.zeros(apN)
        for api in range(apN):
            norm_pos[api] = np.linalg.norm(C_pos_list[i][api], 'fro') ** 2
        # Cubic interpolation of the norm curve over the aperture exponents,
        # then a finite-difference gradient to locate the steepest change.
        f_pos = interpolate.interp1d(np.arange(apN), norm_pos, kind="cubic")
        norm_pos_inter = f_pos(intPts)
        norm_pos_inter_grad = (norm_pos_inter[1:] - norm_pos_inter[0:-1]) / 0.01
        max_ind_pos = np.argmax(np.abs(norm_pos_inter_grad), axis = 0)
        best_aps_pos.append(2 ** intPts[max_ind_pos])
    return best_aps_pos

In [18]:
def compute_best_conceptor(R_list,
                           best_apt):
    """Recompute one conceptor per class at a single chosen aperture best_apt."""
    classnum = len(R_list)
    C_best_list = []
    I = np.eye(R_list[0].shape[0])
    for i in range(classnum):
        C_best = R_list[i].dot(np.linalg.inv(R_list[i] + best_apt ** (-2) * I))
        C_best_list.append(C_best)
    return C_best_list
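A hedged end-to-end sketch of how the three helpers fit together, on random stand-in states (hypothetical data: 9 classes of 22-dimensional states; averaging the per-class apertures into a single best_apt is an assumption, not something fixed by the code above):

apN = 10
states_per_class = [np.random.randn(22, 120) for _ in range(9)]

CPoss, RPoss, ROthers, CNegs = compute_conceptors(states_per_class, apN)
best_aps_pos = compute_aperture(CPoss, apN)        # one suggested aperture per class

best_apt = np.mean(best_aps_pos)                   # aggregate to a single aperture (assumption)
C_best_pos = compute_best_conceptor(RPoss, best_apt)
C_best_neg = [np.eye(22) - C for C in compute_best_conceptor(ROthers, best_apt)]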

In [21]:
N = 10       # reservoir size (number of hidden units)

M = N + 12   # augmented state dimension: reservoir units plus the 12 input channels

In [14]:
RNN = reservoir.Reservoir(12, 10, sr = 1.2, in_scale = 0.2, bias_scale = 1)

# Drive the reservoir with every resampled utterance and collect its states.
all_train_data = np.dstack(train_inputs)
all_train_data = all_train_data.swapaxes(1, 2)
hid_states, raw_hid_states = RNN.augment(all_train_data, 1)

all_test_data = np.dstack(test_inputs)
all_test_data = all_test_data.swapaxes(1, 2)
hid_states_test, raw_hid_states_test = RNN.augment(all_test_data, 1)

# Augmented states: reservoir activations stacked on top of the raw inputs.
all_states_train = np.vstack((raw_hid_states, all_train_data))
all_states_test = np.vstack((raw_hid_states_test, all_test_data))

# Stack each utterance's augmented time-step states into a single column
# (one column per utterance).
all_states_train = all_states_train.swapaxes(1, 2).reshape((-1, num_train), order = 'F')
all_states_test = all_states_test.swapaxes(1, 2).reshape((-1, num_test), order = 'F')

# 270 training utterances = 30 per speaker, so split into 9 equal blocks.
states_list_train = np.hsplit(all_states_train, 9)

japvow_rec = recog.Recognizer()

japvow_rec.compute_conceptors(states_list_train)
japvow_rec.aperture_adjust()
japvow_rec.compute_best_conceptors()

results_pos, evidence_pos = japvow_rec.evidence(all_states_test, japvow_rec.Cs_best_pos)
results_neg, evidence_neg = japvow_rec.evidence(all_states_test, japvow_rec.Cs_best_neg)
results_comb, combEv = japvow_rec.combine_evidence(evidence_pos, evidence_neg)

# Ground-truth speaker labels: the test set has these many utterances per speaker.
correct_results = []
block_lengthes = [31, 35, 88, 44, 29, 24, 40, 50, 29]
for i in range(9):
    num = block_lengthes[i]
    resulti = np.zeros(num).astype(int) + i
    correct_results.append(resulti)
correct_results = np.hstack(correct_results)

misclasnum_pos = np.sum(correct_results != results_pos)
misclasnum_neg = np.sum(correct_results != results_neg)
misclasnum_comb = np.sum(correct_results != results_comb)

print('misclassifications using positive evidence:', misclasnum_pos)
print('misclassifications using negative evidence:', misclasnum_neg)
print('misclassifications using combined evidence:', misclasnum_comb)


misclassifications using positive evidence: 8
misclassifications using negative evidence: 6
misclassifications using combined evidence: 4
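How the Recognizer turns conceptors into decisions is not shown in this notebook; in the conceptor framework the positive evidence of a test state z for class j is the quadratic form z^T C_j^+ z, the negative evidence uses C_j^-, and the class with the largest (combined) evidence wins. A minimal sketch of that decision rule (an assumption about what Recognizer.evidence computes, not its actual implementation):

def evidence_sketch(z, C_list):
    # Quadratic-form evidence z^T C z for each class conceptor; argmax is the decision.
    ev = np.array([z.dot(C).dot(z) for C in C_list])
    return np.argmax(ev), ev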

In [9]:
def TestJapVow():
    """Run one train/test cycle with a freshly drawn random reservoir and return
    the misclassification counts (positive, negative, combined evidence)."""
    RNN = reservoir.Reservoir(12, 10, sr = 1.2, in_scale = 0.2, bias_scale = 1)

    all_train_data = np.dstack(train_inputs)
    all_train_data = all_train_data.swapaxes(1, 2)
    hid_states, raw_hid_states = RNN.augment(all_train_data, 1)

    all_test_data = np.dstack(test_inputs)
    all_test_data = all_test_data.swapaxes(1, 2)
    hid_states_test, raw_hid_states_test = RNN.augment(all_test_data, 1)

    all_states_train = np.vstack((raw_hid_states, all_train_data))

    all_states_test = np.vstack((raw_hid_states_test, all_test_data))

    all_states_train = all_states_train.swapaxes(1, 2).reshape((-1, num_train), order = 'F')

    all_states_test = all_states_test.swapaxes(1, 2).reshape((-1, num_test), order = 'F')

    states_list_train = np.hsplit(all_states_train, 9)
    
    japvow_rec = recog.Recognizer()

    japvow_rec.compute_conceptors(states_list_train)

    japvow_rec.aperture_adjust()
    
    japvow_rec.compute_best_conceptors()

    results_pos, evidence_pos = japvow_rec.evidence(all_states_test, japvow_rec.Cs_best_pos)

    results_neg, evidence_neg = japvow_rec.evidence(all_states_test, japvow_rec.Cs_best_neg)
    
    results_comb, combEv = japvow_rec.combine_evidence(evidence_pos, evidence_neg)
    
    correct_results = []
    block_lengthes = [31, 35, 88, 44, 29, 24, 40, 50, 29]
    for i in range(9):
        num = block_lengthes[i]
        resulti = np.zeros(num).astype(int) + i
        correct_results.append(resulti)
    correct_results = np.hstack(correct_results)

    misclasnum_pos = np.sum(correct_results != results_pos)

    misclasnum_neg = np.sum(correct_results != results_neg)

    misclasnum_comb = np.sum(correct_results != results_comb)
    
    # print('misclassifications using positive evidence:', misclasnum_pos)
    # print('misclassifications using negative evidence:', misclasnum_neg)
    # print('misclassifications using combined evidence:', misclasnum_comb)
    
    return misclasnum_pos, misclasnum_neg, misclasnum_comb

In [10]:
# Average the misclassification counts over 50 random reservoir initializations.
totalmis_pos = 0
totalmis_neg = 0
totalmis_comb = 0


for i in range(50):
    miscla_pos, miscla_neg, miscla_comb = TestJapVow()
    totalmis_pos += miscla_pos
    totalmis_neg += miscla_neg
    totalmis_comb += miscla_comb

print(totalmis_pos / 50, totalmis_neg / 50, totalmis_comb / 50)


8.4 5.9 4.92
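The averages vary from run to run because every call to TestJapVow draws a fresh random reservoir; to make the experiment repeatable, one option is to fix the seed before the loop (assuming conceptor.reservoir draws its weights from NumPy's global random state):

np.random.seed(42)   # assumption: Reservoir uses np.random for its weight matrices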

In [27]:
RNN = reservoir.Reservoir(12, 10, sr = 1.2, in_scale = 0.2, bias_scale = 1)

all_train_data = np.dstack(train_inputs)
all_train_data = all_train_data.swapaxes(1, 2)
hid_states, raw_hid_states = RNN.augment(all_train_data, 1)

all_test_data = np.dstack(test_inputs)
all_test_data = all_test_data.swapaxes(1, 2)
hid_states_test, raw_hid_states_test = RNN.augment(all_test_data, 1)

all_states_train = np.vstack((raw_hid_states, all_train_data))
all_states_test = np.vstack((raw_hid_states_test, all_test_data))

all_states_train = all_states_train.swapaxes(1, 2).reshape((-1, num_train), order = 'F')
all_states_test = all_states_test.swapaxes(1, 2).reshape((-1, num_test), order = 'F')

states_list_train = np.hsplit(all_states_train, 9)

# Same pipeline as above, but using the Recognizer's high-level train/predict API.
japvow_rec = recog.Recognizer()
japvow_rec.train(states_list_train)
results = japvow_rec.predict(all_states_test)

correct_results = []
block_lengthes = [31, 35, 88, 44, 29, 24, 40, 50, 29]
for i in range(9):
    num = block_lengthes[i]
    resulti = np.zeros(num).astype(int) + i
    correct_results.append(resulti)
correct_results = np.hstack(correct_results)

misclasnum = np.sum(correct_results != results)

print('misclassifications using combined evidence:', misclasnum)


misclassifications using combined evidence: 5

In [ ]: