In [2]:
# Initial sizing and two weight draws, contrasting the two numpy samplers:
# rand -> uniform on [0, 1), randn -> standard normal.
import numpy as np

size_net = 100  # number of reservoir neurons
size_in = 1     # input dimension

Wraw_in = np.random.rand(size_net, size_in)     # uniform [0, 1)
Wraw_in_1 = np.random.randn(size_net, size_in)  # standard normal

In [3]:
# Show the two 100 x 1 weight draws (uniform vs. normal) from the previous cell
print(Wraw_in)
print(Wraw_in_1)


[[ 0.26558972]
 [ 0.86565742]
 [ 0.84614955]
 [ 0.83061475]
 [ 0.24818472]
 [ 0.24509341]
 [ 0.80486005]
 [ 0.39171085]
 [ 0.9819142 ]
 [ 0.77387868]
 [ 0.09914355]
 [ 0.65386428]
 [ 0.52260045]
 [ 0.09473185]
 [ 0.03711237]
 [ 0.97049067]
 [ 0.16706283]
 [ 0.25380365]
 [ 0.77631019]
 [ 0.06801035]
 [ 0.93635735]
 [ 0.1041537 ]
 [ 0.68751626]
 [ 0.04790763]
 [ 0.03965534]
 [ 0.65409208]
 [ 0.96241852]
 [ 0.6239418 ]
 [ 0.29636374]
 [ 0.19755716]
 [ 0.71670413]
 [ 0.57373664]
 [ 0.93261139]
 [ 0.88205801]
 [ 0.04623435]
 [ 0.77610274]
 [ 0.4301736 ]
 [ 0.65596204]
 [ 0.32642939]
 [ 0.47356734]
 [ 0.60883728]
 [ 0.51532046]
 [ 0.00122873]
 [ 0.94592678]
 [ 0.85367517]
 [ 0.92487439]
 [ 0.10264238]
 [ 0.34355275]
 [ 0.13384306]
 [ 0.54515921]
 [ 0.04413363]
 [ 0.06673564]
 [ 0.34717459]
 [ 0.28412939]
 [ 0.35357609]
 [ 0.59287594]
 [ 0.52052664]
 [ 0.60217486]
 [ 0.46193526]
 [ 0.83675146]
 [ 0.37438615]
 [ 0.34515406]
 [ 0.64585606]
 [ 0.03841359]
 [ 0.03082847]
 [ 0.54760101]
 [ 0.19140161]
 [ 0.84247385]
 [ 0.04554658]
 [ 0.21793951]
 [ 0.74367888]
 [ 0.22995822]
 [ 0.25662   ]
 [ 0.80961918]
 [ 0.74209135]
 [ 0.6483969 ]
 [ 0.88770699]
 [ 0.75732633]
 [ 0.17619609]
 [ 0.43803254]
 [ 0.94011209]
 [ 0.58876049]
 [ 0.84118366]
 [ 0.62143317]
 [ 0.20358953]
 [ 0.90947755]
 [ 0.57086531]
 [ 0.43015816]
 [ 0.81499393]
 [ 0.14612363]
 [ 0.79576202]
 [ 0.26775967]
 [ 0.54904026]
 [ 0.49881548]
 [ 0.24884427]
 [ 0.45900058]
 [ 0.98284898]
 [ 0.47934939]
 [ 0.15856187]
 [ 0.81736001]]
[[ -2.48511816e-01]
 [ -1.65459845e-01]
 [  3.45296840e+00]
 [ -4.08189308e-02]
 [ -1.29626769e+00]
 [ -7.51327069e-01]
 [ -1.25049244e+00]
 [ -8.56633669e-01]
 [ -9.00778092e-01]
 [ -4.26499157e-02]
 [  1.92126777e+00]
 [ -1.58326130e-02]
 [ -4.83155919e-01]
 [ -1.89254613e+00]
 [  2.88054676e-02]
 [ -1.90808321e+00]
 [ -8.67364819e-01]
 [  2.13235569e-01]
 [ -1.67786164e-01]
 [ -1.34709363e+00]
 [  7.62892920e-01]
 [  4.49953303e-01]
 [ -2.05409065e+00]
 [  1.48205174e+00]
 [ -6.52082762e-01]
 [ -5.02552332e-01]
 [  6.07992881e-02]
 [  1.79903830e+00]
 [ -4.06024660e-01]
 [ -4.54514132e-01]
 [  1.14568630e-03]
 [ -8.47359408e-02]
 [ -3.01489584e-01]
 [ -3.27219717e-01]
 [  3.85631165e-01]
 [  3.59953720e-01]
 [ -5.35042993e-03]
 [  1.26091806e+00]
 [  3.64645557e-01]
 [ -1.08583355e+00]
 [ -8.67968181e-01]
 [  5.33069587e-01]
 [  4.09051009e-01]
 [  4.48476300e-02]
 [  4.58142980e-01]
 [ -2.50340422e-01]
 [  1.16664590e+00]
 [  5.70056197e-01]
 [ -7.04799365e-01]
 [  5.82698935e-01]
 [ -1.59748078e-01]
 [ -7.00338087e-02]
 [ -1.19736788e+00]
 [  2.90914146e-01]
 [ -1.86909441e+00]
 [ -2.73996183e-01]
 [  1.11992349e-01]
 [ -9.46328090e-01]
 [ -3.25918940e-02]
 [ -1.30833744e-01]
 [  9.55324955e-01]
 [ -9.02711468e-01]
 [ -1.02572537e+00]
 [  1.63717737e-01]
 [ -2.63116411e+00]
 [  1.37223581e+00]
 [ -1.59759771e+00]
 [  5.56216222e-01]
 [ -1.36879653e+00]
 [ -1.11770539e+00]
 [ -7.67936459e-01]
 [  8.77060948e-01]
 [  8.53537540e-01]
 [ -1.50360117e+00]
 [  1.30799499e+00]
 [  9.88419822e-02]
 [  1.17589250e+00]
 [  1.68574440e+00]
 [  6.88392499e-02]
 [ -1.67331573e+00]
 [ -1.23744344e+00]
 [ -1.54699425e-01]
 [  2.03176799e+00]
 [ -1.66575716e+00]
 [ -4.96238900e-01]
 [ -2.26670789e-01]
 [ -1.30126363e+00]
 [  5.26397303e-01]
 [ -6.82527829e-01]
 [  9.97745933e-02]
 [  6.29424633e-02]
 [  4.57025512e-01]
 [ -1.66241731e+00]
 [  2.90079182e-02]
 [ -7.87466277e-01]
 [  5.03919006e-01]
 [  3.22489296e+00]
 [  1.79042521e+00]
 [  1.09923092e+00]
 [ -1.24192061e+00]]

In [9]:
# BUG fix: `scipy.lib.six` and the private `scipy.sparse.coo` module were
# removed from scipy — import coo_matrix from the public namespace instead.
from scipy.sparse import coo_matrix

xrange = range  # Python 3 compat shim for the name the removed six import provided


def sprandn(m, n, density=0.01, format="coo", dtype=None, random_state=None):
    """Generate a sparse matrix of the given shape and density with standard
    normally distributed values.

    Parameters
    ----------
    m, n : int
        shape of the matrix
    density : real
        density of the generated matrix: density equal to one means a full
        matrix, density of 0 means a matrix with no non-zero items.
    format : str
        sparse matrix format.
    dtype : dtype
        type of the returned matrix values.
    random_state : {numpy.random.RandomState, int}, optional
        Random number generator or random seed. If not given, the singleton
        numpy.random will be used.

    Returns
    -------
    scipy.sparse matrix of shape (m, n) in the requested format with
    int(density * m * n) normally distributed nonzero entries.

    Raises
    ------
    ValueError
        If density is outside [0, 1] or m * n overflows the index dtype.
    NotImplementedError
        If a non-float dtype is requested.

    Notes
    -----
    Only float types are supported for now.
    """
    if density < 0 or density > 1:
        raise ValueError("density expected to be 0 <= density <= 1")
    if dtype and (dtype not in [np.float32, np.float64, np.longdouble]):
        raise NotImplementedError("type %s not supported" % dtype)

    mn = m * n

    # Pick an index dtype wide enough for the flat index range [0, mn)
    tp = np.intc
    if mn > np.iinfo(tp).max:
        tp = np.int64

    if mn > np.iinfo(tp).max:
        msg = """\
Trying to generate a random sparse matrix such as the product of dimensions is
greater than %d - this is not supported on this machine
"""
        raise ValueError(msg % np.iinfo(tp).max)

    # Number of non zero values
    k = int(density * m * n)

    if random_state is None:
        random_state = np.random
    elif isinstance(random_state, (int, np.integer)):
        random_state = np.random.RandomState(random_state)

    # Sample k distinct flat indices without replacement. The original carried
    # a hand-rolled permutation/rejection sampler with the comment "choice is
    # only available in numpy >= 1.7" — that constraint is long obsolete.
    ind = random_state.choice(mn, size=k, replace=False)

    # Map flat (column-major) indices back to (row, col) coordinates
    j = np.floor(ind * 1. / m).astype(tp)
    i = (ind - j * m).astype(tp)
    vals = random_state.randn(k).astype(dtype)
    return coo_matrix((vals, (i, j)), shape=(m, n)).asformat(format)

In [132]:
W_star = sprandn(m = size_net, n = size_net, density = 10. / size_net, format = 'coo');

In [134]:
import scipy.sparse
# BUG fix: importing scipy.sparse alone does not load the linalg submodule;
# scipy.sparse.linalg must be imported explicitly.
import scipy.sparse.linalg
# Largest-magnitude eigenvalue via Arnoldi iteration (general matrices)
eigw, _ = scipy.sparse.linalg.eigs(W_star, 1, which = 'LM')
# NOTE(review): eigsh is for symmetric/Hermitian operators; W_star is a random
# (generally non-symmetric) sparse matrix, so this value is not a true
# eigenvalue of W_star — confirm intent.
eigwH, _ = scipy.sparse.linalg.eigsh(W_star, 1, which = 'LM')

In [135]:
print(eigw, eigwH)


[ 0.16528709-3.31578371j] [-5.11206967]

In [19]:
W_star = W_star.toarray

In [22]:
print(W_star)


  (0, 90)	0.394446656527
  (92, 24)	0.0593445097689
  (69, 61)	0.768859480068
  (6, 11)	-0.230215217918
  (42, 85)	-0.190321487274
  (40, 36)	-0.382062662398
  (29, 85)	-1.48374253565
  (64, 66)	-0.214194481441
  (21, 21)	0.216471832784
  (95, 86)	0.129991057337
  (65, 53)	2.09394520984
  (19, 0)	-0.460754587458
  (18, 7)	0.718791354893
  (17, 71)	1.10125354209
  (69, 49)	1.13320565858
  (74, 81)	-0.625938924188
  (57, 83)	-0.346107553764
  (60, 77)	0.490525976829
  (69, 82)	0.216414248344
  (47, 35)	0.164061415181
  (30, 0)	-0.305044824462
  (5, 7)	-1.26365007529
  (78, 65)	-0.622715539684
  (50, 86)	1.05298739601
  (55, 29)	-0.153626943271
  :	:
  (64, 29)	-1.13395669951
  (97, 83)	0.335694753079
  (32, 78)	-0.297290773687
  (37, 82)	-0.342453643047
  (97, 5)	1.21675788716
  (98, 50)	0.333937605262
  (24, 10)	0.720592499327
  (37, 93)	0.75442680541
  (99, 63)	1.0570699973
  (30, 95)	0.00331005160349
  (95, 90)	0.688515413075
  (30, 61)	-0.183161212449
  (8, 98)	-1.13487943178
  (17, 30)	0.660060746677
  (67, 15)	0.188899105702
  (76, 64)	-0.0280366532385
  (59, 93)	1.44307300597
  (13, 48)	-1.08066139828
  (33, 10)	-1.10497626993
  (56, 13)	1.35787414931
  (61, 68)	1.75624254756
  (83, 92)	2.37304044985
  (79, 85)	1.84860031552
  (59, 89)	-0.645365071476
  (64, 76)	1.76079801491

In [27]:
# COO internals: stored nonzero values and their (row, col) coordinates
v = W_star.data
r, c = W_star.nonzero()

# density 10/size_net on a 100 x 100 matrix -> 1000 stored entries
print(v.size)


1000

In [51]:
# Prepare testing data
p1=np.asarray(range(10));
p1=np.sin(2*np.pi*p1/np.sqrt(75));
p2=np.asarray(range(10));
#p2=0.5*np.sin(2*np.pi*p2/np.sqrt(20))+np.sin(2*np.pi*p2/np.sqrt(40));
p2=np.sin(2*np.pi*p2/np.sqrt(40));

In [37]:
ps=np.vstack((p1[None], p2[None]));

In [38]:
ps1=np.vstack((p1, p2));

In [70]:
# Store the patterns as a list of 1 x 10 row vectors
p = []
p.append(p1[None])
p.append(p2[None])

In [39]:
ps


Out[39]:
array([[ 0.        ,  0.66352438,  0.99283772,  0.82206794,  0.23723021,
        -0.46709817, -0.93615356, -0.93367712, -0.46091621,  0.24400389],
       [ 0.        ,  0.8379188 ,  0.91466364,  0.16051873, -0.73944303,
        -0.96768722, -0.3168745 ,  0.62179024,  0.9956144 ,  0.46501231]])

In [40]:
ps1


Out[40]:
array([[ 0.        ,  0.66352438,  0.99283772,  0.82206794,  0.23723021,
        -0.46709817, -0.93615356, -0.93367712, -0.46091621,  0.24400389],
       [ 0.        ,  0.8379188 ,  0.91466364,  0.16051873, -0.73944303,
        -0.96768722, -0.3168745 ,  0.62179024,  0.9956144 ,  0.46501231]])

In [42]:
p1[None]


Out[42]:
array([[ 0.        ,  0.66352438,  0.99283772,  0.82206794,  0.23723021,
        -0.46709817, -0.93615356, -0.93367712, -0.46091621,  0.24400389]])

In [32]:
p1 == p1[None]


Out[32]:
array([[ True,  True,  True, ...,  True,  True,  True]], dtype=bool)

In [41]:
p1


Out[41]:
array([ 0.        ,  0.66352438,  0.99283772,  0.82206794,  0.23723021,
       -0.46709817, -0.93615356, -0.93367712, -0.46091621,  0.24400389])

In [49]:
def total_len(nparraylist):
    """Return the total number of columns (axis-1 sizes summed) over a list of 2-D arrays."""
    column_counts = (arr.shape[1] for arr in nparraylist)
    return sum(column_counts)

In [44]:
# zip(*foo) transposes the rows; in Python 3 it returns a lazy iterator
# (hence the <zip at 0x...> repr in the next cell), not a list
foo=[[1, 'This is a test',12039],[12, 'test',1235]]
zipped = zip(*foo)

In [45]:
zipped


Out[45]:
<zip at 0x1081437c8>

In [71]:
total_len(p)


Out[71]:
20

In [68]:
# p1[None] prepends an axis: shape (10,) -> (1, 10), see Out[69]
p1=np.asarray(range(10));
p1t = p1[None]

In [69]:
p1t.shape


Out[69]:
(1, 10)

In [73]:
np.zeros((2,10))


Out[73]:
array([[ 0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.],
       [ 0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.]])

In [75]:
r = range(8)

In [77]:
np.asarray(r)


Out[77]:
array([0, 1, 2, 3, 4, 5, 6, 7])

In [81]:
p = np.vstack((p1[None], p2[None]))

In [92]:
pv = p[:,0][None].T

In [93]:
p[:,2:4].dot(pv)


Out[93]:
array([[ 0.],
       [ 0.]])

In [94]:
a = np.random.randn(9, 6) + 1j*np.random.randn(9, 6)

In [95]:
U, s, V = np.linalg.svd(a)

In [96]:
s


Out[96]:
array([ 6.42881306,  5.24377349,  4.27084683,  3.22703281,  2.63166133,
        1.40595224])

In [98]:
snew = np.diag(s)

In [101]:
snew.shape


Out[101]:
(6, 6)

In [114]:
import reservoir

In [ ]:
# Full conceptor-network demo. Converted from Python 2 (print statements ->
# print(), xrange -> range) so it can run in this Python 3 kernel.
# Depends on the external `conceptors` package.
import numpy as np
import scipy.interpolate
import matplotlib.pyplot as pplot
import conceptors.util as util
from conceptors.net import ConceptorNetwork
from conceptors.util import read_jpv_data
from conceptors.util import normalize_jap_data
from conceptors.util import transform_jap_data

# NOTE(review): hardcoded absolute paths — parameterize before reuse
train_inputs, train_outputs, test_inputs, test_outputs = read_jpv_data(
    "/home/arlmaster/workspace/conceptors/conceptors/data/ae.train",
    "/home/arlmaster/workspace/conceptors/conceptors/data/ae.test")

train_data, shifts, scales = normalize_jap_data(train_inputs)
test_data = transform_jap_data(test_inputs, shifts, scales)


# Create conceptor network
net = ConceptorNetwork(2, 200)

# Prepare testing data: two sinusoids with irrational periods sqrt(75), sqrt(40)
p1 = np.asarray(range(2000))
p1 = np.sin(2 * np.pi * p1 / np.sqrt(75))
p2 = np.asarray(range(2000))
#p2=0.5*np.sin(2*np.pi*p2/np.sqrt(20))+np.sin(2*np.pi*p2/np.sqrt(40));
p2 = np.sin(2 * np.pi * p2 / np.sqrt(40))
ps = np.vstack((p1[None], p2[None]))

p = []
p.append(ps)
#p.append(p1[None]);
#p.append(p2[None]);

# training
net.train(p)

# test readout (typo "NEMSE" fixed to "NRMSE")
print("NRMSE readout: %f" % (util.nrmse(net.W_out.dot(net.all_train_args), net.all_train_outs)))
print("mean NRMSE W: %f" % util.nrmse(net.W.dot(net.all_train_old_args), net.W_targets))

y = net.W_out.dot(net.all_train_args)
print(y.shape)
print(net.all_train_outs.shape)

pplot.figure(1)
pplot.plot(range(1000), p[0][0, 500:1500])
pplot.plot(range(1000), y[0, 0:1000])
pplot.title("Readout")  # typo "Redout" fixed
pplot.show()

# test conceptors

parameter_nl = 0.1   # weight-noise level
c_test_length = 200  # retrieval test length
state_nl = 0.5       # state-noise level
W_noisy = net.W + parameter_nl * np.abs(net.W).dot(np.random.rand(net.num_neuron, net.num_neuron) - 0.5)

# first 5 state traces and the readout trace, per pattern
x_ctestpl = np.zeros((5, c_test_length, net.num_pattern))
p_ctestpl = np.zeros((net.num_in, c_test_length, net.num_pattern))

# loop variable renamed from `p` (which shadowed the pattern list above) to `pat`
for pat in range(net.num_pattern):
  c = net.Cs[0][pat]
  x = 0.5 * np.random.rand(net.num_neuron, 1)

  for n in range(c_test_length):
    # noisy reservoir update, then projection through the conceptor
    x = np.tanh(W_noisy.dot(x) + net.W_bias) + state_nl * (np.random.rand(net.num_neuron, 1) - 0.5)
    x = c.dot(x)
    x_ctestpl[:, n, pat] = x[0:5, 0]
    p_ctestpl[:, n, pat] = net.W_out.dot(x)[:, 0]


for pat in range(net.num_pattern):
  int_rate = 20
  this_driver = net.train_ppl[pat]
  this_out = p_ctestpl[0, :, pat]
  # NOTE(review): these cubic interpolations are computed but never used below
  this_driver_int_f = scipy.interpolate.interp1d(np.arange(0, net.signal_plot_length), this_driver, kind="cubic")
  this_driver_int = this_driver_int_f(np.linspace(0, net.signal_plot_length - 1, int_rate * net.signal_plot_length))
  this_out_int_f = scipy.interpolate.interp1d(np.arange(0, c_test_length), this_out, kind="cubic")
  this_out_int = this_out_int_f(np.linspace(0, c_test_length - 1, int_rate * c_test_length))


pplot.figure(2)
pplot.plot(np.log10(net.sr_collectors[0]))
pplot.plot(net.Cs[2][0])
#pplot.plot(range(c_test_length), p_ctestpl[0,:,1]);
pplot.show()

# draw conceptors

L = 100

# L-step state traces of reservoir units 71 and 80 for pattern 1 / pattern 2
trace1 = net.all_train_args[[71, 80], 0:L]
trace2 = net.all_train_args[[71, 80], 1000:1000 + L]

# 2x2 state correlation matrices and their SVDs
R1 = trace1.dot(trace1.T) / L
U1, S1, _ = np.linalg.svd(R1)
R2 = trace2.dot(trace2.T) / L
U2, S2, _ = np.linalg.svd(R2)

# 200 points on the unit circle, as a 2 x 200 matrix
cycle_data = np.vstack((np.cos(2 * np.pi * np.arange(0, 200) / 200),
                        np.sin(2 * np.pi * np.arange(0, 200) / 200)))

E1 = R1.dot(cycle_data)
E2 = R2.dot(cycle_data)

# conceptors C_i = R_i (R_i + a^-2 I)^-1 with aperture a
a = 1.6
C1 = R1.dot(np.linalg.inv(R1 + a ** -2 * np.eye(2)))
U1c, S1c, _ = np.linalg.svd(C1)
C2 = R2.dot(np.linalg.inv(R2 + a ** -2 * np.eye(2)))
U2c, S2c, _ = np.linalg.svd(C2)
E1c = C1.dot(cycle_data)
E2c = C2.dot(cycle_data)

pplot.figure(3)
pplot.plot(cycle_data[0, :], cycle_data[1, :])
#pplot.plot(trace1[0,:], trace1[1,:]);
pplot.plot(E1c[0, :], E1c[1, :])
pplot.show()

In [155]:
import matplotlib.pyplot as plt
from imp import reload
reload(reservoir)
import util
reload(util)

%matplotlib inline

# Create conceptor network
RNN = reservoir.Reservoir(2, 200)

# Prepare testing data
p1 = np.asarray(range(2000))
p1 = np.sin(2 * np.pi * p1 / np.sqrt(75))
p2 = np.asarray(range(2000))
p2 = np.sin(2 * np.pi * p2 / np.sqrt(40))
ps = np.vstack((p1[None], p2[None]))

p=[]
p.append(ps);


# training
RNN.train(p, 500);


def nrmse(output,
          target):
  """
  Compute normalized root mean square error (NRMSE).

  @param output: output data in D x time dimension (a 1-D array is treated
                 as a single series, i.e. D = 1)
  @param target: target data in D x time dimension (same shape as output)

  @return NRMSE: mean over dimensions of the per-dimension normalized root
                 mean square error.
  """

  if output.ndim == 1 and target.ndim == 1:
    # BUG fix: the original used output[None].T, producing shape (time, 1);
    # np.var(..., axis=1, ddof=1) then ran over a single sample per row,
    # dividing by ddof-corrected n-1 = 0 and yielding NaN. Reshape to
    # (1, time) so 1-D inputs behave like a single D=1 row.
    output = output[None]
    target = target[None]

  # Per-dimension unbiased variance over time, averaged between target and output
  combined_var = 0.5 * (np.var(a = target, axis = 1, ddof = 1) + np.var(a = output, axis = 1, ddof = 1))
  error_signal = (output - target)

  return np.mean(np.sqrt(np.mean(error_signal ** 2, 1) / combined_var))

# test readout
print("NRMSE readout: %f" % (nrmse(RNN.W_out.dot(RNN.all_train_args), RNN.all_train_outs)))
total_length = RNN.all_train_args.shape[1]
# Recover the pre-tanh targets for W. np.tile replaces np.matlib.repmat:
# numpy.matlib is deprecated and `np.matlib` is not usable without its own
# explicit `import numpy.matlib`, which this notebook never performs.
W_targets = np.arctanh(RNN.all_train_args) - np.tile(RNN.W_bias, (1, total_length))
print("mean NRMSE W: %f" % nrmse(RNN.W.dot(RNN.all_train_old_args), W_targets))  # typo "NEMSE" fixed

y = RNN.W_out.dot(RNN.all_train_args)
print(y.shape)
print(RNN.all_train_outs.shape)

# Original vs. readout for pattern p1 (stacked panels share axes)
f, axarr = plt.subplots(2, sharex = True, sharey = True)
axarr[0].plot(range(1000), p[0][0, 500:1500])
axarr[0].set_title('p1: Original')
axarr[1].plot(range(1000), y[0, 0:1000])
axarr[1].set_title('p1: Readout')

# Same comparison for pattern p2
f2, axarr2 = plt.subplots(2, sharex = True, sharey = True)
axarr2[0].plot(range(1000), p[0][1, 500:1500])
axarr2[0].set_title('p2: Original')
axarr2[1].plot(range(1000), y[1, 0:1000])
axarr2[1].set_title('p2: Readout')


NRMSE readout: 0.009091
mean NEMSE W: 0.011952
(2, 1500)
(2, 1500)
Out[155]:
<matplotlib.text.Text at 0x10cec99e8>

In [157]:
# test conceptors
import reservoir
reload(reservoir)
import scipy.interpolate
alphas = [10, 10]
RNN.compute_projectors(alphas)
parameter_nl = 0.1
c_test_length = 200
state_nl = 0.5
W_noisy = RNN.W + parameter_nl * np.abs(RNN.W).dot(np.random.rand(RNN.size_net, RNN.size_net) - 0.5)

x_ctestpl = np.zeros((5, c_test_length, RNN.num_pattern))
p_ctestpl = np.zeros((RNN.size_in, c_test_length, RNN.num_pattern))

for p in range(RNN.num_pattern):
  C = RNN.Cs[0][p]
  x = 0.5 * np.random.rand(RNN.size_net, 1)
  
  for n in range(c_test_length):
    x = np.tanh(W_noisy.dot(x) + RNN.W_bias) + state_nl * (np.random.rand(RNN.size_net, 1) - 0.5)
    x = C.dot(x)
    x_ctestpl[:, n, p] = x[0:5, 0]
    p_ctestpl[:, n, p] = RNN.W_out.dot(x)[:, 0]
    
signal_plot_length = 20
for p in range(RNN.num_pattern):
  int_rate = 20
  this_driver = RNN.pattern_collectors[p][:, 0:signal_plot_length]
  this_out = p_ctestpl[0, :, p]
  this_driver_int_f = scipy.interpolate.interp1d(np.arange(0, signal_plot_length), this_driver, kind = "cubic")
  this_driver_int = this_driver_int_f(np.linspace(0, signal_plot_length - 1, int_rate * signal_plot_length))
  this_out_int_f = scipy.interpolate.interp1d(np.arange(0, c_test_length), this_out, kind="cubic")
  this_out_int = this_out_int_f(np.linspace(0, c_test_length - 1, int_rate * c_test_length))
  
      
plt.figure(2)
plt.plot(np.log10(RNN.SR_collectors[0]))
plt.plot(RNN.Cs[2][0])


Out[157]:
[<matplotlib.lines.Line2D at 0x10cb03ba8>]

In [162]:
L = 100

trace1 = RNN.all_train_args[[71, 80], 0:L]
trace2 = RNN.all_train_args[[71, 80], 1000:1000 + L]

R1 = trace1.dot(trace1.T) / L
U1, S1, _ = np.linalg.svd(R1)
R2 = trace2.dot(trace2.T) / L
U2, S2, _ = np.linalg.svd(R2)

cycle_data = np.vstack((np.cos(2 * np.pi * np.arange(0, 200) / 200), np.sin(2 * np.pi * np.arange(0, 200) / 200)))

E1 = R1.dot(cycle_data)
E2 = R2.dot(cycle_data)

a = 1.6
C1 = R1.dot(np.linalg.inv(R1 + a ** -2 * np.eye(2)))
U1c, S1c, _ = np.linalg.svd(C1)
C2 = R2.dot(np.linalg.inv(R2 + a ** -2 * np.eye(2)))
U2c, S2c, _ = np.linalg.svd(C2)
E1c = C1.dot(cycle_data)
E2c = C2.dot(cycle_data);

plt.figure(3)
plt.plot(cycle_data[0,:], cycle_data[1,:])
plt.plot(E1c[0, :], E1c[1, :])
plt.plot(E2c[0, :], E2c[1, :])
plt.show()



In [164]:
trace1.shape


Out[164]:
(2, 100)

In [183]:
# Keep the last L - num_rank = 50 columns (time steps) of trace1
num_rank = 50
trace0 = trace1[:, num_rank:]

In [170]:
S = trace0


Out[170]:
array([[-0.04528276, -0.62799754, -0.76533837, -0.90601828, -0.90835289,
        -0.92514233, -0.22192499,  0.85621415,  0.99448059,  0.99329088,
         0.45201985, -0.95311662, -0.99820672, -0.98384499, -0.25778125,
         0.97483086,  0.99854986,  0.85875588, -0.50874593, -0.9906941 ,
        -0.98986965, -0.84346295, -0.05665793,  0.86336359,  0.95309144,
         0.87768506,  0.77717401,  0.72666367, -0.58794385, -0.94755873,
        -0.98665403, -0.97784122, -0.17677221,  0.98608989,  0.99734748,
         0.98183504,  0.2111491 , -0.992341  , -0.9972738 , -0.8923439 ,
         0.57150572,  0.99180813,  0.97781073,  0.40781345, -0.85221764,
        -0.98011454, -0.97977141, -0.92212549, -0.41178311,  0.21608936],
       [ 0.53313941, -0.53551706,  0.18696648, -0.05822249, -0.05021214,
         0.23459431, -0.35497985,  0.09152988,  0.37031703,  0.84534354,
         0.95660804,  0.18988918, -0.56941415, -0.92949373, -0.69662494,
         0.35452683,  0.96594729,  0.96831384,  0.81659414,  0.0224826 ,
        -0.93520951, -0.88863995, -0.20105265,  0.7750004 ,  0.7417893 ,
         0.81615738,  0.55571037,  0.17317812,  0.58084155,  0.37982983,
         0.41646008, -0.50092295, -0.68809246,  0.37340829,  0.80518257,
         0.95764318,  0.93809824,  0.17341359, -0.90360394, -0.94090586,
        -0.49522546,  0.69744547,  0.99274907,  0.91647651,  0.63002069,
        -0.68705661, -0.81162179, -0.66797324, -0.19241362,  0.43059503]])

In [186]:
# np.asarray on the row slice returns a *view* into trace0, so zeroing S also
# zeroes trace0's second row in place (visible in the next cell's output)
S = np.asarray(trace0[1, :])
print(trace0)
# BUG fix: the original assigned np.zeros((k, 1)) to the k selected elements,
# a (k,) <- (k, 1) shape mismatch that modern numpy rejects; a scalar
# broadcasts cleanly and has the same effect.
S[S < 0] = 0.0


[[ 0.          0.          0.          0.          0.          0.          0.
   0.          0.          0.          0.          0.          0.          0.
   0.          0.          0.          0.          0.          0.          0.
   0.          0.          0.          0.          0.          0.          0.
   0.          0.          0.          0.          0.          0.          0.
   0.          0.          0.          0.          0.          0.          0.
   0.          0.          0.          0.          0.          0.          0.
   0.        ]
 [ 0.53313941 -0.53551706  0.18696648 -0.05822249 -0.05021214  0.23459431
  -0.35497985  0.09152988  0.37031703  0.84534354  0.95660804  0.18988918
  -0.56941415 -0.92949373 -0.69662494  0.35452683  0.96594729  0.96831384
   0.81659414  0.0224826  -0.93520951 -0.88863995 -0.20105265  0.7750004
   0.7417893   0.81615738  0.55571037  0.17317812  0.58084155  0.37982983
   0.41646008 -0.50092295 -0.68809246  0.37340829  0.80518257  0.95764318
   0.93809824  0.17341359 -0.90360394 -0.94090586 -0.49522546  0.69744547
   0.99274907  0.91647651  0.63002069 -0.68705661 -0.81162179 -0.66797324
  -0.19241362  0.43059503]]

In [188]:
# S was a view into trace0, so the in-place zeroing shows up in both arrays
print(S)
print(trace0)


[ 0.53313941  0.          0.18696648  0.          0.          0.23459431
  0.          0.09152988  0.37031703  0.84534354  0.95660804  0.18988918
  0.          0.          0.          0.35452683  0.96594729  0.96831384
  0.81659414  0.0224826   0.          0.          0.          0.7750004
  0.7417893   0.81615738  0.55571037  0.17317812  0.58084155  0.37982983
  0.41646008  0.          0.          0.37340829  0.80518257  0.95764318
  0.93809824  0.17341359  0.          0.          0.          0.69744547
  0.99274907  0.91647651  0.63002069  0.          0.          0.          0.
  0.43059503]
[[ 0.          0.          0.          0.          0.          0.          0.
   0.          0.          0.          0.          0.          0.          0.
   0.          0.          0.          0.          0.          0.          0.
   0.          0.          0.          0.          0.          0.          0.
   0.          0.          0.          0.          0.          0.          0.
   0.          0.          0.          0.          0.          0.          0.
   0.          0.          0.          0.          0.          0.          0.
   0.        ]
 [ 0.53313941  0.          0.18696648  0.          0.          0.23459431
   0.          0.09152988  0.37031703  0.84534354  0.95660804  0.18988918
   0.          0.          0.          0.35452683  0.96594729  0.96831384
   0.81659414  0.0224826   0.          0.          0.          0.7750004
   0.7417893   0.81615738  0.55571037  0.17317812  0.58084155  0.37982983
   0.41646008  0.          0.          0.37340829  0.80518257  0.95764318
   0.93809824  0.17341359  0.          0.          0.          0.69744547
   0.99274907  0.91647651  0.63002069  0.          0.          0.          0.
   0.43059503]]

In [ ]:
% 26.8.2012-2 playing with 2D ellipses, check OR, AND, neg
% MATLAB scratch cell (not Python); the Python port is in a later cell.

dim = 2;
newData = 1; 
I = eye(dim);

% Build two random 2x2 conceptor-like matrices A and B from correlation
% matrices, then overwrite their singular values with fixed ones.
if newData
    X = randn(dim,dim); 
    R = X * X' / dim; 
    A = R * inv(R + I);
    [Ua Sa Va] = svd(A);
    Sa(1,1) = 0.95; Sa(2,2) = 0.2;
    A = Ua * Sa * Ua';

    Y = randn(dim,dim); 
    Q = Y * Y' / dim; 
    B = Q * inv(Q + I);
    [Ub Sb Vb] = svd(B);
    Sb(1,1) = 0.8; Sb(2,2) = 0.3;
    B = Ub * Sb * Ub';
end




% Boolean-style combinations; AND and OR are external helper functions,
% negation is I - A.
AandB = AND(A, B);
AorB = OR(A, B);
notA = I - A;

 
%% simple plotting 
fs = 24; lw = 3; LW = 6;
figure(1); clf;
set(gcf, 'WindowStyle','normal');
set(gcf,'Position', [800 300 900 300]);
% panel 1: unit circle, A, B and A OR B
subplot(1,3,1);
hold on;
plot([-1 1],[0 0], 'k--');
plot([0 0],[-1 1], 'k--');
plot(cos(2 * pi * (0:200)/200), sin(2 * pi * (0:200)/200), 'k' );
plot2DEllipse(A, 'r', lw, 200);
plot2DEllipse(B, 'b', lw, 200);
plot2DEllipse(AorB, 'm', LW, 200);
hold off;
set(gca, 'Box', 'on', 'FontSize',fs, 'PlotBoxAspectRatio',[1 1 1], ...
    'YTick',[-1 0 1]);
% panel 2: unit circle, A, B and A AND B
subplot(1,3,2);
hold on;
plot([-1 1],[0 0], 'k--');
plot([0 0],[-1 1], 'k--');
plot(cos(2 * pi * (0:200)/200), sin(2 * pi * (0:200)/200), 'k' );
plot2DEllipse(A, 'r', lw, 200);
plot2DEllipse(B, 'b', lw, 200);
plot2DEllipse(AandB, 'm', LW, 200);
hold off;
set(gca, 'Box', 'on', 'FontSize',fs, 'PlotBoxAspectRatio',[1 1 1], ...
    'YTick',[-1 0 1]);
% panel 3: unit circle, A and NOT A
subplot(1,3,3);
hold on;
plot([-1 1],[0 0], 'k--');
plot([0 0],[-1 1], 'k--');
plot(cos(2 * pi * (0:200)/200), sin(2 * pi * (0:200)/200), 'k' );
plot2DEllipse(A, 'r', lw, 200);
plot2DEllipse(notA, 'm', LW, 200);
hold off;
set(gca, 'Box', 'on', 'FontSize',fs, 'PlotBoxAspectRatio',[1 1 1], ...
    'YTick',[-1 0 1]);

In [189]:
np.random.randn(2, 2)


Out[189]:
array([[ 0.58400824,  1.02762608],
       [-1.14596486,  2.17673993]])

In [232]:
#check OR, AND, NOT using 2D ellipses
import logic
reload(logic)
dim = 2
newData = 1
I = np.eye(dim)

if newData:
  X = np.random.randn(dim, dim) 
  R = X.dot(X.T) / dim 
  A = R.dot(np.linalg.inv(R + I))
  Ua, Sa, _ = np.linalg.svd(A)
  Sa[0] = 0.95
  Sa[1] = 0.2
  A = Ua.dot(np.diag(Sa)).dot(Ua.T)

  Y = np.random.randn(dim, dim) 
  Q = Y.dot(Y.T) / dim 
  B = Q.dot(np.linalg.inv(Q + I))
  Ub, Sb, _ = np.linalg.svd(B)
  Sb[0] = 0.8
  Sb[1] = 0.3
  B = Ub.dot(np.diag(Sb)).dot(Ub.T)


AandB = logic.AND(A, B)
AorB = logic.OR(A, B)
notA = logic.NOT(A)

 
# simple plotting 
AE = A.dot(cycle_data)
BE = B.dot(cycle_data)
AorBE = AorB.dot(cycle_data)


plt.figure(4)
plt.plot(cycle_data[0,:], cycle_data[1,:])
plt.plot(AE[0, :], AE[1, :])
plt.plot(BE[0, :], BE[1, :])
plt.plot(AorBE[0, :], AorBE[1, :])
plt.axis('equal')
plt.show()



In [231]:
# Same plot as the previous cell, now with the (A AND B)-ellipse
AandBE = AandB.dot(cycle_data)


plt.figure(4)
plt.plot(cycle_data[0,:], cycle_data[1,:])
plt.plot(AE[0, :], AE[1, :])
plt.plot(BE[0, :], BE[1, :])
plt.plot(AandBE[0, :], AandBE[1, :])
plt.axis('equal')
plt.show()



In [230]:
# Unit circle, A-ellipse and (NOT A)-ellipse
notAE = notA.dot(cycle_data)


plt.figure(4)
plt.plot(cycle_data[0,:], cycle_data[1,:])
plt.plot(AE[0, :], AE[1, :])
plt.plot(notAE[0, :], notAE[1, :])
plt.axis('equal')
plt.show()



In [203]:
B


Out[203]:
array([[ 0.69381752, -0.20449088],
       [-0.20449088,  0.40618248]])

In [204]:
AandB


Out[204]:
array([[ 0.        ,  0.        ],
       [ 0.        ,  0.14669264]])

In [205]:
AorB


Out[205]:
array([[ 1.        ,  0.        ],
       [ 0.        ,  0.64134496]])

In [209]:
# Alias A as C and read off its (square) dimension
C = A
dim = C.shape[0]

In [210]:
dim


Out[210]:
2

In [212]:
# Compute the AND of conceptors C and B via their null-space intersection.
# BUG fixes: (1) the cell was pasted from inside a function and had a stray
# indent after the first line (IndentationError at top level) — dedented;
# (2) np.sum(...) of floats gave float slice indices, which modern numpy
# rejects — cast the numerical ranks to int.
dim = C.shape[0]
tol = 1e-14
UC, SC, _ = np.linalg.svd(C)
UB, SB, _ = np.linalg.svd(B)

# numerical ranks of C and B (count of singular values above tol)
num_rank_C = int(np.sum((SC > tol).astype(float)))
num_rank_B = int(np.sum((SB > tol).astype(float)))

# bases of the null spaces of C and B
UC0 = UC[:, num_rank_C:]
UB0 = UB[:, num_rank_B:]

# basis Wgk of the intersection of the two ranges (complement of the
# combined null-space projector's range)
W, sigma, _ = np.linalg.svd(UC0.dot(UC0.T) + UB0.dot(UB0.T))
num_rank_sigma = int(np.sum((sigma > tol).astype(float)))
Wgk = W[:, num_rank_sigma:]

C_and_B = Wgk.dot(np.linalg.inv(Wgk.T.dot(np.linalg.pinv(C, tol) + np.linalg.pinv(B, tol) - np.eye(dim)).dot(Wgk))).dot(Wgk.T)

In [223]:
C_and_B


Out[223]:
array([[ 0.55424447, -0.02576586],
       [-0.02576586,  0.14789045]])

In [ ]: