In [2]:
# Network dimensions and two candidate raw input-weight matrices.
import numpy as np

size_net = 100  # number of reservoir neurons
size_in = 1     # input dimensionality

# Uniform [0, 1) draws vs. standard-normal draws for the raw input weights.
Wraw_in = np.random.rand(size_net, size_in)
Wraw_in_1 = np.random.randn(size_net, size_in)
In [3]:
# Inspect the two initializations (uniform vs. standard normal).
print(Wraw_in)
print(Wraw_in_1)
In [9]:
# Public import path; the original used the private module `scipy.sparse.coo`
# and imported `xrange` from `scipy.lib.six`, both of which were removed from
# scipy (and xrange was never used in this function anyway).
from scipy.sparse import coo_matrix


def sprandn(m, n, density=0.01, format="coo", dtype=None, random_state=None):
    """Generate a sparse matrix of the given shape and density with standard
    normally distributed values.

    Parameters
    ----------
    m, n : int
        Shape of the matrix.
    density : real
        Density of the generated matrix: density equal to one means a full
        matrix, density of 0 means a matrix with no non-zero items.
    format : str
        Sparse matrix format of the returned matrix.
    dtype : dtype
        Type of the returned matrix values.
    random_state : {numpy.random.RandomState, int}, optional
        Random number generator or random seed. If not given, the singleton
        numpy.random will be used.

    Returns
    -------
    Sparse matrix of shape (m, n) in the requested format with
    int(density * m * n) normally distributed non-zero entries.

    Raises
    ------
    ValueError
        If density is outside [0, 1], or m*n does not fit the index dtype.
    NotImplementedError
        For non-float dtypes.

    Notes
    -----
    Only float types are supported for now.
    """
    if density < 0 or density > 1:
        raise ValueError("density expected to be 0 <= density <= 1")
    if dtype and (dtype not in [np.float32, np.float64, np.longdouble]):
        raise NotImplementedError("type %s not supported" % dtype)

    mn = m * n
    # Pick an index dtype wide enough for flat indices in [0, mn).
    tp = np.intc
    if mn > np.iinfo(tp).max:
        tp = np.int64
    if mn > np.iinfo(tp).max:
        msg = """\
Trying to generate a random sparse matrix such as the product of dimensions is
greater than %d - this is not supported on this machine
"""
        raise ValueError(msg % np.iinfo(tp).max)

    # Number of non zero values
    k = int(density * m * n)

    if random_state is None:
        random_state = np.random
    elif isinstance(random_state, (int, np.integer)):
        random_state = np.random.RandomState(random_state)

    # Sample k distinct flat indices.  The rejection loop (from python's
    # random.sample algorithm) is cheaper when k is small relative to mn.
    if mn < 3 * k:
        # We should use this line, but choice is only available in numpy >= 1.7
        # ind = random_state.choice(mn, size=k, replace=False)
        ind = random_state.permutation(mn)[:k]
    else:
        ind = np.empty(k, dtype=tp)
        selected = set()
        for i in range(k):
            j = random_state.randint(mn)
            while j in selected:
                j = random_state.randint(mn)
            selected.add(j)
            ind[i] = j

    # Convert flat column-major indices into (row, column) coordinates.
    j = np.floor(ind * 1. / m).astype(tp)
    i = (ind - j * m).astype(tp)
    vals = random_state.randn(k).astype(dtype)
    return coo_matrix((vals, (i, j)), shape=(m, n)).asformat(format)
In [132]:
W_star = sprandn(m = size_net, n = size_net, density = 10. / size_net, format = 'coo');
In [134]:
# Largest-magnitude eigenvalue of the sparse reservoir matrix.
# Explicit submodule import: `import scipy.sparse` alone does not guarantee
# that the `scipy.sparse.linalg` attribute is loaded.
import scipy.sparse.linalg

eigw, _ = scipy.sparse.linalg.eigs(W_star, 1, which = 'LM')
# NOTE(review): eigsh assumes a symmetric/Hermitian operator; W_star from
# sprandn is generally not symmetric, so this value is for comparison only.
eigwH, _ = scipy.sparse.linalg.eigsh(W_star, 1, which = 'LM')
In [135]:
# Compare the general (eigs) and symmetric-solver (eigsh) estimates.
print(eigw, eigwH)
In [19]:
W_star = W_star.toarray
In [22]:
print(W_star)
In [27]:
# COO attributes: .data holds the non-zero values, .nonzero() their coordinates.
# (Only meaningful while W_star is still a sparse matrix, not a dense array.)
v = W_star.data
r, c = W_star.nonzero()
print(v.size)
In [51]:
# Prepare testing data: two 10-sample sinusoids with irrational periods
# (sqrt(75) and sqrt(40) samples per cycle).
p1 = np.sin(2 * np.pi * np.arange(10) / np.sqrt(75))
# Alternative two-component driver, kept for reference:
# p2 = 0.5*np.sin(2*np.pi*np.arange(10)/np.sqrt(20)) + np.sin(2*np.pi*np.arange(10)/np.sqrt(40))
p2 = np.sin(2 * np.pi * np.arange(10) / np.sqrt(40))
In [37]:
# Stack as a 2 x 10 pattern matrix; p1[None] makes each signal a 1 x 10 row.
ps=np.vstack((p1[None], p2[None]));
In [38]:
# vstack promotes 1-D inputs to rows itself, so this equals ps.
ps1=np.vstack((p1, p2));
In [70]:
# List-of-patterns form: one (1 x 10) array per pattern.
p = []
p.append(p1[None])
p.append(p2[None])
In [39]:
ps
Out[39]:
In [40]:
ps1
Out[40]:
In [42]:
# p1[None] adds a leading axis: shape (1, 10).
p1[None]
Out[42]:
In [32]:
# Elementwise comparison; (10,) broadcasts against (1, 10).
p1 == p1[None]
Out[32]:
In [41]:
p1
Out[41]:
In [49]:
def total_len(nparraylist):
    """Sum of the second-axis lengths (time steps) of a list of D x T arrays."""
    lengths = [arr.shape[1] for arr in nparraylist]
    return sum(lengths)
In [44]:
foo=[[1, 'This is a test',12039],[12, 'test',1235]]
# NOTE(review): in Python 3 zip() returns a lazy iterator, not a list;
# the display below shows a zip object, not the transposed rows.
zipped = zip(*foo)
In [45]:
zipped
Out[45]:
In [71]:
total_len(p)
Out[71]:
In [68]:
# NOTE(review): this overwrites the sinusoidal p1 defined earlier with ints.
p1=np.asarray(range(10));
p1t = p1[None]
In [69]:
p1t.shape
Out[69]:
In [73]:
np.zeros((2,10))
Out[73]:
In [75]:
r = range(8)
In [77]:
# asarray materializes the range into an integer array.
np.asarray(r)
Out[77]:
In [81]:
# NOTE(review): p is rebound here from the earlier pattern *list* to a
# 2 x 10 array (p1 was re-assigned above, so its values are no longer a sine).
p = np.vstack((p1[None], p2[None]))
In [92]:
# First column of p as a 2 x 1 column vector.
pv = p[:,0][None].T
In [93]:
# (2 x 2) slice times (2 x 1) column -> 2 x 1 matrix product.
p[:,2:4].dot(pv)
Out[93]:
In [94]:
# Random complex 9 x 6 matrix for an SVD shape experiment.
a = np.random.randn(9, 6) + 1j*np.random.randn(9, 6)
In [95]:
U, s, V = np.linalg.svd(a)
In [96]:
s
Out[96]:
In [98]:
# Promote the 1-D singular values to a 6 x 6 diagonal matrix.
snew = np.diag(s)
In [101]:
snew.shape
Out[101]:
In [114]:
import reservoir
In [ ]:
# NOTE(review): legacy Python 2 version of the whole experiment (print
# statements, xrange, hardcoded absolute data paths).  It is superseded by the
# Python 3 cells below and will not run under a Python 3 kernel; kept only as
# a reference for the conceptors workflow.
import numpy as np;
import scipy.interpolate;
import matplotlib.pyplot as pplot;
import conceptors.util as util;
from conceptors.net import ConceptorNetwork;
from conceptors.util import read_jpv_data;
from conceptors.util import normalize_jap_data;
from conceptors.util import transform_jap_data;
train_inputs, train_outputs, test_inputs, test_outputs=read_jpv_data("/home/arlmaster/workspace/conceptors/conceptors/data/ae.train",
"/home/arlmaster/workspace/conceptors/conceptors/data/ae.test");
train_data, shifts, scales=normalize_jap_data(train_inputs);
test_data=transform_jap_data(test_inputs, shifts, scales);
# Create conceptor network
net=ConceptorNetwork(2, 200);
# Prepare testing data
p1=np.asarray(xrange(2000));
p1=np.sin(2*np.pi*p1/np.sqrt(75));
p2=np.asarray(xrange(2000));
#p2=0.5*np.sin(2*np.pi*p2/np.sqrt(20))+np.sin(2*np.pi*p2/np.sqrt(40));
p2=np.sin(2*np.pi*p2/np.sqrt(40));
ps=np.vstack((p1[None], p2[None]));
p=[];
p.append(ps);
#p.append(p1[None]);
#p.append(p2[None]);
# training
net.train(p);
# test readout
print "NRMSE readout: %f" % (util.nrmse(net.W_out.dot(net.all_train_args), net.all_train_outs));
print "mean NEMSE W: %f" % util.nrmse(net.W.dot(net.all_train_old_args), net.W_targets);
y=net.W_out.dot(net.all_train_args)
print y.shape
print net.all_train_outs.shape
pplot.figure(1);
pplot.plot(xrange(1000), p[0][0,500:1500]);
pplot.plot(xrange(1000), y[0,0:1000]);
pplot.title("Redout")
pplot.show();
# test conceptors
parameter_nl=0.1;
c_test_length=200;
state_nl=0.5;
W_noisy=net.W+parameter_nl*np.abs(net.W).dot(np.random.rand(net.num_neuron, net.num_neuron)-0.5);
x_ctestpl=np.zeros((5,c_test_length,net.num_pattern));
p_ctestpl=np.zeros((net.num_in,c_test_length,net.num_pattern));
# NOTE(review): the loop variable below reuses the name `p`, clobbering the
# pattern list defined above.
for p in xrange(net.num_pattern):
c=net.Cs[0][p];
x=0.5*np.random.rand(net.num_neuron,1);
for n in xrange(c_test_length):
x=np.tanh(W_noisy.dot(x)+net.W_bias)+state_nl*(np.random.rand(net.num_neuron,1)-0.5);
x=c.dot(x);
x_ctestpl[:,n,p]=x[0:5,0];
p_ctestpl[:,n,p]=net.W_out.dot(x)[:,0];
for p in xrange(net.num_pattern):
int_rate=20;
this_driver=net.train_ppl[p];
this_out=p_ctestpl[0,:,p];
this_driver_int_f=scipy.interpolate.interp1d(np.arange(0,net.signal_plot_length), this_driver, kind="cubic");
this_driver_int=this_driver_int_f(np.linspace(0, net.signal_plot_length-1, int_rate*net.signal_plot_length));
this_out_int_f=scipy.interpolate.interp1d(np.arange(0,c_test_length), this_out, kind="cubic");
this_out_int=this_out_int_f(np.linspace(0, c_test_length-1, int_rate*c_test_length));
pplot.figure(2);
pplot.plot(np.log10(net.sr_collectors[0]))
pplot.plot(net.Cs[2][0])
#pplot.plot(xrange(c_test_length), p_ctestpl[0,:,1]);
pplot.show()
# draw conceptors
L=100;
trace1=net.all_train_args[[71,80],0:L];
trace2=net.all_train_args[[71,80],1000:1000+L];
R1=trace1.dot(trace1.T)/L;
U1, S1, _=np.linalg.svd(R1);
R2=trace2.dot(trace2.T)/L;
U2, S2, _=np.linalg.svd(R2);
cycle_data=np.vstack((np.cos(2*np.pi*np.arange(0,200)/200), np.sin(2*np.pi*np.arange(0,200)/200)));
E1=R1.dot(cycle_data); E2=R2.dot(cycle_data);
a=1.6
C1=R1.dot(np.linalg.inv(R1+a**-2*np.eye(2)));
U1c, S1c, _=np.linalg.svd(C1);
C2=R2.dot(np.linalg.inv(R2+a**-2*np.eye(2)));
U2c, S2c, _=np.linalg.svd(C2);
E1c=C1.dot(cycle_data);
E2c=C2.dot(cycle_data);
pplot.figure(3);
pplot.plot(cycle_data[0,:], cycle_data[1,:]);
#pplot.plot(trace1[0,:], trace1[1,:]);
pplot.plot(E1c[0,:], E1c[1,:]);
pplot.show();
In [155]:
# Python 3 version of the legacy cell above: build a 2-input, 200-neuron
# reservoir and drive it with two sinusoidal patterns.
import matplotlib.pyplot as plt
from imp import reload
reload(reservoir)
import util
reload(util)
%matplotlib inline
# Create conceptor network
RNN = reservoir.Reservoir(2, 200)
# Prepare testing data: two 2000-sample sinusoids with irrational periods
p1 = np.asarray(range(2000))
p1 = np.sin(2 * np.pi * p1 / np.sqrt(75))
p2 = np.asarray(range(2000))
p2 = np.sin(2 * np.pi * p2 / np.sqrt(40))
ps = np.vstack((p1[None], p2[None]))
p=[]
p.append(ps);
# training (second argument presumably a washout length of 500 steps —
# TODO confirm against reservoir.Reservoir.train)
RNN.train(p, 500);
def nrmse(output,
          target):
    """
    Normalized root-mean-square error between two signals.

    @param output: output data in D x time dimension (or 1-D)
    @param target: target data in D x time dimension (or 1-D)
    @return NRMSE: normalized root mean square error, averaged over the D rows.
    """
    # Promote 1-D signals to column vectors so the axis-1 statistics apply.
    if output.ndim == 1 and target.ndim == 1:
        output = output[None].T
        target = target[None].T
    # Normalizer: mean of the two per-row sample variances (ddof=1).
    combined_var = 0.5 * (np.var(target, axis=1, ddof=1)
                          + np.var(output, axis=1, ddof=1))
    residual = output - target
    per_row_nrmse = np.sqrt(np.mean(residual ** 2, axis=1) / combined_var)
    return np.mean(per_row_nrmse)
# test readout: reconstruction error of W_out on the collected training states
print("NRMSE readout: %f" % (nrmse(RNN.W_out.dot(RNN.all_train_args), RNN.all_train_outs)))
# Pre-tanh targets for W.  np.tile replaces np.matlib.repmat: numpy.matlib is
# deprecated, and `np.matlib` is only available after an explicit
# `import numpy.matlib`, which this notebook never performs.
# (Assumes W_bias is a 2-D (N, 1) column, so tile (1, T) matches repmat(b, 1, T)
# exactly — TODO confirm against reservoir.Reservoir.)
total_length = RNN.all_train_args.shape[1]
W_targets = np.arctanh(RNN.all_train_args) - np.tile(RNN.W_bias, (1, total_length))
# "NEMSE" typo in the original message corrected to NRMSE.
print("mean NRMSE W: %f" % nrmse(RNN.W.dot(RNN.all_train_old_args), W_targets))
y = RNN.W_out.dot(RNN.all_train_args)
print(y.shape)
print(RNN.all_train_outs.shape)
# Original vs. reconstructed readout for each driver channel.  The original
# window starts at 500 while the readout starts at 0 — presumably aligning for
# the 500-step washout; TODO confirm.
f, axarr = plt.subplots(2, sharex = True, sharey = True)
axarr[0].plot(range(1000), p[0][0, 500:1500])
axarr[0].set_title('p1: Original')
axarr[1].plot(range(1000), y[0, 0:1000])
axarr[1].set_title('p1: Readout')
f2, axarr2 = plt.subplots(2, sharex = True, sharey = True)
axarr2[0].plot(range(1000), p[0][1, 500:1500])
axarr2[0].set_title('p2: Original')
axarr2[1].plot(range(1000), y[1, 0:1000])
axarr2[1].set_title('p2: Readout')
Out[155]:
In [157]:
# test conceptors: retrieve each stored pattern with its conceptor under
# parameter noise (perturbed W) and state noise.
import reservoir
reload(reservoir)
import scipy.interpolate
alphas = [10, 10]
RNN.compute_projectors(alphas)
parameter_nl = 0.1
c_test_length = 200
state_nl = 0.5
W_noisy = RNN.W + parameter_nl * np.abs(RNN.W).dot(np.random.rand(RNN.size_net, RNN.size_net) - 0.5)
x_ctestpl = np.zeros((5, c_test_length, RNN.num_pattern))
p_ctestpl = np.zeros((RNN.size_in, c_test_length, RNN.num_pattern))
# NOTE(review): the loop variable `p` clobbers the training-pattern list
# defined in the previous cell.
for p in range(RNN.num_pattern):
C = RNN.Cs[0][p]
x = 0.5 * np.random.rand(RNN.size_net, 1)
for n in range(c_test_length):
x = np.tanh(W_noisy.dot(x) + RNN.W_bias) + state_nl * (np.random.rand(RNN.size_net, 1) - 0.5)
x = C.dot(x)
x_ctestpl[:, n, p] = x[0:5, 0]
p_ctestpl[:, n, p] = RNN.W_out.dot(x)[:, 0]
signal_plot_length = 20
# Cubic interpolation of driver and retrieved output for smoother plots.
for p in range(RNN.num_pattern):
int_rate = 20
this_driver = RNN.pattern_collectors[p][:, 0:signal_plot_length]
this_out = p_ctestpl[0, :, p]
this_driver_int_f = scipy.interpolate.interp1d(np.arange(0, signal_plot_length), this_driver, kind = "cubic")
this_driver_int = this_driver_int_f(np.linspace(0, signal_plot_length - 1, int_rate * signal_plot_length))
this_out_int_f = scipy.interpolate.interp1d(np.arange(0, c_test_length), this_out, kind="cubic")
this_out_int = this_out_int_f(np.linspace(0, c_test_length - 1, int_rate * c_test_length))
plt.figure(2)
plt.plot(np.log10(RNN.SR_collectors[0]))
plt.plot(RNN.Cs[2][0])
Out[157]:
In [162]:
# Draw 2-D conceptor ellipses from two 2-neuron slices of the state traces.
L = 100
trace1 = RNN.all_train_args[[71, 80], 0:L]
trace2 = RNN.all_train_args[[71, 80], 1000:1000 + L]
# State correlation matrices of the two traces.
R1 = trace1.dot(trace1.T) / L
U1, S1, _ = np.linalg.svd(R1)
R2 = trace2.dot(trace2.T) / L
U2, S2, _ = np.linalg.svd(R2)
# 200 points on the unit circle; mapping them through R or C gives an ellipse.
cycle_data = np.vstack((np.cos(2 * np.pi * np.arange(0, 200) / 200), np.sin(2 * np.pi * np.arange(0, 200) / 200)))
E1 = R1.dot(cycle_data)
E2 = R2.dot(cycle_data)
a = 1.6
# Conceptors C = R (R + a^-2 I)^-1 with aperture a.
C1 = R1.dot(np.linalg.inv(R1 + a ** -2 * np.eye(2)))
U1c, S1c, _ = np.linalg.svd(C1)
C2 = R2.dot(np.linalg.inv(R2 + a ** -2 * np.eye(2)))
U2c, S2c, _ = np.linalg.svd(C2)
E1c = C1.dot(cycle_data)
E2c = C2.dot(cycle_data);
plt.figure(3)
plt.plot(cycle_data[0,:], cycle_data[1,:])
plt.plot(E1c[0, :], E1c[1, :])
plt.plot(E2c[0, :], E2c[1, :])
plt.show()
In [164]:
trace1.shape
Out[164]:
In [183]:
# Keep only the columns of trace1 from index 50 onward.
num_rank = 50
trace0 = trace1[:, num_rank:]
In [170]:
S = trace0
Out[170]:
In [186]:
# NOTE(review): basic slicing returns a view, so S aliases trace0's second
# row — the in-place edit below also modifies trace0 (shown in In [188]).
S = np.asarray(trace0[1, :])
print(trace0)
S[S < 0] = np.zeros((np.sum((S < 0).astype(float)), 1))
In [188]:
# Both arrays show the zeroed entries: S is a view into trace0.
print(S)
print(trace0)
In [ ]:
% 26.8.2012-2 playing with 2D ellipses, check OR, AND, neg
% NOTE(review): original MATLAB reference implementation (relies on the
% external AND/OR/plot2DEllipse helpers).  Pasted here for comparison with the
% Python port below; it is not executable in this Python notebook.
dim = 2;
newData = 1;
I = eye(dim);
if newData
% Random PSD matrix R, turned into a conceptor A = R(R+I)^-1 with fixed
% singular values.
X = randn(dim,dim);
R = X * X' / dim;
A = R * inv(R + I);
[Ua Sa Va] = svd(A);
Sa(1,1) = 0.95; Sa(2,2) = 0.2;
A = Ua * Sa * Ua';
Y = randn(dim,dim);
Q = Y * Y' / dim;
B = Q * inv(Q + I);
[Ub Sb Vb] = svd(B);
Sb(1,1) = 0.8; Sb(2,2) = 0.3;
B = Ub * Sb * Ub';
end
AandB = AND(A, B);
AorB = OR(A, B);
notA = I - A;
%% simple plotting
fs = 24; lw = 3; LW = 6;
figure(1); clf;
set(gcf, 'WindowStyle','normal');
set(gcf,'Position', [800 300 900 300]);
% Panel 1: A, B and A OR B against the unit circle.
subplot(1,3,1);
hold on;
plot([-1 1],[0 0], 'k--');
plot([0 0],[-1 1], 'k--');
plot(cos(2 * pi * (0:200)/200), sin(2 * pi * (0:200)/200), 'k' );
plot2DEllipse(A, 'r', lw, 200);
plot2DEllipse(B, 'b', lw, 200);
plot2DEllipse(AorB, 'm', LW, 200);
hold off;
set(gca, 'Box', 'on', 'FontSize',fs, 'PlotBoxAspectRatio',[1 1 1], ...
'YTick',[-1 0 1]);
% Panel 2: A, B and A AND B.
subplot(1,3,2);
hold on;
plot([-1 1],[0 0], 'k--');
plot([0 0],[-1 1], 'k--');
plot(cos(2 * pi * (0:200)/200), sin(2 * pi * (0:200)/200), 'k' );
plot2DEllipse(A, 'r', lw, 200);
plot2DEllipse(B, 'b', lw, 200);
plot2DEllipse(AandB, 'm', LW, 200);
hold off;
set(gca, 'Box', 'on', 'FontSize',fs, 'PlotBoxAspectRatio',[1 1 1], ...
'YTick',[-1 0 1]);
% Panel 3: A and NOT A.
subplot(1,3,3);
hold on;
plot([-1 1],[0 0], 'k--');
plot([0 0],[-1 1], 'k--');
plot(cos(2 * pi * (0:200)/200), sin(2 * pi * (0:200)/200), 'k' );
plot2DEllipse(A, 'r', lw, 200);
plot2DEllipse(notA, 'm', LW, 200);
hold off;
set(gca, 'Box', 'on', 'FontSize',fs, 'PlotBoxAspectRatio',[1 1 1], ...
'YTick',[-1 0 1]);
In [189]:
# Quick sanity check of randn's 2 x 2 output.
np.random.randn(2, 2)
Out[189]:
In [232]:
#check OR, AND, NOT using 2D ellipses (Python port of the MATLAB cell above)
import logic
reload(logic)
dim = 2
newData = 1
I = np.eye(dim)
if newData:
X = np.random.randn(dim, dim)
R = X.dot(X.T) / dim
# Conceptor-like PSD matrix A = R (R + I)^-1, with singular values then
# pinned to 0.95 / 0.2 (matching the MATLAB reference).
A = R.dot(np.linalg.inv(R + I))
Ua, Sa, _ = np.linalg.svd(A)
Sa[0] = 0.95
Sa[1] = 0.2
A = Ua.dot(np.diag(Sa)).dot(Ua.T)
Y = np.random.randn(dim, dim)
Q = Y.dot(Y.T) / dim
B = Q.dot(np.linalg.inv(Q + I))
Ub, Sb, _ = np.linalg.svd(B)
Sb[0] = 0.8
Sb[1] = 0.3
B = Ub.dot(np.diag(Sb)).dot(Ub.T)
AandB = logic.AND(A, B)
AorB = logic.OR(A, B)
notA = logic.NOT(A)
# simple plotting: map the unit circle (cycle_data from In [162]) through
# each matrix to get its ellipse
AE = A.dot(cycle_data)
BE = B.dot(cycle_data)
AorBE = AorB.dot(cycle_data)
plt.figure(4)
plt.plot(cycle_data[0,:], cycle_data[1,:])
plt.plot(AE[0, :], AE[1, :])
plt.plot(BE[0, :], BE[1, :])
plt.plot(AorBE[0, :], AorBE[1, :])
plt.axis('equal')
plt.show()
In [231]:
# A AND B ellipse against the unit circle and the A, B ellipses.
AandBE = AandB.dot(cycle_data)
plt.figure(4)
plt.plot(cycle_data[0,:], cycle_data[1,:])
plt.plot(AE[0, :], AE[1, :])
plt.plot(BE[0, :], BE[1, :])
plt.plot(AandBE[0, :], AandBE[1, :])
plt.axis('equal')
plt.show()
In [230]:
# NOT A ellipse.
notAE = notA.dot(cycle_data)
plt.figure(4)
plt.plot(cycle_data[0,:], cycle_data[1,:])
plt.plot(AE[0, :], AE[1, :])
plt.plot(notAE[0, :], notAE[1, :])
plt.axis('equal')
plt.show()
In [203]:
B
Out[203]:
In [204]:
AandB
Out[204]:
In [205]:
AorB
Out[205]:
In [209]:
# Step through logic.AND manually, using A as the first operand.
C = A
dim = C.shape[0]
In [210]:
dim
Out[210]:
In [212]:
# Manual computation of the conceptor conjunction C AND B (cf. Jaeger's
# conceptor AND formula): project onto the intersection of the ranges of C and
# B via the combined null-space basis, then invert the pseudo-inverse sum.
dim = C.shape[0]
tol = 1e-14
UC, SC, _ = np.linalg.svd(C)
UB, SB, _ = np.linalg.svd(B)
# Numerical ranks.  Cast to int: float-valued slice indices raise TypeError
# under Python 3 / modern numpy (the original summed a float array).
num_rank_C = int(np.sum(SC > tol))
num_rank_B = int(np.sum(SB > tol))
# Orthonormal bases of the null spaces of C and B.
UC0 = UC[:, num_rank_C:]
UB0 = UB[:, num_rank_B:]
W, sigma, _ = np.linalg.svd(UC0.dot(UC0.T) + UB0.dot(UB0.T))
num_rank_sigma = int(np.sum(sigma > tol))
# Basis of the intersection of the ranges of C and B.
Wgk = W[:, num_rank_sigma:]
C_and_B = Wgk.dot(np.linalg.inv(Wgk.T.dot(np.linalg.pinv(C, tol) + np.linalg.pinv(B, tol) - np.eye(dim)).dot(Wgk))).dot(Wgk.T)
In [223]:
# Compare the manual result with logic.AND(A, B) computed earlier.
C_and_B
Out[223]:
In [ ]: