Let's implement, use, and analyze ICA!
First, let's see what ICA does, using a library implementation from the excellent scikit-learn package.
In [1]:
from sklearn.decomposition import FastICA
In [2]:
import numpy as np
import numpy.random as npr
In [72]:
length = 1000
# Two sinusoidal sources at different frequencies/phases, plus a
# low-amplitude Gaussian noise source.
a = np.sin(np.linspace(0, 50 * np.pi, length))
b = np.sin(np.linspace(np.pi, 60 * np.pi + 0.5, length))
c = npr.randn(length) * 0.05
# Stack the sources column-wise and normalize each to unit variance.
S = np.c_[a, b, c]
S /= np.std(S, axis=0)
# Mixing matrix: each observed signal is a linear blend of the sources.
A = np.array([[1, 1, 1], [0.5, 2, 1.0], [1.5, 1.0, 2.0]])
X = np.dot(S, A.T)
X.shape
Out[72]:
(1000, 3)
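Why should unmixing be possible at all? ICA leans on the sources being non-Gaussian: by the central limit theorem, a sum of independent signals looks more Gaussian than the signals themselves, so ICA can search for directions that maximize non-Gaussianity. A quick check of this (a sketch, using scipy.stats.kurtosis; zero excess kurtosis is Gaussian):
In [ ]:
from scipy.stats import kurtosis
# Sine waves have strongly negative excess kurtosis; the Gaussian noise
# source sits near 0, and the mixtures should drift toward 0 as well.
print("sources: ", np.round(kurtosis(S, axis=0), 2))
print("mixtures:", np.round(kurtosis(X, axis=0), 2))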
In [73]:
import matplotlib.pyplot as pl  # pyplot directly, rather than the deprecated pylab
%matplotlib inline
In [83]:
pl.plot(a,alpha=0.2)
pl.plot(b,alpha=0.2)
pl.plot(c,alpha=0.2)
Out[83]: [figure: the two sinusoidal sources and the noise source, overlaid]
In [75]:
pl.plot(X[:,0],alpha=0.2)
pl.plot(X[:,1],alpha=0.2)
pl.plot(X[:,2],alpha=0.2)
Out[75]: [figure: the three observed mixtures, overlaid]
In [89]:
f = FastICA(n_components=3)  # we observe three mixtures, so at most three components can be recovered
In [90]:
Y = f.fit_transform(X)
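FastICA also estimates the mixing matrix, exposed as f.mixing_. It should match the true A only up to column permutation, sign, and scale, so don't expect entry-for-entry agreement; a quick side-by-side look:
In [ ]:
# The estimated mixing matrix; columns may come back permuted,
# rescaled, and sign-flipped relative to the true A defined above.
print(np.round(f.mixing_, 2))
print(A)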
In [94]:
pl.plot(Y[:,0],alpha=0.2)
pl.plot(Y[:,1],alpha=0.2)
pl.plot(Y[:,2],alpha=0.2)
Out[94]: [figure: the three recovered independent components, overlaid]
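Eyeballing the plots works, but since ICA returns components in arbitrary order with arbitrary sign and scale, a more direct check (a sketch, using the S and Y defined above) is the matrix of absolute cross-correlations: recovery is good when each row has exactly one entry near 1.
In [ ]:
# Cross-correlations between true sources (rows) and recovered
# components (columns); permutation and sign flips are expected.
C = np.corrcoef(S.T, Y.T)[:3, 3:]
print(np.round(np.abs(C), 2))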
Is ICA actually just a "hack" for what a deep autoencoder would do?
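Before tackling that question, it helps to make the "implement" part of the plan concrete. Below is a minimal from-scratch sketch of parallel (symmetric) FastICA with the tanh contrast; fast_ica, its arguments, and Y_scratch are illustrative names, not sklearn's API, and it assumes the X defined above.
In [ ]:
def fast_ica(X, n_components, n_iter=200, tol=1e-6, seed=0):
    n = X.shape[0]
    # Center, then whiten via SVD so the data has identity covariance.
    Xc = X - X.mean(axis=0)
    U, s, Vt = np.linalg.svd(Xc, full_matrices=False)
    Z = np.sqrt(n) * U[:, :n_components]  # whitened data
    W = npr.RandomState(seed).randn(n_components, n_components)
    for _ in range(n_iter):
        # Fixed-point step: w_i <- E[z g(w_i^T z)] - E[g'(w_i^T z)] w_i,
        # with g = tanh and g' = 1 - tanh^2.
        G = np.tanh(Z @ W.T)
        W_new = (G.T @ Z) / n - np.diag((1.0 - G ** 2).mean(axis=0)) @ W
        # Symmetric decorrelation: W <- (W W^T)^(-1/2) W, via SVD.
        u, _, vt = np.linalg.svd(W_new)
        W_new = u @ vt
        # Converged when each new row is (anti-)parallel to the old one.
        if np.max(np.abs(np.abs(np.diag(W_new @ W.T)) - 1.0)) < tol:
            W = W_new
            break
        W = W_new
    return Z @ W.T  # recovered sources, up to order/sign/scale

Y_scratch = fast_ica(X, n_components=3)
Plotting Y_scratch the same way as Y above should show the two sinusoids and the noise track reappearing, possibly reordered or flipped.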