In [1]:
# Load a 4-D fMRI volume with pynifti (Python-2-era library; nibabel is its successor).
from nifti import NiftiImage
import numpy as np
from scipy import stats
import pylab
# NOTE(review): hard-coded absolute path to a local machine — parameterize before sharing.
nim=NiftiImage('/home/sophie/Desktop/oli')
In [ ]:
In [2]:
# Pull the raw voxel array out of the NIfTI image (pynifti returns (t, z, y, x)-ordered data).
D=nim.data
In [3]:
# Inspect dimensions of the 4-D array.
D.shape
Out[3]:
In [4]:
# Distribute the volume as a thunder Images RDD; `sc` is the SparkContext
# injected by the thunder/Spark notebook kernel (not defined in this file).
from thunder.rdds.fileio.imagesloader import ImagesLoader
# list(D) splits along the first (time) axis: one 3-D volume per record.
imgs = ImagesLoader(sc).fromArrays(list(D))
In [5]:
# Convert to a Series (one time series per voxel) for factorization below.
# NOTE(review): `S` is later rebound to a shape tuple (cell with S=data.shape) — rename one of them.
S=imgs.toSeries()
In [6]:
# Global minimum across all images — sanity check (NMF requires non-negative data).
imgs.min().min()
Out[6]:
In [7]:
# Inline plotting + seaborn styling for all figures below.
%matplotlib inline
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_context('notebook')
sns.set_style('darkgrid')
# thunder's Colorize helper for rendering maps (imported but `image` is unused below).
from thunder import Colorize
image = Colorize.image
In [8]:
#P=prctile(reshape(R,size(R,1)*size(R,2),1),90);
#R1=double(R).*(10000/P);
In [9]:
#demean
In [10]:
#%%Smith lines
#[uu,ss,vv]=nets_svds(R,30); % initial SVD to the top 30 components (arbitrary number fixed in MELODIC)
#vv(abs(vv)<2.3*std(vv(:)))=0; % zero small values in PCA maps %what is that doing here?
#stddevs=max(std(R-uu*ss*vv'),0.1); % subtract main parts of top components from data to get normalisation
#R=R./repmat(stddevs,size(R,1),1); % var-norm
In [11]:
#[u,s,v]=nets_svds(R,Npc);
In [12]:
#[icasig, A, W] = fastica (v','approach','symm','epsilon', 0.000001);
In [8]:
# NOTE(review): numpy and pyplot are re-imported here (already imported above);
# harmless but redundant — notebook convention is a single import cell at the top.
import numpy as np
import matplotlib.pyplot as plt
from scipy import signal
# FastICA/PCA imported for a scikit-learn ICA variant of this analysis (not used below).
from sklearn.decomposition import FastICA, PCA
#see http://scikit-learn.org/stable/auto_examples/decomposition/plot_ica_blind_source_separation.html
In [12]:
# Non-negative matrix factorization on the voxel time series:
# k=50 spatial components (w) with matching time courses (h), 20 iterations.
from thunder import NMF
model = NMF(k=50,maxIter=20).fit(S)
In [ ]:
# Time courses of all 50 components (one line each).
plt.plot(model.h.T);
In [ ]:
# Collect the distributed spatial maps into a local array.
NMFmaps = model.w.pack()
In [ ]:
NMFmaps.shape
In [ ]:
# Component time courses, components along the last axis.
NMFT=model.h.T
In [ ]:
# DT is the time-course matrix used by the review loop further below.
DT=NMFT
In [ ]:
# Transpose so the component index is the LAST axis: (x, y, z, component).
data=NMFmaps.T
In [ ]:
# NOTE(review): this rebinds S, shadowing the thunder Series defined earlier —
# S is a shape tuple from here on; S[3] is the number of components.
S=data.shape
In [ ]:
# Allocate per-component statistics. Demean/Dvar must be COPIES of `data`:
# the original `Demean=data; Dvar=data` made all three names aliases of the
# same array, so the in-place demeaning loop below silently mutated `data`
# (used later as the anatomical underlay) and `Dvar` as well.
Demean=data.copy()   # demeaned maps, filled in place below
Dvar=data.copy()     # variance-normalised maps, filled in place below
Var=np.zeros(S[3])   # per-component variance
Tvar=np.zeros(S[3])  # (unused below — kept for compatibility)
Sk=np.zeros(S[3])    # per-component skewness
Kr=np.zeros(S[3])    # per-component kurtosis
In [ ]:
# Remove each component map's spatial mean (in place, one component at a time).
# (cell-internal indentation was lost in this dump; reconstructed here)
for comp in range(S[3]):
    # .mean() over the whole 3-D slice equals the original chained np.mean calls
    Demean[:, :, :, comp] = data[:, :, :, comp] - data[:, :, :, comp].mean()
In [ ]:
# Per-component variance normalisation plus distribution-shape statistics.
# (cell-internal indentation was lost in this dump; reconstructed here)
for comp in range(S[3]):
    flat = np.reshape(Demean[:, :, :, comp], S[0] * S[1] * S[2])
    Var[comp] = np.var(flat)
    # NOTE(review): divides by the VARIANCE, not the standard deviation —
    # presumably intentional here, but confirm ("var-norm" usually divides by std).
    Dvar[:, :, :, comp] = Demean[:, :, :, comp] / Var[comp]
    Kr[comp] = stats.kurtosis(flat)
    Sk[comp] = stats.skew(flat)
In [ ]:
# Skewness per component — highly skewed maps often indicate real (sparse) sources.
plt.plot(Sk)
In [ ]:
# Partition the z-slices into 5 slabs for max-projection display.
# Fixes: `range(...)` is not an array in Python 3, and np.split rejects FLOAT
# split indices in modern NumPy — cast the percentile positions to int
# (truncation matches the legacy behaviour).
z_idx = np.arange(S[2])
Percs = np.percentile(z_idx, [20, 40, 60, 80, 100]).astype(int)
# 6 chunks are produced; only Indices[0..4] are used below (the 100th-percentile
# split leaves a trailing single-slice chunk, as in the original).
Indices = np.split(z_idx, Percs)
# Working buffers shaped (x, y, 5): per-slab max projections of one component.
D1 = data[:, :, range(5), 2]
Dmean = data[:, :, range(5), 2]
# Thresholded maps for every component, filled by the review loop below.
Dmaps = np.zeros([S[0], S[1], 5, S[3]])
In [ ]:
# Build a background underlay per slab: mean over all components, then a
# max projection through the slab's z-slices.
# (cell-internal indentation was lost in this dump; reconstructed here)
for slab in range(5):
    slab_mean = np.mean(Dvar[:, :, Indices[slab], :], 3)
    Dmean[:, :, slab] = np.max(slab_mean, 2)
In [28]:
import copy  # local import: keeps this cell self-contained
# Copy the colormap before mutating it: `plt.cm.jet.set_bad(...)` alters the
# globally registered 'jet' (and raises in modern matplotlib).
my_cmap = copy.copy(plt.cm.jet)
# Render NaN (sub-threshold) voxels as fully transparent overlay pixels.
my_cmap.set_bad(alpha=0)
# Manual good/bad flag per component, filled interactively below.
Good_ICs = np.zeros(S[3])
In [29]:
%%javascript
// Raise the notebook auto-scroll threshold so the long per-component
// review output below (5 maps + a time course per component) stays visible.
IPython.OutputArea.auto_scroll_threshold = 2000;
In [ ]:
# Interactive component review: for each component, show 5 thresholded slab
# max-projections over the anatomical underlay plus its time course, then
# record a manual keep/reject score typed by the user.
# (cell-internal indentation was lost in this dump; reconstructed here)
pylab.rcParams['figure.figsize'] = (13, 3)
for comp in range(S[3]):
    # Max-project the var-normalised maps through each z-slab.
    for slab in range(5):
        stack = Dvar[:, :, Indices[slab], comp]
        D1[:, :, slab] = np.max(stack, 2)
    # Threshold at |z| < 2.3 — sub-threshold voxels become NaN so the
    # overlay colormap renders them transparent (set_bad above).
    D1[np.abs(D1) < 2.3] = np.nan
    for slab in range(5):
        plt.subplot(1, 5, slab + 1)
        plt.imshow(Dmean[:, :, slab], cmap=plt.cm.gray)
        plt.imshow(D1[:, :, slab], cmap=my_cmap, interpolation='none')
        ax = plt.gca()
        ax.axes.get_xaxis().set_visible(False)
        ax.axes.get_yaxis().set_visible(False)
    plt.show()
    plt.plot(DT.T[comp])
    plt.show()
    # Manual score typed at the prompt (Python-2 input() evals the entry).
    Good_ICs[comp] = input()
    # Store supra-threshold excess; NaNs propagate here and are zeroed later.
    Dmaps[:, :, :, comp] = np.abs(D1) - 2.30
In [31]:
# Zero out the NaN holes (sub-threshold voxels) left by the review loop.
where_are_NaNs = np.isnan(Dmaps)
Dmaps[where_are_NaNs] = 0
In [32]:
# RGB composite of all accepted components: (x, y, slab, rgb).
Final_map=np.zeros([S[0],S[1],5,3])
# Scratch buffer for one component's coloured contribution.
Fmaps=np.zeros([S[0],S[1],5,3])
# One random RGB colour per component.
C=np.zeros([S[3],3])
In [33]:
# Blend each manually accepted component into the RGB composite with a
# random colour, normalised so contributions peak at 0.7.
# Fixes vs. original: guard against division by zero when a component's
# thresholded map is all zeros (the author's commented-out `M[M==0]=1`
# flagged exactly this); the loop-invariant maxima are hoisted out of the
# channel loop; accumulation happens once per component, after all three
# channels are written. (Cell-internal indentation was lost in this dump.)
for j in range(S[3]):
    if Good_ICs[j]:
        C[j, :] = np.squeeze(np.random.rand(3, 1))
        M = np.max(Dmaps[:, :, :, j])   # peak of this component's map
        if M == 0:
            M = 1.0                     # avoid 0/0 for empty maps
        Cmax = np.max(C[j, :])
        for k in range(3):
            Fmaps[:, :, :, k] = 0.7 * Dmaps[:, :, :, j] * C[j, k] / (M * Cmax)
        Final_map = Final_map + Fmaps
In [34]:
# Show the five slab composites side by side, axes hidden.
# (cell-internal indentation was lost in this dump; reconstructed here)
pylab.rcParams['figure.figsize'] = (18, 14)
for slab in range(5):
    plt.subplot(1, 5, slab + 1)
    plt.imshow(Final_map[:, :, slab])
    ax = plt.gca()
    ax.axes.get_xaxis().set_visible(False)
    ax.axes.get_yaxis().set_visible(False)
In [ ]: