Software by Amine ECHRAIBI
Two-month internship (July/August 2017) at IMT Atlantique
Supervisor: Nicolas FARRUGIA, nicolas.farrugia@imt-atlantique.fr
This notebook implements the method described here and verifies the results.
This is a re-implementation, in Keras with the TensorFlow backend, of the neural network described in the paper.
version 1
In [3]:
from __future__ import print_function, division
import matplotlib.pyplot as plt
plt.interactive(False)
import tensorflow as tf
import h5py
from scipy.stats import pearsonr
from keras.models import Sequential
from keras.layers import Convolution2D
from keras.layers import Dense, Dropout, Flatten
from keras.layers.advanced_activations import LeakyReLU
from keras import optimizers, callbacks, regularizers, initializers
from E2E_conv import *
from injury import ConnectomeInjury
import numpy as np
from vis.visualization import visualize_activation
from keras.utils import to_categorical
from nilearn import datasets
from nilearn import input_data
from nilearn.connectome import ConnectivityMeasure
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
Setting up the hyperparameters and the l2 regularizer
In [4]:
# Globals: hyperparameters
batch_size = 14
dropout = 0.5
momentum = 0.9
noise_weight = 0.0625
lr = 0.01
decay = 0.0005
# Setting l2_norm regularizer
reg = regularizers.l2(decay)
kernel_init = initializers.he_uniform()
In [5]:
# Loading synthetic data
injuryconnectome = ConnectomeInjury()
x_train,y_train = injuryconnectome.generate_injury(n_samples=1000,noise_weight=noise_weight)
x_valid,y_valid = injuryconnectome.generate_injury(n_samples=300,noise_weight=noise_weight)
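A quick shape check can be done here; the expected shapes below are an assumption based on the generator code shown later in this notebook (a 90-region base connectome and two injury signatures, alpha and beta):
# Shape check (expected shapes are assumptions, see the generator code below):
print(x_train.shape)  # e.g. (1000, 1, 90, 90) : (samples, channels, nodes, nodes)
print(y_train.shape)  # e.g. (1000, 2)         : the two injury strengths (alpha, beta)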
In [6]:
# plotting a sample
%matplotlib inline
plt.imshow(x_train[0][0])
plt.title('synthetic connectome')
plt.show()
print("The adjacency matrix : ")
print(x_train[0][0])
# reshaping data: move the channel axis from position 1 to the last position
# (a plain reshape is safe here because the channel dimension is 1)
x_train = x_train.reshape(x_train.shape[0],x_train.shape[3],x_train.shape[2],x_train.shape[1])
x_valid = x_valid.reshape(x_valid.shape[0],x_valid.shape[3],x_valid.shape[2],x_valid.shape[1])
The synthetic connectome is represented as a graph $G(A,\Omega)$ where $A$ is the weighted adjacency matrix and $\Omega$ is the set of nodes (brain regions).
To construct this layer, we create two kernels corresponding to $r$ and $c$, of size $1 \times |\Omega|$ and $|\Omega| \times 1$ respectively. We then convolve the input with each kernel, duplicate the resulting feature maps $|\Omega|$ times, and sum the duplicates element-wise to obtain the convolution described above.
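This decomposition can be checked directly in NumPy: for a single-channel input, the E2E response at position $(i,j)$ equals the weighted sum of row $i$ (through $r$) plus the weighted sum of column $j$ (through $c$). A minimal sketch (variable names are illustrative only):
import numpy as np

d = 5
X = np.random.rand(d, d)   # toy "connectome"
r = np.random.rand(d)      # 1 x d kernel (acts on rows)
c = np.random.rand(d)      # d x 1 kernel (acts on columns)

# A "valid" convolution of X with the 1xd kernel gives one value per row,
# and with the dx1 kernel one value per column.
row_resp = X @ r           # shape (d,) : response of each row i
col_resp = c @ X           # shape (d,) : response of each column j

# Duplicating row_resp d times along columns and col_resp d times along
# rows, then summing element-wise, yields the d x d E2E output:
E2E = row_resp[:, None] + col_resp[None, :]

# Check one entry against the definition:
i, j = 2, 4
assert np.isclose(E2E[i, j], X[i, :] @ r + c @ X[:, j])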
In [7]:
# Construction of the Edge-2-Edge layer using Keras backend.
# -*- coding: utf-8 -*-
from keras import backend as K
from keras import activations
from keras import initializers
from keras import regularizers
from keras import constraints
from keras.engine import Layer
from keras.engine import InputSpec
from keras.utils import conv_utils
class E2E_conv(Layer):
    def __init__(self, rank,
                 filters,
                 kernel_size,
                 strides=1,
                 padding='valid',
                 data_format=None,
                 dilation_rate=1,
                 activation=None,
                 use_bias=False,
                 kernel_initializer='glorot_uniform',
                 bias_initializer='zeros',
                 kernel_regularizer=None,
                 bias_regularizer=None,
                 activity_regularizer=None,
                 kernel_constraint=None,
                 bias_constraint=None,
                 **kwargs):
        super(E2E_conv, self).__init__(**kwargs)
        self.rank = rank
        self.filters = filters
        self.kernel_size = conv_utils.normalize_tuple(kernel_size, rank, 'kernel_size')
        self.strides = conv_utils.normalize_tuple(strides, rank, 'strides')
        self.padding = conv_utils.normalize_padding(padding)
        self.data_format = conv_utils.normalize_data_format(data_format)
        self.dilation_rate = conv_utils.normalize_tuple(dilation_rate, rank, 'dilation_rate')
        self.activation = activations.get(activation)
        self.use_bias = use_bias
        self.kernel_initializer = initializers.get(kernel_initializer)
        self.bias_initializer = initializers.get(bias_initializer)
        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.bias_regularizer = regularizers.get(bias_regularizer)
        self.activity_regularizer = regularizers.get(activity_regularizer)
        self.kernel_constraint = constraints.get(kernel_constraint)
        self.bias_constraint = constraints.get(bias_constraint)
        self.input_spec = InputSpec(ndim=self.rank + 2)

    def build(self, input_shape):
        if self.data_format == 'channels_first':
            channel_axis = 1
        else:
            channel_axis = -1
        if input_shape[channel_axis] is None:
            raise ValueError('The channel dimension of the inputs '
                             'should be defined. Found `None`.')
        input_dim = input_shape[channel_axis]
        kernel_shape = self.kernel_size + (input_dim, self.filters)
        self.kernel = self.add_weight(shape=kernel_shape,
                                      initializer=self.kernel_initializer,
                                      name='kernel',
                                      regularizer=self.kernel_regularizer,
                                      constraint=self.kernel_constraint)
        if self.use_bias:
            self.bias = self.add_weight(shape=(self.filters,),
                                        initializer=self.bias_initializer,
                                        name='bias',
                                        regularizer=self.bias_regularizer,
                                        constraint=self.bias_constraint)
        else:
            self.bias = None
        # Set input spec.
        self.input_spec = InputSpec(ndim=self.rank + 2,
                                    axes={channel_axis: input_dim})
        self.built = True

    def call(self, inputs):
        kernel_shape = K.get_value(self.kernel).shape
        d = kernel_shape[1]
        # Split the (2, d) kernel into a 1xd (row) kernel and a dx1 (column) kernel.
        kernel1xd = K.reshape(self.kernel[0, :], (1, kernel_shape[1], kernel_shape[2], kernel_shape[3]))
        kerneldx1 = K.reshape(self.kernel[1, :], (kernel_shape[1], 1, kernel_shape[2], kernel_shape[3]))
        # Convolve the input with each kernel ...
        conv1xd = K.conv2d(
            inputs,
            kernel1xd,
            strides=self.strides,
            padding=self.padding,
            data_format=self.data_format,
            dilation_rate=self.dilation_rate)
        convdx1 = K.conv2d(
            inputs,
            kerneldx1,
            strides=self.strides,
            padding=self.padding,
            data_format=self.data_format,
            dilation_rate=self.dilation_rate)
        # ... then duplicate each response d times and sum element-wise.
        concat1 = K.concatenate([convdx1] * d, axis=1)
        concat2 = K.concatenate([conv1xd] * d, axis=2)
        return concat1 + concat2

    def compute_output_shape(self, input_shape):
        return (input_shape[0], input_shape[1], input_shape[2], self.filters)

    def get_config(self):
        config = {
            'rank': self.rank,
            'filters': self.filters,
            'kernel_size': self.kernel_size,
            'strides': self.strides,
            'padding': self.padding,
            'data_format': self.data_format,
            'dilation_rate': self.dilation_rate,
            'activation': activations.serialize(self.activation),
            'use_bias': self.use_bias,
            'kernel_initializer': initializers.serialize(self.kernel_initializer),
            'bias_initializer': initializers.serialize(self.bias_initializer),
            'kernel_regularizer': regularizers.serialize(self.kernel_regularizer),
            'bias_regularizer': regularizers.serialize(self.bias_regularizer),
            'activity_regularizer': regularizers.serialize(self.activity_regularizer),
            'kernel_constraint': constraints.serialize(self.kernel_constraint),
            'bias_constraint': constraints.serialize(self.bias_constraint)
        }
        base_config = super(E2E_conv, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))
N.B. The previous cell is included to show how the E2E layer is implemented as a Keras layer.
To use the layer, simply do:
In [8]:
from E2E_conv import *
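As a quick sanity check (a minimal sketch; the shapes follow from the first model below), an E2E layer applied to a $90 \times 90$ single-channel connectome should preserve the spatial dimensions while producing one feature map per filter:
from keras.models import Sequential

# Minimal sketch: a single E2E layer with 32 filters on a 90x90 connectome.
m = Sequential()
m.add(E2E_conv(2, 32, (2, 90), input_shape=(90, 90, 1), data_format="channels_last"))
print(m.output_shape)  # expected: (None, 90, 90, 32)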
In [9]:
# Model architecture
model = Sequential()
model.add(E2E_conv(2,32,(2,90),kernel_regularizer=reg,input_shape=(90,90,1),input_dtype='float32',data_format="channels_last"))
print("First layer output shape :"+str(model.output_shape))
model.add(LeakyReLU(alpha=0.33))
#print(model.output_shape)
model.add(E2E_conv(2,32,(2,90),kernel_regularizer=reg,data_format="channels_last"))
print(model.output_shape)
model.add(LeakyReLU(alpha=0.33))
model.add(Convolution2D(64,(1,90),kernel_regularizer=reg,data_format="channels_last"))
model.add(LeakyReLU(alpha=0.33))
model.add(Convolution2D(256,(90,1),kernel_regularizer=reg,data_format="channels_last"))
model.add(LeakyReLU(alpha=0.33))
#print(model.output_shape)
model.add(Dropout(0.5))
model.add(Dense(128,kernel_regularizer=reg,kernel_initializer=kernel_init))
#print(model.output_shape)
model.add(LeakyReLU(alpha=0.33))
#print(model.output_shape)
model.add(Dropout(0.5))
model.add(Dense(30,kernel_regularizer=reg,kernel_initializer=kernel_init))
model.add(LeakyReLU(alpha=0.33))
#print(model.output_shape)
model.add(Dropout(0.5))
model.add(Dense(2,kernel_regularizer=reg,kernel_initializer=kernel_init))
model.add(Flatten())
model.add(LeakyReLU(alpha=0.33))
model.summary()
#print(model.output_shape)
In [10]:
from injury import ConnectomeInjury
from scipy.stats import pearsonr
from sklearn.metrics import mean_absolute_error as mae
from numpy import std
from IPython.display import HTML, display
import tabulate
def generate_synthetic_validation_data(noise):
    injury = ConnectomeInjury()  # Generate train/test synthetic data.
    x_valid_y_valid = injury.generate_injury(n_samples=300, noise_weight=noise)
    return x_valid_y_valid

def get_results_from_models(model, noises):
    results = [["noises"] + noises,
               ["mae_alpha"],
               ["stdae_alpha"],
               ["mae_beta"],
               ["stdae_beta"],
               ["r_alpha"],
               ["r_beta"]]
    for i in range(len(noises)):
        noise = noises[i]
        x_valid, y_valid = generate_synthetic_validation_data(noise)
        x_valid = x_valid.reshape(x_valid.shape[0], x_valid.shape[3], x_valid.shape[2], x_valid.shape[1])
        # load weights into new model
        model.load_weights("Weights/BrainCNNWeights_noise_" + str(noise) + ".h5")
        print("Loaded model from disk")
        preds = model.predict(x_valid)
        results[1].append("{0:.2f}".format(100 * mae(preds[:, 0], y_valid[:, 0])))
        results[2].append("{0:.2f}".format(100 * std(abs(y_valid[:, 0] - preds[:, 0]))))
        results[3].append("{0:.2f}".format(100 * mae(preds[:, 1], y_valid[:, 1])))
        results[4].append("{0:.2f}".format(100 * std(abs(y_valid[:, 1] - preds[:, 1]))))
        results[5].append("{0:.2f}".format(pearsonr(preds[:, 0], y_valid[:, 0])[0]))
        results[6].append("{0:.2f}".format(pearsonr(preds[:, 1], y_valid[:, 1])[0]))
    display(HTML(tabulate.tabulate(results, tablefmt='html')))
In [13]:
opt = optimizers.SGD(momentum=momentum,nesterov=True,lr=lr)
model.compile(optimizer=opt,loss='mean_squared_error',metrics=['mae'])
csv_logger = callbacks.CSVLogger('BrainCNN.log')
command = str(raw_input("Train or predict ? [t/p] "))
if command == "t":
    print("Training for noise = " + str(noise_weight))
    history = model.fit(x_train, y_train, nb_epoch=1000, verbose=1, callbacks=[csv_logger])
    model.save_weights("Weights/BrainCNNWeights_noise_" + str(noise_weight) + ".h5")
else:
    print("[*] Predicting and printing results for the models trained :")
    get_results_from_models(model, noises=[0, 0.0625, 0.125, 0.25])
The functions below generate synthetic connectomes with an injury introduced at a specific region; the injury affects all connections emanating from that region, with varying intensities. The code below is a modification of the open-source Caffe implementation described here.
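Concretely, the injury model implemented below attenuates the base connectome element-wise and then adds symmetric noise: given a base connectome $X$, an injury signature $S$ and an injury strength $w$ (scaled by a constant multiplicative factor in the code), the generated sample is
$$X_{\text{injured}} = \frac{X}{\mathbf{1} + w\,S} + \lambda\, N,$$
where the division is element-wise, $N$ is a symmetric noise matrix normalized to $[0,1]$, and $\lambda$ is the noise weight.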
In [11]:
# generating injury signatures S_1 and S_2
def get_symmetric_noise(m, n):
    """Return a random symmetric noise image of size m x n with values between 0 and 1."""
    # Generate random noise image.
    noise_img = np.random.rand(m, n)
    # Make the noise image symmetric.
    noise_img = noise_img + noise_img.T
    # Normalize between 0 and 1.
    noise_img = (noise_img - noise_img.min()) / (noise_img.max() - noise_img.min())
    assert noise_img.max() == 1  # Make sure values are between 0 and 1.
    assert noise_img.min() == 0
    assert (noise_img.T == noise_img).all()  # Make sure the matrix is symmetric.
    return noise_img

def simulate_injury(X, weight_A, sig_A):
    """Attenuate the connectome X element-wise by the weighted injury signature."""
    denom = np.ones(X.shape) + (weight_A * sig_A)
    X_sig_AB = np.divide(X, denom)
    return X_sig_AB

def apply_injury_and_noise(X, Sig_A, weight_A, noise_weight):
    """Return a symmetric, signed, noisy adjacency matrix with a simulated injury."""
    X_sig_AB = simulate_injury(X, weight_A, Sig_A)
    # Get the noise image.
    noise_img = get_symmetric_noise(X.shape[0], X.shape[1])
    # Weight the noise image.
    weighted_noise_img = noise_img * noise_weight
    # Add the noise to the original image.
    X_sig_AB_noise = X_sig_AB + weighted_noise_img
    assert (X_sig_AB_noise[0, :, :].T == X_sig_AB_noise[0, :, :]).all()  # Make sure it is still symmetric.
    return X_sig_AB_noise

def generate_injury_signatures(X_mn, r_state, sig_indexes):
    """Generate the signatures that represent the underlying signal in our synthetic experiments.

    X_mn is the base connectome (assumed square, d x d); sig_indexes lists the regions
    to which we apply simulated injuries.
    """
    d = X_mn.shape[0]
    S = []
    # Create a signature for each injured region.
    for idx, sig_idx in enumerate(sig_indexes):
        # Make a signature noise vector.
        A_vec = r_state.rand(d)
        # Create the signature matrix.
        A = np.zeros((d, d))
        A[:, sig_idx] = A_vec
        A[sig_idx, :] = A_vec
        S.append(A)
        assert (A.T == A).all()  # Check that the matrix is symmetric.
    return np.asarray(S)

def sample_injury_strengths(n_samples, X_mn, A, noise_weight):
    """Return n_samples connectomes with simulated injury."""
    mult_factor = 10
    n_classes = 1
    # Range of values to predict.
    n_start = 0.5
    n_end = 1.4
    # These will be our Y.
    A_weights = np.random.uniform(n_start, n_end, [n_samples])
    X_h5 = np.zeros((n_samples, 1, X_mn.shape[0], X_mn.shape[1]), dtype=np.float32)
    Y_h5 = np.zeros((n_samples, n_classes), dtype=np.float32)
    for idx in range(n_samples):
        w_A = A_weights[idx]
        # Get the injured matrix.
        X_sig = apply_injury_and_noise(X_mn, A, w_A * mult_factor, noise_weight)
        # Normalize.
        X_sig = (X_sig - X_sig.min()) / (X_sig.max() - X_sig.min())
        # Put in h5 format.
        X_h5[idx, 0, :, :] = X_sig
        Y_h5[idx, :] = [w_A]
    return X_h5, Y_h5

import scipy.io  # needed for loadmat below

def load_base_connectome():
    X_mn = scipy.io.loadmat("data/base.mat")
    X_mn = X_mn['X_mn']
    return X_mn
In this section we use ADHD data fetched with Nilearn to construct a mean connectome of the control subjects; we then use this mean connectome to generate the phantom dataset with injuries introduced at specific regions.
In [12]:
adhd_data = datasets.fetch_adhd(n_subjects=20)
msdl_data = datasets.fetch_atlas_msdl()
msdl_coords = msdl_data.region_coords
masker = input_data.NiftiMapsMasker(
    msdl_data.maps, resampling_target="data", t_r=2.5, detrend=True,
    low_pass=.1, high_pass=.01, memory='nilearn_cache', memory_level=1)
adhd_subjects = []
pooled_subjects = []
site_names = []
adhd_labels = []  # 1 if ADHD, 0 if control
for func_file, confound_file, phenotypic in zip(
        adhd_data.func, adhd_data.confounds, adhd_data.phenotypic):
    time_series = masker.fit_transform(func_file, confounds=confound_file)
    pooled_subjects.append(time_series)
    is_adhd = phenotypic['adhd']
    if is_adhd:
        adhd_subjects.append(time_series)
    site_names.append(phenotypic['site'])
    adhd_labels.append(is_adhd)
conn_measure = ConnectivityMeasure(kind="tangent")
X = conn_measure.fit_transform(pooled_subjects)
Y = np.array(adhd_labels, dtype="float32")
The phantom data generated is constructed from the mean connectome of the control subjects of the ADHD dataset. We generate two classes of injuries, each at a specific region, then train BrainNetCNN to detect each class. Finally we use keras-vis activation maximization to generate connectomes that maximize the classification score.
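For intuition, activation maximization performs gradient ascent on the input rather than on the weights: starting from a random connectome, it repeatedly nudges the input in the direction that increases the score of the target class. Below is a minimal hand-rolled sketch of the idea; keras-vis adds regularization on top of this and typically swaps the final softmax for a linear activation, and `naive_activation_maximization` is our illustrative name, not a keras-vis function.
from keras import backend as K
import numpy as np

def naive_activation_maximization(model, class_idx, n_steps=200, step=0.1):
    # Score of the target class for the current input.
    score = model.output[0, class_idx]
    # Gradient of that score with respect to the input connectome.
    grads = K.gradients(score, model.input)[0]
    # Normalize the gradient to stabilize the ascent.
    grads /= (K.sqrt(K.mean(K.square(grads))) + 1e-8)
    iterate = K.function([model.input, K.learning_phase()], [score, grads])
    # Start from a small random connectome, as with input_range=(0., 0.1).
    x = np.random.uniform(0., 0.1, size=(1,) + model.input_shape[1:])
    for _ in range(n_steps):
        s, g = iterate([x, 0])  # learning phase 0: dropout disabled
        x += step * g
    return x[0]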
In [50]:
# Hyper parameters
batch_size = 14
dropout = 0.5
momentum = 0.9
lr = 0.001
decay = 0.0005
noise_weight = 0.0625
reg = regularizers.l2(decay)
kernel_init = initializers.he_uniform()
# Model architecture
model = Sequential()
model.add(E2E_conv(2,8,(2,39),kernel_regularizer=reg,input_shape=(39,39,1),input_dtype='float32',data_format="channels_last"))
print("First layer output shape :"+str(model.output_shape))
model.add(LeakyReLU(alpha=0.33))
#print(model.output_shape)
print(model.output_shape)
model.add(LeakyReLU(alpha=0.33))
model.add(Convolution2D(3,(1,39),kernel_regularizer=reg,data_format="channels_last"))
model.add(LeakyReLU(alpha=0.33))
model.add(Convolution2D(90,(39,1),kernel_regularizer=reg,data_format="channels_last"))
model.add(LeakyReLU(alpha=0.33))
#print(model.output_shape)
model.add(Dropout(0.5))
model.add(Dense(64,kernel_regularizer=reg,kernel_initializer=kernel_init))
#print(model.output_shape)
model.add(LeakyReLU(alpha=0.33))
#print(model.output_shape)
model.add(Dropout(0.5))
model.add(Dense(10,kernel_regularizer=reg,kernel_initializer=kernel_init))
model.add(LeakyReLU(alpha=0.33))
#print(model.output_shape)
model.add(Dropout(0.5))
model.add(Dense(2,activation = "softmax",kernel_regularizer=reg,kernel_initializer=kernel_init))
model.add(Flatten())
model.summary()
#print(model.output_shape)
opt = optimizers.SGD(nesterov=True,lr=lr)
model.compile(optimizer=opt,loss='binary_crossentropy',metrics=['accuracy'])
csv_logger = callbacks.CSVLogger('predict_age.log')
In [14]:
# Generating the two injury classes, introduced at nodes 5 and 30
control = X[Y==0]
r_state = np.random.RandomState(41)
base_connectome = control.mean(axis=0)
base_connectome = (base_connectome.T + base_connectome)/2
S = generate_injury_signatures(X_mn=base_connectome,r_state=r_state,sig_indexes=[5])
X1,Y = sample_injury_strengths(1000,base_connectome,S,noise_weight)
S = generate_injury_signatures(X_mn=base_connectome,r_state=r_state,sig_indexes=[30])
X2,Y = sample_injury_strengths(1000,base_connectome,S,noise_weight)
In [15]:
# Constructing the dataset
X = np.concatenate([X1,X2],axis=0)
X = X.reshape(X.shape[0],X.shape[2],X.shape[3],1)
Y1 = np.array([1]*1000)
Y2 = np.array([0]*1000)
Y = np.concatenate([Y1,Y2],axis=0)
Y = to_categorical(Y,2)
x_train,x_test,y_train,y_test = train_test_split(X,Y,test_size=0.33,random_state=42)
In [51]:
# fitting the model
train = str(raw_input("train/predict enter 1/0"))
if train == "1":
    model.fit(x_train, y_train, batch_size=14, nb_epoch=1000, verbose=1, callbacks=[csv_logger])
    model.save_weights("Weights/BrainCNNWeights_Visualization.h5")
else:
    model.load_weights("Weights/BrainCNNWeights_Visualization.h5")
evals = model.evaluate(batch_size=14, x=x_test, y=y_test)
print(evals)
In [17]:
# plotting results for generated connectomes using maximization-activation
from nilearn import plotting
%matplotlib inline
x = np.zeros((39,1))
x[5] = 1
x[30] = 1
heatmap1 = visualize_activation(model,layer_idx=-1, filter_indices=1,input_range = (0.,0.1))[:,:,0]
heatmap0 = visualize_activation(model,layer_idx=-1, filter_indices=0,input_range = (0.,0.1))[:,:,0]
plotting.plot_connectome(heatmap1.T+heatmap1,msdl_coords,title="reconstructed edges focal injury at node 5",edge_threshold = '98%',node_color=x)
plotting.plot_connectome(heatmap0.T+heatmap0,msdl_coords,title="reconstructed edges focal injury at node 30",edge_threshold = '98%',node_color=x)
Out[17]: (connectome plots: reconstructed edges for the focal injuries at nodes 5 and 30)
In [18]:
def construct_dataset(mean_connectome, first_injury_location, second_injury_location, noise_weight=0.0625):
    """
    This function takes the mean connectome and the locations of the injuries and constructs a dataset of two classes,
    each class corresponding to a connectome injured at either the first location or the second.
    params :
    - mean_connectome : a mean connectome of some dataset, functional or structural (np.array)
    - first_injury_location : integer representing the first region location
    - second_injury_location : integer representing the second region location
    returns : tuple (X,Y)
    - X : connectomes generated by introducing a focal injury at region 1 or 2
    - Y : categorical array for each connectome representing the class 1 or 2
    """
    # note: uses the global r_state defined earlier
    S = generate_injury_signatures(X_mn=mean_connectome, r_state=r_state, sig_indexes=[first_injury_location])
    X1, _ = sample_injury_strengths(1000, mean_connectome, S, noise_weight)
    S = generate_injury_signatures(X_mn=mean_connectome, r_state=r_state, sig_indexes=[second_injury_location])
    X2, _ = sample_injury_strengths(1000, mean_connectome, S, noise_weight)
    X = np.concatenate([X1, X2], axis=0)
    X = X.reshape(X.shape[0], X.shape[2], X.shape[3], 1)
    Y1 = np.array([1] * 1000)
    Y2 = np.array([0] * 1000)
    Y = np.concatenate([Y1, Y2], axis=0)
    Y = to_categorical(Y, 2)
    return X, Y

def plot_activation_maximization_result(model, first_injury_location, second_injury_location, thresholds):
    """
    model must be trained, and the params first_injury_location and second_injury_location
    must be the same as in the construct_dataset function.
    """
    from nilearn import plotting
    x = np.zeros((39, 1))
    x[first_injury_location] = 1
    x[second_injury_location] = 1
    heatmap1 = visualize_activation(model, layer_idx=-1, filter_indices=1, input_range=(0., 0.1))[:, :, 0]
    heatmap0 = visualize_activation(model, layer_idx=-1, filter_indices=0, input_range=(0., 0.1))[:, :, 0]
    for t in thresholds:
        plotting.plot_connectome(heatmap1.T + heatmap1, msdl_coords,
                                 title="reconstructed edges focal injury at node " + str(first_injury_location) + " threshold = " + str(t),
                                 edge_threshold=str(t) + "%", node_color=x)
        plotting.plot_connectome(heatmap0.T + heatmap0, msdl_coords,
                                 title="reconstructed edges focal injury at node " + str(second_injury_location) + " threshold = " + str(t),
                                 edge_threshold=str(t) + "%", node_color=x)

# example
plot_activation_maximization_result(model, 5, 30, [95, 97, 98, 99])
In [54]:
# Displaying the kernel norms of the layer at index 5
layer = model.layers[5]
weights = layer.get_weights()[0]
print(weights.shape)
from numpy.linalg import norm
image = norm(weights,axis = 3)
print(image.shape)
image = image.reshape(39,3)
print(image.shape)
plt.imshow(image,cmap='RdBu_r')
Out[54]: (matplotlib image of the 39 x 3 kernel-norm map)