In [2]:
""" Imports and global definitions """

import numpy as np

In [3]:
# Weight and bias creation for conv and FC  layers

def createConvWeights(_f,_d,_s):   
    """ Creates _f filters of dimensions _d depth and _s x _s (width x height) """
    return np.random.randn(shape=(_f,_d,_s,_s))


def createConvBiases(_f):
    """ Creates biases for _f filters """
    return np.random.randn(shape=(_f))


def createFCweights(_in,_out):
    """ Creates weights for fully connected layer with input of size _in and output of size _out """
    return np.random.randn(shape=(_out,_in))


def createFCbiases(_out):
    """ Creates biases for fully connected layer with _out output neurons"""
    return np.random.randn(shape=(_out))

In [6]:
# Forward propagation of the network

def calcOutputConv(self,_in,_w,_b,_s,_p,relu=True):
    
    """ Takes input volume(_in):channels x width x height
    weights of filters(_w):Filters x channels x width x height
    biases(_b):Filters x channels
    stride(_s) and padding(_p) as parameters """
    # Need to implement padding #
    
    #Shapes of input,weights and biases
    inShape = _in.shape # LxLxD  
    wShape = _w.shape   # FxFxDxN
    bShape = _b.shape   # N
    
    #A = (L-F+2P)/S+1 ,Where P is padding and S is stride
    #Output volume shape AxAxN
    if (inShape[1]-wShape[2]+2*_p)%_s != 0:
        print "incompatible filter and stride sizes"
        return
    A = (inShape[1]-wShape[2]+2*_p)/_s+1
    outVol = np.ndarray(shape=(N,A,A))
    
    for f in range(wShape[0]):
        for i in range(0,inShape[1]-wShape[2]+2*_p+1,_s):
            for j in range(0,inShape[2]-wShape[3]+2*_p+1,_s):
                if relu:
                    #output with activation function
                    outVol[f][i/_s][j/_s] += \
                        max(0,np.sum(_in[:,i:i+wShape[2],j:j+wShape[3]]*_w[f]) + _b[f])
                else:
                    #output without activation function
                    outVol[f][i/_s][j/_s] += np.sum(_in[:,i:i+wShape[2],j:j+wShape[3]]*_w[f]) + _b[f]
                
    return outVol

def calcActOutVol(self,_in,act=None):
    """ Takes output from conv layer as input and applies activation function element wise """
    
def maxPool(self,_in,_f,_s):
    
    """ Takes input volume of dimensions Channels x Width x Height
    filter size(_f) and stride(_s) as parameters and returns a max pooled output """
    
    if (_in.shape[1]-_f)%_s != 0:
        print ("incompatible filter and stride sizes")
        return
    A = (_in.shape[1]-_f)/_s+1
    outVol = np.ndarray(shape=(_in.shape[0],A,A))
    for k in range(_in.shape[0]):
        for i in range(0,_in.shape[1]-_f+1,_s):
            for j in range(0,_in.shape[2]-_f+1,_s):
                outVol[k][i/_s][j/_s] = np.max(_in[k,i:i+_f,j:j+_f])
                
    return outVol

def calFCout(self,_in,_w,_b,act=None):
    """ Calculates output in feed forward direction in a FC layer 
    _in: an N-D volume input
    _w: Neurons in output layer x N*D weight matrix
    _b: bias matrix"""
    
    outVol = np.ndarray(shape=(_w.shape[0],1))
    inDim = 1
    for i in _in.shape:
        inDim *= i
    _in = _in.reshape(inDim,1)
    outVol = np.dot(_w,_in)+_b
    
    if act != None:
        outVol = act(outVol)
    
    return outVol

def forwardPropagation(self):
    
    # Conv layer 1
    
    # Conv layer 2
    
    # FC layer
    
    # Output layer with 10(tanh or relu)
    return


  File "<ipython-input-6-2b15242cdb44>", line 19
    print "incompatible filter and stride sizes"
                                               ^
SyntaxError: Missing parentheses in call to 'print'

In [ ]:
""" Calculating loss """

def calculate_MSE_Loss(self, y_out, batch = 1):
    """calculates loss for a mini batch"""
    _y_output = forwardPropagation()
    e = y_out - _y_output
    l = np.sum(e * e)/batch
    return l

In [ ]:
# Backward propagation of gradients

"""During backpropagation of gradients from FC to conv layer do W.T * FC_output"""

def gradientsFC(self, _w, e = None, prevLayerGrad = None):
    
    """Calculating gradients for fully connected layer"""
    
    dLdw = []
    # Calculating with respect to relu
    if prevLayerGrad == None:
        # Calculating gradients for last layer
        dLdw = e
    else:
        # Calculating gradients for previous layer
        dLdw = _w * prevLayerGrad        
        dLdw = np.sum(dLdw, axis=1) # row-wise addition since each row contains gradients from each output neuron
        
    return dLdw

def gradientsConv(self, _w, prevLayerGrad):
    
    """ Calculating gradients for conv layer with respect to relu """
    # Do a 180 degree rotation of convolution kernel and do a convolution on gradients after doing a padding of K-1
    # assuming we use a stride of 1 during forward calculation
    
    w_rot = np.rot90(_w, 2)
    convGrad = calcOutputConv(prevLayerGrad, w_rot, None, 1, _w.shape[0]-1)
    
    return convGrad

def backpropagation(self):
    
    # Gradients FC
    
    # Gradients convolution

In [ ]:
"""Implementing encoder"""
forwar

In [79]:
print maxPool(a,3,1)


[[[ 9.]]

 [[ 9.]]

 [[ 9.]]]

In [3]:
a = [1,2,3,4,5,6,7,8,9,1,2,3,4,5,6,7,8,9,1,2,3,4,5,6,7,8,9]
a = np.reshape(a,(3,3,3))
#vol = np.ndarray(shape=(3,2,2))
#b = [a,a]
#b = np.reshape(b,(2,27))
#a[0] *= [2,1,2,1]
print (a)
print (np.rot90(a,1,(1,2)))
#print np.reshape(a,(4*4,1))
#print np.sum(a, axis=0)
#print np.dot(b,a)+[1,1]


[[[1 2 3]
  [4 5 6]
  [7 8 9]]

 [[1 2 3]
  [4 5 6]
  [7 8 9]]

 [[1 2 3]
  [4 5 6]
  [7 8 9]]]
[[[3 6 9]
  [2 5 8]
  [1 4 7]]

 [[3 6 9]
  [2 5 8]
  [1 4 7]]

 [[3 6 9]
  [2 5 8]
  [1 4 7]]]