In [1]:
import numpy as np
from scipy.optimize import check_grad
from gradient_check import eval_numerical_gradient_array

def rel_error(x, y):
    return np.max(np.abs(x - y) / (np.maximum(1e-8, np.abs(x) + np.abs(y))))

Module is an abstract class which defines the fundamental methods necessary for training a neural network. You do not need to change anything here, just read the comments.


In [2]:
class Module(object):
    """
    Basically, you can think of a module as of something (a black box)
    which can process `input` data and produce `output` data.
    This is like applying a function which is called `forward`:

        output = module.forward(input)

    The module should also be able to perform a backward pass: to differentiate the `forward` function.
    Moreover, it should be able to differentiate it when the module is part of a chain (chain rule).
    The latter implies there is a gradient coming from the previous step of the chain rule:

        gradInput = module.backward(input, gradOutput)
    """
    def __init__ (self):
        self.output = None
        self.gradInput = None
        self.training = True
    
    def forward(self, input):
        """
        Takes an input object, and computes the corresponding output of the module.
        """
        return self.updateOutput(input)

    def backward(self, input, gradOutput):
        """
        Performs a backpropagation step through the module, with respect to the given input.
        
        This includes 
         - computing a gradient w.r.t. `input` (is needed for further backprop),
         - computing a gradient w.r.t. parameters (to update parameters while optimizing).
        """
        self.updateGradInput(input, gradOutput)
        self.accGradParameters(input, gradOutput)
        return self.gradInput
    

    def updateOutput(self, input):
        """
        Computes the output using the current parameter set of the class and input.
        This function returns the result which is stored in the `output` field.
        
        Make sure to both store the data in `output` field and return it. 
        """
        
        # The easiest case:
            
        # self.output = input 
        # return self.output
        
        pass

    def updateGradInput(self, input, gradOutput):
        """
        Computing the gradient of the module with respect to its own input. 
        This is returned in `gradInput`. Also, the `gradInput` state variable is updated accordingly.
        
        The shape of `gradInput` is always the same as the shape of `input`.
        
        Make sure to both store the gradients in `gradInput` field and return it.
        """
        
        # The easiest case:
        
        # self.gradInput = gradOutput 
        # return self.gradInput
        
        pass   
    
    def accGradParameters(self, input, gradOutput):
        """
        Computing the gradient of the module with respect to its own parameters.
        No need to override if module has no parameters (e.g. ReLU).
        """
        pass
    
    def zeroGradParameters(self): 
        """
        Zeroes `gradParams` variable if the module has params.
        """
        pass
        
    def getParameters(self):
        """
        Returns a list with its parameters. 
        If the module does not have parameters return empty list. 
        """
        return []
        
    def getGradParameters(self):
        """
        Returns a list with gradients with respect to its parameters. 
        If the module does not have parameters return empty list. 
        """
        return []
    
    def train(self):
        """
        Sets training mode for the module.
        Training and testing behaviour differs for Dropout, BatchNorm.
        Note: named `train` so that it does not clash with the boolean `self.training` attribute.
        """
        self.training = True
    
    def evaluate(self):
        """
        Sets evaluation mode for the module.
        Training and testing behaviour differs for Dropout, BatchNorm.
        """
        self.training = False
    
    def __repr__(self):
        """
        Pretty printing. Should be overridden in every module if you want
        to have a readable description.
        """
        return "Module"

Sequential container

Define the forward and backward pass procedures.


In [3]:
class Sequential(Module):
    """
         This class implements a container, which processes `input` data sequentially. 
         
         `input` is processed by each module (layer) in self.modules consecutively.
         The resulting array is called `output`. 
    """
    
    def __init__ (self):
        super(Sequential, self).__init__()
        self.modules = []
   
    def add(self, module):
        """
        Adds a module to the container.
        """
        self.modules.append(module)

    def updateOutput(self, input):
        """
        Basic workflow of FORWARD PASS:
        
            y_0    = module[0].forward(input)
            y_1    = module[1].forward(y_0)
            ...
            output = module[n-1].forward(y_{n-2})   
            
            
        Just write a little loop. 
        """

        # Your code goes here. ################################################
        # Keep every intermediate output so that backward() can reuse them.
        self.y = [np.array(input)]
        for module in self.modules:
            self.y.append(module.forward(self.y[-1]))
        self.y.pop(0)  # drop the input; backward() prepends it again
        self.output = self.y[-1]
        return self.output

    def backward(self, input, gradOutput):
        """
        Workflow of BACKWARD PASS:
            
            g_{n-1} = module[n-1].backward(y_{n-2}, gradOutput)
            g_{n-2} = module[n-2].backward(y_{n-3}, g_{n-1})
            ...
            g_1 = module[1].backward(y_0, g_2)   
            gradInput = module[0].backward(input, g_1)   
             
             
        !!!
                
        To each module you need to provide the input that it saw during the forward pass;
        it is used while computing gradients.
        Make sure that the input to the `i`-th layer is the output of `module[i-1]`
        (exactly the same input as in the forward pass) and NOT the `input` to this Sequential module.
        
        !!!
        
        """
        # Your code goes here. ################################################
        # Prepend the original input so that self.y[k] is exactly what module[k] saw.
        g = np.array(gradOutput)
        self.y = [np.array(input)] + self.y
        for i, module in enumerate(reversed(self.modules)):
            # module[n-1-i] saw self.y[n-1-i] == self.y[-i-2] as its input
            g = module.backward(self.y[-i - 2], g)
        self.gradInput = g
        self.y.pop(0)
        return self.gradInput
      

    def zeroGradParameters(self): 
        for module in self.modules:
            module.zeroGradParameters()
    
    def getParameters(self):
        """
        Should gather all parameters in a list.
        """
        return [x.getParameters() for x in self.modules]
    
    def getGradParameters(self):
        """
        Should gather all gradients w.r.t parameters in a list.
        """
        return [x.getGradParameters() for x in self.modules]
    
    def __repr__(self):
        string = "".join([str(x) + '\n' for x in self.modules])
        return string
    
    def __getitem__(self, x):
        return self.modules.__getitem__(x)

Layers

  • input: batch_size x n_feats1
  • output: batch_size x n_feats2
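
For reference, here is a sketch of the math used by the Linear layer below ($W$ is stored with shape n_out $\times$ n_in and $X$ is the batch of inputs, as in the code):

$$ Y = X W^\top + b, \qquad \frac{\partial L}{\partial X} = \frac{\partial L}{\partial Y}\, W, \qquad \frac{\partial L}{\partial W} = \left(\frac{\partial L}{\partial Y}\right)^\top X, \qquad \frac{\partial L}{\partial b} = \sum_{n} \left(\frac{\partial L}{\partial Y}\right)_{n,:} $$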

In [4]:
class Linear(Module):
    """
    A module which applies a linear transformation 
    A common name is fully-connected layer, InnerProductLayer in caffe. 
    
    The module should work with 2D input of shape (n_samples, n_features).
    """
    def __init__(self, n_in, n_out):
        super(Linear, self).__init__()
       
        # This is a nice initialization
        stdv = 1./np.sqrt(n_in)
        self.W = np.random.uniform(-stdv, stdv, size = (n_out, n_in))
        self.b = np.random.uniform(-stdv, stdv, size = n_out)
        
        self.gradW = np.zeros_like(self.W)
        self.gradb = np.zeros_like(self.b)
        
    def updateOutput(self, input):
        # Your code goes here. ################################################
        self.output = np.add(input.dot(self.W.T), self.b)
        return self.output
    
    def updateGradInput(self, input, gradOutput):
        # Your code goes here. ################################################
        self.gradInput = gradOutput.dot(self.W)
        return self.gradInput
    
    def accGradParameters(self, input, gradOutput):
        # Your code goes here. ################################################
        self.gradW = gradOutput.T.dot(input)
        self.gradb = gradOutput.sum(axis=0)
    
    def zeroGradParameters(self):
        self.gradW.fill(0)
        self.gradb.fill(0)
        
    def getParameters(self):
        return [self.W, self.b]
    
    def getGradParameters(self):
        return [self.gradW, self.gradb]
    
    def __repr__(self):
        s = self.W.shape
        q = 'Linear %d -> %d' %(s[1],s[0])
        return q

In [5]:
input_dim = 3
output_dim = 2

x = np.random.randn(5, input_dim)
w = np.random.randn(output_dim, input_dim)
b = np.random.randn(output_dim)
dout = np.random.randn(5, output_dim)
linear = Linear(input_dim, output_dim)

def update_W_matrix(new_W):
    linear.W = new_W
    return linear.forward(x)

def update_bias(new_b):
    linear.b = new_b
    return linear.forward(x)

dx = linear.backward(x, dout)
dx_num = eval_numerical_gradient_array(lambda x: linear.forward(x), x, dout)
dw_num = eval_numerical_gradient_array(update_W_matrix, w, dout)
db_num = eval_numerical_gradient_array(update_bias, b, dout)
print 'Testing Linear_backward function:'
print 'dx error: ', rel_error(dx_num, dx)
print 'dw error: ', rel_error(dw_num, linear.gradW)
print 'db error: ', rel_error(db_num, linear.gradb)


Testing Linear_backward function:
dx error:  8.79139596207e-11
dw error:  1.22301177567e-10
db error:  6.29064286817e-12

This one is probably the hardest, but like the others it only takes about 5 lines of code in total (the softmax Jacobian is written out below).

  • input: batch_size x n_feats
  • output: batch_size x n_feats
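
A sketch of the math behind the implementation below: for a single sample with logits $x$ and outputs $s$,

$$ s_i = \frac{e^{x_i - \max_k x_k}}{\sum_j e^{x_j - \max_k x_k}}, \qquad \frac{\partial s_i}{\partial x_j} = s_i\,(\delta_{ij} - s_j), \qquad \frac{\partial L}{\partial x_j} = \sum_i \frac{\partial L}{\partial s_i}\, s_i\,(\delta_{ij} - s_j). $$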

In [6]:
class SoftMax(Module):
    def __init__(self):
         super(SoftMax, self).__init__()
    
    def updateOutput(self, input):
        # start with normalization for numerical stability
        self.output = np.subtract(input, input.max(axis=1, keepdims=True))
        
        # Your code goes here. ################################################
        self.output = np.exp(self.output)
        out_sum = self.output.sum(axis=1, keepdims=True)
        self.output = np.divide(self.output, out_sum)
        return self.output
    
    def updateGradInput(self, input, gradOutput):
        # Your code goes here. ################################################
        # Per-sample softmax Jacobian J[i, j] = s_i * (delta_ij - s_j) (symmetric),
        # contracted with gradOutput over the second index.
        batch_size, n_feats = self.output.shape
        a = self.output.reshape(batch_size, n_feats, -1)
        b = self.output.reshape(batch_size, -1, n_feats)
        self.gradInput = np.multiply(gradOutput.reshape(batch_size, -1, n_feats),
                                     np.subtract(np.multiply(np.eye(n_feats), a),
                                                 np.multiply(a, b))).sum(axis=2)
        return self.gradInput
    
    def __repr__(self):
        return "SoftMax"

In [7]:
soft_max = SoftMax()
x = np.random.randn(5, 3)
dout = np.random.randn(5, 3)
dx_numeric = eval_numerical_gradient_array(lambda x: soft_max.forward(x), x, dout)
dx = soft_max.backward(x, dout)

# The error should be around 1e-10
print 'Testing SoftMax grad:'
print 'dx error: ', rel_error(dx_numeric, dx)


Testing SoftMax grad:
dx error:  4.46672497399e-06

Implement dropout. The idea and implementation are really simple: just multiply the input by a $Bernoulli(p)$ mask.

This is a very cool regularizer. In fact, when you see your net is overfitting, try adding more dropout.

While training (self.training == True) it should sample a mask on each iteration (for every batch). When testing, this module should implement the identity transform, i.e. self.output = input.

  • input: batch_size x n_feats
  • output: batch_size x n_feats

In [8]:
class Dropout(Module):
    def __init__(self, p=0.5):
        super(Dropout, self).__init__()
        
        self.p = p
        self.mask = None
        
    def updateOutput(self, input):
        # Your code goes here. ################################################
        if self.training:
            # Sample an independent Bernoulli(p) mask for every element of the batch.
            self.mask = np.random.binomial(1, self.p, size=input.shape)
        else:
            # Identity transform at test time.
            self.mask = np.ones_like(input)
        self.output = np.multiply(input, self.mask)
        return self.output
    
    def updateGradInput(self, input, gradOutput):
        # Your code goes here. ################################################
        self.gradInput = np.multiply(gradOutput, self.mask)
        return self.gradInput
        
    def __repr__(self):
        return "Dropout"

Activation functions

Here's the complete example for the Rectified Linear Unit non-linearity (aka ReLU):


In [9]:
class ReLU(Module):
    def __init__(self):
         super(ReLU, self).__init__()
    
    def updateOutput(self, input):
        self.output = np.maximum(input, 0)
        return self.output
    
    def updateGradInput(self, input, gradOutput):
        self.gradInput = np.multiply(gradOutput , input > 0)
        return self.gradInput
    
    def __repr__(self):
        return "ReLU"

Implement the Leaky Rectified Linear Unit. Experiment with the slope.


In [10]:
class LeakyReLU(Module):
    def __init__(self, slope = 0.03):
        super(LeakyReLU, self).__init__()
            
        self.slope = slope
        
    def updateOutput(self, input):
        # Your code goes here. ################################################
        self.output = input.copy()
        self.output[self.output < 0] *= self.slope
        return  self.output
    
    def updateGradInput(self, input, gradOutput):
        # Your code goes here. ################################################
        self.gradInput = gradOutput.copy()
        self.gradInput[input < 0] *= self.slope
        return self.gradInput
    
    def __repr__(self):
        return "LeakyReLU"

Criterions

Criterions are used to score the model's answers.


In [11]:
class Criterion(object):
    def __init__ (self):
        self.output = None
        self.gradInput = None
        
    def forward(self, input, target):
        """
        Given an input and a target, compute the loss function
        associated to the criterion and return the result.

        For consistency this function should not be overridden;
        all the code goes in `updateOutput`.
        """
        return self.updateOutput(input, target)

    def backward(self, input, target):
        """
        Given an input and a target, compute the gradients of the loss function
        associated to the criterion and return the result.

        For consistency this function should not be overridden;
        all the code goes in `updateGradInput`.
        """
        return self.updateGradInput(input, target)
    
    def updateOutput(self, input, target):
        """
        Function to override.
        """
        return self.output

    def updateGradInput(self, input, target):
        """
        Function to override.
        """
        return self.gradInput   

    def __repr__(self):
        """
        Pretty printing. Should be overridden in every module if you want
        to have a readable description.
        """
        return "Criterion"

The MSECriterion, which is the basic L2 loss usually used for regression, is implemented here for you.
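
For reference, the loss and gradient implemented below are ($N$ is the batch size):

$$ L = \frac{1}{N} \sum_{n} \| x_n - t_n \|^2, \qquad \frac{\partial L}{\partial x} = \frac{2}{N}\,(x - t). $$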


In [12]:
class MSECriterion(Criterion):
    def __init__(self):
        super(MSECriterion, self).__init__()
        
    def updateOutput(self, input, target):   
        self.output = np.sum(np.power(np.subtract(input, target), 2)) / input.shape[0]
        return self.output 
 
    def updateGradInput(self, input, target):
        self.gradInput  = np.subtract(input, target) * 2 / input.shape[0]
        return self.gradInput

    def __repr__(self):
        return "MSECriterion"

Your task is to implement the ClassNLLCriterion. It should implement the multiclass log loss. Although there is a sum over y (target) in that formula, remember that the targets are one-hot encoded; this fact simplifies the computations a lot. Note that criterions are the only places where you divide by the batch size.
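
Written out for predicted probabilities $p$ and one-hot targets $y$ ($N$ is the batch size), the loss and its gradient with respect to the probabilities are:

$$ L = -\frac{1}{N} \sum_{n} \sum_{c} y_{n,c} \log p_{n,c}, \qquad \frac{\partial L}{\partial p_{n,c}} = -\frac{y_{n,c}}{N\, p_{n,c}}. $$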


In [13]:
class ClassNLLCriterion(Criterion):
    def __init__(self):
        super(ClassNLLCriterion, self).__init__()
        
    def updateOutput(self, input, target): 
        
        # Use this trick to avoid numerical errors
        eps = 1e-15 
        input_clamp = np.clip(input, eps, 1 - eps)
        
        # Your code goes here. ################################################
        self.output = -np.sum(np.multiply(target, np.log(input_clamp))) / len(input)
        return self.output

    def updateGradInput(self, input, target):
        
        # Use this trick to avoid numerical errors
        eps = 1e-15
        input_clamp = np.clip(input, eps, 1 - eps)
                
        # Your code goes here. ################################################
        # Gradient of -sum(target * log(input)) / N with respect to the probabilities.
        self.gradInput = -np.divide(target, input_clamp) / len(input)
        return self.gradInput
    
    def __repr__(self):
        return "ClassNLLCriterion"

In [ ]:
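batch_size, n_in, n_hidden, n_classes = 8, 4, 16, 3

net = Sequential()
net.add(Linear(n_in, n_hidden))
net.add(ReLU())
net.add(Linear(n_hidden, n_classes))
net.add(SoftMax())

criterion = ClassNLLCriterion()

# Random batch with one-hot targets
x = np.random.randn(batch_size, n_in)
target = np.zeros((batch_size, n_classes))
target[np.arange(batch_size), np.random.randint(0, n_classes, batch_size)] = 1

# Forward pass: probabilities, then the loss
probs = net.forward(x)
loss = criterion.forward(probs, target)

# Backward pass: gradient of the loss w.r.t. the probabilities, then backprop
net.zeroGradParameters()
dprobs = criterion.backward(probs, target)
net.backward(x, dprobs)

print 'loss: ', loss
print net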