In [1]:
import numpy as np

Module is an abstract class which defines the fundamental methods necessary for training a neural network. You do not need to change anything here, just read the comments.


In [2]:
class Module(object):
    """
    Basically, you can think of a module as a black box
    which can process `input` data and produce `output` data.
    This is like applying a function which is called `forward`:
        
        output = module.forward(input)
    
    The module should also be able to perform a backward pass: to differentiate 
    the `forward` function. Moreover, it should be able to do so when it is part 
    of a chain (chain rule). The latter implies there is a gradient coming in 
    from the subsequent module of the chain.
    
        gradInput = module.backward(input, gradOutput)
    """
    def __init__(self):
        self.output = None
        self.gradInput = None
        self.training = True
    
    def forward(self, input):
        """
        Takes an input object, and computes the corresponding output of the module.
        """
        return self.updateOutput(input)

    def backward(self, input, gradOutput):
        """
        Performs a backpropagation step through the module, with respect to the given input.
        
        This includes 
         - computing a gradient w.r.t. `input` (is needed for further backprop),
         - computing a gradient w.r.t. parameters (to update parameters while optimizing).
        """
        self.updateGradInput(input, gradOutput)
        self.accGradParameters(input, gradOutput)
        return self.gradInput
    

    def updateOutput(self, input):
        """
        Computes the output using the current parameter set of the class and input.
        This function returns the result which is stored in the `output` field.
        
        Make sure to both store the data in `output` field and return it. 
        """
        
        # The easiest case:
            
        # self.output = input 
        # return self.output
        
        pass

    def updateGradInput(self, input, gradOutput):
        """
        Computing the gradient of the module with respect to its own input. 
        This is returned in `gradInput`. Also, the `gradInput` state variable is updated accordingly.
        
        The shape of `gradInput` is always the same as the shape of `input`.
        
        Make sure to both store the gradients in `gradInput` field and return it.
        """
        
        # The easiest case:
        
        # self.gradInput = gradOutput 
        # return self.gradInput
        
        pass   
    
    def accGradParameters(self, input, gradOutput):
        """
        Computing the gradient of the module with respect to its own parameters.
        No need to override if module has no parameters (e.g. ReLU).
        """
        pass
    
    def zeroGradParameters(self): 
        """
        Zeroes `gradParams` variable if the module has params.
        """
        pass
        
    def getParameters(self):
        """
        Returns a list with its parameters. 
        If the module does not have parameters return empty list. 
        """
        return []
        
    def getGradParameters(self):
        """
        Returns a list with gradients with respect to its parameters. 
        If the module does not have parameters return empty list. 
        """
        return []
    
    def train(self):
        """
        Sets training mode for the module.
        Training and testing behaviour differs for Dropout, BatchNorm.
        """
        self.training = True
    
    def evaluate(self):
        """
        Sets evaluation mode for the module.
        Training and testing behaviour differs for Dropout, BatchNorm.
        """
        self.training = False
    
    def __repr__(self):
        """
        Pretty printing. Should be overridden in every module if you want 
        to have a readable description. 
        """
        return "Module"

Sequential container

Define the forward and backward pass procedures.


In [15]:
class Sequential(Module):
    """
         This class implements a container, which processes `input` data sequentially. 
         
         `input` is processed by each module (layer) in self.modules consecutively.
         The resulting array is called `output`. 
    """
    
    def __init__(self):
        super(Sequential, self).__init__()
        self.modules = []
        self.inputs = []   # inputs seen by each module during the forward pass
        
    def add(self, module):
        """
        Adds a module to the container.
        """
        self.modules.append(module)

    def updateOutput(self, input):
        """
        Basic workflow of FORWARD PASS:
        
            y_0    = module[0].forward(input)
            y_1    = module[1].forward(y_0)
            ...
            output = module[n-1].forward(y_{n-2})   
            
            
        Just write a little loop. 
        """
        self.inputs = []
        y = input
        for mod in self.modules:
            self.inputs.append(y)
            y = mod.forward(y)
        self.output = y
        return self.output

    def backward(self, input, gradOutput):
        """
        Workflow of BACKWARD PASS:
            
            g_{n-1} = module[n-1].backward(y_{n-2}, gradOutput)
            g_{n-2} = module[n-2].backward(y_{n-3}, g_{n-1})
            ...
            g_1 = module[1].backward(y_0, g_2)   
            gradInput = module[0].backward(input, g_1)   
             
             
        !!!
                
        To each module you need to provide the input it saw during the forward pass;
        it is used while computing the gradients. 
        Make sure that the input for the `i`-th layer is the output of `module[i-1]` 
        (just the same input as in the forward pass) and NOT the `input` to this Sequential module. 
        
        !!!
        
        """
        
        g = gradOutput
        for mod, inp in zip(self.modules[::-1], self.inputs[::-1]):
            g = mod.backward(inp, g)
        
        self.gradInput = g
        return self.gradInput
      

    def zeroGradParameters(self): 
        for module in self.modules:
            module.zeroGradParameters()
    
    def getParameters(self):
        """
        Should gather all parameters in a list.
        """
        return [x.getParameters() for x in self.modules]
    
    def getGradParameters(self):
        """
        Should gather all gradients w.r.t parameters in a list.
        """
        return [x.getGradParameters() for x in self.modules]
    
    def __repr__(self):
        string = "".join([str(x) + '\n' for x in self.modules])
        return string
    
    def __getitem__(self,x):
        return self.modules.__getitem__(x)
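
A quick usage sketch (assuming the `Linear` and `ReLU` modules implemented below): chain two layers, run a forward pass, then propagate a gradient back through the whole container.

    net = Sequential()
    net.add(Linear(10, 5))
    net.add(ReLU())

    x = np.random.randn(32, 10)                 # a batch of 32 examples
    out = net.forward(x)                        # shape (32, 5)
    grad = net.backward(x, np.ones_like(out))   # shape (32, 10), same as x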

Layers

  • input: batch_size x n_feats1
  • output: batch_size x n_feats2

In [3]:
class Linear(Module):
    """
    A module which applies a linear transformation. 
    A common name is fully-connected layer, InnerProductLayer in caffe. 
    
    The module should work with 2D input of shape (n_samples, n_features).
    """
    def __init__(self, n_in, n_out):
        super(Linear, self).__init__()
       
        # This is a nice initialization
        stdv = 1./np.sqrt(n_in)
        self.W = np.random.uniform(-stdv, stdv, size = (n_out, n_in))
        self.b = np.random.uniform(-stdv, stdv, size = n_out)
        
        self.gradW = np.zeros_like(self.W)
        self.gradb = np.zeros_like(self.b)
        
    def updateOutput(self, input):
        self.output = input.dot(self.W.T) + self.b
        return self.output
    
    def updateGradInput(self, input, gradOutput):
        self.gradInput = gradOutput.dot(self.W)
        return self.gradInput
    
    def accGradParameters(self, input, gradOutput):
        self.gradW = gradOutput.T.dot(input)
        self.gradb = gradOutput.sum(axis=0)
    
    def zeroGradParameters(self):
        self.gradW.fill(0)
        self.gradb.fill(0)
        
    def getParameters(self):
        return [self.W, self.b]
    
    def getGradParameters(self):
        return [self.gradW, self.gradb]
    
    def __repr__(self):
        s = self.W.shape
        q = 'Linear %d -> %d' %(s[1],s[0])
        return q
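
Since the backward formulas are easy to get wrong, a finite-difference check is a handy sanity test. Below is a small sketch (the helper loop and variable names are just for illustration) that compares `updateGradInput` of `Linear` against a central-difference estimate of the gradient of the scalar function `sum(forward(x) * grad_out)`.

    np.random.seed(0)
    layer = Linear(3, 2)
    x = np.random.randn(4, 3)
    grad_out = np.random.randn(4, 2)

    analytic = layer.backward(x, grad_out)      # gradient w.r.t. x

    eps = 1e-6
    numeric = np.zeros_like(x)
    for i in range(x.shape[0]):
        for j in range(x.shape[1]):
            x_plus, x_minus = x.copy(), x.copy()
            x_plus[i, j] += eps
            x_minus[i, j] -= eps
            f_plus = np.sum(layer.forward(x_plus) * grad_out)
            f_minus = np.sum(layer.forward(x_minus) * grad_out)
            numeric[i, j] = (f_plus - f_minus) / (2 * eps)

    print(np.allclose(analytic, numeric))       # expect True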

This one is probably the hardest, but like the others it only takes about 5 lines of code in total.

  • input: batch_size x n_feats
  • output: batch_size x n_feats
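
A hint for the backward pass: with $s_i = e^{x_i} / \sum_j e^{x_j}$, the Jacobian of softmax is $\partial s_i / \partial x_k = s_i (\delta_{ik} - s_k)$, so for an incoming gradient $g$ the gradient with respect to the input of one sample is $gradInput_k = s_k \left( g_k - \sum_i g_i s_i \right)$. This is exactly what the implementation below computes row by row.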

In [5]:
class SoftMax(Module):
    def __init__(self):
        super(SoftMax, self).__init__()
    
    def updateOutput(self, input):
        # start with normalization for numerical stability
        self.output = np.subtract(input, input.max(axis=1, keepdims=True))
        self.output = np.exp(self.output)
        self.output = (self.output.T / (np.sum(self.output, axis=1))).T
        
        return self.output
    
    def updateGradInput(self, input, gradOutput):
        input = np.subtract(input, input.max(axis=1, keepdims=True))
        output = (np.exp(input).T / (np.sum(np.exp(input), axis=1))).T
        
        self.gradInput = np.zeros(input.shape)
        self.gradInput += gradOutput * output
        self.gradInput -= (np.sum(gradOutput * output, axis=1) * output.T).T
        
        return self.gradInput
    
    def __repr__(self):
        return "SoftMax"

One of the most significant recent ideas that impacted NNs a lot is Batch normalization. The idea is simple, yet effective: the features should be whitened ($mean = 0$, $std = 1$) all the way through the NN. This improves convergence for deep models, letting you train them in days rather than weeks. You are to implement a part of the layer: mean subtraction. That is, the module should calculate the mean value for every feature (every column) and subtract it.

Note that you need an estimate of the mean over the dataset to be able to predict on test examples. The right way is to create a variable which will hold a smoothed mean over batches (exponential smoothing works well) and use it when forwarding test examples.

When training, calculate the mean as follows:

    mean_to_subtract = self.old_mean * alpha + batch_mean * (1 - alpha)

When evaluating (self.training == False), set $alpha = 1$.

  • input: batch_size x n_feats
  • output: batch_size x n_feats

In [7]:
class BatchMeanSubtraction(Module):
    def __init__(self, alpha = 0.):
        super(BatchMeanSubtraction, self).__init__()
        
        self.alpha = alpha
        self.old_mean = None
        
    def updateOutput(self, input):
        if not self.training:
            mean_to_subtract = self.old_mean
        elif self.old_mean is not None:
            mean_to_subtract = self.old_mean * self.alpha + np.mean(input, axis=0) * (1 - self.alpha)
        else:
            mean_to_subtract = np.mean(input, axis=0)
        self.old_mean = mean_to_subtract
        self.output = input - mean_to_subtract
        return self.output
    
    def updateGradInput(self, input, gradOutput):
        self.gradInput = gradOutput - np.mean(gradOutput, axis=0)
        return self.gradInput
    
    def __repr__(self):
        return "BatchMeanNormalization"

Implement dropout. The idea and implementation are really simple: just multiply the input by a $Bernoulli(p)$ mask.

This is a very cool regularizer. In fact, when you see your net is overfitting, try adding more dropout.

While training (self.training == True), it should sample a new mask on each iteration (for every batch). When testing, this module should implement the identity transform, i.e. self.output = input.

  • input: batch_size x n_feats
  • output: batch_size x n_feats

In [7]:
class Dropout(Module):
    def __init__(self, p=0.5):
        super(Dropout, self).__init__()
        self.p = p
        self.mask = None
        
    def updateOutput(self, input):
        if self.training:
            # sample a fresh Bernoulli(p) mask for every batch
            self.mask = np.random.binomial(1, self.p, size=input.shape)
            self.output = input * self.mask
        else:
            # identity transform at test time
            self.output = input
        return self.output
    
    def updateGradInput(self, input, gradOutput):
        if self.training:
            self.gradInput = gradOutput * self.mask
        else:
            self.gradInput = gradOutput
        return self.gradInput
        
    def __repr__(self):
        return "Dropout"

Activation functions

Here's the complete example for the Rectified Linear Unit non-linearity (aka ReLU):


In [8]:
class ReLU(Module):
    def __init__(self):
         super(ReLU, self).__init__()
    
    def updateOutput(self, input):
        self.output = np.maximum(input, 0)
        return self.output
    
    def updateGradInput(self, input, gradOutput):
        self.gradInput = np.multiply(gradOutput , input > 0)
        return self.gradInput
    
    def __repr__(self):
        return "ReLU"

Implement the Leaky Rectified Linear Unit. Experiment with the slope.


In [8]:
class LeakyReLU(Module):
    def __init__(self, slope = 0.03):
        super(LeakyReLU, self).__init__()
            
        self.slope = slope
        
    def updateOutput(self, input):
        self.output = np.maximum(input, self.slope * input)
        return  self.output
    
    def updateGradInput(self, input, gradOutput):
        # copy so that gradOutput is not modified in place
        self.gradInput = gradOutput.copy()
        self.gradInput[input < 0] *= self.slope
        return self.gradInput
    
    def __repr__(self):
        return "LeakyReLU"

Implement the Exponential Linear Unit (ELU) activation.


In [10]:
class ELU(Module):
    def __init__(self, alpha = 1.0):
        super(ELU, self).__init__()
        
        self.alpha = alpha
        
    def updateOutput(self, input):
        # copy so that the input is not modified in place
        self.output = input.copy()
        self.output[input < 0] = (np.exp(input[input < 0]) - 1) * self.alpha
        return self.output
    
    def updateGradInput(self, input, gradOutput):
        # copy so that gradOutput is not modified in place
        self.gradInput = gradOutput.copy()
        self.gradInput[input < 0] *= self.alpha * np.exp(input[input < 0])
        return self.gradInput
    
    def __repr__(self):
        return "ELU"

Implement the SoftPlus activation. Note how it looks a lot like ReLU.


In [11]:
class SoftPlus(Module):
    def __init__(self):
        super(SoftPlus, self).__init__()
    
    def updateOutput(self, input):
        # log(1 + exp(x)), computed via logaddexp to avoid overflow for large inputs
        self.output = np.logaddexp(0, input)
        return self.output
    
    def updateGradInput(self, input, gradOutput):
        self.gradInput = 1. / (1 + np.exp(-input)) * gradOutput
        return self.gradInput
    
    def __repr__(self):
        return "SoftPlus"

Criterions

Criterions are used to score the model's answers.


In [12]:
class Criterion(object):
    def __init__ (self):
        self.output = None
        self.gradInput = None
        
    def forward(self, input, target):
        """
            Given an input and a target, compute the loss function 
            associated to the criterion and return the result.
            
            For consistency this function should not be overridden,
            all the code goes in `updateOutput`.
        """
        return self.updateOutput(input, target)

    def backward(self, input, target):
        """
            Given an input and a target, compute the gradients of the loss function
            associated to the criterion and return the result. 

            For consistency this function should not be overridden,
            all the code goes in `updateGradInput`.
        """
        return self.updateGradInput(input, target)
    
    def updateOutput(self, input, target):
        """
        Function to override.
        """
        return self.output

    def updateGradInput(self, input, target):
        """
        Function to override.
        """
        return self.gradInput   

    def __repr__(self):
        """
        Pretty printing. Should be overridden in every module if you want 
        to have a readable description. 
        """
        return "Criterion"

The MSECriterion, which is the basic L2 loss usually used for regression, is implemented here for you.


In [13]:
class MSECriterion(Criterion):
    def __init__(self):
        super(MSECriterion, self).__init__()
        
    def updateOutput(self, input, target):   
        self.output = np.sum(np.power(input - target,2)) / input.shape[0]
        return self.output 
 
    def updateGradInput(self, input, target):
        self.gradInput  = (input - target) * 2 / input.shape[0]
        return self.gradInput

    def __repr__(self):
        return "MSECriterion"

Your task is to implement the ClassNLLCriterion. It should implement the multiclass log loss. Although there is a sum over y (the target) in that formula, remember that the targets are one-hot encoded; this fact simplifies the computations a lot. Note that criterions are the only places where you divide by the batch size.
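
Written out, with predicted probabilities $p$ (the output of SoftMax) and one-hot targets $y$, the loss for a batch of size $N$ is $L = -\frac{1}{N}\sum_{n}\sum_{c} y_{n,c}\,\log p_{n,c} = -\frac{1}{N}\sum_{n} \log p_{n, t_n}$, where $t_n$ is the true class of sample $n$; the gradient with respect to $p$ is then $-y / (N\,p)$ elementwise, which matches `updateGradInput` below.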


In [14]:
class ClassNLLCriterion(Criterion):
    def __init__(self):
        super(ClassNLLCriterion, self).__init__()
        
    def updateOutput(self, input, target): 
        
        # Use this trick to avoid numerical errors
        eps = 1e-15 
        input_clamp = np.clip(input, eps, 1 - eps)
        
        self.output = -np.sum(target * np.log(input_clamp)) / target.shape[0]
        return self.output

    def updateGradInput(self, input, target):
        
        # Use this trick to avoid numerical errors
        eps = 1e-15
        input_clamp = np.clip(input, eps, 1 - eps)
                
        self.gradInput = -target / input_clamp / target.shape[0]
        return self.gradInput
    
    def __repr__(self):
        return "ClassNLLCriterion"