In [1]:
require 'nngraph'
Out[1]:
In [4]:
input = nn.Identity()()
In [6]:
h1 = nn.Tanh()(nn.Linear(20, 10)(input))
output = nn.Linear(10, 1)(h1)
mlp = nn.gModule({input},{output})
In [9]:
x = torch.rand(20)
dx = torch.rand(1)
mlp:updateOutput(x)
mlp:updateGradInput(x, dx)
mlp:accGradParameters(x, dx)
graph.dot(mlp.fg, 'MLP', 'MLP')
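The explicit updateOutput/updateGradInput/accGradParameters calls above can also be written with the usual nn.Module convenience methods, which a gModule inherits; a minimal sketch:
-- forward() wraps updateOutput(); backward() wraps updateGradInput() followed by accGradParameters()
y = mlp:forward(x)
gradInput = mlp:backward(x, dx)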
In [11]:
h1 = nn.Linear(20, 10)()
h2 = nn.Linear(10, 1)(nn.Tanh()(nn.Linear(10, 10)(nn.Tanh()(h1))))
mlp = nn.gModule({h1}, {h2})
x = torch.rand(20)
dx = torch.rand(1)
mlp:updateOutput(x)
mlp:updateGradInput(x, dx)
mlp:accGradParameters(x, dx)
-- draw graph (the forward graph, '.fg')
graph.dot(mlp.fg, 'MLP')
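The backward graph is stored alongside the forward one as mlp.bg and can be drawn the same way; a minimal sketch:
-- draw the backward graph ('.bg')
graph.dot(mlp.bg, 'MLP backward')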
In [13]:
local function get_net()
   local input = nn.Identity()()
   local h1 = nn.Linear(20, 10)(input)
   local h2 = nn.Sigmoid()(h1)
   local output = nn.Linear(10, 1)(h2)
   -- Annotate nodes with their local variable names.
   -- The local variables at the given stack level are inspected.
   nngraph.annotateNodes()
   return nn.gModule({input}, {output})
end
mlp = get_net()
x = torch.rand(20)
dx = torch.rand(1)
mlp:updateOutput(x)
-- Computing the gradient of the module with respect to its own input.
-- This is returned in gradInput.
-- Also, the gradInput state variable is updated accordingly.
mlp:updateGradInput(x, dx)
-- gradInput
-- This contains the gradients with respect to the inputs of the module,
-- computed with the last call of updateGradInput(input, gradOutput).
-- Computing the gradient of the module with respect to its own parameters.
mlp:accGradParameters(x, dx)
print(mlp.fg)
Out[13]:
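Since the nodes were annotated with annotateNodes, rendering the forward graph shows the local variable names (input, h1, h2, output) on the nodes; a brief sketch that also writes the drawing to disk (the file name is an arbitrary choice):
graph.dot(mlp.fg, 'annotated MLP', 'annotated_mlp')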
In [14]:
nngraph.setDebug(true)
local function get_network()
   local input = nn.Identity()()
   local h1 = nn.Linear(20, 10)(input)
   local h2 = nn.Sigmoid()(h1)
   local output = nn.Linear(10, 1)(h2)
   nngraph.annotateNodes()
   return nn.gModule({input}, {output})
end
mlp = get_network()
mlp.name = 'MYMLPError'
-- deliberately wrong input size (15 instead of 20) to trigger an error
x = torch.rand(15)
ok, err = pcall(function() mlp:updateOutput(x) end)
In [16]:
print(ok, err)
Out[16]:
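With debug mode on, pcall keeps the session alive while the failing forward pass is captured; a minimal sketch of inspecting the result, assuming the ok/err names from the cell above:
if not ok then
   print('forward pass failed: ' .. tostring(err))
end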
In [18]:
function get_rnn(input_size, rnn_size)
   local input = nn.Identity()()
   local prev_h = nn.Identity()()
   local i2h = nn.Linear(input_size, rnn_size)(input)
   local h2h = nn.Linear(rnn_size, rnn_size)(prev_h)
   local added_h = nn.CAddTable()({i2h, h2h})
   local next_h = nn.Tanh()(added_h)
   nngraph.annotateNodes()
   return nn.gModule({input, prev_h}, {next_h})
end
local rnn_net = get_rnn(128, 128)
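A gModule with several inputs is called with a table of tensors; a minimal sketch of one step of the cell built above (sizes match the get_rnn(128, 128) call):
local x = torch.rand(128)     -- current input
local h0 = torch.zeros(128)   -- previous hidden state
local h1 = rnn_net:forward({x, h0})
print(h1:size())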
In [19]:
-- an RNN cell of depth one (a single layer) unrolled over two inputs
local function get_rnn2(input_size, rnn_size)
   local input1 = nn.Identity()()
   local input2 = nn.Identity()()
   local prev_h = nn.Identity()()
   local rnn_net1 = get_rnn(input_size, rnn_size)({input1, prev_h})
   local rnn_net2 = get_rnn(input_size, rnn_size)({input2, rnn_net1})
   nngraph.annotateNodes()
   return nn.gModule({input1, input2, prev_h}, {rnn_net2})
end
local rnn_net2 = get_rnn2(128, 128)
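The stacked version takes three inputs, so the call passes a three-element table; a brief sketch:
local x1, x2 = torch.rand(128), torch.rand(128)
local h0 = torch.zeros(128)
local out = rnn_net2:forward({x1, x2, h0})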
In [20]:
local function get_rnn2(input_size, rnn_size)
   local input1 = nn.Identity()():annotate{graphAttributes = {style = 'filled', fillcolor = 'blue'}}
   local input2 = nn.Identity()():annotate{graphAttributes = {style = 'filled', fillcolor = 'blue'}}
   local prev_h = nn.Identity()():annotate{graphAttributes = {style = 'filled', fillcolor = 'blue'}}
   local rnn_net1 = get_rnn(input_size, rnn_size)({input1, prev_h}):annotate{graphAttributes = {style = 'filled', fillcolor = 'yellow'}}
   local rnn_net2 = get_rnn(input_size, rnn_size)({input2, rnn_net1}):annotate{graphAttributes = {style = 'filled', fillcolor = 'green'}}
   nngraph.annotateNodes()
   return nn.gModule({input1, input2, prev_h}, {rnn_net2})
end
local rnn_net3 = get_rnn2(128, 128)
graph.dot(rnn_net3.fg, 'rnn_net3', 'rnn_net3')
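Besides graphAttributes, annotate also accepts name and description fields that appear in the rendered node labels; a brief, hypothetical sketch on a single node:
-- hypothetical: label an input node in addition to colouring it
local labeled_h = nn.Identity()():annotate{
   name = 'prev_h', description = 'hidden state from the previous step',
   graphAttributes = {style = 'filled', fillcolor = 'blue'}
}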