In [13]:
require 'nn'
require 'nngraph'
require 'graph'

In [7]:
x = torch.randn(5)
y = torch.randn(5)
m = nn.CAddTable()   -- module that sums the tensors in its input table
m:forward({x, y})    -- returns x + y elementwise
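
As a quick sanity check (not in the original notebook), the CAddTable output should match the elementwise sum x + y:

-- maximum absolute difference should print 0
print((m:forward({x, y}) - (x + y)):abs():max())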

In [8]:
y


Out[8]:
 0.1607
-1.1754
 0.2571
-0.4275
 0.8752
[torch.DoubleTensor of size 5]


In [15]:
h1 = nn.Linear(20, 20)()               -- first input branch (node in the graph)
h2 = nn.Linear(10, 10)()               -- second input branch
hh1 = nn.Linear(20, 1)(nn.Tanh()(h1))
hh2 = nn.Linear(10, 1)(nn.Tanh()(h2))
madd = nn.CAddTable()({hh1, hh2})      -- merge the two branches by summation
oA = nn.Sigmoid()(madd)
oB = nn.Tanh()(madd)
gmod = nn.gModule({h1, h2}, {oA, oB})  -- graph module: two inputs, two outputs

x1 = torch.rand(20)
x2 = torch.rand(10)

gmod:updateOutput({x1, x2})
gmod:updateGradInput({x1, x2}, {torch.rand(1), torch.rand(1)})
graph.dot(gmod.fg, 'Big MLP')          -- render the forward graph


/Users/meat/torch/install/share/lua/5.1/graph/graphviz.lua:154: graphviz layout failed
stack traceback:
	[C]: in function 'assert'
	/Users/meat/torch/install/share/lua/5.1/graph/graphviz.lua:154: in function 'graphvizFile'
	/Users/meat/torch/install/share/lua/5.1/graph/graphviz.lua:181: in function 'dot'
	[string "h1 = nn.Linear(20, 20)()..."]:15: in main chunk
	[C]: in function 'xpcall'
	/Users/meat/torch/install/share/lua/5.1/itorch/main.lua:179: in function </Users/meat/torch/install/share/lua/5.1/itorch/main.lua:143>
	/Users/meat/torch/install/share/lua/5.1/lzmq/poller.lua:75: in function 'poll'
	/Users/meat/torch/install/share/lua/5.1/lzmq/impl/loop.lua:307: in function 'poll'
	/Users/meat/torch/install/share/lua/5.1/lzmq/impl/loop.lua:325: in function 'sleep_ex'
	/Users/meat/torch/install/share/lua/5.1/lzmq/impl/loop.lua:370: in function 'start'
	/Users/meat/torch/install/share/lua/5.1/itorch/main.lua:350: in main chunk
	[C]: in function 'require'
	[string "arg={'/Users/meat/Library/Jupyter/runtime/ker..."]:1: in main chunk

In [16]:
mse = nn.MSECriterion()
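
nn.MSECriterion computes the mean squared error between a prediction and a target: forward returns the scalar loss, backward the gradient of that loss with respect to the prediction. A quick illustration (values chosen for this note, not from the original run):

pred   = torch.Tensor({0.5})
target = torch.Tensor({1.0})
print(mse:forward(pred, target))   -- (0.5 - 1)^2 = 0.25
print(mse:backward(pred, target))  -- 2 * (pred - target) = -1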

In [ ]:
-- Training by stochastic gradient descent
function fit(mlp, criterion, data, labels, lr, nIter)
   local lr = lr or 1e-4
   local nIter = nIter or 1200
   -- local choices = torch.LongTensor((#data)[1])

   for i = 1,nIter do
      mlp:zeroGradParameters()
      -- draw one training example at random
      -- (shuffled-batch variant, disabled:)
      -- choices:random((#data)[1])
      -- local x = data:index(1,choices)
      -- local y = labels:index(1,choices)
      local indiceRand = math.random((#data)[1])
      local x = data[indiceRand]
      local y = labels[indiceRand]

      -- compute y-hat (the prediction)
      local pred = mlp:forward(x)
      -- compute the loss between pred and the target y
      local loss = criterion:forward(pred, y)
      -- gradient of the loss with respect to pred
      local df_do = criterion:backward(pred, y)
      -- backpropagate the loss gradient through the network,
      -- down to the input layer
      local df_di = mlp:backward(x, df_do)
      -- update the weights of every layer with the learning rate
      mlp:updateParameters(lr)
      if i % 1000 == 0 then
         print(i, loss)
      end
   end
end
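
A usage sketch for fit on toy data (the shapes, network, and hyperparameters below are illustrative assumptions, not from the original notebook):

-- illustrative toy task: learn y = sum(x) from random data
data   = torch.randn(100, 5)
labels = data:sum(2)          -- 100x1 targets
mlp = nn.Sequential()
mlp:add(nn.Linear(5, 10))
mlp:add(nn.Tanh())
mlp:add(nn.Linear(10, 1))
fit(mlp, mse, data, labels, 1e-2, 2000)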
