Load dataset


In [ ]:
mnist = require('mnist')
image = require('image')   -- used below for visualisation

torch.seed()               -- seed the RNG non-deterministically

testset = mnist.testdataset()
-- convert the raw pixel values to double and clamp them into [0, 1]
testset['data'] = testset['data']:double():clamp(0,1)

In [ ]:
-- pick 100 random test digits and show them in a 10x10 grid
inputs_x = torch.Tensor(100, 28*28)
inputs_y = torch.ByteTensor(100)
for i=1, 100 do
    local num = torch.random(testset.size)
    inputs_x[i]:copy(testset[num].x)   -- flatten the 28x28 image into 784 values
    inputs_y[i] = testset[num].y       -- keep the true label for reference
end

wm = image.toDisplayTensor{
    input=inputs_x:view(torch.LongStorage{100, 28, 28}),
    padding=2, nrow=10}
itorch.image(wm)

Classify

We forward the classification RBM with a zero class vector and perform one step of Gibbs sampling from the input digit. The resulting digit class is stored in class_y.
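As a rough sketch of that step (using the standard conditionals of a classification RBM; the exact form implemented by ClassRBM may differ), the hidden units are sampled from the image with the class vector set to zero, and the class is then read back from the hidden units. With $W$ = weight, $U$ = uweight, $c$ = hbias and $d$ = dbias:

$$p(h_j = 1 \mid \mathbf{v}, \mathbf{y}) = \sigma\Big(c_j + \sum_i W_{ji} v_i + \sum_k U_{jk} y_k\Big), \qquad p(y_k = 1 \mid \mathbf{h}) = \frac{\exp\big(d_k + \sum_j U_{jk} h_j\big)}{\sum_{k'} \exp\big(d_{k'} + \sum_j U_{jk'} h_j\big)}$$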


In [ ]:
require('classrbm')

rbm = torch.load('models/trained_rbm_final.dat')

-- rebuild the classification RBM and copy the trained parameters into it
classrbm = ClassRBM(rbm.n_visible, rbm.n_hidden, rbm.n_class, rbm.batch)
classrbm.weight = rbm.weight
classrbm.vbias = rbm.vbias
classrbm.hbias = rbm.hbias
classrbm.uweight = rbm.uweight
classrbm.dbias = rbm.dbias

class_y = torch.ByteTensor(100)

for i=1, 100 do
    -- forward with a zero class vector: the class is inferred from the image alone
    local vt, yt = classrbm:forward{inputs_x[i], torch.zeros(10)}
    local _, y = torch.max(yt, 1)
    class_y[i] = y[1] - 1   -- torch.max returns a 1-based index, classes are 0-9
end

-- predicted classes, laid out like the 10x10 grid of digits above
print(class_y:view(10,10))

Classification error


In [ ]:
oneY = torch.Tensor(10)
validation_size = 256

err = 0

for i=1, validation_size do
    local index = torch.random(testset.size)
    local v1 = testset[index].x:view(28*28)
    local y1 = testset[index].y
    oneY:zero()
    oneY[y1+1] = 1                        -- true label as a one-hot vector
    local v2, y2 = classrbm:forward{v1, oneY}
    -- count a misclassification whenever the predicted one-hot vector differs
    err = err + (torch.ne(oneY, y2):sum() == 0 and 0 or 1)
end

print(100*(1-err/validation_size))        -- accuracy in percent

Weights


In [ ]:
-- each tile shows the input weights (receptive field) of one hidden unit
wm = image.toDisplayTensor{
    input=classrbm.weight:view(torch.LongStorage{classrbm.n_hidden, 28, 28}),
    padding=2, nrow=22}
itorch.image(wm)

Sample new digits

Sampling new digits is possible with this architecture; see http://www.cs.toronto.edu/~hinton/adi.
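
As a rough sketch (not part of this notebook), one way to do it is to clamp a one-hot class vector and run block Gibbs sampling with the parameters copied above; this assumes weight is n_hidden x n_visible and uweight is n_hidden x n_class, consistent with the views used earlier.


In [ ]:
-- Minimal sketch (an assumption, not the notebook's code): generate a digit of a
-- chosen class by clamping its one-hot vector and running block Gibbs sampling.
-- Assumes weight is n_hidden x n_visible and uweight is n_hidden x n_class.
target_class = 3                              -- digit to generate (0-9)
y = torch.zeros(10)
y[target_class + 1] = 1                       -- clamp the class unit

v = torch.rand(28*28)                         -- start from random visibles
for step = 1, 1000 do
    -- p(h = 1 | v, y) = sigmoid(W*v + U*y + hbias)
    local ph = torch.sigmoid(classrbm.weight * v + classrbm.uweight * y + classrbm.hbias)
    local h = torch.lt(torch.rand(ph:size()), ph):double()
    -- p(v = 1 | h) = sigmoid(W^T*h + vbias); keep probabilities for a smoother image
    v = torch.sigmoid(classrbm.weight:t() * h + classrbm.vbias)
end

itorch.image(v:view(28, 28))

With the class unit clamped, only the image units are resampled, so the chain drifts towards visible configurations of the chosen digit class, much like Hinton's demo linked above.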