In [2]:
require 'nn';

In [4]:
-- load the serialized training and validation tensors
file = torch.DiskFile('dat/facies_vectors.t7', 'r')
facies = file:readObject()
file:close()
file = torch.DiskFile('dat/validation_data_nofacies.t7', 'r')
validate = file:readObject()
file:close()
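
The DiskFile dance above can be collapsed into one call per file; a minimal alternative sketch, assuming the .t7 files were written with `torch.save` in its default binary format (pass `'ascii'` as a second argument otherwise):

facies   = torch.load('dat/facies_vectors.t7')            -- sketch: binary format assumed
validate = torch.load('dat/validation_data_nofacies.t7')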

In [7]:
-- build tables
print("facies size: ", facies:size()[1], "x", facies:size()[2])
print("validate size: ", validate:size()[1], "x", validate:size()[2])

-- initialize
training_data = {}
val_data = {}
depth = {}

-- build the training wells into the table
training_data["shrimplin"] = facies[{{1,471},{3,9}}]
training_data["alexander"] = facies[{{472,937},{3,9}}]
training_data["shankle"] = facies[{{938,1386},{3,9}}]
training_data["luke"] = facies[{{1387,1847},{3,9}}]
training_data["kimzey"] = facies[{{1848,2286},{3,9}}]
training_data["cross"] = facies[{{2287,2787},{3,9}}]
training_data["nolan"] = facies[{{2788,3202},{3,9}}]
training_data["recruit"] = facies[{{3203,3282},{3,9}}]
training_data["newby"] = facies[{{3283,3745},{3,9}}]
training_data["churchman"] = facies[{{3746,4149},{3,9}}]

-- build the validation data into a table
val_data["stuart"] = validate[{{1,474},{2,8}}]
val_data["crawford"] = validate[{{475,830},{2,8}}]

-- build a depth log for plotting
depth["shrimplin"] = facies[{{1,471},{2}}]
depth["alexander"] = facies[{{472,937},{2}}]
depth["shankle"] = facies[{{938,1386},{2}}]
depth["luke"] = facies[{{1387,1847},{2}}]
depth["kimzey"] = facies[{{1848,2286},{2}}]
depth["cross"] = facies[{{2287,2787},{2}}]
depth["nolan"] = facies[{{2788,3202},{2}}]
depth["recruit"] = facies[{{3203,3282},{2}}]
depth["newby"] = facies[{{3283,3745},{2}}]
depth["churchman"] = facies[{{3746,4149},{2}}]


Out[7]:
facies size: 	4149	x	9	
validate size: 	830	x	8	
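
The hard-coded row ranges above invite typos; a loop over a boundary table (a sketch reusing the same well names and bounds from the cell above) builds the same tables:

wells = {
    shrimplin = {1, 471},     alexander = {472, 937},
    shankle   = {938, 1386},  luke      = {1387, 1847},
    kimzey    = {1848, 2286}, cross     = {2287, 2787},
    nolan     = {2788, 3202}, recruit   = {3203, 3282},
    newby     = {3283, 3745}, churchman = {3746, 4149},
}
for name, b in pairs(wells) do
    training_data[name] = facies[{{b[1], b[2]}, {3, 9}}]
    depth[name]         = facies[{{b[1], b[2]}, {2}}]
end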

In [8]:
val_data


Out[8]:
{
  stuart : DoubleTensor - size: 474x7
  crawford : DoubleTensor - size: 356x7
}

In [9]:
-- normalize the data: z-score each log, per well
-- training data
mean = {}
stdv = {}

for key,value in pairs(training_data) do -- over each well
    mean[key] = torch.Tensor(7)
    stdv[key] = torch.Tensor(7)
    for i = 1, 7 do -- over each log
        mean[key][i] = training_data[key][{{},{i}}]:mean()
        training_data[key][{{},{i}}]:add(-mean[key][i])

        stdv[key][i] = training_data[key][{{},{i}}]:std()
        training_data[key][{{},{i}}]:div(stdv[key][i])
    end
end

-- validation data (separate tables, so the training statistics are not overwritten)
val_mean = {}
val_stdv = {}

for key,value in pairs(val_data) do -- over each well
    val_mean[key] = torch.Tensor(7)
    val_stdv[key] = torch.Tensor(7)
    for i = 1, 7 do -- over each log
        val_mean[key][i] = val_data[key][{{},{i}}]:mean()
        val_data[key][{{},{i}}]:add(-val_mean[key][i])

        val_stdv[key][i] = val_data[key][{{},{i}}]:std()
        val_data[key][{{},{i}}]:div(val_stdv[key][i])
    end
end

-- facies labels for training
facies_labels = {}

facies_labels["shrimplin"] = facies[{{1,471},{1}}]
facies_labels["alexander"] = facies[{{472,937},{1}}]
facies_labels["shankle"] = facies[{{938,1386},{1}}]
facies_labels["luke"] = facies[{{1387,1847},{1}}]
facies_labels["kimzey"] = facies[{{1848,2286},{1}}]
facies_labels["cross"] = facies[{{2287,2787},{1}}]
facies_labels["nolan"] = facies[{{2788,3202},{1}}]
facies_labels["recruit"] = facies[{{3203,3282},{1}}]
facies_labels["newby"] = facies[{{3283,3745},{1}}]
facies_labels["churchman"] = facies[{{3746,4149},{1}}]

In [11]:
-- build the neural net: 7 logs in -> two ReLU hidden layers -> 9 facies classes out
net = nn.Sequential()
net:add(nn.Linear(7,200))
net:add(nn.ReLU())
net:add(nn.Linear(200,50))
net:add(nn.ReLU())
net:add(nn.Linear(50,9))
net:add(nn.LogSoftMax()) -- log-probabilities, to pair with ClassNLLCriterion below
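
A quick inspection of what was just built: `print(net)` shows the layer stack, and the flattened parameter vector gives the trainable-parameter count (a sketch; note that `getParameters` re-flattens storage, so call it at most once per net):

print(net)
params = net:getParameters()
print(params:nElement())  -- (7+1)*200 + (200+1)*50 + (50+1)*9 = 12109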

In [12]:
-- test the net with a single forward pass
input = training_data["shrimplin"][1]:clone() -- copy of the first sample (7 logs)

output = net:forward(input)

-- zero the accumulated gradients, then backpropagate a dummy gradient
-- (torch.rand(9)) just to exercise the backward pass
net:zeroGradParameters()

gradInput = net:backward(input, torch.rand(9))
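
The forward pass returns log-probabilities because of the LogSoftMax layer; exponentiating (out of place, so `output` is left intact for the next cell) recovers a distribution over the nine facies:

print(torch.exp(output):sum())  -- ~1.0, since the exponentiated log-probabilities form a distribution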

In [13]:
-- define the loss function: negative log-likelihood over the
-- log-probabilities from LogSoftMax; the target is the class index
criterion = nn.ClassNLLCriterion()
criterion:forward(output, facies_labels["shrimplin"][1])

-- this time backpropagate the criterion's gradient instead of a random one
gradients = criterion:backward(output, facies_labels["shrimplin"][1])
gradInput = net:backward(input, gradients)
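
An equivalent formulation, shown only as a sketch (`net2` and `criterion2` are hypothetical names): drop the LogSoftMax layer and let nn.CrossEntropyCriterion combine LogSoftMax and ClassNLLCriterion internally:

net2 = nn.Sequential()
    :add(nn.Linear(7,200)):add(nn.ReLU())
    :add(nn.Linear(200,50)):add(nn.ReLU())
    :add(nn.Linear(50,9))               -- no LogSoftMax here
criterion2 = nn.CrossEntropyCriterion() -- LogSoftMax + ClassNLLCriterion in one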

In [15]:
-- condition the data
trainset = {}

-- the data
trainset["data"] = torch.Tensor(facies:size()[1],7) 

idx = 0
for key,value in pairs(training_data) do
    for i = 1,training_data[key]:size()[1] do
        trainset["data"][i + idx] = training_data[key][i]
    end
    idx = idx + training_data[key]:size()[1]
end

-- the answers
trainset["facies"] = torch.Tensor(facies:size()[1])

idx = 0
for key,value in pairs(facies_labels) do
    for i = 1, facies_labels[key]:size()[1] do
        trainset["facies"][i + idx] = facies_labels[key][i]
    end
    idx = idx + facies_labels[key]:size()[1]
end


-- give trainset the [i] and size() interface nn.StochasticGradient expects
setmetatable(trainset, 
    {__index = function(t, i) 
                    return {t.data[i], t.facies[i]} 
                end}
);

function trainset:size() 
    return self.data:size(1) 
end
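
-- quick sanity check (a sketch): size() and [i] are the only interface
-- nn.StochasticGradient uses on a dataset
-- print(trainset:size()) --> 4149
-- print(trainset[1])     --> {7-element input tensor, facies label}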

-- condition the validation data
valset = {}

-- the data
valset["data"] = torch.Tensor(validate:size()[1],7) 

idx = 0
for key,value in pairs(val_data) do
    for i = 1,val_data[key]:size()[1] do
        valset["data"][i + idx] = val_data[key][i]
    end
    idx = idx + val_data[key]:size()[1]
end

-- eliminate NaNs: a NaN is the only value not equal to itself,
-- so x:ne(x) masks exactly the missing entries
nan_mask = trainset.data:ne(trainset.data)
trainset.data[nan_mask] = 0
nan_mask = valset.data:ne(valset.data)
valset.data[nan_mask] = 0
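
One ordering caveat: any NaN that was present during the per-well normalization above has already turned that column's mean and std into NaN, so the mask here zeroes the whole affected column, not just the missing samples. Scrubbing the raw tensors first would confine the damage; a sketch:

function scrub(t)
    t[t:ne(t)] = 0  -- replace NaNs in place with zeros
    return t
end
-- e.g. scrub(facies) and scrub(validate) before slicing and normalizing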

In [16]:
-- train the net with per-sample stochastic gradient descent
trainer = nn.StochasticGradient(net, criterion)
trainer.learningRate = .001
trainer.maxIteration = 20 -- full passes over the training set

print("starting training")
timer = torch.Timer()
trainer:train(trainset)
print("training time =", timer:time().real)


Out[16]:
starting training
# StochasticGradient: training
# current error = 1.6750709765736
# current error = 1.299995678854
# current error = 1.187636072056
# current error = 1.1256669887461
# current error = 1.0868113130971
# current error = 1.0605348770086
# current error = 1.0410629133286
# current error = 1.0254415338866
# current error = 1.0121801044183
# current error = 1.0004392567964
# current error = 0.98970702478848
# current error = 0.9798716089222
# current error = 0.97080389415775
# current error = 0.962333377686
# current error = 0.95421214996677
# current error = 0.94635089067152
# current error = 0.93873328949925
# current error = 0.93125379235851
# current error = 0.92406292491902
# current error = 0.91696004944953
# StochasticGradient: you have reached the maximum number of iterations
# training error = 0.91696004944953
training time =	10.339772939682
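
Before looking at the blind wells, a training-set accuracy check gives a rough sense of fit (a sketch, reusing the trainset built above):

correct = 0
for i = 1, trainset:size() do
    local _, idx = torch.max(net:forward(trainset.data[i]), 1) -- argmax over 9 classes
    if idx[1] == trainset.facies[i] then correct = correct + 1 end
end
print(string.format('training accuracy: %.1f%%', 100 * correct / trainset:size()))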

In [21]:
-- predict facies for the validation wells: the class with the highest
-- log-probability wins
preds = torch.Tensor(valset.data:size()[1])
for i = 1,valset.data:size()[1] do
    local prediction = net:forward(valset.data[i])
    local confidences, indices = torch.sort(prediction, true) -- descending sort
    preds[i] = indices[1]
end
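
The descending sort works, but `torch.max` returns the argmax directly without ordering all nine scores; an equivalent sketch:

for i = 1, valset.data:size(1) do
    local _, idx = torch.max(net:forward(valset.data[i]), 1)
    preds[i] = idx[1]
end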

In [23]:
#preds


Out[23]:
 830
[torch.LongStorage of size 1]


In [24]:
preds


Out[24]:
 3
 3
 3
 3
 3
 3
 3
 3
 3
 3
 3
 2
 2
 2
 2
 2
 2
 2
 2
 2
 3
 2
 2
 2
 2
 2
 2
 2
 2
 2
 2
 2
 2
 2
 2
 2
 2
 3
 3
 3
 3
 3
 3
 8
 8
 8
 8
 6
 6
 6
 6
 6
 6
 4
 4
 4
 4
 4
 4
 4
 4
 4
 5
 6
 6
 6
 6
 4
 6
 6
 6
 8
 8
 8
 8
 8
 8
 8
 9
 9
 9
 9
 9
 9
 9
 9
 9
 8
 8
 8
 6
 6
 6
 6
 6
 6
 6
 6
 4
 4
 4
 4
 4
 4
 4
 4
 6
 6
 6
 6
 6
 6
 6
 6
 6
 6
 6
 6
 6
 6
 2
 2
 3
 3
 3
 3
 2
 2
 3
 3
 3
 2
 2
 2
 2
 2
 2
 2
 2
 2
 2
 2
 2
 2
 2
 2
 3
 3
 3
 3
 7
 6
 8
 8
 6
 6
 6
 6
 6
 8
 8
 8
 8
 7
 7
 7
 7
 6
 6
 6
 6
 6
 6
 6
 6
 6
 6
 6
 6
 6
 6
 8
 2
 3
 3
 3
 3
 3
 3
 3
 3
 3
 2
 3
 3
 3
 3
 3
 3
 3
 8
 8
 8
 8
 8
 8
 8
 8
 8
 8
 8
 8
 9
 9
 9
 9
 9
 9
 9
 8
 8
 8
 6
 6
 6
 6
 8
 2
 3
 3
 3
 3
 3
 3
 2
 2
 2
 2
 2
 2
 2
 2
 2
 2
 2
 2
 3
 3
 3
 8
 8
 8
 8
 8
 8
 8
 6
 6
 6
 2
 2
 2
 2
 2
 2
 2
 2
 2
 2
 2
 2
 2
 2
 2
 2
 3
 3
 3
 8
 8
 8
 8
 8
 8
 8
 8
 8
 6
 6
 8
 8
 3
 3
 3
 3
 8
 8
 8
 8
 8
 8
 8
 8
 8
 7
 7
 7
 7
 7
 7
 7
 7
 8
 6
 6
 6
 6
 6
 8
 9
 9
 9
 9
 9
 8
 8
 8
 8
 6
 6
 6
 6
 6
 8
 3
 3
 3
 3
 3
 3
 3
 3
 3
 3
 3
 3
 3
 3
 3
 3
 3
 3
 3
 3
 2
 2
 2
 2
 2
 2
 2
 2
 2
 2
 2
 2
 2
 2
 3
 3
 3
 3
 3
 8
 8
 8
 8
 8
 8
 8
 8
 8
 8
 8
 8
 8
 8
 8
 8
 8
 8
 8
 8
 8
 8
 5
 5
 5
 5
 5
 7
 5
 5
 5
 5
 5
 5
 5
 5
 5
 5
 5
 6
 6
 6
 6
 6
 6
 6
 6
 6
 6
 6
 6
 6
 6
 6
 6
 6
 6
 6
 6
 6
 4
 4
 5
 5
 5
 4
 4
 4
 4
 4
 4
 4
 4
 4
 4
 4
 4
 4
 4
 4
 4
 4
 4
 4
 4
 4
 4
 4
 4
 4
 4
 4
 4
 4
 4
 4
 4
 4
 4
 4
 4
 5
 8
 8
 8
 8
 8
 8
 8
 8
 8
 6
 5
 5
 5
 5
 4
 4
 4
 4
 4
 4
 4
 4
 4
 4
 4
 4
 4
 4
 4
 4
 4
 4
 4
 5
 5
 8
 8
 8
 5
 5
 4
 4
 4
 4
 6
 6
 8
 8
 8
 8
 8
 6
 6
 6
 6
 6
 6
 8
 8
 8
 6
 6
 6
 6
 6
 6
 4
 4
 4
 4
 4
 4
 8
 8
 6
 4
 4
 4
 8
 8
 8
 8
 8
 8
 8
 4
 8
 8
 6
 6
 6
 6
 6
 6
 6
 6
 8
 6
 6
 6
 2
 2
 5
 8
 8
 5
 5
 5
 5
 5
 5
 8
 8
 8
 8
 8
 8
 8
 8
 8
 8
 8
 8
 8
 6
 6
 6
 6
 6
 6
 6
 6
 6
 6
 6
 8
 6
 6
 8
 8
 8
 8
 6
 6
 8
 2
 3
 3
 3
 2
 2
 2
 2
 2
 2
 2
 2
 2
 2
 2
 2
 2
 2
 2
 3
 8
 8
 8
 8
 8
 8
 8
 8
 8
 8
 8
 8
 8
 8
 8
 7
 7
 7
 7
 4
 4
 7
 7
 8
 6
 5
 5
 4
 3
 3
 3
 2
 2
 2
 2
 2
 2
 2
 2
 2
 2
 2
 2
 2
 2
 3
 3
 8
 8
 8
 8
 8
 8
 8
 8
 8
 8
 8
 8
 6
 6
 6
 6
 3
 3
 3
 2
 2
 2
 2
 2
 2
 2
 2
 2
 2
 2
 2
 2
 2
 2
 3
 3
 8
 8
 8
 8
 8
 8
 8
 6
 6
 6
 6
 6
 6
 6
 6
 6
 6
 3
 3
 2
 2
 2
 2
 2
 2
 2
 2
 2
 3
 3
 8
 8
 8
 8
 8
 8
 8
 6
 7
 7
 8
 8
 8
 7
 7
 7
 7
 7
 7
 7
 7
 7
 7
 7
 7
 7
 7
 7
 7
 7
 7
 7
 7
 7
 7
 7
 7
 7
 7
 7
 7
 7
 7
 7
 7
 7
 7
 7
 7
 8
 8
 8
 8
 6
 6
 6
 6
 6
 8
 3
 3
 3
 3
 3
 3
 3
 2
 3
 3
 3
 3
 3
 3
 3
 3
 3
 3
[torch.DoubleTensor of size 830]

