In [1]:
# Import libraries for simulation
import tensorflow as tf
import numpy as np
import random as r
In [2]:
dimensions = (8,8)
mineProbability = 0.2 # Probability that a square contains a mine
missingProbability = 0.1 # Probability that a square is missing adjacency info
In [3]:
# Count the number of mines in the neighbourhood of a given square, including the square itself
def countMines(board, r, c):
    count = 0
    rows, cols = board.shape
    for i in [r-1, r, r+1]:
        if 0 <= i < rows:
            for j in [c-1, c, c+1]:
                if 0 <= j < cols:
                    count += int(board[i, j])
    return count
In [4]:
# Builds the size x size 0/1 adjacency matrix A of the flattened board:
# A[rA, cA] = 1 iff squares rA and cA are within one row and one column of each other.
def minesweepMatrix(dimensions):
    rows, cols = dimensions
    size = rows * cols
    A = np.zeros([size, size], dtype=int)
    for rA in range(size):
        for cA in range(size):
            inRow, inCol = divmod(rA, cols)
            outRow, outCol = divmod(cA, cols)
            A[rA, cA] = abs(inRow - outRow) <= 1 and abs(inCol - outCol) <= 1
    return A
In [5]:
# Converts a board of mines into a board of mine counts
# (the straightforward loop version is kept below, commented out, for reference)
'''
def boardMineCounts_(board):
    mineInfo = np.zeros(board.shape, dtype=int)
    rows, cols = board.shape
    for i in range(rows):
        for j in range(cols):
            mineInfo[i, j] = countMines(board, i, j)
    return mineInfo
'''
def boardMineCounts(board):
    return minesweepMatrix(board.shape).dot(board.flatten()).reshape(board.shape)
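In [ ]:
# Sanity check (added, not part of the original notebook): the matrix-based
# boardMineCounts should agree with the loop-based countMines on a random board.
checkBoard = np.random.random((4, 5)) < mineProbability
loopCounts = np.array([[countMines(checkBoard, i, j) for j in range(5)] for i in range(4)])
print(np.array_equal(boardMineCounts(checkBoard), loopCounts))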
In [6]:
# Hides information: returns the mine counts with every mined square, and a random
# missingProbability fraction of the remaining squares, replaced by -1.
def boardPartialMineCounts(board):
    result = boardMineCounts(board)
    for index, x in np.ndenumerate(board):
        if x: result[index] = -1
        elif r.uniform(0, 1) < missingProbability: result[index] = -1
    return result
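In [ ]:
# Illustration (added): generate one board and compare its full mine counts with the
# partially hidden version; -1 marks mines and randomly hidden squares.
demoBoard = np.random.random(dimensions) < mineProbability
print(boardMineCounts(demoBoard))
print(boardPartialMineCounts(demoBoard))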
In [7]:
# Generates a random training batch of size n
def next_training_batch(n):
    batch_xs = []
    batch_ys = []
    for _ in range(n):
        board = np.random.random(dimensions) < mineProbability
        counts = boardPartialMineCounts(board)
        batch_xs.append(counts.flatten())
        batch_ys.append(board.flatten().astype(float))
    return (np.asarray(batch_xs), np.asarray(batch_ys))
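In [ ]:
# Quick shape check (added): a batch of n boards gives an (n, rows*cols) array of
# flattened partial counts and an (n, rows*cols) array of 0/1 mine labels.
demo_xs, demo_ys = next_training_batch(3)
print(demo_xs.shape, demo_ys.shape)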
In [8]:
# Create the model
rows, cols = dimensions
size = rows*cols
mineCounts = tf.placeholder(tf.int32, [None, size], name="mineCounts")
mineCountsOneHot = tf.reshape(tf.one_hot(mineCounts+1,10), [-1, size*10])
W = tf.Variable(tf.random_normal([size*10, size], stddev=0.01), name="W")
b = tf.Variable(tf.random_normal([size], stddev=0.01), name="b")
y = tf.sigmoid(tf.matmul(mineCountsOneHot, W) + b)
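In [ ]:
# Why depth 10 in the one-hot encoding (added illustration, using NumPy rather than the
# graph): visible counts range over 0..8 and hidden squares are marked -1, so shifting
# by +1 gives 10 classes 0..9. np.eye(10)[c+1] mirrors what tf.one_hot produces here.
exampleCounts = np.array([-1, 0, 3, 8])
print(np.eye(10, dtype=int)[exampleCounts + 1])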
In [9]:
mines = tf.placeholder(tf.float32, [None, size], name="mines")
In [10]:
# Loss function
mean_squared_error = tf.losses.mean_squared_error(labels=mines, predictions=y)
In [11]:
# Summaries for TensorBoard
with tf.name_scope('W_reshape'):
    image_shaped_W = tf.reshape(W, [-1, size*10, size, 1])
    tf.summary.image('W', image_shaped_W, 1000)
with tf.name_scope('b_reshape'):
    image_shaped_b = tf.reshape(b, [-1, rows, cols, 1])
    tf.summary.image('b', image_shaped_b, 1000)
_ = tf.summary.scalar('mean_squared_error', mean_squared_error)
In [12]:
# Optimiser
train_step = tf.train.AdamOptimizer().minimize(mean_squared_error)
In [13]:
# Create the session, merge the summaries, and initialise the variables
saver = tf.train.Saver()
sess = tf.InteractiveSession()
merged = tf.summary.merge_all()
writer = tf.summary.FileWriter('.', sess.graph)
tf.global_variables_initializer().run()
In [14]:
# Optionally restore a previously saved model from a checkpoint
saver.restore(sess, "./saves.tf.Mines2/model-21000")
In [ ]:
# Train
for iteration in range(100001):
    batch_xs, batch_ys = next_training_batch(100)
    if iteration % 100 == 0:
        summary, mse, _ = sess.run([merged, mean_squared_error, train_step], feed_dict={mineCounts: batch_xs, mines: batch_ys})
        writer.add_summary(summary, iteration)
        print('Mean squared error at step %s: %s' % (iteration, mse))
    else:
        _ = sess.run(train_step, feed_dict={mineCounts: batch_xs, mines: batch_ys})
    if iteration % 1000 == 0:
        save_path = saver.save(sess, './saves.tf.Mines2/model', global_step=iteration)
        print("Model saved in file: %s" % save_path)
In [15]:
# Test the trained model: mean squared error on a fresh batch of 1000 boards
batch_xs, batch_ys = next_training_batch(1000)
print(sess.run(mean_squared_error, feed_dict={mineCounts: batch_xs, mines: batch_ys}))
In [33]:
# Run a single randomised test
batch_xs, batch_ys = next_training_batch(1)
print("mines")
print(batch_ys.astype(int).reshape(dimensions))
print("predicted mines")
result = sess.run(y, feed_dict={mineCounts: batch_xs, mines: batch_ys})
predictions = (result > 0.5).astype(int)
print(predictions.reshape(dimensions))
print("errors")
print((predictions != batch_ys.astype(int)).astype(int).sum())
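In [ ]:
# Optional extra view (added as a sketch): print the raw predicted mine probabilities,
# rounded to two decimals, alongside the thresholded predictions above.
print(np.round(result.reshape(dimensions), 2))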
In [ ]: