In [1]:
# Import libraries for simulation
import tensorflow as tf
import numpy as np
import random as r
import datetime as dt


/home/ruben/anaconda3/lib/python3.6/importlib/_bootstrap.py:219: RuntimeWarning: compiletime version 3.5 of module 'tensorflow.python.framework.fast_tensor_util' does not match runtime version 3.6
  return f(*args, **kwds)

In [2]:
dimensions = (16,16)
mineProbability = 0.2      # Probability that a square contain a mine
missingProbability = 0.5   # Probability that a square is missing adjacency info

In [3]:
# This is a matrix that maps mine board vectors to mine count vectors
def minesweepMatrix(dimensions):
    rows,cols = dimensions
    size = rows * cols
    A = np.zeros([size,size],dtype=int)
    for rA in range(size):
        for cA in range(size):
            inRow, inCol = divmod(rA,cols)
            outRow, outCol = divmod(cA,cols)
            A[rA,cA] = abs(inRow-outRow) <= 1 and abs(inCol-outCol) <= 1
    return(A)

In [4]:
# Converts a board of mines into a board of mine counts
def boardMineCounts(board):
    return(minesweepMatrix(board.shape).dot(board.flatten()).reshape(board.shape))

In [5]:
# This takes a mine board and gives a mine count with mines removed, and other random squares removed
def boardPartialMineCounts(board):
    result = boardMineCounts(board)
    for index, x in np.ndenumerate(board):
        if x: result[index] = -1
        elif r.uniform(0, 1) < missingProbability: result[index] = -1
    return result

In [6]:
# Generates a random training batch of size at most n
def next_training_batch(n):
    batch_xs = []
    batch_ys = []
    for _ in range(n):
        board = np.random.random(dimensions) < mineProbability
        counts = boardPartialMineCounts(board)
        frees = (1 - board).flatten().astype(float)
        freesSum = sum(frees)
        if freesSum > 0:
            batch_xs.append(counts.flatten())
            batch_ys.append(frees / freesSum)
    return (np.asarray(batch_xs), np.asarray(batch_ys))

In [7]:
# Create the model
rows, cols = dimensions
size = rows*cols
mineCounts = tf.placeholder(tf.int32, [None, size], name="mineCounts")
mineCountsOneHot = tf.reshape(tf.one_hot(mineCounts+1,10), [-1, size*10])
W = tf.Variable(tf.random_normal([size*10, size], stddev=0.01), name="W")
b = tf.Variable(tf.random_normal([size], stddev=0.01), name="b")
y = tf.matmul(mineCountsOneHot, W) + b

In [8]:
mineFreeAverages = tf.placeholder(tf.float32, [None, size], name="mineFreeAverages")

In [9]:
# Loss function
cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=mineFreeAverages, logits=y))

In [10]:
# Summaries for tensorboard
with tf.name_scope('W_reshape'):
    image_shaped_W = tf.reshape(W, [-1, size*10, size, 1])
    tf.summary.image('W', image_shaped_W, 1000)

with tf.name_scope('b_reshape'):
    image_shaped_b = tf.reshape(b, [-1, rows, cols, 1])
    tf.summary.image('b', image_shaped_b, 1000)

_ = tf.summary.scalar('accuracy', cross_entropy)

In [11]:
# Optimiser
train_step = tf.train.AdamOptimizer().minimize(cross_entropy)

In [12]:
# Create session and initialise or restore stuff
savePath = './saves.tf.Mines3/' + str(dimensions) + '/'
saver = tf.train.Saver()

sess = tf.InteractiveSession()

merged = tf.summary.merge_all()
writer = tf.summary.FileWriter('.', sess.graph)

In [13]:
tf.global_variables_initializer().run()

In [14]:
# Restore model?
#saver.restore(sess, savePath + "model-10000")

In [15]:
# Train
for iteration in range(10001):
    batch_xs, batch_ys = next_training_batch(100)
    if iteration % 10 == 0:
        summary, loss, _ = sess.run([merged, cross_entropy, train_step],
                                   feed_dict={mineCounts: batch_xs, mineFreeAverages: batch_ys})
        writer.add_summary(summary, iteration)
        print('%s: Loss at step %s: %s' % (dt.datetime.now().strftime("%Y-%m-%d %H:%M:%S"), iteration, loss))
    else:
        _ = sess.run(train_step, feed_dict={mineCounts: batch_xs, mineFreeAverages: batch_ys})
    if iteration % 1000 == 0:
        save_path = saver.save(sess, savePath + 'model', global_step=iteration)
        print("Model saved in file: %s" % save_path)


2017-11-05 19:09:42: Accuracy at step 0: 5.5594
Model saved in file: ./saves.tf.Mines3/(16, 16)/model-0
2017-11-05 19:10:05: Accuracy at step 10: 5.55122
2017-11-05 19:10:27: Accuracy at step 20: 5.54606
2017-11-05 19:10:50: Accuracy at step 30: 5.5413
2017-11-05 19:11:12: Accuracy at step 40: 5.53729
2017-11-05 19:11:35: Accuracy at step 50: 5.5352
2017-11-05 19:11:58: Accuracy at step 60: 5.53117
2017-11-05 19:12:20: Accuracy at step 70: 5.52908
2017-11-05 19:12:43: Accuracy at step 80: 5.5265
2017-11-05 19:13:05: Accuracy at step 90: 5.52514
2017-11-05 19:13:28: Accuracy at step 100: 5.5223
2017-11-05 19:13:51: Accuracy at step 110: 5.52158
2017-11-05 19:14:13: Accuracy at step 120: 5.51888
2017-11-05 19:14:36: Accuracy at step 130: 5.51745
2017-11-05 19:14:58: Accuracy at step 140: 5.5161
2017-11-05 19:15:21: Accuracy at step 150: 5.51441
2017-11-05 19:15:44: Accuracy at step 160: 5.51482
2017-11-05 19:16:06: Accuracy at step 170: 5.51461
2017-11-05 19:16:29: Accuracy at step 180: 5.51407
2017-11-05 19:16:51: Accuracy at step 190: 5.51331
2017-11-05 19:17:14: Accuracy at step 200: 5.51058
2017-11-05 19:17:37: Accuracy at step 210: 5.51082
2017-11-05 19:17:59: Accuracy at step 220: 5.51013
2017-11-05 19:18:22: Accuracy at step 230: 5.50881
2017-11-05 19:18:44: Accuracy at step 240: 5.5098
2017-11-05 19:19:07: Accuracy at step 250: 5.50831
2017-11-05 19:19:30: Accuracy at step 260: 5.5067
2017-11-05 19:19:52: Accuracy at step 270: 5.50804
2017-11-05 19:20:15: Accuracy at step 280: 5.50685
2017-11-05 19:20:37: Accuracy at step 290: 5.50761
2017-11-05 19:21:00: Accuracy at step 300: 5.50845
2017-11-05 19:21:22: Accuracy at step 310: 5.50604
2017-11-05 19:21:45: Accuracy at step 320: 5.50509
2017-11-05 19:22:08: Accuracy at step 330: 5.50585
2017-11-05 19:22:30: Accuracy at step 340: 5.50426
2017-11-05 19:22:53: Accuracy at step 350: 5.50444
2017-11-05 19:23:16: Accuracy at step 360: 5.50507
2017-11-05 19:23:39: Accuracy at step 370: 5.50597
2017-11-05 19:24:02: Accuracy at step 380: 5.50471
2017-11-05 19:24:25: Accuracy at step 390: 5.50616
2017-11-05 19:24:48: Accuracy at step 400: 5.50452
2017-11-05 19:25:11: Accuracy at step 410: 5.50561
2017-11-05 19:25:33: Accuracy at step 420: 5.50472
2017-11-05 19:25:56: Accuracy at step 430: 5.50448
2017-11-05 19:26:18: Accuracy at step 440: 5.50667
2017-11-05 19:26:41: Accuracy at step 450: 5.5018
2017-11-05 19:27:03: Accuracy at step 460: 5.50454
2017-11-05 19:27:26: Accuracy at step 470: 5.5042
2017-11-05 19:27:49: Accuracy at step 480: 5.50421
2017-11-05 19:28:11: Accuracy at step 490: 5.50296
2017-11-05 19:28:34: Accuracy at step 500: 5.50367
2017-11-05 19:28:56: Accuracy at step 510: 5.50453
2017-11-05 19:29:19: Accuracy at step 520: 5.50378
2017-11-05 19:29:42: Accuracy at step 530: 5.50327
2017-11-05 19:30:04: Accuracy at step 540: 5.50343
2017-11-05 19:30:27: Accuracy at step 550: 5.50451
2017-11-05 19:30:49: Accuracy at step 560: 5.50338
2017-11-05 19:31:12: Accuracy at step 570: 5.5039
2017-11-05 19:31:35: Accuracy at step 580: 5.50377
2017-11-05 19:31:57: Accuracy at step 590: 5.50373
2017-11-05 19:32:20: Accuracy at step 600: 5.5029
2017-11-05 19:32:42: Accuracy at step 610: 5.50346
2017-11-05 19:33:05: Accuracy at step 620: 5.50321
2017-11-05 19:33:27: Accuracy at step 630: 5.50288
2017-11-05 19:33:50: Accuracy at step 640: 5.50188
2017-11-05 19:34:13: Accuracy at step 650: 5.50226
---------------------------------------------------------------------------
KeyboardInterrupt                         Traceback (most recent call last)
<ipython-input-15-46a985090670> in <module>()
      1 # Train
      2 for iteration in range(10001):
----> 3     batch_xs, batch_ys = next_training_batch(100)
      4     if iteration % 10 == 0:
      5         summary, acc, _ = sess.run([merged, cross_entropy, train_step],

<ipython-input-6-4d8e8ac9caa3> in next_training_batch(n)
      5     for _ in range(n):
      6         board = np.random.random(dimensions) < mineProbability
----> 7         counts = boardPartialMineCounts(board)
      8         frees = (1 - board).flatten().astype(float)
      9         freesSum = sum(frees)

<ipython-input-5-c3d0d5c89dc2> in boardPartialMineCounts(board)
      1 def boardPartialMineCounts(board):
----> 2     result = boardMineCounts(board)
      3     for index, x in np.ndenumerate(board):
      4         if x: result[index] = -1
      5         elif r.uniform(0, 1) < missingProbability: result[index] = -1

<ipython-input-4-40f4568274db> in boardMineCounts(board)
      1 # Converts a board of mines into a board of mine counts
      2 def boardMineCounts(board):
----> 3     return(minesweepMatrix(board.shape).dot(board.flatten()).reshape(board.shape))

<ipython-input-3-298b8d8d714c> in minesweepMatrix(dimensions)
      7             inRow, inCol = divmod(rA,cols)
      8             outRow, outCol = divmod(cA,cols)
----> 9             A[rA,cA] = abs(inRow-outRow) <= 1 and abs(inCol-outCol) <= 1
     10     return(A)

KeyboardInterrupt: 

In [16]:
# Test trained model on larger batch size
batch_xs, batch_ys = next_training_batch(1000)
print(sess.run(cross_entropy, feed_dict={mineCounts: batch_xs, mineFreeAverages: batch_ys}))


5.50243

In [18]:
# Run a test
batchSize = 10000
batch_xs, batch_ys = next_training_batch(batchSize)

predictions = sess.run(tf.nn.softmax(y), feed_dict={mineCounts: batch_xs, mineFreeAverages: batch_ys})
bestSquares = [pred.argmax() for pred in predictions]
board = (batch_ys == 0).astype(int)
frees = [board[i][bestSquares[i]] for i in range(batchSize)]
print("Number of errors for batch size of ", batchSize)
print(sum(frees))


Number of errors for batch size of  10000
0

In [ ]: