In [12]:
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import numpy as np
import tensorflow as tf
from preprocessing import directory_to_data_files, sample_data

from tensorflow.contrib import learn
from tensorflow.contrib.learn.python.learn.estimators import model_fn as model_fn_lib

tf.logging.set_verbosity(tf.logging.INFO)

In [13]:
# Specify Data

# data_filename = './Data/data_9x9.txt'
# board_side_length = 9

data_filename = './Data/data_19x19_rc.txt'
board_side_length = 19

D = board_side_length*board_side_length  # length of x and y vectors
max_examples = sum(1 for line in open(data_filename))

In [14]:
def cnn_model_fn(board, labels, mode):
    
    # Input Layer
    input_layer = tf.reshape(board, [-1, 19, 19, 1])
    
    # Convolutional Layer #1
    # Input is size [batchsize, 19, 19, 1]        
    # Output is size [batchsize, 19, 19, 32]
    conv1 = tf.layers.conv2d(
        inputs=input_layer,
        filters=32, #Number of filters in the first layer
        kernel_size=5,  #NxN filter size
        padding="same",
        activation=tf.nn.relu)
    
    # Pooling Layer #1
    # Input is size [batchsize, 19, 19, 32]        
    # Output is size [batchsize, 10, 10, 32]
    
    # border_mode='same' pads the input so 19x19 -> 10x10 instead of dropping one and -> 9x9
    pool1 = tf.layers.max_pooling2d(inputs=conv1, pool_size=2, strides=2, padding='same')
    
    # Convolutional Layer #2
    # Input is size [batchsize, 10, 10, 32]        
    # Output is size [batchsize, 10, 10, 64]
    conv2 = tf.layers.conv2d(
        inputs=pool1,
        filters=64,  #Number of filters in the second layer
        kernel_size=5,
        padding="same",
        activation=tf.nn.relu)
    
    #Pooling Layer #2
    # Input is size [batchsize, 10, 10, 64]        
    # Output is size [batchsize, 5, 5, 64]
    pool2 = tf.layers.max_pooling2d(inputs=conv2, pool_size=2, strides=2)

    # Dense Layer
    # Input is size [batchsize, 5, 5, 64]
    # Output is size [batchsize, 5 * 5 * 64]
    pool2_flat = tf.reshape(pool2, [-1, 5 * 5 * 64])
    dense = tf.layers.dense(inputs=pool2_flat, units=1024, activation=tf.nn.relu)
    dropout = tf.layers.dropout(inputs=dense, rate=0.4, training=mode == learn.ModeKeys.TRAIN)

    # Logits Layer
    #Is this our output layer?
    logits = tf.layers.dense(inputs=dropout, units=361)

    loss = None
    train_op = None

    # Calculate Loss (for both TRAIN and EVAL modes)
    if mode != learn.ModeKeys.INFER:
        #onehot_labels = tf.one_hot(indices=tf.cast(labels, tf.int32), depth=10) #Should already be one-hot encoded
        loss = tf.losses.softmax_cross_entropy(
        onehot_labels=labels, logits=logits)

    # Configure the Training Op (for TRAIN mode)
    if mode == learn.ModeKeys.TRAIN:
        train_op = tf.contrib.layers.optimize_loss(
            loss=loss,
            global_step=tf.contrib.framework.get_global_step(),
            learning_rate=0.001,
            optimizer="SGD")
        
    # Generate Predictions
    predictions = {
        "classes": tf.argmax(
            input=logits, axis=1),
        "probabilities": tf.nn.softmax(
            logits, name="softmax_tensor")
  }
    # Return a ModelFnOps object
    return model_fn_lib.ModelFnOps(
        mode=mode, predictions=predictions, loss=loss, train_op=train_op)

In [25]:
def main(unused_argv):
    # Load training and eval data
#     mnist = learn.datasets.load_dataset("mnist")
#     train_data = mnist.train.images # Returns np.array
#     train_labels = np.asarray(mnist.train.labels, dtype=np.int32)
#     eval_data = mnist.test.images # Returns np.array
#     eval_labels = np.asarray(mnist.test.labels, dtype=np.int32)

    batch_size = 10000
    _, X, Y = sample_data(data_filename, k=batch_size)
    
    print(X[0])
    print(Y[0])
    
    # Create the Estimator
    go_classifier = learn.Estimator(model_fn=cnn_model_fn, model_dir="/tmp/go_convnet_model")

    
    # Set up logging for predictions
    tensors_to_log = {"probabilities": "softmax_tensor"}
    logging_hook = tf.train.LoggingTensorHook(tensors=tensors_to_log, every_n_iter=50)
    
    # Train the model
    go_classifier.fit(
        x=X,
        y=Y,
        batch_size=100,
        steps=20000,
        monitors=[logging_hook])
    # Configure the accuracy metric for evaluation
    metrics = {"accuracy":
               learn.metric_spec.MetricSpec(
                   metric_fn=tf.metrics.accuracy, prediction_key="classes"),
              }
    # Evaluate the model and print results
#     eval_results = go_classifier.evaluate(
#         x=eval_data, y=eval_labels, metrics=metrics)
#     print(eval_results)
    
if __name__ == "__main__":
    tf.app.run()


[ 0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  1  0  0  0
  0  0  0  0  0  0  0  0  0  0  0  0  0  0 -1  1  0  0  1  1 -1  0  0  0  0
  0  1 -1  0  0  0  0  0 -1 -1  1  0  0  0  0  0 -1  0  1  0  1  0 -1  0  0
  0  0 -1  1  1 -1  1  0  1  0  0  0  0  0  0 -1  0  0 -1  0  0  0 -1 -1 -1
  0  0  0 -1 -1  1  0  0  0  0  0  1  0  0  0  0  0  0  0  0  1  0  0  0  1
  0  0  0  0  0  0  0  0  0  0  0 -1  0  1 -1  0 -1 -1  0  0  0  0  0  0  0
  0  0  0  0  0  0  0  0  0  0  0  1  0  0  0  0  0  1  0  0  0  0  0 -1 -1
  1  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  1 -1  1  0  1  1  1  0  0
  0  0  0  0  0  0 -1  0  0  0  0  1  1  0 -1 -1  0  0  0  0  0  0  0  0  0
  0  0  0  0  1 -1 -1 -1  0  0  1  0  0  0  0  0  0  0  0  0  0  0  0  0 -1
  1  1  0 -1 -1 -1  0  0  0  0  0  0  0  0  0  0  0 -1  0  0  0  0  0 -1  1
  0  0  0  0  0  0 -1  0  0  0  0  1  1  1  0  0 -1  1  0  1  0  0  0  0  0
  0  0  0  0  0  0  0  0  0  1  0 -1  1  1  0  0  1  0  0 -1  0  0  0  0  0
  0  0  0  0 -1  0 -1  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0
  0  0  0  0  0  0  0  0  0  0  0]
[0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0
 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0]
INFO:tensorflow:Using default config.
INFO:tensorflow:Using config: {'save_summary_steps': 100, '_num_ps_replicas': 0, '_task_type': None, '_environment': 'local', '_is_chief': True, 'save_checkpoints_secs': 600, '_cluster_spec': <tensorflow.python.training.server_lib.ClusterSpec object at 0x10f3da310>, 'tf_config': gpu_options {
  per_process_gpu_memory_fraction: 1
}
, '_task_id': 0, 'tf_random_seed': None, 'keep_checkpoint_every_n_hours': 10000, '_evaluation_master': '', 'save_checkpoints_steps': None, '_master': '', 'keep_checkpoint_max': 5}
WARNING:tensorflow:From <ipython-input-25-8d770778e503>:29 in main.: calling fit (from tensorflow.contrib.learn.python.learn.estimators.estimator) with x is deprecated and will be removed after 2016-12-01.
Instructions for updating:
Estimator is decoupled from Scikit Learn interface by moving into
separate class SKCompat. Arguments x, y and batch_size are only
available in the SKCompat class, Estimator will only accept input_fn.
Example conversion:
  est = Estimator(...) -> est = SKCompat(Estimator(...))
WARNING:tensorflow:From <ipython-input-25-8d770778e503>:29 in main.: calling fit (from tensorflow.contrib.learn.python.learn.estimators.estimator) with y is deprecated and will be removed after 2016-12-01.
Instructions for updating:
Estimator is decoupled from Scikit Learn interface by moving into
separate class SKCompat. Arguments x, y and batch_size are only
available in the SKCompat class, Estimator will only accept input_fn.
Example conversion:
  est = Estimator(...) -> est = SKCompat(Estimator(...))
WARNING:tensorflow:From <ipython-input-25-8d770778e503>:29 in main.: calling fit (from tensorflow.contrib.learn.python.learn.estimators.estimator) with batch_size is deprecated and will be removed after 2016-12-01.
Instructions for updating:
Estimator is decoupled from Scikit Learn interface by moving into
separate class SKCompat. Arguments x, y and batch_size are only
available in the SKCompat class, Estimator will only accept input_fn.
Example conversion:
  est = Estimator(...) -> est = SKCompat(Estimator(...))
---------------------------------------------------------------------------
AttributeError                            Traceback (most recent call last)
<ipython-input-25-8d770778e503> in <module>()
     39 
     40 if __name__ == "__main__":
---> 41     tf.app.run()

/usr/local/lib/python2.7/site-packages/tensorflow/python/platform/app.pyc in run(main, argv)
     41 
     42   # Call the main function, passing through any arguments
---> 43   # to the final program.
     44   _sys.exit(main(_sys.argv[:1] + flags_passthrough))
     45 

<ipython-input-25-8d770778e503> in main(unused_argv)
     27         batch_size=100,
     28         steps=20000,
---> 29         monitors=[logging_hook])
     30     # Configure the accuracy metric for evaluation
     31     metrics = {"accuracy":

/usr/local/lib/python2.7/site-packages/tensorflow/python/util/deprecation.pyc in new_func(*args, **kwargs)
    189       names_to_ok_vals: dict from string arg_name to a list of values,
    190         possibly empty, which should not elicit a warning.
--> 191       arg_spec: Output from inspect.getargspec on the called function.
    192 
    193     Returns:

/usr/local/lib/python2.7/site-packages/tensorflow/contrib/learn/python/learn/estimators/estimator.pyc in fit(self, x, y, input_fn, steps, batch_size, monitors, max_steps)
    348 
    349   # Note that for Google users, this is overriden with
--> 350   # learn_runner.EstimatorConfig.
    351   # TODO(wicke): Remove this once launcher takes over config functionality
    352   _Config = run_config.RunConfig  # pylint: disable=invalid-name

/usr/local/lib/python2.7/site-packages/tensorflow/contrib/learn/python/learn/estimators/estimator.pyc in _get_input_fn(x, y, input_fn, feed_fn, batch_size, shuffle, epochs)
    108 
    109 def _get_input_fn(x, y, input_fn, feed_fn, batch_size, shuffle=False, epochs=1):
--> 110   """Make inputs into input and feed functions.
    111 
    112   Args:

/usr/local/lib/python2.7/site-packages/tensorflow/contrib/learn/python/learn/learn_io/data_feeder.pyc in setup_train_data_feeder(x, y, n_classes, batch_size, shuffle, epochs)
    116       supports iterables.
    117     y: numpy, pandas or Dask array or dictionary of aforementioned. Also
--> 118       supports
    119       iterables.
    120     n_classes: number of classes. Must be None or same type as y. In case, `y`

/usr/local/lib/python2.7/site-packages/tensorflow/contrib/learn/python/learn/learn_io/data_feeder.pyc in __init__(self, x, y, n_classes, batch_size, shuffle, random_state, epochs)
    248   if isinstance(array, (np.ndarray, list)):
    249     array = np.array(array, dtype=dtype, order=None, copy=False)
--> 250   return array
    251 
    252 

AttributeError: 'list' object has no attribute 'dtype'

In [ ]: