In [1]:
import tensorflow as tf
import pandas as pd
train_writer = tf.summary.FileWriter(logdir=r"c:/TensorFlowLogs/")  # TensorBoard event-file writer (note: Estimators also write summaries to their own model_dir)


C:\ProgramData\Anaconda3\lib\site-packages\h5py\__init__.py:34: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. In future, it will be treated as `np.float64 == np.dtype(float).type`.
  from ._conv import register_converters as _register_converters

In [2]:
# Fetch the data
TRAIN_URL = "http://download.tensorflow.org/data/iris_training.csv"
TEST_URL = "http://download.tensorflow.org/data/iris_test.csv"

CSV_COLUMN_NAMES = ['SepalLength', 'SepalWidth',
                    'PetalLength', 'PetalWidth', 'Species']
SPECIES = ['Setosa', 'Versicolor', 'Virginica']

def maybe_download():
    train_path = tf.keras.utils.get_file(TRAIN_URL.split('/')[-1], TRAIN_URL)
    test_path = tf.keras.utils.get_file(TEST_URL.split('/')[-1], TEST_URL)

    return train_path, test_path

def load_data(y_name='Species'):
    """Returns the iris dataset as (train_x, train_y), (test_x, test_y)."""
    train_path, test_path = maybe_download()

    train = pd.read_csv(train_path, names=CSV_COLUMN_NAMES, header=0)
    train_x, train_y = train, train.pop(y_name)

    test = pd.read_csv(test_path, names=CSV_COLUMN_NAMES, header=0)
    test_x, test_y = test, test.pop(y_name)

    return (train_x, train_y), (test_x, test_y)

(train_x, train_y), (test_x, test_y) = load_data()

# Feature columns describe how each input should be used.
# Some useful options:
# - bucketized_column: bins a numeric column into ranges
# - categorical_column_with_identity / categorical_column_with_vocabulary_list:
#   enum-like features, keyed by integer id or by string respectively
# - categorical_column_with_hash_bucket: hashes values into a fixed number of
#   buckets (handy when there are many categories)
# - crossed_column: feature crosses, e.g. latitude x longitude

# Categorical columns must be converted to dense numeric form before a DNN can use them:
# - indicator_column: one-hot encoding
# - embedding_column: embeds a high-cardinality categorical feature (e.g. words)
#   in a lower-dimensional dense vector
my_feature_columns = []
for key in train_x.keys():
    my_feature_columns.append(tf.feature_column.numeric_column(key=key))
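
As a hedged aside (none of these columns are used by the model below; the boundaries, vocabulary reuse, and embedding dimension are made-up illustrative values), the alternatives listed above could be built like this:

# Illustrative sketch only -- the classifier in this notebook sticks to numeric columns.
sepal_length = tf.feature_column.numeric_column('SepalLength')

# Bucketize a numeric column into ranges (boundaries are example values).
sepal_length_buckets = tf.feature_column.bucketized_column(
    sepal_length, boundaries=[5.0, 6.0, 7.0])

# A string-keyed categorical column; the SPECIES list is reused purely for
# illustration (Species is the label in this notebook, not a feature).
species_cat = tf.feature_column.categorical_column_with_vocabulary_list(
    'Species', vocabulary_list=SPECIES)

# One-hot wrapper, required before a DNN can consume a categorical column.
species_one_hot = tf.feature_column.indicator_column(species_cat)

# Or embed the same categorical column in a 2-dimensional dense vector.
species_embedded = tf.feature_column.embedding_column(species_cat, dimension=2)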

In [13]:
def my_model(features, labels, mode, params):
    net = tf.feature_column.input_layer(features, params['feature_columns'])
    for units in params['hidden_units']:
        net = tf.layers.dense(net, units=units, activation=tf.nn.relu)
        
    # Compute logits: the output layer, one unit per class.
    logits = tf.layers.dense(net, params['n_classes'], activation=None)
    # Compute predictions for predict mode.
    predicted_classes = tf.argmax(logits, 1)
    if mode == tf.estimator.ModeKeys.PREDICT:
        predictions = {
            'class_ids': predicted_classes[:, tf.newaxis],
            'probabilities': tf.nn.softmax(logits),
            'logits': logits,
        }
        return tf.estimator.EstimatorSpec(mode, predictions=predictions)

    # Compute loss.
    loss = tf.losses.sparse_softmax_cross_entropy(labels=labels, logits=logits)
    # Compute evaluation metrics.
    accuracy = tf.metrics.accuracy(labels=labels,
                                   predictions=predicted_classes,
                                   name='acc_op')
    metrics = {'accuracy': accuracy}
    tf.summary.scalar('accuracy', accuracy[1])  # expose accuracy to TensorBoard; the Estimator writes summaries to its model_dir itself (FileWriter has no write() method, so no manual call here)

    if mode == tf.estimator.ModeKeys.EVAL:
        return tf.estimator.EstimatorSpec(
            mode, loss=loss, eval_metric_ops=metrics)

    # Create training op.
    assert mode == tf.estimator.ModeKeys.TRAIN
    optimizer = tf.train.AdagradOptimizer(learning_rate=0.1)  # Adagrad: adaptive gradient descent
    train_op = optimizer.minimize(loss, global_step=tf.train.get_global_step())  # global_step counts training steps, used for checkpointing and the steps= stopping criterion
    return tf.estimator.EstimatorSpec(mode, loss=loss, train_op=train_op)

classifier = tf.estimator.Estimator(
        model_fn=my_model,
        params={
            'feature_columns': my_feature_columns,
            # Two hidden layers of 10 nodes each.
            'hidden_units': [10, 10],
            # The model must choose between 3 classes.
            'n_classes': 3,
        })


INFO:tensorflow:Using default config.
WARNING:tensorflow:Using temporary folder as model directory: C:\Users\liori\AppData\Local\Temp\tmpcpv6acz8
INFO:tensorflow:Using config: {'_model_dir': 'C:\\Users\\liori\\AppData\\Local\\Temp\\tmpcpv6acz8', '_tf_random_seed': None, '_save_summary_steps': 100, '_save_checkpoints_steps': None, '_save_checkpoints_secs': 600, '_session_config': None, '_keep_checkpoint_max': 5, '_keep_checkpoint_every_n_hours': 10000, '_log_step_count_steps': 100, '_service': None, '_cluster_spec': <tensorflow.python.training.server_lib.ClusterSpec object at 0x0000026633BF42E8>, '_task_type': 'worker', '_task_id': 0, '_global_id_in_cluster': 0, '_master': '', '_evaluation_master': '', '_is_chief': True, '_num_ps_replicas': 0, '_num_worker_replicas': 1}
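
The warning above means checkpoints and event files land in a throwaway temp folder. A minimal sketch, assuming you want them in the c:/TensorFlowLogs/ directory from the first cell so TensorBoard can pick them up (model_dir is a standard Estimator argument; logged_classifier is a hypothetical alternative to the classifier above, not run here):

# Sketch: pin the model directory so checkpoints and summaries persist.
logged_classifier = tf.estimator.Estimator(
    model_fn=my_model,
    model_dir=r"c:/TensorFlowLogs/",
    params={
        'feature_columns': my_feature_columns,
        'hidden_units': [10, 10],
        'n_classes': 3,
    })

TensorBoard can then be pointed at the same directory: tensorboard --logdir c:/TensorFlowLogs/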

In [14]:
# Train the Model.
def get_train_sample(features, labels, batch_size=32):
    dataset = tf.data.Dataset.from_tensor_slices((dict(features), labels))
    # Shuffle, repeat, and batch the examples.
    dataset = dataset.shuffle(1000).repeat().batch(batch_size)
    return dataset

classifier.train(
    input_fn=lambda: get_train_sample(train_x, train_y, 32),
    steps=200)


INFO:tensorflow:Calling model_fn.
INFO:tensorflow:Done calling model_fn.
INFO:tensorflow:Create CheckpointSaverHook.
INFO:tensorflow:Graph was finalized.
INFO:tensorflow:Running local_init_op.
INFO:tensorflow:Done running local_init_op.
INFO:tensorflow:Saving checkpoints for 1 into C:\Users\liori\AppData\Local\Temp\tmpcpv6acz8\model.ckpt.
INFO:tensorflow:loss = 1.2177665, step = 1
INFO:tensorflow:global_step/sec: 954.559
INFO:tensorflow:loss = 0.13435638, step = 101 (0.106 sec)
INFO:tensorflow:Saving checkpoints for 200 into C:\Users\liori\AppData\Local\Temp\tmpcpv6acz8\model.ckpt.
INFO:tensorflow:Loss for final step: 0.14792043.
Out[14]:
<tensorflow.python.estimator.estimator.Estimator at 0x26633bf4668>
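
Because the dataset repeats indefinitely, the steps=200 argument is what ends training. A minimal alternative sketch, assuming you would rather bound training by epochs (get_train_sample_epochs and num_epochs=50 are hypothetical names and values; repeat(count) is the standard tf.data way to do this):

# Sketch: bound training by epochs instead of a fixed step count.
def get_train_sample_epochs(features, labels, batch_size=32, num_epochs=50):
    dataset = tf.data.Dataset.from_tensor_slices((dict(features), labels))
    # repeat(num_epochs) exhausts the input after the last epoch, which
    # ends training without needing a steps= argument.
    dataset = dataset.shuffle(1000).repeat(num_epochs).batch(batch_size)
    return dataset

# classifier.train(input_fn=lambda: get_train_sample_epochs(train_x, train_y))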

In [15]:
def evaluate_on_set(features, labels, default_set_size=None):
    """An input function for evaluation or prediction"""
    set_size = default_set_size or features.shape[0]
    features = dict(features)
    if labels is None:
        # No labels, use only features.
        inputs = features
    else:
        inputs = (features, labels)
    # Convert the inputs to a Dataset.
    dataset = tf.data.Dataset.from_tensor_slices(inputs)

    # Batch the examples: take the whole set in a single batch by default.
    dataset = dataset.batch(set_size)
    return dataset

# Evaluate the model.
eval_result = classifier.evaluate(
    input_fn=lambda: evaluate_on_set(test_x, test_y))
print('\nTest set accuracy: {accuracy:0.3f}\n'.format(**eval_result))


INFO:tensorflow:Calling model_fn.
INFO:tensorflow:Done calling model_fn.
INFO:tensorflow:Starting evaluation at 2018-04-05-17:35:36
INFO:tensorflow:Graph was finalized.
INFO:tensorflow:Restoring parameters from C:\Users\liori\AppData\Local\Temp\tmpcpv6acz8\model.ckpt-200
INFO:tensorflow:Running local_init_op.
INFO:tensorflow:Done running local_init_op.
INFO:tensorflow:Finished evaluation at 2018-04-05-17:35:36
INFO:tensorflow:Saving dict for global step 200: accuracy = 0.96666664, global_step = 200, loss = 0.106060505

Test set accuracy: 0.967
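
evaluate_on_set already handles the labels=None case, so the same input function can drive prediction. A minimal sketch, where the three flowers in predict_x are made-up measurements rather than rows from the dataset:

# Sketch: predict on a few made-up measurements via the labels=None branch
# of evaluate_on_set.
predict_x = pd.DataFrame({
    'SepalLength': [5.1, 5.9, 6.9],
    'SepalWidth': [3.3, 3.0, 3.1],
    'PetalLength': [1.7, 4.2, 5.4],
    'PetalWidth': [0.5, 1.5, 2.1],
})

predictions = classifier.predict(
    input_fn=lambda: evaluate_on_set(predict_x, labels=None))

# Each prediction dict carries the keys built in my_model's PREDICT branch.
for pred in predictions:
    class_id = pred['class_ids'][0]
    probability = pred['probabilities'][class_id]
    print('Prediction: {} ({:.1f}%)'.format(SPECIES[class_id],
                                            100 * probability))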


In [ ]: