In [1]:
import tensorflow as tf
import pandas as pd
train_writer = tf.summary.FileWriter(logdir=r"c:/TensorFlowLogs/") # FOR TENSORBOARD
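A usage note, not in the original notebook: the Estimator defined later also writes its own event files to its model_dir (a temporary directory unless one is passed to the constructor), so either log location can be opened in TensorBoard.
In [ ]:
# Hedged sketch: launch TensorBoard from a shell and point it at a log directory, e.g.
#   tensorboard --logdir=c:/TensorFlowLogs/
# then browse to http://localhost:6006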
In [2]:
# Fetch the data
TRAIN_URL = "http://download.tensorflow.org/data/iris_training.csv"
TEST_URL = "http://download.tensorflow.org/data/iris_test.csv"
CSV_COLUMN_NAMES = ['SepalLength', 'SepalWidth',
                    'PetalLength', 'PetalWidth', 'Species']
SPECIES = ['Setosa', 'Versicolor', 'Virginica']

def maybe_download():
    train_path = tf.keras.utils.get_file(TRAIN_URL.split('/')[-1], TRAIN_URL)
    test_path = tf.keras.utils.get_file(TEST_URL.split('/')[-1], TEST_URL)
    return train_path, test_path

def load_data(y_name='Species'):
    """Returns the iris dataset as (train_x, train_y), (test_x, test_y)."""
    train_path, test_path = maybe_download()

    train = pd.read_csv(train_path, names=CSV_COLUMN_NAMES, header=0)
    train_x, train_y = train, train.pop(y_name)

    test = pd.read_csv(test_path, names=CSV_COLUMN_NAMES, header=0)
    test_x, test_y = test, test.pop(y_name)

    return (train_x, train_y), (test_x, test_y)
(train_x, train_y), (test_x, test_y) = load_data()
# Feature columns describe how the model should use the raw input.
# Some useful options:
#   bucketized_column - bucket a numeric column into ranges
#   categorical_column_with_identity / categorical_column_with_vocabulary_list -
#       enum-like features (the first keyed by integer id, the second by string value)
#   categorical_column_with_hash_bucket - hash values into a fixed number of buckets
#       (works well when there are many categories)
#   crossed_column - feature crosses, e.g. latitude x longitude pairs
# Categorical columns must be converted to numeric before a DNN can use them:
#   indicator_column - one-hot encoding
#   embedding_column - embed a categorical feature (e.g. words) into a lower-dimensional dense vector
# Iris only needs plain numeric columns; the other types are sketched in the next cell.
my_feature_columns = []
for key in train_x.keys():
    my_feature_columns.append(tf.feature_column.numeric_column(key=key))
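Not part of the original notebook: a minimal sketch of the feature-column types listed in the comments above. Every column name, boundary, and vocabulary value here is made up for illustration; none of it belongs to the Iris data.
In [ ]:
# Hypothetical columns, purely to illustrate the APIs mentioned above.
age = tf.feature_column.numeric_column('age')
age_buckets = tf.feature_column.bucketized_column(age, boundaries=[18, 35, 65])

user_id = tf.feature_column.categorical_column_with_identity('user_id', num_buckets=10)

city = tf.feature_column.categorical_column_with_vocabulary_list(
    'city', vocabulary_list=['London', 'Paris', 'Tokyo'])
city_one_hot = tf.feature_column.indicator_column(city)                # one-hot
city_embedded = tf.feature_column.embedding_column(city, dimension=2)  # dense embedding

zip_hashed = tf.feature_column.categorical_column_with_hash_bucket('zip', hash_bucket_size=100)

# crossed_column expects categorical (here: bucketized) inputs, not raw numeric columns.
lat_buckets = tf.feature_column.bucketized_column(
    tf.feature_column.numeric_column('lat'), boundaries=[0.0])
lon_buckets = tf.feature_column.bucketized_column(
    tf.feature_column.numeric_column('lon'), boundaries=[0.0])
lat_x_lon = tf.feature_column.crossed_column([lat_buckets, lon_buckets], hash_bucket_size=100)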
In [13]:
def my_model(features, labels, mode, params):
    """Custom model_fn: a DNN with the hidden layers given in params."""
    net = tf.feature_column.input_layer(features, params['feature_columns'])
    for units in params['hidden_units']:
        net = tf.layers.dense(net, units=units, activation=tf.nn.relu)

    # Compute logits: the output layer, one unit per class, no activation.
    logits = tf.layers.dense(net, params['n_classes'], activation=None)

    # Compute predictions for PREDICT mode.
    predicted_classes = tf.argmax(logits, 1)
    if mode == tf.estimator.ModeKeys.PREDICT:
        predictions = {
            'class_ids': predicted_classes[:, tf.newaxis],
            'probabilities': tf.nn.softmax(logits),
            'logits': logits,
        }
        return tf.estimator.EstimatorSpec(mode, predictions=predictions)

    # Compute loss.
    loss = tf.losses.sparse_softmax_cross_entropy(labels=labels, logits=logits)

    # Compute evaluation metrics.
    accuracy = tf.metrics.accuracy(labels=labels,
                                   predictions=predicted_classes,
                                   name='acc_op')
    metrics = {'accuracy': accuracy}
    # Human-friendly scalar for TensorBoard; the Estimator writes the summary
    # event files to its model_dir automatically.
    tf.summary.scalar('accuracy', accuracy[1])

    if mode == tf.estimator.ModeKeys.EVAL:
        return tf.estimator.EstimatorSpec(
            mode, loss=loss, eval_metric_ops=metrics)

    # Create the training op.
    assert mode == tf.estimator.ModeKeys.TRAIN
    optimizer = tf.train.AdagradOptimizer(learning_rate=0.1)  # Adagrad: adaptive-learning-rate gradient descent
    # global_step counts training steps; the Estimator's hooks use it to know when to stop.
    train_op = optimizer.minimize(loss, global_step=tf.train.get_global_step())
    return tf.estimator.EstimatorSpec(mode, loss=loss, train_op=train_op)
classifier = tf.estimator.Estimator(
    model_fn=my_model,
    params={
        'feature_columns': my_feature_columns,
        # Two hidden layers of 10 nodes each.
        'hidden_units': [10, 10],
        # The model must choose between 3 classes.
        'n_classes': 3,
    })
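For comparison (not in the original notebook), the same network could be built with the pre-made DNNClassifier; the custom model_fn above mainly makes the PREDICT/EVAL/TRAIN branches explicit. A hedged sketch:
In [ ]:
# Canned-Estimator equivalent of my_model: same feature columns, hidden layers and class count.
canned_classifier = tf.estimator.DNNClassifier(
    feature_columns=my_feature_columns,
    hidden_units=[10, 10],
    n_classes=3)
# It would be trained and evaluated with the same input functions used below.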
In [14]:
# Train the Model.
def get_train_sample(features, labels, batch_size=32):
    dataset = tf.data.Dataset.from_tensor_slices((dict(features), labels))
    # Shuffle, repeat, and batch the examples.
    dataset = dataset.shuffle(1000).repeat().batch(batch_size)
    return dataset

classifier.train(
    input_fn=lambda: get_train_sample(train_x, train_y, 32),
    steps=200)
Out[14]:
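A hedged sketch, not in the original notebook: peeking at one batch from the training input function to see the structure it feeds the Estimator, a dict of feature tensors plus a label tensor, each with 32 elements.
In [ ]:
# Build the dataset, pull one batch, and inspect its shapes (TF 1.x iterator API).
peek_dataset = get_train_sample(train_x, train_y, 32)
peek_features, peek_labels = peek_dataset.make_one_shot_iterator().get_next()
with tf.Session() as sess:
    f, l = sess.run([peek_features, peek_labels])
print({name: values.shape for name, values in f.items()})  # each feature -> (32,)
print(l.shape)                                              # labels -> (32,)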
In [15]:
def evaluate_on_set(features, labels, default_set_size=None):
    """An input function for evaluation or prediction."""
    set_size = default_set_size or features.shape[0]
    features = dict(features)
    if labels is None:
        # No labels, use only features.
        inputs = features
    else:
        inputs = (features, labels)

    # Convert the inputs to a Dataset.
    dataset = tf.data.Dataset.from_tensor_slices(inputs)

    # Batch the examples - take the whole set.
    dataset = dataset.batch(set_size)
    return dataset

# Evaluate the model.
eval_result = classifier.evaluate(
    input_fn=lambda: evaluate_on_set(test_x, test_y))

print('\nTest set accuracy: {accuracy:0.3f}\n'.format(**eval_result))
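The labels=None branch of evaluate_on_set makes the same input function usable for prediction. A hedged sketch, not in the original notebook; the three sets of measurements below are made up for illustration.
In [ ]:
# Hypothetical flowers to classify (values invented for the example).
predict_x = pd.DataFrame({
    'SepalLength': [5.1, 5.9, 6.9],
    'SepalWidth':  [3.3, 3.0, 3.1],
    'PetalLength': [1.7, 4.2, 5.4],
    'PetalWidth':  [0.5, 1.5, 2.1],
})

predictions = classifier.predict(
    input_fn=lambda: evaluate_on_set(predict_x, labels=None))

for pred in predictions:
    class_id = pred['class_ids'][0]
    probability = pred['probabilities'][class_id]
    print('Predicted {} with probability {:.1%}'.format(SPECIES[class_id], probability))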
In [ ]: