# In [1]:
import os

# Pin this process to physical GPU 2. Ordering devices by PCI bus ID keeps
# the index stable across driver versions; this must run before TensorFlow
# initializes CUDA, hence it sits at the very top of the script.
os.environ.update({
    "CUDA_DEVICE_ORDER": "PCI_BUS_ID",
    "CUDA_VISIBLE_DEVICES": "2",
})
# In [ ]:
from tensorflow.contrib.learn.python.learn import metric_spec
from tensorflow.contrib.learn.python.learn.estimators import estimator
from tensorflow.contrib.tensor_forest.client import eval_metrics
from tensorflow.contrib.tensor_forest.client import random_forest
from tensorflow.contrib.tensor_forest.python import tensor_forest
from tensorflow.examples.tutorials.mnist import input_data
#from tensorflow.python.platform import app
# --- Run configuration for the MNIST random-forest demo -------------------
FLAGS = None                  # placeholder for a flags/argparse object (unused here)
batch_size = 128              # examples per training/eval batch
model_dir = '/tmp/mnist/'     # checkpoint and summary directory
num_trees = 10                # number of trees in the forest
max_nodes = 10                # cap on nodes per tree
use_training_loss = True      # if True, train with TrainingLossForest
def build_estimator(model_dir):
    """Build a TensorForest estimator for 10-class, 784-feature MNIST data.

    Args:
        model_dir: Directory where checkpoints and summaries are written.

    Returns:
        An `estimator.SKCompat` wrapping a `TensorForestEstimator`, giving a
        scikit-learn-style fit/score API that batches in-memory arrays.
    """
    # Forest size comes from the module-level num_trees / max_nodes settings.
    params = tensor_forest.ForestHParams(
        num_classes=10, num_features=784,
        num_trees=num_trees, max_nodes=max_nodes)
    # TrainingLossForest optimizes a training loss directly; otherwise fall
    # back to the plain RandomForestGraphs builder.
    graph_builder_class = (
        tensor_forest.TrainingLossForest
        if use_training_loss
        else tensor_forest.RandomForestGraphs)
    # SKCompat conveniently splits in-memory data like MNIST into batches.
    forest = random_forest.TensorForestEstimator(
        params, graph_builder_class=graph_builder_class,
        model_dir=model_dir)
    return estimator.SKCompat(forest)
# Train the model. (This was a floating string literal — a no-op expression
# statement, not a docstring of anything — so it is now a real comment.)
est = build_estimator(model_dir)
# one_hot=False: labels come back as integer class ids, which is what the
# forest estimator expects.
mnist = input_data.read_data_sets('/tmp/mnist/', one_hot=False)
est.fit(x=mnist.train.images, y=mnist.train.labels, batch_size=batch_size)
# In [8]:
# Evaluate on the held-out test set, reporting streaming accuracy.
metric_name = 'accuracy'
accuracy_spec = metric_spec.MetricSpec(
    eval_metrics.get_metric(metric_name),
    prediction_key=eval_metrics.get_prediction_key(metric_name))
results = est.score(
    x=mnist.test.images, y=mnist.test.labels,
    batch_size=batch_size,
    metrics={metric_name: accuracy_spec})
# Print metrics in a stable, alphabetical order.
for key in sorted(results):
    print('%s: %s' % (key, results[key]))
# In [ ]: