In [86]:
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import

In [87]:
# We're using pandas to read the CSV file. This is easy for small datasets, but for large and complex datasets,
# tensorflow parsing and processing functions are more powerful
import pandas as pd
import numpy as np

# TensorFlow
import tensorflow as tf
print('please make sure that version >= 1.2:')
print(tf.__version__)
print('@monteirom: I made changes so it also works with 1.1.0 that is the current pip install version')
print('@monteirom: The lines that were changed have @1.2 as comment')

# Layers that will define the features
#
# real_value_column: real values, float32
# sparse_column_with_hash_bucket: Use this when your sparse features are in string or integer format, 
#                                 but you don't have a vocab file that maps each value to an integer ID. 
#                                 output_id = Hash(input_feature_string) % bucket_size
# sparse_column_with_keys: Look up logic is as follows: 
#                          lookup_id = index_of_feature_in_keys if feature in keys else default_value.
#                          You should use this when you know the vocab file for the feature
# one_hot_column: Creates an _OneHotColumn for a one-hot or multi-hot repr in a DNN.
#                 The input can be a _SparseColumn which is created by `sparse_column_with_*`
#                 or crossed_column functions
from tensorflow.contrib.layers import real_valued_column, sparse_column_with_keys, sparse_column_with_hash_bucket
from tensorflow.contrib.layers import one_hot_column


please make sure that version >= 1.2:
1.2.0-rc1
@monteirom: I made changes so it also works with 1.1.0 that is the current pip install version
@monteirom: The lines that were changed have @1.2 as comment

Please Download

https://archive.ics.uci.edu/ml/machine-learning-databases/autos/imports-85.data And move it to data/

So: data/imports-85.data is expected to exist!

Preparing the data


In [88]:
# The CSV file does not have a header, so we have to fill in column names.
names = [
    'symboling', 
    'normalized-losses', 
    'make', 
    'fuel-type', 
    'aspiration',
    'num-of-doors',
    'body-style',
    'drive-wheels',
    'engine-location',
    'wheel-base',
    'length',
    'width',
    'height',
    'curb-weight',
    'engine-type',
    'num-of-cylinders',
    'engine-size',
    'fuel-system',
    'bore',
    'stroke',
    'compression-ratio',
    'horsepower',
    'peak-rpm',
    'city-mpg',
    'highway-mpg',
    'price',
]

# We also have to specify dtypes.
dtypes = {
    'symboling': np.int32, 
    'normalized-losses': np.float32, 
    'make': str, 
    'fuel-type': str, 
    'aspiration': str,
    'num-of-doors': str,
    'body-style': str,
    'drive-wheels': str,
    'engine-location': str,
    'wheel-base': np.float32,
    'length': np.float32,
    'width': np.float32,
    'height': np.float32,
    'curb-weight': np.float32,
    'engine-type': str,
    'num-of-cylinders': str,
    'engine-size': np.float32,
    'fuel-system': str,
    'bore': np.float32,
    'stroke': np.float32,
    'compression-ratio': np.float32,
    'horsepower': np.float32,
    'peak-rpm': np.float32,
    'city-mpg': np.float32,
    'highway-mpg': np.float32,
    'price': np.float32,    
}

In [89]:
# Read the file.
df = pd.read_csv('data/imports-85.data', names=names, dtype=dtypes, na_values='?')

In [90]:
# Some rows don't have price data, we can't use those.
df = df.dropna(axis='rows', how='any', subset=['price'])

Dealing with NaN

There are many approaches possibles for NaN values in the data, here we just changing it to " " or 0 depending of the data type. This is the simplest way, but for sure is not the best in most cases, so in practice you should try some other ways to use the NaN data. Some approaches are:

  • use the mean of the row
  • use the mean of the column
  • if/else substituion (e.g if a lot of NaN do this, else do this other thing)
  • ...
  • google others

In [91]:
# Fill missing values in continuous columns with zeros instead of NaN.
float_columns = [k for k,v in dtypes.items() if v == np.float32]
df[float_columns] = df[float_columns].fillna(value=0., axis='columns')

# Fill missing values in continuous columns with '' instead of NaN (NaN mixed with strings is very bad for us).
string_columns = [k for k,v in dtypes.items() if v == str]
df[string_columns] = df[string_columns].fillna(value='', axis='columns')

Standardize features


In [92]:
# We have too many variables let's just use some of them
df = df[['num-of-doors','num-of-cylinders', 'horsepower', 'make', 'price', 'length', 'height', 'width']]

In [93]:
# Since we're possibly dealing with parameters of different units and scales. We'll need to rescale our data.
# There are two main ways to do it: 
# * Normalization, which scales all numeric variables in the range [0,1].
#   Example:
# * Standardization, it will then transform it to have zero mean and unit variance.
#   Example: 
# Which is better? It deppends of your data and your features.
# But one disadvantage of normalization over standardization is that it loses 
# some information in the data. Since normalization loses more info it can make harder
# for gradient descent to converse, so we'll use standardization.
# In practice: please analyse your data and see what gives you better results.

def std(x):
    return (x - x.mean()) / x.std()

before = df.length[0]
df.length = std(df.length)
df.width = std(df.width)
df.height = std(df.height)
df.horsepower = std(df.horsepower)

after = df.length[0]
print('before:', before, 'after:', after)


before: 168.8 after: -0.438314

Separating training data from testing data


In [94]:
TRAINING_DATA_SIZE = 160
TEST_DATA_SIZE = 10

LABEL = 'price'

# Split the data into a training set, eval set and test set
training_data = df[:TRAINING_DATA_SIZE]
eval_data = df[TRAINING_DATA_SIZE: TRAINING_DATA_SIZE + TEST_DATA_SIZE]
test_data = df[TRAINING_DATA_SIZE + TEST_DATA_SIZE:]

# Separate input features from labels
training_label = training_data.pop(LABEL)
eval_label = eval_data.pop(LABEL)
test_label = test_data.pop(LABEL)

Using Tensorflow

Defining input function


In [95]:
BATCH_SIZE = 64

# Make input function for training: 
#   num_epochs=None -> will cycle through input data forever
#   shuffle=True -> randomize order of input data
training_input_fn = tf.estimator.inputs.pandas_input_fn(x=training_data,
                                                        y=training_label,
                                                        batch_size=BATCH_SIZE,
                                                        shuffle=True,
                                                        num_epochs=None)

# Make input function for evaluation:
# shuffle=False -> do not randomize input data
eval_input_fn = tf.estimator.inputs.pandas_input_fn(x=eval_data,
                                                    y=eval_label,
                                                    batch_size=BATCH_SIZE,
                                                    shuffle=False)

# Make input function for testing:
# shuffle=False -> do not randomize input data
eval_input_fn = tf.estimator.inputs.pandas_input_fn(x=test_data,
                                                    y=test_label,
                                                    batch_size=1,
                                                    shuffle=False)

Defining a Linear Estimator


In [100]:
# Describe how the model should interpret the inputs. The names of the feature columns have to match the names
# of the series in the dataframe.

# @1.2.0 tf.feature_column.numeric_column -> tf.contrib.layers.real_valued_column
horsepower = real_valued_column('horsepower')
width = real_valued_column('width')
height = real_valued_column('height')
length = real_valued_column('length')

# @1.2.0 tf.feature_column.categorical_column_with_hash_bucket -> tf.contrib.layers.sparse_column_with_hash_bucket
make = sparse_column_with_hash_bucket('make', 50)

# @1.2.0 tf.feature_column.categorical_column_with_vocabulary_list -> tf.contrib.layers.sparse_column_with_keys
fuel_type = sparse_column_with_keys('fuel-type', keys=['diesel', 'gas'])
num_of_doors = sparse_column_with_keys('num-of-doors', keys=['two', 'four'])
num_of_cylinders = sparse_column_with_keys('num-of-cylinders', ['eight', 'five', 'four', 'six', 'three', 'twelve', 'two'])

linear_features = [horsepower, make, num_of_doors, num_of_cylinders, length, width, height]

In [101]:
regressor = tf.contrib.learn.LinearRegressor(feature_columns=linear_features, model_dir='tensorboard/linear_regressor/')


INFO:tensorflow:Using default config.
INFO:tensorflow:Using config: {'_save_checkpoints_steps': None, '_tf_config': gpu_options {
  per_process_gpu_memory_fraction: 1
}
, '_task_id': 0, '_keep_checkpoint_max': 5, '_save_checkpoints_secs': 600, '_session_config': None, '_model_dir': 'tensorboard/linear_regressor/', '_master': '', '_tf_random_seed': None, '_environment': 'local', '_num_ps_replicas': 0, '_num_worker_replicas': 0, '_task_type': None, '_keep_checkpoint_every_n_hours': 10000, '_cluster_spec': <tensorflow.python.training.server_lib.ClusterSpec object at 0x7faf67544b70>, '_save_summary_steps': 100, '_evaluation_master': '', '_is_chief': True}

Training


In [102]:
regressor.fit(input_fn=training_input_fn, steps=10000)


WARNING:tensorflow:Rank of input Tensor (1) should be the same as output_rank (2) for column. Will attempt to expand dims. It is highly recommended that you resize your input, as this behavior may change.
WARNING:tensorflow:Rank of input Tensor (1) should be the same as output_rank (2) for column. Will attempt to expand dims. It is highly recommended that you resize your input, as this behavior may change.
WARNING:tensorflow:Rank of input Tensor (1) should be the same as output_rank (2) for column. Will attempt to expand dims. It is highly recommended that you resize your input, as this behavior may change.
WARNING:tensorflow:From /usr/local/lib/python3.4/dist-packages/tensorflow/contrib/learn/python/learn/estimators/head.py:625: scalar_summary (from tensorflow.python.ops.logging_ops) is deprecated and will be removed after 2016-11-30.
Instructions for updating:
Please switch to tf.summary.scalar. Note that tf.summary.scalar uses the node name instead of the tag. This means that TensorFlow will automatically de-duplicate summary names based on the scope they are created in. Also, passing a tensor or list of tags to a scalar summary op is no longer supported.
INFO:tensorflow:Create CheckpointSaverHook.
INFO:tensorflow:Saving checkpoints for 1 into tensorboard/linear_regressor/model.ckpt.
INFO:tensorflow:loss = 2.59493e+08, step = 1
INFO:tensorflow:global_step/sec: 513.702
INFO:tensorflow:loss = 2.42048e+08, step = 101 (0.198 sec)
INFO:tensorflow:global_step/sec: 744.834
INFO:tensorflow:loss = 2.35011e+08, step = 201 (0.135 sec)
INFO:tensorflow:global_step/sec: 687.575
INFO:tensorflow:loss = 2.44327e+08, step = 301 (0.144 sec)
INFO:tensorflow:global_step/sec: 823.258
INFO:tensorflow:loss = 1.79146e+08, step = 401 (0.121 sec)
INFO:tensorflow:global_step/sec: 824.485
INFO:tensorflow:loss = 2.55799e+08, step = 501 (0.120 sec)
INFO:tensorflow:global_step/sec: 752.257
INFO:tensorflow:loss = 2.96889e+08, step = 601 (0.134 sec)
INFO:tensorflow:global_step/sec: 832.816
INFO:tensorflow:loss = 3.09307e+08, step = 701 (0.118 sec)
INFO:tensorflow:global_step/sec: 718.056
INFO:tensorflow:loss = 2.83337e+08, step = 801 (0.139 sec)
INFO:tensorflow:global_step/sec: 833.19
INFO:tensorflow:loss = 2.10819e+08, step = 901 (0.120 sec)
INFO:tensorflow:global_step/sec: 726.727
INFO:tensorflow:loss = 2.04816e+08, step = 1001 (0.139 sec)
INFO:tensorflow:global_step/sec: 672.972
INFO:tensorflow:loss = 2.82682e+08, step = 1101 (0.150 sec)
INFO:tensorflow:global_step/sec: 653.84
INFO:tensorflow:loss = 2.09374e+08, step = 1201 (0.152 sec)
INFO:tensorflow:global_step/sec: 768.883
INFO:tensorflow:loss = 2.12449e+08, step = 1301 (0.130 sec)
INFO:tensorflow:global_step/sec: 797.953
INFO:tensorflow:loss = 1.92286e+08, step = 1401 (0.125 sec)
INFO:tensorflow:global_step/sec: 773.69
INFO:tensorflow:loss = 3.47359e+08, step = 1501 (0.128 sec)
INFO:tensorflow:global_step/sec: 827.859
INFO:tensorflow:loss = 2.94114e+08, step = 1601 (0.122 sec)
INFO:tensorflow:global_step/sec: 743.422
INFO:tensorflow:loss = 3.01521e+08, step = 1701 (0.134 sec)
INFO:tensorflow:global_step/sec: 825.342
INFO:tensorflow:loss = 2.10661e+08, step = 1801 (0.120 sec)
INFO:tensorflow:global_step/sec: 711.548
INFO:tensorflow:loss = 2.28232e+08, step = 1901 (0.143 sec)
INFO:tensorflow:global_step/sec: 709.583
INFO:tensorflow:loss = 2.49248e+08, step = 2001 (0.139 sec)
INFO:tensorflow:global_step/sec: 708.353
INFO:tensorflow:loss = 1.88631e+08, step = 2101 (0.140 sec)
INFO:tensorflow:global_step/sec: 766.686
INFO:tensorflow:loss = 2.5245e+08, step = 2201 (0.130 sec)
INFO:tensorflow:global_step/sec: 843.11
INFO:tensorflow:loss = 2.74482e+08, step = 2301 (0.119 sec)
INFO:tensorflow:global_step/sec: 797.668
INFO:tensorflow:loss = 3.02005e+08, step = 2401 (0.125 sec)
INFO:tensorflow:global_step/sec: 769.467
INFO:tensorflow:loss = 2.83143e+08, step = 2501 (0.131 sec)
INFO:tensorflow:global_step/sec: 748.361
INFO:tensorflow:loss = 2.65891e+08, step = 2601 (0.134 sec)
INFO:tensorflow:global_step/sec: 707.099
INFO:tensorflow:loss = 1.98249e+08, step = 2701 (0.143 sec)
INFO:tensorflow:global_step/sec: 722.949
INFO:tensorflow:loss = 2.67378e+08, step = 2801 (0.137 sec)
INFO:tensorflow:global_step/sec: 833.043
INFO:tensorflow:loss = 2.40542e+08, step = 2901 (0.120 sec)
INFO:tensorflow:global_step/sec: 855.335
INFO:tensorflow:loss = 2.32458e+08, step = 3001 (0.117 sec)
INFO:tensorflow:global_step/sec: 827.752
INFO:tensorflow:loss = 2.22932e+08, step = 3101 (0.122 sec)
INFO:tensorflow:global_step/sec: 823.763
INFO:tensorflow:loss = 2.85029e+08, step = 3201 (0.120 sec)
INFO:tensorflow:global_step/sec: 886.22
INFO:tensorflow:loss = 2.4638e+08, step = 3301 (0.112 sec)
INFO:tensorflow:global_step/sec: 844.936
INFO:tensorflow:loss = 2.72305e+08, step = 3401 (0.119 sec)
INFO:tensorflow:global_step/sec: 635.667
INFO:tensorflow:loss = 2.84096e+08, step = 3501 (0.158 sec)
INFO:tensorflow:global_step/sec: 655.814
INFO:tensorflow:loss = 1.99117e+08, step = 3601 (0.153 sec)
INFO:tensorflow:global_step/sec: 771.2
INFO:tensorflow:loss = 2.14304e+08, step = 3701 (0.129 sec)
INFO:tensorflow:global_step/sec: 798.976
INFO:tensorflow:loss = 3.32135e+08, step = 3801 (0.125 sec)
INFO:tensorflow:global_step/sec: 735.718
INFO:tensorflow:loss = 2.27205e+08, step = 3901 (0.136 sec)
INFO:tensorflow:global_step/sec: 628.634
INFO:tensorflow:loss = 2.03773e+08, step = 4001 (0.160 sec)
INFO:tensorflow:global_step/sec: 654.16
INFO:tensorflow:loss = 2.15381e+08, step = 4101 (0.153 sec)
INFO:tensorflow:global_step/sec: 668.065
INFO:tensorflow:loss = 2.63055e+08, step = 4201 (0.151 sec)
INFO:tensorflow:global_step/sec: 674.845
INFO:tensorflow:loss = 2.20646e+08, step = 4301 (0.146 sec)
INFO:tensorflow:global_step/sec: 621.045
INFO:tensorflow:loss = 3.02877e+08, step = 4401 (0.164 sec)
INFO:tensorflow:global_step/sec: 632.865
INFO:tensorflow:loss = 2.75835e+08, step = 4501 (0.157 sec)
INFO:tensorflow:global_step/sec: 711.414
INFO:tensorflow:loss = 2.20396e+08, step = 4601 (0.139 sec)
INFO:tensorflow:global_step/sec: 738.189
INFO:tensorflow:loss = 2.86457e+08, step = 4701 (0.136 sec)
INFO:tensorflow:global_step/sec: 676.309
INFO:tensorflow:loss = 2.37476e+08, step = 4801 (0.148 sec)
INFO:tensorflow:global_step/sec: 685.468
INFO:tensorflow:loss = 2.08786e+08, step = 4901 (0.144 sec)
INFO:tensorflow:global_step/sec: 741.955
INFO:tensorflow:loss = 1.92465e+08, step = 5001 (0.136 sec)
INFO:tensorflow:global_step/sec: 715.902
INFO:tensorflow:loss = 2.23654e+08, step = 5101 (0.140 sec)
INFO:tensorflow:global_step/sec: 704.612
INFO:tensorflow:loss = 2.53477e+08, step = 5201 (0.142 sec)
INFO:tensorflow:global_step/sec: 685.031
INFO:tensorflow:loss = 2.66573e+08, step = 5301 (0.146 sec)
INFO:tensorflow:global_step/sec: 755.733
INFO:tensorflow:loss = 2.77477e+08, step = 5401 (0.132 sec)
INFO:tensorflow:global_step/sec: 669.653
INFO:tensorflow:loss = 3.05158e+08, step = 5501 (0.148 sec)
INFO:tensorflow:global_step/sec: 664.346
INFO:tensorflow:loss = 2.8148e+08, step = 5601 (0.151 sec)
INFO:tensorflow:global_step/sec: 701.741
INFO:tensorflow:loss = 2.33417e+08, step = 5701 (0.143 sec)
INFO:tensorflow:global_step/sec: 725.72
INFO:tensorflow:loss = 1.87216e+08, step = 5801 (0.138 sec)
INFO:tensorflow:global_step/sec: 654.661
INFO:tensorflow:loss = 1.3973e+08, step = 5901 (0.152 sec)
INFO:tensorflow:global_step/sec: 621.231
INFO:tensorflow:loss = 3.39514e+08, step = 6001 (0.162 sec)
INFO:tensorflow:global_step/sec: 602.049
INFO:tensorflow:loss = 2.48877e+08, step = 6101 (0.166 sec)
INFO:tensorflow:global_step/sec: 730.538
INFO:tensorflow:loss = 2.49429e+08, step = 6201 (0.138 sec)
INFO:tensorflow:global_step/sec: 726.021
INFO:tensorflow:loss = 2.78524e+08, step = 6301 (0.136 sec)
INFO:tensorflow:global_step/sec: 608.588
INFO:tensorflow:loss = 2.63528e+08, step = 6401 (0.163 sec)
INFO:tensorflow:global_step/sec: 768.71
INFO:tensorflow:loss = 2.81293e+08, step = 6501 (0.131 sec)
INFO:tensorflow:global_step/sec: 763.739
INFO:tensorflow:loss = 2.44084e+08, step = 6601 (0.133 sec)
INFO:tensorflow:global_step/sec: 755.37
INFO:tensorflow:loss = 2.50055e+08, step = 6701 (0.130 sec)
INFO:tensorflow:global_step/sec: 752.313
INFO:tensorflow:loss = 2.21139e+08, step = 6801 (0.133 sec)
INFO:tensorflow:global_step/sec: 774.118
INFO:tensorflow:loss = 3.46903e+08, step = 6901 (0.129 sec)
INFO:tensorflow:global_step/sec: 742.067
INFO:tensorflow:loss = 3.03337e+08, step = 7001 (0.135 sec)
INFO:tensorflow:global_step/sec: 737.658
INFO:tensorflow:loss = 2.33171e+08, step = 7101 (0.136 sec)
INFO:tensorflow:global_step/sec: 746.162
INFO:tensorflow:loss = 2.02141e+08, step = 7201 (0.134 sec)
INFO:tensorflow:global_step/sec: 644.51
INFO:tensorflow:loss = 3.23866e+08, step = 7301 (0.156 sec)
INFO:tensorflow:global_step/sec: 748.617
INFO:tensorflow:loss = 2.31643e+08, step = 7401 (0.132 sec)
INFO:tensorflow:global_step/sec: 758.476
INFO:tensorflow:loss = 2.89519e+08, step = 7501 (0.132 sec)
INFO:tensorflow:global_step/sec: 802.441
INFO:tensorflow:loss = 2.23578e+08, step = 7601 (0.125 sec)
INFO:tensorflow:global_step/sec: 838.834
INFO:tensorflow:loss = 2.81537e+08, step = 7701 (0.119 sec)
INFO:tensorflow:global_step/sec: 841.294
INFO:tensorflow:loss = 1.60254e+08, step = 7801 (0.118 sec)
INFO:tensorflow:global_step/sec: 826.338
INFO:tensorflow:loss = 3.09894e+08, step = 7901 (0.121 sec)
INFO:tensorflow:global_step/sec: 849.904
INFO:tensorflow:loss = 2.2752e+08, step = 8001 (0.118 sec)
INFO:tensorflow:global_step/sec: 837.425
INFO:tensorflow:loss = 2.81988e+08, step = 8101 (0.119 sec)
INFO:tensorflow:global_step/sec: 813.826
INFO:tensorflow:loss = 2.47757e+08, step = 8201 (0.123 sec)
INFO:tensorflow:global_step/sec: 736.863
INFO:tensorflow:loss = 1.83806e+08, step = 8301 (0.137 sec)
INFO:tensorflow:global_step/sec: 752.277
INFO:tensorflow:loss = 2.35405e+08, step = 8401 (0.132 sec)
INFO:tensorflow:global_step/sec: 779.909
INFO:tensorflow:loss = 2.49566e+08, step = 8501 (0.128 sec)
INFO:tensorflow:global_step/sec: 792.817
INFO:tensorflow:loss = 2.83184e+08, step = 8601 (0.127 sec)
INFO:tensorflow:global_step/sec: 770.227
INFO:tensorflow:loss = 2.14609e+08, step = 8701 (0.129 sec)
INFO:tensorflow:global_step/sec: 786.916
INFO:tensorflow:loss = 2.05047e+08, step = 8801 (0.127 sec)
INFO:tensorflow:global_step/sec: 764.332
INFO:tensorflow:loss = 2.36374e+08, step = 8901 (0.131 sec)
INFO:tensorflow:global_step/sec: 857.398
INFO:tensorflow:loss = 2.206e+08, step = 9001 (0.118 sec)
INFO:tensorflow:global_step/sec: 723.33
INFO:tensorflow:loss = 2.28052e+08, step = 9101 (0.137 sec)
INFO:tensorflow:global_step/sec: 841.887
INFO:tensorflow:loss = 2.89942e+08, step = 9201 (0.119 sec)
INFO:tensorflow:global_step/sec: 720.547
INFO:tensorflow:loss = 2.20093e+08, step = 9301 (0.138 sec)
INFO:tensorflow:global_step/sec: 850.308
INFO:tensorflow:loss = 1.75145e+08, step = 9401 (0.117 sec)
INFO:tensorflow:global_step/sec: 876.639
INFO:tensorflow:loss = 2.48838e+08, step = 9501 (0.114 sec)
INFO:tensorflow:global_step/sec: 869.603
INFO:tensorflow:loss = 1.56434e+08, step = 9601 (0.117 sec)
INFO:tensorflow:global_step/sec: 813.764
INFO:tensorflow:loss = 2.46945e+08, step = 9701 (0.123 sec)
INFO:tensorflow:global_step/sec: 840.973
INFO:tensorflow:loss = 2.20785e+08, step = 9801 (0.117 sec)
INFO:tensorflow:global_step/sec: 830.713
INFO:tensorflow:loss = 2.54528e+08, step = 9901 (0.120 sec)
INFO:tensorflow:Saving checkpoints for 10000 into tensorboard/linear_regressor/model.ckpt.
INFO:tensorflow:Loss for final step: 2.51601e+08.
Out[102]:
LinearRegressor(params={'joint_weights': False, 'gradient_clip_norm': None, 'feature_columns': [_RealValuedColumn(column_name='horsepower', dimension=1, default_value=None, dtype=tf.float32, normalizer=None), _SparseColumnHashed(column_name='make', is_integerized=False, bucket_size=50, lookup_config=None, combiner='sum', dtype=tf.string), _SparseColumnKeys(column_name='num-of-doors', is_integerized=False, bucket_size=None, lookup_config=_SparseIdLookupConfig(vocabulary_file=None, keys=('two', 'four'), num_oov_buckets=0, vocab_size=2, default_value=-1), combiner='sum', dtype=tf.string), _SparseColumnKeys(column_name='num-of-cylinders', is_integerized=False, bucket_size=None, lookup_config=_SparseIdLookupConfig(vocabulary_file=None, keys=('eight', 'five', 'four', 'six', 'three', 'twelve', 'two'), num_oov_buckets=0, vocab_size=7, default_value=-1), combiner='sum', dtype=tf.string), _RealValuedColumn(column_name='length', dimension=1, default_value=None, dtype=tf.float32, normalizer=None), _RealValuedColumn(column_name='width', dimension=1, default_value=None, dtype=tf.float32, normalizer=None), _RealValuedColumn(column_name='height', dimension=1, default_value=None, dtype=tf.float32, normalizer=None)], 'optimizer': None, 'head': <tensorflow.contrib.learn.python.learn.estimators.head._RegressionHead object at 0x7faf67544860>})

Evaluating


In [103]:
regressor.evaluate(input_fn=eval_input_fn)


WARNING:tensorflow:Rank of input Tensor (1) should be the same as output_rank (2) for column. Will attempt to expand dims. It is highly recommended that you resize your input, as this behavior may change.
WARNING:tensorflow:Rank of input Tensor (1) should be the same as output_rank (2) for column. Will attempt to expand dims. It is highly recommended that you resize your input, as this behavior may change.
WARNING:tensorflow:Rank of input Tensor (1) should be the same as output_rank (2) for column. Will attempt to expand dims. It is highly recommended that you resize your input, as this behavior may change.
WARNING:tensorflow:From /usr/local/lib/python3.4/dist-packages/tensorflow/contrib/learn/python/learn/estimators/head.py:625: scalar_summary (from tensorflow.python.ops.logging_ops) is deprecated and will be removed after 2016-11-30.
Instructions for updating:
Please switch to tf.summary.scalar. Note that tf.summary.scalar uses the node name instead of the tag. This means that TensorFlow will automatically de-duplicate summary names based on the scope they are created in. Also, passing a tensor or list of tags to a scalar summary op is no longer supported.
INFO:tensorflow:Starting evaluation at 2017-06-15-14:01:39
INFO:tensorflow:Restoring parameters from tensorboard/linear_regressor/model.ckpt-10000
INFO:tensorflow:Finished evaluation at 2017-06-15-14:01:39
INFO:tensorflow:Saving dict for global step 10000: global_step = 10000, loss = 2.03146e+08
Out[103]:
{'global_step': 10000, 'loss': 2.0314605e+08}

Predicting


In [104]:
preds = list(regressor.predict(input_fn=eval_input_fn))

for i in range(TEST_DATA_SIZE):
    print('prediction:', preds[i], 'real value:', test_label.iloc[i])


WARNING:tensorflow:From /usr/local/lib/python3.4/dist-packages/tensorflow/python/util/deprecation.py:347: calling LinearRegressor.predict (from tensorflow.contrib.learn.python.learn.estimators.linear) with outputs=None is deprecated and will be removed after 2017-03-01.
Instructions for updating:
Please switch to predict_scores, or set `outputs` argument.
WARNING:tensorflow:Rank of input Tensor (1) should be the same as output_rank (2) for column. Will attempt to expand dims. It is highly recommended that you resize your input, as this behavior may change.
WARNING:tensorflow:Rank of input Tensor (1) should be the same as output_rank (2) for column. Will attempt to expand dims. It is highly recommended that you resize your input, as this behavior may change.
WARNING:tensorflow:Rank of input Tensor (1) should be the same as output_rank (2) for column. Will attempt to expand dims. It is highly recommended that you resize your input, as this behavior may change.
INFO:tensorflow:Restoring parameters from tensorboard/linear_regressor/model.ckpt-10000
prediction: 142.403 real value: 10698.0
prediction: 159.54 real value: 9988.0
prediction: 160.006 real value: 10898.0
prediction: 159.54 real value: 11248.0
prediction: 262.848 real value: 16558.0
prediction: 262.848 real value: 15998.0
prediction: 251.596 real value: 15690.0
prediction: 251.596 real value: 15750.0
prediction: 58.2873 real value: 7775.0
prediction: 88.861 real value: 7975.0

Defining a DNN Estimator


In [105]:
# @1.2.0 tf.feature_column.indicator_column -> tf.contrib.layers.one_hot_column(tf.contrib.layers.sparse_column_with_keys(...))
dnn_features = [
    #numerical features
    length, width, height, horsepower,    
    # densify categorical features:
    one_hot_column(make),
    one_hot_column(num_of_doors)
]

In [107]:
dnnregressor = tf.contrib.learn.DNNRegressor(feature_columns=dnn_features,
                                             hidden_units=[50, 30, 10], model_dir='tensorboard/DNN_regressor/')


INFO:tensorflow:Using default config.
INFO:tensorflow:Using config: {'_save_checkpoints_steps': None, '_tf_config': gpu_options {
  per_process_gpu_memory_fraction: 1
}
, '_task_id': 0, '_keep_checkpoint_max': 5, '_save_checkpoints_secs': 600, '_session_config': None, '_model_dir': 'tensorboard/DNN_regressor/', '_master': '', '_tf_random_seed': None, '_environment': 'local', '_num_ps_replicas': 0, '_num_worker_replicas': 0, '_task_type': None, '_keep_checkpoint_every_n_hours': 10000, '_cluster_spec': <tensorflow.python.training.server_lib.ClusterSpec object at 0x7faf6408c9e8>, '_save_summary_steps': 100, '_evaluation_master': '', '_is_chief': True}

Training


In [108]:
dnnregressor.fit(input_fn=training_input_fn, steps=10000)


WARNING:tensorflow:Rank of input Tensor (1) should be the same as output_rank (2) for column. Will attempt to expand dims. It is highly recommended that you resize your input, as this behavior may change.
WARNING:tensorflow:Rank of input Tensor (1) should be the same as output_rank (2) for column. Will attempt to expand dims. It is highly recommended that you resize your input, as this behavior may change.
WARNING:tensorflow:Rank of input Tensor (1) should be the same as output_rank (2) for column. Will attempt to expand dims. It is highly recommended that you resize your input, as this behavior may change.
WARNING:tensorflow:Rank of input Tensor (1) should be the same as output_rank (2) for column. Will attempt to expand dims. It is highly recommended that you resize your input, as this behavior may change.
WARNING:tensorflow:Rank of input Tensor (1) should be the same as output_rank (2) for column. Will attempt to expand dims. It is highly recommended that you resize your input, as this behavior may change.
WARNING:tensorflow:Rank of input Tensor (1) should be the same as output_rank (2) for column. Will attempt to expand dims. It is highly recommended that you resize your input, as this behavior may change.
WARNING:tensorflow:From /usr/local/lib/python3.4/dist-packages/tensorflow/contrib/learn/python/learn/estimators/head.py:625: scalar_summary (from tensorflow.python.ops.logging_ops) is deprecated and will be removed after 2016-11-30.
Instructions for updating:
Please switch to tf.summary.scalar. Note that tf.summary.scalar uses the node name instead of the tag. This means that TensorFlow will automatically de-duplicate summary names based on the scope they are created in. Also, passing a tensor or list of tags to a scalar summary op is no longer supported.
INFO:tensorflow:Create CheckpointSaverHook.
INFO:tensorflow:Saving checkpoints for 1 into tensorboard/DNN_regressor/model.ckpt.
INFO:tensorflow:loss = 2.90802e+08, step = 1
INFO:tensorflow:global_step/sec: 643.468
INFO:tensorflow:loss = 1.1641e+07, step = 101 (0.156 sec)
INFO:tensorflow:global_step/sec: 694.261
INFO:tensorflow:loss = 7.98193e+06, step = 201 (0.144 sec)
INFO:tensorflow:global_step/sec: 747.178
INFO:tensorflow:loss = 3.06143e+06, step = 301 (0.134 sec)
INFO:tensorflow:global_step/sec: 746.46
INFO:tensorflow:loss = 4.55193e+06, step = 401 (0.135 sec)
INFO:tensorflow:global_step/sec: 804.846
INFO:tensorflow:loss = 4.13768e+06, step = 501 (0.124 sec)
INFO:tensorflow:global_step/sec: 834.797
INFO:tensorflow:loss = 4.86764e+06, step = 601 (0.119 sec)
INFO:tensorflow:global_step/sec: 740.98
INFO:tensorflow:loss = 4.57349e+06, step = 701 (0.136 sec)
INFO:tensorflow:global_step/sec: 820.291
INFO:tensorflow:loss = 3.10496e+06, step = 801 (0.121 sec)
INFO:tensorflow:global_step/sec: 755.382
INFO:tensorflow:loss = 4.67391e+06, step = 901 (0.133 sec)
INFO:tensorflow:global_step/sec: 819.709
INFO:tensorflow:loss = 3.29688e+06, step = 1001 (0.121 sec)
INFO:tensorflow:global_step/sec: 701.289
INFO:tensorflow:loss = 4.17016e+06, step = 1101 (0.142 sec)
INFO:tensorflow:global_step/sec: 742.823
INFO:tensorflow:loss = 2.59178e+06, step = 1201 (0.136 sec)
INFO:tensorflow:global_step/sec: 655.648
INFO:tensorflow:loss = 5.13116e+06, step = 1301 (0.151 sec)
INFO:tensorflow:global_step/sec: 746.064
INFO:tensorflow:loss = 2.73089e+06, step = 1401 (0.135 sec)
INFO:tensorflow:global_step/sec: 704.622
INFO:tensorflow:loss = 2.7186e+06, step = 1501 (0.144 sec)
INFO:tensorflow:global_step/sec: 741.466
INFO:tensorflow:loss = 4.57346e+06, step = 1601 (0.134 sec)
INFO:tensorflow:global_step/sec: 670.247
INFO:tensorflow:loss = 3.98322e+06, step = 1701 (0.149 sec)
INFO:tensorflow:global_step/sec: 629.013
INFO:tensorflow:loss = 3.03508e+06, step = 1801 (0.158 sec)
INFO:tensorflow:global_step/sec: 627.101
INFO:tensorflow:loss = 5.12813e+06, step = 1901 (0.160 sec)
INFO:tensorflow:global_step/sec: 610.105
INFO:tensorflow:loss = 4.6744e+06, step = 2001 (0.165 sec)
INFO:tensorflow:global_step/sec: 634.092
INFO:tensorflow:loss = 3.57615e+06, step = 2101 (0.157 sec)
INFO:tensorflow:global_step/sec: 846.508
INFO:tensorflow:loss = 2.10253e+06, step = 2201 (0.116 sec)
INFO:tensorflow:global_step/sec: 645.051
INFO:tensorflow:loss = 3.35572e+06, step = 2301 (0.154 sec)
INFO:tensorflow:global_step/sec: 829.184
INFO:tensorflow:loss = 2.67412e+06, step = 2401 (0.122 sec)
INFO:tensorflow:global_step/sec: 729.485
INFO:tensorflow:loss = 1.82536e+06, step = 2501 (0.138 sec)
INFO:tensorflow:global_step/sec: 734.258
INFO:tensorflow:loss = 2.79449e+06, step = 2601 (0.135 sec)
INFO:tensorflow:global_step/sec: 807.086
INFO:tensorflow:loss = 5.17955e+06, step = 2701 (0.124 sec)
INFO:tensorflow:global_step/sec: 649.419
INFO:tensorflow:loss = 5.65098e+06, step = 2801 (0.152 sec)
INFO:tensorflow:global_step/sec: 887.304
INFO:tensorflow:loss = 5.58609e+06, step = 2901 (0.114 sec)
INFO:tensorflow:global_step/sec: 684.772
INFO:tensorflow:loss = 3.50584e+06, step = 3001 (0.144 sec)
INFO:tensorflow:global_step/sec: 705.663
INFO:tensorflow:loss = 2.62402e+06, step = 3101 (0.142 sec)
INFO:tensorflow:global_step/sec: 686.328
INFO:tensorflow:loss = 2.59574e+06, step = 3201 (0.147 sec)
---------------------------------------------------------------------------
KeyboardInterrupt                         Traceback (most recent call last)
<ipython-input-108-e5b06bef6748> in <module>()
----> 1 dnnregressor.fit(input_fn=training_input_fn, steps=100000)

/usr/local/lib/python3.4/dist-packages/tensorflow/python/util/deprecation.py in new_func(*args, **kwargs)
    287             'in a future version' if date is None else ('after %s' % date),
    288             instructions)
--> 289       return func(*args, **kwargs)
    290     return tf_decorator.make_decorator(func, new_func, 'deprecated',
    291                                        _add_deprecated_arg_notice_to_docstring(

/usr/local/lib/python3.4/dist-packages/tensorflow/contrib/learn/python/learn/estimators/estimator.py in fit(self, x, y, input_fn, steps, batch_size, monitors, max_steps)
    453       hooks.append(basic_session_run_hooks.StopAtStepHook(steps, max_steps))
    454 
--> 455     loss = self._train_model(input_fn=input_fn, hooks=hooks)
    456     logging.info('Loss for final step: %s.', loss)
    457     return self

/usr/local/lib/python3.4/dist-packages/tensorflow/contrib/learn/python/learn/estimators/estimator.py in _train_model(self, input_fn, hooks)
   1005         loss = None
   1006         while not mon_sess.should_stop():
-> 1007           _, loss = mon_sess.run([model_fn_ops.train_op, model_fn_ops.loss])
   1008       summary_io.SummaryWriterCache.clear()
   1009       return loss

/usr/local/lib/python3.4/dist-packages/tensorflow/python/training/monitored_session.py in run(self, fetches, feed_dict, options, run_metadata)
    503                           feed_dict=feed_dict,
    504                           options=options,
--> 505                           run_metadata=run_metadata)
    506 
    507   def should_stop(self):

/usr/local/lib/python3.4/dist-packages/tensorflow/python/training/monitored_session.py in run(self, fetches, feed_dict, options, run_metadata)
    840                               feed_dict=feed_dict,
    841                               options=options,
--> 842                               run_metadata=run_metadata)
    843       except _PREEMPTION_ERRORS as e:
    844         logging.info('An error was raised. This may be due to a preemption in '

/usr/local/lib/python3.4/dist-packages/tensorflow/python/training/monitored_session.py in run(self, *args, **kwargs)
    796 
    797   def run(self, *args, **kwargs):
--> 798     return self._sess.run(*args, **kwargs)
    799 
    800 

/usr/local/lib/python3.4/dist-packages/tensorflow/python/training/monitored_session.py in run(self, fetches, feed_dict, options, run_metadata)
    950                                   feed_dict=feed_dict,
    951                                   options=options,
--> 952                                   run_metadata=run_metadata)
    953 
    954     for hook in self._hooks:

/usr/local/lib/python3.4/dist-packages/tensorflow/python/training/monitored_session.py in run(self, *args, **kwargs)
    796 
    797   def run(self, *args, **kwargs):
--> 798     return self._sess.run(*args, **kwargs)
    799 
    800 

/usr/local/lib/python3.4/dist-packages/tensorflow/python/client/session.py in run(self, fetches, feed_dict, options, run_metadata)
    787     try:
    788       result = self._run(None, fetches, feed_dict, options_ptr,
--> 789                          run_metadata_ptr)
    790       if run_metadata:
    791         proto_data = tf_session.TF_GetBuffer(run_metadata_ptr)

/usr/local/lib/python3.4/dist-packages/tensorflow/python/client/session.py in _run(self, handle, fetches, feed_dict, options, run_metadata)
    995     if final_fetches or final_targets:
    996       results = self._do_run(handle, final_targets, final_fetches,
--> 997                              feed_dict_string, options, run_metadata)
    998     else:
    999       results = []

/usr/local/lib/python3.4/dist-packages/tensorflow/python/client/session.py in _do_run(self, handle, target_list, fetch_list, feed_dict, options, run_metadata)
   1130     if handle is None:
   1131       return self._do_call(_run_fn, self._session, feed_dict, fetch_list,
-> 1132                            target_list, options, run_metadata)
   1133     else:
   1134       return self._do_call(_prun_fn, self._session, handle, feed_dict,

/usr/local/lib/python3.4/dist-packages/tensorflow/python/client/session.py in _do_call(self, fn, *args)
   1137   def _do_call(self, fn, *args):
   1138     try:
-> 1139       return fn(*args)
   1140     except errors.OpError as e:
   1141       message = compat.as_text(e.message)

/usr/local/lib/python3.4/dist-packages/tensorflow/python/client/session.py in _run_fn(session, feed_dict, fetch_list, target_list, options, run_metadata)
   1119         return tf_session.TF_Run(session, options,
   1120                                  feed_dict, fetch_list, target_list,
-> 1121                                  status, run_metadata)
   1122 
   1123     def _prun_fn(session, handle, feed_dict, fetch_list):

KeyboardInterrupt: 

Evaluating


In [80]:
dnnregressor.evaluate(input_fn=eval_input_fn)


WARNING:tensorflow:Rank of input Tensor (1) should be the same as output_rank (2) for column. Will attempt to expand dims. It is highly recommended that you resize your input, as this behavior may change.
WARNING:tensorflow:Rank of input Tensor (1) should be the same as output_rank (2) for column. Will attempt to expand dims. It is highly recommended that you resize your input, as this behavior may change.
WARNING:tensorflow:Rank of input Tensor (1) should be the same as output_rank (2) for column. Will attempt to expand dims. It is highly recommended that you resize your input, as this behavior may change.
WARNING:tensorflow:Rank of input Tensor (1) should be the same as output_rank (2) for column. Will attempt to expand dims. It is highly recommended that you resize your input, as this behavior may change.
WARNING:tensorflow:Rank of input Tensor (1) should be the same as output_rank (2) for column. Will attempt to expand dims. It is highly recommended that you resize your input, as this behavior may change.
WARNING:tensorflow:Rank of input Tensor (1) should be the same as output_rank (2) for column. Will attempt to expand dims. It is highly recommended that you resize your input, as this behavior may change.
WARNING:tensorflow:From /usr/local/lib/python3.4/dist-packages/tensorflow/contrib/learn/python/learn/estimators/head.py:625: scalar_summary (from tensorflow.python.ops.logging_ops) is deprecated and will be removed after 2016-11-30.
Instructions for updating:
Please switch to tf.summary.scalar. Note that tf.summary.scalar uses the node name instead of the tag. This means that TensorFlow will automatically de-duplicate summary names based on the scope they are created in. Also, passing a tensor or list of tags to a scalar summary op is no longer supported.
INFO:tensorflow:Starting evaluation at 2017-06-15-00:11:52
INFO:tensorflow:Restoring parameters from /tmp/tmp5u7roxz9/model.ckpt-10000
INFO:tensorflow:Finished evaluation at 2017-06-15-00:11:52
INFO:tensorflow:Saving dict for global step 10000: global_step = 10000, loss = 1.54887e+07
Out[80]:
{'global_step': 10000, 'loss': 15488682.0}

Predicting


In [81]:
preds = list(dnnregressor.predict(input_fn=eval_input_fn))

for i in range(TEST_DATA_SIZE):
    print('prediction:', preds[i], 'real value:', test_label.iloc[i])


WARNING:tensorflow:From /usr/local/lib/python3.4/dist-packages/tensorflow/python/util/deprecation.py:347: calling DNNRegressor.predict (from tensorflow.contrib.learn.python.learn.estimators.dnn) with outputs=None is deprecated and will be removed after 2017-03-01.
Instructions for updating:
Please switch to predict_scores, or set `outputs` argument.
WARNING:tensorflow:Rank of input Tensor (1) should be the same as output_rank (2) for column. Will attempt to expand dims. It is highly recommended that you resize your input, as this behavior may change.
WARNING:tensorflow:Rank of input Tensor (1) should be the same as output_rank (2) for column. Will attempt to expand dims. It is highly recommended that you resize your input, as this behavior may change.
WARNING:tensorflow:Rank of input Tensor (1) should be the same as output_rank (2) for column. Will attempt to expand dims. It is highly recommended that you resize your input, as this behavior may change.
WARNING:tensorflow:Rank of input Tensor (1) should be the same as output_rank (2) for column. Will attempt to expand dims. It is highly recommended that you resize your input, as this behavior may change.
WARNING:tensorflow:Rank of input Tensor (1) should be the same as output_rank (2) for column. Will attempt to expand dims. It is highly recommended that you resize your input, as this behavior may change.
WARNING:tensorflow:Rank of input Tensor (1) should be the same as output_rank (2) for column. Will attempt to expand dims. It is highly recommended that you resize your input, as this behavior may change.
INFO:tensorflow:Restoring parameters from /tmp/tmp5u7roxz9/model.ckpt-10000
prediction: 9597.67 real value: 10698.0
prediction: 11866.2 real value: 9988.0
prediction: 11514.2 real value: 10898.0
prediction: 11866.2 real value: 11248.0
prediction: 23688.1 real value: 16558.0
prediction: 23688.1 real value: 15998.0
prediction: 22791.1 real value: 15690.0
prediction: 22791.1 real value: 15750.0
prediction: 4272.87 real value: 7775.0
prediction: 6275.6 real value: 7975.0

Creating an Experiment


In [82]:
# @1.2.0 experiment_fn(run_config, params) - > experiment_fn(output_dir)
def experiment_fn(output_dir):
    # This function makes an Experiment, containing an Estimator and inputs for training and evaluation.
    # You can use params and config here to customize the Estimator depending on the cluster or to use
    # hyperparameter tuning.

    # Collect information for training
    # @1.2.0 config=run_config -> ''
    return tf.contrib.learn.Experiment(estimator=tf.contrib.learn.LinearRegressor(
                                     feature_columns=linear_features, model_dir=output_dir),
                                     train_input_fn=training_input_fn,
                                     train_steps=10000,
                                     eval_input_fn=eval_input_fn)

In [83]:
import shutil
# @1.2.0 tf.contrib.learn.learn_runner(exp, run_config=tf.contrib.learn.RunConfig(model_dir="/tmp/output_dir")
# -> tf.contrib.learn.python.learn.learm_runner.run(exp, output_dir='/tmp/output_dir')
shutil.rmtree("/tmp/output_dir", ignore_errors=True)

from tensorflow.contrib.learn.python.learn import learn_runner
learn_runner.run(experiment_fn, output_dir='/tmp/output_dir')


INFO:tensorflow:Using default config.
INFO:tensorflow:Using config: {'_save_checkpoints_steps': None, '_tf_config': gpu_options {
  per_process_gpu_memory_fraction: 1
}
, '_task_id': 0, '_keep_checkpoint_max': 5, '_save_checkpoints_secs': 600, '_session_config': None, '_model_dir': '/tmp/output_dir', '_master': '', '_tf_random_seed': None, '_environment': 'local', '_num_ps_replicas': 0, '_num_worker_replicas': 0, '_task_type': None, '_keep_checkpoint_every_n_hours': 10000, '_cluster_spec': <tensorflow.python.training.server_lib.ClusterSpec object at 0x7faf2c6dc5f8>, '_save_summary_steps': 100, '_evaluation_master': '', '_is_chief': True}
WARNING:tensorflow:From /usr/local/lib/python3.4/dist-packages/tensorflow/contrib/learn/python/learn/monitors.py:268: BaseMonitor.__init__ (from tensorflow.contrib.learn.python.learn.monitors) is deprecated and will be removed after 2016-12-05.
Instructions for updating:
Monitors are deprecated. Please use tf.train.SessionRunHook.
WARNING:tensorflow:Rank of input Tensor (1) should be the same as output_rank (2) for column. Will attempt to expand dims. It is highly recommended that you resize your input, as this behavior may change.
WARNING:tensorflow:Rank of input Tensor (1) should be the same as output_rank (2) for column. Will attempt to expand dims. It is highly recommended that you resize your input, as this behavior may change.
WARNING:tensorflow:Rank of input Tensor (1) should be the same as output_rank (2) for column. Will attempt to expand dims. It is highly recommended that you resize your input, as this behavior may change.
WARNING:tensorflow:From /usr/local/lib/python3.4/dist-packages/tensorflow/contrib/learn/python/learn/estimators/head.py:625: scalar_summary (from tensorflow.python.ops.logging_ops) is deprecated and will be removed after 2016-11-30.
Instructions for updating:
Please switch to tf.summary.scalar. Note that tf.summary.scalar uses the node name instead of the tag. This means that TensorFlow will automatically de-duplicate summary names based on the scope they are created in. Also, passing a tensor or list of tags to a scalar summary op is no longer supported.
INFO:tensorflow:Create CheckpointSaverHook.
INFO:tensorflow:Saving checkpoints for 1 into /tmp/output_dir/model.ckpt.
INFO:tensorflow:loss = 2.53779e+08, step = 1
WARNING:tensorflow:Rank of input Tensor (1) should be the same as output_rank (2) for column. Will attempt to expand dims. It is highly recommended that you resize your input, as this behavior may change.
WARNING:tensorflow:Rank of input Tensor (1) should be the same as output_rank (2) for column. Will attempt to expand dims. It is highly recommended that you resize your input, as this behavior may change.
WARNING:tensorflow:Rank of input Tensor (1) should be the same as output_rank (2) for column. Will attempt to expand dims. It is highly recommended that you resize your input, as this behavior may change.
WARNING:tensorflow:From /usr/local/lib/python3.4/dist-packages/tensorflow/contrib/learn/python/learn/estimators/head.py:625: scalar_summary (from tensorflow.python.ops.logging_ops) is deprecated and will be removed after 2016-11-30.
Instructions for updating:
Please switch to tf.summary.scalar. Note that tf.summary.scalar uses the node name instead of the tag. This means that TensorFlow will automatically de-duplicate summary names based on the scope they are created in. Also, passing a tensor or list of tags to a scalar summary op is no longer supported.
INFO:tensorflow:Starting evaluation at 2017-06-15-00:13:35
INFO:tensorflow:Restoring parameters from /tmp/output_dir/model.ckpt-1
INFO:tensorflow:Evaluation [1/100]
INFO:tensorflow:Evaluation [2/100]
INFO:tensorflow:Evaluation [3/100]
INFO:tensorflow:Evaluation [4/100]
INFO:tensorflow:Evaluation [5/100]
INFO:tensorflow:Evaluation [6/100]
INFO:tensorflow:Evaluation [7/100]
INFO:tensorflow:Evaluation [8/100]
INFO:tensorflow:Evaluation [9/100]
INFO:tensorflow:Evaluation [10/100]
INFO:tensorflow:Evaluation [11/100]
INFO:tensorflow:Evaluation [12/100]
INFO:tensorflow:Evaluation [13/100]
INFO:tensorflow:Evaluation [14/100]
INFO:tensorflow:Evaluation [15/100]
INFO:tensorflow:Evaluation [16/100]
INFO:tensorflow:Evaluation [17/100]
INFO:tensorflow:Evaluation [18/100]
INFO:tensorflow:Evaluation [19/100]
INFO:tensorflow:Evaluation [20/100]
INFO:tensorflow:Evaluation [21/100]
INFO:tensorflow:Evaluation [22/100]
INFO:tensorflow:Evaluation [23/100]
INFO:tensorflow:Evaluation [24/100]
INFO:tensorflow:Evaluation [25/100]
INFO:tensorflow:Evaluation [26/100]
INFO:tensorflow:Evaluation [27/100]
INFO:tensorflow:Evaluation [28/100]
INFO:tensorflow:Evaluation [29/100]
INFO:tensorflow:Evaluation [30/100]
INFO:tensorflow:Evaluation [31/100]
INFO:tensorflow:Finished evaluation at 2017-06-15-00:13:35
INFO:tensorflow:Saving dict for global step 1: global_step = 1, loss = 2.08159e+08
INFO:tensorflow:Validation (step 1): loss = 2.08159e+08, global_step = 1
INFO:tensorflow:global_step/sec: 54.6108
INFO:tensorflow:loss = 3.2995e+08, step = 101 (1.832 sec)
INFO:tensorflow:global_step/sec: 454.199
INFO:tensorflow:loss = 2.2434e+08, step = 201 (0.220 sec)
INFO:tensorflow:global_step/sec: 456.823
INFO:tensorflow:loss = 2.70825e+08, step = 301 (0.219 sec)
INFO:tensorflow:global_step/sec: 472.326
INFO:tensorflow:loss = 2.6304e+08, step = 401 (0.212 sec)
INFO:tensorflow:global_step/sec: 462.905
INFO:tensorflow:loss = 2.07089e+08, step = 501 (0.216 sec)
INFO:tensorflow:global_step/sec: 468.526
INFO:tensorflow:loss = 2.40862e+08, step = 601 (0.213 sec)
INFO:tensorflow:global_step/sec: 473.318
INFO:tensorflow:loss = 2.4237e+08, step = 701 (0.211 sec)
INFO:tensorflow:global_step/sec: 478.001
INFO:tensorflow:loss = 1.96818e+08, step = 801 (0.209 sec)
INFO:tensorflow:global_step/sec: 452.254
INFO:tensorflow:loss = 2.08855e+08, step = 901 (0.222 sec)
INFO:tensorflow:global_step/sec: 463.583
INFO:tensorflow:loss = 2.83313e+08, step = 1001 (0.215 sec)
INFO:tensorflow:global_step/sec: 396.998
INFO:tensorflow:loss = 3.63846e+08, step = 1101 (0.252 sec)
INFO:tensorflow:global_step/sec: 472.892
INFO:tensorflow:loss = 2.27487e+08, step = 1201 (0.211 sec)
INFO:tensorflow:global_step/sec: 473.153
INFO:tensorflow:loss = 1.95032e+08, step = 1301 (0.211 sec)
INFO:tensorflow:global_step/sec: 426.272
INFO:tensorflow:loss = 2.15183e+08, step = 1401 (0.235 sec)
INFO:tensorflow:global_step/sec: 473.015
INFO:tensorflow:loss = 2.63166e+08, step = 1501 (0.211 sec)
INFO:tensorflow:global_step/sec: 472.721
INFO:tensorflow:loss = 2.94977e+08, step = 1601 (0.212 sec)
INFO:tensorflow:global_step/sec: 478.031
INFO:tensorflow:loss = 2.07383e+08, step = 1701 (0.210 sec)
INFO:tensorflow:global_step/sec: 475.855
INFO:tensorflow:loss = 2.41123e+08, step = 1801 (0.209 sec)
INFO:tensorflow:global_step/sec: 480.033
INFO:tensorflow:loss = 2.24982e+08, step = 1901 (0.208 sec)
INFO:tensorflow:global_step/sec: 475.881
INFO:tensorflow:loss = 2.31737e+08, step = 2001 (0.210 sec)
INFO:tensorflow:global_step/sec: 482.01
INFO:tensorflow:loss = 2.37991e+08, step = 2101 (0.208 sec)
INFO:tensorflow:global_step/sec: 473.816
INFO:tensorflow:loss = 1.94863e+08, step = 2201 (0.211 sec)
INFO:tensorflow:global_step/sec: 468.928
INFO:tensorflow:loss = 2.36104e+08, step = 2301 (0.213 sec)
INFO:tensorflow:global_step/sec: 468.532
INFO:tensorflow:loss = 2.42888e+08, step = 2401 (0.214 sec)
INFO:tensorflow:global_step/sec: 481.561
INFO:tensorflow:loss = 2.06083e+08, step = 2501 (0.208 sec)
INFO:tensorflow:global_step/sec: 477.19
INFO:tensorflow:loss = 2.10886e+08, step = 2601 (0.210 sec)
INFO:tensorflow:global_step/sec: 464.82
INFO:tensorflow:loss = 2.36597e+08, step = 2701 (0.215 sec)
INFO:tensorflow:global_step/sec: 480.239
INFO:tensorflow:loss = 2.64082e+08, step = 2801 (0.208 sec)
INFO:tensorflow:global_step/sec: 454.252
INFO:tensorflow:loss = 2.7465e+08, step = 2901 (0.220 sec)
INFO:tensorflow:global_step/sec: 473.216
INFO:tensorflow:loss = 3.03645e+08, step = 3001 (0.211 sec)
INFO:tensorflow:global_step/sec: 481.783
INFO:tensorflow:loss = 2.82896e+08, step = 3101 (0.208 sec)
INFO:tensorflow:global_step/sec: 472.608
INFO:tensorflow:loss = 2.05005e+08, step = 3201 (0.212 sec)
INFO:tensorflow:global_step/sec: 481.667
INFO:tensorflow:loss = 2.04795e+08, step = 3301 (0.208 sec)
INFO:tensorflow:global_step/sec: 485.621
INFO:tensorflow:loss = 2.34377e+08, step = 3401 (0.206 sec)
INFO:tensorflow:global_step/sec: 486.705
INFO:tensorflow:loss = 2.8409e+08, step = 3501 (0.205 sec)
INFO:tensorflow:global_step/sec: 487.064
INFO:tensorflow:loss = 2.64078e+08, step = 3601 (0.205 sec)
INFO:tensorflow:global_step/sec: 479.578
INFO:tensorflow:loss = 1.93784e+08, step = 3701 (0.209 sec)
INFO:tensorflow:global_step/sec: 477.774
INFO:tensorflow:loss = 2.8074e+08, step = 3801 (0.209 sec)
INFO:tensorflow:global_step/sec: 485.669
INFO:tensorflow:loss = 2.33637e+08, step = 3901 (0.206 sec)
INFO:tensorflow:global_step/sec: 486.921
INFO:tensorflow:loss = 1.72349e+08, step = 4001 (0.205 sec)
INFO:tensorflow:global_step/sec: 485.449
INFO:tensorflow:loss = 2.2439e+08, step = 4101 (0.206 sec)
INFO:tensorflow:global_step/sec: 485.478
INFO:tensorflow:loss = 3.11015e+08, step = 4201 (0.206 sec)
INFO:tensorflow:global_step/sec: 467.359
INFO:tensorflow:loss = 3.30783e+08, step = 4301 (0.214 sec)
INFO:tensorflow:global_step/sec: 470.685
INFO:tensorflow:loss = 1.86487e+08, step = 4401 (0.212 sec)
INFO:tensorflow:global_step/sec: 469.766
INFO:tensorflow:loss = 2.14433e+08, step = 4501 (0.213 sec)
INFO:tensorflow:global_step/sec: 453.857
INFO:tensorflow:loss = 1.76995e+08, step = 4601 (0.220 sec)
INFO:tensorflow:global_step/sec: 460.425
INFO:tensorflow:loss = 2.38683e+08, step = 4701 (0.217 sec)
INFO:tensorflow:global_step/sec: 470.707
INFO:tensorflow:loss = 2.83825e+08, step = 4801 (0.212 sec)
INFO:tensorflow:global_step/sec: 474.829
INFO:tensorflow:loss = 2.53666e+08, step = 4901 (0.211 sec)
INFO:tensorflow:global_step/sec: 486.946
INFO:tensorflow:loss = 2.04523e+08, step = 5001 (0.205 sec)
INFO:tensorflow:global_step/sec: 487.178
INFO:tensorflow:loss = 2.86669e+08, step = 5101 (0.205 sec)
INFO:tensorflow:global_step/sec: 487.332
INFO:tensorflow:loss = 2.7821e+08, step = 5201 (0.205 sec)
INFO:tensorflow:global_step/sec: 487.092
INFO:tensorflow:loss = 2.59791e+08, step = 5301 (0.205 sec)
INFO:tensorflow:global_step/sec: 491.003
INFO:tensorflow:loss = 2.47141e+08, step = 5401 (0.204 sec)
INFO:tensorflow:global_step/sec: 491.106
INFO:tensorflow:loss = 2.37231e+08, step = 5501 (0.204 sec)
INFO:tensorflow:global_step/sec: 487.993
INFO:tensorflow:loss = 2.72222e+08, step = 5601 (0.205 sec)
INFO:tensorflow:global_step/sec: 484.965
INFO:tensorflow:loss = 2.79429e+08, step = 5701 (0.206 sec)
INFO:tensorflow:global_step/sec: 487.171
INFO:tensorflow:loss = 3.34573e+08, step = 5801 (0.205 sec)
INFO:tensorflow:global_step/sec: 487.233
INFO:tensorflow:loss = 3.56468e+08, step = 5901 (0.205 sec)
INFO:tensorflow:global_step/sec: 484.368
INFO:tensorflow:loss = 2.97041e+08, step = 6001 (0.207 sec)
INFO:tensorflow:global_step/sec: 467.584
INFO:tensorflow:loss = 2.29266e+08, step = 6101 (0.213 sec)
INFO:tensorflow:global_step/sec: 474.962
INFO:tensorflow:loss = 2.10434e+08, step = 6201 (0.211 sec)
INFO:tensorflow:global_step/sec: 471.391
INFO:tensorflow:loss = 1.9858e+08, step = 6301 (0.212 sec)
INFO:tensorflow:global_step/sec: 455.808
INFO:tensorflow:loss = 3.40392e+08, step = 6401 (0.219 sec)
INFO:tensorflow:global_step/sec: 449.701
INFO:tensorflow:loss = 1.35302e+08, step = 6501 (0.223 sec)
INFO:tensorflow:global_step/sec: 447.52
INFO:tensorflow:loss = 2.45073e+08, step = 6601 (0.223 sec)
INFO:tensorflow:global_step/sec: 472.151
INFO:tensorflow:loss = 1.8786e+08, step = 6701 (0.212 sec)
INFO:tensorflow:global_step/sec: 486.783
INFO:tensorflow:loss = 2.59138e+08, step = 6801 (0.205 sec)
INFO:tensorflow:global_step/sec: 487.139
INFO:tensorflow:loss = 2.56774e+08, step = 6901 (0.205 sec)
INFO:tensorflow:global_step/sec: 480.202
INFO:tensorflow:loss = 2.22381e+08, step = 7001 (0.208 sec)
INFO:tensorflow:global_step/sec: 449.518
INFO:tensorflow:loss = 3.19742e+08, step = 7101 (0.222 sec)
INFO:tensorflow:global_step/sec: 458.642
INFO:tensorflow:loss = 1.45798e+08, step = 7201 (0.218 sec)
INFO:tensorflow:global_step/sec: 437.489
INFO:tensorflow:loss = 2.8065e+08, step = 7301 (0.229 sec)
INFO:tensorflow:global_step/sec: 433.284
INFO:tensorflow:loss = 3.20594e+08, step = 7401 (0.231 sec)
INFO:tensorflow:global_step/sec: 420.923
INFO:tensorflow:loss = 2.86083e+08, step = 7501 (0.238 sec)
INFO:tensorflow:global_step/sec: 452.777
INFO:tensorflow:loss = 3.02958e+08, step = 7601 (0.221 sec)
INFO:tensorflow:global_step/sec: 426.444
INFO:tensorflow:loss = 2.35591e+08, step = 7701 (0.234 sec)
INFO:tensorflow:global_step/sec: 414.848
INFO:tensorflow:loss = 2.33625e+08, step = 7801 (0.241 sec)
INFO:tensorflow:global_step/sec: 467.736
INFO:tensorflow:loss = 2.14244e+08, step = 7901 (0.214 sec)
INFO:tensorflow:global_step/sec: 470.138
INFO:tensorflow:loss = 2.29226e+08, step = 8001 (0.213 sec)
INFO:tensorflow:global_step/sec: 452.767
INFO:tensorflow:loss = 3.2403e+08, step = 8101 (0.222 sec)
INFO:tensorflow:global_step/sec: 463.772
INFO:tensorflow:loss = 2.40912e+08, step = 8201 (0.216 sec)
INFO:tensorflow:global_step/sec: 457.843
INFO:tensorflow:loss = 2.55732e+08, step = 8301 (0.218 sec)
INFO:tensorflow:global_step/sec: 446.67
INFO:tensorflow:loss = 2.01069e+08, step = 8401 (0.224 sec)
INFO:tensorflow:global_step/sec: 426.5
INFO:tensorflow:loss = 2.51314e+08, step = 8501 (0.235 sec)
INFO:tensorflow:global_step/sec: 466.806
INFO:tensorflow:loss = 2.33035e+08, step = 8601 (0.213 sec)
INFO:tensorflow:global_step/sec: 477.854
INFO:tensorflow:loss = 2.13737e+08, step = 8701 (0.209 sec)
INFO:tensorflow:global_step/sec: 462.588
INFO:tensorflow:loss = 2.20099e+08, step = 8801 (0.216 sec)
INFO:tensorflow:global_step/sec: 483.643
INFO:tensorflow:loss = 2.17736e+08, step = 8901 (0.207 sec)
INFO:tensorflow:global_step/sec: 487.195
INFO:tensorflow:loss = 3.29561e+08, step = 9001 (0.205 sec)
INFO:tensorflow:global_step/sec: 487.331
INFO:tensorflow:loss = 2.18982e+08, step = 9101 (0.205 sec)
INFO:tensorflow:global_step/sec: 477.163
INFO:tensorflow:loss = 2.73778e+08, step = 9201 (0.210 sec)
INFO:tensorflow:global_step/sec: 464.112
INFO:tensorflow:loss = 1.85688e+08, step = 9301 (0.216 sec)
INFO:tensorflow:global_step/sec: 481.984
INFO:tensorflow:loss = 2.32726e+08, step = 9401 (0.207 sec)
INFO:tensorflow:global_step/sec: 487.21
INFO:tensorflow:loss = 2.143e+08, step = 9501 (0.205 sec)
INFO:tensorflow:global_step/sec: 482.697
INFO:tensorflow:loss = 2.24054e+08, step = 9601 (0.207 sec)
INFO:tensorflow:global_step/sec: 467.808
INFO:tensorflow:loss = 3.07036e+08, step = 9701 (0.214 sec)
INFO:tensorflow:global_step/sec: 464.613
INFO:tensorflow:loss = 2.25359e+08, step = 9801 (0.215 sec)
INFO:tensorflow:global_step/sec: 486.992
INFO:tensorflow:loss = 2.78171e+08, step = 9901 (0.205 sec)
INFO:tensorflow:Saving checkpoints for 10000 into /tmp/output_dir/model.ckpt.
INFO:tensorflow:Loss for final step: 2.36605e+08.
WARNING:tensorflow:Rank of input Tensor (1) should be the same as output_rank (2) for column. Will attempt to expand dims. It is highly recommended that you resize your input, as this behavior may change.
WARNING:tensorflow:Rank of input Tensor (1) should be the same as output_rank (2) for column. Will attempt to expand dims. It is highly recommended that you resize your input, as this behavior may change.
WARNING:tensorflow:Rank of input Tensor (1) should be the same as output_rank (2) for column. Will attempt to expand dims. It is highly recommended that you resize your input, as this behavior may change.
WARNING:tensorflow:From /usr/local/lib/python3.4/dist-packages/tensorflow/contrib/learn/python/learn/estimators/head.py:625: scalar_summary (from tensorflow.python.ops.logging_ops) is deprecated and will be removed after 2016-11-30.
Instructions for updating:
Please switch to tf.summary.scalar. Note that tf.summary.scalar uses the node name instead of the tag. This means that TensorFlow will automatically de-duplicate summary names based on the scope they are created in. Also, passing a tensor or list of tags to a scalar summary op is no longer supported.
INFO:tensorflow:Starting evaluation at 2017-06-15-00:13:58
INFO:tensorflow:Restoring parameters from /tmp/output_dir/model.ckpt-10000
INFO:tensorflow:Evaluation [1/100]
INFO:tensorflow:Evaluation [2/100]
INFO:tensorflow:Evaluation [3/100]
INFO:tensorflow:Evaluation [4/100]
INFO:tensorflow:Evaluation [5/100]
INFO:tensorflow:Evaluation [6/100]
INFO:tensorflow:Evaluation [7/100]
INFO:tensorflow:Evaluation [8/100]
INFO:tensorflow:Evaluation [9/100]
INFO:tensorflow:Evaluation [10/100]
INFO:tensorflow:Evaluation [11/100]
INFO:tensorflow:Evaluation [12/100]
INFO:tensorflow:Evaluation [13/100]
INFO:tensorflow:Evaluation [14/100]
INFO:tensorflow:Evaluation [15/100]
INFO:tensorflow:Evaluation [16/100]
INFO:tensorflow:Evaluation [17/100]
INFO:tensorflow:Evaluation [18/100]
INFO:tensorflow:Evaluation [19/100]
INFO:tensorflow:Evaluation [20/100]
INFO:tensorflow:Evaluation [21/100]
INFO:tensorflow:Evaluation [22/100]
INFO:tensorflow:Evaluation [23/100]
INFO:tensorflow:Evaluation [24/100]
INFO:tensorflow:Evaluation [25/100]
INFO:tensorflow:Evaluation [26/100]
INFO:tensorflow:Evaluation [27/100]
INFO:tensorflow:Evaluation [28/100]
INFO:tensorflow:Evaluation [29/100]
INFO:tensorflow:Evaluation [30/100]
INFO:tensorflow:Evaluation [31/100]
INFO:tensorflow:Finished evaluation at 2017-06-15-00:13:59
INFO:tensorflow:Saving dict for global step 10000: global_step = 10000, loss = 2.03137e+08
Out[83]:
({'global_step': 10000, 'loss': 2.0313659e+08}, [])