In [6]:
# Demo: build a dense tensor and a sparse tensor, then evaluate the sparse
# one in a TF 1.x Session. (Warm-up for the median-house-value regression
# notebook that follows.)

import tensorflow as tf

# Rank-1 dense tensor from a Python list; mixed ints/floats coerce to float32.
feature_column_data = [1,2.4,0,9.9,3,120]
feature_tensor = tf.constant(feature_column_data)  # NOTE(review): unused below

# 3x5 sparse tensor with exactly two non-zero entries: (0,1)=6 and (2,4)=0.5.
sparse_tensor = tf.SparseTensor(indices=[[0,1], [2,4]],
                              values=[6, 0.5],
                              dense_shape=[3,5])

# TF 1.x explicit session; running a SparseTensor yields a SparseTensorValue
# namedtuple (indices, values, dense_shape) — see the Out[6] cell below.
sess = tf.Session()
sess.run(sparse_tensor)


Out[6]:
SparseTensorValue(indices=array([[0, 1],
       [2, 4]]), values=array([ 6. ,  0.5], dtype=float32), dense_shape=array([3, 5]))

In [ ]:
# Four equivalent ways to hand a *parameterized* input_fn to Estimator.fit().
# NOTE(review): `classifier`, `my_input_fn`, `my_input_function`, and the
# data sets are defined in other cells; `functools` must be imported.
# Fixes vs. original: `classifer` -> `classifier` (NameError),
# `setps=` -> `steps=` (TypeError), and the wrapper is now called by the
# name it was actually defined with.

# 1. Pass the input function directly (no way to vary its arguments).
classifier.fit(input_fn=my_input_fn, steps=2000)

# 2. Bind the data set inside a named zero-argument wrapper.
def my_input_function_training_set():
    """Zero-argument wrapper binding training_set to my_input_function."""
    return my_input_function(training_set)

classifier.fit(input_fn=my_input_function_training_set, steps=2000)

# 3. Bind the data set with functools.partial.
classifier.fit(input_fn=functools.partial(my_input_function,
                                          data_set=training_set), steps=2000)

# 4. Bind the data set with a lambda — the most common idiom.
classifier.fit(input_fn=lambda: my_input_fn(training_set))

# Same lambda pattern pointed at the test data.
classifier.fit(input_fn=lambda: my_input_fn(test_set), steps=2000)

In [9]:
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import itertools
import os

import pandas as pd
import tensorflow as tf

# Show INFO-level logs so training progress (loss every 100 steps) is printed.
tf.logging.set_verbosity(tf.logging.INFO)

In [11]:
# Boston-housing CSV schema: nine numeric features plus the regression target.
COLUMNS = ["crim", "zn", "indus", "nox", "rm", "age",
           "dis", "tax", "ptratio", "medv"]
FEATURES = COLUMNS[:-1]  # every column except the label

LABEL = "medv"  # median home value — the value we regress on

# NOTE(review): absolute local path makes the notebook non-portable; the
# directory is now a single constant instead of being repeated three times.
DATA_DIR = ("/Users/lipingzhang/Desktop/program/jd/crackingtensorflow/"
            "crackingtensorflow/crackingcode/day5")


def _load_boston_csv(filename):
    """Read one Boston-housing CSV from DATA_DIR.

    Skips the file's own header row (skiprows=1) and applies COLUMNS as the
    column names, matching the original three copy-pasted read_csv calls.
    """
    return pd.read_csv(os.path.join(DATA_DIR, filename),
                       skipinitialspace=True, skiprows=1, names=COLUMNS)


training_set = _load_boston_csv("boston_train.csv")
test_set = _load_boston_csv("boston_test.csv")
prediction_set = _load_boston_csv("boston_predict.csv")

In [12]:
# One real-valued (numeric) feature column per input feature name.
features_cols = []
for feature_name in FEATURES:
    features_cols.append(tf.contrib.layers.real_valued_column(feature_name))

In [17]:
# Two hidden layers of 10 units each; checkpoints and summaries are written
# to model_dir, so repeated fit() calls resume from the last checkpoint.
regressor = tf.contrib.learn.DNNRegressor(feature_columns=features_cols,
                          hidden_units=[10, 10],
                          model_dir="/tmp/boston_model")


INFO:tensorflow:Using default config.
INFO:tensorflow:Using config: {'_save_checkpoints_secs': 600, '_num_ps_replicas': 0, '_keep_checkpoint_max': 5, '_tf_random_seed': None, '_task_type': None, '_environment': 'local', '_is_chief': True, '_cluster_spec': <tensorflow.python.training.server_lib.ClusterSpec object at 0x11e117750>, '_tf_config': gpu_options {
  per_process_gpu_memory_fraction: 1
}
, '_task_id': 0, '_save_summary_steps': 100, '_save_checkpoints_steps': None, '_evaluation_master': '', '_keep_checkpoint_every_n_hours': 10000, '_master': ''}

In [18]:
def input_fn(data_set):
    """Convert a pandas DataFrame into (features, labels) tensors.

    Args:
        data_set: DataFrame containing every column in FEATURES plus LABEL.

    Returns:
        A pair of (dict mapping feature name -> 1-D constant tensor,
        1-D constant tensor of label values).
    """
    feature_cols = {}
    for feature_name in FEATURES:
        feature_cols[feature_name] = tf.constant(data_set[feature_name].values)
    labels = tf.constant(data_set[LABEL].values)
    return feature_cols, labels

In [20]:
# Train for 5000 steps; the lambda defers input_fn so its tensors are built
# inside the graph that fit() constructs.
regressor.fit(input_fn=lambda: input_fn(training_set), steps=5000)


WARNING:tensorflow:Rank of input Tensor (1) should be the same as output_rank (2) for column. Will attempt to expand dims. It is highly recommended that you resize your input, as this behavior may change.
WARNING:tensorflow:Rank of input Tensor (1) should be the same as output_rank (2) for column. Will attempt to expand dims. It is highly recommended that you resize your input, as this behavior may change.
WARNING:tensorflow:Rank of input Tensor (1) should be the same as output_rank (2) for column. Will attempt to expand dims. It is highly recommended that you resize your input, as this behavior may change.
WARNING:tensorflow:Rank of input Tensor (1) should be the same as output_rank (2) for column. Will attempt to expand dims. It is highly recommended that you resize your input, as this behavior may change.
WARNING:tensorflow:Rank of input Tensor (1) should be the same as output_rank (2) for column. Will attempt to expand dims. It is highly recommended that you resize your input, as this behavior may change.
WARNING:tensorflow:Rank of input Tensor (1) should be the same as output_rank (2) for column. Will attempt to expand dims. It is highly recommended that you resize your input, as this behavior may change.
WARNING:tensorflow:Rank of input Tensor (1) should be the same as output_rank (2) for column. Will attempt to expand dims. It is highly recommended that you resize your input, as this behavior may change.
WARNING:tensorflow:Rank of input Tensor (1) should be the same as output_rank (2) for column. Will attempt to expand dims. It is highly recommended that you resize your input, as this behavior may change.
WARNING:tensorflow:Rank of input Tensor (1) should be the same as output_rank (2) for column. Will attempt to expand dims. It is highly recommended that you resize your input, as this behavior may change.
WARNING:tensorflow:From /Users/lipingzhang/anaconda/lib/python2.7/site-packages/tensorflow/contrib/learn/python/learn/estimators/head.py:1362: scalar_summary (from tensorflow.python.ops.logging_ops) is deprecated and will be removed after 2016-11-30.
Instructions for updating:
Please switch to tf.summary.scalar. Note that tf.summary.scalar uses the node name instead of the tag. This means that TensorFlow will automatically de-duplicate summary names based on the scope they are created in. Also, passing a tensor or list of tags to a scalar summary op is no longer supported.
INFO:tensorflow:Create CheckpointSaverHook.
INFO:tensorflow:Saving checkpoints for 1 into /tmp/boston_model/model.ckpt.
INFO:tensorflow:loss = 3835.97, step = 1
INFO:tensorflow:global_step/sec: 1305.12
INFO:tensorflow:loss = 98.2116, step = 101
INFO:tensorflow:global_step/sec: 1349.28
INFO:tensorflow:loss = 81.5826, step = 201
INFO:tensorflow:global_step/sec: 1328.55
INFO:tensorflow:loss = 76.5882, step = 301
INFO:tensorflow:global_step/sec: 1321.11
INFO:tensorflow:loss = 73.2967, step = 401
INFO:tensorflow:global_step/sec: 1333.96
INFO:tensorflow:loss = 70.6866, step = 501
INFO:tensorflow:global_step/sec: 1334.56
INFO:tensorflow:loss = 67.5376, step = 601
INFO:tensorflow:global_step/sec: 1293.86
INFO:tensorflow:loss = 65.7355, step = 701
INFO:tensorflow:global_step/sec: 1349.78
INFO:tensorflow:loss = 63.4801, step = 801
INFO:tensorflow:global_step/sec: 1366.14
INFO:tensorflow:loss = 61.1619, step = 901
INFO:tensorflow:global_step/sec: 1419.29
INFO:tensorflow:loss = 58.8873, step = 1001
INFO:tensorflow:global_step/sec: 1348.27
INFO:tensorflow:loss = 56.4514, step = 1101
INFO:tensorflow:global_step/sec: 1298.08
INFO:tensorflow:loss = 53.4835, step = 1201
INFO:tensorflow:global_step/sec: 1321.81
INFO:tensorflow:loss = 51.4035, step = 1301
INFO:tensorflow:global_step/sec: 1313.73
INFO:tensorflow:loss = 49.3561, step = 1401
INFO:tensorflow:global_step/sec: 1356.65
INFO:tensorflow:loss = 47.3795, step = 1501
INFO:tensorflow:global_step/sec: 1318.72
INFO:tensorflow:loss = 45.1028, step = 1601
INFO:tensorflow:global_step/sec: 1327.27
INFO:tensorflow:loss = 44.1918, step = 1701
INFO:tensorflow:global_step/sec: 1373.43
INFO:tensorflow:loss = 103.955, step = 1801
INFO:tensorflow:global_step/sec: 1280.67
INFO:tensorflow:loss = 40.3892, step = 1901
INFO:tensorflow:global_step/sec: 1372.33
INFO:tensorflow:loss = 39.9576, step = 2001
INFO:tensorflow:global_step/sec: 1453.23
INFO:tensorflow:loss = 37.9068, step = 2101
INFO:tensorflow:global_step/sec: 1337.27
INFO:tensorflow:loss = 37.7232, step = 2201
INFO:tensorflow:global_step/sec: 1238.94
INFO:tensorflow:loss = 37.0419, step = 2301
INFO:tensorflow:global_step/sec: 1320.9
INFO:tensorflow:loss = 35.8491, step = 2401
INFO:tensorflow:global_step/sec: 1317.75
INFO:tensorflow:loss = 34.5074, step = 2501
INFO:tensorflow:global_step/sec: 1361.79
INFO:tensorflow:loss = 33.897, step = 2601
INFO:tensorflow:global_step/sec: 1356.12
INFO:tensorflow:loss = 34.1091, step = 2701
INFO:tensorflow:global_step/sec: 1340.2
INFO:tensorflow:loss = 33.4445, step = 2801
INFO:tensorflow:global_step/sec: 1362.3
INFO:tensorflow:loss = 33.469, step = 2901
INFO:tensorflow:global_step/sec: 1344.32
INFO:tensorflow:loss = 32.9832, step = 3001
INFO:tensorflow:global_step/sec: 1354.9
INFO:tensorflow:loss = 31.8194, step = 3101
INFO:tensorflow:global_step/sec: 1307.24
INFO:tensorflow:loss = 31.394, step = 3201
INFO:tensorflow:global_step/sec: 1359.6
INFO:tensorflow:loss = 32.4078, step = 3301
INFO:tensorflow:global_step/sec: 1371.86
INFO:tensorflow:loss = 32.1035, step = 3401
INFO:tensorflow:global_step/sec: 1352.26
INFO:tensorflow:loss = 32.0015, step = 3501
INFO:tensorflow:global_step/sec: 1353.97
INFO:tensorflow:loss = 31.5788, step = 3601
INFO:tensorflow:global_step/sec: 1296.75
INFO:tensorflow:loss = 31.3084, step = 3701
INFO:tensorflow:global_step/sec: 1261.11
INFO:tensorflow:loss = 31.2654, step = 3801
INFO:tensorflow:global_step/sec: 1400.4
INFO:tensorflow:loss = 30.873, step = 3901
INFO:tensorflow:global_step/sec: 1322.26
INFO:tensorflow:loss = 31.058, step = 4001
INFO:tensorflow:global_step/sec: 1334.17
INFO:tensorflow:loss = 30.7417, step = 4101
INFO:tensorflow:global_step/sec: 1278.31
INFO:tensorflow:loss = 30.624, step = 4201
INFO:tensorflow:global_step/sec: 1296.39
INFO:tensorflow:loss = 30.6231, step = 4301
INFO:tensorflow:global_step/sec: 1437.69
INFO:tensorflow:loss = 30.5645, step = 4401
INFO:tensorflow:global_step/sec: 1298.92
INFO:tensorflow:loss = 30.8233, step = 4501
INFO:tensorflow:global_step/sec: 1305.33
INFO:tensorflow:loss = 30.215, step = 4601
INFO:tensorflow:global_step/sec: 1328.71
INFO:tensorflow:loss = 30.3208, step = 4701
INFO:tensorflow:global_step/sec: 1320.95
INFO:tensorflow:loss = 29.7568, step = 4801
INFO:tensorflow:global_step/sec: 1257.94
INFO:tensorflow:loss = 29.6858, step = 4901
INFO:tensorflow:Saving checkpoints for 5000 into /tmp/boston_model/model.ckpt.
INFO:tensorflow:Loss for final step: 30.7742.
Out[20]:
DNNRegressor(params={'head': <tensorflow.contrib.learn.python.learn.estimators.head._RegressionHead object at 0x11e117b10>, 'hidden_units': [10, 10], 'feature_columns': (_RealValuedColumn(column_name='crim', dimension=1, default_value=None, dtype=tf.float32, normalizer=None), _RealValuedColumn(column_name='zn', dimension=1, default_value=None, dtype=tf.float32, normalizer=None), _RealValuedColumn(column_name='indus', dimension=1, default_value=None, dtype=tf.float32, normalizer=None), _RealValuedColumn(column_name='nox', dimension=1, default_value=None, dtype=tf.float32, normalizer=None), _RealValuedColumn(column_name='rm', dimension=1, default_value=None, dtype=tf.float32, normalizer=None), _RealValuedColumn(column_name='age', dimension=1, default_value=None, dtype=tf.float32, normalizer=None), _RealValuedColumn(column_name='dis', dimension=1, default_value=None, dtype=tf.float32, normalizer=None), _RealValuedColumn(column_name='tax', dimension=1, default_value=None, dtype=tf.float32, normalizer=None), _RealValuedColumn(column_name='ptratio', dimension=1, default_value=None, dtype=tf.float32, normalizer=None)), 'embedding_lr_multipliers': None, 'optimizer': None, 'dropout': None, 'gradient_clip_norm': None, 'activation_fn': <function relu at 0x10d7612a8>, 'input_layer_min_slice_size': None})

In [21]:
# One evaluation pass over the full test set (input_fn yields it all at once).
ev = regressor.evaluate(input_fn=lambda: input_fn(test_set), steps = 1)


WARNING:tensorflow:Rank of input Tensor (1) should be the same as output_rank (2) for column. Will attempt to expand dims. It is highly recommended that you resize your input, as this behavior may change.
WARNING:tensorflow:Rank of input Tensor (1) should be the same as output_rank (2) for column. Will attempt to expand dims. It is highly recommended that you resize your input, as this behavior may change.
WARNING:tensorflow:Rank of input Tensor (1) should be the same as output_rank (2) for column. Will attempt to expand dims. It is highly recommended that you resize your input, as this behavior may change.
WARNING:tensorflow:Rank of input Tensor (1) should be the same as output_rank (2) for column. Will attempt to expand dims. It is highly recommended that you resize your input, as this behavior may change.
WARNING:tensorflow:Rank of input Tensor (1) should be the same as output_rank (2) for column. Will attempt to expand dims. It is highly recommended that you resize your input, as this behavior may change.
WARNING:tensorflow:Rank of input Tensor (1) should be the same as output_rank (2) for column. Will attempt to expand dims. It is highly recommended that you resize your input, as this behavior may change.
WARNING:tensorflow:Rank of input Tensor (1) should be the same as output_rank (2) for column. Will attempt to expand dims. It is highly recommended that you resize your input, as this behavior may change.
WARNING:tensorflow:Rank of input Tensor (1) should be the same as output_rank (2) for column. Will attempt to expand dims. It is highly recommended that you resize your input, as this behavior may change.
WARNING:tensorflow:Rank of input Tensor (1) should be the same as output_rank (2) for column. Will attempt to expand dims. It is highly recommended that you resize your input, as this behavior may change.
WARNING:tensorflow:From /Users/lipingzhang/anaconda/lib/python2.7/site-packages/tensorflow/contrib/learn/python/learn/estimators/head.py:1362: scalar_summary (from tensorflow.python.ops.logging_ops) is deprecated and will be removed after 2016-11-30.
Instructions for updating:
Please switch to tf.summary.scalar. Note that tf.summary.scalar uses the node name instead of the tag. This means that TensorFlow will automatically de-duplicate summary names based on the scope they are created in. Also, passing a tensor or list of tags to a scalar summary op is no longer supported.
INFO:tensorflow:Starting evaluation at 2017-10-26-00:25:04
INFO:tensorflow:Evaluation [1/1]
INFO:tensorflow:Finished evaluation at 2017-10-26-00:25:04
INFO:tensorflow:Saving dict for global step 5000: global_step = 5000, loss = 16.3261
WARNING:tensorflow:Skipping summary for global_step, must be a float or np.float32.

In [22]:
# "loss" here is the evaluation loss reported by DNNRegressor (see the
# "Saving dict for global step" log line above).
loss_score = ev["loss"]
print("Loss: {0:f}".format(loss_score))


Loss: 16.326117

In [24]:
# predict() yields one value per row of prediction_set; the lambda defers
# input_fn until predict() builds its graph.
y = regressor.predict(input_fn=lambda: input_fn(prediction_set))
# Materialize the first six predictions from the generator.
predictions = []
for predicted_value in itertools.islice(y, 6):
    predictions.append(predicted_value)
print("Predictions: {}".format(str(predictions)))


WARNING:tensorflow:Rank of input Tensor (1) should be the same as output_rank (2) for column. Will attempt to expand dims. It is highly recommended that you resize your input, as this behavior may change.
WARNING:tensorflow:Rank of input Tensor (1) should be the same as output_rank (2) for column. Will attempt to expand dims. It is highly recommended that you resize your input, as this behavior may change.
WARNING:tensorflow:Rank of input Tensor (1) should be the same as output_rank (2) for column. Will attempt to expand dims. It is highly recommended that you resize your input, as this behavior may change.
WARNING:tensorflow:Rank of input Tensor (1) should be the same as output_rank (2) for column. Will attempt to expand dims. It is highly recommended that you resize your input, as this behavior may change.
WARNING:tensorflow:Rank of input Tensor (1) should be the same as output_rank (2) for column. Will attempt to expand dims. It is highly recommended that you resize your input, as this behavior may change.
WARNING:tensorflow:Rank of input Tensor (1) should be the same as output_rank (2) for column. Will attempt to expand dims. It is highly recommended that you resize your input, as this behavior may change.
WARNING:tensorflow:Rank of input Tensor (1) should be the same as output_rank (2) for column. Will attempt to expand dims. It is highly recommended that you resize your input, as this behavior may change.
WARNING:tensorflow:Rank of input Tensor (1) should be the same as output_rank (2) for column. Will attempt to expand dims. It is highly recommended that you resize your input, as this behavior may change.
WARNING:tensorflow:Rank of input Tensor (1) should be the same as output_rank (2) for column. Will attempt to expand dims. It is highly recommended that you resize your input, as this behavior may change.
Predictions: [35.791576, 18.658943, 24.397549, 36.567764, 16.061752, 20.300821]

In [ ]: