In [1]:
import numpy as np
import tensorflow as tf
from tensorflow.contrib.learn.python.learn.estimators.estimator import SKCompat
import matplotlib.pyplot as plt
learn = tf.contrib.learn

1. Set the parameters of the neural network.


In [2]:
HIDDEN_SIZE = 30
NUM_LAYERS = 2

TIMESTEPS = 10
TRAINING_STEPS = 3000
BATCH_SIZE = 32

TRAINING_EXAMPLES = 10000
TESTING_EXAMPLES = 1000
SAMPLE_GAP = 0.01
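
These constants determine the interval sampled below: the training curve spans roughly [0, TRAINING_EXAMPLES * SAMPLE_GAP] = [0, 100] and the test curve the following 10 units. A quick check of the arithmetic (illustrative only, not part of the original notebook):

train_interval_end = TRAINING_EXAMPLES * SAMPLE_GAP                      # 10000 * 0.01 = 100.0
test_interval_end = train_interval_end + TESTING_EXAMPLES * SAMPLE_GAP   # 100.0 + 10.0 = 110.0
print(train_interval_end, test_interval_end)                             # 100.0 110.0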

2. Define the function that generates the sine-wave data.


In [3]:
def generate_data(seq):
    X = []
    y = []

    for i in range(len(seq) - TIMESTEPS - 1):
        X.append([seq[i: i + TIMESTEPS]])
        y.append([seq[i + TIMESTEPS]])
    return np.array(X, dtype=np.float32), np.array(y, dtype=np.float32)
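
As a quick sanity check (not part of the original notebook), each example packs TIMESTEPS consecutive samples of the sequence as the input and uses the value that immediately follows as the label, so X has shape (N, 1, TIMESTEPS) and y has shape (N, 1); demo_X and demo_y are throwaway names:

demo_X, demo_y = generate_data(np.sin(np.linspace(0, 1, 100, dtype=np.float32)))
print(demo_X.shape)  # (89, 1, 10): 100 - TIMESTEPS - 1 sliding windows of 10 points each
print(demo_y.shape)  # (89, 1): the point right after each window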

3. Define the LSTM model.


In [4]:
def lstm_model(X, y):
    # Build NUM_LAYERS independent LSTM cells; reusing a single cell object
    # ([lstm_cell] * NUM_LAYERS) is rejected by TensorFlow 1.2+.
    cell = tf.nn.rnn_cell.MultiRNNCell(
        [tf.nn.rnn_cell.BasicLSTMCell(HIDDEN_SIZE) for _ in range(NUM_LAYERS)])

    # X has shape [batch, 1, TIMESTEPS]; unstack along the time axis and run the
    # statically unrolled RNN (tf.unpack / tf.nn.rnn were renamed to
    # tf.unstack / tf.nn.static_rnn in TensorFlow 1.x).
    x_ = tf.unstack(X, axis=1)
    output, _ = tf.nn.static_rnn(cell, x_, dtype=tf.float32)
    output = output[-1]

    # Compute a linear regression through a fully connected layer with no
    # activation function, and squeeze predictions and labels into 1-D arrays.
    predictions = tf.contrib.layers.fully_connected(output, 1, activation_fn=None)
    predictions = tf.squeeze(predictions, axis=[1])
    loss = tf.losses.mean_squared_error(
        labels=tf.reshape(y, [-1]), predictions=predictions)

    train_op = tf.contrib.layers.optimize_loss(
        loss, tf.train.get_global_step(),
        optimizer="Adagrad", learning_rate=0.1)

    return predictions, loss, train_op
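
If the statically unrolled tf.nn.static_rnn version above gives trouble on a particular TensorFlow 1.x release, the same model can be expressed with tf.nn.dynamic_rnn, which handles the time dimension itself and needs no tf.unstack. This is an alternative sketch, not the notebook's original model; lstm_model_dynamic is a hypothetical name:

def lstm_model_dynamic(X, y):
    # Same stacked-LSTM structure; dynamic_rnn unrolls over the time axis itself.
    cell = tf.nn.rnn_cell.MultiRNNCell(
        [tf.nn.rnn_cell.BasicLSTMCell(HIDDEN_SIZE) for _ in range(NUM_LAYERS)])
    outputs, _ = tf.nn.dynamic_rnn(cell, X, dtype=tf.float32)  # [batch, 1, HIDDEN_SIZE]
    output = outputs[:, -1, :]                                  # output at the last time step

    predictions = tf.contrib.layers.fully_connected(output, 1, activation_fn=None)
    predictions = tf.squeeze(predictions, axis=[1])
    loss = tf.losses.mean_squared_error(
        labels=tf.reshape(y, [-1]), predictions=predictions)
    train_op = tf.contrib.layers.optimize_loss(
        loss, tf.train.get_global_step(),
        optimizer="Adagrad", learning_rate=0.1)
    return predictions, loss, train_op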

4. Train the model.


In [5]:
# Wrap the LSTM model defined above in a contrib.learn estimator.
regressor = SKCompat(learn.Estimator(model_fn=lstm_model, model_dir="Models/model_2"))

# Generate the training and test data.
test_start = TRAINING_EXAMPLES * SAMPLE_GAP
test_end = (TRAINING_EXAMPLES + TESTING_EXAMPLES) * SAMPLE_GAP
train_X, train_y = generate_data(np.sin(np.linspace(
    0, test_start, TRAINING_EXAMPLES, dtype=np.float32)))
test_X, test_y = generate_data(np.sin(np.linspace(
    test_start, test_end, TESTING_EXAMPLES, dtype=np.float32)))

# Fit the training data.
regressor.fit(train_X, train_y, batch_size=BATCH_SIZE, steps=TRAINING_STEPS)

# Compute predictions on the test set.
predicted = [[pred] for pred in regressor.predict(test_X)]

# Compute the root mean squared error (RMSE).
rmse = np.sqrt(((predicted - test_y) ** 2).mean(axis=0))
print("Root Mean Square Error is: %f" % rmse[0])


INFO:tensorflow:Using default config.
INFO:tensorflow:Using config: {'_save_checkpoints_secs': 600, '_num_ps_replicas': 0, '_keep_checkpoint_max': 5, '_task_type': None, '_is_chief': True, '_cluster_spec': <tensorflow.python.training.server_lib.ClusterSpec object at 0x7f6052530290>, '_model_dir': 'Models/model_2', '_save_checkpoints_steps': None, '_keep_checkpoint_every_n_hours': 10000, '_session_config': None, '_tf_random_seed': None, '_save_summary_steps': 100, '_environment': 'local', '_num_worker_replicas': 0, '_task_id': 0, '_log_step_count_steps': 100, '_tf_config': gpu_options {
  per_process_gpu_memory_fraction: 1.0
}
, '_evaluation_master': '', '_master': ''}
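
Because model_dir="Models/model_2" checkpoints the weights, calling fit again on the same regressor resumes from the latest checkpoint rather than restarting; in contrib.learn, steps counts additional steps. A minimal sketch (the 500 extra steps are only an illustrative number):

regressor.fit(train_X, train_y, batch_size=BATCH_SIZE, steps=500)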

5. Plot the predicted and the real values.


In [6]:
plot_predicted, = plt.plot(predicted, label='predicted')
plot_test, = plt.plot(test_y, label='real_sin')
plt.legend([plot_predicted, plot_test], ['predicted', 'real_sin'])
plt.show()
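
When this runs outside an interactive notebook, the figure can also be written to disk; the sketch below simply redraws the two curves and saves them under a hypothetical filename:

plt.plot(predicted, label='predicted')
plt.plot(test_y, label='real_sin')
plt.legend()
plt.savefig("predicted_vs_real_sin.png", dpi=150)  # hypothetical output path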