In [1]:
import numpy as np
import tensorflow as tf  # legacy pre-1.0 graph API (tf.placeholder / tf.Session / tf.pack)
rng = np.random  # NOTE(review): no seed is set, so level0/pace0 init differs per run

# Meta-parameters and debugging knobs
learning_rate = 0.01     # SGD step size
training_epochs = 2000   # full passes of gradient descent over the series
display_step = 50        # log cost/params every this many epochs

# Test data: flat, then stepped, then linearly trending series
y = np.asarray([1, 1, 1, 2, 2, 2, 3, 4, 5, 6, 7, 8, 9, 10])
num_steps = y.shape[0]  # length of the unrolled smoothing loop below

# Input data placeholders (the same series is fed to both at train time:
# data_in drives the recursion, data_out is the regression target)
data_in = tf.placeholder('float')
data_out = tf.placeholder('float')

# ETS params — all four are trainable, mirroring R's ets(..., model = 'AAN')
level0 = tf.Variable(0.1 * rng.randn(), name = 'level0', dtype = tf.float32)  # initial level (l in R output)
pace0 = tf.Variable(0.1 * rng.randn(), name = 'pace0', dtype = tf.float32)    # initial trend/slope (b in R output)
alpha = tf.Variable(0.5, name = 'alpha', dtype = tf.float32)  # level smoothing coefficient
beta = tf.Variable(0.1, name = 'beta', dtype = tf.float32)    # trend smoothing coefficient

# Definition of the ETS update
def update(y, level, pace):
    output = level + pace
    new_level = output + alpha * (y - output)
    new_pace = pace + beta * (y - output)
    return output, new_level, new_pace

# Unrolled ETS loop: apply the smoothing update once per observation,
# collecting the one-step-ahead forecasts.
outputs = []
level, pace = level0, pace0
for time_step in range(num_steps):
    output, level, pace = update(data_in[time_step], level, pace)
    outputs.append(output)

# Sum of squared one-step forecast errors (the original comment said "mean",
# but no division by num_steps is performed — the optimum is the same either way).
# tf.pack was renamed tf.stack at TF 1.0; tf.square is the idiomatic pow(x, 2).
cost = tf.reduce_sum(tf.square(tf.stack(outputs) - data_out))

# Gradient descent over all four ETS parameters (level0, pace0, alpha, beta)
optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)

# Initializing the variables
# (tf.initialize_all_variables was deprecated in TF 0.12 in favor of this)
init = tf.global_variables_initializer()

# Launch the graph
with tf.Session() as sess:
    sess.run(init)

    # Same series feeds both placeholders: data_in drives the recursion,
    # data_out is the target the forecasts are fitted against.
    feed = {data_in: y, data_out: y}

    # Fit the data.
    for epoch in range(training_epochs):
        sess.run(optimizer, feed_dict=feed)

        # Display logs per epoch step
        if (epoch + 1) % display_step == 0:
            c = sess.run(cost, feed_dict=feed)
            print("Epoch:", '%04d' % (epoch + 1),
                  "cost=", "{:.9f}".format(c),
                  "level0=", sess.run(level0),
                  "pace0=", sess.run(pace0),
                  "alpha=", sess.run(alpha),
                  "beta=", sess.run(beta))

    print("Optimization Finished!")
    training_cost = sess.run(cost, feed_dict=feed)
    print("Training cost=", training_cost,
          "level0=", sess.run(level0),
          "pace0=", sess.run(pace0),
          "alpha=", sess.run(alpha),
          "beta=", sess.run(beta))


Epoch: 0050 cost= 2.328951836 level0= 0.531206 pace0= 0.228928 alpha= 0.626971 beta= 0.477133
Epoch: 0100 cost= 2.279419184 level0= 0.666729 pace0= 0.177188 alpha= 0.669274 beta= 0.466676
Epoch: 0150 cost= 2.272072315 level0= 0.716537 pace0= 0.151718 alpha= 0.688094 beta= 0.465966
Epoch: 0200 cost= 2.270692587 level0= 0.737201 pace0= 0.141077 alpha= 0.699073 beta= 0.465683
Epoch: 0250 cost= 2.270378828 level0= 0.746348 pace0= 0.136834 alpha= 0.706098 beta= 0.465303
Epoch: 0300 cost= 2.270293474 level0= 0.750614 pace0= 0.135232 alpha= 0.710722 beta= 0.464922
Epoch: 0350 cost= 2.270263195 level0= 0.752709 pace0= 0.134688 alpha= 0.713796 beta= 0.464604
Epoch: 0400 cost= 2.270252943 level0= 0.753792 pace0= 0.134551 alpha= 0.715849 beta= 0.464362
Epoch: 0450 cost= 2.270247698 level0= 0.754382 pace0= 0.134558 alpha= 0.717224 beta= 0.464186
Epoch: 0500 cost= 2.270246267 level0= 0.75472 pace0= 0.134605 alpha= 0.718147 beta= 0.464062
Epoch: 0550 cost= 2.270245075 level0= 0.754922 pace0= 0.134654 alpha= 0.718767 beta= 0.463976
Epoch: 0600 cost= 2.270244837 level0= 0.755046 pace0= 0.134696 alpha= 0.719183 beta= 0.463917
Epoch: 0650 cost= 2.270244598 level0= 0.755125 pace0= 0.134727 alpha= 0.719464 beta= 0.463876
Epoch: 0700 cost= 2.270245075 level0= 0.755177 pace0= 0.13475 alpha= 0.719652 beta= 0.463849
Epoch: 0750 cost= 2.270244598 level0= 0.75521 pace0= 0.134766 alpha= 0.719779 beta= 0.463831
Epoch: 0800 cost= 2.270244837 level0= 0.755233 pace0= 0.134777 alpha= 0.719865 beta= 0.463818
Epoch: 0850 cost= 2.270244598 level0= 0.755247 pace0= 0.134784 alpha= 0.719923 beta= 0.46381
Epoch: 0900 cost= 2.270244598 level0= 0.755257 pace0= 0.13479 alpha= 0.719961 beta= 0.463804
Epoch: 0950 cost= 2.270244837 level0= 0.755264 pace0= 0.134793 alpha= 0.719987 beta= 0.4638
Epoch: 1000 cost= 2.270244598 level0= 0.755268 pace0= 0.134795 alpha= 0.720005 beta= 0.463798
Epoch: 1050 cost= 2.270244360 level0= 0.755271 pace0= 0.134797 alpha= 0.720017 beta= 0.463796
Epoch: 1100 cost= 2.270244837 level0= 0.755274 pace0= 0.134798 alpha= 0.720025 beta= 0.463795
Epoch: 1150 cost= 2.270244837 level0= 0.755275 pace0= 0.134799 alpha= 0.72003 beta= 0.463794
Epoch: 1200 cost= 2.270244837 level0= 0.755275 pace0= 0.134799 alpha= 0.720034 beta= 0.463794
Epoch: 1250 cost= 2.270244360 level0= 0.755276 pace0= 0.1348 alpha= 0.720037 beta= 0.463793
Epoch: 1300 cost= 2.270244837 level0= 0.755276 pace0= 0.1348 alpha= 0.720037 beta= 0.463793
Epoch: 1350 cost= 2.270244837 level0= 0.755276 pace0= 0.1348 alpha= 0.720037 beta= 0.463793
Epoch: 1400 cost= 2.270244837 level0= 0.755276 pace0= 0.1348 alpha= 0.720037 beta= 0.463793
Epoch: 1450 cost= 2.270244837 level0= 0.755276 pace0= 0.1348 alpha= 0.720037 beta= 0.463793
Epoch: 1500 cost= 2.270244837 level0= 0.755276 pace0= 0.1348 alpha= 0.720037 beta= 0.463793
Epoch: 1550 cost= 2.270244837 level0= 0.755276 pace0= 0.1348 alpha= 0.720037 beta= 0.463793
Epoch: 1600 cost= 2.270244837 level0= 0.755276 pace0= 0.1348 alpha= 0.720037 beta= 0.463793
Epoch: 1650 cost= 2.270244837 level0= 0.755276 pace0= 0.1348 alpha= 0.720037 beta= 0.463793
Epoch: 1700 cost= 2.270244837 level0= 0.755276 pace0= 0.1348 alpha= 0.720037 beta= 0.463793
Epoch: 1750 cost= 2.270244837 level0= 0.755276 pace0= 0.1348 alpha= 0.720037 beta= 0.463793
Epoch: 1800 cost= 2.270244837 level0= 0.755276 pace0= 0.1348 alpha= 0.720037 beta= 0.463793
Epoch: 1850 cost= 2.270244837 level0= 0.755276 pace0= 0.1348 alpha= 0.720037 beta= 0.463793
Epoch: 1900 cost= 2.270244837 level0= 0.755276 pace0= 0.1348 alpha= 0.720037 beta= 0.463793
Epoch: 1950 cost= 2.270244837 level0= 0.755276 pace0= 0.1348 alpha= 0.720037 beta= 0.463793
Epoch: 2000 cost= 2.270244837 level0= 0.755276 pace0= 0.1348 alpha= 0.720037 beta= 0.463793
Optimization Finished!
Training cost= 2.27024 level0= 0.755276 pace0= 0.1348 alpha= 0.720037 beta= 0.463793

BOOYAH

And we have another match! The results from R:

> ets(c(1, 1, 1, 2, 2, 2, 3, 4, 5, 6, 7, 8, 9, 10), model = 'AAN')
ETS(A,A,N) 

Call:
 ets(y = c(1, 1, 1, 2, 2, 2, 3, 4, 5, 6, 7, 8, 9, 10), model = "AAN") 

  Smoothing parameters:
    alpha = 0.7199 
    beta  = 0.4639 

  Initial states:
    l = 0.7554 
    b = 0.1348 

  sigma:  0.4027

     AIC     AICc      BIC 
21.47843 28.97843 24.67371

In [ ]: