In [3]:
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
import math

In [4]:
# Quick sanity check (not used below): np.delete along axis 2 removes
# index positions 2 and 3, shrinking the last dimension from 10 to 8.
a = np.zeros([50, 4, 10])
a = np.delete(a, range(2, 4), 2)
print(a.shape)
print(range(2, 4))


(50, 4, 8)
[2, 3]

In [5]:
NSAMPLE = 1000
# Inputs: NSAMPLE uniform draws on [-10.5, 10.5], shaped (NSAMPLE, 1).
x_data = np.float32(np.random.uniform(-10.5, 10.5, (1, NSAMPLE))).T
# One shared Gaussian noise draw (std 0.5) per sample.
r_data = np.float32(np.random.normal(0, 0.5, size=(NSAMPLE, 1)))

# Two noisy sinusoidal targets of the same input, stacked as columns.
y_data_1 = np.float32(np.sin(0.75*x_data)*7.0 + x_data*0.5 + r_data*0.5)
y_data_2 = np.float32(np.sin(0.5*x_data)*3.0 - x_data*0.5 + r_data*1.0)
y_data = np.hstack((y_data_1, y_data_2))

print(x_data.shape)
print(y_data[:10,:])
plt.figure(figsize=(8, 8))
plt.plot(x_data, y_data[:,0], 'ro', x_data, y_data[:,1], 'bo', alpha=0.3)
plt.show()


(1000, 1)
[[ -5.97456694  -1.75575125]
 [  4.87567329   0.92984724]
 [  5.35202646   1.87261534]
 [  3.78638029   2.8225162 ]
 [-12.14080238   7.75624847]
 [ -5.71792126  -1.24774313]
 [  1.07428157  -6.22118235]
 [ -9.92322826   7.80824661]
 [ -3.36284876   6.96821976]
 [  7.76152658   1.5701437 ]]
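In closed form, the two target columns generated above are

$$y_1 = 7\sin(0.75\,x) + 0.5\,x + 0.5\,\varepsilon, \qquad y_2 = 3\sin(0.5\,x) - 0.5\,x + \varepsilon, \qquad \varepsilon \sim \mathcal{N}(0,\, 0.5^2),$$

with the same noise draw $\varepsilon$ shared by both columns, so the network has to fit two curves whose noise scales differ by a factor of two.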

In [6]:
# Placeholders for the 1-D input and the two regression targets.
x = tf.placeholder(dtype=tf.float32, shape=[None,1])
y = tf.placeholder(dtype=tf.float32, shape=[None,2])

# One tanh hidden layer. The output layer has 4 units: two means
# and two log standard deviations, one pair per target column.
NHIDDEN = 20
W = tf.Variable(tf.random_normal([1,NHIDDEN], stddev=0.1, dtype=tf.float32))
b = tf.Variable(tf.random_normal([NHIDDEN], stddev=0.1, dtype=tf.float32))

W_out = tf.Variable(tf.random_normal([NHIDDEN,4], stddev=0.1, dtype=tf.float32))
b_out = tf.Variable(tf.random_normal([4], stddev=0.1, dtype=tf.float32))

hidden_layer = tf.nn.tanh(tf.matmul(x, W) + b)

y_out = tf.matmul(hidden_layer, W_out) + b_out

# Split the 4 network outputs along axis 1 into means and log-stds,
# then exponentiate the latter so the standard deviations are positive.
out_mu, out_sigma = tf.split(1, 2, y_out)
out_sigma = tf.exp(out_sigma)
# Repack as [mu_1, mu_2, sigma_1, sigma_2] for evaluation at test time.
y_out = tf.concat(1, [out_mu, out_sigma])

# Gaussian negative log-likelihood with per-output mean and std,
# dropping the constant 0.5*log(2*pi); the two terms are also
# tracked separately for the training log.
loss_left = tf.reduce_mean(tf.log(out_sigma))
loss_right = tf.reduce_mean(0.5*tf.square(tf.div(tf.sub(out_mu, y), out_sigma)))

lossfunc = tf.reduce_mean(tf.log(out_sigma) + 0.5*tf.square(tf.div(tf.sub(out_mu, y), out_sigma)))

train_op = tf.train.AdamOptimizer(learning_rate=0.01).minimize(lossfunc)
#train_op = tf.train.RMSPropOptimizer(learning_rate=0.1, decay=0.8).minimize(lossfunc)

sess = tf.InteractiveSession()
sess.run(tf.initialize_all_variables())

NEPOCH = 5000
for i in range(NEPOCH):
    # Full-batch gradient step; also fetch the two loss components for logging.
    l, _, ll, lr = sess.run([lossfunc, train_op, loss_left, loss_right], feed_dict={x: x_data, y: y_data})
    if i % 100 == 0:
        print("Step: %d, loss: %.8f (%.4f + %.4f)" % (i, l, ll, lr))

print('Finished training')

# Evaluate on a dense input grid; y_test columns are [mu_1, mu_2, sigma_1, sigma_2].
x_test = np.float32(np.arange(-10.5, 10.5, 0.1))
x_test = x_test.reshape(x_test.size, 1)
y_test, os = sess.run([y_out, out_sigma], feed_dict={x: x_test})

sess.close()


Step: 0, loss: 16.94827271 (-0.0861 + 17.0344)
Step: 100, loss: 1.80989039 (1.2891 + 0.5208)
Step: 200, loss: 1.75252545 (1.2467 + 0.5059)
Step: 300, loss: 1.72710156 (1.2279 + 0.4992)
Step: 400, loss: 1.70832789 (1.2135 + 0.4948)
Step: 500, loss: 1.67492712 (1.1850 + 0.4899)
Step: 600, loss: 1.61547744 (1.1273 + 0.4882)
Step: 700, loss: 1.55106044 (1.0573 + 0.4938)
Step: 800, loss: 1.46432340 (0.9687 + 0.4957)
Step: 900, loss: 1.39176810 (0.8924 + 0.4993)
Step: 1000, loss: 1.31197774 (0.8121 + 0.4999)
Step: 1100, loss: 1.03023601 (0.5257 + 0.5045)
Step: 1200, loss: 0.95259273 (0.4534 + 0.4992)
Step: 1300, loss: 0.94336963 (0.4444 + 0.4989)
Step: 1400, loss: 0.93702114 (0.4387 + 0.4984)
Step: 1500, loss: 0.93016392 (0.4313 + 0.4989)
Step: 1600, loss: 0.92575574 (0.4243 + 0.5015)
Step: 1700, loss: 0.90958869 (0.4113 + 0.4983)
Step: 1800, loss: 0.89283264 (0.3951 + 0.4978)
Step: 1900, loss: 0.87062174 (0.3735 + 0.4971)
Step: 2000, loss: 0.84424782 (0.3478 + 0.4965)
Step: 2100, loss: 0.81442422 (0.3179 + 0.4965)
Step: 2200, loss: 0.78122795 (0.2823 + 0.4989)
Step: 2300, loss: 0.72681814 (0.2306 + 0.4962)
Step: 2400, loss: 0.64509618 (0.1498 + 0.4953)
Step: 2500, loss: 0.53525412 (0.0388 + 0.4965)
Step: 2600, loss: 0.42161059 (-0.0784 + 0.5000)
Step: 2700, loss: 0.20124882 (-0.2969 + 0.4982)
Step: 2800, loss: 0.00357413 (-0.4958 + 0.4994)
Step: 2900, loss: -0.06837961 (-0.5668 + 0.4984)
Step: 3000, loss: -0.12303396 (-0.6225 + 0.4995)
Step: 3100, loss: -0.18642032 (-0.6789 + 0.4925)
Step: 3200, loss: -0.25653100 (-0.7494 + 0.4929)
Step: 3300, loss: -0.37231714 (-0.8628 + 0.4905)
Step: 3400, loss: -0.49531707 (-0.9956 + 0.5003)
Step: 3500, loss: -0.52873009 (-1.0278 + 0.4991)
Step: 3600, loss: -0.54847705 (-1.0478 + 0.4993)
Step: 3700, loss: -0.55199248 (-1.0490 + 0.4970)
Step: 3800, loss: -0.56558657 (-1.0630 + 0.4975)
Step: 3900, loss: -0.57232559 (-1.0704 + 0.4981)
Step: 4000, loss: -0.56957704 (-1.0772 + 0.5076)
Step: 4100, loss: -0.57759547 (-1.0757 + 0.4981)
Step: 4200, loss: -0.57853609 (-1.0778 + 0.4992)
Step: 4300, loss: -0.58030486 (-1.0777 + 0.4974)
Step: 4400, loss: -0.56489366 (-1.0649 + 0.5000)
Step: 4500, loss: -0.58258116 (-1.0834 + 0.5008)
Step: 4600, loss: -0.58337504 (-1.0832 + 0.4998)
Step: 4700, loss: -0.58392066 (-1.0867 + 0.5027)
Step: 4800, loss: -0.58308315 (-1.0750 + 0.4919)
Step: 4900, loss: -0.58368051 (-1.0851 + 0.5014)
Finished training
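
The quantity minimized above is the average Gaussian negative log-likelihood with an input-dependent mean and standard deviation, with the constant term dropped since it does not affect the gradients:

$$-\log \mathcal{N}\!\left(y \mid \mu(x), \sigma(x)^2\right) = \log \sigma(x) + \frac{(y - \mu(x))^2}{2\,\sigma(x)^2} + \tfrac{1}{2}\log 2\pi.$$

loss_left and loss_right report the two retained terms separately. Note that loss_right hovers around 0.5 for the whole run: once $\sigma$ matches the residual spread, $(y-\mu)/\sigma$ is roughly standard normal and $\mathbb{E}\big[\tfrac{1}{2}((y-\mu)/\sigma)^2\big] = \tfrac{1}{2}$, so all remaining progress shows up in the $\log\sigma$ term.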

In [7]:
# Overlay the predicted means (green) on the training data (red/blue).
print(y_test[:30,:])
plt.figure(figsize=(8, 8))
plt.plot(x_data, y_data[:,0], 'ro', x_data, y_data[:,1], 'bo',
         x_test, y_test[:,0], 'g*', x_test, y_test[:,1], 'g*', alpha=0.3)
plt.show()


[[-12.54631996   8.02966785   0.40164357   0.62812573]
 [-12.353755     8.01272392   0.38443363   0.61238438]
 [-12.14631844   7.99498606   0.36765271   0.59671307]
 [-11.92306614   7.97613096   0.35136986   0.58117574]
 [-11.68302345   7.95576811   0.33565611   0.56584245]
 [-11.42520809   7.93345833   0.32058221   0.55078894]
 [-11.14863014   7.90868807   0.30621743   0.53609502]
 [-10.85233116   7.88089418   0.29262814   0.52184325]
 [-10.53533173   7.8494525    0.27987498   0.50811708]
 [-10.19673729   7.8137002    0.26801389   0.49500003]
 [ -9.83569908   7.77293396   0.25709203   0.48257238]
 [ -9.45149231   7.72643518   0.24714954   0.47091025]
 [ -9.04355431   7.67348719   0.23821703   0.46008292]
 [ -8.61152649   7.61339664   0.23031615   0.45015109]
 [ -8.15535831   7.54553223   0.22345874   0.44116485]
 [ -7.67535639   7.46933794   0.21764745   0.43316233]
 [ -7.17229223   7.38437843   0.21287447   0.42616722]
 [ -6.64746618   7.2903657    0.2091224    0.42018875]
 [ -6.1027689    7.18717146   0.2063628    0.41521969]
 [ -5.54075384   7.07486296   0.20455673   0.41123652]
 [ -4.96460199   6.95369196   0.20365347   0.40819928]
 [ -4.37813616   6.82409716   0.20359091   0.40605354]
 [ -3.7856946    6.68668842   0.20429516   0.40473133]
 [ -3.19204044   6.54219723   0.20568112   0.40415478]
 [ -2.60217047   6.39145756   0.20765421   0.40423989]
 [ -2.02112293   6.23532104   0.21011239   0.40490091]
 [ -1.45379293   6.07464123   0.21294895   0.40605497]
 [ -0.90474069   5.91017866   0.2160565    0.40762803]
 [ -0.3780216    5.74259138   0.21933089   0.409558  ]
 [  0.12291443   5.5723815    0.22267501   0.41179964]]
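
The first two columns printed above are the predicted means for the two targets and the last two are the predicted standard deviations. A quick way to check whether the learned $\sigma$ is sensible is to shade a $\pm\sigma$ band around each mean. A minimal sketch reusing x_test and y_test from the cells above (the band plot is an illustration added here, not part of the original run):

xs = x_test[:, 0]
plt.figure(figsize=(8, 8))
for k, color in [(0, 'r'), (1, 'b')]:
    mu, sigma = y_test[:, k], y_test[:, k + 2]    # mean and std for target k
    plt.plot(xs, mu, color + '-')                 # predicted mean curve
    plt.fill_between(xs, mu - sigma, mu + sigma,  # +/- one standard deviation
                     color=color, alpha=0.2)
plt.show()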