In [14]:
import tensorflow as tf
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from sklearn.pipeline import Pipeline
from sklearn import datasets, linear_model
from sklearn import preprocessing
from sklearn.model_selection import train_test_split
In [31]:
df = pd.read_excel("data0505.xlsx", header=0, dtype=np.float64)
# Clean up the data: drop fully empty rows, zero-fill remaining NaNs,
# and round to 4 decimal places
df = df.dropna(how='all')
df = df.fillna(0)
df = df.round(4)
df.head()
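A quick, optional sanity check that the cleanup above left no missing values:
In [ ]:
# Sanity check: dropna/fillna should leave zero NaNs
print(df.shape)
print(df.isnull().sum().sum())  # expect 0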
In [22]:
# z-score normalization: zero mean, unit variance per column
df_normalized = (df - df.mean()) / df.std()
# Alternative: min-max scaling to [0, 1]
# min_max_scaler = preprocessing.MinMaxScaler()
# np_scaled = min_max_scaler.fit_transform(df)
# df_normalized = pd.DataFrame(np_scaled)
df_normalized.head()
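Z-scoring should leave every column with mean ≈ 0 and standard deviation ≈ 1; an optional check (a zero-variance column would have divided by zero above and show up as NaNs here):
In [ ]:
# Verify z-score normalization: per-column mean ~ 0, std ~ 1
print(df_normalized.mean().round(6))
print(df_normalized.std().round(6))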
In [17]:
x = np.array(df_normalized.iloc[:, 0:3])  # first three columns are SoC, SoH, and power
y = np.array(df_normalized.iloc[:, 5])    # delta SEI
X_train, X_test, Y_train, Y_test = train_test_split(
    x, y, test_size=0.2, random_state=42)
total_len = X_train.shape[0]
total_len
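An 80/20 split; optionally checking the shapes confirms the three feature columns the placeholders below expect:
In [ ]:
# Confirm split shapes: X_* should have 3 feature columns
print(X_train.shape, Y_train.shape)
print(X_test.shape, Y_test.shape)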
In [20]:
# Inspect one normalized target value at full precision
print('{0:.15f}'.format(y[1]))
In [8]:
# Parameters
learning_rate = 0.001
training_epochs = 50
batch_size = 100
display_step = 1
dropout_rate = 0.1  # declared but never wired into the graph below
# Network Parameters
n_hidden_1 = 10  # 1st layer number of units
n_hidden_2 = 5   # 2nd layer number of units
n_input = X_train.shape[1]  # 3 input features: SoC, SoH, power
n_classes = 1    # single regression output (delta SEI)
# tf Graph input
x = tf.placeholder("float", [None, n_input])
y = tf.placeholder("float", [None])
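dropout_rate is set above but never used. If dropout were wanted between the hidden layers, a minimal TF 1.x sketch would use tf.nn.dropout with a keep_prob placeholder (so it can be fed 1.0 at test time); this is illustrative only and not part of the model trained below:
In [ ]:
# Hypothetical: dropout after each hidden activation (not used below)
keep_prob = tf.placeholder("float")  # feed 1.0 - dropout_rate in training, 1.0 at test
# inside multilayer_perceptron, after each ReLU:
#   layer_1 = tf.nn.dropout(layer_1, keep_prob)
#   layer_2 = tf.nn.dropout(layer_2, keep_prob)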
In [9]:
# Create model
def multilayer_perceptron(x, weights, biases):
    # Hidden layer with ReLU activation
    layer_1 = tf.add(tf.matmul(x, weights['h1']), biases['b1'])
    layer_1 = tf.nn.relu(layer_1)
    # Hidden layer with ReLU activation
    layer_2 = tf.add(tf.matmul(layer_1, weights['h2']), biases['b2'])
    layer_2 = tf.nn.relu(layer_2)
    # Output layer with linear activation (regression output)
    out_layer = tf.matmul(layer_2, weights['out']) + biases['out']
    return out_layer
In [10]:
# Store layers' weights & biases (normal init: mean 0, stddev 0.1)
weights = {
    'h1': tf.Variable(tf.random_normal([n_input, n_hidden_1], 0, 0.1)),
    'h2': tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2], 0, 0.1)),
    'out': tf.Variable(tf.random_normal([n_hidden_2, n_classes], 0, 0.1))
}
biases = {
    'b1': tf.Variable(tf.random_normal([n_hidden_1], 0, 0.1)),
    'b2': tf.Variable(tf.random_normal([n_hidden_2], 0, 0.1)),
    'out': tf.Variable(tf.random_normal([n_classes], 0, 0.1))
}
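A fixed 0.1 stddev works for a network this small; for deeper variants, Xavier/Glorot initialization is a common alternative. A sketch using the TF 1.x contrib initializer (illustrative only; the variable name is hypothetical and it is not used below):
In [ ]:
# Hypothetical alternative: Xavier/Glorot initialization (TF 1.x contrib API)
xavier = tf.contrib.layers.xavier_initializer()
h1_xavier = tf.get_variable("h1_xavier", shape=[n_input, n_hidden_1],
                            initializer=xavier)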
In [11]:
# Construct model
pred = multilayer_perceptron(x, weights, biases)
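Printing the output tensor is a cheap check that the graph wires up with the expected [batch, 1] output shape:
In [ ]:
print(pred)  # Tensor with shape (?, 1)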
In [12]:
# Define loss (mean squared error) and optimizer
# pred has shape [batch, 1]; squeeze it so the subtraction matches y's shape [batch]
cost = tf.reduce_mean(tf.square(tf.squeeze(pred) - y))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)
# Launch the graph
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    # Training cycle
    for epoch in range(training_epochs):
        avg_cost = 0.
        total_batch = int(total_len / batch_size)
        # Loop over all full batches
        for i in range(total_batch):
            batch_x = X_train[i*batch_size:(i+1)*batch_size]
            batch_y = Y_train[i*batch_size:(i+1)*batch_size]
            # Run optimization op (backprop) and cost op (to get loss value)
            _, c, p = sess.run([optimizer, cost, pred],
                               feed_dict={x: batch_x, y: batch_y})
            # Compute average loss
            avg_cost += c / total_batch
        # Sample predictions from the last batch of the epoch
        label_value = batch_y
        estimate = p.flatten()  # [batch, 1] -> [batch]
        err = label_value - estimate
        print("num batch:", total_batch)
        # Display logs per epoch step
        if epoch % display_step == 0:
            print("Epoch:", '%04d' % (epoch+1), "cost=",
                  "{:.9f}".format(avg_cost))
            print("[*]----------------------------")
            for i in range(3):
                print("label value:", label_value[i],
                      "estimated value:", estimate[i])
            print("[*]============================")
    print("Optimization Finished!")
    # Evaluate mean squared error on the held-out test set
    mse = tf.reduce_mean(tf.square(tf.squeeze(pred) - y))
    print("MSE:", mse.eval({x: X_test, y: Y_test}))
In [ ]: