In [1]:
from __future__ import print_function

%matplotlib notebook
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

import tensorflow as tf

In [2]:
df=pd.read_excel('Type C calibration_corrected_temp.xlsx')
df.head()


Out[2]:
       T        mV
0  77.15 -1.870959
1  77.25 -1.870931
2  77.35 -1.870902
3  77.45 -1.870873
4  77.55 -1.870843

In [3]:
# rng = numpy.random

# # Parameters
# learning_rate = 0.01
# training_epochs = 1000
# display_step = 50

# # Training data: thermocouple voltage (mV) as input, temperature (T) as target
# train_X = df['mV'].values
# train_Y = df['T'].values


# n_samples = train_X.shape[0]

# # tf Graph Input
# X = tf.placeholder("float")
# Y = tf.placeholder("float")

# # Set model weights
# W = tf.Variable(rng.randn(), name="weight")
# b = tf.Variable(rng.randn(), name="bias")

# # Construct a linear model
# pred = tf.add(tf.multiply(X, W), b)

# # Mean squared error
# cost = tf.reduce_sum(tf.pow(pred-Y, 2))/(2*n_samples)

# # Adam optimizer
# #  Note: minimize() knows to modify W and b because Variable objects are trainable=True by default
# optimizer = tf.train.AdamOptimizer(learning_rate).minimize(cost)

# # Initialize the variables (i.e. assign their default value)
# init = tf.global_variables_initializer()

# # Start training
# with tf.Session() as sess:

#     # Run the initializer
#     sess.run(init)

#     # Fit all training data
#     for epoch in range(training_epochs):
#         for (x, y) in zip(train_X, train_Y):
#             sess.run(optimizer, feed_dict={X: x, Y: y})

#         # Display logs per epoch step
#         if (epoch+1) % display_step == 0:
#             c = sess.run(cost, feed_dict={X: train_X, Y:train_Y})
#             print("Epoch:", '%04d' % (epoch+1), "cost=", "{:.9f}".format(c), \
#                 "W=", sess.run(W), "b=", sess.run(b))

#     print("Optimization Finished!")
#     training_cost = sess.run(cost, feed_dict={X: train_X, Y: train_Y})
#     print("Training cost=", training_cost, "W=", sess.run(W), "b=", sess.run(b), '\n')

#     # Graphic display
#     plt.plot(train_X, train_Y, 'ro', label='Original data')
#     plt.plot(train_X, sess.run(W) * train_X + sess.run(b), label='Fitted line')
#     plt.legend()
#     plt.show()

#     # Testing on the same calibration data
#     test_X = df['mV'].values
#     test_Y = df['T'].values
#     print("Testing... (Mean square loss Comparison)")
#     testing_cost = sess.run(
#         tf.reduce_sum(tf.pow(pred - Y, 2)) / (2 * test_X.shape[0]),
#         feed_dict={X: test_X, Y: test_Y})  # same function as cost above
#     print("Testing cost=", testing_cost)
#     print("Absolute mean square loss difference:", abs(
#         training_cost - testing_cost))

#     plt.plot(test_X, test_Y, 'bo', label='Testing data')
#     plt.plot(train_X, sess.run(W) * train_X + sess.run(b), label='Fitted line')
#     plt.legend()
# plt.show()
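
The commented-out block above follows the classic TensorFlow 1.x linear-regression tutorial (placeholders, Session, explicit training loop), an API that no longer exists in TensorFlow 2.x. As a quick sanity check on the same straight-line fit, a minimal closed-form least-squares sketch in NumPy (assuming only the df loaded above) would be:

In [ ]:
# Sketch only: closed-form least squares for the same line, no TensorFlow required.
# Assumes df['mV'] (input) and df['T'] (target) from the cells above.
X = df['mV'].values
Y = df['T'].values
W, b = np.polyfit(X, Y, deg=1)           # slope and intercept of the best-fit line
mse = np.mean((W * X + b - Y) ** 2)      # mean squared error of that line
print('W = {0:.4f}, b = {1:.4f}, MSE = {2:.4f}'.format(W, b, mse))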

In [13]:
from sklearn import linear_model
from sklearn.metrics import mean_squared_error, r2_score

# Ordinary least-squares fit of T against mV
reg = linear_model.LinearRegression()
reg.fit(df['mV'].values.reshape(-1,1), df['T'].values.reshape(-1,1))

y_pred = reg.predict(df['mV'].values.reshape(-1,1))
print('reg coef: {0}\nintercept: {1}'.format(reg.coef_, reg.intercept_))
print('MSE: {0}'.format(mean_squared_error(df['T'].values.reshape(-1,1), y_pred)))


reg coef: [[ 59.31890329]]
intercept: [ 241.15599006]
MSE: 2512.688085268828
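
r2_score is imported above but never used; as a small hedged addition (reusing y_pred and df from the cell above), the coefficient of determination can be printed alongside the MSE:

In [ ]:
# Sketch only: R^2 of the linear fit, using the r2_score already imported above.
print('R^2: {0:.6f}'.format(r2_score(df['T'].values.reshape(-1, 1), y_pred)))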

In [7]:
fig, ax = plt.subplots()
ax.plot(df['mV'], df['T'], df['mV'], y_pred)
plt.title('Linear Reg')


Out[7]:
<matplotlib.text.Text at 0x1cb15ea8390>

In [17]:
# Ridge regression
alpha = 0.5
reg = linear_model.Ridge(alpha=alpha)
reg.fit(df['mV'].values.reshape(-1,1), df['T'].values.reshape(-1,1))

y_pred = reg.predict(df['mV'].values.reshape(-1,1))
print('reg coef: {0}\nintercept: {1}'.format(reg.coef_, reg.intercept_))
print('MSE: {0}'.format(mean_squared_error(df['T'].values.reshape(-1,1), y_pred)))
fig, ax = plt.subplots()
ax.plot(df['mV'], df['T'], df['mV'], y_pred)
plt.title('Ridge Reg: a={0}'.format(alpha))


reg coef: [[ 59.31889534]]
intercept: [ 241.15613631]
MSE: 2512.6880852782165
Out[17]:
<matplotlib.text.Text at 0x1cb1a322400>
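
With a single feature and this much data, the ridge penalty barely changes the solution (the coefficient and MSE above are essentially identical to plain least squares). If regularisation were actually wanted, one option is to let cross-validation pick alpha; the snippet below is only a sketch using RidgeCV, which is not part of the original notebook:

In [ ]:
# Sketch only: choose alpha by cross-validation instead of fixing alpha=0.5.
from sklearn.linear_model import RidgeCV
reg_cv = RidgeCV(alphas=[0.01, 0.1, 0.5, 1.0, 10.0])
reg_cv.fit(df['mV'].values.reshape(-1, 1), df['T'].values.reshape(-1, 1))
print('chosen alpha: {0}'.format(reg_cv.alpha_))
print('coef: {0} intercept: {1}'.format(reg_cv.coef_, reg_cv.intercept_))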

In [49]:
from sklearn.preprocessing import PolynomialFeatures
from sklearn.linear_model import LinearRegression
from sklearn.pipeline import Pipeline

def poly(coefs, df):
    # Evaluate sum_k coefs[k] * mV**k on the mV column
    pol = 0
    for k, v in enumerate(coefs):
        pol = v*(df['mV']**k) + pol
    return pol

for deg in range(10):
    model = Pipeline([('poly', PolynomialFeatures(degree=deg)),
                      ('linear', LinearRegression(fit_intercept=False))])
    x = df['mV']
    y = df['T']
    # Note: tanh(x)/tanh(x) is identically 1, so the model is fitted to a constant
    # feature while poly(coefs, df) is evaluated on the real mV values, which is why
    # the MSE below grows without bound as the degree increases.
    model = model.fit(np.tanh(x[:, np.newaxis])*1/(np.tanh(x[:, np.newaxis])), y)
    coefs = model.named_steps['linear'].coef_

    print('deg: {0} MSE: {1}'.format(deg, mean_squared_error(df['T'].values.reshape(-1,1), poly(coefs, df))))
fig, ax = plt.subplots()
plt.title('poly')
ax.plot(df['mV'], df['T'], df['mV'], poly(coefs, df))


deg: 0 MSE: 525426.7491669025
deg: 1 MSE: 189159634.7829909
deg: 2 MSE: 91003333444.9167
deg: 3 MSE: 53126034929506.45
deg: 4 MSE: 3.755982412240932e+16
deg: 5 MSE: 3.00519519567236e+19
deg: 6 MSE: 2.6168403187044738e+22
deg: 7 MSE: 2.4232607048578173e+25
deg: 8 MSE: 2.351450947106488e+28
deg: 9 MSE: 2.367248440036068e+31
Out[49]:
[<matplotlib.lines.Line2D at 0x1cb1c496f28>,
 <matplotlib.lines.Line2D at 0x1cb1c49c160>]
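
The feature passed to model.fit above, tanh(x)/tanh(x), is identically 1, so each polynomial is fitted to a constant while poly(coefs, df) is then evaluated on the real mV values; that mismatch is why the MSE explodes with the degree. A sketch of the fit as presumably intended, using the raw mV values as the feature, would be:

In [ ]:
# Sketch only: fit the polynomial to the mV values themselves rather than to tanh(x)/tanh(x).
for deg in range(10):
    model = Pipeline([('poly', PolynomialFeatures(degree=deg)),
                      ('linear', LinearRegression(fit_intercept=False))])
    model.fit(df['mV'].values.reshape(-1, 1), df['T'].values)
    coefs = model.named_steps['linear'].coef_
    print('deg: {0} MSE: {1}'.format(deg, mean_squared_error(df['T'].values, poly(coefs, df))))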

In [45]:
fig, ax = plt.subplots()
plt.title('poly')
ax.plot(df['mV'], df['T'], df['mV'], poly(coefs, df))


Out[45]:
[<matplotlib.lines.Line2D at 0x1cb17e5ee80>,
 <matplotlib.lines.Line2D at 0x1cb1bd470b8>]

In [38]:
d = poly(coefs, df) - df['T']
fig, ax = plt.subplots()
plt.title('difference')
ax.plot(df['mV'], d)


Out[38]:
[<matplotlib.lines.Line2D at 0x1cb14d49f28>]

In [50]:
for deg in range(10):
    model = Pipeline([('poly', PolynomialFeatures(degree=deg)),
                      ('linear', LinearRegression(fit_intercept=False))])
    x = df['mV']
    y = d   # fit the residual of the previous polynomial fit
    model = model.fit(x[:, np.newaxis], y)
    coefs = model.named_steps['linear'].coef_

    # Note: the MSE is computed against df['T'] rather than against the residual d
    # that was fitted, which is why it stays around 2.3e6 for every degree.
    print('deg: {0} MSE: {1}'.format(deg, mean_squared_error(df['T'].values.reshape(-1,1), poly(coefs, df))))
fig, ax = plt.subplots()
plt.title('difference fitted')
ax.plot(df['mV'], d, df['mV'], poly(coefs, df))


deg: 0 MSE: 2301975.7070148173
deg: 1 MSE: 2302518.0820074873
deg: 2 MSE: 2302550.609237209
deg: 3 MSE: 2302565.2202231213
deg: 4 MSE: 2302564.1429112577
deg: 5 MSE: 2302564.777543645
deg: 6 MSE: 2302564.7232570793
deg: 7 MSE: 2302564.7387156147
deg: 8 MSE: 2302564.7414372335
deg: 9 MSE: 2302565.3165300167
Out[50]:
[<matplotlib.lines.Line2D at 0x1cb1c515668>,
 <matplotlib.lines.Line2D at 0x1cb1c515860>]
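
The MSE printed above compares the residual fit against the original temperatures df['T'] rather than against the residual d it was trained on, which is why it stays near 2.3e6 for every degree. A sketch of the comparison against the fitted target would be:

In [ ]:
# Sketch only: evaluate the residual fit against the residual it was actually trained on.
print('residual-fit MSE: {0}'.format(mean_squared_error(d, poly(coefs, df))))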

In [57]:
from sklearn.preprocessing import MinMaxScaler
minmaxscaler = MinMaxScaler()
minmaxscaler.fit(df['T'].values.reshape(-1,1))
t_train = minmaxscaler.transform(df['T'].values.reshape(-1,1))

minmaxscaler.fit(df['mV'].values.reshape(-1,1))
mv_train = minmaxscaler.transform(df['mV'].values.reshape(-1,1))
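
One caveat with the cell above: the same MinMaxScaler instance is refitted on mV after scaling T, so the T scaling parameters are lost and predictions cannot later be mapped back to temperature with inverse_transform. A sketch with one scaler per variable (t_scaler and mv_scaler are names introduced here, not part of the original notebook):

In [ ]:
# Sketch only: keep separate scalers so predictions can be inverse-transformed back to kelvin.
t_scaler = MinMaxScaler().fit(df['T'].values.reshape(-1, 1))
mv_scaler = MinMaxScaler().fit(df['mV'].values.reshape(-1, 1))
t_train = t_scaler.transform(df['T'].values.reshape(-1, 1))
mv_train = mv_scaler.transform(df['mV'].values.reshape(-1, 1))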

In [62]:
fig,ax = plt.subplots()
ax.plot(mv_train, t_train)
plt.xlabel('mV norm')
plt.ylabel('T norm')


Out[62]:
<matplotlib.text.Text at 0x1cb1c594c18>

In [92]:
from sklearn.neural_network import MLPRegressor as mlp
reg = mlp(hidden_layer_sizes=(1000, 3), solver='sgd', tol=1e-10, activation='tanh',
          learning_rate='constant', verbose=True, warm_start=False, shuffle=True)
reg.fit(mv_train, t_train.ravel())
print('MSE: {0}'.format(mean_squared_error(t_train.ravel(), reg.predict(mv_train))))
fig, ax = plt.subplots()
ax.plot(mv_train, t_train, mv_train, reg.predict(mv_train))


Iteration 1, loss = 0.04147188
Iteration 2, loss = 0.00836848
Iteration 3, loss = 0.00168463
Iteration 4, loss = 0.00042258
Iteration 5, loss = 0.00020305
...
Iteration 199, loss = 0.00013287
Iteration 200, loss = 0.00013280
C:\Users\manolis\Anaconda3\lib\site-packages\sklearn\neural_network\multilayer_perceptron.py:563: ConvergenceWarning: Stochastic Optimizer: Maximum iterations reached and the optimization hasn't converged yet.
  % (), ConvergenceWarning)
MSE: 0.0002602359064573629
Out[92]:
[<matplotlib.lines.Line2D at 0x1cb22b30390>,
 <matplotlib.lines.Line2D at 0x1cb22bc5f60>]
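
The optimiser stops at the default max_iter=200 and raises the ConvergenceWarning above, and the normalised MSE of 2.6e-4 says little about the error in temperature units. A sketch that trains longer and reports the error back in kelvin, assuming the t_scaler introduced in the earlier scaling sketch (not part of the original notebook):

In [ ]:
# Sketch only: raise the iteration limit and convert the error back to temperature units
# via the (assumed) t_scaler from the earlier per-variable scaling sketch.
reg = mlp(hidden_layer_sizes=(1000, 3), solver='sgd', tol=1e-10, activation='tanh',
          learning_rate='constant', max_iter=2000, shuffle=True)
reg.fit(mv_train, t_train.ravel())
t_pred = t_scaler.inverse_transform(reg.predict(mv_train).reshape(-1, 1))
print('MSE in temperature units: {0}'.format(
    mean_squared_error(df['T'].values.reshape(-1, 1), t_pred)))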

In [ ]: