In [31]:
# Simple linear regression y = m*x + c, computed by hand (least squares).
x = [1.1, 4.3, 7.4, 3.45, 2.54, 3.3, 9.7, 6.77, 0.65, 4.22]
y = [3.2, 6.66, 3.43, 0.06, 3.44, 2.2, 3.43, 2.32, 4.11, 1.65]
sumofx = sum(x)
sumofy = sum(y)
# Mean
meanx = sumofx / len(x)
meany = sumofy / len(y)
print("Mean of x is :", meanx)
print("Mean of y is :", meany)
# Variance (population variance: divide by n)
varofx = sum((xi - meanx) ** 2 for xi in x) / len(x)
print("Variance of x is :", varofx)
# Covariance — must be divided by n so it is on the same scale as the
# variance. (The original left `cov` as the raw sum of cross-products,
# which made the slope m = cov/var too large by a factor of n; the
# TensorFlow version of this computation below does divide by n.)
cov = sum((xi - meanx) * (yi - meany) for xi, yi in zip(x, y)) / len(x)
print("Covariance is :", cov)
# Slope: m = cov(x, y) / var(x)
valm = cov / varofx
print("Value of m is :", valm)
# Intercept: c = mean(y) - m * mean(x)
valc = meany - valm * meanx
print("Value of c is :", valc)
In [63]:
## USING TENSORFLOW (TF 1.x graph/session API)
# Recomputes the closed-form least-squares slope/intercept using TF ops.
import numpy as np
import tensorflow as tf

arr1 = np.asarray([1.1, 4.3, 7.4, 3.45, 2.54, 3.3, 9.7, 6.77, 0.65, 4.22])
arr2 = np.asarray([3.2, 6.66, 3.43, 0.06, 3.44, 2.2, 3.43, 2.32, 4.11, 1.65])
x = tf.constant(arr1, dtype=tf.float32)
y = tf.constant(arr2, dtype=tf.float32)

# Build the whole graph first, then evaluate everything in ONE session.
# (The original opened a fresh tf.Session for almost every tensor,
# which is wasteful — the graph is static, so one session suffices.)

# Mean
meanx = tf.reduce_mean(x)
meany = tf.reduce_mean(y)

# Variance — tf.nn.moments returns (mean, variance) along the given axes.
mean, var = tf.nn.moments(x, axes=[0])

# Covariance: sum of the products of the deviations, divided by n.
xi = tf.subtract(x, meanx)
yi = tf.subtract(y, meany)
mult = tf.multiply(xi, yi)
val = tf.reduce_sum(mult)
lenn = len(arr1)
cov = tf.divide(val, lenn)

# Slope m = cov / var, intercept c = mean(y) - m * mean(x)
m = tf.divide(cov, var)
a = tf.multiply(m, meanx)
c = tf.subtract(meany, a)

with tf.Session() as ses:
    print(ses.run(x))
    print(ses.run(y))
    print("Mean of x is :", ses.run(meanx))
    print("Mean of y is :", ses.run(meany))
    # Bug fix: the original printed the stale `ans` here (which still held
    # the mean of y) under the label "Mean of x" instead of the value
    # computed by tf.nn.moments.
    print("Mean of x is :", ses.run(mean))
    # Bug fix: label read "Variance of is :" — the "x" was missing.
    print("Variance of x is :", ses.run(var))
    print(ses.run(xi))
    print(ses.run(yi))
    print(ses.run(mult))
    print(ses.run(val))
    print("Covariance is :", ses.run(cov))
    print("The value of m is :", ses.run(m))
    print("The value of c is :", ses.run(c))
In [65]:
# Fit the same line by stochastic gradient descent instead of the
# closed-form formula. Relies on `arr1`, `arr2`, and `tf` from the
# previous cell.
import numpy as np
import matplotlib.pyplot as plt

# Hyperparameters
learning_rate = 0.0001
training_epochs = 1000
display_step = 50
n_samples = len(arr1)

# Seed the RNG so the random weight/bias initialisation — and therefore the
# printed costs and the fitted line — are reproducible across runs.
# (The original had no seed, so every run produced different results.)
rangee = np.random
rangee.seed(42)

# Placeholders fed one (x, y) training pair at a time.
X = tf.placeholder("float")
Y = tf.placeholder("float")

# Model weights, randomly initialised.
W = tf.Variable(rangee.randn(), name="weight")
b = tf.Variable(rangee.randn(), name="bias")

# Linear model: pred = W*X + b
pred = tf.add(tf.multiply(X, W), b)

# Mean squared error; the extra 1/2 factor is conventional (it cancels the
# exponent when differentiating) and only rescales the reported cost.
cost = tf.reduce_sum(tf.pow(pred - Y, 2)) / (2 * n_samples)

# Gradient descent
optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)

# Initializing the variables
init = tf.global_variables_initializer()

# Launch the graph
with tf.Session() as sess:
    sess.run(init)

    # Stochastic gradient descent: one optimizer step per training sample.
    for epoch in range(training_epochs):
        for (p, r) in zip(arr1, arr2):
            sess.run(optimizer, feed_dict={X: p, Y: r})

        # Display logs every `display_step` epochs.
        if (epoch + 1) % display_step == 0:
            c = sess.run(cost, feed_dict={X: arr1, Y: arr2})
            print("Epoch:", '%04d' % (epoch + 1), "cost=", "{:.9f}".format(c),
                  "W=", sess.run(W), "b=", sess.run(b))

    print("Optimization Finished!")
    training_cost = sess.run(cost, feed_dict={X: arr1, Y: arr2})
    print("Training cost=", training_cost, "W=", sess.run(W), "b=", sess.run(b), '\n')

    # Graphic display: scatter the data and overlay the fitted line.
    plt.plot(arr1, arr2, 'ro', label='Original data')
    plt.plot(arr1, sess.run(W) * arr1 + sess.run(b), label='Fitted line')
    plt.legend()
    plt.show()
In [ ]: