In [1]:
# simple logistic regression
# 2017-03-11 jkang
# Python3.5
# Tensorflow1.0.1
# ref:
# - http://web.stanford.edu/class/cs20si/
# - iris dataset from Matlab Neural Network example
#
# Input: iris data (4 features)
# Output: iris label (3 categories)

import tensorflow as tf
import scipy.io as sio
import numpy as np
import matplotlib.pyplot as plt

In [2]:
# Hyperparameters
learning_rate = 0.01
batch_size = 10
max_epochs = 30

# Load the iris dataset from Matlab .mat files and transpose so that
# rows are samples (4 feature columns, 3 one-hot label columns).
irisInputs = sio.loadmat('irisInputs.mat')['irisInputs'].T
irisTargets = sio.loadmat('irisTargets')['irisTargets'].T

# Placeholders for one mini-batch of features / one-hot targets
X = tf.placeholder(tf.float32, [batch_size, 4], name='irisInputs')
Y = tf.placeholder(tf.float32, [batch_size, 3], name='irisTargets')

# Model parameters (zero-initialized): logits = X @ w + b
w = tf.Variable(np.zeros((4, 3)), name='weight', dtype=np.float32)
b = tf.Variable(np.zeros((1, 3)), name='bias', dtype=np.float32)

logits = tf.matmul(X, w) + b

# Softmax cross-entropy, averaged over the mini-batch
loss = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=Y))
optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss)

def softmax(x, axis=-1):
    """Numerically stable softmax.

    Shifts by the max before exponentiating so large logits cannot
    overflow ``np.exp``.

    Parameters
    ----------
    x : array_like
        Input scores (logits).
    axis : int, optional
        Axis along which to normalize (default: last axis). The previous
        implementation normalized over the entire array, which is
        equivalent for the 1-D and single-row 2-D inputs used in this
        notebook but wrong for true batched input; ``axis=-1`` handles
        both correctly.

    Returns
    -------
    numpy.ndarray
        Same shape as ``x``; entries along ``axis`` sum to 1.
    """
    x = np.asarray(x)
    # Subtracting the per-slice max leaves the result unchanged
    # (softmax is shift-invariant) but keeps exp() in a safe range.
    ex_val = np.exp(x - np.max(x, axis=axis, keepdims=True))
    return ex_val / ex_val.sum(axis=axis, keepdims=True)

In [3]:
with tf.Session() as sess:
    # Write the graph for TensorBoard, then run mini-batch SGD.
    writer = tf.summary.FileWriter('./graph', sess.graph)
    sess.run(tf.global_variables_initializer())
    # Number of full batches per epoch (any remainder samples are skipped)
    n_batches = irisTargets.shape[0] // batch_size
    for epoch in range(max_epochs):
        epoch_loss = 0.0
        for batch_idx in range(n_batches):
            start = batch_idx * batch_size
            stop = start + batch_size
            _, batch_loss = sess.run(
                [optimizer, loss],
                feed_dict={X: irisInputs[start:stop],
                           Y: irisTargets[start:stop]})
            epoch_loss += batch_loss
        print('Average loss at epoch {0}: {1}'.format(
            epoch, epoch_loss / n_batches))
    print('Optimization finished!')
    # Fetch learned parameters as numpy arrays for the test cell below.
    weights, bias = sess.run([w, b])
    writer.close()


Average loss at epoch 0: 1.0566088040669759
Average loss at epoch 1: 0.9699530641237895
Average loss at epoch 2: 0.9008939743041993
Average loss at epoch 3: 0.844332234064738
Average loss at epoch 4: 0.7976635773976644
Average loss at epoch 5: 0.7587910453478496
Average loss at epoch 6: 0.726058288415273
Average loss at epoch 7: 0.6981823921203614
Average loss at epoch 8: 0.6741775631904602
Average loss at epoch 9: 0.6532862265904744
Average loss at epoch 10: 0.6349237163861593
Average loss at epoch 11: 0.6186354239781697
Average loss at epoch 12: 0.6040651202201843
Average loss at epoch 13: 0.5909309069315593
Average loss at epoch 14: 0.5790078043937683
Average loss at epoch 15: 0.568114572763443
Average loss at epoch 16: 0.5581039170424144
Average loss at epoch 17: 0.5488551616668701
Average loss at epoch 18: 0.5402686436971028
Average loss at epoch 19: 0.5322613716125488
Average loss at epoch 20: 0.5247637848059337
Average loss at epoch 21: 0.5177172323067983
Average loss at epoch 22: 0.5110718568166097
Average loss at epoch 23: 0.5047851463158926
Average loss at epoch 24: 0.498820432027181
Average loss at epoch 25: 0.4931462287902832
Average loss at epoch 26: 0.4877350707848867
Average loss at epoch 27: 0.4825630704561869
Average loss at epoch 28: 0.477609250942866
Average loss at epoch 29: 0.4728552341461182
Optimization finished!

In [4]:
# Testing: score one randomly chosen sample with the learned parameters.
# NOTE(review): no random seed is set, so the chosen sample varies per run.
sample_idx = np.random.permutation(irisInputs.shape[0])[0]
x_data = irisInputs[sample_idx]
y_data = irisTargets[sample_idx]
# Forward pass in plain numpy using the weights/bias pulled from the session
scores = np.dot(x_data, weights) + bias
pred = softmax(scores)
print('Y:', y_data)
print('pred:', np.argmax(pred) + 1, 'th element')


Y: [0 0 1]
pred: 3 th element