In [1]:
import pandas as pd
from sklearn.preprocessing import LabelEncoder
from keras.models import Sequential
from keras import layers

import numpy as np


Using TensorFlow backend.

In [2]:
dataset = pd.read_csv("Iris.csv")

In [3]:
dataset.head()


Out[3]:
Id SepalLengthCm SepalWidthCm PetalLengthCm PetalWidthCm Species
0 1 5.1 3.5 1.4 0.2 Iris-setosa
1 2 4.9 3.0 1.4 0.2 Iris-setosa
2 3 4.7 3.2 1.3 0.2 Iris-setosa
3 4 4.6 3.1 1.5 0.2 Iris-setosa
4 5 5.0 3.6 1.4 0.2 Iris-setosa

In [4]:
dataset.info()


<class 'pandas.core.frame.DataFrame'>
RangeIndex: 150 entries, 0 to 149
Data columns (total 6 columns):
Id               150 non-null int64
SepalLengthCm    150 non-null float64
SepalWidthCm     150 non-null float64
PetalLengthCm    150 non-null float64
PetalWidthCm     150 non-null float64
Species          150 non-null object
dtypes: float64(4), int64(1), object(1)
memory usage: 7.1+ KB

In [6]:
dataset["Species"].sum()


Out[6]:
'Iris-setosaIris-setosaIris-setosaIris-setosaIris-setosaIris-setosaIris-setosaIris-setosaIris-setosaIris-setosaIris-setosaIris-setosaIris-setosaIris-setosaIris-setosaIris-setosaIris-setosaIris-setosaIris-setosaIris-setosaIris-setosaIris-setosaIris-setosaIris-setosaIris-setosaIris-setosaIris-setosaIris-setosaIris-setosaIris-setosaIris-setosaIris-setosaIris-setosaIris-setosaIris-setosaIris-setosaIris-setosaIris-setosaIris-setosaIris-setosaIris-setosaIris-setosaIris-setosaIris-setosaIris-setosaIris-setosaIris-setosaIris-setosaIris-setosaIris-setosaIris-versicolorIris-versicolorIris-versicolorIris-versicolorIris-versicolorIris-versicolorIris-versicolorIris-versicolorIris-versicolorIris-versicolorIris-versicolorIris-versicolorIris-versicolorIris-versicolorIris-versicolorIris-versicolorIris-versicolorIris-versicolorIris-versicolorIris-versicolorIris-versicolorIris-versicolorIris-versicolorIris-versicolorIris-versicolorIris-versicolorIris-versicolorIris-versicolorIris-versicolorIris-versicolorIris-versicolorIris-versicolorIris-versicolorIris-versicolorIris-versicolorIris-versicolorIris-versicolorIris-versicolorIris-versicolorIris-versicolorIris-versicolorIris-versicolorIris-versicolorIris-versicolorIris-versicolorIris-versicolorIris-versicolorIris-versicolorIris-versicolorIris-versicolorIris-virginicaIris-virginicaIris-virginicaIris-virginicaIris-virginicaIris-virginicaIris-virginicaIris-virginicaIris-virginicaIris-virginicaIris-virginicaIris-virginicaIris-virginicaIris-virginicaIris-virginicaIris-virginicaIris-virginicaIris-virginicaIris-virginicaIris-virginicaIris-virginicaIris-virginicaIris-virginicaIris-virginicaIris-virginicaIris-virginicaIris-virginicaIris-virginicaIris-virginicaIris-virginicaIris-virginicaIris-virginicaIris-virginicaIris-virginicaIris-virginicaIris-virginicaIris-virginicaIris-virginicaIris-virginicaIris-virginicaIris-virginicaIris-virginicaIris-virginicaIris-virginicaIris-virginicaIris-virginicaIris-virginicaIris-virginicaIris-virginicaIris-virginica'
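
Summing strings works but is clumsy; a tidier check of the class balance (a hypothetical cell, not run above) is value_counts(), which for the standard Iris data reports 50 rows per species:

dataset["Species"].value_counts()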

In [7]:
train_data = dataset[["SepalLengthCm", "SepalWidthCm", "PetalLengthCm", "PetalWidthCm"]]
train_labels = dataset[["Species"]]

In [8]:
train_data.shape


Out[8]:
(150, 4)

In [9]:
train_labels.shape


Out[9]:
(150, 1)

In [10]:
train_data.head()


Out[10]:
SepalLengthCm SepalWidthCm PetalLengthCm PetalWidthCm
0 5.1 3.5 1.4 0.2
1 4.9 3.0 1.4 0.2
2 4.7 3.2 1.3 0.2
3 4.6 3.1 1.5 0.2
4 5.0 3.6 1.4 0.2

In [11]:
mean = np.mean(train_data)   # column-wise means (one value per feature)
std = np.std(train_data)     # column-wise population standard deviations (ddof=0)

In [12]:
train_data -= mean
train_data /= std

In [13]:
train_data.head()


Out[13]:
SepalLengthCm SepalWidthCm PetalLengthCm PetalWidthCm
0 -0.900681 1.032057 -1.341272 -1.312977
1 -1.143017 -0.124958 -1.341272 -1.312977
2 -1.385353 0.337848 -1.398138 -1.312977
3 -1.506521 0.106445 -1.284407 -1.312977
4 -1.021849 1.263460 -1.341272 -1.312977
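
One caveat: the mean and standard deviation above are computed over all 150 rows, including the ones held out for testing later. A leakage-free sketch, using the same 140/10 split made below, would fit the statistics on the training rows only and apply them to both splits:

mean = train_data[:140].mean()
std = train_data[:140].std(ddof=0)
train_data = (train_data - mean) / std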

In [14]:
train_labels.head()


Out[14]:
Species
0 Iris-setosa
1 Iris-setosa
2 Iris-setosa
3 Iris-setosa
4 Iris-setosa

In [15]:
encoder = LabelEncoder()
# LabelEncoder expects 1-D input; ravel() flattens the (150, 1) label column,
# which otherwise triggers scikit-learn's DataConversionWarning.
encoder.fit(train_labels.values.ravel())
encoded_labels = encoder.transform(train_labels.values.ravel())

In [16]:
encoded_labels


Out[16]:
array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
       0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
       0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
       1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
       1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
       2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
       2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2])

In [17]:
def vectorize_data(values, dimension = 3):
    """One-hot encode integer class labels into a (n_samples, dimension) matrix."""
    results = np.zeros((len(values), dimension))
    for i, value in enumerate(values):
        results[i, value] = 1.
    return results
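
For reference, Keras ships an equivalent helper, so the same one-hot matrix could be produced in one line (assuming this Keras version's np_utils module):

from keras.utils.np_utils import to_categorical
labels = to_categorical(encoded_labels)   # also shape (150, 3)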

In [18]:
labels = vectorize_data(encoded_labels)

In [19]:
labels[57]


Out[19]:
array([ 0.,  1.,  0.])

In [20]:
train_data.shape


Out[20]:
(150, 4)

In [21]:
labels.shape


Out[21]:
(150, 3)

In [22]:
# Iris.csv is sorted by species, so this unshuffled slice leaves only
# Iris-virginica rows in the test set (see the shuffled alternative below).
x_train = train_data[:140]
x_test = train_data[140:]

train_labs = labels[:140]
test_labs = labels[140:]

In [23]:
x_train.shape


Out[23]:
(140, 4)

In [24]:
x_test.shape


Out[24]:
(10, 4)
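
Because the file is sorted by species, a shuffled, stratified split would give a more representative test set. A sketch with scikit-learn (assuming a version that provides model_selection, i.e. 0.18+):

from sklearn.model_selection import train_test_split
x_train, x_test, train_labs, test_labs = train_test_split(
    train_data, labels, test_size=10, stratify=encoded_labels, random_state=0)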

In [25]:
def build_neural_network():
    nn = Sequential()
    nn.add(layers.Dense(64, activation = "relu", input_shape = (x_train.shape[1], )))
    nn.add(layers.Dense(64, activation = "relu"))
    nn.add(layers.Dense(3, activation = "softmax"))  # one probability per class
    nn.compile(loss = "categorical_crossentropy", optimizer = "rmsprop", metrics = ["accuracy"])
    return nn
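
A quick way to sanity-check the architecture before training (model.summary() is part of the Keras API): the three Dense layers contribute 4*64 + 64 = 320, 64*64 + 64 = 4,160, and 64*3 + 3 = 195 weights, 4,675 trainable parameters in total.

model = build_neural_network()
model.summary()   # should report 4,675 trainable parameters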

In [26]:
x_train = np.array(x_train)
x_test = np.array(x_test)

train_labs = np.array(train_labs)
test_labs = np.array(test_labs)

In [27]:
x_train[0]


Out[27]:
array([-0.90068117,  1.03205722, -1.3412724 , -1.31297673])

In [28]:
model = build_neural_network()

In [39]:
# Note: the cell counter jumps from In [28] to In [39], and the log below starts
# near-converged; fit() resumes from the model's current weights, so this cell
# continues earlier (unshown) training rather than starting fresh.
history = model.fit(x_train, train_labs, epochs = 100)


Epoch 1/100
140/140 [==============================] - 0s - loss: 0.0363 - acc: 0.9929     
Epoch 2/100
140/140 [==============================] - 0s - loss: 0.0384 - acc: 0.9857     
Epoch 3/100
140/140 [==============================] - 0s - loss: 0.0446 - acc: 0.9857     
Epoch 4/100
140/140 [==============================] - 0s - loss: 0.0371 - acc: 0.9929     
Epoch 5/100
140/140 [==============================] - 0s - loss: 0.0363 - acc: 0.9929     
Epoch 6/100
140/140 [==============================] - 0s - loss: 0.0370 - acc: 0.9857     
Epoch 7/100
140/140 [==============================] - 0s - loss: 0.0338 - acc: 0.9857     
Epoch 8/100
140/140 [==============================] - 0s - loss: 0.0364 - acc: 0.9857     
Epoch 9/100
140/140 [==============================] - 0s - loss: 0.0342 - acc: 0.9857     
Epoch 10/100
140/140 [==============================] - 0s - loss: 0.0365 - acc: 0.9857     
Epoch 11/100
140/140 [==============================] - 0s - loss: 0.0340 - acc: 0.9929     
Epoch 12/100
140/140 [==============================] - 0s - loss: 0.0361 - acc: 0.9857     
Epoch 13/100
140/140 [==============================] - 0s - loss: 0.0364 - acc: 0.9857     
Epoch 14/100
140/140 [==============================] - 0s - loss: 0.0366 - acc: 0.9857     
Epoch 15/100
140/140 [==============================] - 0s - loss: 0.0313 - acc: 0.9929     
Epoch 16/100
140/140 [==============================] - 0s - loss: 0.0321 - acc: 0.9929     
Epoch 17/100
140/140 [==============================] - 0s - loss: 0.0369 - acc: 0.9786     
Epoch 18/100
140/140 [==============================] - 0s - loss: 0.0327 - acc: 0.9857     
Epoch 19/100
140/140 [==============================] - 0s - loss: 0.0356 - acc: 0.9857     
Epoch 20/100
140/140 [==============================] - 0s - loss: 0.0328 - acc: 0.9857     
Epoch 21/100
140/140 [==============================] - 0s - loss: 0.0323 - acc: 0.9929     
Epoch 22/100
140/140 [==============================] - 0s - loss: 0.0319 - acc: 0.9929     
Epoch 23/100
140/140 [==============================] - 0s - loss: 0.0329 - acc: 0.9929     
Epoch 24/100
140/140 [==============================] - 0s - loss: 0.0309 - acc: 0.9929     
Epoch 25/100
140/140 [==============================] - 0s - loss: 0.0301 - acc: 0.9929     
Epoch 26/100
140/140 [==============================] - 0s - loss: 0.0321 - acc: 0.9857     
Epoch 27/100
140/140 [==============================] - 0s - loss: 0.0347 - acc: 0.9857     
Epoch 28/100
140/140 [==============================] - 0s - loss: 0.0318 - acc: 0.9929     
Epoch 29/100
140/140 [==============================] - 0s - loss: 0.0309 - acc: 0.9929     
Epoch 30/100
140/140 [==============================] - 0s - loss: 0.0330 - acc: 0.9929     
Epoch 31/100
140/140 [==============================] - 0s - loss: 0.0339 - acc: 0.9857     
Epoch 32/100
140/140 [==============================] - 0s - loss: 0.0322 - acc: 0.9857     
Epoch 33/100
140/140 [==============================] - 0s - loss: 0.0303 - acc: 0.9929     
Epoch 34/100
140/140 [==============================] - 0s - loss: 0.0353 - acc: 0.9857     
Epoch 35/100
140/140 [==============================] - 0s - loss: 0.0306 - acc: 0.9857     
Epoch 36/100
140/140 [==============================] - 0s - loss: 0.0289 - acc: 0.9857     
Epoch 37/100
140/140 [==============================] - 0s - loss: 0.0319 - acc: 0.9929     
Epoch 38/100
140/140 [==============================] - 0s - loss: 0.0312 - acc: 0.9857     
Epoch 39/100
140/140 [==============================] - 0s - loss: 0.0323 - acc: 0.9857     
Epoch 40/100
140/140 [==============================] - 0s - loss: 0.0289 - acc: 0.9929     
Epoch 41/100
140/140 [==============================] - 0s - loss: 0.0289 - acc: 0.9857     
Epoch 42/100
140/140 [==============================] - 0s - loss: 0.0293 - acc: 0.9929     
Epoch 43/100
140/140 [==============================] - 0s - loss: 0.0318 - acc: 0.9929     
Epoch 44/100
140/140 [==============================] - 0s - loss: 0.0296 - acc: 0.9857     
Epoch 45/100
140/140 [==============================] - 0s - loss: 0.0281 - acc: 0.9929     
Epoch 46/100
140/140 [==============================] - 0s - loss: 0.0288 - acc: 0.9929     
Epoch 47/100
140/140 [==============================] - 0s - loss: 0.0276 - acc: 0.9929     
Epoch 48/100
140/140 [==============================] - 0s - loss: 0.0296 - acc: 0.9857     
Epoch 49/100
140/140 [==============================] - 0s - loss: 0.0283 - acc: 0.9929     
Epoch 50/100
140/140 [==============================] - 0s - loss: 0.0311 - acc: 0.9786     
Epoch 51/100
140/140 [==============================] - 0s - loss: 0.0285 - acc: 0.9929     
Epoch 52/100
140/140 [==============================] - 0s - loss: 0.0267 - acc: 0.9929     
Epoch 53/100
140/140 [==============================] - 0s - loss: 0.0274 - acc: 0.9929     
Epoch 54/100
140/140 [==============================] - 0s - loss: 0.0270 - acc: 0.9929     
Epoch 55/100
140/140 [==============================] - 0s - loss: 0.0311 - acc: 0.9929     
Epoch 56/100
140/140 [==============================] - 0s - loss: 0.0259 - acc: 0.9929     
Epoch 57/100
140/140 [==============================] - 0s - loss: 0.0323 - acc: 0.9857     
Epoch 58/100
140/140 [==============================] - 0s - loss: 0.0258 - acc: 0.9929     
Epoch 59/100
140/140 [==============================] - 0s - loss: 0.0280 - acc: 0.9929     
Epoch 60/100
140/140 [==============================] - 0s - loss: 0.0277 - acc: 0.9857     
Epoch 61/100
140/140 [==============================] - 0s - loss: 0.0249 - acc: 0.9929     
Epoch 62/100
140/140 [==============================] - 0s - loss: 0.0254 - acc: 0.9857     
Epoch 63/100
140/140 [==============================] - 0s - loss: 0.0265 - acc: 0.9929     
Epoch 64/100
140/140 [==============================] - 0s - loss: 0.0277 - acc: 0.9929     
Epoch 65/100
140/140 [==============================] - 0s - loss: 0.0252 - acc: 0.9929     
Epoch 66/100
140/140 [==============================] - 0s - loss: 0.0279 - acc: 0.9857         
Epoch 67/100
140/140 [==============================] - 0s - loss: 0.0249 - acc: 0.9929     
Epoch 68/100
140/140 [==============================] - 0s - loss: 0.0268 - acc: 0.9929     
Epoch 69/100
140/140 [==============================] - 0s - loss: 0.0251 - acc: 0.9929     
Epoch 70/100
140/140 [==============================] - 0s - loss: 0.0234 - acc: 0.9929     
Epoch 71/100
140/140 [==============================] - 0s - loss: 0.0235 - acc: 0.9929     
Epoch 72/100
140/140 [==============================] - 0s - loss: 0.0239 - acc: 0.9929     
Epoch 73/100
140/140 [==============================] - 0s - loss: 0.0236 - acc: 0.9929     
Epoch 74/100
140/140 [==============================] - 0s - loss: 0.0260 - acc: 0.9929     
Epoch 75/100
140/140 [==============================] - 0s - loss: 0.0263 - acc: 0.9929     
Epoch 76/100
140/140 [==============================] - 0s - loss: 0.0247 - acc: 0.9929     
Epoch 77/100
140/140 [==============================] - 0s - loss: 0.0246 - acc: 0.9929     
Epoch 78/100
140/140 [==============================] - 0s - loss: 0.0247 - acc: 0.9929     
Epoch 79/100
140/140 [==============================] - 0s - loss: 0.0259 - acc: 0.9929     
Epoch 80/100
140/140 [==============================] - 0s - loss: 0.0218 - acc: 0.9929     
Epoch 81/100
140/140 [==============================] - 0s - loss: 0.0221 - acc: 0.9929     
Epoch 82/100
140/140 [==============================] - 0s - loss: 0.0216 - acc: 0.9929     
Epoch 83/100
140/140 [==============================] - 0s - loss: 0.0249 - acc: 0.9929     
Epoch 84/100
140/140 [==============================] - 0s - loss: 0.0218 - acc: 0.9929     
Epoch 85/100
140/140 [==============================] - 0s - loss: 0.0255 - acc: 0.9857     
Epoch 86/100
140/140 [==============================] - 0s - loss: 0.0272 - acc: 0.9857     
Epoch 87/100
140/140 [==============================] - 0s - loss: 0.0207 - acc: 0.9929     
Epoch 88/100
140/140 [==============================] - 0s - loss: 0.0216 - acc: 0.9929     
Epoch 89/100
140/140 [==============================] - 0s - loss: 0.0224 - acc: 0.9929     
Epoch 90/100
140/140 [==============================] - 0s - loss: 0.0228 - acc: 0.9929     
Epoch 91/100
140/140 [==============================] - 0s - loss: 0.0224 - acc: 0.9929         
Epoch 92/100
140/140 [==============================] - 0s - loss: 0.0218 - acc: 0.9929     
Epoch 93/100
140/140 [==============================] - 0s - loss: 0.0215 - acc: 0.9929     
Epoch 94/100
140/140 [==============================] - 0s - loss: 0.0233 - acc: 0.9929     
Epoch 95/100
140/140 [==============================] - 0s - loss: 0.0210 - acc: 0.9929     
Epoch 96/100
140/140 [==============================] - 0s - loss: 0.0236 - acc: 0.9929     
Epoch 97/100
140/140 [==============================] - 0s - loss: 0.0229 - acc: 0.9929     
Epoch 98/100
140/140 [==============================] - 0s - loss: 0.0206 - acc: 0.9929     
Epoch 99/100
140/140 [==============================] - 0s - loss: 0.0193 - acc: 0.9929     
Epoch 100/100
140/140 [==============================] - 0s - loss: 0.0206 - acc: 0.9929     
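
With training finished, the ten held-out rows can be scored. A minimal sketch (evaluate() returns the loss followed by each compiled metric, here accuracy):

test_loss, test_acc = model.evaluate(x_test, test_labs)
print(test_loss, test_acc)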

In [41]:
type(history)


Out[41]:
keras.callbacks.History

In [42]:
results = history.history

In [44]:
results.keys()


Out[44]:
dict_keys(['loss', 'acc'])

In [45]:
import matplotlib.pyplot as plt

In [46]:
train_loss = results["loss"]
epochs = range(1, len(train_loss) + 1)   # derive the x-axis from the history length

plt.plot(epochs, train_loss, 'b')
plt.xlabel("Epochs")
plt.ylabel("Loss")
plt.title("Training loss")
plt.show()
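
Since Out[44] showed the history also records per-epoch accuracy under the 'acc' key, a companion accuracy curve takes the same shape:

train_acc = results["acc"]
plt.plot(epochs, train_acc, 'b')
plt.xlabel("Epochs")
plt.ylabel("Accuracy")
plt.title("Training accuracy")
plt.show()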


