In [10]:
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from theano.printing import pydotprint
import pickle
sns.set_style("dark")
sns.set_context("notebook")

In [16]:
from keras.layers import Input, Dense
from keras.models import Model, Sequential
from keras import regularizers

In [17]:
net_input = Input(shape=(2,))
perceptron  = Dense(8, activation='tanh', name='p1')(net_input)
perceptron2 = Dense(3, activation='tanh', name='p2')(perceptron)
# note: tanh output spans [-1, 1] while binary_crossentropy assumes [0, 1];
# this is why the decision surface is thresholded at 0 further down
out         = Dense(1, activation='tanh')(perceptron2)
xor = Model(input=net_input, output=out)
xor.compile(optimizer='adadelta', loss='binary_crossentropy')
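
For reference, the same topology expressed with the Sequential API (an equivalent sketch, not part of the original run):

seq = Sequential()
seq.add(Dense(8, input_dim=2, activation='tanh', name='p1'))
seq.add(Dense(3, activation='tanh', name='p2'))
seq.add(Dense(1, activation='tanh'))
seq.compile(optimizer='adadelta', loss='binary_crossentropy')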

In [18]:
with open('../data/xor.tuple', 'rb') as xtuple:
    x_train, y_train, x_test, y_test = pickle.load(xtuple)
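
The pickled tuple is not reproduced here; a minimal sketch of how a comparable dataset could be built, assuming points drawn uniformly from [-1, 1]^2 and labeled by the XOR of the coordinate signs (an assumption about the generator, sized to match the 5000/100 split in the training log below):

rng = np.random.RandomState(0)  # hypothetical generator, not the original data
pts = rng.uniform(-1, 1, size=(5100, 2)).astype(np.float32)
labels = ((pts[:, 0] > 0) ^ (pts[:, 1] > 0)).astype(np.float32)
x_train, y_train = pts[:5000], labels[:5000]
x_test, y_test = pts[5000:], labels[5000:]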

In [12]:
history = xor.fit(x_train, y_train,
                verbose=0,
                nb_epoch=2000,
                batch_size=256,
                shuffle=True,
                validation_data=(x_test, y_test))

In [13]:
plt.subplot(121)
for key, data in history.history.items():
    plt.plot(data, label=key)
plt.legend()

# evaluate the trained network on a grid to visualize its decision surface
x = np.arange(-1, 1, 0.01)
y = np.arange(-1, 1, 0.01)
xx, yy = np.meshgrid(x, y)
x1 = xx.reshape(-1, 1)
x2 = yy.reshape(-1, 1)
z = xor.predict(np.concatenate((x1, x2), axis=1))
z = z.reshape(len(y), len(x))
plt.subplot(122)
plt.imshow(z, cmap='coolwarm')


Out[13]:
<matplotlib.image.AxesImage at 0x7f9d58b02940>
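
By default imshow places the array origin at the top-left and labels the axes in pixel indices, so the surface appears flipped and unscaled relative to the data; a small adjustment (a sketch) maps it onto the [-1, 1]^2 domain:

plt.imshow(z, cmap='coolwarm', origin='lower', extent=(-1, 1, -1, 1))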

In [14]:
plt.plot(x_train[:, 0], x_train[:, 1], '.b')


Out[14]:
[<matplotlib.lines.Line2D at 0x7f9d55540b38>]

In [15]:
plt.imshow(z >= 0, cmap='coolwarm')  # threshold the tanh output at 0 to get the predicted class


Out[15]:
<matplotlib.image.AxesImage at 0x7f9d5549fef0>

In [15]:
import pickle
# NOTE: pickling a Keras model is fragile across versions and backends;
# it works here, but Keras's native save/load (shown below) is safer
with open('xor.keras', 'wb') as out:
    pickle.dump(xor, out)

In [16]:
with open('xor.keras' , 'rb') as kmodel:
    model = pickle.load(kmodel)
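
For comparison, the same round trip with Keras's native serialization (assuming a Keras version that provides model.save and load_model):

from keras.models import load_model
xor.save('xor.h5')            # stores architecture, weights, and optimizer state
model = load_model('xor.h5')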

In [23]:
list(map(lambda w: len(w.shape), xor.get_weights()))


Out[23]:
[2, 1, 2, 1, 2, 1]
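
get_weights() returns a flat list alternating kernel and bias per Dense layer, which is why the ranks read 2, 1, 2, 1, 2, 1; printing the shapes makes the correspondence explicit:

for layer_w in xor.get_weights():
    print(layer_w.shape)
# prints (2, 8) (8,) (8, 3) (3,) (3, 1) (1,) -- kernel/bias for p1, p2, and the output layer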

In [27]:
plt.imshow(model.get_weights()[0], interpolation='none')  # kernel of p1, shape (2, 8)


Out[27]:
<matplotlib.image.AxesImage at 0x7f4d4ed792e8>

In [35]:
plt.imshow(model.get_weights()[2], interpolation='none')  # kernel of p2, shape (8, 3)


Out[35]:
<matplotlib.image.AxesImage at 0x7f4d4ebf31d0>

In [36]:
plt.imshow(model.get_weights()[4], interpolation='none')  # kernel of the output layer, shape (3, 1)


Out[36]:
<matplotlib.image.AxesImage at 0x7f4d4eb456a0>

In [32]:
line_data = np.array([[0, 0], [10, 0]], np.float32)  # two probe points on the x-axis

In [33]:
line_data.shape  # sanity-check the probe array


Out[33]:
(2, 2)
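
To push the probe points through the restored model (a quick sketch; note that (10, 0) lies far outside the [-1, 1]^2 region the network was trained on):

model.predict(line_data)  # shape (2, 1), one prediction per probe point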

In [64]:
# wrap each sample in a length-1 sequence: (5000, 2) -> (5000, 1, 2),
# the (samples, timesteps, features) layout an LSTM expects
x_train = np.stack([[sample] for sample in x_train])
x_train


Out[64]:
array([[[ 0.25605177,  0.09945316]],

       [[ 0.09038142,  0.73530157]],

       [[ 0.37323466, -0.95365933]],

       ..., 
       [[ 0.16260626,  0.02399319]],

       [[ 0.8690958 , -0.79675192]],

       [[-0.60360096, -0.89927752]]])

In [65]:
# same (samples, timesteps, features) reshape for the test set
x_test = np.stack([[sample] for sample in x_test])
x_test


Out[65]:
array([[[-0.79131411, -0.77976044]],

       [[ 0.04424915, -0.86545411]],

       [[-0.8366925 ,  0.30229779]],

       ..., 
       [[ 0.54670455,  0.20725888]],

       [[ 0.29357553, -0.01766554]],

       [[ 0.79379224,  0.32578795]]])

In [9]:
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation
from keras.layers import Embedding
from keras.layers import LSTM

In [66]:
model = Sequential()
model.add(LSTM(32, input_shape=(1, 2)))
model.add(Dense(1))
# NOTE: softmax over a single unit is constant (it always outputs 1),
# so this model cannot learn -- hence the flat loss in the log below;
# 'sigmoid' is the conventional output activation for binary_crossentropy
model.add(Activation('softmax'))

model.compile(loss='binary_crossentropy',
              optimizer='rmsprop',
              metrics=['accuracy'])
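
For reference, a minimal corrected sketch with a sigmoid output (this is not the configuration that produced the log below):

fixed = Sequential()
fixed.add(LSTM(32, input_shape=(1, 2)))
fixed.add(Dense(1, activation='sigmoid'))  # sigmoid pairs correctly with binary_crossentropy
fixed.compile(loss='binary_crossentropy', optimizer='rmsprop', metrics=['accuracy'])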

In [60]:
model.predict(x_train).shape


Out[60]:
(5000, 1)

In [67]:
history = model.fit(x_train, y_train,
                verbose=1,
                nb_epoch=100,
                batch_size=256,
                shuffle=True,
                validation_data=(x_test, y_test))


Train on 5000 samples, validate on 100 samples
Epoch 1/100
5000/5000 [==============================] - 0s - loss: 8.1115 - acc: 0.4912 - val_loss: 8.7683 - val_acc: 0.4500
Epoch 2/100
5000/5000 [==============================] - 0s - loss: 8.1115 - acc: 0.4912 - val_loss: 8.7683 - val_acc: 0.4500
...
Epoch 100/100
5000/5000 [==============================] - 0s - loss: 8.1115 - acc: 0.4912 - val_loss: 8.7683 - val_acc: 0.4500

In [17]:
x_train.shape


Out[17]:
(5000, 2)
