In [47]:
# import common APIs
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import seaborn as sns
import os
from sklearn.preprocessing import MinMaxScaler, StandardScaler
from sklearn.decomposition import PCA
from sklearn.metrics import classification_report, confusion_matrix, precision_recall_curve, auc, roc_auc_score, roc_curve

In [120]:
# Data observation
filepath = '/Users/mac/Desktop/Kaggle_datasets/Pokemon/'
filename01 = 'Pokemon721.csv'

df_full = pd.read_csv(os.path.join(filepath, filename01))
df_full.head()


Out[120]:
# Name Type 1 Type 2 Total HP Attack Defense Sp. Atk Sp. Def Speed Generation Legendary
0 1 Bulbasaur Grass Poison 318 45 49 49 65 65 45 1 False
1 2 Ivysaur Grass Poison 405 60 62 63 80 80 60 1 False
2 3 Venusaur Grass Poison 525 80 82 83 100 100 80 1 False
3 3 VenusaurMega Venusaur Grass Poison 625 80 100 123 122 120 80 1 False
4 4 Charmander Fire NaN 309 39 52 43 60 50 65 1 False

In [4]:
df_full.info()


<class 'pandas.core.frame.DataFrame'>
RangeIndex: 800 entries, 0 to 799
Data columns (total 13 columns):
#             800 non-null int64
Name          800 non-null object
Type 1        800 non-null object
Type 2        414 non-null object
Total         800 non-null int64
HP            800 non-null int64
Attack        800 non-null int64
Defense       800 non-null int64
Sp. Atk       800 non-null int64
Sp. Def       800 non-null int64
Speed         800 non-null int64
Generation    800 non-null int64
Legendary     800 non-null bool
dtypes: bool(1), int64(9), object(3)
memory usage: 75.9+ KB
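Type 2 is the only column with missing values (414 non-null out of 800): those are the single-typed Pokemon. A quick check, a minimal sketch assuming df_full from above:

# count Pokemon with no secondary type
print(df_full['Type 2'].isnull().sum())  # 386 = 800 - 414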

In [9]:
df_full.columns


Out[9]:
Index(['#', 'Name', 'Type 1', 'Type 2', 'Total', 'HP', 'Attack', 'Defense',
       'Sp. Atk', 'Sp. Def', 'Speed', 'Generation', 'Legendary'],
      dtype='object')

In [6]:
sns.jointplot(x="HP", y="Attack", data=df_full)
plt.show()



In [7]:
sns.jointplot(x="Attack", y="Defense", data=df_full)
plt.show()



In [8]:
sns.jointplot(x="Attack", y="Sp. Atk", data=df_full)
plt.show()



In [12]:
# keep only the numeric stat columns
num_cols = ['HP', 'Attack', 'Defense','Sp. Atk', 'Sp. Def', 'Speed']
df_num = df_full[num_cols]

In [19]:
plt.figure(figsize=(8,6))
sns.boxplot(data=df_num)
plt.show()



In [13]:
sns.pairplot(df_num)
plt.show()



In [125]:
plt.figure(figsize=(6,6))  # figsize adjusts the plot size
sns.set(font_scale=1.25)
hm = sns.heatmap(df_num.corr(), cbar=True, annot=True, square=True, fmt='.2f', annot_kws={'size': 10},
                 cmap='rainbow')
hm.xaxis.set_ticks_position('top')
plt.show()



In [36]:
pkmn = pd.melt(df_full, id_vars=["Name", "Type 1", "Type 2"], value_vars=num_cols)
pkmn.head()


Out[36]:
Name Type 1 Type 2 variable value
0 Bulbasaur Grass Poison HP 45
1 Ivysaur Grass Poison HP 60
2 Venusaur Grass Poison HP 80
3 VenusaurMega Venusaur Grass Poison HP 80
4 Charmander Fire NaN HP 39
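pd.melt stacks the six stat columns into long form, one row per (Pokemon, stat) pair, so the reshaped frame should have 800 x 6 = 4800 rows. A quick sanity check, assuming pkmn from above:

print(pkmn.shape)  # (4800, 5): Name, Type 1, Type 2, variable, value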

In [38]:
plt.figure(figsize=(10,10))
sns.swarmplot(x='variable', y="value", data=pkmn, hue="Type 1");
plt.show()



In [41]:
plt.figure(figsize=(12,10))
plt.ylim(0, 275)
sns.swarmplot(x='variable', y="value", data=pkmn, hue="Type 1", dodge=True, size=7)
plt.legend(bbox_to_anchor=(1, 1), loc=2, borderaxespad=0.);
plt.show()



In [46]:
sns.set_style("whitegrid")
with sns.color_palette([
    "#8ED752", "#F95643", "#53AFFE", "#C3D221", "#BBBDAF",
    "#AD5CA2", "#F8E64E", "#F0CA42", "#F9AEFE", "#A35449",
    "#FB61B4", "#CDBD72", "#7673DA", "#66EBFF", "#8B76FF",
    "#8E6856", "#C3C1D7", "#75A4F9"], n_colors=18, desat=.9):
    plt.figure(figsize=(12,10))
    plt.ylim(0, 275)
    sns.swarmplot(x="variable", y="value", data=pkmn, hue="Type 1", dodge=True, size=7)
    plt.legend(bbox_to_anchor=(1, 1), loc=2, borderaxespad=0.);
    plt.show()


PCA


In [48]:
scaler = StandardScaler().fit(df_num)
df_scaled = scaler.transform(df_num)

print(df_scaled[:,0].mean())  # zero (or very close)
print(df_scaled[:,0].std())  # 1 (or very close)


-2.48689957516e-16
1.0

In [50]:
pca = PCA(n_components=0.8)  # consider enough components to explain 80% of the variance
pca.fit(df_scaled)
pcscores = pd.DataFrame(pca.transform(df_scaled))
pcscores.columns = ['PC'+str(i+1) for i in range(len(pcscores.columns))]
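With a float n_components, scikit-learn keeps the smallest number of components whose cumulative explained variance reaches that fraction; here that turns out to be four. A quick check on the fitted pca object above:

print(pca.n_components_)  # 4 components retained
print(pca.explained_variance_ratio_)  # variance share per component
print(pca.explained_variance_ratio_.sum())  # >= 0.80 by construction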

In [52]:
pcscores.head()


Out[52]:
PC1 PC2 PC3 PC4
0 -1.556375 -0.021482 0.666504 0.184176
1 -0.362867 -0.050269 0.667913 0.269254
2 1.280152 -0.062720 0.623914 0.331391
3 2.620916 0.704263 0.995538 -0.199321
4 -1.758284 -0.706179 0.411454 -0.268602

In [55]:
pca.components_  # component loadings: the weight of each original variable in each PC


Out[55]:
array([[ 0.38988584,  0.43925373,  0.36374733,  0.45716229,  0.4485704 ,
         0.33544048],
       [ 0.08483455, -0.01182493,  0.62878867, -0.30541446,  0.2390967 ,
        -0.66846305],
       [-0.47192614, -0.59415339,  0.06933913,  0.30561186,  0.56559403,
         0.07851327],
       [ 0.71769131, -0.4058359 , -0.41923734,  0.14751659,  0.18544475,
        -0.29716251]])

In [53]:
# num_cols = ['HP', 'Attack', 'Defense','Sp. Atk', 'Sp. Def', 'Speed']
loadings = pd.DataFrame(pca.components_, columns=num_cols)
loadings.index = ['PC'+str(i+1) for i in range(len(pcscores.columns))]

In [54]:
loadings


Out[54]:
HP Attack Defense Sp. Atk Sp. Def Speed
PC1 0.389886 0.439254 0.363747 0.457162 0.448570 0.335440
PC2 0.084835 -0.011825 0.628789 -0.305414 0.239097 -0.668463
PC3 -0.471926 -0.594153 0.069339 0.305612 0.565594 0.078513
PC4 0.717691 -0.405836 -0.419237 0.147517 0.185445 -0.297163
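Each principal component is a unit vector, so the squared loadings in each row sum to 1; the squared-loading heatmap below can therefore be read as each variable's share of a component. A one-line verification, assuming loadings from above:

print((loadings**2).sum(axis=1))  # ~1.0 for every PC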

In [72]:
plt.figure(figsize=(6,6))

load_sqr = loadings**2
ax = sns.heatmap(load_sqr.T, linewidths=0.5, cmap="BuGn", annot=True, annot_kws={"size": 15})
ax.set_xticklabels(ax.xaxis.get_majorticklabels(), rotation=0, fontsize=15)
ax.set_yticklabels(ax.yaxis.get_majorticklabels(), rotation=0, fontsize=15)
plt.show()



In [71]:
plt.figure(figsize=(6,6))

ax = sns.heatmap(loadings.T, center=0, linewidths=0.5, 
                 cmap="RdBu", vmin=-1, vmax=1, annot=True, annot_kws={"size": 15})
ax.set_xticklabels(ax.xaxis.get_majorticklabels(), rotation=0, fontsize=15)
ax.set_yticklabels(ax.yaxis.get_majorticklabels(), rotation=0, fontsize=15)
plt.show()



In [77]:
best = pcscores.sort_values(by='PC4', ascending=False)[:5]  # the Pokemon with the largest PC4 scores also have very high HP
df_full.loc[best.index]


Out[77]:
# Name Type 1 Type 2 Total HP Attack Defense Sp. Atk Sp. Def Speed Generation Legendary
261 242 Blissey Normal NaN 540 255 10 10 75 135 55 2 False
121 113 Chansey Normal NaN 450 250 5 5 35 105 50 1 False
217 202 Wobbuffet Psychic NaN 405 190 33 58 33 58 33 2 False
351 321 Wailord Water NaN 500 170 90 45 90 45 60 3 False
155 143 Snorlax Normal NaN 540 160 110 65 65 110 30 1 False

In [75]:
df_full.sort_values(by='HP', ascending=False)[:5]  # nearly the same ranking: the top four match, only the fifth differs


Out[75]:
# Name Type 1 Type 2 Total HP Attack Defense Sp. Atk Sp. Def Speed Generation Legendary
261 242 Blissey Normal NaN 540 255 10 10 75 135 55 2 False
121 113 Chansey Normal NaN 450 250 5 5 35 105 50 1 False
217 202 Wobbuffet Psychic NaN 405 190 33 58 33 58 33 2 False
351 321 Wailord Water NaN 500 170 90 45 90 45 60 3 False
655 594 Alomomola Water NaN 470 165 75 80 40 45 65 5 False

Goal: use the raw stats to predict which Pokemon are Legendary


In [121]:
dict_legend = {True:1, False:0}  # the column holds bools, so the keys must be bool, not str!
df_full['Legendary'] = df_full['Legendary'].map(dict_legend)

df_full.head()


Out[121]:
# Name Type 1 Type 2 Total HP Attack Defense Sp. Atk Sp. Def Speed Generation Legendary
0 1 Bulbasaur Grass Poison 318 45 49 49 65 65 45 1 0
1 2 Ivysaur Grass Poison 405 60 62 63 80 80 60 1 0
2 3 Venusaur Grass Poison 525 80 82 83 100 100 80 1 0
3 3 VenusaurMega Venusaur Grass Poison 625 80 100 123 122 120 80 1 0
4 4 Charmander Fire NaN 309 39 52 43 60 50 65 1 0
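Since the column starts out as bool, casting is an equivalent one-liner to the map above:

df_full['Legendary'] = df_full['Legendary'].astype(int)  # True -> 1, False -> 0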

In [88]:
cols = ['HP', 'Attack', 'Defense','Sp. Atk', 'Sp. Def','Speed','Legendary']
df_fl = df_full[cols]

In [89]:
from sklearn.utils import shuffle

shuffle_df = shuffle(df_fl, random_state=42)

df_label = shuffle_df['Legendary']
df_feature = shuffle_df.drop('Legendary', axis=1)

cut_point = round(len(df_fl)*0.6)
train_feature = np.array(df_feature.values[:cut_point,:])
train_label = np.array(df_label.values[:cut_point])
test_feature = np.array(df_feature.values[cut_point:,:])
test_label = np.array(df_label.values[cut_point:])
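The same shuffled 60/40 split can be written with scikit-learn's train_test_split; passing stratify keeps the Legendary ratio equal in both halves, which matters with so few positive examples. A sketch assuming df_feature and df_label from above (not run here; the cells below use the manual split):

from sklearn.model_selection import train_test_split

# stratified 60/40 split; shuffling is built in
train_feature, test_feature, train_label, test_label = train_test_split(
    df_feature.values, df_label.values,
    test_size=0.4, random_state=42, stratify=df_label.values)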

Scikit-Learn


In [90]:
from sklearn import ensemble
from sklearn.model_selection import train_test_split  # the cross_validation module was removed in sklearn 0.20

X_train,X_test,y_train,y_test = train_test_split(train_feature, train_label,
                                              test_size=0.25, random_state=0, stratify=train_label) # stratified sampling
clf = ensemble.RandomForestClassifier()
clf.fit(X_train,y_train)
print("Training Score:%f"%clf.score(train_feature,train_label))
print("Testing Score:%f"%clf.score(test_feature,test_label))


Training Score:0.979167
Testing Score:0.928125

In [91]:
from sklearn import tree
from sklearn.model_selection import train_test_split

X_train,X_test,y_train,y_test = train_test_split(train_feature, train_label,
                                              test_size=0.25, random_state=0, stratify=train_label) # stratified sampling
clf = tree.DecisionTreeClassifier()
clf.fit(X_train,y_train)
print("Training Score:%f"%clf.score(train_feature,train_label))
print("Testing Score:%f"%clf.score(test_feature,test_label)) # train/test gap suggests mild overfitting


Training Score:0.987500
Testing Score:0.903125

In [92]:
from sklearn import svm
from sklearn.model_selection import train_test_split

X_train,X_test,y_train,y_test = train_test_split(train_feature, train_label,
                                              test_size=0.25, random_state=0, stratify=train_label) # stratified sampling
clf = svm.SVC()
clf.fit(X_train,y_train)
print("Training Score:%f"%clf.score(train_feature,train_label))
print("Testing Score:%f"%clf.score(test_feature,test_label)) # train/test gap suggests mild overfitting


Training Score:0.981250
Testing Score:0.906250
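Note that SVC is fitted here on the raw, unscaled stats; RBF-kernel SVMs are sensitive to feature scale, so putting a scaler in front is usually worth trying. A sketch under that assumption, reusing the split above (not run in this notebook):

from sklearn.pipeline import make_pipeline

# scale the features before the SVM; often improves RBF-kernel results
clf = make_pipeline(StandardScaler(), svm.SVC())
clf.fit(X_train, y_train)
print("Testing Score:%f" % clf.score(test_feature, test_label))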

Keras: MLP


In [93]:
# Scale features to [0, 1] with MinMaxScaler
scaler = MinMaxScaler()
scaler.fit(train_feature)
train_feature_trans = scaler.transform(train_feature)
test_feature_trans = scaler.transform(test_feature)


//anaconda/lib/python3.5/site-packages/sklearn/utils/validation.py:444: DataConversionWarning: Data with input dtype int64 was converted to float64 by MinMaxScaler.
  warnings.warn(msg, DataConversionWarning)

In [98]:
# Keras MLP models
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Dropout

def show_train_history(train_history,train,validation):
    plt.plot(train_history.history[train])
    plt.plot(train_history.history[validation])
    plt.title('Train History')
    plt.ylabel(train)
    plt.xlabel('Epoch')
    plt.legend(['train', 'validation'], loc='best')
    plt.show()

model = Sequential() 
model.add(Dense(units=200, 
                input_dim=6, 
                kernel_initializer='uniform', 
                activation='relu'))
model.add(Dropout(0.5))

model.add(Dense(units=200,  
                kernel_initializer='uniform', 
                activation='relu'))
model.add(Dropout(0.5))

model.add(Dense(units=1,  # a single output unit
                kernel_initializer='uniform',
                activation='sigmoid'))

print(model.summary())  # shows the architecture and parameter counts

model.compile(loss='binary_crossentropy',  # binary classification, so binary cross-entropy
              optimizer='adam', metrics=['accuracy'])

train_history = model.fit(x=train_feature_trans, y=train_label,  # the manual split step above is built into Keras
                          validation_split=0.8, epochs=200,  # note: 0.8 reserves 80% for validation, training on only 95 rows
                          batch_size=2000, verbose=2)  # verbose=2 prints one line per epoch

show_train_history(train_history,'acc','val_acc')
show_train_history(train_history,'loss','val_loss')

scores = model.evaluate(test_feature_trans, test_label)
print('\n')
print('accuracy=',scores[1])


_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
dense_13 (Dense)             (None, 200)               1400      
_________________________________________________________________
dropout_9 (Dropout)          (None, 200)               0         
_________________________________________________________________
dense_14 (Dense)             (None, 200)               40200     
_________________________________________________________________
dropout_10 (Dropout)         (None, 200)               0         
_________________________________________________________________
dense_15 (Dense)             (None, 1)                 201       
=================================================================
Total params: 41,801
Trainable params: 41,801
Non-trainable params: 0
_________________________________________________________________
None
Train on 95 samples, validate on 385 samples
Epoch 1/200
0s - loss: 0.6950 - acc: 0.1684 - val_loss: 0.6909 - val_acc: 0.9247
Epoch 2/200
0s - loss: 0.6909 - acc: 0.9053 - val_loss: 0.6871 - val_acc: 0.9247
Epoch 3/200
0s - loss: 0.6870 - acc: 0.9368 - val_loss: 0.6830 - val_acc: 0.9247
[... epochs 4-198 omitted: loss falls steadily while acc stays pinned at 0.9368 (the majority-class rate on the 95 training samples); only after roughly epoch 170 does train acc move, with val_acc climbing from 0.9247 to 0.9377 around epoch 185 ...]
Epoch 199/200
0s - loss: 0.1014 - acc: 0.9579 - val_loss: 0.1784 - val_acc: 0.9377
Epoch 200/200
0s - loss: 0.0990 - acc: 0.9474 - val_loss: 0.1787 - val_acc: 0.9377
 32/320 [==>...........................] - ETA: 0s

accuracy= 0.90625
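To turn the sigmoid outputs into class labels, threshold the predicted probabilities at 0.5. A sketch assuming the trained model and test_feature_trans from above:

proba = model.predict(test_feature_trans)  # probabilities in [0, 1]
pred = (proba > 0.5).astype(int).ravel()  # 1 = predicted Legendary
print(pred[:10])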

Predicting Legendary from the PCA features (80% explained variance): the ML scores actually improve!


In [112]:
from sklearn.utils import shuffle

df_PCA = pcscores  # note: this aliases pcscores rather than copying it
df_PCA['Legendary'] = df_full['Legendary']

shuffle_df = shuffle(df_PCA, random_state=42)

df_label = shuffle_df['Legendary']
df_feature = shuffle_df.drop('Legendary', axis=1)

cut_point = round(len(df_PCA)*0.6)
train_feature = np.array(df_feature.values[:cut_point,:])
train_label = np.array(df_label.values[:cut_point])
test_feature = np.array(df_feature.values[cut_point:,:])
test_label = np.array(df_label.values[cut_point:])

In [114]:
df_PCA.head()


Out[114]:
PC1 PC2 PC3 PC4 Legendary
0 -1.556375 -0.021482 0.666504 0.184176 0
1 -0.362867 -0.050269 0.667913 0.269254 0
2 1.280152 -0.062720 0.623914 0.331391 0
3 2.620916 0.704263 0.995538 -0.199321 0
4 -1.758284 -0.706179 0.411454 -0.268602 0

Scikit-Learn


In [115]:
from sklearn import ensemble
from sklearn.model_selection import train_test_split

X_train,X_test,y_train,y_test = train_test_split(train_feature, train_label,
                                              test_size=0.25, random_state=0, stratify=train_label) # stratified sampling
clf = ensemble.RandomForestClassifier()
clf.fit(X_train,y_train)
print("Training Score:%f"%clf.score(train_feature,train_label))
print("Testing Score:%f"%clf.score(test_feature,test_label))


Training Score:0.991667
Testing Score:0.943750

In [116]:
from sklearn import tree
from sklearn.model_selection import train_test_split

X_train,X_test,y_train,y_test = train_test_split(train_feature, train_label,
                                              test_size=0.25, random_state=0, stratify=train_label) # stratified sampling
clf = tree.DecisionTreeClassifier()
clf.fit(X_train,y_train)
print("Training Score:%f"%clf.score(train_feature,train_label))
print("Testing Score:%f"%clf.score(test_feature,test_label)) # train/test gap suggests mild overfitting


Training Score:0.989583
Testing Score:0.934375

In [117]:
from sklearn import svm
from sklearn.model_selection import train_test_split

X_train,X_test,y_train,y_test = train_test_split(train_feature, train_label,
                                              test_size=0.25, random_state=0, stratify=train_label) # stratified sampling
clf = svm.SVC()
clf.fit(X_train,y_train)
print("Training Score:%f"%clf.score(train_feature,train_label))
print("Testing Score:%f"%clf.score(test_feature,test_label)) # train/test gap suggests mild overfitting


Training Score:0.954167
Testing Score:0.931250
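Beyond accuracy, the roc_auc_score imported at the top summarizes how well the classifier ranks Legendaries above the rest; for SVC, decision_function supplies the ranking scores. A minimal sketch, assuming the fitted clf and hold-out set above:

scores = clf.decision_function(test_feature)  # signed distance to the decision boundary
print(roc_auc_score(test_label, scores))  # 1.0 = perfect ranking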

Keras: MLP


In [119]:
# Keras MLP models
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Dropout

def show_train_history(train_history,train,validation):
    plt.plot(train_history.history[train])
    plt.plot(train_history.history[validation])
    plt.title('Train History')
    plt.ylabel(train)
    plt.xlabel('Epoch')
    plt.legend(['train', 'validation'], loc='best')
    plt.show()

model = Sequential() 
model.add(Dense(units=200, 
                input_dim=4, 
                kernel_initializer='uniform', 
                activation='relu'))
model.add(Dropout(0.5))

model.add(Dense(units=200,  
                kernel_initializer='uniform', 
                activation='relu'))
model.add(Dropout(0.5))

model.add(Dense(units=1,  # a single output unit
                kernel_initializer='uniform',
                activation='sigmoid'))

print(model.summary())  # shows the architecture and parameter counts

model.compile(loss='binary_crossentropy',  # binary classification, so binary cross-entropy
              optimizer='adam', metrics=['accuracy'])

train_history = model.fit(x=train_feature, y=train_label,  # the manual split step above is built into Keras
                          validation_split=0.8, epochs=120,  # note: 0.8 reserves 80% for validation, training on only 95 rows
                          batch_size=2000, verbose=2)  # verbose=2 prints one line per epoch

show_train_history(train_history,'acc','val_acc')
show_train_history(train_history,'loss','val_loss')

scores = model.evaluate(test_feature, test_label)
print('\n')
print('accuracy=',scores[1])


_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
dense_19 (Dense)             (None, 200)               1000      
_________________________________________________________________
dropout_13 (Dropout)         (None, 200)               0         
_________________________________________________________________
dense_20 (Dense)             (None, 200)               40200     
_________________________________________________________________
dropout_14 (Dropout)         (None, 200)               0         
_________________________________________________________________
dense_21 (Dense)             (None, 1)                 201       
=================================================================
Total params: 41,401
Trainable params: 41,401
Non-trainable params: 0
_________________________________________________________________
None
Train on 95 samples, validate on 385 samples
Epoch 1/120
0s - loss: 0.6965 - acc: 0.2421 - val_loss: 0.6897 - val_acc: 0.9247
Epoch 2/120
0s - loss: 0.6901 - acc: 0.7895 - val_loss: 0.6830 - val_acc: 0.9247
Epoch 3/120
0s - loss: 0.6831 - acc: 0.9368 - val_loss: 0.6769 - val_acc: 0.9247
[... epochs 4-118 omitted: loss drops smoothly; train acc leaves the 0.9368 plateau around epoch 100 and val_acc climbs from 0.9247 into the 0.9325-0.9403 range over the final epochs ...]
Epoch 119/120
0s - loss: 0.0696 - acc: 0.9789 - val_loss: 0.1261 - val_acc: 0.9377
Epoch 120/120
0s - loss: 0.0675 - acc: 0.9789 - val_loss: 0.1260 - val_acc: 0.9377
 32/320 [==>...........................] - ETA: 0s

accuracy= 0.9125
