In [214]:
from __future__ import print_function
import matplotlib.pyplot as plt
import numpy as np
import os
import sys
import zipfile
from IPython.display import display, Image
from scipy import ndimage
from sklearn.linear_model import LogisticRegression
from six.moves.urllib.request import urlretrieve
from six.moves import cPickle as pickle

from skimage import color, io
from scipy.misc import imresize

from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Convolution2D, MaxPooling2D, Activation, GlobalAveragePooling2D
from keras.callbacks import EarlyStopping, ModelCheckpoint, LambdaCallback
from keras.utils import np_utils
from keras.models import Model

np.random.seed(31337)

# Configure matplotlib to plot inline in IPython
%matplotlib inline

In [2]:
# Load SqueezeNet CNN codes
X_train_squeezenet = np.load('squeezenet_train_preds-86528D.npy')

In [3]:
X_hard_squeezenet = np.load('squeezenet_hard_preds-86528D.npy')

In [4]:
print(X_train_squeezenet.shape)
print(X_hard_squeezenet.shape)


(23814, 13, 13, 512)
(1186, 13, 13, 512)
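
Each sample is a 13x13x512 SqueezeNet activation volume; flattened, that is 13 * 13 * 512 = 86528 values, which matches the 86528D suffix in the .npy filenames. A quick sanity check:

In [ ]:
# The per-sample feature volume flattens to the 86528-D vector named in the files
assert 13 * 13 * 512 == 86528 == np.prod(X_train_squeezenet.shape[1:])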

In [5]:
# Load the binary labels
Y_train = np.load('catfish-Y_train.npy')
Y_hard = np.load('catfish-Y_hard.npy')
#Y_train = np_utils.to_categorical(Y_train, 2)
#Y_hard = np_utils.to_categorical(Y_hard, 2)

In [171]:
print(Y_train.shape)
print(Y_hard.shape)
Y_train = np_utils.to_categorical(Y_train, 2)
Y_hard = np_utils.to_categorical(Y_hard, 2)


(23814,)
(1186,)
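
np_utils.to_categorical one-hot encodes the integer labels, turning the (N,) vectors of 0/1 class ids into (N, 2) arrays. A minimal illustration:

In [ ]:
# e.g. labels [0, 1, 1] become rows [1, 0], [0, 1], [0, 1]
print(np_utils.to_categorical(np.array([0, 1, 1]), 2))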

In [215]:
# Build model_squeezenet: a SqueezeNet-style classifier head on the CNN codes
# (drop9 -> 1x1 conv10 -> ReLU -> global average pooling -> softmax)

model = Sequential()
model.add(Dropout(0.5, name='drop9', input_shape=(13, 13, 512)))
model.add(Convolution2D(2, 1, 1, border_mode='valid', activation='linear', name='conv10'))
model.add(Activation(activation='relu', name='relu_conv10'))
model.add(GlobalAveragePooling2D())
model.add(Activation(activation='softmax', name='loss'))
model.summary()

# # Compile model_squeezenet
# model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])

# Callbacks
early_stop_cb = EarlyStopping(monitor='val_loss', patience=5, verbose=1)

checkpoint_cb = ModelCheckpoint("catfish_Convolution2D_classifier.h5", save_best_only=True)

# Print a dot at the start of every batch and the epoch number at the end of each epoch.
batch_print_cb = LambdaCallback(on_batch_begin=lambda batch, logs: print(".", end=''),
                                on_epoch_end=lambda epoch, logs: print(epoch))

# Print the logged metrics after every epoch (per-epoch plotting left commented out).
plot_loss_cb = LambdaCallback(on_epoch_end=lambda epoch, logs:
                              print(logs))
                              #plt.plot(np.arange(epoch), logs['loss']))


____________________________________________________________________________________________________
Layer (type)                     Output Shape          Param #     Connected to                     
====================================================================================================
drop9 (Dropout)                  (None, 13, 13, 512)   0           dropout_input_18[0][0]           
____________________________________________________________________________________________________
conv10 (Convolution2D)           (None, 13, 13, 2)     1026        drop9[0][0]                      
____________________________________________________________________________________________________
relu_conv10 (Activation)         (None, 13, 13, 2)     0           conv10[0][0]                     
____________________________________________________________________________________________________
globalaveragepooling2d_1 (Global (None, 2)             0           relu_conv10[0][0]                
____________________________________________________________________________________________________
loss (Activation)                (None, 2)             0           globalaveragepooling2d_1[0][0]   
====================================================================================================
Total params: 1,026
Trainable params: 1,026
Non-trainable params: 0
____________________________________________________________________________________________________
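
All 1,026 parameters live in conv10: a 1x1 convolution mapping 512 channels to 2 has 1 * 1 * 512 * 2 = 1024 weights plus 2 biases; the dropout, activation, and pooling layers are parameter-free.

In [ ]:
# conv10 parameters: kernel_h * kernel_w * in_channels * out_channels + biases
assert 1 * 1 * 512 * 2 + 2 == 1026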

In [216]:
model.compile(loss='categorical_crossentropy',
              optimizer='adam',
              metrics=['accuracy'],
             )

# Fit the model (last 1,814 of the 23,814 training samples held out for validation)
history = model.fit(X_train_squeezenet[:22000], Y_train[:22000],
                    validation_data=(X_train_squeezenet[22000:], Y_train[22000:]),
                    #validation_split=0.33,
                    nb_epoch=50,
                    batch_size=4000,
                    callbacks=[early_stop_cb, checkpoint_cb, batch_print_cb, plot_loss_cb],
                    verbose=0)

# list all data in history
print(history.history.keys())
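
In the output below, each epoch prints one dot per batch (ceil(22000/4000) = 6 batches), then the epoch number, then that epoch's metrics dict.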


......0
{'acc': 0.44672730565071106, 'loss': 1.925785248929804, 'val_acc': 0.45589858293533325, 'val_loss': 1.4531068801879883}
......1
{'acc': 0.50318187475204468, 'loss': 1.3438686674291438, 'val_acc': 0.55567806959152222, 'val_loss': 1.0700684785842896}
......2
{'acc': 0.61618186127055774, 'loss': 0.93013215065002441, 'val_acc': 0.669239342212677, 'val_loss': 0.75283652544021606}
......3
{'acc': 0.70704549551010132, 'loss': 0.70088331265883008, 'val_acc': 0.72877621650695801, 'val_loss': 0.59359109401702881}
......4
{'acc': 0.77304550192572852, 'loss': 0.53760828484188428, 'val_acc': 0.77012133598327637, 'val_loss': 0.50110322237014771}
......5
{'acc': 0.81304553422060877, 'loss': 0.44151618805798615, 'val_acc': 0.81201779842376709, 'val_loss': 0.42966368794441223}
......6
{'acc': 0.84354549104517151, 'loss': 0.37600139596245508, 'val_acc': 0.83406847715377808, 'val_loss': 0.37032151222229004}
......7
{'acc': 0.86327282407067041, 'loss': 0.33269749988209119, 'val_acc': 0.8544653058052063, 'val_loss': 0.3309098482131958}
......8
{'acc': 0.87713644721291284, 'loss': 0.2986109527674588, 'val_acc': 0.86714452505111694, 'val_loss': 0.30121994018554688}
......9
{'acc': 0.89000007239255041, 'loss': 0.26959085735407745, 'val_acc': 0.87706726789474487, 'val_loss': 0.27624630928039551}
......10
{'acc': 0.89681823687119921, 'loss': 0.25506669147448108, 'val_acc': 0.88809269666671753, 'val_loss': 0.25331050157546997}
......11
{'acc': 0.90595462647351355, 'loss': 0.23322205787355249, 'val_acc': 0.90132308006286621, 'val_loss': 0.23484393954277039}
......12
{'acc': 0.91050008752129297, 'loss': 0.21926370804960077, 'val_acc': 0.90573316812515259, 'val_loss': 0.21982766687870026}
......13
{'acc': 0.91500010273673316, 'loss': 0.20417314090512015, 'val_acc': 0.91014337539672852, 'val_loss': 0.20730292797088623}
......14
{'acc': 0.91945462335239758, 'loss': 0.19689898870208047, 'val_acc': 0.91455352306365967, 'val_loss': 0.19482287764549255}
......15
{'acc': 0.925363686951724, 'loss': 0.18422498486258768, 'val_acc': 0.91841244697570801, 'val_loss': 0.18406820297241211}
......16
{'acc': 0.92768189040097326, 'loss': 0.1787360446019606, 'val_acc': 0.92392510175704956, 'val_loss': 0.17558176815509796}
......17
{'acc': 0.93322734941135754, 'loss': 0.16785724732008847, 'val_acc': 0.92557895183563232, 'val_loss': 0.16841596364974976}
......18
{'acc': 0.9354091720147566, 'loss': 0.16011600602756848, 'val_acc': 0.92998898029327393, 'val_loss': 0.16192089021205902}
......19
{'acc': 0.93700010668147693, 'loss': 0.15804810415614734, 'val_acc': 0.93164277076721191, 'val_loss': 0.15568241477012634}
......20
{'acc': 0.93790918588638306, 'loss': 0.15244426239620557, 'val_acc': 0.93439918756484985, 'val_loss': 0.15049724280834198}
......21
{'acc': 0.94104555520144373, 'loss': 0.14856043999845331, 'val_acc': 0.93715554475784302, 'val_loss': 0.14623525738716125}
......22
{'acc': 0.94095465811816126, 'loss': 0.14354334094307639, 'val_acc': 0.93936061859130859, 'val_loss': 0.14262360334396362}
......23
{'acc': 0.94268192486329516, 'loss': 0.14094072851267728, 'val_acc': 0.94101440906524658, 'val_loss': 0.1388649046421051}
......24
{'acc': 0.94422733241861512, 'loss': 0.13945256444540891, 'val_acc': 0.94321948289871216, 'val_loss': 0.13514891266822815}
......25
{'acc': 0.94500007954510779, 'loss': 0.13510700518434698, 'val_acc': 0.94542461633682251, 'val_loss': 0.13223576545715332}
......26
{'acc': 0.94718189672990283, 'loss': 0.13019297475164587, 'val_acc': 0.94652712345123291, 'val_loss': 0.12920969724655151}
......27
{'acc': 0.94790918176824401, 'loss': 0.12885182147676294, 'val_acc': 0.94652712345123291, 'val_loss': 0.12693963944911957}
......28
{'acc': 0.94868189638311218, 'loss': 0.12767209315841849, 'val_acc': 0.9470784068107605, 'val_loss': 0.12473336607217789}
......29
{'acc': 0.95027277144518763, 'loss': 0.12410771440375935, 'val_acc': 0.94762969017028809, 'val_loss': 0.12227071076631546}
......30
{'acc': 0.95036374980753113, 'loss': 0.1231129453940825, 'val_acc': 0.94928336143493652, 'val_loss': 0.12016025930643082}
......31
{'acc': 0.95136372067711572, 'loss': 0.11939987811175259, 'val_acc': 0.94873225688934326, 'val_loss': 0.1184752956032753}
......32
{'acc': 0.95359101620587439, 'loss': 0.11739711327986284, 'val_acc': 0.95148849487304688, 'val_loss': 0.11656294763088226}
......33
{'acc': 0.95331828702579846, 'loss': 0.11693266372789037, 'val_acc': 0.95369356870651245, 'val_loss': 0.11500082910060883}
......34
{'acc': 0.95381826704198669, 'loss': 0.11547703499143774, 'val_acc': 0.95369356870651245, 'val_loss': 0.11389318853616714}
......35
{'acc': 0.95468191667036573, 'loss': 0.11514146287332881, 'val_acc': 0.95479607582092285, 'val_loss': 0.11245539784431458}
......36
{'acc': 0.9566818963397633, 'loss': 0.10990519889376381, 'val_acc': 0.9558987021446228, 'val_loss': 0.11059621721506119}
......37
{'acc': 0.9561818892305548, 'loss': 0.11116497015411203, 'val_acc': 0.95644986629486084, 'val_loss': 0.10936605185270309}
......38
{'acc': 0.9570455226031217, 'loss': 0.10942171175371516, 'val_acc': 0.95644986629486084, 'val_loss': 0.10811571031808853}
......39
{'acc': 0.95718189803036779, 'loss': 0.107808379964395, 'val_acc': 0.95700114965438843, 'val_loss': 0.10720551013946533}
......40
{'acc': 0.95872735435312439, 'loss': 0.10589546236124905, 'val_acc': 0.95700114965438843, 'val_loss': 0.10589742660522461}
......41
{'acc': 0.95713645761663269, 'loss': 0.10638472437858582, 'val_acc': 0.95755243301391602, 'val_loss': 0.10493499785661697}
......42
{'acc': 0.95827280933206727, 'loss': 0.10485957291993228, 'val_acc': 0.95755243301391602, 'val_loss': 0.10399224609136581}
......43
{'acc': 0.95831827142021875, 'loss': 0.10671849684281783, 'val_acc': 0.9581037163734436, 'val_loss': 0.10307808220386505}
......44
{'acc': 0.95850008726119995, 'loss': 0.10317982665517113, 'val_acc': 0.9581037163734436, 'val_loss': 0.10198897868394852}
......45
{'acc': 0.96000008149580518, 'loss': 0.10219702530990947, 'val_acc': 0.95975750684738159, 'val_loss': 0.10145699977874756}
......46
{'acc': 0.95936373147097498, 'loss': 0.1002087952061133, 'val_acc': 0.9581037163734436, 'val_loss': 0.099990770220756531}
......47
{'acc': 0.96013645150444726, 'loss': 0.10114385323090987, 'val_acc': 0.95920628309249878, 'val_loss': 0.099588140845298767}
......48
{'acc': 0.9605910019441084, 'loss': 0.098559576679359787, 'val_acc': 0.95975756645202637, 'val_loss': 0.098661988973617554}
......49
{'acc': 0.96031825650822034, 'loss': 0.099495072933760559, 'val_acc': 0.95975756645202637, 'val_loss': 0.097791202366352081}
['acc', 'loss', 'val_acc', 'val_loss']

In [217]:
# Evaluate on the held-out "hard" set (out-of-sample)
score_hard = model.evaluate(X_hard_squeezenet, Y_hard, verbose=0)

print("OOS %s: %.2f%%" % (model.metrics_names[1], score_hard[1]*100))
print("OOS %s: %.2f" % (model.metrics_names[0], score_hard[0]))

print("min(val los)",np.min(history.history['val_loss']))


OOS acc: 92.33%
OOS loss: 0.19
min(val loss) 0.0977912023664
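
Note that model still holds the last epoch's weights, while the ModelCheckpoint callback saved the best-val_loss weights to catfish_Convolution2D_classifier.h5. Here val_loss improved monotonically, so the two coincide; in general, reload the checkpoint before scoring (a minimal sketch, assuming load_model is available in this Keras version):

In [ ]:
from keras.models import load_model
# Restore the best-val_loss weights written by checkpoint_cb
best_model = load_model("catfish_Convolution2D_classifier.h5")
print(best_model.evaluate(X_hard_squeezenet, Y_hard, verbose=0))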

In [218]:
# summarize history for accuracy
plt.plot(history.history['acc'])
plt.plot(history.history['val_acc'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'val'], loc='lower right')
plt.show()


# summarize history for loss
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss (log scale)')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'val'], loc='upper right')
plt.yscale('log')
plt.show()



In [229]:
from keras.optimizers import SGD

model.compile(loss='categorical_crossentropy',
              optimizer=SGD(lr=1e-3, momentum=0.9),
              metrics=['accuracy'],
             )


# Fine-tune: continue with a lower learning rate, resuming the epoch counter at 50
history = model.fit(X_train_squeezenet[:16000], Y_train[:16000],
                    validation_data=(X_train_squeezenet[20000:22000], Y_train[20000:22000]),
                    #validation_split=0.33,
                    nb_epoch=100,
                    batch_size=4000,
                    callbacks=[early_stop_cb, checkpoint_cb, batch_print_cb, plot_loss_cb],
                    initial_epoch=50,
                    verbose=0)


....50
{'acc': 0.96137510240077972, 'loss': 0.098303591832518578, 'val_acc': 0.96350002288818359, 'val_loss': 0.086843892931938171}
....51
{'acc': 0.96062511205673218, 'loss': 0.096319388598203659, 'val_acc': 0.96400004625320435, 'val_loss': 0.086790628731250763}
....52
{'acc': 0.96118757128715515, 'loss': 0.097392728552222252, 'val_acc': 0.9645000696182251, 'val_loss': 0.086715668439865112}
....53
{'acc': 0.96093757450580597, 'loss': 0.096868766471743584, 'val_acc': 0.96500009298324585, 'val_loss': 0.086597546935081482}
....54
{'acc': 0.96212513744831085, 'loss': 0.09520980529487133, 'val_acc': 0.96500003337860107, 'val_loss': 0.086455628275871277}
....55
{'acc': 0.96056260168552399, 'loss': 0.095045940950512886, 'val_acc': 0.96500009298324585, 'val_loss': 0.086296647787094116}
....56
{'acc': 0.96312509477138519, 'loss': 0.096294308081269264, 'val_acc': 0.96549999713897705, 'val_loss': 0.086112163960933685}
....57
{'acc': 0.96125008165836334, 'loss': 0.096079995855689049, 'val_acc': 0.96550005674362183, 'val_loss': 0.085912346839904785}
....58
{'acc': 0.96200008690357208, 'loss': 0.096330711618065834, 'val_acc': 0.96549999713897705, 'val_loss': 0.085732787847518921}
....59
{'acc': 0.96243759989738464, 'loss': 0.09574098140001297, 'val_acc': 0.96550005674362183, 'val_loss': 0.085577137768268585}
....60
{'acc': 0.96312505006790161, 'loss': 0.09311230480670929, 'val_acc': 0.96549999713897705, 'val_loss': 0.085427343845367432}
....61
{'acc': 0.96400007605552673, 'loss': 0.093252098187804222, 'val_acc': 0.96550005674362183, 'val_loss': 0.085254102945327759}
....62
{'acc': 0.96256259083747864, 'loss': 0.092599384486675262, 'val_acc': 0.96500003337860107, 'val_loss': 0.085133224725723267}
....63
{'acc': 0.96275007724761963, 'loss': 0.093295009806752205, 'val_acc': 0.96500003337860107, 'val_loss': 0.084970913827419281}
....64
{'acc': 0.96375012397766113, 'loss': 0.091916687786579132, 'val_acc': 0.96500003337860107, 'val_loss': 0.084775365889072418}
....65
{'acc': 0.96268758177757263, 'loss': 0.093598628416657448, 'val_acc': 0.96550005674362183, 'val_loss': 0.084574542939662933}
....66
{'acc': 0.9633750319480896, 'loss': 0.092896278947591782, 'val_acc': 0.96500003337860107, 'val_loss': 0.084431469440460205}
....67
{'acc': 0.96356259286403656, 'loss': 0.092569110915064812, 'val_acc': 0.96500003337860107, 'val_loss': 0.084324963390827179}
....68
{'acc': 0.96287506818771362, 'loss': 0.091218959540128708, 'val_acc': 0.96500003337860107, 'val_loss': 0.084207721054553986}
....69
{'acc': 0.96262510120868683, 'loss': 0.091994127258658409, 'val_acc': 0.96500009298324585, 'val_loss': 0.084054052829742432}
....70
{'acc': 0.96450009942054749, 'loss': 0.090202528983354568, 'val_acc': 0.96500003337860107, 'val_loss': 0.083899408578872681}
....71
{'acc': 0.96356256306171417, 'loss': 0.091338362544775009, 'val_acc': 0.96500009298324585, 'val_loss': 0.083760425448417664}
....72
{'acc': 0.96393759548664093, 'loss': 0.091351829469203949, 'val_acc': 0.96500003337860107, 'val_loss': 0.083602316677570343}
....73
{'acc': 0.96381261944770813, 'loss': 0.089641384780406952, 'val_acc': 0.9655001163482666, 'val_loss': 0.083456017076969147}
....74
{'acc': 0.96375012397766113, 'loss': 0.090159859508275986, 'val_acc': 0.96550005674362183, 'val_loss': 0.083335399627685547}
....75
{'acc': 0.96350011229515076, 'loss': 0.089311840012669563, 'val_acc': 0.9655001163482666, 'val_loss': 0.083243332803249359}
....76
{'acc': 0.9643125981092453, 'loss': 0.090249355882406235, 'val_acc': 0.96550005674362183, 'val_loss': 0.08309483528137207}
....77
{'acc': 0.96393758058547974, 'loss': 0.090675417333841324, 'val_acc': 0.9655001163482666, 'val_loss': 0.082955382764339447}
....78
{'acc': 0.96468758583068848, 'loss': 0.091838065534830093, 'val_acc': 0.96550005674362183, 'val_loss': 0.082849755883216858}
....79
{'acc': 0.96643757820129395, 'loss': 0.090114522725343704, 'val_acc': 0.9655001163482666, 'val_loss': 0.082688502967357635}
....80
{'acc': 0.96437513828277588, 'loss': 0.08902212418615818, 'val_acc': 0.96550005674362183, 'val_loss': 0.08257642388343811}
....81
{'acc': 0.96487510204315186, 'loss': 0.090272162109613419, 'val_acc': 0.9655001163482666, 'val_loss': 0.082482032477855682}
....82
{'acc': 0.96387511491775513, 'loss': 0.091400805860757828, 'val_acc': 0.96600013971328735, 'val_loss': 0.082422412931919098}
....83
{'acc': 0.96500009298324585, 'loss': 0.088377973064780235, 'val_acc': 0.9655001163482666, 'val_loss': 0.082315795123577118}
....84
{'acc': 0.96568754315376282, 'loss': 0.087865505367517471, 'val_acc': 0.96550005674362183, 'val_loss': 0.082162603735923767}
....85
{'acc': 0.96468757092952728, 'loss': 0.088928010314702988, 'val_acc': 0.96600013971328735, 'val_loss': 0.081958755850791931}
....86
{'acc': 0.96612510085105896, 'loss': 0.087550342082977295, 'val_acc': 0.96550005674362183, 'val_loss': 0.081837333738803864}
....87
{'acc': 0.96568757295608521, 'loss': 0.086381293833255768, 'val_acc': 0.96550005674362183, 'val_loss': 0.081746712327003479}
....88
{'acc': 0.9656250923871994, 'loss': 0.088503221049904823, 'val_acc': 0.96550005674362183, 'val_loss': 0.081679351627826691}
....89
{'acc': 0.96550007164478302, 'loss': 0.08665887638926506, 'val_acc': 0.9655001163482666, 'val_loss': 0.08158642053604126}
....90
{'acc': 0.96512509882450104, 'loss': 0.087239135056734085, 'val_acc': 0.9660000205039978, 'val_loss': 0.081455454230308533}
....91
{'acc': 0.96581257879734039, 'loss': 0.085692645981907845, 'val_acc': 0.96600008010864258, 'val_loss': 0.081351593136787415}
....92
{'acc': 0.96718758344650269, 'loss': 0.085266599431633949, 'val_acc': 0.9660000205039978, 'val_loss': 0.081235811114311218}
....93
{'acc': 0.96412506699562073, 'loss': 0.087946655228734016, 'val_acc': 0.96600008010864258, 'val_loss': 0.081153340637683868}
....94
{'acc': 0.96493758261203766, 'loss': 0.087645364925265312, 'val_acc': 0.9660000205039978, 'val_loss': 0.081067696213722229}
....95
{'acc': 0.96612510085105896, 'loss': 0.086354697123169899, 'val_acc': 0.96600008010864258, 'val_loss': 0.081014335155487061}
....96
{'acc': 0.96612507104873657, 'loss': 0.08628384955227375, 'val_acc': 0.9660000205039978, 'val_loss': 0.080951042473316193}
....97
{'acc': 0.96531261503696442, 'loss': 0.086797682568430901, 'val_acc': 0.96600008010864258, 'val_loss': 0.080821633338928223}
....98
{'acc': 0.9655625969171524, 'loss': 0.084656026214361191, 'val_acc': 0.9660000205039978, 'val_loss': 0.080713666975498199}
....99
{'acc': 0.96512503921985626, 'loss': 0.08834698423743248, 'val_acc': 0.96600008010864258, 'val_loss': 0.080576568841934204}

In [230]:
# summarize history for accuracy
plt.plot(history.history['acc'])
plt.plot(history.history['val_acc'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'val'], loc='lower right')
plt.show()


# summarize history for loss
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss (log scale)')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'val'], loc='upper right')
plt.yscale('log')
plt.show()



In [236]:
# Re-evaluate on the "hard" set after fine-tuning
score_hard = model.evaluate(X_hard_squeezenet, Y_hard, verbose=0)

print("OOS %s: %.2f%%" % (model.metrics_names[1], score_hard[1]*100))
print("OOS %s: %.2f" % (model.metrics_names[0], score_hard[0]))

print("min(val los)",np.min(history.history['val_loss']))


OOS acc: 92.24%
OOS loss: 0.17
min(val loss) 0.0805765688419

In [232]:
from sklearn.metrics import log_loss

Y_train_preds = model.predict(X_train_squeezenet)
Y_hard_preds = model.predict(X_hard_squeezenet)

In [233]:
# Evaluate in-sample on the training set (not out-of-sample)
score_train = model.evaluate(X_train_squeezenet, Y_train, verbose=0)

print("Train %s: %.2f%%" % (model.metrics_names[1], score_train[1]*100))
print("Train %s: %.2f" % (model.metrics_names[0], score_train[0]))


Train acc: 96.75%
Train loss: 0.08

In [235]:
# Sweep the clip level m: confine predictions to [m, 1-m] and compare log loss
# on the training set vs. the "hard" set
for i in range(1, 10):
    m = i / 100.0
    print(m, log_loss(Y_train, Y_train_preds.clip(min=m, max=1-m)),
          m, log_loss(Y_hard, Y_hard_preds.clip(min=m, max=1-m)))


0.01 0.0859608004013 0.01 0.176782003828
0.02 0.0932254262986 0.02 0.181356018794
0.03 0.100983118987 0.03 0.186440562658
0.04 0.1090721105 0.04 0.191907269274
0.05 0.117377279798 0.05 0.197914152125
0.06 0.125881397731 0.06 0.204348693195
0.07 0.134575577972 0.07 0.211168072992
0.08 0.143475183693 0.08 0.218303679176
0.09 0.152557854332 0.09 0.22564850502
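
Clipping the predictions into [m, 1-m] bounds the per-sample penalty: the worst-case contribution to the log loss is -log(m), about 4.6 at m = 0.01. The sweep above trades that safety margin against the small loss increase on correctly classified samples. A quick check of the bound:

In [ ]:
# Worst-case per-sample log loss at each clip level m
for m in (0.01, 0.02, 0.05):
    print(m, -np.log(m))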

In [237]:
# Unclipped log loss for reference
print(log_loss(Y_train, Y_train_preds))
print(log_loss(Y_hard, Y_hard_preds))


0.0804215156362
0.174303788769

In [205]:
# NB: ndarray.clip returns a copy, so this line does not change Y_train_preds;
# the raw (unclipped) predictions are displayed below
Y_train_preds.clip(min=0.02, max=0.98)
Y_train_preds


Out[205]:
array([[  1.00000000e+00,   0.00000000e+00],
       [  1.00000000e+00,   6.31518146e-30],
       [  0.00000000e+00,   1.00000000e+00],
       ..., 
       [  0.00000000e+00,   1.00000000e+00],
       [  0.00000000e+00,   1.00000000e+00],
       [  1.00000000e+00,   0.00000000e+00]], dtype=float32)
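
The softmax outputs saturate to exact 0.0/1.0 in float32, which is why clipping matters: sklearn's log_loss clips probabilities internally at eps (1e-15 by default in this sklearn vintage), so a single saturated wrong prediction contributes roughly -log(1e-15) ~ 34.5, versus ~4.6 under an m = 0.01 clip. A minimal demonstration:

In [ ]:
# First sample is confidently wrong: sklearn clips 0.0 up to eps=1e-15,
# so it contributes about -log(1e-15) ~ 34.5 to the sum
print(log_loss([0, 1], np.array([[0.0, 1.0], [0.0, 1.0]])))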

In [ ]: