Implementing a linear model with Keras (keras 实现线性模型)
In [1]:
from utils import *
from keras.optimizers import SGD, RMSprop, Adam
In [2]:
# Synthetic input: 30 samples, 2 features each (presumably numpy's
# uniform random in [0, 1) via utils — TODO confirm).
x = random((30, 2))
x[:3]  # peek at the first three rows
Out[2]:
In [3]:
# Ground-truth targets from a known linear model: y = 2*x1 + 3*x2 + 1.
# The fitted weights below should recover [2, 3] and bias 1.
y = x.dot([2., 3.]) + 1.
y[:3]  # first three targets
Out[3]:
In [4]:
# A single Dense layer with 2 inputs and 1 output is exactly the linear
# model above; train with plain SGD on mean squared error.
lm = Sequential([Dense(1, input_shape=(2,))])
lm.compile(optimizer=SGD(lr=.1), loss='mse')
In [5]:
lm.fit(x, y, nb_epoch=10, batch_size=1)  # Keras 1.x arg name (`epochs` in Keras 2); batch_size=1 = pure SGD
Out[5]:
In [6]:
lm.get_weights()  # should be close to weights [[2.], [3.]] and bias [1.] after training
Out[6]:
Modifying VGG: replacing the final layer with a 2-class head and fine-tuning it
In [2]:
import vgg16
from vgg16 import Vgg16
In [3]:
vgg = Vgg16()  # wrapper around VGG16 (presumably ImageNet-pretrained — see the vgg16 module)
In [9]:
# Work on the small sample set; switch to the commented path for the full data.
# path = 'data/redux/'
path = 'data/redux/sample/'
model_path = path + 'models/'  # where precomputed features are cached below
In [ ]:
In [10]:
BATCH_SIZE = 8  # images per batch for the generators and predictions
In [11]:
# shuffle=False keeps the generator order fixed so the features predicted
# below line up row-for-row with `*.classes` — this is why even the
# *training* batches are unshuffled here.
val_batches = get_batches(path + 'valid', shuffle=False, batch_size=BATCH_SIZE)
batches = get_batches(path + 'train', shuffle=False, batch_size=BATCH_SIZE)
In [12]:
#batches.nb_sample
#batches.next()
In [13]:
import bcolz
def save_array(fname, arr):
    """Persist `arr` to disk as a bcolz carray rooted at `fname` (overwrites)."""
    carr = bcolz.carray(arr, rootdir=fname, mode='w')
    carr.flush()
def load_array(fname):
    """Read back an array persisted by `save_array`."""
    carr = bcolz.open(fname)
    # [:] materializes the whole on-disk carray as an in-memory array
    return carr[:]
def onehot(x):
    """One-hot encode a 1-D array of class labels.

    Columns correspond to the sorted unique values in `x` — the same
    ordering sklearn's OneHotEncoder produces — returned as a dense
    float array of shape (len(x), n_classes).

    Pure numpy: avoids the sklearn dependency and the sparse-matrix
    round-trip (`fit_transform(...).todense()`) of the original.
    """
    labels = np.asarray(x).reshape(-1)
    categories = np.unique(labels)  # sorted unique values define column order
    return (labels[:, None] == categories[None, :]).astype(np.float64)
In [14]:
# val_batches
In [15]:
# val_data = get_data(path + 'valid')
In [16]:
# trn_data = get_data(path + 'train')
In [17]:
# save_array(model_path + 'train_data.bc', trn_data)
# save_array(model_path + 'valid_data.bc', val_data)
In [18]:
# trn_data = load_array(model_path + 'train_data.bc')
# val_data = load_array(model_path + 'valid_data.bc')
In [19]:
# trn_data.shape
In [20]:
# Integer class indices from the directory-based generators, then one-hot
# encoded for use with categorical_crossentropy.
val_classes = val_batches.classes
trn_classes = batches.classes
val_labels = onehot(val_classes)
trn_labels = onehot(trn_classes)
In [21]:
trn_labels.shape  # (n_train_samples, n_classes)
Out[21]:
In [22]:
# trn_features = model.predict(trn_data, batch_size=BATCH_SIZE)
# trn_features
In [23]:
# val_features = model.predict(val_data, batch_size=BATCH_SIZE)
# val_features
In [25]:
# Precompute the full VGG forward pass for every validation image
# (order matches val_labels because the generator is unshuffled).
val_features = vgg.model.predict_generator(val_batches, val_batches.nb_sample)
In [26]:
val_features.shape  # expected (n_valid, 1000) — VGG's ImageNet output; confirm against Dense input below
Out[26]:
In [29]:
# Same precomputation for the training set.
trn_features = vgg.model.predict_generator(batches, batches.nb_sample)
In [31]:
trn_features.shape  # expected (n_train, 1000)
Out[31]:
In [32]:
# vgg.compile()
# vgg.fit(batches, val_batches, nb_epoch=1)
In [33]:
# Cache the expensive forward passes so they can be reloaded instead of recomputed.
save_array(model_path + 'train_lastlayer_features.bc', trn_features)
save_array(model_path + 'valid_lastlayer_features.bc', val_features)
In [34]:
# Reload the cached features (no-op right after saving; useful on restart).
trn_features = load_array(model_path + 'train_lastlayer_features.bc')
val_features = load_array(model_path + 'valid_lastlayer_features.bc')
In [35]:
# Linear classifier mapping VGG's 1000-way output to 2 classes,
# trained on the precomputed features (so each epoch is cheap).
lm = Sequential([Dense(2, activation='softmax', input_shape=(1000, ))])
lm.compile(optimizer=RMSprop(lr=.1), loss='categorical_crossentropy', metrics=['accuracy'])
In [36]:
lm.fit(trn_features, trn_labels, nb_epoch=3, batch_size=BATCH_SIZE, validation_data=(val_features, val_labels))  # first 3 epochs
Out[36]:
In [37]:
lm.fit(trn_features, trn_labels, nb_epoch=6, batch_size=BATCH_SIZE, validation_data=(val_features, val_labels))  # continue training: 6 more epochs
Out[37]:
In [38]:
lm.summary()  # 1000*2 weights + 2 biases = 2002 trainable parameters
In [41]:
vgg.model.pop()  # drop VGG's final 1000-way softmax layer to attach our own head
In [43]:
# Freeze every remaining VGG layer so only the newly added head trains.
for frozen in vgg.model.layers:
    frozen.trainable = False
In [46]:
vgg.model.add(Dense(2, activation='softmax'))  # new trainable 2-class head on top of the frozen base
In [48]:
# gen=image.ImageDataGenerator()
# batches = gen.flow(trn_data, trn_labels, batch_size=BATCH_SIZE, shuffle=True)
# val_batches = gen.flow(val_data, val_labels,)
In [49]:
def fit_model(model, batches, val_batches, nb_epoch=1):
    """Train `model` from a batch generator, validating on `val_batches`.

    One full pass over each generator's samples per epoch
    (Keras 1.x `fit_generator` API).
    """
    model.fit_generator(
        batches,
        samples_per_epoch=batches.nb_sample,
        nb_epoch=nb_epoch,
        validation_data=val_batches,
        nb_val_samples=val_batches.nb_sample,
    )
In [51]:
# Recompile after changing layers so the new head is part of the graph.
opt = RMSprop(lr=.1)  # NOTE(review): lr=0.1 is aggressive for fine-tuning — confirm intended
vgg.model.compile(optimizer=opt, loss='categorical_crossentropy', metrics=['accuracy'])
In [53]:
fit_model(vgg.model, batches, val_batches, nb_epoch=3)  # NOTE(review): `batches` was built with shuffle=False (needed for feature precomputation); training usually wants a shuffled generator — confirm
In [ ]: