In [1]:
from keras.datasets import imdb
from keras.preprocessing.sequence import pad_sequences
In [2]:
max_features = 10000
max_len = 500
In [3]:
print('Loading data...')
(x_train, y_train), (x_test, y_test) = imdb.load_data(num_words=max_features)
print(len(x_train), 'train sequences')
print(len(x_test), 'test sequences')
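# Optional sanity check (not part of the original run): decode one integer-encoded
# review back to words via the IMDB word index. Indices are offset by 3 because
# 0, 1 and 2 are reserved for padding, start-of-sequence and unknown tokens.
word_index = imdb.get_word_index()
reverse_word_index = {value: key for key, value in word_index.items()}
print(' '.join(reverse_word_index.get(i - 3, '?') for i in x_train[0][:20]))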
In [4]:
print('Pad sequences (samples x time)')
x_train = pad_sequences(x_train, maxlen=max_len)
x_test = pad_sequences(x_test, maxlen=max_len)
print('x_train shape:', x_train.shape)
print('x_test shape:', x_test.shape)
In [5]:
from keras.models import Sequential
from keras.layers import Embedding, Conv1D, MaxPooling1D, GlobalMaxPooling1D, Dense
from keras.optimizers import RMSprop
In [6]:
model = Sequential()
model.add(Embedding(input_dim = max_features,
                    output_dim = 128,
                    input_length = max_len))
model.add(Conv1D(filters = 32,
                 kernel_size = 7,
                 activation = 'relu'))
model.add(MaxPooling1D(pool_size = 5))
model.add(Conv1D(filters = 32,
                 kernel_size = 7,
                 activation = 'relu'))
model.add(GlobalMaxPooling1D())
# Sigmoid output so the single unit yields a probability for binary crossentropy
model.add(Dense(units = 1, activation = 'sigmoid'))
model.summary()
model.compile(optimizer = RMSprop(lr = 1e-4),
              loss = 'binary_crossentropy',
              metrics = ['acc'])
In [7]:
history = model.fit(x = x_train,
                    y = y_train,
                    epochs = 10,
                    batch_size = 128,
                    validation_split = 0.2)
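# Optional check (not in the original run): loss and accuracy on the held-out
# IMDB test split once training has finished.
test_loss, test_acc = model.evaluate(x_test, y_test, batch_size=128)
print('Test accuracy:', test_acc)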
In [8]:
import matplotlib.pyplot as plt
acc = history.history['acc']
val_acc = history.history['val_acc']
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs = range(len(acc))
plt.plot(epochs, acc, 'bo', label='Training acc')
plt.plot(epochs, val_acc, 'b', label='Validation acc')
plt.title('Training and validation accuracy')
plt.legend()
plt.figure()
plt.plot(epochs, loss, 'bo', label='Training loss')
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.legend()
plt.show()
In [9]:
import os
import numpy as np
data_dir = './data/Chapter 6.3 - Advanced use of recurrent neural networks/'
fname = os.path.join(data_dir, 'jena_climate_2009_2016.csv')
# Reading the CSV file
with open(fname) as f:
    data = f.read()
# Splitting the file line by line
lines = data.split('\n')
# Splitting the line by comma
header = lines[0].split(',')
lines = lines[1:]
float_data = np.zeros((len(lines), len(header) - 1))
for i, line in enumerate(lines):
    values = [float(x) for x in line.split(',')[1:]]
    float_data[i, :] = values
# Normalise each column using the mean and standard deviation of the first
# 200,000 timesteps only (the portion later used for training)
mean = float_data[:200000].mean(axis=0)
float_data -= mean
std = float_data[:200000].std(axis=0)
float_data /= std
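# Quick sanity check (illustrative): the normalised training portion should now
# have roughly zero mean and unit standard deviation in every column.
print(float_data[:200000].mean(axis=0).round(3))
print(float_data[:200000].std(axis=0).round(3))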
In [10]:
# Generator yielding batches of (samples, targets): each sample is a window of
# `lookback` timesteps (sampled every `step` timesteps), each target the
# temperature `delay` timesteps after the end of the window
def generator(data, lookback, delay, min_index, max_index,
              shuffle=False, batch_size=128, step=6):
    if max_index is None:
        max_index = len(data) - delay - 1
    i = min_index + lookback
    while True:
        if shuffle:
            rows = np.random.randint(
                min_index + lookback, max_index, size=batch_size)
        else:
            if i + batch_size >= max_index:
                i = min_index + lookback
            rows = np.arange(i, min(i + batch_size, max_index))
            i += len(rows)
        samples = np.zeros((len(rows),
                            lookback // step,
                            data.shape[-1]))
        targets = np.zeros((len(rows),))
        for j, row in enumerate(rows):
            indices = range(rows[j] - lookback, rows[j], step)
            samples[j] = data[indices]
            targets[j] = data[rows[j] + delay][1]
        yield samples, targets
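# Illustrative shape check (not in the original run): draw a single batch from a
# throwaway generator and confirm that samples have shape
# (batch_size, lookback // step, number of features) and targets (batch_size,).
_peek = generator(float_data, lookback=1440, delay=144,
                  min_index=0, max_index=200000, step=6, batch_size=128)
_samples, _targets = next(_peek)
print(_samples.shape, _targets.shape)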
In [11]:
lookback = 1440
step = 6
delay = 144
batch_size = 128
In [12]:
train_gen = generator(float_data,
                      lookback = lookback,
                      delay = delay,
                      min_index = 0,
                      max_index = 200000,
                      shuffle = True,
                      step = step,
                      batch_size = batch_size)
val_gen = generator(float_data,
                    lookback = lookback,
                    delay = delay,
                    min_index = 200001,
                    max_index = 300000,
                    step = step,
                    batch_size = batch_size)
test_gen = generator(float_data,
                     lookback = lookback,
                     delay = delay,
                     min_index = 300001,
                     max_index = None,
                     step = step,
                     batch_size = batch_size)
In [13]:
# Number of steps to draw from val_gen in order to cover the whole validation set
val_steps = (300000 - 200001 - lookback) // batch_size
# Number of steps to draw from test_gen in order to cover the whole test set
test_steps = (len(float_data) - 300001 - lookback) // batch_size
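# Common-sense baseline (a sketch, following the usual naive method for this dataset):
# predict that the temperature `delay` timesteps from now equals the current temperature
# (column 1 of the normalised data) and measure the resulting MAE on the validation set.
def evaluate_naive_method():
    batch_maes = []
    for _ in range(val_steps):
        samples, targets = next(val_gen)
        preds = samples[:, -1, 1]
        batch_maes.append(np.mean(np.abs(preds - targets)))
    return np.mean(batch_maes)
print('Naive baseline MAE:', evaluate_naive_method())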
In [14]:
model = Sequential()
model.add(Conv1D(filters = 32,
                 kernel_size = 7,
                 activation = 'relu',
                 input_shape = (None, float_data.shape[-1])))
model.add(MaxPooling1D(pool_size = 3))
model.add(Conv1D(filters = 32,
                 kernel_size = 5,
                 activation = 'relu'))
model.add(MaxPooling1D(pool_size = 3))
model.add(Conv1D(filters = 32,
                 kernel_size = 5,
                 activation = 'relu'))
model.add(GlobalMaxPooling1D())
model.add(Dense(units = 1))
model.summary()
model.compile(optimizer = RMSprop(),
              loss = 'mae')
In [15]:
history = model.fit_generator(train_gen,
                              steps_per_epoch = 500,
                              epochs = 20,
                              validation_data = val_gen,
                              validation_steps = val_steps)
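# Optional check (not in the original run): MAE of the convolution-only model on the
# test split, drawn from test_gen for test_steps batches.
test_mae = model.evaluate_generator(test_gen, steps=test_steps)
print('Test MAE:', test_mae)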
In [16]:
import matplotlib.pyplot as plt
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs = range(len(loss))
plt.figure()
plt.plot(epochs, loss, 'bo', label = 'Training loss')
plt.plot(epochs, val_loss, 'b', label = 'Validation loss')
plt.title('Training and validation loss')
plt.legend()
plt.show()
In [18]:
from keras.layers import GRU
In [20]:
model = Sequential()
# Convolutional layers downsample the long input sequence before a GRU processes
# the shorter, higher-level feature sequence
model.add(Conv1D(filters = 32,
                 kernel_size = 7,
                 activation = 'relu',
                 input_shape = (None, float_data.shape[-1])))
model.add(MaxPooling1D(pool_size = 3))
model.add(Conv1D(filters = 32,
                 kernel_size = 5,
                 activation = 'relu'))
model.add(GRU(units = 32,
              dropout = 0.1,
              recurrent_dropout = 0.5))
model.add(Dense(units = 1))
model.summary()
model.compile(optimizer = RMSprop(),
              loss = 'mae')
In [21]:
history = model.fit_generator(train_gen,
                              steps_per_epoch = 500,
                              epochs = 20,
                              validation_data = val_gen,
                              validation_steps = val_steps)
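# Optional check (not in the original run): test MAE of the CNN + GRU model, converted
# back to degrees Celsius by undoing the normalisation of the temperature column.
test_mae = model.evaluate_generator(test_gen, steps=test_steps)
print('Test MAE (normalised):', test_mae)
print('Test MAE (degrees Celsius):', test_mae * std[1])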
In [22]:
import matplotlib.pyplot as plt
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs = range(len(loss))
plt.figure()
plt.plot(epochs, loss, 'bo', label = 'Training loss')
plt.plot(epochs, val_loss, 'b', label = 'Validation loss')
plt.title('Training and validation loss')
plt.legend()
plt.show()