In [1]:
import pandas as pd
# Load the hourly-wages dataset from the notebook's relative data/ directory.
data_df = pd.read_csv('data/hourly_wages.csv')
In [2]:
# Peek at the first rows to sanity-check column names and value formats.
data_df.head()
Out[2]:
In [3]:
# Summary statistics (count / mean / std / quartiles) for the numeric columns.
data_df.describe()
Out[3]:
In [4]:
# Split the frame into a target vector and a predictor matrix (NumPy arrays).
# FIX: DataFrame.as_matrix() was deprecated in pandas 0.23 and removed in 1.0;
# .to_numpy() is the supported replacement.
target = data_df.wage_per_hour.to_numpy()
predictors = data_df.drop(['wage_per_hour'], axis=1).to_numpy()
In [5]:
# Number of input features; used as the network's input dimension below.
n_cols = predictors.shape[1]
In [6]:
from keras.models import Sequential
from keras.layers import Dense

# Regression network: two 100-unit ReLU hidden layers feeding a single
# linear output unit, built via the Sequential list constructor.
my_model = Sequential([
    Dense(100, activation='relu', input_shape=(n_cols,)),
    Dense(100, activation='relu'),
    Dense(1),
])
my_model.summary()
In [7]:
# Adam optimizer with mean-squared-error loss -- standard for regression.
my_model.compile(optimizer = 'adam', loss='mean_squared_error')
In [8]:
# Train on the full dataset with Keras defaults (no validation split).
my_model.fit(predictors, target)
Out[8]:
In [9]:
def get_new_model():
    """Build and compile a fresh 2-hidden-layer regression network.

    Reads the notebook-level ``n_cols`` for the input width.  Returning a
    newly built model gives each experiment untrained weights.
    """
    model = Sequential()
    model.add(Dense(100, activation='relu', input_shape=(n_cols,)))
    model.add(Dense(100, activation='relu'))
    model.add(Dense(1))
    model.compile(optimizer='adam', loss='mean_squared_error')
    return model
# Fresh, untrained model; hold out 30% of rows as a validation set.
my_model = get_new_model()
my_model.fit(predictors, target, validation_split=0.3)
Out[9]:
In [10]:
from keras.callbacks import EarlyStopping

# Stop training once the monitored validation loss fails to improve for
# 2 consecutive epochs.
early_stopping_monitor = EarlyStopping(patience=2)
my_model = get_new_model()
# FIX: the Keras 1 keyword `nb_epoch` was renamed `epochs` in Keras 2.
my_model.fit(predictors, target, validation_split=0.3, epochs=20,
             callbacks=[early_stopping_monitor])
Out[10]:
In [11]:
# Titanic dataset; filename suggests all features are already numeric-encoded.
titanic_data = pd.read_csv('data/titanic_all_numeric.csv')
titanic_data.head()
Out[11]:
In [12]:
# FIX: the keras.utils.np_utils module was removed; to_categorical is
# exposed directly from keras.utils in Keras 2.
from keras.utils import to_categorical

# One-hot encode the binary survival target (n_samples x 2).
target = to_categorical(titanic_data.survived)
# FIX: DataFrame.as_matrix() was removed in pandas 1.0 -- use .to_numpy().
predictors = titanic_data.drop(['survived'], axis=1).to_numpy()
n_cols = predictors.shape[1]
def get_classification_model(n_cols):
    """Return a compiled softmax classifier for two classes.

    Parameters
    ----------
    n_cols : int
        Number of input features.
    """
    layers = [
        Dense(100, activation='relu', input_shape=(n_cols,)),
        Dense(100, activation='relu'),
        Dense(100, activation='relu'),
        Dense(2, activation='softmax'),
    ]
    model = Sequential(layers)
    model.compile(optimizer='adam', loss='categorical_crossentropy',
                  metrics=['accuracy'])
    return model
# Train the classifier, reusing the early-stopping callback defined earlier.
my_model = get_classification_model(n_cols)
# FIX: the Keras 1 keyword `nb_epoch` was renamed `epochs` in Keras 2.
my_model.fit(predictors, target, validation_split=0.3, epochs=20,
             callbacks=[early_stopping_monitor])
Out[12]:
In [13]:
from keras.datasets import mnist
# Number of digit classes (0-9).
nb_classes = 10
# the data, shuffled and split between train and test sets
(X_train, y_train), (X_test, y_test) = mnist.load_data()
# input image dimensions
img_rows, img_cols = 28, 28
# Using tf dim ordering
# Add a trailing channel axis: (n, 28, 28) -> (n, 28, 28, 1), i.e.
# TensorFlow-style "channels last" ordering for the Conv2D layers below.
X_train = X_train.reshape(X_train.shape[0], img_rows, img_cols, 1)
X_test = X_test.reshape(X_test.shape[0], img_rows, img_cols, 1)
input_shape = (img_rows, img_cols, 1)
# Scale pixel intensities from 0-255 integers to 0-1 floats.
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
X_train /= 255
X_test /= 255
print('X_train shape:', X_train.shape)
print(X_train.shape[0], 'train samples')
print(X_test.shape[0], 'test samples')
# convert class vectors to binary class matrices
Y_train = to_categorical(y_train, nb_classes)
Y_test = to_categorical(y_test, nb_classes)
In [14]:
# FIX: `Convolution2D` with positional kernel dims and `border_mode` is the
# Keras 1 API; Keras 2 renamed the layer `Conv2D`, takes `kernel_size` as a
# tuple, and renamed `border_mode` to `padding`.
from keras.layers import Dense, Flatten, Conv2D, MaxPooling2D
# number of convolutional filters to use
nb_filters = 8
# size of pooling area for max pooling
pool_size = (2, 2)
# convolution kernel size
kernel_size = (2, 2)
# Small CNN: two conv layers -> max-pool -> dense head with softmax output.
model = Sequential()
model.add(Conv2D(nb_filters, kernel_size,
                 padding='valid',
                 input_shape=input_shape, activation='relu'))
model.add(Conv2D(nb_filters, kernel_size, activation='relu'))
model.add(MaxPooling2D(pool_size=pool_size))
model.add(Flatten())
model.add(Dense(100, activation='relu'))
model.add(Dense(nb_classes, activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
# FIX: model.summary() prints the table itself and returns None, so
# print(model.summary()) emitted a stray "None" line.
model.summary()
In [24]:
# NOTE(review): this cell's execution count (In[24]) is out of order with its
# neighbors -- restart kernel & run all before sharing so the notebook
# replays cleanly top to bottom.
model.summary()
In [15]:
# FIX: the Keras 1 keyword `nb_epoch` was renamed `epochs` in Keras 2.
model.fit(X_train, Y_train, epochs=5, validation_data=(X_test, Y_test))
Out[15]:
Loaded model from https://github.com/fchollet/deep-learning-models
In [16]:
# NOTE(review): `vgg16` here looks like a local module copied from the
# fchollet/deep-learning-models repo (see the "Loaded model from ..." output
# above); modern Keras ships the same model as
# keras.applications.vgg16.VGG16 -- consider switching. TODO confirm.
from vgg16 import VGG16
from keras.preprocessing import image
# include_top=False drops the fully-connected classifier head, keeping only
# the convolutional base for feature extraction.
model = VGG16(weights='imagenet', include_top=False)
In [17]:
# Inspect the VGG16 convolutional base architecture.
model.summary()
In [19]:
from keras.preprocessing.image import ImageDataGenerator
# Directories consumed by flow_from_directory below -- presumably one
# subfolder per class (cats/, dogs/); verify on disk.
train_data_dir = 'data/dogs_and_cats/train'
val_data_dir = 'data/dogs_and_cats/val'
# Tiny subset: 40 cats + 40 dogs for training, 10 + 10 for validation.
n_cats_training = 40
n_dogs_training = 40
training_size = n_cats_training + n_dogs_training
n_cats_val = 10
n_dogs_val = 10
val_size = n_cats_val + n_dogs_val
# The only preprocessing is rescaling pixels to [0, 1]; no augmentation,
# so the feature extraction below is deterministic.
datagen = ImageDataGenerator(rescale=1./255)
In [21]:
# Stream images from disk. class_mode=None because only the images are
# needed (the VGG base just transforms them, no labels), and shuffle=False
# keeps file order stable so labels can be assigned positionally below.
train_generator = datagen.flow_from_directory(
train_data_dir,
target_size=(150, 150),
class_mode=None,
shuffle=False) # keep data in order, since this is only a transform
val_generator = datagen.flow_from_directory(
val_data_dir,
target_size=(150, 150),
class_mode=None,
shuffle=False)
# Run the frozen VGG16 base over each generator to extract bottleneck
# features for the small classifier trained below.
# NOTE(review): in Keras 1 the 2nd argument is a *sample* count; in Keras 2
# predict_generator takes `steps` (number of batches). Passing
# training_size/val_size as samples only matches Keras 1 -- confirm the
# targeted Keras version before re-running, else far too many batches are
# drawn.
train_data = model.predict_generator(train_generator, training_size)
val_data = model.predict_generator(val_generator, val_size)
In [22]:
import numpy as np

# Positional labels matching the unshuffled generators' file order:
# all cats (0) first, then all dogs (1).
train_labels = np.repeat([0, 1], [n_cats_training, n_dogs_training])
val_labels = np.repeat([0, 1], [n_cats_val, n_dogs_val])
In [23]:
# Small binary-classifier head trained on the extracted VGG16 bottleneck
# features (sigmoid output: P(dog)).
model = Sequential()
model.add(Flatten(input_shape=train_data.shape[1:]))
model.add(Dense(256, activation='relu'))
model.add(Dense(1, activation='sigmoid'))
model.compile(optimizer='adam',
              loss='binary_crossentropy',
              metrics=['accuracy'])
# FIX: the Keras 1 keyword `nb_epoch` was renamed `epochs` in Keras 2.
model.fit(train_data, train_labels,
          epochs=20,
          validation_data=(val_data, val_labels))
Out[23]:
In [ ]: