In [17]:
from glob import glob
from keras.utils import np_utils
from sklearn.datasets import load_files
import numpy as np
def load_dataset(path):
    """Load an image-folder dataset split.

    `path` is a directory with one sub-folder per category (sklearn's
    `load_files` layout). Returns a pair:
      - numpy array of file paths
      - one-hot target matrix over the 3 bone-disease categories
    """
    split = load_files(path)
    file_paths = np.array(split['filenames'])
    one_hot_targets = np_utils.to_categorical(np.array(split['target']), 3)
    return file_paths, one_hot_targets
# Category names come from the training sub-folder names; the [20:-1] slice
# strips the "images/bones/train/" prefix and the trailing "/".
names = [folder[20:-1] for folder in sorted(glob("images/bones/train/*/"))]

# Load each split; targets are one-hot over the 3 categories.
train_files, train_targets = load_dataset('images/bones/train')
valid_files, valid_targets = load_dataset('images/bones/valid')
test_files, test_targets = load_dataset('images/bones/test')

total_images = len(train_files) + len(valid_files) + len(test_files)
print('There are %d total bone disease categories.' % len(names))
print('There are %s total bone images.\n' % total_images)
print('There are %d training bone images.' % len(train_files))
print('There are %d validation bone images.' % len(valid_files))
print('There are %d test bone images.'% len(test_files))
In [18]:
from keras.preprocessing import image
from tqdm import tqdm
def path_to_tensor(img_path):
    """Load one image file as a (1, 224, 224, 3) float array.

    The image is resized to 224x224 and a leading batch axis is added so the
    result can be fed to the model directly.
    """
    loaded = image.load_img(img_path, target_size=(224, 224))
    arr = image.img_to_array(loaded)
    # Prepend the batch dimension: (224, 224, 3) -> (1, 224, 224, 3).
    return arr[np.newaxis, ...]
def paths_to_tensor(img_paths):
    """Stack per-image tensors for all paths into one (N, 224, 224, 3) array.

    Shows a tqdm progress bar, since loading many images can be slow.
    """
    tensors = []
    for img_path in tqdm(img_paths):
        tensors.append(path_to_tensor(img_path))
    return np.vstack(tensors)
In [19]:
from PIL import ImageFile
# Let PIL load partially corrupted image files instead of raising.
ImageFile.LOAD_TRUNCATED_IMAGES = True

# Load every split and rescale pixel values from [0, 255] to [0, 1].
train_tensors, valid_tensors, test_tensors = (
    paths_to_tensor(split).astype('float32') / 255
    for split in (train_files, valid_files, test_files)
)
In [20]:
from keras.layers import Conv2D, MaxPooling2D, GlobalAveragePooling2D
from keras.layers import Dropout, Flatten, Dense
from keras.models import Sequential
# From-scratch CNN: three conv/pool stages, then a dense classifier head.
model = Sequential()
model.add(Conv2D(filters=16, kernel_size=2, padding='same', activation='relu', input_shape=(224, 224, 3)))
model.add(MaxPooling2D(pool_size=2))
model.add(Conv2D(filters=32, kernel_size=2, padding='same', activation='relu'))
model.add(MaxPooling2D(pool_size=2))
model.add(Conv2D(filters=64, kernel_size=2, padding='same', activation='relu'))
model.add(MaxPooling2D(pool_size=2))
model.add(Dropout(0.3))
model.add(Flatten())
# BUG FIX: the penultimate Dense previously had only 3 units (an apparent
# copy of the class count), squeezing all 64*28*28 flattened features through
# a 3-unit relu bottleneck before the softmax. Use a real hidden layer.
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.4))
# Output layer: one probability per bone-disease category.
model.add(Dense(3, activation='softmax'))
model.summary()
model.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['accuracy'])
In [21]:
from keras.callbacks import ModelCheckpoint
# Train the from-scratch model, checkpointing the best weights to disk.
# ModelCheckpoint's default monitor is validation loss, so with
# save_best_only=True only epochs that improve val_loss are saved.
epochs = 10
checkpointer = ModelCheckpoint(filepath='saved_models/weights.best.from_scratch.hdf5',
verbose=1, save_best_only=True)
model.fit(train_tensors, train_targets, validation_data=(valid_tensors, valid_targets),
epochs=epochs, batch_size=20, callbacks=[checkpointer], verbose=1)
Out[21]:
In [22]:
# Restore the best weights found during training before evaluating.
model.load_weights('saved_models/weights.best.from_scratch.hdf5')
# PERF: predict the whole test set in one batched call instead of invoking
# model.predict once per image inside a Python loop (same result, far faster).
bone_diseases_predictions = np.argmax(model.predict(test_tensors), axis=1)
# Accuracy = fraction of predicted class indices matching the one-hot targets.
test_accuracy = 100*np.sum(bone_diseases_predictions==np.argmax(test_targets, axis=1))/len(bone_diseases_predictions)
print('Bones test accuracy: %.4f%%' % test_accuracy)
In [23]:
# Map class index -> human-readable disease name.
categories = {
    0: 'Fracture',
    1: 'Osteomyelitis',
    2: 'Osteochondroma',
}

def prediction_machine(img_path):
    """Predict the bone-disease category for a single image file.

    Prints the raw softmax probabilities, then returns the name of the
    highest-scoring category.
    """
    probabilities = model.predict(path_to_tensor(img_path))
    print(probabilities)
    return categories[np.argmax(probabilities)]
In [24]:
import matplotlib.pyplot as plt
import cv2
%matplotlib inline
final_images = np.array(glob('images/bones/final/humerus/*'))
for img_path in final_images:
p_img = cv2.imread(img_path)
p_img_gray = cv2.cvtColor(p_img, cv2.COLOR_BGR2GRAY)
plt.imshow(p_img)
plt.show()
print(img_path)
prediction = prediction_machine(img_path)
print('Predicted Disease: {0}'.format(prediction))
In [54]:
from keras.applications.resnet50 import ResNet50
from keras.layers import Input
from keras.models import Model

# ResNet50 body pretrained on ImageNet; include_top=False drops its original
# 1000-class head so we can attach our own 3-class classifier.
base_model = ResNet50(include_top=False, weights='imagenet', input_tensor=Input(shape=(224, 224, 3)))

# Freeze the pretrained body so only the new head is trained.
for layer in base_model.layers:
    layer.trainable = False

# BUG FIX: the original called .add() on base_model.output (a tensor, not a
# Sequential), referenced an undefined `bf_model`, and tried to load the
# from-scratch checkpoint into an incompatible architecture. Build the new
# head with the Keras functional API instead.
pooled = GlobalAveragePooling2D()(base_model.output)
predictions = Dense(3, activation='softmax')(pooled)
transfered_model = Model(inputs=base_model.input, outputs=predictions)
transfered_model.summary()
transfered_model.compile(loss='categorical_crossentropy', optimizer='rmsprop', metrics=['accuracy'])
In [55]:
# NOTE(review): this cell repeats the Model(...)/compile(...) lines from the
# previous cell, uses the legacy `input=`/`output=` keyword names, and calls
# `transfered_model(...)` on what the line above just built — it looks like a
# leftover retry of the broken cell above. Confirm and remove this cell.
transfered_model = Model(input=base_model.input, output=transfered_model(base_model.output))
transfered_model.compile(loss='categorical_crossentropy', optimizer='rmsprop', metrics=['accuracy'])
In [44]:
# Checkpoint the best transfer-model weights by validation loss.
# NOTE(review): the filename says VGG16 but the backbone above is ResNet50 —
# consider renaming the checkpoint file (kept as-is to avoid breaking any
# external consumer of this path).
checkpointer = ModelCheckpoint(filepath='saved_models/weights.best.VGG16.hdf5',
                               verbose=1, save_best_only=True)
# BUG FIX: `bf_model`, `bf_train`, and `bf_valid` were never defined (the
# bottleneck-feature loading above is commented out). Train the transfer
# model on the image tensors prepared earlier instead.
transfered_model.fit(train_tensors, train_targets, validation_data=(valid_tensors, valid_targets),
                     epochs=20, batch_size=20, callbacks=[checkpointer], verbose=1)
In [ ]: