In [1]:
import os
import gc
import numpy as np
import ipywidgets as widgets
import matplotlib.pyplot as plt
from PIL import Image
from IPython.display import display
from log_progress import log_progress
from skimage import filters
from keras import models
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, \
Flatten, Convolution2D, MaxPooling2D, \
BatchNormalization, UpSampling2D
from keras.utils import plot_model
from keras.utils.vis_utils import model_to_dot
%matplotlib inline
In [2]:
# Dataset layout: <dataset_folder>/original and <dataset_folder>/detection
# hold paired images that share filenames; preprocessed arrays are cached
# in a single .npy file inside the dataset folder.
dataset_folder = 'dataset'
dataset_original_images = '{}/original'.format(dataset_folder)
dataset_detection_images = '{}/detection'.format(dataset_folder)
dataset_numpy_file = 'dataset.npy'
In [3]:
# Enumerate the source images; the detection masks in the sibling folder
# are expected to use exactly the same filenames (see the pairing loop below).
original_images_filenames = os.listdir(dataset_original_images)
print('Всего файлов в датасете {}'.format(len(original_images_filenames)))
In [4]:
# Build (input, target) training pairs: each image is resized to a fixed
# square, converted to single-channel grayscale, and stored as float32.
subsample_region_size = 256
target_size = (subsample_region_size, subsample_region_size)
dataset = []
for filename in log_progress(original_images_filenames):
    original_filename = os.path.join(dataset_original_images, filename)
    detection_filename = os.path.join(dataset_detection_images, filename)
    # Image.ANTIALIAS was removed in Pillow 10; Image.LANCZOS is the same
    # filter under its current name. `with` closes the file handles, which
    # the original left open for the whole loop.
    with Image.open(original_filename) as original_image:
        original_resized = original_image.resize(target_size, Image.LANCZOS).convert('L')
    with Image.open(detection_filename) as detection_image:
        detection_resized = detection_image.resize(target_size, Image.LANCZOS).convert('L')
    dataset.append((np.array(original_resized).astype('float32'),
                    np.array(detection_resized).astype('float32')))
print('Преобразовываем в массив NumPy')
dataset = np.array(dataset)
print('Перемешиваем')
# NOTE(review): no random seed is set, so the shuffle (and therefore the
# train/test split downstream) is not reproducible between runs.
np.random.shuffle(dataset)
print('Сохраняем')
np.save(os.path.join(dataset_folder, dataset_numpy_file), dataset)
# Fixed typo in the output message: "элемет(ов)" -> "элемент(ов)".
print('Набор данных {} содержит {} элемент(ов)'.format(dataset_numpy_file, len(dataset)))
In [5]:
# Reload the cached dataset and split it: the first 80% of the (already
# shuffled) samples go to training, the remainder to testing.
train_ratio = 0.8
dataset = np.load(os.path.join(dataset_folder, dataset_numpy_file))
# BUG FIX: the original assigned the 80% slice to `test` and the 20% slice
# to `train`, then swapped them back when unpacking x/y — the resulting
# values were right but every intermediate name was misleading. Name the
# slices correctly instead.
split_index = int(len(dataset) * train_ratio)
train, test = dataset[:split_index], dataset[split_index:]
train_x, train_y = train[:, 0], train[:, 1]
test_x, test_y = test[:, 0], test[:, 1]
print('Набор данных загружен. \
Элементов в обучающей выборке {}, элементов в тестовой выборке {}'.format(len(train), len(test)))
In [6]:
# Reshape to NHWC with a single grayscale channel and normalise pixel
# values to [0, 1]. The original rebuilt each array element-by-element via
# a list comprehension; one vectorised reshape is equivalent and avoids
# N intermediate copies.
image_shape = (-1, subsample_region_size, subsample_region_size, 1)
train_x = train_x.reshape(image_shape) / 255.0
train_y = train_y.reshape(image_shape) / 255.0
test_x = test_x.reshape(image_shape) / 255.0
test_y = test_y.reshape(image_shape) / 255.0
print('Данные нормализованы')
In [7]:
# Show five random (input, target) pairs from the training set:
# top row — original image, bottom row — its detection mask.
fig, ax = plt.subplots(2, 5, figsize=(20, 7))
plain_shape = (subsample_region_size, subsample_region_size)
for col in range(5):
    idx = np.random.randint(0, len(train_x))
    ax[0, col].imshow(train_x[idx].reshape(plain_shape), cmap='gray')
    ax[1, col].imshow(train_y[idx].reshape(plain_shape), cmap='gray')
In [8]:
# Fully-convolutional detection network: two conv layers, a 2x down-pool,
# a per-pixel Dense mix, a 1-channel sigmoid head, then 2x up-sampling so
# the output mask matches the 256x256 input.
model = Sequential()
model.add(Convolution2D(filters=32,
                        kernel_size=(3, 3),
                        activation='relu',
                        input_shape=(subsample_region_size, subsample_region_size, 1),
                        padding='same'
                        ))
model.add(Convolution2D(filters=64,
                        kernel_size=(3, 3),
                        activation='sigmoid',
                        padding='same'
                        ))
model.add(MaxPooling2D(pool_size=(2, 2), padding='same'))
# Dense on a 4-D tensor acts on the last (channel) axis, i.e. a 1x1 mix.
model.add(Dense(64, activation='relu'))
model.add(Convolution2D(filters=1,
                        kernel_size=(3, 3),
                        activation='sigmoid',
                        padding='same'
                        ))
model.add(UpSampling2D(size=(2, 2)))
model.compile(loss='binary_crossentropy',
              optimizer='rmsprop',
              metrics=['accuracy', 'mse'])
# BUG FIX: model.summary() prints its report and returns None, so wrapping
# it in display() only rendered "None" after the summary. Call it directly.
model.summary()
In [9]:
# Train for 10 epochs, validating on the held-out split each epoch;
# the trailing ';' suppresses the History object's repr in the cell output.
model.fit(train_x, train_y, validation_data=(test_x, test_y), epochs=10, batch_size=10);
In [10]:
# Persist the trained network to disk, then score it on the test split.
model.save('network')
evaluation = model.evaluate(test_x, test_y, verbose=0)
accuracy = evaluation[1]  # compile() metric order: [loss, accuracy, mse]
print("Accuracy: {:.2f}%".format(accuracy * 100))
In [11]:
# Reload the model from disk — verifies the saved artefact is actually usable.
model = models.load_model('network')
In [12]:
# Visual sanity check on the first five test images:
# row 0 — input, row 1 — model prediction, row 2 — ground-truth mask.
results = model.predict(test_x[:5])
fig, ax = plt.subplots(3, 5, figsize=(20, 11))
side = (subsample_region_size, subsample_region_size)
for col, (image, prediction, truth) in enumerate(zip(test_x[:5], results, test_y[:5])):
    ax[0, col].imshow(image.reshape(side), cmap='gray')
    ax[1, col].imshow(prediction.reshape(side), cmap='gray')
    ax[2, col].imshow(truth.reshape(side), cmap='gray')
In [13]:
# Slide a non-overlapping 256x256 window over a resized 1024x1024 image and
# stitch the per-tile predictions into a detection heat map.
window_size = 256
max_side = int(512 * 2)
half = window_size // 2
# NOTE(review): resizing to a square distorts non-square photos — confirm
# the model tolerates the changed aspect ratio.
hires_image = Image.open('3.jpg').convert('RGB')
hires_image = hires_image.resize((max_side, max_side))
heat_map = np.zeros((max_side, max_side))

# PERF FIX: the original called model.predict() once per tile inside the
# nested loops. Collect every tile first and run a single batched predict.
centers = [(x, y)
           for x in range(half, max_side - half + 1, window_size)
           for y in range(half, max_side - half + 1, window_size)]
crops = []
for x, y in centers:
    crop = hires_image.crop((x - half, y - half, x + half, y + half)).convert('L')
    crops.append(np.array(crop).reshape((window_size, window_size, 1)) / 255)
predictions = model.predict(np.array(crops))
for (x, y), prediction in zip(centers, predictions):
    heat_map[y - half:y + half, x - half:x + half] += prediction.reshape((window_size, window_size))

fig, ax = plt.subplots(1, 2, figsize=(15, 15))
ax[0].imshow(hires_image)
ax[1].imshow(heat_map, cmap='hot')
plt.show()
In [14]: