TODO:
In [1]:
import pylab as plt
# %matplotlib inline
In [2]:
import numpy as np
Synthetic image generation function:
In [4]:
def gen_im(n=1):
    """Render one synthetic training image: a red line plot of 20 random values.

    Parameters
    ----------
    n : int
        Unused; kept for backward compatibility with existing callers.

    Returns
    -------
    data : np.ndarray
        Flat uint8 RGB pixel buffer of the rendered figure.
    rn : np.ndarray
        The 20 uniform-random y-values that were plotted (the regression target).
    shape : tuple
        (height, width) of the rendered canvas, for reshaping `data` later.
    """
    rn = np.random.uniform(low=0, high=1, size=(20,))
    # Make a random plot...
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.plot(rn, "r")
    ax.grid()
    DPI = fig.get_dpi()
    # Force a ~150x150 pixel canvas regardless of the backend's default DPI.
    fig.set_size_inches(150 / float(DPI), 150 / float(DPI))
    fig.canvas.draw()
    # Now we can save it to a numpy array. np.fromstring is deprecated
    # (and removed in recent NumPy) for binary input; np.frombuffer is the
    # supported equivalent. Note the result is a read-only view of the
    # canvas buffer; downstream stacking copies it anyway.
    data = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8)
    plt.close('all')
    return data, rn, fig.canvas.get_width_height()[::-1]
In [5]:
_,_,size = gen_im(n=1)
Batch image generation function:
In [6]:
def batch_gen(n=1):
    """Generate a batch of `n` synthetic images and their regression targets.

    Parameters
    ----------
    n : int
        Number of images to generate.

    Returns
    -------
    arr : np.ndarray
        Images with shape (n, height, width, 3), dtype uint8.
    vals : np.ndarray
        Targets with shape (n, 20).
    """
    images, targets = [], []
    size = None
    for _ in range(n):
        data, vals, size = gen_im()
        images.append(data)
        targets.append(vals)
    # Stack once at the end: repeatedly calling np.vstack inside the loop
    # (the previous approach) re-copied the whole batch every iteration
    # (O(n^2)), and also broke for n == 1, where `arr` stayed 1-D and the
    # final reshape raised. np.stack always yields a leading batch axis.
    arr = np.stack(images).reshape(n, *size, 3)
    return arr, np.stack(targets)
Let's start with 1000 training samples and 100 test samples
In [7]:
# Build the datasets: each sample is a rendered plot image, each target
# is the vector of 20 y-values that produced it.
x_train, y_train = batch_gen(n=1000)
x_test, y_test = batch_gen(n=100)
We reshaped the images to be square simply because square inputs are easier to work with. Here's how they look:
In [11]:
# Sanity check: reconstruct and display the first training image from its
# flat pixel buffer using the captured canvas size.
plt.imshow(x_train[0,:].reshape(*size,3))
plt.show()
Let's implement CNNs until they work!
In [10]:
import keras
from keras.layers import Dense, Flatten, Dropout, Conv2D, MaxPooling2D
from keras.models import Sequential
from keras.layers.convolutional import ZeroPadding2D
from keras.layers import Activation
from keras.optimizers import SGD
import matplotlib.pylab as plt
batch_size = 1
num_classes = 20  # we regress all 20 plotted y-values (not a classification)
epochs = 10
# input image dimensions (height, width) captured earlier from gen_im
img_x, img_y = size
input_shape = (img_x, img_y, 3)
print('x_train shape:', x_train.shape)
In [14]:
filter_size = 3
pool_size = 2
# Small ConvNet regressor: two conv+pool stages, two more conv layers,
# then a flattened linear head with one output per plotted y-value.
# The commented-out layers are earlier experiments left for reference.
model = Sequential([
Conv2D(32, (6, 6), input_shape=input_shape, data_format="channels_last", activation='relu'),
MaxPooling2D(pool_size=(pool_size, pool_size)),
Conv2D(64, (filter_size, filter_size), data_format="channels_last", activation='relu'),
MaxPooling2D(pool_size=(pool_size, pool_size)),
Conv2D(128, (filter_size, filter_size), data_format="channels_last", activation='relu'),
# # MaxPooling2D(pool_size=(pool_size, pool_size)),
Conv2D(128, (filter_size, filter_size), data_format="channels_last", activation='relu'),
# # MaxPooling2D(pool_size=(pool_size, pool_size)),
Flatten(),
# Dropout(0.4),
# Dense(1000, activation='relu'),
Dropout(0.4),
Dense(num_classes, activation='linear'),
])
# MSE loss because this is a regression over the 20 curve values.
model.compile('adadelta', 'mse')
In [15]:
# Train and evaluate. model.fit already returns a History object with the
# per-epoch losses; the previous `callbacks=[history]` referenced a name
# that is never defined in this notebook and raised NameError on a fresh
# Restart & Run All, so it is removed and the History is captured instead.
history = model.fit(x_train, y_train,
                    batch_size=batch_size,
                    epochs=epochs,
                    verbose=1,
                    validation_data=(x_test, y_test))
# With a single 'mse' loss, evaluate returns one scalar.
score = model.evaluate(x_test, y_test, verbose=0)
print('Test loss:', score)
The previous training run took about 10 minutes.
In [16]:
model.predict(x_test[0].reshape(1,*size, 3))
Out[16]:
In [18]:
# Overlay the predicted curve and the true 20 values for one test sample.
plt.plot(model.predict(x_test)[0])
plt.plot(y_test[0])
plt.show()
Not bad at all!
In [42]:
# Load a real (cropped) log image to test the model on non-synthetic data.
# NOTE(review): relative path — assumes the file sits next to the notebook.
from PIL import Image
im = Image.open("log_crop_smaller.png")
im
Out[42]:
In [43]:
# Resize to the 150x150 canvas the model was trained on, then rotate 90
# degrees — presumably to orient the trace like the synthetic plots
# (TODO confirm against the source image).
im = im.resize((150,150)).rotate(90)
im
Out[43]:
In [53]:
im_arr = np.array(im.getdata())[:,:-1].reshape(1, im.size[1], im.size[0], 3)
In [58]:
# Run the model on the real log image and plot the predicted 20 values.
plt.plot(model.predict(im_arr)[0])
plt.show()
Trying to line up the real log image with the predicted curve:
In [80]:
# Show the real log image above the model's prediction on a shared x-axis.
# NOTE(review): the extent/aspect/limit values (-2..19, aspect=4, -3..20)
# appear hand-tuned to visually align the image with the 20 predicted
# sample positions — confirm if the alignment looks off.
fig, (ax0, ax1) = plt.subplots(nrows=2, sharex=True)
ax0.imshow(im, extent=(-2, 19, 0, 1), aspect=4)
ax0.set_title('Real Log')
ax0.set_xlim(-3,20)
ax1.plot(model.predict(im_arr)[0],'r')
ax1.set_title('Predicted Log')
ax1.set_ylim(0,1)
plt.show()