In [1]:
%matplotlib inline

import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy as np

import theano.tensor as T
import theano
import random
from random import shuffle

Read Miku


In [2]:
def showimg(img):
    # the flat pixel list is ordered column by column (x outer, y inner),
    # so reshape to 500x500, transpose, and replicate into 3 RGB channels
    col = np.asarray(img).reshape((500, 500)).T
    plt.imshow(np.dstack([col, col, col]))

def saveimg(fname, img):
    # same pixel layout as showimg, but write the image to disk
    col = np.asarray(img).reshape((500, 500)).T
    plt.imsave(fname, np.dstack([col, col, col]))

In [3]:
def read_muki():
    # each line of muki.txt is "y x c": the pixel's coordinates
    # followed by its gray value
    img_data = np.zeros((250000, 1))
    xy_data = []
    with open('./muki.txt', 'r') as f:
        for count, line in enumerate(f):
            y, x, c = line.split()
            xy_data.append([float(x), float(y)])
            img_data[count] = float(c)
    return np.matrix(xy_data), img_data

xy_data,img_data = read_muki()    
showimg(img_data)



In [ ]:
print(xy_data[:10])
print(img_data[:10])

Muki NN


In [ ]:
from keras.models import Sequential

model = Sequential()

In [ ]:
from keras.layers import Dense, Activation
from keras.optimizers import SGD

model.add(Dense(128, input_dim=2))
model.add(Activation("relu"))

model.add(Dense(128))
model.add(Activation("relu"))

model.add(Dense(8))
model.add(Activation("relu"))

model.add(Dense(1))
model.add(Activation("relu"))


model.compile(loss='binary_crossentropy', optimizer=SGD(lr=0.1, momentum=0.9, nesterov=True), metrics=['accuracy'])
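
Side note, not from the original run: binary_crossentropy assumes predictions in [0, 1], while a ReLU output is unbounded above, so a sigmoid output layer is the more common pairing with this loss. A minimal sketch of that variant (the name model_sig is hypothetical; layer sizes are kept the same):


In [ ]:
# Hypothetical variant (an assumption, not the author's experiment):
# swap the final ReLU for a sigmoid so predictions stay in [0, 1],
# matching the range that binary_crossentropy expects.
model_sig = Sequential()
model_sig.add(Dense(128, input_dim=2))
model_sig.add(Activation("relu"))
model_sig.add(Dense(128))
model_sig.add(Activation("relu"))
model_sig.add(Dense(8))
model_sig.add(Activation("relu"))
model_sig.add(Dense(1))
model_sig.add(Activation("sigmoid"))
model_sig.compile(loss='binary_crossentropy',
                  optimizer=SGD(lr=0.1, momentum=0.9, nesterov=True),
                  metrics=['accuracy'])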

In [ ]:
from IPython.display import SVG
from keras.utils.vis_utils import model_to_dot

SVG(model_to_dot(model).create(prog='dot', format='svg'))

In [ ]:
for ii in range(101, 300):
    # train in chunks of 5 epochs and snapshot the predicted image after each chunk
    model.fit(xy_data, img_data, epochs=5, batch_size=500)
    result = model.predict(xy_data)
    saveimg('./imgs/muki_relu_' + str(ii) + '.png', result.T[0])

In [ ]:
result = model.predict(xy_data)
showimg(result.T[0])

Conclusion:

  • Randomly shuffling the data performs remarkably poorly. A plausible explanation is that the image data is spatially correlated from row to row; shuffling it before forming mini-batches throws away the correlation between neighboring pixels (see the sketch after this list).
  • Using 128 neurons in the hidden layers gives noticeably stronger explanatory power; the reason still needs further investigation.
  • 128 neurons: training process
  • 256 neurons: training process
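
A minimal sketch of the shuffle comparison from the first bullet, under stated assumptions: make_model is a hypothetical helper that rebuilds the network above, and Keras's fit shuffles each epoch by default, so shuffle=False keeps the pixels in their original file order.


In [ ]:
# Sketch of the shuffle comparison (an assumed reconstruction, not the
# author's exact code). With shuffle=False each mini-batch contains
# spatially contiguous pixels; with shuffle=True that structure is lost.
def make_model():
    # hypothetical helper: rebuild the same architecture as above
    m = Sequential()
    m.add(Dense(128, input_dim=2))
    m.add(Activation("relu"))
    m.add(Dense(128))
    m.add(Activation("relu"))
    m.add(Dense(8))
    m.add(Activation("relu"))
    m.add(Dense(1))
    m.add(Activation("relu"))
    m.compile(loss='binary_crossentropy',
              optimizer=SGD(lr=0.1, momentum=0.9, nesterov=True))
    return m

m_ordered = make_model()
m_ordered.fit(xy_data, img_data, epochs=5, batch_size=500, shuffle=False)

m_shuffled = make_model()
m_shuffled.fit(xy_data, img_data, epochs=5, batch_size=500, shuffle=True)

# compare the two reconstructions visually
showimg(m_ordered.predict(xy_data).T[0])
showimg(m_shuffled.predict(xy_data).T[0])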
