In [ ]:
import os
import sys
import re # for regex
import math
import json
import pickle

from PIL import Image
import numpy as np

from sklearn.datasets import fetch_mldata
import matplotlib.pyplot as plt
%matplotlib inline

# import pycuda.autoinit

from chainer import cuda, Function, FunctionSet, gradient_check, Variable, optimizers
import chainer.functions as F

from dA import DenoisingAutoencoder
from SdA import StackedDenoisingAutoencoder
from CdA import ConvolutionalDenoisingAutoencoder
from utils import *

In [ ]:
## make input movie: horizontal sticks of random length drifting toward the top of a size x size frame
size = 16
frame = 30000
movie = np.zeros((frame, size, size), dtype=np.float64)

# per-stick state: position and length
num_stick = 4
y_stick = np.zeros((num_stick,), dtype=np.float64)
x_stick = np.zeros((num_stick,), dtype=np.float64)
len_stick = np.zeros((num_stick,), dtype=np.float64)
range_stick = 3

for i_frame in np.arange(frame):
    img = np.zeros((size, size), dtype=np.float64)
    
    for i_stick in np.arange(num_stick):
        # move the stick toward row 0 (the top of the image); longer sticks move faster
        y_stick[i_stick] -= len_stick[i_stick]//2 + 1
        
        # stick respawn
        if y_stick[i_stick]<0 or i_frame==0:
            l = np.random.randint(7)+1
            len_stick[i_stick] = l
            y_stick[i_stick] = size - range_stick
            x_stick[i_stick] = np.random.randint(size-l)
            
        # draw stick: build column/row index grids for a len_stick-wide, range_stick-tall patch
        cols, rows = np.meshgrid(
            (np.arange(len_stick[i_stick]) + x_stick[i_stick]).astype(np.int8),
            (np.arange(range_stick) + y_stick[i_stick]).astype(np.int8),
        )
        img[rows, cols] = 1
        
    movie[i_frame, :, :] = img
    
# dump every frame as a PNG for visual inspection (only when the movie is short)
if frame < 30:
    for i_frame in np.arange(frame):
        img = movie[i_frame, :, :]
        im = Image.fromarray((img*255).astype(np.uint8))
        im.save(str(i_frame)+'.png')
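
In [ ]:
## optional sanity check (added sketch, uses only names defined above):
## show the first few generated frames inline instead of writing PNG files
n_check = 5
plt.figure(figsize=(2*n_check, 2))
for i_check in range(n_check):
    plt.subplot(1, n_check, i_check+1)
    plt.imshow(movie[i_check], cmap='gray', interpolation='nearest')
    plt.axis('off')
plt.show()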

In [ ]:
## ML params
n_frame = 5     # number of consecutive frames stacked into one input vector
n_output = 100  # number of hidden units of the autoencoder

v_all = makeInputsAsMovie(movie, n_frame)

n_pximage = size**2
n_pxmovie = n_pximage * n_frame
n_movie = v_all.shape[0]

## training hyperparameters
n_epoch = 500
batchsize = 100
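
In [ ]:
## hypothetical sketch of what utils.makeInputsAsMovie presumably does
## (an assumption -- the real implementation lives in utils.py and is not shown here):
## stack n_frame consecutive frames into one flat float32 vector per training sample,
## so v_all should have shape (frame - n_frame + 1, size*size*n_frame)
def make_inputs_as_movie_sketch(movie, n_frame):
    n_sample = movie.shape[0] - n_frame + 1
    v = np.zeros((n_sample, movie.shape[1]*movie.shape[2]*n_frame), dtype=np.float32)
    for i in range(n_sample):
        v[i] = movie[i:i+n_frame].reshape(-1)
    return v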

In [ ]:
## training
dA = DenoisingAutoencoder(n_pxmovie, n_output)

for epoch in range(n_epoch):
    indexes = np.random.permutation(v_all.shape[0])
    for i in range(0, v_all.shape[0], batchsize):
        x_batch = v_all[indexes[i : i + batchsize]]
        loss = dA.train(x_batch)
    print('epoch: {} loss: {}'.format(epoch, loss))  # loss of the last minibatch in the epoch

dA.save('history', n_output, n_epoch, batchsize)
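
In [ ]:
## optional reconstruction check (a sketch under assumptions): this assumes the
## DenoisingAutoencoder wraps a FunctionSet with Linear links named `encode` and
## `decode` and sigmoid activations; only `model.encode` is confirmed by the
## weight-drawing cell below, so treat this as a hypothetical usage example
x_check = v_all[:batchsize].astype(np.float32)
h = F.sigmoid(dA.model.encode(Variable(x_check)))
x_rec = F.sigmoid(dA.model.decode(h))
print('mean squared reconstruction error: ' + str(np.mean((x_rec.data - x_check)**2)))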

In [ ]:
from utils import *

## draw learned encoder weights: one column per hidden unit, one row per input frame
num_show = 8  # number of hidden units to display
for i in range(num_show):
    for i_frame in range(n_frame):
        plt.subplot(n_frame, num_show, num_show*i_frame+i+1)
        iw_s = n_pximage*i_frame
        iw_e = n_pximage*(i_frame+1)
        draw_weight(dA.model.encode.W[i][iw_s:iw_e], (size, size))
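
In [ ]:
## hypothetical sketch of what utils.draw_weight presumably does (an assumption --
## the real helper is defined in utils.py): reshape one weight vector to the image
## shape and render it as a grayscale patch on the current subplot
def draw_weight_sketch(w, shape):
    plt.imshow(np.asarray(w).reshape(shape), cmap='gray', interpolation='nearest')
    plt.axis('off')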

In [ ]: