In [1]:
import sys
sys.path.insert(0,'../tools/DCGAN-tensorflow')
import os
import scipy.misc
import numpy as np
from matplotlib import pyplot as plt
import tensorflow as tf
import tensorflow.contrib.slim as slim
from model import DCGAN
from utils import pp, visualize, to_json, show_all_variables, save_images
In [2]:
input_width, input_height = 128, 128
output_width, output_height = 128, 128
batch_size = 64
z_dim = 100
crop = False
dataset_name = 'eyes_collected_128'
checkpoint_dir = '../tools/DCGAN-tensorflow/checkpoints'
data_dir = '../tools/DCGAN-tensorflow/datasets/'
sample_dir = '../tools/DCGAN-tensorflow/samples'
run_config = tf.ConfigProto()
run_config.gpu_options.allow_growth=True
# create the session without a `with` block so it stays open for the sampling cells below
sess = tf.Session(config=run_config)
dcgan = DCGAN(sess,
              input_width=input_width,
              input_height=input_height,
              output_width=output_width,
              output_height=output_height,
              batch_size=batch_size,
              sample_num=batch_size,
              z_dim=z_dim,
              dataset_name=dataset_name,
              input_fname_pattern='*.jpg',
              crop=crop,
              checkpoint_dir=checkpoint_dir,
              data_dir=data_dir,
              sample_dir=sample_dir)
dcgan.load(checkpoint_dir=checkpoint_dir)
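# Optional sanity check (a sketch): list the trainable variables to confirm the
# generator/discriminator graph was built as expected. show_all_variables comes from
# the DCGAN-tensorflow utils imported above and takes no arguments.
show_all_variables()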
Out[2]:
In [8]:
def sample_progression(dcgan, n):
    """Walk through latent space: n linear interpolations between random z endpoints."""
    z1 = np.random.uniform(-1, 1, size=(dcgan.z_dim))
    samples = []
    for i in range(n):
        z2 = np.random.uniform(-1, 1, size=(dcgan.z_dim))
        # one batch of z vectors interpolated between z1 and z2
        zs = np.array([(1.0 - t) * z1 + t * z2 for t in np.linspace(0, 1, dcgan.batch_size)])
        samples.append(sess.run(dcgan.sampler, feed_dict={dcgan.z: zs}))
        z1 = z2  # the end of this segment becomes the start of the next one
    # flatten the list of batches into a single list of images
    samples = [s for segment in samples for s in segment]
    return samples

def inverse_transform(x):
    # the generator outputs values in [-1, 1]; map them back to [0, 1]
    return (x + 1.0) / 2.0

def display(sample):
    s = inverse_transform(sample)
    s = np.array(255.0 * s).astype(np.uint8)
    plt.imshow(s, interpolation='nearest')
    plt.show()
In [9]:
# one way to make a z_sample batch: interpolate linearly between two random endpoints
# (shown here for illustration only; sample_progression builds these batches internally)
z1 = np.random.uniform(-1, 1, size=(dcgan.z_dim))
z2 = np.random.uniform(-1, 1, size=(dcgan.z_dim))
z_sample = np.array([(1.0 - t) * z1 + t * z2 for t in np.linspace(0, 1, batch_size)])
# sample_progression repeats the above n times and runs the sampler on each batch (here n=12)
samples = sample_progression(dcgan, 12)
# you can display the first image
display(samples[0])
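# A quick way to eyeball the whole interpolation without rendering a video: show a few
# evenly spaced frames in one figure (a sketch using the helpers defined above).
step = len(samples) // 8
fig, axes = plt.subplots(1, 8, figsize=(16, 2))
for ax, s in zip(axes, samples[::step]):
    ax.imshow(inverse_transform(s), interpolation='nearest')
    ax.axis('off')
plt.show()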
In [5]:
import moviepy.editor as mpy

def make_frame(t):
    # 768 frames total (12 segments x batch_size 64), played back at 30 fps
    nf, fps = 768, 30
    x = samples[int(min(nf - 1, fps * t))]
    return ((x + 1.0) / 2.0 * 255).astype(np.uint8)

# duration = nf / fps = 768 / 30 = 25.6 seconds
clip = mpy.VideoClip(make_frame, duration=25.6)
clip.write_videofile('test_eyes.mp4', fps=30)
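# The same clip can also be written as a GIF if that is easier to embed; write_gif is
# part of moviepy's standard VideoClip API (a lower fps keeps the file size manageable).
clip.write_gif('test_eyes.gif', fps=15)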