In [1]:
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import os
from time import gmtime, strftime
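
# NOTE: this notebook uses TensorFlow 1.x APIs (tf.placeholder, tf.Session,
# tensorflow.examples.tutorials.mnist); it will not run unmodified on TF 2.x.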


def xavier_init(size):
    """Xavier-style initializer: normal noise with stddev scaled by fan-in."""
    in_dim = size[0]
    xavier_stddev = 1. / tf.sqrt(in_dim / 2.)
    return tf.random_normal(shape=size, stddev=xavier_stddev)


X = tf.placeholder(tf.float32, shape=[None, 784])  # real MNIST images, flattened to 784-vectors

D_W1 = tf.Variable(xavier_init([784, 128]))
D_b1 = tf.Variable(tf.zeros(shape=[128]))

D_W2 = tf.Variable(xavier_init([128, 1]))
D_b2 = tf.Variable(tf.zeros(shape=[1]))

theta_D = [D_W1, D_W2, D_b1, D_b2]


Z = tf.placeholder(tf.float32, shape=[None, 100])  # noise input to the generator

G_W1 = tf.Variable(xavier_init([100, 128]))
G_b1 = tf.Variable(tf.zeros(shape=[128]))

G_W2 = tf.Variable(xavier_init([128, 784]))
G_b2 = tf.Variable(tf.zeros(shape=[784]))

theta_G = [G_W1, G_W2, G_b1, G_b2]


def sample_Z(m, n):
    """Draw an m x n batch of generator inputs from a uniform prior on [-1, 1]."""
    return np.random.uniform(-1., 1., size=[m, n])


def generator(z):
    G_h1 = tf.nn.relu(tf.matmul(z, G_W1) + G_b1)
    G_log_prob = tf.matmul(G_h1, G_W2) + G_b2
    G_prob = tf.nn.sigmoid(G_log_prob)  # pixel intensities in [0, 1], like MNIST

    return G_prob


def discriminator(x):
    D_h1 = tf.nn.relu(tf.matmul(x, D_W1) + D_b1)
    D_logit = tf.matmul(D_h1, D_W2) + D_b2
    D_prob = tf.nn.sigmoid(D_logit)

    return D_prob, D_logit  # the raw logit feeds the numerically stable loss below


def plot(samples):
    """Tile 16 samples into a 2 x 8 grid of 28 x 28 grayscale images."""
    fig = plt.figure(figsize=(8, 2))
    gs = gridspec.GridSpec(2, 8)
    gs.update(wspace=0.05, hspace=0.05)

    for i, sample in enumerate(samples):
        ax = plt.subplot(gs[i])
        plt.axis('off')
        ax.set_xticklabels([])
        ax.set_yticklabels([])
        ax.set_aspect('equal')
        plt.imshow(sample.reshape(28, 28), cmap='Greys_r')

    return fig


# The two discriminator calls below share theta_D: real and fake batches are
# scored by the same weights, and gradients reach the generator only through D_fake.
G_sample = generator(Z)
D_real, D_logit_real = discriminator(X)
D_fake, D_logit_fake = discriminator(G_sample)

# Original minimax losses (kept for reference; taking tf.log of a sigmoid
# output directly is numerically unstable):
# D_loss = -tf.reduce_mean(tf.log(D_real) + tf.log(1. - D_fake))
# G_loss = -tf.reduce_mean(tf.log(D_fake))

# Numerically stable, logit-based equivalents:
# --------------------------------------------
D_loss_real = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=D_logit_real, labels=tf.ones_like(D_logit_real)))
D_loss_fake = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=D_logit_fake, labels=tf.zeros_like(D_logit_fake)))
D_loss = D_loss_real + D_loss_fake
G_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=D_logit_fake, labels=tf.ones_like(D_logit_fake)))
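# G_loss is the "non-saturating" generator objective: it trains G to make D
# label fakes as real, which gives stronger gradients early in training than
# minimizing log(1 - D(G(z))).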

D_solver = tf.train.AdamOptimizer().minimize(D_loss, var_list=theta_D)  # updates D's weights only
G_solver = tf.train.AdamOptimizer().minimize(G_loss, var_list=theta_G)  # updates G's weights only

mb_size = 128
Z_dim = 100

mnist = input_data.read_data_sets('../../MNIST_data', one_hot=True)

sess = tf.Session()
sess.run(tf.global_variables_initializer())

if not os.path.exists('../data/GAN_pics/'):
    os.makedirs('../data/GAN_pics/')

for it in range(100000):
    # Save one snapshot of generated samples midway through training.
    if it == 50000:
        samples = sess.run(G_sample, feed_dict={Z: sample_Z(16, Z_dim)})

        fig = plot(samples)
        figname = '../data/GAN_pics/{}.png'.format(strftime("%m-%d_%H:%M:%S", gmtime()))
        plt.savefig(figname, bbox_inches='tight')
        plt.close(fig)

    X_mb, _ = mnist.train.next_batch(mb_size)

    # One discriminator step, then one generator step, per iteration.
    _, D_loss_curr = sess.run([D_solver, D_loss], feed_dict={X: X_mb, Z: sample_Z(mb_size, Z_dim)})
    _, G_loss_curr = sess.run([G_solver, G_loss], feed_dict={Z: sample_Z(mb_size, Z_dim)})

    if it % 1000 == 0:
        print('Iter: {}'.format(it))
        print('D loss: {:.4}'.format(D_loss_curr))
        print('G_loss: {:.4}'.format(G_loss_curr))
        print()


Extracting ../../MNIST_data/train-images-idx3-ubyte.gz
Extracting ../../MNIST_data/train-labels-idx1-ubyte.gz
Extracting ../../MNIST_data/t10k-images-idx3-ubyte.gz
Extracting ../../MNIST_data/t10k-labels-idx1-ubyte.gz
Iter: 0
D loss: 1.345
G_loss: 2.835

Iter: 1000
D loss: 0.008279
G_loss: 8.882

Iter: 2000
D loss: 0.007269
G_loss: 7.919

Iter: 3000
D loss: 0.02655
G_loss: 6.665

Iter: 4000
D loss: 0.09383
G_loss: 5.329

Iter: 5000
D loss: 0.3489
G_loss: 4.742

Iter: 6000
D loss: 0.2805
G_loss: 5.725

Iter: 7000
D loss: 0.6084
G_loss: 4.151

Iter: 8000
D loss: 0.3338
G_loss: 4.148

Iter: 9000
D loss: 0.4542
G_loss: 3.531

Iter: 10000
D loss: 0.6875
G_loss: 3.318

Iter: 11000
D loss: 0.8915
G_loss: 2.642

Iter: 12000
D loss: 0.6995
G_loss: 3.09

Iter: 13000
D loss: 0.5893
G_loss: 2.283

Iter: 14000
D loss: 0.6485
G_loss: 2.508

Iter: 15000
D loss: 0.5707
G_loss: 2.608

Iter: 16000
D loss: 0.7081
G_loss: 2.622

Iter: 17000
D loss: 0.9328
G_loss: 2.312

Iter: 18000
D loss: 0.7952
G_loss: 2.13

Iter: 19000
D loss: 1.026
G_loss: 1.981

Iter: 20000
D loss: 0.7425
G_loss: 2.376

Iter: 21000
D loss: 0.8426
G_loss: 2.334

Iter: 22000
D loss: 0.9146
G_loss: 2.244

Iter: 23000
D loss: 0.7128
G_loss: 1.949

Iter: 24000
D loss: 0.9
G_loss: 2.239

Iter: 25000
D loss: 0.7367
G_loss: 2.34

Iter: 26000
D loss: 0.6964
G_loss: 2.178

Iter: 27000
D loss: 0.7618
G_loss: 2.272

Iter: 28000
D loss: 0.7258
G_loss: 2.146

Iter: 29000
D loss: 0.8085
G_loss: 2.174

Iter: 30000
D loss: 0.5519
G_loss: 2.897

Iter: 31000
D loss: 0.7519
G_loss: 2.348

Iter: 32000
D loss: 0.6469
G_loss: 1.985

Iter: 33000
D loss: 0.6594
G_loss: 2.232

Iter: 34000
D loss: 0.6944
G_loss: 2.791

Iter: 35000
D loss: 0.668
G_loss: 2.486

Iter: 36000
D loss: 0.5903
G_loss: 3.012

Iter: 37000
D loss: 0.72
G_loss: 2.424

Iter: 38000
D loss: 0.59
G_loss: 3.081

Iter: 39000
D loss: 0.6315
G_loss: 2.24

Iter: 40000
D loss: 0.6464
G_loss: 2.498

Iter: 41000
D loss: 0.7019
G_loss: 2.015

Iter: 42000
D loss: 0.6009
G_loss: 2.453

Iter: 43000
D loss: 0.6068
G_loss: 2.621

Iter: 44000
D loss: 0.5718
G_loss: 2.616

Iter: 45000
D loss: 0.7738
G_loss: 2.51

Iter: 46000
D loss: 0.5719
G_loss: 2.512

Iter: 47000
D loss: 0.5485
G_loss: 2.563

Iter: 48000
D loss: 0.7601
G_loss: 2.631

Iter: 49000
D loss: 0.5547
G_loss: 2.449

Iter: 50000
D loss: 0.7119
G_loss: 2.461

Iter: 51000
D loss: 0.5353
G_loss: 2.602

Iter: 52000
D loss: 0.5843
G_loss: 2.569

Iter: 53000
D loss: 0.5986
G_loss: 2.372

Iter: 54000
D loss: 0.4055
G_loss: 2.313

Iter: 55000
D loss: 0.5779
G_loss: 2.266

Iter: 56000
D loss: 0.6254
G_loss: 2.497

Iter: 57000
D loss: 0.6582
G_loss: 2.677

Iter: 58000
D loss: 0.6383
G_loss: 2.355

Iter: 59000
D loss: 0.644
G_loss: 2.252

Iter: 60000
D loss: 0.6637
G_loss: 2.132

Iter: 61000
D loss: 0.7379
G_loss: 2.348

Iter: 62000
D loss: 0.6409
G_loss: 2.403

Iter: 63000
D loss: 0.6484
G_loss: 2.233

Iter: 64000
D loss: 0.5196
G_loss: 2.372

Iter: 65000
D loss: 0.4837
G_loss: 2.393

Iter: 66000
D loss: 0.667
G_loss: 2.522

Iter: 67000
D loss: 0.6571
G_loss: 2.12

Iter: 68000
D loss: 0.7709
G_loss: 2.202

Iter: 69000
D loss: 0.5456
G_loss: 2.13

Iter: 70000
D loss: 0.5813
G_loss: 2.24

Iter: 71000
D loss: 0.5705
G_loss: 2.321

Iter: 72000
D loss: 0.5801
G_loss: 2.443

Iter: 73000
D loss: 0.5191
G_loss: 2.292

Iter: 74000
D loss: 0.6146
G_loss: 2.053

Iter: 75000
D loss: 0.4914
G_loss: 2.264

Iter: 76000
D loss: 0.6028
G_loss: 2.007

Iter: 77000
D loss: 0.6287
G_loss: 2.363

Iter: 78000
D loss: 0.5764
G_loss: 2.472

Iter: 79000
D loss: 0.6712
G_loss: 2.582

Iter: 80000
D loss: 0.4987
G_loss: 2.523

Iter: 81000
D loss: 0.5658
G_loss: 2.347

Iter: 82000
D loss: 0.5894
G_loss: 2.597

Iter: 83000
D loss: 0.598
G_loss: 2.311

Iter: 84000
D loss: 0.6271
G_loss: 2.25

Iter: 85000
D loss: 0.6176
G_loss: 2.405

Iter: 86000
D loss: 0.6083
G_loss: 2.417

Iter: 87000
D loss: 0.5725
G_loss: 2.16

Iter: 88000
D loss: 0.4575
G_loss: 2.505

Iter: 89000
D loss: 0.6543
G_loss: 2.086

Iter: 90000
D loss: 0.6061
G_loss: 2.085

Iter: 91000
D loss: 0.6236
G_loss: 2.33

Iter: 92000
D loss: 0.752
G_loss: 2.173

Iter: 93000
D loss: 0.6992
G_loss: 2.125

Iter: 94000
D loss: 0.7013
G_loss: 2.36

Iter: 95000
D loss: 0.5732
G_loss: 2.239

Iter: 96000
D loss: 0.7756
G_loss: 2.577

Iter: 97000
D loss: 0.6767
G_loss: 2.439

Iter: 98000
D loss: 0.5981
G_loss: 2.428

Iter: 99000
D loss: 0.7236
G_loss: 2.192
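
The loop above saves a single sample grid, at iteration 50000 only. A common variant (not part of the run above) writes a numbered grid every few thousand iterations so progress can be inspected over time; a minimal sketch, reusing the graph, session, and helpers from the cell above, with an assumed snapshot interval:

# Hypothetical variant of the training loop: save a numbered sample grid
# every `snapshot_every` iterations instead of once at it == 50000.
snapshot_every = 2000  # assumed interval, not from the original run
for it in range(100000):
    if it % snapshot_every == 0:
        samples = sess.run(G_sample, feed_dict={Z: sample_Z(16, Z_dim)})
        fig = plot(samples)
        plt.savefig('../data/GAN_pics/{:06d}.png'.format(it), bbox_inches='tight')
        plt.close(fig)

    X_mb, _ = mnist.train.next_batch(mb_size)
    _, D_loss_curr = sess.run([D_solver, D_loss], feed_dict={X: X_mb, Z: sample_Z(mb_size, Z_dim)})
    _, G_loss_curr = sess.run([G_solver, G_loss], feed_dict={Z: sample_Z(mb_size, Z_dim)})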


In [7]:
# Display the sample grid saved during training.
import matplotlib.image as mpimg
img = mpimg.imread(figname)
imgplot = plt.imshow(img)
plt.axis('off')
plt.show()
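
Since the session is still open, fresh digits can also be drawn directly from the trained generator instead of re-reading the saved PNG; a minimal sketch, reusing sess, G_sample, Z, Z_dim, sample_Z, and plot from the first cell:

# Draw a new 16-sample grid straight from the generator
# (assumes the graph and session from the first cell are still alive).
samples = sess.run(G_sample, feed_dict={Z: sample_Z(16, Z_dim)})
fig = plot(samples)
plt.show()
plt.close(fig)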


