In [ ]:
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
In [ ]:
# Dataset
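A minimal loading sketch, assuming the TensorFlow 1.x tutorial helper; this is an assumption, but it matches the `mnist.test.next_batch` calls and one-hot labels used below.
In [ ]:
# Assumed loader: the TF 1.x `input_data` helper with one-hot labels
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets('MNIST_data', one_hot=True)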
In [ ]:
class Autoencoder(object):
    def __init__(self, ):
        # Set hyperparameters
        # TODOs
        # Build the graph
        # TODOs
        # Initialize parameters
        # TODOs
        pass

    # Build the network and the loss functions
    def build(self):
        # TODOs

        # Encode
        # x -> z
        # TODOs

        # Decode
        # z -> x_hat
        # TODOs

        # Loss
        # Reconstruction loss
        # Minimize the cross-entropy loss
        # H(x, x_hat) = -\Sigma [x*log(x_hat) + (1-x)*log(1-x_hat)]
        # TODOs

        # Optimizer
        # TODOs
        return

    # Execute the forward and the backward pass
    def run_single_step(self, ):
        # TODOs
        return

    # x -> x_hat
    def reconstructor(self, ):
        # TODOs
        return

    # x -> z
    def transformer(self, ):
        # TODOs
        return
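One possible way to fill in the TODOs above, sketched with TensorFlow 1.x and a single fully-connected hidden layer. The class name, layer widths, `tf.layers` calls, and Adam learning rate are illustrative assumptions, not the required solution.
In [ ]:
# Illustrative sketch only; sizes and learning rate are assumptions
class AutoencoderSketch(object):
    def __init__(self, learning_rate=1e-3, n_z=16, input_dim=784):
        # Set hyperparameters
        self.learning_rate = learning_rate
        self.n_z = n_z
        self.input_dim = input_dim
        # Build the graph
        self.build()
        # Initialize parameters
        self.sess = tf.Session()
        self.sess.run(tf.global_variables_initializer())

    def build(self):
        self.x = tf.placeholder(tf.float32, shape=[None, self.input_dim])
        # Encode: x -> z
        h = tf.layers.dense(self.x, 256, activation=tf.nn.elu)
        self.z = tf.layers.dense(h, self.n_z)
        # Decode: z -> x_hat, sigmoid keeps pixels in [0, 1]
        g = tf.layers.dense(self.z, 256, activation=tf.nn.elu)
        self.x_hat = tf.layers.dense(g, self.input_dim, activation=tf.nn.sigmoid)
        # Reconstruction loss: pixel-wise cross-entropy, summed over pixels
        e = 1e-10  # avoid log(0)
        self.loss = tf.reduce_mean(-tf.reduce_sum(
            self.x * tf.log(self.x_hat + e) +
            (1 - self.x) * tf.log(1 - self.x_hat + e), axis=1))
        # Optimizer
        self.train_op = tf.train.AdamOptimizer(self.learning_rate).minimize(self.loss)
        return

    def run_single_step(self, x):
        _, loss = self.sess.run([self.train_op, self.loss], feed_dict={self.x: x})
        return loss

    def reconstructor(self, x):
        return self.sess.run(self.x_hat, feed_dict={self.x: x})

    def transformer(self, x):
        return self.sess.run(self.z, feed_dict={self.x: x})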
In [ ]:
def trainer(model_class, ):
    # Create a model
    # TODOs
    # Training loop
    # TODOs
    # Return the trained model
    return
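A sketch of how the trainer might look, assuming the constructor signature of the sketch above and the global `mnist` loaded earlier; the `n_z` keyword is forwarded because the calls below pass `n_z=2`, while `num_epoch` and the per-epoch loss print are arbitrary choices.
In [ ]:
# Illustrative sketch only; assumes the AutoencoderSketch constructor signature
def trainer_sketch(model_class, learning_rate=1e-3, batch_size=100,
                   num_epoch=10, n_z=16):
    # Create a model (n_z is forwarded so trainer_sketch(..., n_z=2) works)
    model = model_class(learning_rate=learning_rate, n_z=n_z)
    # Training loop over mini-batches of flattened MNIST images
    for epoch in range(num_epoch):
        for _ in range(mnist.train.num_examples // batch_size):
            batch = mnist.train.next_batch(batch_size)
            loss = model.run_single_step(batch[0])
        print('[Epoch {}] Loss: {}'.format(epoch, loss))
    return model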
In [ ]:
# Train a model
model = trainer(Autoencoder)
In [ ]:
def test_reconstruction(model, mnist, h=28, w=28, batch_size=100):
    # Test the trained model: reconstruction
    batch = mnist.test.next_batch(batch_size)
    x_reconstructed = model.reconstructor(batch[0])
    n = np.sqrt(batch_size).astype(np.int32)
    I_reconstructed = np.empty((h*n, 2*w*n))
    for i in range(n):
        for j in range(n):
            x = np.concatenate(
                (x_reconstructed[i*n+j, :].reshape(h, w),
                 batch[0][i*n+j, :].reshape(h, w)),
                axis=1
            )
            I_reconstructed[i*h:(i+1)*h, j*2*w:(j+1)*2*w] = x
    plt.figure(figsize=(10, 20))
    plt.imshow(I_reconstructed, cmap='gray')
In [ ]:
test_reconstruction(model, mnist)
In [ ]:
# Train a model with 2d latent space
model_2d = trainer(Autoencoder, n_z=2)
In [ ]:
test_reconstruction(model_2d, mnist)
In [ ]:
def test_transformation(model_2d, mnist, batch_size=3000):
    # Test the trained model: transformation
    assert model_2d.n_z == 2
    batch = mnist.test.next_batch(batch_size)
    z = model_2d.transformer(batch[0])
    plt.figure(figsize=(10, 8))
    plt.scatter(z[:, 0], z[:, 1], c=np.argmax(batch[1], 1), s=20)
    plt.colorbar()
    plt.grid()
In [ ]:
test_transformation(model_2d, mnist)
In [ ]:
class VariationalAutoencoder(object):
    def __init__(self, ):
        # TODOs
        pass
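An illustrative sketch of the variational autoencoder, again assuming TensorFlow 1.x and fully-connected layers. The encoder predicts the mean and log-variance of q(z|x), sampling uses the reparameterization trick z = mu + sigma * epsilon, and the loss adds the closed-form KL divergence KL(q(z|x) || N(0, I)) to the reconstruction cross-entropy. A `generator` method is included because `test_generation` below calls it.
In [ ]:
# Illustrative sketch only; architecture and sizes are assumptions
class VariationalAutoencoderSketch(object):
    def __init__(self, learning_rate=1e-3, n_z=16, input_dim=784):
        self.learning_rate = learning_rate
        self.n_z = n_z
        self.input_dim = input_dim
        self.build()
        self.sess = tf.Session()
        self.sess.run(tf.global_variables_initializer())

    def build(self):
        self.x = tf.placeholder(tf.float32, shape=[None, self.input_dim])
        # Encode: x -> q(z|x) = N(mu, sigma^2)
        h = tf.layers.dense(self.x, 256, activation=tf.nn.elu)
        self.mu = tf.layers.dense(h, self.n_z)
        self.log_sigma_sq = tf.layers.dense(h, self.n_z)
        # Reparameterization trick: z = mu + sigma * epsilon, epsilon ~ N(0, I)
        eps = tf.random_normal(tf.shape(self.mu))
        self.z = self.mu + tf.exp(0.5 * self.log_sigma_sq) * eps
        # Decode: z -> x_hat
        g = tf.layers.dense(self.z, 256, activation=tf.nn.elu)
        self.x_hat = tf.layers.dense(g, self.input_dim, activation=tf.nn.sigmoid)
        # Reconstruction loss: pixel-wise cross-entropy
        e = 1e-10  # avoid log(0)
        recon_loss = -tf.reduce_sum(
            self.x * tf.log(self.x_hat + e) +
            (1 - self.x) * tf.log(1 - self.x_hat + e), axis=1)
        # KL(N(mu, sigma^2) || N(0, I)) in closed form:
        # -0.5 * sum(1 + log(sigma^2) - mu^2 - sigma^2)
        kl_loss = -0.5 * tf.reduce_sum(
            1 + self.log_sigma_sq - tf.square(self.mu) - tf.exp(self.log_sigma_sq),
            axis=1)
        self.loss = tf.reduce_mean(recon_loss + kl_loss)
        self.train_op = tf.train.AdamOptimizer(self.learning_rate).minimize(self.loss)
        return

    def run_single_step(self, x):
        _, loss = self.sess.run([self.train_op, self.loss], feed_dict={self.x: x})
        return loss

    def reconstructor(self, x):
        return self.sess.run(self.x_hat, feed_dict={self.x: x})

    def transformer(self, x):
        # Use the posterior mean as the deterministic embedding
        return self.sess.run(self.mu, feed_dict={self.x: x})

    def generator(self, z):
        # Decode arbitrary latent codes; feeding self.z overrides the sampled value
        return self.sess.run(self.x_hat, feed_dict={self.z: z})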
In [ ]:
# Train a VAE model
model_vae = trainer(VariationalAutoencoder)
In [ ]:
test_reconstruction(model_vae, mnist)
In [ ]:
def test_generation(model, z=None, h=28, w=28, batch_size=100):
    # Test the trained model: generation
    # Sample noise vectors from N(0, 1)
    if z is None:
        z = np.random.normal(size=[batch_size, model.n_z])
    x_generated = model.generator(z)
    n = np.sqrt(batch_size).astype(np.int32)
    I_generated = np.empty((h*n, w*n))
    for i in range(n):
        for j in range(n):
            I_generated[i*h:(i+1)*h, j*w:(j+1)*w] = x_generated[i*n+j, :].reshape(h, w)
    plt.figure(figsize=(8, 8))
    plt.imshow(I_generated, cmap='gray')
In [ ]:
test_generation(model_vae)
In [ ]:
# Train a model with 2d latent space
model_vae_2d = trainer(VariationalAutoencoder, n_z=2)
In [ ]:
test_transformation(model_vae_2d, mnist)
In [ ]:
# Test the trained model: uniformly sample in the latent space
n = 20
x = np.linspace(-2, 2, n)
y = np.flip(np.linspace(-2, 2, n))
z = []
for xi in x:
    for yi in y:
        z.append(np.array([xi, yi]))
z = np.stack(z)

# Generate images from the grid of latent codes
test_generation(model_vae_2d, z, batch_size=n**2)