In [ ]:
# Toggle: when True, the import cell below swaps in the StyleGAN v1 variants
# of load_network / gen_image_fun / synth_image_fun.
is_stylegan_v1 = False

In [ ]:
from pathlib import Path
import matplotlib.pyplot as plt
import numpy as np
import sys
import os
from datetime import datetime
from tqdm import tqdm

# ffmpeg installation location, for creating videos
plt.rcParams['animation.ffmpeg_path'] = str('/usr/bin/ffmpeg')
import ipywidgets as widgets
from ipywidgets import interact, interact_manual
from IPython.display import display
from ipywidgets import Button

os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'

%load_ext autoreload
%autoreload 2

# StyleGAN2 Repo
sys.path.append('/tf/notebooks/stylegan2')

# StyleGAN Utils
from stylegan_utils import load_network, gen_image_fun, synth_image_fun, create_video
# v1 override
if is_stylegan_v1:
    from stylegan_utils import load_network_v1 as load_network
    from stylegan_utils import gen_image_fun_v1 as gen_image_fun
    from stylegan_utils import synth_image_fun_v1 as synth_image_fun

import run_projector
import projector
import training.dataset
import training.misc

# Data Science Utils
sys.path.append(os.path.join(*[os.pardir]*3, 'data-science-learning'))

from ds_utils import generative_utils

In [ ]:
# Root directory where generated/projected results are stored (read later
# to list projected-image entries).
res_dir = Path.home() / 'Documents/generated_data/stylegan'

Load Network


In [ ]:
# Location and name of the pre-trained network pickle to load.
MODELS_DIR = Path.home() / 'Documents/models/stylegan2'
MODEL_NAME = 'original_ffhq'
SNAPSHOT_NAME = 'stylegan2-ffhq-config-f'

# Gs: generator network; Gs_kwargs: run kwargs; noise_vars: per-layer noise
# inputs (exact contents defined by stylegan_utils.load_network).
Gs, Gs_kwargs, noise_vars = load_network(str(MODELS_DIR / MODEL_NAME / SNAPSHOT_NAME) + '.pkl')

Z_SIZE = Gs.input_shape[1]  # dimensionality of the latent (z) input vector
IMG_SIZE = Gs.output_shape[2:]  # trailing output dims — presumably (H, W); confirm layout
IMG_SIZE  # bare expression: display as cell output

In [ ]:
# Sanity check: generate a single image from a random standard-normal z
# vector and display it inline.
img = gen_image_fun(Gs, np.random.randn(1, Z_SIZE), Gs_kwargs, noise_vars)
plt.imshow(img)

Explore Directions


In [ ]:
def plot_direction_grid(dlatent, direction, coeffs):
    """Show `dlatent` shifted along `direction` by each coefficient, side by side.

    Parameters
    ----------
    dlatent : np.ndarray
        Base latent (batch of 1) passed to `synth_image_fun`; exact layout
        depends on the network's dlatent format — assumed broadcast-compatible
        with `direction`.
    direction : np.ndarray
        Learned latent-space direction added to `dlatent`.
    coeffs : sequence of float
        Multipliers for `direction`; one subplot per coefficient.
    """
    # squeeze=False keeps `axes` a 2D array even when len(coeffs) == 1;
    # with the default squeeze=True a single coefficient would return a bare
    # Axes object and `ax[i]` below would raise TypeError.
    fig, axes = plt.subplots(1, len(coeffs), figsize=(15, 10), dpi=100,
                             squeeze=False)

    for i, coeff in enumerate(coeffs):
        # '+' already returns a new array, so no explicit copy is needed.
        new_latent = dlatent + coeff * direction
        ax = axes[0, i]
        ax.imshow(synth_image_fun(Gs, new_latent, Gs_kwargs, randomize_noise=False))
        ax.set_title(f'Coeff: {coeff:0.1f}')
        ax.axis('off')
    plt.show()

In [ ]:
# load learned direction
direction = np.load('/tf/media/datasets/stylegan/learned_directions.npy')

In [ ]:
nb_latents = 5

# Generate dlatents from the mapping network.
# BUG FIX: StyleGAN z vectors are sampled from a standard normal distribution
# (as done with randn in the generation cell above); np.random.rand would
# sample uniform [0, 1) and put the mapping network off-distribution.
dlatents = Gs.components.mapping.run(np.random.randn(nb_latents, Z_SIZE), None, truncation_psi=1.)

# One grid of direction-shifted images per sampled latent.
for i in range(nb_latents):
    plot_direction_grid(dlatents[i:i+1], direction, np.linspace(-2, 2, 5))

Interactive


In [ ]:
# Set up the figure reused by the interactive widget below.
dpi = 100
fig, ax = plt.subplots(dpi=dpi, figsize=(7, 7))
# Remove all margins so the image fills the entire figure.
fig.subplots_adjust(left=0, right=1, bottom=0, top=1, hspace=0, wspace=0)
plt.axis('off')
# Seed the AxesImage with a random generation; the widget callback updates it
# in place via im.set_data and re-displays `fig`.
im = ax.imshow(gen_image_fun(Gs, np.random.randn(1, Z_SIZE),Gs_kwargs, noise_vars, truncation_psi=1))

# Prevent any output for this cell (the figure is shown by the widget instead).
plt.close()

In [ ]:
# fetch attributes names
# Fetch attribute names: one per learned-direction .npy file.
directions_dir = MODELS_DIR / MODEL_NAME / 'directions' / 'set01'
attributes = [e.stem for e in directions_dir.glob('*.npy')]

# Get names of projected-image folders for this snapshot run.
data_dir = res_dir / 'projection' / MODEL_NAME / SNAPSHOT_NAME / '20200215_192547'
entries = [p.name for p in data_dir.glob("*") if p.is_dir()]
# ROBUSTNESS: guard the removal — a bare list.remove raises ValueError when
# no 'tfrecords' folder exists in this run directory.
if 'tfrecords' in entries:
    entries.remove('tfrecords')

# Set target latent to play with.
# BUG FIX: z vectors are standard-normal (randn), not uniform (rand) — see
# the randn usage in the generation cells above.
dlatents = Gs.components.mapping.run(np.random.randn(1, Z_SIZE), None, truncation_psi=0.5)
target_latent = dlatents[0:1]
#target_latent = np.array([np.load("/out_4/image_latents2000.npy")])

In [ ]:
%matplotlib inline

@interact
def i_direction(attribute=attributes, 
                entry=entries,
                coeff=(-10., 10.)):
    """Interactively shift a projected image's latent along a chosen direction.

    attribute: name of a learned direction (.npy file) in `directions_dir`.
    entry: projected-image folder name inside `data_dir`.
    coeff: slider multiplier applied to the direction.
    """
    direction = np.load(directions_dir / f'{attribute}.npy')
    # NOTE: this local shadows the module-level `target_latent` from the
    # previous cell — the widget always acts on the selected entry's saved
    # latent, never on the randomly sampled one.
    target_latent = np.array([np.load(data_dir / entry / "image_latents1000.npy")])
    
    new_latent_vector = target_latent.copy() + coeff*direction
    # NOTE(review): the positional `True` is presumably randomize_noise=True,
    # which differs from the deterministic randomize_noise=False used in
    # plot_direction_grid — confirm this non-determinism is intentional.
    im.set_data(synth_image_fun(Gs, new_latent_vector, Gs_kwargs, True))
    ax.set_title('Coeff: %0.1f' % coeff)
    display(fig)