Self-Adaptive Evolution Strategy (SAES)

The SAES algorithm


Notations:

  • $\mathcal{N}$ denotes an independent standard Gaussian sample, drawn anew at each occurrence (a scalar for the step-size update and a $d$-dimensional vector for the mutation of $\boldsymbol{x}$)
  • $d$ is the dimension of the input vectors, $d \in \mathbb{N}^*$
  • $n$ is the current iteration index (or generation index), $n \in \mathbb{N}^*$

Algorithm's parameters:

  • $K > 0$,
  • $\zeta \geq 0$,
  • $\lambda > \mu > 0$

Input:

  • an initial parent population $\boldsymbol{x}_{1,i} \in \mathbb{R}^d$ with $i \in \{1, \dots, \mu \}$
  • initial step sizes $\sigma_{1,i} = 1$ with $i \in \{1, \dots, \mu \}$

$n\leftarrow 1$

while (stop condition) do

$\quad$ Generate $\lambda$ individuals $i_j$ independently with $j \in \{ 1, \dots, \lambda \}$ using

\begin{eqnarray} \sigma_j & = & \sigma_{n, ((j-1) \bmod \mu) + 1} \times \exp\left( \frac{1}{2d} \mathcal{N} \right) \\ i_j & = & \boldsymbol{x}_{n, ((j-1) \bmod \mu) + 1} + \sigma_j \, \mathcal{N}. \end{eqnarray}

$\quad$ Evaluate each of them $\lceil Kn^\zeta \rceil$ times and average their fitness values

$\quad$ Define $j_1, \dots, j_{\lambda}$ so that

$$ \mathbb{E}_{\lceil Kn^\zeta \rceil}[f(i_{j_1})]\leq \mathbb{E}_{\lceil Kn^\zeta \rceil}[f(i_{j_2})] \leq \dots \leq \mathbb{E}_{\lceil Kn^\zeta \rceil}[f(i_{j_{\lambda}})] $$

$\quad$ where $\mathbb{E}_m$ denotes a sample average over $m$ resamplings.

$\quad$ Update: compute $\boldsymbol{x}_{n+1, k}$ and $\sigma_{n+1, k}$ using

\begin{eqnarray} \sigma_{n+1,k} &=& \sigma_{j_{k}}, \quad k \in \{1, \dots, \mu\}\\ {\boldsymbol{x}_{n+1,k}} &=& i_{j_{k}}, \quad k \in \{1, \dots, \mu\} \end{eqnarray}

$\quad$ $n\leftarrow n+1$

end while
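
The resampled evaluation step is what distinguishes this SAES from a plain $(\mu/1, \lambda)$-ES: with a noisy objective function, each offspring is evaluated $\lceil K n^\zeta \rceil$ times and the sample average is used for the ranking. The implementation below skips this averaging for simplicity, so here is a minimal, self-contained sketch of that single step, assuming a (possibly noisy) objective function `func` that accepts one $d$-dimensional vector (the names `func`, `children`, `K` and `zeta` are placeholders chosen for this example).

In [ ]:
import math
import numpy as np

def averaged_fitness(func, children, n, K=1., zeta=1.):
    """Evaluate each child ceil(K * n**zeta) times and return the averaged fitness values.

    `children` is a (lambda, d) array, `n` is the current generation index and
    `func` is a (possibly noisy) objective function taking one d-dimensional vector.
    """
    num_resamplings = math.ceil(K * n**zeta)
    fitness = np.empty(len(children))
    for j, child in enumerate(children):
        fitness[j] = np.mean([func(child) for _ in range(num_resamplings)])
    return fitness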

A Python implementation


In [ ]:
# Init matplotlib

%matplotlib inline

import matplotlib
import matplotlib.pyplot as plt

matplotlib.rcParams['figure.figsize'] = (8, 8)

In [ ]:
# Setup PyAI
import sys
sys.path.insert(0, '/Users/jdecock/git/pub/jdhp/pyai')

In [ ]:
# Set the objective function
#from pyai.optimize.functions import sphere as func
from pyai.optimize.functions import sphere2d as func
#from pyai.optimize.functions import additive_gaussian_noise as noise
from pyai.optimize.functions import multiplicative_gaussian_noise as noise
#from pyai.optimize.functions import additive_poisson_noise as noise

#func.noise = noise      # Uncomment this line to use a noisy objective function

xmin = func.bounds[0]   # TODO
xmax = func.bounds[1]   # TODO

($\mu$/1, $\lambda$)-$\sigma$-Self-Adaptation-ES


In [ ]:
import numpy as np
import math

"""
This is a simple Python implementation of the (mu/1, lambda)-sigmaSA-ES
as discussed in 
http://www.scholarpedia.org/article/Evolution_Strategies
"""

mu = 3    # mu: the number of parents
lmb = 12  # lambda: the number of children
rho = 1   # rho: number of parents per child
selection_operator = '+'

d = 2    # dimension of the solution space

num_gen = 10

tau = 1./math.sqrt(2.*d)          # self-adaptation learning rate

# Init the population ##########################

# "pop" array layout:
# - the first mu lines contain parents
# - the next lambda lines contain children
# - the first column contains the individual's strategy (sigma)
# - the last column contains the individual's fitness (f(x))
# - the other columns contain the individual value (x)

pop = np.full([mu+lmb, d+2], np.nan)
pop[:mu, 0] = 1.                                       # init the parents strategy to 1.0
pop[:mu, 1:-1] = np.random.normal(0., 1., size=[mu,d]) # init the parents value
pop[:mu, -1] = func(pop[:mu, 1:-1].T)                  # evaluate parents
print("Initial population:\n", pop)

## Sort parents
#pop = pop[pop[:,-1].argsort()]
#print(pop)

for gen in range(num_gen):
    # Make children ################################
    if rho == 1:
        # Each child is made from one randomly selected parent
        pop[mu:,:] = pop[np.random.randint(mu, size=lmb)]
    elif rho == mu:
        # Recombine all parents for each child
        raise NotImplementedError() # TODO
    elif 1 < rho < mu:
        # Recombine rho randomly selected parents for each child
        raise NotImplementedError() # TODO
    else:
        raise ValueError()

    pop[mu:,-1] = np.nan
    #print("Children:\n", pop)

    # Mutate children's sigma ######################
    pop[mu:,0] = pop[mu:,0] * np.exp(tau * np.random.normal(size=lmb))
    #print("Mutated children (sigma):\n", pop)

    # Mutate children's value (x <- x + sigma * N) #
    pop[mu:,1:-1] = pop[mu:,1:-1] + pop[mu:,0].reshape([-1,1]) * np.random.normal(size=[lmb,d])
    #print("Mutated children (value):\n", pop)

    # Evaluate children ############################
    pop[mu:, -1] = func(pop[mu:, 1:-1].T)
    #print("Evaluated children:\n", pop)

    # Select the best individuals ##################
    if selection_operator == '+':
        # *plus-selection* operator
        pop = pop[pop[:,-1].argsort()]
    elif selection_operator == ',':
        # *comma-selection* operator: only the children compete
        pop[:mu,:] = pop[mu:,:][pop[mu:,-1].argsort()[:mu]]
    else:
        raise ValueError()
        
    pop[mu:, :] = np.nan 

    #print("Selected individuals for the next generation:\n", pop)

print("Result:\n", pop[:mu, :])

Some explanations about $\sigma$ and $\tau$


In [ ]:
tau

In [ ]:
import random

sigma_list = [1.]

for i in range(1000):
    sigma_list.append(sigma_list[-1] * math.exp(tau * random.normalvariate(0., 1.)))         # mutate sigma

#sigma = sigma * exp(tau*randn)         # mutate sigma

plt.loglog(sigma_list);

In [ ]:
x = np.linspace(-4, 4, 100)
y1 = np.exp(1./math.sqrt(1.*d) * x)
y2 = np.exp(1./math.sqrt(2.*d) * x)
y3 = np.exp(1./math.sqrt(3.*d) * x)
y4 = np.exp(1./(2.*d) * x)

plt.plot(x, y1, label="tau1")
plt.plot(x, y2, label="tau2")
plt.plot(x, y3, label="tau3")
plt.plot(x, y4, label="tau4")

plt.legend();

In [ ]:
tau1 = 1./math.sqrt(1.*d)
tau2 = 1./math.sqrt(2.*d)
tau3 = 1./math.sqrt(3.*d)
tau4 = 1./(2.*d)

x1 = np.exp(tau1 * np.random.normal(size=[100000]))
x2 = np.exp(tau2 * np.random.normal(size=[100000]))
x3 = np.exp(tau3 * np.random.normal(size=[100000]))
x4 = np.exp(tau4 * np.random.normal(size=[100000]))

bins = np.linspace(0, 10, 100)
plt.hist(x1, bins=bins, alpha=0.5, label=r"$\exp\left(\frac{1}{\sqrt{d}} \mathcal{N}(0,1)\right)$", lw=2, histtype='step')
plt.hist(x2, bins=bins, alpha=0.5, label=r"$\exp\left(\frac{1}{\sqrt{2d}} \mathcal{N}(0,1)\right)$", lw=2, histtype='step')
plt.hist(x3, bins=bins, alpha=0.5, label=r"$\exp\left(\frac{1}{\sqrt{3d}} \mathcal{N}(0,1)\right)$", lw=2, histtype='step')
plt.hist(x4, bins=bins, alpha=0.5, label=r"$\exp\left(\frac{1}{2d} \mathcal{N}(0,1)\right)$", lw=2, histtype='step')

plt.xlim(-0.25, 7)

plt.axvline(1, color='k', linestyle='dotted')

plt.legend(fontsize='x-large');
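
A side note on these histograms: $\exp(\tau \, \mathcal{N}(0,1))$ follows a log-normal distribution, so its median is exactly 1 while its mean is $\exp(\tau^2/2) > 1$; smaller learning rates $\tau$ therefore reduce this multiplicative bias on $\sigma$. A quick numerical check, reusing the samples drawn above:

In [ ]:
# The empirical mean of exp(tau * N(0,1)) should be close to exp(tau**2 / 2),
# while the empirical median should stay close to 1 (log-normal distribution).
for tau_i, x_i in zip((tau1, tau2, tau3, tau4), (x1, x2, x3, x4)):
    print("tau = {:.3f}   mean = {:.3f}   exp(tau^2/2) = {:.3f}   median = {:.3f}".format(
        tau_i, x_i.mean(), math.exp(tau_i**2 / 2.), np.median(x_i)))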

Other implementations

PyAI

Import required modules


In [ ]:
# Init matplotlib

%matplotlib inline

import matplotlib
matplotlib.rcParams['figure.figsize'] = (8, 8)

In [ ]:
# Setup PyAI
import sys
sys.path.insert(0, '/Users/jdecock/git/pub/jdhp/pyai')

In [ ]:
import numpy as np
import time

from pyai.optimize import SAES

In [ ]:
# Plot functions
from pyai.optimize.utils import plot_contour_2d_solution_space
from pyai.optimize.utils import plot_2d_solution_space

from pyai.optimize.utils import array_list_to_array
from pyai.optimize.utils import plot_fx_wt_iteration_number
from pyai.optimize.utils import plot_err_wt_iteration_number
from pyai.optimize.utils import plot_err_wt_execution_time
from pyai.optimize.utils import plot_err_wt_num_feval

Define the objective function


In [ ]:
## Objective function: Rosenbrock function (Scipy's implementation)
#func = scipy.optimize.rosen

In [ ]:
# Set the objective function
#from pyai.optimize.functions import sphere as func
from pyai.optimize.functions import sphere2d as func
#from pyai.optimize.functions import additive_gaussian_noise as noise
from pyai.optimize.functions import multiplicative_gaussian_noise as noise
#from pyai.optimize.functions import additive_poisson_noise as noise

func.noise = noise      # Comment this line to use a deterministic objective function

xmin = func.bounds[0]   # TODO
xmax = func.bounds[1]   # TODO

In [ ]:
%%time

saes = SAES()

func.do_eval_logs = True
func.reset_eval_counters()
func.reset_eval_logs()

res = saes.minimize(func, init_pop_mu=0., init_pop_sigma=1.)

func.do_eval_logs = False

eval_x_array = np.array(func.eval_logs_dict['x']).T
eval_error_array = np.array(func.eval_logs_dict['fx']) - func(func.arg_min)

In [ ]:
res

In [ ]:
plot_contour_2d_solution_space(func,
                               xmin=xmin,
                               xmax=xmax,
                               xstar=res,
                               xvisited=eval_x_array,
                               title="SAES");

In [ ]:
plot_err_wt_num_feval(eval_error_array, x_log=True, y_log=True)

In [ ]:
%%time

eval_error_array_list = []

NUM_RUNS = 100

for run_index in range(NUM_RUNS):
    saes = SAES()

    func.do_eval_logs = True
    func.reset_eval_counters()
    func.reset_eval_logs()

    res = saes.minimize(func, init_pop_mu=0., init_pop_sigma=1., lmb=6)

    func.do_eval_logs = False

    eval_error_array = np.array(func.eval_logs_dict['fx']) - func(func.arg_min)

    print("x* =", res)
    
    eval_error_array_list.append(eval_error_array);

In [ ]:
plot_err_wt_num_feval(array_list_to_array(eval_error_array_list), x_log=True, y_log=True, plot_option="mean")

Octave/Matlab

An external Octave (Matlab) implementation is available at: https://homepages.fhv.at/hgb/downloads/mu_mu_I_lambda-ES.oct.

Mathematica

An external Mathematica implementation is available at: https://homepages.fhv.at/hgb/downloads/mu_mu_I_lambda-ES.mat