In [11]:
%matplotlib inline
import pylab
import numpy as np
import functools
import adapt_float
import adapt_spike
import adapt_fixed_small

import ctn_benchmark.control as ctrl

In [72]:
def objective(args, adapt_type):
    D = 1
    dt=0.001
    T = 20.0
    seed=2
    noise=0.1
    Kp=args['Kp']
    Kd=args['Kd']
    Ki=args['Ki']
    print 'trying', Kp, Kd, Ki
    tau_d=0.001
    period=4
    amplitude=1
    n_neurons=500
    learning_rate=1
    max_freq=1.0
    synapse=0.01
    scale_add=2
    delay=0.01
    filter=0.01

    n_neurons=256

    signal = ctrl.Signal(D, period, dt=dt, max_freq=max_freq, seed=seed)

    system = ctrl.System(D, D, dt=dt, seed=seed,
            motor_noise=noise, sense_noise=noise,
            scale_add=scale_add,
            motor_scale=10,
            motor_delay=delay, sensor_delay=delay,
            motor_filter=filter, sensor_filter=filter)

    pid = ctrl.PID(Kp, Kd, Ki, tau_d=tau_d)
    
    if adapt_type == 'float':
        adapt = adapt_float.AdaptiveFloat(n_inputs=D, n_outputs=D, n_neurons=n_neurons, seed=seed,
                                     learning_rate=1e-4)
        scale = 1.0
    elif adapt_type == 'fixed':
        adapt = adapt_fixed_small.AdaptiveFixed(n_inputs=D, n_outputs=D, n_neurons=n_neurons, seed=seed,
                                                input_bits=8,
                                                state_bits=8,
                                                extra_bits=4,
                                                decoder_offset=4,
                                                decoder_bits=8,
                                                smoothing=10,
                                                learning_rate=1e-4)
        scale = adapt.input_max
    else:
        adapt = None

    steps = int(T / dt)
    time = np.arange(steps)*dt
    data_desired = np.zeros((steps, D))
    data_actual = np.zeros((steps, D))
    data_pid = np.zeros((steps, D))
    data_error = np.zeros((steps, D))

    for i in range(steps):
        desired = signal.value(i*dt)*amplitude
        data_desired[i,:] = desired

        actual = system.state
        data_actual[i,:] = actual

        raw_pid = pid.step(actual, desired)
        data_pid[i,:] = raw_pid

        if adapt is not None:
            adjust = adapt.step(actual*scale, -raw_pid*scale)/scale
        else:
            adjust = 0
            
        system.step(raw_pid + adjust)
        
    #pylab.plot(data_desired)
    #pylab.plot(data_actual)
        
    rmse = np.sqrt(np.mean((data_desired - data_actual)**2))
        
    return dict(
        status='ok',
        loss=rmse
        )

In [59]:
# Baseline: pure PID control, no adaptive component (Out[59] below shows rmse ~0.0726).
objective(dict(Kp=10, Kd=2, Ki=0), adapt_type='none')


trying 10 2 0
Out[59]:
{'loss': 0.072578950102291276, 'status': 'ok'}

In [73]:
# Same gains with the fixed-point adaptive population (Out[73]: rmse ~0.0656, better than PID alone).
objective(dict(Kp=10, Kd=2, Ki=0), adapt_type='fixed')


trying 10 2 0
Out[73]:
{'loss': 0.065557196457235914, 'status': 'ok'}

In [61]:
# Same gains with the floating-point adaptive population (Out[61]: rmse ~0.0599, best of the three).
objective(dict(Kp=10, Kd=2, Ki=0), adapt_type='float')


trying 10 2 0
Out[61]:
{'loss': 0.059855123871354471, 'status': 'ok'}

In [42]:
import hyperopt as hp

# Search space for the three PID gains: log-normal priors centered (in log
# space) on rough order-of-magnitude guesses for each gain.
space = {
    'Kp': hp.hp.lognormal('Kp', np.log(50), 1),
    'Kd': hp.hp.lognormal('Kd', np.log(20), 1),
    'Ki': hp.hp.lognormal('Ki', np.log(10), 1),
}

# Collects every evaluation so the optimization history can be analyzed later.
trials = hp.Trials()

In [54]:
# NOTE(review): functools is already imported in the first cell; this re-import is redundant but harmless.
import functools
# Optimize the PID gains for the no-adaptation baseline: 100 TPE evaluations, history kept in `trials`.
best = hp.fmin(functools.partial(objective, adapt_type='none'), space=space, algo=hp.tpe.suggest, max_evals=100, trials=trials)


trying 114.770987149 52.1093915339 34.523488315
trying 96.4564007286 8.94229157721 56.043252455
trying 146.393044894 22.2700142553 22.5102762544
trying 120.722168015 10.6342010579 10.4477337723
trying 296.980700034 21.5485290052 1.79378050616
trying 46.3596483055 43.4751578719 18.0655530425
trying 20.0025367749 34.6281892575 16.1782020117
trying 50.4005204591 33.4906747566 11.2156598723
trying 153.104519669 193.198823397 11.0467677988
trying 13.3916328139 16.551889787 32.9095297298
trying 12.1989544865 16.0317310375 138.635094794
trying 5.02679451709 14.4944484188 2.41750728437
trying 12.276807354 17.6464522014 0.759984221527
trying 14.2193810661 1.33140824233 2.70733666952
trying 7.15373973008 12.8073107368 77.3326922421
trying 16.0016607829 18.5604224371 33.9580068995
trying 1.23732563249 15.9709077005 1.33547699772
trying 9.61402483544 12.9619480359 3.53599541915
trying 15.8860748834 15.8714309145 29.3937545919
trying 4.96897501295 19.6898963023 0.819059912742
trying 38.0989784861 14.51787039 0.4359189851
trying 18.8749009036 11.6256602817 4.06620256042
trying 25.3743954387 17.1351592912 56.3212484688
trying 9.58835058582 21.0770074935 5.60492375321
trying 12.3069075208 4.30496402724 24.3595105373
trying 13.4463165055 13.4726566407 13.2780380955
trying 10.6686000273 9.77511817244 12.5963596495
trying 25.0319319683 13.619123049 12.977258005
trying 63.1956875854 8.01208152261 7.87465585453
trying 7.81005957911 73.3167050788 14.1951986202
trying 16.8895590246 11.8308120083 7.86332787746
trying 34.2032486168 24.8153443589 15.9878119315
trying 14.154447895 15.0924471296 19.4654893322
trying 24.8598215162 15.0294701324 19.2454618171
trying 14.2132664982 11.5585448788 25.5376249549
trying 11.4640385101 10.8218216303 19.5125578855
trying 22.5995709789 12.4680286716 14.4649305588
trying 71.3003034794 4.73499071454 9.41164198904
trying 53.7939504281 9.35854669636 21.5030229354
trying 17.7853386011 13.6797313631 12.7074219419
trying 2.81084190505 8.180658161 15.207776225
trying 14.454642453 14.8881769555 13.5886622762
trying 28.6957796656 23.801715705 13.4421036081
trying 43.3867096329 10.5910729175 11.6424420341
trying 15.1993903272 19.0503127592 11.9819866096
trying 295.132908874 90.0714165339 10.1971827861
trying 84.4428805693 12.1216807759 16.8107621342
trying 11.095055585 26.8763895395 13.8899158612
trying 9.52888112605 17.8131628576 15.333536944
trying 13.2174084104 41.0893734614 10.5883315214
trying 8.55993825066 10.9603842928 17.78128269
trying 6.01391598112 12.9390570844 8.24207309601
trying 12.8749992043 10.266248081 9.64075670521
trying 3.18737415864 7.24537647178 6.71989432777
trying 13.3004172922 10.1396492884 9.67051578283
trying 12.7650667062 8.90033316691 9.41532776936
trying 13.681960958 10.1581108622 8.77319020763
trying 11.6254539832 9.91534014733 8.74826197585
trying 10.0389650503 6.78117750007 8.5710041863
trying 16.2208361462 8.43503836372 7.35301002859
trying 14.918555277 9.30105739443 9.77731800554
trying 13.7244148423 5.62607789949 10.9541085714
trying 18.6121983238 7.64351401811 5.97798667921
trying 11.7606031999 9.60523650596 7.35883433357
trying 156.154362554 8.69864065438 10.0686556856
trying 15.4205958779 7.66306059092 9.03864645721
trying 20.6933724397 11.1507285277 11.6690633327
trying 16.6628983805 10.0699669287 10.4608282191
trying 13.6715695535 6.11005346784 8.24739678465
trying 17.482485557 11.2573230929 12.256544116
trying 11.0089349959 10.5113417873 10.7890354746
trying 12.1897366792 12.1234081872 11.4142728511
trying 10.406855249 13.4767990106 7.71715624345
trying 117.87571458 32.9250687569 8.45734155927
trying 30.2377853691 9.17615241185 8.88298260751
trying 9.0524357641 9.54817651681 5.06357009605
trying 12.6732467075 10.2895528254 6.65867919886
trying 14.6517691091 12.4401245401 9.6940407629
trying 40.8734159868 14.0596204379 7.29353044832
trying 57.0772073185 13.1887016704 9.13256118857
trying 15.5464349605 28.0800095403 8.00877062187
trying 135.136599177 22.217835294 44.0789214214
trying 23.4375141564 65.9406304644 4.08634119532
trying 36.3099671746 20.8602984489 10.2665219837
trying 12.0095124817 2.49603381816 9.89833074531
trying 11.202538933 11.4164903851 13.0764402009
trying 100.801557673 8.3204907005 14.8478715537
trying 13.8429950932 16.477740283 12.1626521769
trying 13.1035606278 16.3247219231 9.43528402911
trying 47.9564882349 15.5749313919 12.2078486795

In [55]:
best


Out[55]:
{'Kd': 16.477740282991654, 'Ki': 12.162652176904626, 'Kp': 13.842995093163033}

In [99]:
# Plot the per-trial loss over the optimization run and report the best RMSE found.
loss = [t['result']['loss'] for t in trials.trials]
pylab.plot(loss)
pylab.xlabel('trial')
pylab.ylabel('rmse')
pylab.ylim(0, 0.15)
pylab.show()
print 'best rmse', np.min(loss)


best rmse 0.0550078106508

In [93]:
# Bucket every trial by how close its loss is to the best observed RMSE.
# A trial with loss within 5% of the best falls in '<5%', within 10% in
# '5-10%', and everything worse in '10%+'.
limits = [1.0, 1.05, 1.1, np.inf]
limit_labels = ['<5%', '5-10%', '10%+']
best_rmse = np.min(loss)
data = []
for t in trials.trials:
    rate = t['result']['loss'] / best_rmse
    # First bucket whose upper bound is >= rate (the np.inf sentinel
    # guarantees the scan always terminates within limit_labels).
    category = limit_labels[-1]
    for idx, upper in enumerate(limits[1:]):
        if upper >= rate:
            category = limit_labels[idx]
            break
    vals = t['misc']['vals']
    data.append({
        'Kp': vals['Kp'][0],
        'Kd': vals['Kd'][0],
        'Ki': vals['Ki'][0],
        'category': category,
    })

In [94]:
import pandas
import seaborn as sns

In [95]:
# One row per hyperopt trial: the three gains plus the closeness-to-best category.
df = pandas.DataFrame(data)

In [97]:
# Pairwise scatter of the three gains, colored by how close each trial's
# RMSE came to the best found; KDEs on the diagonal.
grid = sns.pairplot(data=df, x_vars=['Kp', 'Kd', 'Ki'], y_vars=['Kp', 'Kd', 'Ki'],
                    diag_kind='kde', hue='category', hue_order=limit_labels)
ax_lim = 50
best_vals = best['Kp'], best['Kd'], best['Ki']
# Single pass over the axes grid: fix limits and mark the best-found gains.
# (Previously two loops with duplicated branches: both if/else arms set the
# same xlim, and on the diagonal i == j so axvline(best_vals[i]) and
# axvline(best_vals[j]) were the same line.)
for i, row in enumerate(grid.axes):
    for j, ax in enumerate(row):
        ax.set_xlim(0, ax_lim)
        ax.axvline(best_vals[j], c='k')
        if i != j:
            # Off-diagonal panels are 2-D scatters: also bound and mark the y-axis.
            ax.set_ylim(0, ax_lim)
            ax.axhline(best_vals[i], c='k')



In [ ]: