In [ ]:
# ensure Python can find the Qwt plotting classes
ENV["PYTHONPATH"] = joinpath(Pkg.dir("Qwt"), "src", "python");

# suppress the matplotlib gui, which doesn't play nice with other backends
ENV["MPLBACKEND"] = "Agg"

# initialize with custom defaults
using Plots, Distributions; qwt()
default(size=(500,300), leg=false)
colors = [:royalblue, :orangered]

# create x/y vectors that trace a square grid in a single zig-zag path
function gridxy(lim, n::Int)
    xs = linspace(lim..., n)
    xypairs = vec([(x,y) for x in vcat(xs,reverse(xs)), y in xs])
    Plots.unzip(xypairs)
end
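
Before moving on, here's a quick look at what `gridxy` returns (illustrative; a tiny n=3 grid keeps the output readable):


In [ ]:
# illustrative only: inspect the zig-zag ordering on a 3-point grid
gx, gy = gridxy((-1,1), 3)
[gx gy]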

The problem... given 2-D points drawn from one of two interleaved spirals, can we classify which function generated them?


In [ ]:
# these are the functions we want to classify
# scalar = 2  # larger is harder
noise = Distributions.Normal(0, 0.05)

maxt = 4π
f1(t) = (t*sin(t)/maxt, t*cos(t)/maxt)
f2(t) = (t*cos(t+0.5π)/maxt, -t*sin(t+0.5π)/maxt)

# our target function is ∈ {-1,1}
target(f) = f == f1 ? 1.0 : -1.0
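
Note that cos(t+0.5π) = -sin(t) and sin(t+0.5π) = cos(t), so f2(t) = -f1(t): the second spiral is the first rotated by π. A quick check (illustrative):


In [ ]:
# illustrative only: f2(t) should equal -f1(t) elementwise
f1(π), f2(π)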

On to the fun...


In [ ]:
# pick the plotting limits
lim = (-1,1)
tlim = (0, maxt)
ts = linspace(tlim..., 100)
funcs = [f1, f2]
n = 20
gridx, gridy = gridxy(lim, n)

function initialize_plot(funcs, ts, gridx, gridy; kw...)
    # show the grid
    p = plot([gridx gridy], [gridy gridx], c=:black; kw...)

    # show the funcs
    for (i,f) in enumerate(funcs)
        x, y = Plots.unzip(map(f, ts))
        plot!(x, y, line=(3,colors[i]))
    end
    p
end

# kick off an animation... we can save frames whenever we want; let's save the starting frame
function initialize_animation()
    anim = Animation()
    frame(anim)
    anim
end

# let's see what we're dealing with...
p = initialize_plot(funcs, ts, gridx, gridy)

Let's build a neural net!


In [ ]:
using OnlineAI

# gradientModel = SGDModel(η=1e-4, μ=0.8, λ=0)
# gradientModel = AdagradModel(η=1e-1)
# gradientModel = AdadeltaModel(η=0.1, ρ=0.99, λ=0)
# gradientModel = AdamModel(η=1e-4, λ=1e-8)
gradientModel = AdaMaxModel(η=1e-4, ρ1=0.9, ρ2=0.9)

# learningRateModel = FixedLearningRate()
learningRateModel = AdaptiveLearningRate(gradientModel, 2e-2, 0.05, wgt=ExponentialWeighting(30))

function initialWeights(nin::Int, nout::Int, activation::Activation)
    0.1randn(nout, nin) / sqrt(nin) + eye(nout, nin)
end

net = buildTanhClassificationNet(
    2,    # number of inputs
    1,    # number of outputs
    Int[2,2,2,2,2,2,2,2], # hidden layers structure
    params = NetParams(gradientModel = gradientModel, weightInit = initialWeights)
)
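
The near-identity `initialWeights` above is presumably there to keep this deep, narrow net trainable: each 2x2 weight matrix starts as the identity plus small Gaussian noise, so the stacked layers begin as an approximate identity map rather than shrinking the signal. A standalone sketch of the same recipe (illustrative; it sidesteps the OnlineAI `Activation` argument, which the init doesn't use):


In [ ]:
# illustrative only: the same init recipe, outside the typed signature
demo_init(nin, nout) = 0.1randn(nout, nin) / sqrt(nin) + eye(nout, nin)
demo_init(2, 2)   # ≈ the 2x2 identity with small perturbations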

Update our model and the visualization


In [ ]:
# set up a visualization of the projections
layers = filter(l -> l.nout == 2, net.layers[1:end-1])
num_hidden_layers = length(layers)
plts = [initialize_plot(funcs, ts, gridx, gridy, title="Hidden Layer $i") for i in 1:num_hidden_layers]
sz = max(500, round(Int, sqrt(num_hidden_layers) * 300))

projectionviz = subplot(plts..., n=num_hidden_layers, size=(sz,sz), pos=(400,0))

# set up the animation, then show the plots in a window
anim = initialize_animation()
gui()

# create another visualization to track the internal progress of the neural net
progressviz = track_progress(net, fields=[:w,:b,:a], size=(length(net.layers)*300,800), m=2, w=0);

In [ ]:
dist = Distributions.Uniform(tlim...)
progressgui = false

function test_data(n, tlim, funcs)
    ts = linspace(tlim..., n)
    x1, x2 = [hcat(Plots.unzip(map(f,ts))...) for f in funcs]
    y1, y2 = ones(n), -ones(n)
    DataPoints(vcat(x1,x2), vcat(y1,y2))
end

testn = 100
testdata = test_data(testn, tlim, funcs)

function activateHidden(net, layers, x, y, seriesidx, plts)
    n = length(x)
    p = length(layers)
    projx, projy = zeros(n,p), zeros(n,p)
    for i in 1:n
        # feed the data through the neural net
        OnlineAI.forward!(net, [x[i], y[i]])
        
        # grab the net's activations at each layer
        for j in 1:p
            projx[i,j], projy[i,j] = layers[j].Σ
        end
    end
    
    # now we can update the plots
    for j in 1:p
        plts[j][seriesidx] = (vec(projx[:,j]), vec(projy[:,j]))
    end
end

# final plot to track test error
errviz = subplot([totalCost(net, testdata) gradientModel.η], m=3, title=["Error" "η"], n=2, nc=1, pos=(0,0))
gui(errviz)
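
As a quick shape check (illustrative), each spiral contributes testn rows of 2-D inputs to the test design matrix:


In [ ]:
# illustrative only: one spiral's block of the test inputs
size(hcat(Plots.unzip(map(f1, linspace(tlim..., testn)))...))   # (testn, 2)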

In [ ]:
iterations_per_frame = 1000
total_frames = 100   # 100 frames × 1000 updates = 100,000 stochastic updates in total
for frm in 1:total_frames
    # pick one of the functions at random, sample a parameter t, then update the
    # neural net with the point f(t) as input and target(f) = ±1 as the output
    for i in 1:iterations_per_frame
        f = sample(funcs)
        t = rand(dist)
        x = f(t)
        y = target(f)
        update!(net, Float64[x...], [y])
    end
    
    if length(net.layers) > 1
        # update the progress visualization
        update!(progressviz, true, show=progressgui)
    end
    
    # update the error plot
    err = totalCost(net, testdata)
    push!(errviz.plts[1], err)
    update!(learningRateModel, err)
    push!(errviz.plts[2], gradientModel.η)
    gui(errviz)

    if length(net.layers) > 1
        # update the projections
        ts = linspace(tlim..., 70)
        for (seriesidx, (x,y)) in enumerate([(gridx,gridy), (gridy,gridx), Plots.unzip(map(f1,ts)), Plots.unzip(map(f2,ts))])
            activateHidden(net, layers, x, y, seriesidx, projectionviz.plts)
        end

        # show/update the plot
        gui(projectionviz)
        frame(anim)
    end
    sleep(0.001)
end

Evaluate


In [ ]:
# show how the internals progressed during the fitting
progressviz.subplt

In [ ]:
# show stacked and linked histograms of the predictions for each class
xs = OnlineAI.unzip(testdata)[1]
yhat = predict(net, xs)
yhat1, yhat2 = yhat[1:testn], yhat[testn+1:end]
subplot(histogram(yhat1), histogram(yhat2), nc=1, linkx=true, title=["f1 prediction" "f2 prediction"], size=(800,400))
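
The histograms show the separation qualitatively; for a single number (illustrative, taking the sign of the prediction as the decision rule):


In [ ]:
# illustrative only: fraction of test points classified correctly by sign
mean(vcat(sign(yhat1) .== 1, sign(yhat2) .== -1))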

In [ ]:
# switch to pyplot for the contour plot
be = backend()
pyplot()
xs = linspace(lim..., 100)
ts = linspace(tlim..., 100)

# make the gradient go from red to blue
cmap = ColorGradient(reverse(colors), alpha=0.5)
surf(x,y) = predict(net, Float64[x,y])[1]

# use contours to show the predictions from the neural net
p = plot(xs, xs, surf, w=2, fill=true, c=cmap, leg=true)

for (i,f) in enumerate(funcs)
    x, y = Plots.unzip(map(f, ts))
    plot!(x, y, line=(3,colors[i]))
end

# reset the backend to what it was before
backend(be)
p
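
A direct probe of the fitted surface (illustrative): predictions should sit near +1 on the f1 spiral and near -1 on the f2 spiral.


In [ ]:
# illustrative only: evaluate the prediction surface on one point per spiral
surf(f1(π)...), surf(f2(π)...)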

Animate!


In [ ]:
gif(anim, fps = 5)

Network viz


In [ ]:
# show the network (uses Qwt; visualize isn't available unless you import it)
ENV["PYTHONPATH"] = joinpath(Pkg.dir("Qwt"), "src", "python");
import Qwt

In [ ]:
viz = visualize(net);

In [ ]:
# update the net representation with weights, etc
update!(viz)

Testing...


In [ ]:
p.o.layers

In [ ]:
using Plots

colors = [:royalblue, :orangered]
ColorGradient(colors, alpha=0.2)

In [ ]:
selection[3][2]

In [ ]:
p[4][2] |> length

In [ ]:
gui(progressviz.subplt)

In [ ]:
histogram(yhat1)

In [ ]:
progressviz.subplt.plts[1].seriesargs[1][:serieshandle][:get_offsets]()

In [ ]:
learningRateModel

In [ ]:
update!(d,5)
diff(d)

In [ ]:
using Plots
p1 = plot(rand(20))
p2 = plot(rand(10))
p3 = scatter(rand(100))
p4 = plot(rand(1000))

In [ ]:
subplot(p1,p2,p3,p4, nr=1, leg=false)

In [ ]:
# ENV["MPLBACKEND"] = "qt4agg"
using Plots; pyplot()
p = scatter(rand(10))

In [ ]:
p.seriesargs[1][:serieshandle][:get_offsets]()

In [ ]:
PyPlot.backend

In [ ]:
push!(current(), ones(2))
# gui()
current()

In [ ]:
append!(p,1,rand(10))
gui()

In [ ]:
sp = progressviz.subplt.plts[1].o.widget[:minimumSizeHint]()

In [ ]:
testn = 100
xs = linspace(lim..., testn)
x1, x2 = [hcat(xs,map(f,xs)) for f in funcs]
y1, y2 = ones(testn), -ones(testn)
yhat1, yhat2 = [vec(predict(net, x)) for x in (x1,x2)]
DataPoints(vcat(x1,x2), vcat(y1,y2))

In [ ]:
errviz.n

In [ ]:
progressviz.subplt.plts[1].initargs[:linky]
