In [3]:
using Plots
using Optim

In [6]:
# example of Rosenbrock function
function f(x) 
    return (1.0 - x[1])^2 + 100.0 * (x[2] - x[1]^2)^2
end
function g!(x::Vector, storage::Vector)
storage[1] = -2.0 * (1.0 - x[1]) - 400.0 * (x[2] - x[1]^2) * x[1]
storage[2] = 200.0 * (x[2] - x[1]^2)
end

soln = optimize(f, g!, [0.0, 0.0], GradientDescent())


WARNING: Method definition f(Any) in module Main at In[5]:3 overwritten at In[6]:3.
WARNING: Method definition g!(Array{T<:Any, 1}, Array{T<:Any, 1}) in module Main at In[5]:6 overwritten at In[6]:6.
Out[6]:
Results of Optimization Algorithm
 * Algorithm: Gradient Descent
 * Starting Point: [0.0,0.0]
 * Minimizer: [0.9356732500354086,0.875073922357589]
 * Minimum: 0.004155
 * Iterations: 1000
 * Convergence: false
   * |x - x'| < 1.0e-32: false
   * |f(x) - f(x')| / |f(x)| < 1.0e-08: false
   * |g(x)| < 1.0e-08: false
   * Reached Maximum Number of Iterations: true
 * Objective Function Calls: 3532
 * Gradient Calls: 3532

In [5]:
reshape(randn(5,3),15,1)


Out[5]:
15×1 Array{Float64,2}:
 -0.961733 
  0.256498 
  1.35844  
  0.176834 
  0.0325291
 -0.494308 
 -1.62881  
  0.586711 
  1.18127  
  2.75914  
  0.697867 
 -0.408102 
 -0.121016 
  0.498591 
 -0.511114 

In [7]:
# now, we do the matrix factorization example
# originally from Poblano example2

function matrix_approx_function(x::Vector, A::Matrix, r::Int)
    # unpack U and V from x
    m,n = size(A)
    U = reshape(x[1:m*r],m,r)
    V = reshape(x[(m*r+1):end],n,r)
    return 0.5*vecnorm(A - U*V')^2
end

function matrix_approx_gradient!(x::Vector, storage::Vector, A::Matrix, r::Int)
    m,n = size(A)
    U = reshape(x[1:m*r],m,r)
    V = reshape(x[(m*r+1):end],n,r)
    D = A - U*V'
    storage[1:(m*r)] = -vec(D*V)
    storage[(m*r+1):end] = -vec(D'*U)
end


Out[7]:
matrix_approx_gradient! (generic function with 1 method)

In [9]:
m = 5
n = 4
A = randn(m,n)
r = 2
myf = x -> matrix_approx_function(x, A, r)
myg! = (x, storage) -> matrix_approx_gradient!(x, storage, A, r)

#soln = optimize(myf, myg!, randn(m*r+n*r), LBFGS(), Optim.Options(f_tol = 1e-8))
soln = optimize(myf, myg!, randn(m*r+n*r), LBFGS(), OptimizationOptions(ftol = 1e-8))
x = Optim.minimizer(soln)
@show soln
Uopt = reshape(x[(1:m*r)],m,r)
Vopt = reshape(x[(m*r+1):end],n,r)
objval = 2*myf(x)
opterr = vecnorm(A-Uopt*Vopt')^2

Usvd,Ssvd,Vsvd = svd(A)
svderr = vecnorm(A-Usvd[:,1:r]*diagm(Ssvd[1:r])*Vsvd[:,1:r]')^2
@show objval
@show opterr
@show svderr
; # hide final output in JuliaBox


soln = Results of Optimization Algorithm
 * Algorithm: L-BFGS
 * Starting Point: [0.5076239629135827,-2.4524181370438063, ...]
 * Minimizer: [-0.28410788577451856,-0.0274782495321566, ...]
 * Minimum: 1.254834
 * Iterations: 28
 * Convergence: true
   * |x - x'| < 1.0e-32: false
   * |f(x) - f(x')| / |f(x)| < 1.0e-08: true
   * |g(x)| < 1.0e-08: false
   * Reached Maximum Number of Iterations: false
 * Objective Function Calls: 102
 * Gradient Calls: 102
objval = 2.509668413720704
opterr = 2.509668413720704
svderr = 2.509668400349696

In [ ]: