In [1]:
# Create a new Boyle virtual environment to isolate this notebook's dependencies.
Boyle.mk("matrex_samples")
Out[1]:
In [2]:
# Activate the environment and load the modules already installed in it.
Boyle.activate("matrex_samples")
Out[2]:
In [3]:
# Install the Matrex matrix library (~> 0.6) into the active environment.
Boyle.install({:matrex, "~> 0.6"})
Out[3]:
In [4]:
# A 3x3 magic square: every row, column and diagonal sums to the same value.
m = Matrex.magic(3)
Out[4]:
In [5]:
# Access-behaviour indexing: element at row 2, column 3 (Matrex indices are 1-based).
m[2][3]
Out[5]:
In [6]:
# Range access: slices the matrix (rows 1 through 2).
m[1..2]
Out[6]:
In [7]:
# The :rows key returns the number of rows.
m[:rows]
Out[7]:
In [8]:
# The :size key returns the matrix dimensions.
m[:size]
Out[8]:
In [9]:
# The :max key returns the largest element of the whole matrix.
m[:max]
Out[9]:
In [10]:
# Largest element within row 2 only.
m[2][:max]
Out[10]:
In [11]:
# The :argmax key returns the index of the largest element.
m[:argmax]
Out[11]:
In [12]:
# Index of the largest element within row 2.
m[2][:argmax]
Out[12]:
In [13]:
import Matrex
defmodule LinearRegression do
  @moduledoc """
  Regularized logistic-regression cost function for one-vs-all digit
  classification, implemented on top of the Matrex library.
  """

  @doc """
  Computes the regularized cost and gradient for logistic regression.

  * `theta` - column matrix of model parameters
  * `params` - `{x, y, lambda}`: design matrix, label matrix and the
    regularization strength (`lambda` must be a number)
  * `_iteration` - optimizer iteration counter; accepted so the function
    has the /3 arity expected by `Matrex.Algorithms.fmincg/4`, but unused

  Returns `{j, grad}` where `j` is the scalar cost (or `:nan` when the
  hypothesis saturates) and `grad` has the same shape as `theta`.
  """
  # NOTE: `iteration` was unused and triggered a compiler warning — renamed
  # to `_iteration` (callers are unaffected; Elixir arity is positional).
  def lr_cost_fun(%Matrex{} = theta, {%Matrex{} = x, %Matrex{} = y, lambda} = _params, _iteration \\ 0)
      when is_number(lambda) do
    m = y[:rows]

    # Hypothesis h = sigmoid(x * theta), fused into one Matrex call.
    h = Matrex.dot_and_apply(x, theta, :sigmoid)

    # Mask of ones with a 0 at position (1, 1) so the bias term is
    # excluded from regularization.
    l = Matrex.ones(theta[:rows], theta[:cols]) |> Matrex.set(1, 1, 0)

    # lambda / (2m) * sum(theta^2) over the non-bias parameters.
    regularization =
      Matrex.dot_tn(l, Matrex.square(theta))
      |> Matrex.scalar()
      |> Kernel.*(lambda / (2 * m))

    # Cross-entropy cost; Matrex.scalar/1 may yield :nan when h reaches
    # exactly 0 or 1, so that case is propagated untouched.
    # NOTE: the inner fn's argument previously shadowed `x` (the design
    # matrix) — renamed to `cost` for clarity.
    j =
      y
      |> Matrex.dot_tn(Matrex.apply(h, :log), -1)
      |> Matrex.subtract(
        Matrex.dot_tn(
          Matrex.subtract(1, y),
          Matrex.apply(Matrex.subtract(1, h), :log)
        )
      )
      |> Matrex.scalar()
      |> (fn
            :nan -> :nan
            cost -> cost / m + regularization
          end).()

    # Gradient: (x' * (h - y) + lambda * (theta .* l)) / m
    grad =
      x
      |> Matrex.dot_tn(Matrex.subtract(h, y))
      |> Matrex.add(Matrex.multiply(theta, l), 1.0, lambda)
      |> Matrex.divide(m)

    {j, grad}
  end

  # The same cost function, written with the operators from `Matrex.Operators`.
  # About 2x slower than the implementation above, but far more readable;
  # kept here to demonstrate the possibilities of the library.
  def lr_cost_fun_ops(%Matrex{} = theta, {%Matrex{} = x, %Matrex{} = y, lambda} = _params)
      when is_number(lambda) do
    # Replace Kernel's arithmetic operators with Matrex's. Use with caution!
    import Kernel, except: [-: 1, +: 2, -: 2, *: 2, /: 2, <|>: 2]
    import Matrex
    import Matrex.Operators

    # Only needed when run from iex, to remove the ambiguity of t/1.
    import IEx.Helpers, except: [t: 1]

    m = y[:rows]
    h = sigmoid(x * theta)
    l = ones(size(theta)) |> set(1, 1, 0.0)

    j = (-t(y) * log(h) - t(1 - y) * log(1 - h) + lambda / 2 * t(l) * pow2(theta)) / m
    grad = (t(x) * (h - y) + (theta <|> l) * lambda) / m

    {scalar(j), grad}
  end
end
Out[13]:
In [14]:
# Clone the Matrex repo to obtain its bundled test data.
# System.cmd/3 passes args as a list, so no shell interpolation is involved.
System.cmd("git", ["clone", "https://github.com/versilov/matrex", "resources/matrex"])
Out[14]:
In [15]:
# `ls` is an IEx helper available in the notebook shell; lists the cloned test data files.
ls "resources/matrex/test/data"
In [16]:
# Load the gzip-compressed training matrix; each row is a flattened digit
# image (reshaped to 20x20 in the heatmap cell below).
x = Matrex.load("resources/matrex/test/data/X.mtx.gz")
Out[16]:
In [17]:
# Render 16 training samples (rows 1100..1115) as a 4x4 grid of 20x20
# digit images. The images are transposed because they are stored
# column-major in the flattened rows.
# Idiom fix: the `&(&1 |> ...)` capture is hard to read — use a plain fn.
x[1100..1115]
|> list_of_rows()
|> Enum.map(fn row -> row |> reshape(20, 20) |> transpose() end)
|> reshape(4, 4)
|> heatmap()
Out[17]:
In [18]:
# Load the label matrix: the digit class for each training row.
y = Matrex.load("resources/matrex/test/data/Y.mtx")
Out[18]:
In [19]:
# Initial parameter column vector: one weight per input feature, all zeros.
theta = Matrex.zeros(x[:cols], 1)
Out[19]:
In [20]:
# Regularization strength and the optimizer's iteration budget.
lambda = 0.01
iterations = 100
Out[20]:
In [21]:
# Train ten one-vs-all classifiers (one per digit) concurrently, then stack
# the resulting parameter columns into a single solutions matrix.
solutions =
  Task.async_stream(
    1..10,
    fn digit ->
      # Binary label vector: 1.0 where the sample is the current digit, else 0.0.
      labels = Matrex.apply(y, fn val -> if(val == digit, do: 1.0, else: 0.0) end)

      # Minimize the cost with fmincg (an Elixir port built on Matrex),
      # using the previously defined cost function.
      {best_theta, cost_history, _iters} =
        Matrex.Algorithms.fmincg(&LinearRegression.lr_cost_fun/3, theta, {x, labels, lambda}, iterations)

      # Digit, final cost reached, and the fitted column of parameters.
      {digit, List.last(cost_history), best_theta}
    end,
    max_concurrency: 4
  )
  # Merge all 10 solution columns into one matrix, one row per digit.
  |> Enum.map(fn {:ok, {_digit, _cost, fitted}} -> Matrex.to_list(fitted) end)
  |> Matrex.new()
Out[21]:
In [22]:
# Probability matrix: one row per sample, one column per digit classifier.
predictions = Matrex.apply(Matrex.dot_nt(x, solutions), :sigmoid)
Out[22]:
In [23]:
# Percentage of samples whose most confident classifier matches the label.
# Idiom fix: Enum.count/2 with a predicate replaces the manual reduce-based counter.
accuracy =
  1..predictions[:rows]
  |> Enum.count(fn row -> y[row] == predictions[row][:argmax] end)
  |> Kernel./(predictions[:rows])
  |> Kernel.*(100)
Out[23]:
In [24]:
# Matrex implements the Enumerable protocol: membership test over all elements.
Enum.member?(m, 2.0)
Out[24]:
In [25]:
# Element count via Enumerable (9 for the 3x3 matrix).
Enum.count(m)
Out[25]:
In [26]:
# Sum of all elements via Enumerable.
Enum.sum(m)
Out[26]:
In [27]:
# Save a 5x5 random matrix to disk in Matrex's binary .mtx format.
Matrex.random(5) |> Matrex.save("rand.mtx")
Out[27]:
In [28]:
# Round-trip: read the saved matrix back from disk.
Matrex.load("rand.mtx")
Out[28]:
In [29]:
# Element-wise division by the identity divides off-diagonal entries by zero,
# producing special float values; save as CSV to show they are preserved.
Matrex.magic(5) |> Matrex.divide(Matrex.eye(5)) |> Matrex.save("nan.csv")
Out[29]:
In [30]:
# Load it back: the special values survive the CSV round-trip.
Matrex.load("nan.csv")
Out[30]:
In [31]:
# Rebind m to a 3x3 identity matrix.
m = Matrex.eye(3)
Out[31]:
In [32]:
# Element-wise division by an all-zeros matrix: every entry divides by zero
# (1/0 on the diagonal, 0/0 elsewhere — inspected in the next two cells).
n = Matrex.divide(m, Matrex.zeros(3))
Out[32]:
In [33]:
# Diagonal entry: 1/0 — expected to be infinity.
n[1][1]
Out[33]:
In [34]:
# Off-diagonal entry: 0/0 — expected to be NaN.
n[1][2]
Out[34]: