In [5]:
# Directly applying the Bayesian interpretation: priors on the
# coefficients pull the mass of the posterior density towards
# the prior mean (which is usually 0), i.e. they shrink the coefficients
from sklearn.datasets import make_regression
In [6]:
# Generate a synthetic regression problem: 100 samples, 10 features, of
# which only 2 are actually informative; noise=20 is the std of the
# Gaussian noise added to the targets.
# Keyword arguments make the call self-documenting, and a fixed
# random_state makes the notebook reproducible under Restart & Run All.
X, y = make_regression(n_samples=100, n_features=10, n_informative=2,
                       noise=20, random_state=42)
In [7]:
from sklearn.linear_model import BayesianRidge
In [8]:
# Instantiate with everything left at the scikit-learn defaults
# (alpha_1, alpha_2, lambda_1, lambda_2 are all 1e-6 per the
# BayesianRidge docs — weakly-informative Gamma priors).
br = BayesianRidge()
In [9]:
# the two pairs of hyperparameters of interest are alpha_1/alpha_2
# and lambda_1/lambda_2.
# alpha_1 and alpha_2 are the shape and rate hyperparameters of the
# Gamma prior over the alpha parameter, and lambda_1/lambda_2 play
# the same role for the Gamma prior over the lambda parameter.
In [10]:
# First, fit the model with the hyperparameters left at their defaults.
# fit() returns the estimator itself, which is why Out[10] shows its repr.
br.fit(X,y)
Out[10]:
In [11]:
# Learned coefficients of the default-prior model; since only 2 of the
# 10 features are informative, most entries should be shrunk towards 0.
br.coef_
Out[11]:
In [12]:
# Now raise the first (shape) hyperparameter of each Gamma prior from its
# default to 10, to see how the learned coefficients change in response.
br_alphas = BayesianRidge(alpha_1=10, lambda_1=10)
In [13]:
# Fit on the same data so the coefficients are directly comparable
# to the default-prior fit above.
br_alphas.fit(X,y)
Out[13]:
In [15]:
# Coefficients under the modified priors — compare against br.coef_
# to see the effect of the stronger Gamma hyperparameters.
br_alphas.coef_
Out[15]:
In [ ]:
# For Bayesian Ridge Regression we place priors over alpha (the
# precision of the noise) and lambda (the precision of the weights).
# Both of these priors are Gamma distributions, a very flexible
# distribution over positive values.