Bayesian GLMs


In [1]:
import matplotlib.pyplot as plt
import numpy as np
from pymc3 import *
import patsy
import theano
import pandas as pd
from collections import defaultdict
from statsmodels.formula.api import glm as glm_sm
import statsmodels.api as sm
from pandas.tools.plotting import scatter_matrix

Simple example

Let's generate some data with a known slope and intercept and fit a simple linear GLM.


In [2]:
size = 50
true_intercept = 1
true_slope = 2

x = np.linspace(0, 1, size)
y = true_intercept + x*true_slope + np.random.normal(scale=.5, size=size)

data = dict(x=x, y=y)

The glm.linear_component() function can be used to generate the output variable y_est and the coefficients of the specified linear model.


In [3]:
with Model() as model:
    y_est, coeffs = glm.linear_component('y ~ x', data)
    sigma = Uniform('sigma', 0, 20)
    y_obs = Normal('y_obs', mu=y_est, sd=sigma, observed=y)
    start = find_MAP(vars=[sigma])
    step = Slice(model.vars)
    trace = sample(2000, step, start, progressbar=False)

plt.figure(figsize=(5, 5))
plt.plot(x, y, 'x')
glm.plot_posterior_predictive(trace)
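
A quick sanity check (a sketch, assuming the trace supports indexing by the patsy coefficient names 'Intercept' and 'x', as trace_to_dataframe() suggests later): the posterior means should land near the true intercept of 1 and slope of 2.

In [ ]:
# Posterior means of the coefficients; exact values depend on the random data.
print trace['Intercept'].mean()
print trace['x'].mean()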


Since a few generalized linear models come up over and over again (Normally distributed noise, logistic regression, etc.), the glm.glm() function simplifies the step above by creating the likelihood (y_obs) and its priors (sigma) for us. Because we are working inside the model context, the random variables are added to the model behind the scenes. The function also automatically finds a good starting point, which it returns.

Note that the call to glm() below produces exactly the same model as above, just more succinctly.


In [4]:
with Model() as model:
    glm.glm('y ~ x', data)
    trace = sample(2000, Slice(model.vars), progressbar=False)

plt.figure(figsize=(5, 5))
plt.plot(x, y, 'x')
glm.plot_posterior_predictive(trace)


Robust GLM

Let's try the same model but with a few outliers in the data.


In [8]:
x_out = np.append(x, [.1, .15, .2])
y_out = np.append(y, [8, 6, 9])
data_outlier = dict(x=x_out, y=y_out)

In [10]:
with Model() as model:
    glm.glm('y ~ x', data_outlier)
    trace = sample(2000, Slice(model.vars), progressbar=False)

plt.figure(figsize=(5, 5))
plt.plot(x_out, y_out, 'x')
glm.plot_posterior_predictive(trace)


Because the normal distribution has little mass in its tails, outliers affect the fit strongly.
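
To quantify how thin the Normal tails are compared to the Student-T used below, here is a small side calculation (an illustrative sketch; scipy is assumed to be available):

In [ ]:
from scipy import stats

# Two-sided tail mass beyond three standard units: the Normal keeps only
# about 0.3% of its mass out there, while a Student-T with nu=1.5 keeps
# more than 10%, so occasional extreme points are far less surprising.
print 2 * stats.norm.sf(3)
print 2 * stats.t.sf(3, 1.5)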

Instead, we can replace the Normal likelihood with a Student-T distribution, which has heavier tails and is more robust towards outliers. While this could be done with the linear_component() function by manually defining the T likelihood, we can use the glm() function for more automation. By default this function uses a Normal likelihood; to use a T distribution instead, we pass a family object that specifies how to link the output to y_est (here we explicitly use the Identity link function, which is also the default) and what the priors for the T distribution are. Here we fix the degrees of freedom nu to 1.5.


In [11]:
with Model() as model_robust:
    family = glm.families.T(link=glm.links.Identity,
                            priors={'nu': 1.5,
                                    'lam': ('sigma', Uniform.dist(0, 20))})
    glm.glm('y ~ x', data_outlier, family=family)

    trace = sample(2000, Slice(model_robust.vars), progressbar=False)

plt.figure(figsize=(5, 5))
plt.plot(x_out, y_out, 'x')
glm.plot_posterior_predictive(trace)
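
As a quick numeric check (same trace-indexing assumption as before), the posterior mean of the slope under the Student-T likelihood should sit much closer to the true value of 2 than under the Normal likelihood above:

In [ ]:
# With the Normal likelihood the three outliers drag the slope upward;
# the heavy-tailed T likelihood largely ignores them.
print trace['x'].mean()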


Hierarchical GLM

We now move to a real dataset and a hierarchical model: several regression coefficients share a common group mean (grp_mean) and group standard deviation (grp_sd) as hyperpriors, so their estimates are partially pooled. The glm() function accepts such priors via a dictionary mapping coefficient names to distributions.


In [16]:
sat_data = pd.read_csv('data/Guber1999data.txt')
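
The Guber (1999) dataset contains, for each US state, the average SAT score (sat_t) together with spending per pupil (spend), the student/teacher ratio (stu_tea_rat), average teacher salary (salary), and the percentage of students taking the SAT (prcnt_take). A quick peek at the data (a minimal check cell, assuming the column names used in the formula below):

In [ ]:
sat_data.head()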

In [17]:
with Model() as model_sat:
    grp_mean = Normal('grp_mean', mu=0, sd=10)
    grp_sd = Uniform('grp_sd', 0, 200)

    # Define priors for intercept and regression coefficients.
    priors = {'Intercept': Normal.dist(mu=sat_data.sat_t.mean(), sd=sat_data.sat_t.std()),
              'spend': Normal.dist(mu=grp_mean, sd=grp_sd),
              'stu_tea_rat': Normal.dist(mu=grp_mean, sd=grp_sd),
              'salary': Normal.dist(mu=grp_mean, sd=grp_sd),
              'prcnt_take': Normal.dist(mu=grp_mean, sd=grp_sd)}
    glm.glm('sat_t ~ spend + stu_tea_rat + salary + prcnt_take',
            sat_data, priors=priors)
    trace_sat = sample(500, NUTS(), progressbar=False)

In [18]:
scatter_matrix(trace_to_dataframe(trace_sat), figsize=(12,12));



We can also give all regression coefficients a common heavy-tailed Student-T prior, with a Gamma hyperprior on its precision, by passing regressor_prior (and intercept_prior) instead of a per-coefficient dict:

In [35]:
with Model() as model_sat:
    grp_mean = Normal('grp_mean', mu=0, sd=10)
    grp_prec = Gamma('grp_prec', alpha=1, beta=.1, testval=1.)
    slope = T.dist(mu=grp_mean, lam=grp_prec, nu=1)
    intercept = Normal.dist(mu=sat_data.sat_t.mean(), sd=sat_data.sat_t.std())
    glm.glm('sat_t ~ spend + stu_tea_rat + salary + prcnt_take', sat_data,
        intercept_prior=intercept, regressor_prior=slope)
    trace_sat = sample(500, NUTS(), progressbar=False)

In [39]:
scatter_matrix(trace_to_dataframe(trace_sat), figsize=(12,12));



The same Student-T prior model once more, this time drawing more samples. The commented-out lines sketch how the degrees of freedom could be learned instead of fixed:

In [41]:
tdf_gain = 5.
with Model() as model_sat:
    grp_mean = Normal('grp_mean', mu=0, sd=10)
    grp_prec = Gamma('grp_prec', alpha=1, beta=.1, testval=1.)
    # Alternative: learn the degrees of freedom via a transformed Uniform.
    #grp_df_raw = Uniform('grp_df', 0, 1, testval=0.5)
    #grp_df = 1 - tdf_gain * theano.tensor.log(1 - grp_df_raw)
    slope = T.dist(mu=grp_mean, lam=grp_prec, nu=1)  # or nu=grp_df
    intercept = Normal.dist(mu=sat_data.sat_t.mean(), sd=sat_data.sat_t.std())
    glm.glm('sat_t ~ spend + stu_tea_rat + salary + prcnt_take', sat_data,
            intercept_prior=intercept, regressor_prior=slope)

    trace_sat = sample(2000, NUTS(), progressbar=False)

Logistic Regression

Let's predict a person's gender (male) from their height and weight. For reference, we first fit the model with statsmodels' frequentist GLM and then with the equivalent Bayesian model in pymc3.


In [4]:
htwt_data = pd.read_csv('data/HtWt.csv')
htwt_data.head()


Out[4]:
   male  height  weight
0     0    63.2   168.7
1     0    68.7   169.8
2     0    64.8   176.6
3     0    67.9   246.8
4     1    68.9   151.6

In [15]:
m = glm_sm('male ~ height + weight', htwt_data, family=sm.families.Binomial()).fit()
print m.summary()


                 Generalized Linear Model Regression Results                  
==============================================================================
Dep. Variable:                   male   No. Observations:                   70
Model:                            GLM   Df Residuals:                       67
Model Family:                Binomial   Df Model:                            2
Link Function:                  logit   Scale:                             1.0
Method:                          IRLS   Log-Likelihood:                -28.298
Date:                Mon, 15 Jul 2013   Deviance:                       56.597
Time:                        09:01:52   Pearson chi2:                     62.8
No. Iterations:                     7                                         
==============================================================================
                 coef    std err          t      P>|t|      [95.0% Conf. Int.]
------------------------------------------------------------------------------
Intercept    -45.2059     10.887     -4.152      0.000       -66.545   -23.867
height         0.6571      0.164      4.018      0.000         0.337     0.978
weight         0.0096      0.011      0.892      0.376        -0.012     0.031
==============================================================================

In [14]:
with Model() as model_htwt:
    glm.glm('male ~ height + weight', htwt_data, family=glm.families.Binomial())
    trace_htwt = sample(1000, NUTS(), progressbar=False)

In [15]:
trace_df = trace_to_dataframe(trace_htwt)
print trace_df.describe().drop('count').T
scatter_matrix(trace_df, figsize=(8, 8));
print "P(weight < 0) = ", (trace_df['weight'] < 0).mean()
print "P(height < 0) = ", (trace_df['height'] < 0).mean()


                 mean        std        min        25%        50%        75%        max
Intercept  -52.471967  11.956980 -88.497183 -59.442733 -51.819902 -43.486117 -25.443466
height       0.763006   0.180745   0.359032   0.630974   0.750271   0.865600   1.298437
weight       0.010924   0.010590  -0.014231   0.003520   0.010437   0.018188   0.048876
P(weight < 0) =  0.154
P(height < 0) =  0.0

Bayesian Logistic Lasso

Lasso regularization is equivalent to placing independent Laplace priors on the regression coefficients: the lasso estimate is the MAP solution of that model. The Laplace distribution is sharply peaked at zero, which shrinks small coefficients towards zero, while its heavy tails still allow large ones. Let's first look at its shape:


In [7]:
lp = Laplace.dist(mu=0, b=0.05)
x_eval = np.linspace(-.5, .5, 300)
plt.plot(x_eval, theano.tensor.exp(lp.logp(x_eval)).eval())
plt.xlabel('x')
plt.ylabel('Probability')
plt.title('Laplace distribution');
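
The connection to the lasso is visible in the log-density: for a Laplace(0, b) prior, log p(x) = -|x|/b - log(2b), i.e. an L1 penalty of weight 1/b plus a constant (a small check using numpy directly):

In [ ]:
# The log-density of the Laplace prior is linear in |x|, which is exactly
# an L1 (lasso) penalty on the coefficient values.
b = 0.05
x_check = np.array([-.2, -.1, 0., .1, .2])
print -np.abs(x_check) / b - np.log(2 * b)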



In [8]:
with Model() as model_lasso:
    # Define priors: Laplace for the regression coefficients,
    # a wide Normal for the intercept.
    priors = {'Intercept': Normal.dist(mu=0, sd=50),
              'height': Laplace.dist(mu=0, b=0.05),
              'weight': Laplace.dist(mu=0, b=0.05)}
    glm.glm('male ~ height + weight', htwt_data, family=glm.families.Binomial(),
            priors=priors)

    trace_lasso = sample(500, NUTS(), progressbar=False)
    
trace_df = trace_to_dataframe(trace_lasso)
scatter_matrix(trace_df, figsize=(8, 8));
print trace_df.describe().drop('count').T


                 mean       std        min        25%        50%        75%        max
Intercept  -22.348449  7.145352 -44.688989 -27.898570 -22.252590 -16.166471  -9.122010
height       0.308687  0.106537   0.096913   0.226177   0.310946   0.382381   0.650051
weight       0.011768  0.008298  -0.005977   0.005996   0.011636   0.016593   0.041575

Note the shrinkage: compared to the unregularized logistic regression above, the posterior mean for height has been pulled from about 0.76 towards zero (about 0.31), as expected under the sparsity-inducing Laplace prior.