top of page

From: Bayesian Models for Astrophysical Data, Cambridge Univ. Press

(c) 2017,  Joseph M. Hilbe, Rafael S. de Souza and Emille E. O. Ishida

You are kindly asked to include the complete citation if you use this material in a publication.


Code 7.10 Zero-altered negative binomial (ZANB) or NB hurdle model in Python using Stan

=====================================================================

import numpy as np
import pystan
from scipy.stats import bernoulli, uniform, poisson
import statsmodels.api as sm


# Data
np.random.seed(141)                           # set seed to replicate example
nobs = 750                                    # number of observations in the model


def ztp(N, lambda_par):
    """Draw N zero-truncated Poisson variates with per-observation means.

    Inverse-CDF sampling with the uniform draw restricted to (P(Y=0), 1)
    guarantees every variate is >= 1.  (This helper is defined in an earlier
    listing of the book; the original scrape called it without defining it.)
    """
    p0 = poisson.pmf(0, lambda_par)           # P(Y = 0) under each mean
    u = p0 + uniform.rvs(size=N) * (1.0 - p0) # uniform on (p0, 1)
    return poisson.ppf(u, lambda_par).astype(int)


x1 = uniform.rvs(size=nobs, loc=-0.5, scale=3.0)  # single covariate on (-0.5, 2.5)
xb = 0.75 + 1.5 * x1                              # linear predictor for the counts
exb = np.exp(xb)                                  # count mean (log link)
poy = ztp(nobs, exb)                              # strictly positive counts

xc = -2.0 + 4.5 * x1                              # linear predictor for the zero filter
pi = 1.0 / (1.0 + np.exp(xc))                     # probability of a structural zero

# Bug fix: the scraped code called bernoulli.rvs(- pi[i]) — a negative
# probability.  The book's code draws with success probability 1 - pi.
bern = bernoulli.rvs(1.0 - pi)                    # 0/1 keep-indicator per observation

poy = poy * bern                                  # add structural zeros

# Design matrix with an explicit intercept column (equivalent to
# statsmodels.api.add_constant on a 1-D array, without the extra dependency).
X = np.column_stack((np.ones(nobs), x1))

# Prepare data for Stan
mydata = {}                                       # build data dictionary
mydata['Y'] = poy                                 # response variable (non-negative counts)
mydata['N'] = nobs                                # sample size
mydata['Xb'] = X                                  # predictors, count (NB) component
mydata['Xc'] = X                                  # predictors, binary (hurdle) component
mydata['Kb'] = X.shape[1]                         # number of coefficients, count part
mydata['Kc'] = X.shape[1]                         # number of coefficients, binary part

stan_code = """
data{
int<lower=0> N;
int<lower=0> Kb;
int<lower=0> Kc;
matrix[N, Kb] Xb;
matrix[N, Kc] Xc;
int<lower=0> Y[N];
}
parameters{
vector[Kc] beta;
vector[Kb] gamma;
real<lower=0, upper=5.0> alpha;
}
transformed parameters{
vector[N] mu;
vector[N] Pi;
vector[N] temp;
vector[N] u;
mu = exp(Xc * beta);
temp = Xb * gamma;

for (i in 1:N) {
Pi[i] = inv_logit(temp[i]);
u[i] = 1.0/(1.0 + alpha * mu[i]);
}
}
model{
vector[N] LogTrunNB;
vector[N] z;
vector[N] l1;
vector[N] l2;
vector[N] ll;

for (i in 1:Kc){
beta[i] ~ normal(0, 100);
gamma[i] ~ normal(0, 100);
}

for (i in 1:N) {
LogTrunNB[i] = (1.0/alpha) * log(u[i]) + Y[i] * log(1 - u[i]) +
lgamma(Y[i] + 1.0/alpha) - lgamma(1.0/alpha) -
lgamma(Y[i] + 1) - log(1 - pow(u[i],1.0/alpha));
z[i] = step(Y[i] - 0.0001);
l1[i] = (1 - z[i]) * log(1 - Pi[i]);
l2[i] = z[i] * (log(Pi[i]) + LogTrunNB[i]);
ll[i] = l1[i] + l2[i];
}

target += ll;

}
"""


# Run MCMC: 3 chains, 6000 iterations each, first 4000 discarded as warm-up
# (2000 post-warmup draws per chain).  Uses the PyStan 2 one-shot interface.
fit = pystan.stan(model_code=stan_code, data=mydata, iter=6000, chains=3,
                  warmup=4000, n_jobs=3)

# Output: print only the head of the fit summary (header + parameter rows).
nlines = 10                              # number of lines in screen output

output = str(fit).split('\n')
for item in output[:nlines]:
    # Fix: the scraped listing left this line unindented, which is a
    # SyntaxError in Python.
    print(item)

=====================================================================

Output on screen:


Inference for Stan model: anon_model_661f8fcd0467c3b3961fbbedefd979aa.
3 chains, each with iter=6000; warmup=4000; thin=1;
post-warmup draws per chain=2000, total post-warmup draws=6000.


mean     se_mean           sd           2.5%           25%          50%         75%     97.5%        n_eff      Rhat
beta[0]                0.82        6.8e-4        0.03            0.76             0.8           0.82         0.85        0.89         2586        1.0
beta[1]                1.47        3.4e-4        0.02            1.43           1.46          1.47          1.48          1.5         2569        1.0
gamma[0]         -1.74         3.5e-3        0.19          -2.14          -1.86         -1.73          -1.6       -1.37         3062        1.0
gamma[1]           4.51        6.5e-3        0.35            3.86           4.27            4.5          4.74         5.24        2904        1.0
alpha                1.4e-3       1.9e-5      1.2e-3         5.8e-5        5.2e-4       1.1e-3       2.1e-3      4.5e-3       4175        1.0

bottom of page