top of page

From: Bayesian Models for Astrophysical Data, Cambridge Univ. Press

(c) 2017,  Joseph M. Hilbe, Rafael S. de Souza and Emille E. O. Ishida

You are kindly asked to include the complete citation if you use this material in a publication.


Code 6.23 Zero-truncated Poisson model in Python using Stan

====================================================

import numpy as np
import pystan
import statsmodels.api as sm

from scipy.stats import uniform, binom, poisson


def ztpoisson(N, lambda_par):
    """Draw N samples from a zero-truncated Poisson distribution.

    Uses inverse-CDF sampling: each uniform draw is confined to the
    interval (P(Y=0), 1), so the Poisson quantile function can never
    map it back to zero — every draw is >= 1 by construction.

    Parameters
    ----------
    N : int
        Number of samples to draw.
    lambda_par : array_like
        Poisson rate(s); a scalar or an array broadcastable to length N.

    Returns
    -------
    numpy.ndarray
        Array of N integer draws, all >= 1.
    """
    lam = np.broadcast_to(np.asarray(lambda_par, dtype=float), (N,))
    p0 = poisson.pmf(0, lam)                     # P(Y = 0) per observation
    # Uniform on (p0, 1): loc = p0, scale = 1 - p0
    u = uniform.rvs(loc=p0, scale=1.0 - p0, size=N)
    return poisson.ppf(u, lam).astype(int)

# Data
np.random.seed(123579)                       # set seed to replicate example
nobs = 3000                                  # number of obs in model

# Predictors: two binaries and one uniform covariate
x1 = binom.rvs(1, 0.3, size=nobs)
x2 = binom.rvs(1, 0.6, size=nobs)
x3 = uniform.rvs(size=nobs)

# Linear predictor (intercept 1.0) and mean on the response scale
xb = 1.0 + 2.0 * x1 - 3.0 * x2 - 1.5 * x3
exb = np.exp(xb)

# Zero-truncated Poisson response (all counts >= 1)
ztpy = ztpoisson(nobs, exb)

# Design matrix: prepend an explicit intercept column so the Stan
# model can recover the constant term (the published summary reports
# four coefficients, beta[0] ~ 1.0 being the intercept).
X = np.column_stack((np.ones(nobs), x1, x2, x3))

# Fit
mydata = {}                                  # build data dictionary for Stan
mydata['N'] = nobs                           # sample size
mydata['X'] = X                              # predictors (incl. intercept)
mydata['Y'] = ztpy                           # response variable
mydata['K'] = X.shape[1]                     # number of coefficients (4)


# Stan model: zero-truncated Poisson regression with a log link.
stan_code = """
data{
    int<lower=1> N;                  // number of observations
    int<lower=1> K;                  // number of coefficients
    matrix[N, K] X;                  // design matrix
    int<lower=1> Y[N];               // zero-truncated counts (all >= 1)
}
parameters{
    vector[K] beta;                  // regression coefficients (flat priors)
}
model{
    vector[N] mu;

    mu = exp(X * beta);              // log link

    // likelihood: Poisson truncated below at 1 (zero-truncated).
    // Note T[0,] would be a no-op, since Poisson support already
    // starts at 0; zero truncation requires a lower bound of 1.
    for (i in 1:N) Y[i] ~ poisson(mu[i]) T[1,];
}
"""


# Run MCMC: 3 chains of 5000 iterations each, the first 4000 of each
# chain discarded as warmup (1000 post-warmup draws per chain, 3000 total).
fit = pystan.stan(model_code=stan_code, data=mydata,
                  iter=5000, chains=3, warmup=4000, n_jobs=3)

# Output: posterior summary (mean, sd, quantiles, n_eff, Rhat) per parameter
print(fit)

====================================================

Output on screen:


Inference for Stan model: anon_model_1ed7e9994470ca62f9e27a5bf88708c1.
3 chains, each with iter=5000; warmup=4000; thin=1;
post-warmup draws per chain=1000, total post-warmup draws=3000.


mean     se_mean          sd          2.5%         25%         50%         75%      97.5%       n_eff       Rhat
beta[0]         1.02         9.3e-4       0.04          0.95          0.99         1.02         1.04         1.09      1517.0           1.0
beta[1]         1.98         9.0e-4       0.03          1.91          1.95         1.98           2.0         2.04      1476.0           1.0
beta[2]        -3.05         1.5e-3       0.07        -3.19         -3.09        -3.05          -3.0       -2.92      2147.0           1.0
beta[3]        -1.51         1.3e-3       0.05        -1.62         -1.55        -1.51        -1.48        -1.41     1731.0           1.0
lp__         4800.4             0.04       1.32     4797.1      4799.8      4800.7     4801.4     4802.0      1258.0          1.0


Samples were drawn using NUTS at Sat Dec 24 23:04:30 2016.
For each parameter, n_eff is a crude measure of effective sample size,
and Rhat is the potential scale reduction factor on split chains (at
convergence, Rhat=1).

bottom of page