From: Bayesian Models for Astrophysical Data, Cambridge Univ. Press

(c) 2017,  Joseph M. Hilbe, Rafael S. de Souza and Emille E. O. Ishida  

 

You are kindly asked to include the complete citation if you use this material in a publication.

# Synthetic data for a zero-altered (hurdle) gamma model:
# a gamma-distributed response multiplied by a Bernoulli "observed" indicator.
set.seed(33559)

# Sample size
nobs <- 750

# Generate predictor
x1 <- runif(nobs, 0, 4)

# Linear predictor and mean of the gamma component (log link)
xc <- -1 + 0.75 * x1
exc <- exp(xc)

# Dispersion parameter; gamma shape r = 1/phi
phi <- 0.066
r <- 1 / phi

# Gamma response with mean exc (E[y] = shape/rate = exc when rate = r/exc)
y <- rgamma(nobs, shape = r, rate = r / exc)
LG <- data.frame(y, x1)

# Construct filter: Bernoulli indicator for non-zero observations.
# Renamed `pi` -> `prob` so the base R constant `pi` is not masked.
xb <- -2 + 1.5 * x1
prob <- 1 / (1 + exp(-xb))
bern <- rbinom(nobs, size = 1, prob = prob)

# Add structural zeros: response is zeroed wherever the Bernoulli draw is 0
LG$y <- LG$y * bern

Code 7.11 Bayesian log-gamma–logit hurdle model in R using JAGS

=======================================================

library(R2jags)

# Design matrices (intercept + x1) for the gamma (continuous) and
# Bernoulli (binary) components of the hurdle model.
Xc <- model.matrix(~ 1 + x1, data = LG)
Xb <- model.matrix(~ 1 + x1, data = LG)

# Number of coefficients per component
Kc <- ncol(Xc)
Kb <- ncol(Xb)

# Bundle everything JAGS needs; `Zeros` supports the zeros trick.
n_obs <- nrow(LG)
model.data <- list(
  Y     = LG$y,            # response
  Xc    = Xc,              # covariates, gamma component
  Xb    = Xb,              # covariates, binary component
  Kc    = Kc,              # number of betas
  Kb    = Kb,              # number of gammas
  N     = n_obs,           # sample size
  Zeros = rep(0, n_obs))   # pseudo-observations for the zeros trick

# Load the JAGS glm module (better samplers for GLM-type nodes)
load.module("glm")

# JAGS model: zero-altered (hurdle) gamma, implemented with the zeros
# trick because JAGS has no built-in hurdle-gamma likelihood.
# NOTE: `LG[i]` inside the model string is a JAGS node (the gamma
# log-likelihood), unrelated to the R data frame of the same name.
model_string <- "
model{
    # Priors for both beta and gamma components
    for (i in 1:Kc) {beta[i] ~ dnorm(0, 0.0001)}
    for (i in 1:Kb) {gamma[i] ~ dnorm(0, 0.0001)}

    # Prior for scale parameter, r
    r ~ dgamma(1e-2, 1e-2)

    # Likelihood using the zero trick
    C <- 10000

 

    for (i in 1:N) {
        Zeros[i] ~ dpois(-ll[i] + C)

        # gamma log-likelihood
        lg1[i] <- - loggam(r) + r * log(r / mu[i])
        lg2[i] <- (r - 1) * log(Y[i]) - (Y[i] * r) / mu[i]
        LG[i] <- lg1[i] + lg2[i]
        z[i] <- step(Y[i] - 0.0001)
        l1[i] <- (1 - z[i]) * log(1 - Pi[i])
        l2[i] <- z[i] * ( log(Pi[i]) +LG[i])
        ll[i] <- l1[i] + l2[i]
        log(mu[i]) <- inprod(beta[], Xc[i,])
        logit(Pi[i]) <- inprod(gamma[], Xb[i,])
    }

    phi <- 1/r
    }"

# writeLines() instead of sink()/cat(): sink() leaves console output
# diverted to the file if an error occurs before the closing sink().
writeLines(model_string, "ZAGGLM.txt")

 

# Initial parameter values: mildly dispersed starting points for each chain.
# Draw order (beta, gamma, r) matters for RNG reproducibility.
inits <- function() {
  beta_start  <- rnorm(Kc, 0, 0.1)
  gamma_start <- rnorm(Kb, 0, 0.1)
  r_start     <- runif(1, 0, 100)
  list(beta = beta_start, gamma = gamma_start, r = r_start)
}

# Parameters to monitor in the MCMC output
params <- c("beta", "gamma", "phi")

 

# MCMC sampling. Argument names are spelled out in full:
# the original relied on partial matching (`parameters` for
# `parameters.to.save`, `model` for `model.file`), which is fragile.
ZAG <- jags(data = model.data,
            inits = inits,
            parameters.to.save = params,
            model.file = "ZAGGLM.txt",
            n.thin = 1,
            n.chains = 3,
            n.burnin = 2500,
            n.iter = 5000)

# Model results: posterior summaries with 95% credible intervals
print(ZAG, intervals = c(0.025, 0.975), digits = 3)

 

=======================================================

 

Output on screen:

Inference for Bugs model at "ZAGGLM.txt", fit using jags,

    3 chains, each with 5000 iterations (first 2500 discarded)

    n.sims = 7500 iterations saved

 

                 mu.vect  sd.vect          2.5%         97.5%  Rhat  n.eff

beta[1]           -0.991    0.027        -1.045        -0.939 1.003    790

beta[2]            0.748    0.010         0.728         0.768 1.004    760

gamma[1]          -2.003    0.165        -2.335        -1.698 1.002   5300

gamma[2]           1.496    0.093         1.325         1.684 1.001   7200

phi                0.064    0.004         0.057         0.071 1.001   7500

deviance    20002024.436    3.109  20002020.276  20002031.998 1.000      1

 

For each parameter, n.eff is a crude measure of effective sample size,

and Rhat is the potential scale reduction factor (at convergence, Rhat=1).

 

DIC info (using the rule, pD = var(deviance)/2)

pD = 4.8 and DIC = 20002029.3

DIC is an estimate of expected predictive error (lower deviance is better).

© 2017 by Emille E. O. Ishida