Ok, the Bartlett decomposition really does look like what is implemented in the JAGS code generated by boral (normal prior on the lower-triangular elements, zeros on the upper triangle). However, boral does not sample the diagonal elements from a chi-squared distribution, but from a truncated normal…
Thank you very much for these insights, I would not have guessed that by myself!
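For reference, the chi-squared part refers to the standard Bartlett decomposition of a Wishart matrix: if W \sim \mathrm{Wishart}_p(V, n) with V = L L^\top, then

W = L A A^\top L^\top, \qquad A_{ii} = \sqrt{c_i}, \; c_i \sim \chi^2_{n-i+1}, \qquad A_{ij} \sim \mathcal{N}(0,1) \ (i > j), \qquad A_{ij} = 0 \ (i < j),

so the standard-normal lower triangle and zero upper triangle match the boral prior in spirit, while the Bartlett diagonal would be the square root of a chi-squared draw rather than a truncated normal. In the generated model below, the diagonal elements instead get a dnorm(0,0.1)I(0,) prior.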
## JAGS model written for boral version 1.5 on 2018-04-20 09:43:59 ##
model {
   ## Data Level ##
   for(i in 1:n) {
      for(j in 1:p) { eta[i,j] <- inprod(lv.coefs[j,2:(num.lv+1)],lvs[i,]) + row.coefs.ID1[row.ids[i,1]] }
      for(j in 1:p) { y[i,j] ~ dpois(exp(lv.coefs[j,1] + eta[i,j])) }
   }
   ## Latent variables ##
   for(i in 1:n) { for(k in 1:num.lv) { lvs[i,k] ~ dnorm(0,1) } }
   ## Process level and priors ##
   for(j in 1:p) { lv.coefs[j,1] ~ dnorm(0,0.1) } ## Separate species intercepts
   for(i in 1:n.ID[1]) { row.coefs.ID1[i] ~ dnorm(0, pow(row.sigma.ID1,-2)) }
   row.sigma.ID1 ~ dunif(0,30)
   for(i in 1:(num.lv-1)) { for(j in (i+2):(num.lv+1)) { lv.coefs[i,j] <- 0 } } ## Constraints to 0 on upper diagonal
   for(i in 1:num.lv) { lv.coefs[i,i+1] ~ dnorm(0,0.1)I(0,) } ## Sign constraints on diagonal elements
   for(i in 2:num.lv) { for(j in 2:i) { lv.coefs[i,j] ~ dnorm(0,0.1) } } ## Free lower diagonals
   for(i in (num.lv+1):p) { for(j in 2:(num.lv+1)) { lv.coefs[i,j] ~ dnorm(0,0.1) } } ## All other elements
}
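To make the prior structure concrete, here is a minimal sketch in Python (assuming numpy and scipy; p, num_lv, and the seed are illustrative and not taken from boral) that draws one loadings matrix, i.e. lv.coefs[, 2:(num.lv+1)] in the JAGS code, under the constraints above; the comment on the diagonal branch notes what a Bartlett-style chi-squared draw would look like instead.

import numpy as np
from scipy import stats

rng = np.random.default_rng(0)          # illustrative seed
p, num_lv = 6, 2                        # illustrative dimensions
prec = 0.1                              # JAGS dnorm(0, 0.1) is parameterised by precision
sd = 1.0 / np.sqrt(prec)                # corresponding standard deviation

loadings = np.zeros((p, num_lv))        # lv.coefs[, 2:(num.lv+1)] in the JAGS code
for i in range(p):
    for k in range(num_lv):
        if i < k:
            loadings[i, k] = 0.0        # upper triangle constrained to zero
        elif i == k:
            # diagonal: N(0, sd^2) truncated to (0, Inf), i.e. dnorm(0,0.1)I(0,);
            # a Bartlett-style diagonal would instead be np.sqrt(rng.chisquare(df))
            loadings[i, k] = stats.truncnorm.rvs(0, np.inf, loc=0, scale=sd,
                                                 random_state=rng)
        else:
            # free lower triangle and all rows below the square block
            loadings[i, k] = rng.normal(0, sd)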