I just implemented the following mixture model:

(Small note: there is a logit link for mu.)

From the manual, I know QR decomposition on the linear predictors can make the code more efficient. I’d appreciate it if someone could take a look at my model specification and tell me whether there’s additional room for improvement.

```
data {
  int<lower=0> N;        // number of data points
  int<lower=1> I;        // number of individual covariates
  int<lower=1> G;        // number of group covariates
  int<lower=1> A[N];     // number of attempts
  int<lower=0> y[N];     // number of successes
  vector[I] xi[N];       // individual covariates
  vector[G] xg[N];       // group covariates
}
parameters {
  real<lower=0> phi;     // inverse concentration of the beta-binomial
  vector[I] beta;        // coefficients for mu (logit link)
  matrix[2,G] gamma;     // mixture-weight coefficients for components 1 and 3
}
transformed parameters {
  real<lower=0> rho = 1 / phi;  // beta-binomial concentration
}
model {
  // phi is constrained positive, so this is a half-normal(0, 10) prior.
  phi ~ normal(0, 10);
  beta ~ normal(0, 3);
  gamma[1] ~ normal(0, 3);
  gamma[2] ~ normal(0, 3);

  for (n in 1:N) {
    // logit link for mu
    real mu = inv_logit(dot_product(beta, xi[n]));
    // Log mixture weights: [point mass at 0, beta-binomial, point mass at A].
    // log_softmax keeps everything on the log scale.
    vector[3] lpz = log_softmax([gamma[1] * xg[n], 0, gamma[2] * xg[n]]');
    // Log contribution of the beta-binomial component.
    real lp_bb = lpz[2]
                 + beta_binomial_lpmf(y[n] | A[n], mu * rho, (1 - mu) * rho);
    // Accumulate with log_sum_exp instead of log(sum(exp(...))) for
    // numerical stability; the degenerate components contribute only
    // when y sits on the corresponding boundary.
    if (y[n] == 0 && y[n] == A[n])
      target += log_sum_exp([lpz[1], lpz[3], lp_bb]');
    else if (y[n] == 0)
      target += log_sum_exp(lpz[1], lp_bb);
    else if (y[n] == A[n])
      target += log_sum_exp(lpz[3], lp_bb);
    else
      target += lp_bb;
  }
}
```