Hi everyone,

I’m developing this model, and I’m wondering if I need to include a Jacobian adjustment. The model currently runs fine and appears to give meaningful results. My current code is included below (I’m sorry it’s a bit long and messy). As you can see, vhat0 is a matrix defined in the transformed parameters block as a combination of data and parameters. In the model block, I put priors on this matrix (which makes a lot of sense in the context of the data). I assume this means I need to include a Jacobian adjustment?

(Any advice in how to do so in this case, will also be greatly appreciated.)

Thanks for any help!

```
data {
int<lower = 1> N; // dim1 of data
int<lower = 1> J; // dim2 of data
int<lower = 1> B; // bounds of data
int<lower = -B, upper = B> Y[N, J]; // main data
vector<lower = -B, upper = B>[N] V; // more data
int<lower = 0, upper = 1> M[N, J]; // indicator of missing values
}
parameters {
matrix[N, 2] alpha_raw; // constant, raw
matrix<lower = 0>[N, 2] beta_raw; // factor loading, raw
real theta[J]; // latent factor
vector<lower = 0>[2] gamma; // sd of alpha
vector<lower = 0>[2] sd_beta; // sd of beta
real<lower = 0> sd_vhat;
real mean_vhat;
real<lower = 1> nu; // hyperparameter
real<lower = 0> tau; // hyperparameter
vector<lower = 0>[N] eta; // i's average variance x J^2
simplex[J] rho; // j's share of variance
vector<lower = 0, upper = 1>[N] delta; // mixing proportion
real<lower = .65, upper = 1> phi; // mean of prior on delta
real<lower = 2> lambda; // concentration of prior on delta
}
transformed parameters {
real<lower=0> alpha_delta = lambda * phi;
real<lower=0> beta_delta = lambda * (1 - phi);
matrix[N, 2] vhat0; // rescaled vectors
matrix[N, 2] alpha; // constant
matrix[N, 2] beta; // factor loading
matrix<lower = 0>[N, J] sigma; // sd's of errors
matrix[N, J] log_lik; // pointwise log-likelihood
alpha[, 1] = alpha_raw[, 1] * gamma[1];
alpha[, 2] = alpha_raw[, 2] * gamma[2];
beta[, 1] = (beta_raw[, 1] + 1 - mean(beta_raw[, 1])) * sd_beta[1] - sd_beta[1] + 1;
beta[, 2] = (beta_raw[, 2] + 1 - mean(beta_raw[, 2])) * sd_beta[2] - sd_beta[2] + 1;
sigma = sqrt(eta) * to_row_vector(rho);
for (i in 1:N) {
for (j in 1:J) {
if (M[i, j] == 0) {
log_lik[i, j] = log_mix( delta[i],
normal_lpdf(Y[i, j] | + alpha[i, 1] + beta[i, 1] * theta[j], sigma[i, j]),
normal_lpdf(Y[i, j] | - alpha[i, 2] - beta[i, 2] * theta[j], sigma[i, j]) );
}
else {log_lik[i, j] = 0;}
}
}
vhat0[, 1] = ((V - alpha[, 1]) ./ beta[, 1]); // DEFINING vhat0
vhat0[, 2] = ((V + alpha[, 2]) ./ - beta[, 2]); // DEFINING vhat0
}
model {
alpha_raw[, 1] ~ normal(0, 1);
alpha_raw[, 2] ~ normal(0, 1);
beta_raw[, 1] ~ normal(1, 1);
beta_raw[, 2] ~ normal(1, 1);
gamma ~ cauchy(0, B);
sd_beta ~ cauchy(0, .5);
sd_vhat ~ cauchy(0, B / 2.0);
mean_vhat ~ normal(0, B / 5.0);
theta ~ normal(0, 10);
nu ~ cauchy(0, 50);
tau ~ cauchy(0, J * B);
eta ~ scaled_inv_chi_square(nu, tau);
rho ~ dirichlet(rep_vector(5, J));
vhat0[, 1] ~ normal(mean_vhat, sd_vhat); // CALLS FOR JACOBIAN ADJUSTMENT?
vhat0[, 2] ~ normal(mean_vhat, sd_vhat);
delta ~ beta(alpha_delta, beta_delta);
lambda ~ pareto(2, 1.5);
target += sum(log_lik);
}
generated quantities {
vector[N] vhat;
vhat = (delta .* vhat0[, 1]) + ((1 - delta) .* vhat0[, 2]);
}
```