One of the objectives of my Stan model is to estimate how age covaries with the model's parameters. However, in parameter-recovery simulations, the model recovers age–parameter covariances that are much lower than the true values used to generate the data. Does anyone have insight into why that might happen with an approach like this?
TIA.
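To make the intended structure explicit (notation mirrors the code below): z-scored age is inserted as the first row of the latent matrix, with its scale fixed to 1, and the non-centered transform

$$
\theta = \left(\operatorname{diag}(\sigma_{\mathrm{pr}})\, L\, \theta_{\mathrm{pr}}\right)^{\top}, \qquad \Omega = L L^{\top},
$$

is meant to induce the age–parameter correlations in the first row/column of $\Omega$. Since $\sigma_{\mathrm{pr},1} = 1$ and $L_{1,1} = 1$ for a Cholesky factor of a correlation matrix, the first column of $\theta$ reproduces age exactly.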
data {
  int<lower=1> nS;          // number of subjects
  vector[nS] age_zscored;   // z-scored age
}
parameters {
  vector[2] theta_mu;              // group-level means of the two parameters
  matrix[2, nS] theta_pr_minus2;   // non-centered subject effects (rows 2-3 of theta_pr)
  vector<lower=0>[2] sigma;        // parameter standard deviations
  cholesky_factor_corr[3] L;       // Cholesky factor of the 3x3 correlation matrix (age + 2 parameters)
}
transformed parameters {
  matrix[3, nS] theta_pr;          // 3 rows: age plus the two parameters
  vector<lower=0>[3] sigma_pr;
  matrix[nS, 3] theta;
  // parameters that may covary with age
  vector<lower=0, upper=1>[nS] alpha;
  vector[nS] beta_1;
  // Fix the scale of age (first element) to 1, since age_zscored is standardized
  sigma_pr[1] = 1;
  sigma_pr[2:3] = sigma;
  // Assign age as the first row of theta_pr
  theta_pr[1, :] = to_row_vector(age_zscored);
  theta_pr[2:3, :] = theta_pr_minus2;
  // Non-centered transform; because sigma_pr[1] = 1 and L[1, 1] = 1,
  // the first column of theta reproduces age_zscored exactly
  theta = transpose(diag_pre_multiply(sigma_pr, L) * theta_pr);
  beta_1 = (theta_mu[1] + theta[:, 2]) * 5;
  alpha = Phi_approx(theta_mu[2] + theta[:, 3]);
}
model {
  // Priors
  target += normal_lpdf(theta_mu | 0, 2);
  target += std_normal_lpdf(to_vector(theta_pr_minus2));
  target += student_t_lpdf(sigma | 3, 0, 1);
  target += lkj_corr_cholesky_lpdf(L | 1);
  // model specification goes here
}
generated quantities {
  // Recovered latent correlation matrix; row/column 1 corresponds to age
  matrix[3, 3] Omega = multiply_lower_tri_self_transpose(L);
}
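In case it helps with the diagnosis: I could also monitor the sample correlations between age and the transformed parameters (beta_1, alpha), which can differ from the latent Omega[1, 2:3] because the scaling and Phi_approx transforms sit between the latent scale and the parameters actually used in the likelihood. A minimal sketch of additions to generated quantities (corr_age_beta1 and corr_age_alpha are names I made up, not part of the model above):

generated quantities {
  matrix[3, 3] Omega = multiply_lower_tri_self_transpose(L);
  // Hypothetical check: per-draw Pearson correlations between age and the
  // transformed subject-level parameters
  real corr_age_beta1 = sum((age_zscored - mean(age_zscored))
                            .* (beta_1 - mean(beta_1)))
                        / ((nS - 1) * sd(age_zscored) * sd(beta_1));
  real corr_age_alpha = sum((age_zscored - mean(age_zscored))
                            .* (alpha - mean(alpha)))
                        / ((nS - 1) * sd(age_zscored) * sd(alpha));
}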