data {
int<lower=0> n_examinee; // Number of examinees
int<lower=0> n_item; // Number of items
int<lower=0> Y[n_examinee, n_item]; // Observed count matrix (examinee x item)
int<lower=1> K; // number of item-level covariates (e.g., item types)
matrix[n_item, K] X; // dummy (design) matrix of item-level covariates
}
parameters {
real<lower=0> omega_precision; // precision of the examinee ability distribution
real<lower=0> sigma_beta; // shared scale for covariate effects and item residuals
vector[K] beta_difficulty; // effects of item covariates on difficulty
vector[n_examinee] theta; // examinee ability (log-rate scale)
vector[n_item] difficulty_raw; // unconstrained item difficulties (centered in transformed parameters)
}
transformed parameters {
  // Center the raw difficulties (soft sum-to-zero) for identifiability.
  vector[n_item] difficulty = difficulty_raw - mean(difficulty_raw);
  // Item difficulty = centered baseline + covariate effect.
  // X * beta_difficulty is the vectorized form of the per-item
  // dot_product(X[i], beta_difficulty) loop (one matrix-vector multiply).
  vector[n_item] item_difficulty = difficulty + X * beta_difficulty;
}
model {
  // Hyperpriors / priors. sigma_beta is half-Cauchy via its <lower=0> constraint.
  sigma_beta ~ cauchy(0, 5);
  beta_difficulty ~ normal(0, sigma_beta);
  omega_precision ~ gamma(1, 1);
  // Stan's normal() is parameterized by the standard deviation,
  // so sd = 1 / sqrt(precision).
  theta ~ normal(0, inv_sqrt(omega_precision));

  // Vectorized prior on the raw difficulties: X * beta_difficulty replaces
  // the per-item dot_product loop (one matrix-vector multiply).
  difficulty_raw ~ normal(X * beta_difficulty, sigma_beta);

  // Likelihood on the log scale: poisson_log_lpmf takes the linear predictor
  // directly, avoiding the explicit exp() and the intermediate lambdas matrix
  // (more numerically stable, identical target density). This also removes the
  // local declaration that previously appeared after sampling statements,
  // which older Stan versions reject.
  for (i in 1:n_item) {
    target += poisson_log_lpmf(Y[, i] | theta + item_difficulty[i]);
  }
}
Instead of computing dot products inside a loop, try using columns_dot_product
(rows_dot_product would also be faster than a loop, but slightly slower than
columns_dot_product).
Can you use a matrix multiplication here? That way this sampling statement can be pulled out of the loop entirely, e.g.:
difficulty_raw ~ normal_id_glm(X, 0, beta_difficulty, sigma_beta);
In addition to normal_id_glm, you could use poisson_log_glm
for your likelihood and remove the intermediate calculations:
transformed parameters {
vector[n_item] difficulty = difficulty_raw - mean(difficulty_raw);
}
model {
sigma_beta ~ cauchy(0, 5);
beta_difficulty ~ normal(0, sigma_beta);
omega_precision ~ gamma(1, 1);
theta ~ normal(0, sqrt(1/omega_precision));
difficulty_raw ~ normal_id_glm(X, 0, beta_difficulty, sigma_beta);
for (i in 1:n_item) {
Y[,i] ~ poisson_log_glm(X[i], theta + difficulty[i], beta_difficulty);
}
}