Dear all,

Currently I am trying to implement the Response Accuracy/Response Time Model by Fox (2018). As one of several identification constraints, I need to constrain the product of a parameter vector to be 1. I have implemented this constraint, for instance, for alpha by alpha = alpha / pow(prod(alpha), 0.1). Is there a more efficient way? Even in the non-centered version below, it takes quite some time to converge (no convergence issues whatsoever, though). Perhaps someone has some efficiency-related hints (or can point out any mistakes).

fox.lnirt <- "

data{

int<lower = 1> I; // number of items

int<lower = 1> J; // number of examinees

int<lower = 1> N; // number of observed data points

// upper bounds added so out-of-range ids are rejected at data load
// instead of causing an indexing error mid-sampling
int<lower = 1, upper = I> ii[N]; // item id

int<lower = 1, upper = J> jj[N]; // person id

int<lower=0,upper=1> y[N]; // responses

real logt[N]; // log response times

}

parameters{

// standardized (non-centered) person effects; scaled/correlated in
// transformed parameters via diag_pre_multiply
matrix[2, J] theta_tilde;

// uniform variate mapped to a half-Cauchy scale via 2*tan(.) below
real<lower = 0, upper = pi()/2> tau_speed_unif;

cholesky_factor_corr[2] L_Omega_theta; // person (ability, speed) correlation

// standardized (non-centered) item effects, one column per item
matrix[4, I] item_tilde;

vector[4] mu_item; // item hyper-means (log-alpha, beta, log-phi, lambda)

// uniform variates mapped to half-Cauchy item scales via 2*tan(.)
vector<lower = 0, upper = pi()/2>[4] tau_item_unif;

cholesky_factor_corr[4] L_Omega_item; // item-parameter correlation

vector<lower=0>[I] sigma_e; // residual SD of log response times, per item

}

transformed parameters{

matrix[I, 4] item;   // item effects: columns = (log-alpha, beta, log-phi, lambda) deviations

matrix[J, 2] theta;  // person effects: columns = (ability, speed) before centering

vector<lower=0>[4] tau_item;

vector<lower=0>[2] tau_theta;

vector<lower=0>[I] alpha_u;  // unconstrained discriminations

vector[I] beta;

vector<lower=0>[I] phi_u;    // unconstrained time discriminations

vector[I] lambda;

vector[I] alpha;             // discriminations with prod(alpha) = 1

vector[I] phi;               // time discriminations with prod(phi) = 1

vector[J] ability_u;

vector[J] speed_u;

vector[J] ability;

vector[J] speed;

// half-Cauchy(0, 2) scales via the tan() reparameterization (vectorized)
tau_item = 2.0 * tan(tau_item_unif);

tau_theta[1] = 1; tau_theta[2] = 2.0 * tan(tau_speed_unif); // Fox (2018) ident restr. 2

// NOTE: transpose must be the ASCII apostrophe ' (the original had a
// typographic quote, which Stan cannot parse)
item = (diag_pre_multiply(tau_item, L_Omega_item) * item_tilde)';

// vectorized in place of the per-item loop
alpha_u = exp(mu_item[1] + col(item, 1));

beta = mu_item[2] + col(item, 2);

phi_u = exp(mu_item[3] + col(item, 3));

lambda = mu_item[4] + col(item, 4);

// Fox (2018) ident restr. 3: divide by the geometric mean so the product
// is 1. Done in log space for numerical stability, and without the
// hard-coded exponent 0.1 (which silently assumed I = 10 items).
alpha = exp(log(alpha_u) - mean(log(alpha_u)));

phi = exp(log(phi_u) - mean(log(phi_u)));

theta = (diag_pre_multiply(tau_theta, L_Omega_theta) * theta_tilde)';

// vectorized in place of the per-person loop
ability_u = col(theta, 1);

speed_u = col(theta, 2);

ability = ability_u - mean(ability_u); // Fox (2018) ident restr. 1a

speed = speed_u - mean(speed_u); // Fox (2018) ident restr. 1b

}

model{

// prior person parameters
// standard-normal primitives; actual scales/correlations are applied in
// transformed parameters (non-centered parameterization)

to_vector(theta_tilde) ~ normal(0, 1);

L_Omega_theta ~ lkj_corr_cholesky(1); // uniform over correlation matrices

// prior item parameters

to_vector(item_tilde) ~ normal(0, 1);

L_Omega_item ~ lkj_corr_cholesky(1);

mu_item ~ normal(0, 10); // weakly informative hyper-means

sigma_e ~ lognormal(0, 0.3);

// the model (vectorized)
// response accuracy: 2PL with product-constrained alpha

y ~ bernoulli_logit(alpha[ii] .* ability[jj] - beta[ii]);

// response times: lognormal with item time-intensity lambda and
// person speed scaled by phi

logt ~ normal(lambda[ii] - phi[ii] .* speed[jj], sigma_e[ii]);

}

generated quantities {

// recover the full correlation matrices from their Cholesky factors,
// using declare-define form

corr_matrix[4] Omega_item = multiply_lower_tri_self_transpose(L_Omega_item);

corr_matrix[2] Omega_theta = multiply_lower_tri_self_transpose(L_Omega_theta);

}"

Thank you!

Best wishes

Christoph