I am currently trying to run the following model, in which my data consists of 0s and 1s, with 1000 rows and 80 columns. The runtime is taking quite a bit longer than expected. Is there anything I can do to reduce runtime besides lowering the number of iterations? This is my first time using Stan.

```stan
data{
int<lower=0> n_examinee; // Number of examinees (rows of Y)
int<lower=0> n_item; // Number of items (columns of Y)
int<lower=0, upper = 1> Y[n_examinee,n_item]; // Response matrix: Y[i,j] = 1 if examinee i answered item j correctly, else 0
}
parameters{
vector [n_examinee] theta; // Ability parameters (1 per examinee), standard-normal prior set in model block
vector <lower = 0> [n_item] alpha; // Discrimination parameters, constrained > 0, 1 per item
vector [n_item] beta; // Difficulty parameters, 1 per item
vector <lower = 0, upper = 1> [n_item] gamma; // Guessing (lower-asymptote) parameters, constrained to (0, 1)
real mu_beta; // Hyperparameter: population mean of item difficulty
real <lower = 0> sigma_alpha; // Hyperparameter: lognormal scale of discrimination
real <lower = 0> sigma_beta; // Hyperparameter: sd of item difficulty
}
model {
// Priors / hyperpriors (unchanged from the scalar-loop version).
theta ~ normal(0,1); // standard normal prior for ability
beta ~ normal(mu_beta,sigma_beta);
mu_beta ~ normal(0,5); // hyperprior for mean difficulty
sigma_beta ~ cauchy(0,5); // hyperprior for sd of difficulty
alpha ~ lognormal(0,sigma_alpha);
sigma_alpha ~ cauchy(0,5);
gamma ~ beta(5,23);
// 3PL likelihood, vectorized over examinees.
// The original examinee x item double loop made n_examinee * n_item
// (80,000) scalar ~ bernoulli() calls per gradient evaluation, which
// dominates runtime. Looping only over items and passing a whole
// column of Y with a vector of probabilities computes the same
// log-likelihood with far less autodiff overhead.
for (j in 1:n_item) {
vector[n_examinee] p = inv_logit(alpha[j] * (theta - beta[j]));
Y[, j] ~ bernoulli(gamma[j] + (1 - gamma[j]) * p);
}
}
generated quantities{
// Pointwise log-likelihood for model comparison (e.g. loo / waic).
// Shape kept as an array of vectors so downstream extraction is unchanged.
vector[n_item] log_lik[n_examinee];
for (i in 1: n_examinee){
for (j in 1: n_item){
// 3PL success probability: guessing floor plus scaled logistic curve.
real p = inv_logit(alpha[j] * (theta[i] - beta[j]));
// bernoulli_log() is deprecated (removed in recent Stan releases);
// use bernoulli_lpmf with the "outcome | parameter" syntax instead.
log_lik[i, j] = bernoulli_lpmf(Y[i, j] | gamma[j] + (1 - gamma[j]) * p);
}
}
}
```