Hi all,
I am using a supercomputer with a 48-hour time limit, and I have four different time points (four jobs), only one of which finishes on time. The others return with nothing, which is a sign that the job ran too long. I am wondering how I can work around this — perhaps by setting each of the four jobs to run only 1000 iterations, and then restarting the four jobs from where those left off. This seems much easier than going the parallel-coding route, which appears to require a steeper learning curve.
I’ve listed the current (second) model first. I created the second model because the posteriors for time point 3 didn’t seem to show that participants perform better as the response window gets longer.
There are ~1000 participants and fourteen conditions for which I am estimating thresholds via psychometric functions. As an aside, I’m not sure why the current model is taking longer than the first model I created, but really it’s the first question that is most important.
data { // Everything that must be input by the user
  int<lower=4> n_total;                     // Total number of trials to analyze
  int<lower=2> n_subjects;                  // Number of unique observers
  int<lower=2> n_levels;                    // Number of levels of Factor
  real intensity[n_total];                  // Intensity of the stimulus on each trial
  int<lower=0> subject[n_total];            // Observer on each trial
  int<lower=0> level[n_total];              // Level of Factor on each trial
  int<lower=0,upper=1> correct[n_total];    // Whether the response was correct (1) on each trial
  real<lower=0,upper=1> chance_performance; // Chance performance for experiment (e.g., 1/n for n-AFC)
}
transformed data {
  int df_levels_sA; // Degrees of freedom in the subject-by-factor interaction
  int n_levels_sA;  // Number of cells in the subject-by-factor interaction
  df_levels_sA = (n_levels - 1) * (n_subjects - 1);
  n_levels_sA = n_levels * n_subjects;
}
parameters {
  vector<lower=0,upper=1>[n_subjects] lapse; // Observer's lapse rate
  // FIX: mum carries a gamma(2,4) prior, whose support is strictly positive.
  // Declaring it unconstrained made HMC propose negative values that the prior
  // rejects outright, which hurts adaptation and sampling speed. <lower=0>
  // matches the prior's support and leaves the posterior unchanged.
  real<lower=0> mum;      // Grand mean of the thresholds
  real<lower=0> muw;      // Shared psychometric-function width (slope scale)
  vector[n_levels-1] fA;  // Factor main effect (free levels; last = -sum of rest)
  vector[n_subjects-1] sA; // Subject main effect on threshold (free levels)
  // NOTE(review): sB only feeds subject_beta below, and the width code that
  // consumed it is commented out — so sB is sampled from its prior but never
  // touches the likelihood. Dead parameters like this add dimensions for HMC
  // to explore and may contribute to this model being slower than the first.
  vector[n_subjects-1] sB;
  //matrix[n_subjects-1,n_levels-1] fsA;
  vector[(n_subjects - 1) * (n_levels-1)] fsA; // Subject-by-factor interaction (free cells, row-major)
}
transformed parameters {
  real m[n_subjects,n_levels]; // Threshold for each subject-by-level cell
  //real w[n_subjects,n_levels];
  real lapse_alpha; // Shape parameters of the beta prior on lapse
  real lapse_beta;  // (constants; computed here so the model block can use them)
  vector[n_total] psi; // Predicted P(correct) on each trial
  {
    real mean_beta; // Mean of beta prior for individual lapse rate
    real betaEta;   // Precision parameter of beta prior for individual lapse rate
    real threshold[n_total];
    vector[n_levels] factor_alpha = append_row(fA, -sum(fA));
    vector[n_subjects] subject_alpha = append_row(sA, -sum(sA));
    vector[n_subjects] subject_beta = append_row(sB, -sum(sB)); // currently unused (width path disabled)
    matrix[n_subjects,n_levels] interaction_alpha;
    interaction_alpha = rep_matrix(0, n_subjects, n_levels);
    // Fill the free interaction cells; the last column and last row absorb
    // the sum-to-zero constraints.
    for (sj in 1:(n_subjects - 1)) {
      for (l in 1:(n_levels - 1)) {
        interaction_alpha[sj, l] = fsA[(sj - 1) * (n_levels - 1) + l];
      }
      interaction_alpha[sj, n_levels] = -1 * sum(interaction_alpha[sj,]);
    }
    for (l in 1:n_levels) {
      interaction_alpha[n_subjects, l] = -1 * sum(interaction_alpha[,l]);
    }
    for (sj in 1:n_subjects) {
      for (l in 1:n_levels) {
        m[sj,l] = mum + factor_alpha[l] + subject_alpha[sj] + interaction_alpha[sj, l];
        //w[sj,l] = exp(muw + subject_beta[sj]);
      }
    }
    for (tr in 1:n_total) {
      threshold[tr] = m[subject[tr], level[tr]];
      // Lapse-rate psychometric function with a single shared width muw.
      psi[tr] = chance_performance
                + (1 - lapse[subject[tr]] - chance_performance)
                * inv_logit((intensity[tr] - threshold[tr]) / muw);
      if (is_nan(psi[tr])) {
        // BUG FIX: the original also printed width[tr], but width was declared
        // and never assigned in this model (the width code is commented out),
        // so that print read an uninitialized value.
        print("lapse = ", lapse[subject[tr]]);
        print("threshold = ", threshold[tr]);
      }
    }
    mean_beta = 0.01;
    betaEta = 100;
    lapse_alpha = mean_beta * betaEta;
    lapse_beta = (1 - mean_beta) * betaEta;
  }
}
model {
  //mean_beta ~ beta(1,60);
  //betaEta ~ gamma(1,0.01);
  fsA ~ normal(0, inv(sqrt(1 - inv(n_levels_sA)))); // Produces a standard normal on interaction_alpha
  fA ~ normal(0, inv(sqrt(1 - inv(n_levels))));
  sA ~ normal(0, inv(sqrt(1 - inv(n_subjects)))); // Produces a standard normal on subject_alpha
  sB ~ normal(0, inv(sqrt(1 - inv(n_subjects)))); // Produces a standard normal on subject_beta
  mum ~ gamma(2,4);
  muw ~ gamma(2,4);
  lapse ~ beta(lapse_alpha, lapse_beta);
  correct ~ bernoulli(psi);
}
Here is the first model I created, for comparison:
data { // Everything that must be input by the user
  int<lower=4> n_total;                     // Total number of trials to analyze
  int<lower=2> n_subjects;                  // Number of unique observers
  int<lower=2> n_levels;                    // Number of levels of Factor
  real intensity[n_total];                  // Intensity of the stimulus on each trial
  int<lower=0> subject[n_total];            // Observer on each trial
  int<lower=0> level[n_total];              // Level of Factor on each trial
  int<lower=0,upper=1> correct[n_total];    // Whether the response was correct (1) on each trial
  real<lower=0,upper=1> chance_performance; // Chance performance for experiment (e.g., 1/n for n-AFC)
}
transformed data {
  int df_levels_sA; // Degrees of freedom in the subject-by-factor interaction
  int n_levels_sA;  // Number of cells in the subject-by-factor interaction
  df_levels_sA = (n_levels - 1) * (n_subjects - 1);
  n_levels_sA = n_levels * n_subjects;
}
parameters {
  vector<lower=0,upper=1>[n_subjects] lapse; // Observer's lapse rate
  // FIX: mum and muw carry gamma(2,4) priors, whose support is strictly
  // positive, but were declared unconstrained. HMC then proposes negative
  // values that the prior rejects outright, degrading adaptation and speed.
  // <lower=0> matches the priors' support and leaves the posterior unchanged.
  // NOTE(review): muw enters the likelihood through exp(muw + sB), i.e. on the
  // log scale, where negative values would be meaningful — confirm a gamma
  // prior (rather than, say, a normal) is really what is intended here.
  real<lower=0> mum;       // Grand mean of the thresholds
  real<lower=0> muw;       // Log-scale mean of the psychometric-function width
  vector[n_levels-1] fA;   // Factor main effect (free levels; last = -sum of rest)
  vector[n_subjects-1] sA; // Subject main effect on threshold (free levels)
  vector[n_subjects-1] sB; // Subject effect on log-width (free levels)
  //matrix[n_subjects-1,n_levels-1] fsA;
  vector[(n_subjects - 1) * (n_levels-1)] fsA; // Subject-by-factor interaction (free cells, row-major)
}
transformed parameters {
  real m[n_subjects,n_levels]; // Threshold for each subject-by-level cell
  real w[n_subjects,n_levels]; // Width for each subject-by-level cell
  real lapse_alpha; // Shape parameters of the beta prior on lapse
  real lapse_beta;  // (constants; computed here so the model block can use them)
  vector[n_total] psi; // Predicted P(correct) on each trial
  {
    real mean_beta; // Mean of beta prior for individual lapse rate
    real betaEta;   // Precision parameter of beta prior for individual lapse rate
    real threshold[n_total];
    real width[n_total];
    vector[n_levels] factor_alpha = append_row(fA, -sum(fA));
    vector[n_subjects] subject_alpha = append_row(sA, -sum(sA));
    vector[n_subjects] subject_beta = append_row(sB, -sum(sB));
    matrix[n_subjects,n_levels] interaction_alpha;
    interaction_alpha = rep_matrix(0, n_subjects, n_levels);
    // Fill the free interaction cells; the last column and last row absorb
    // the sum-to-zero constraints.
    for (sj in 1:(n_subjects - 1)) {
      for (l in 1:(n_levels - 1)) {
        interaction_alpha[sj, l] = fsA[(sj - 1) * (n_levels - 1) + l];
      }
      interaction_alpha[sj, n_levels] = -1 * sum(interaction_alpha[sj,]);
    }
    for (l in 1:n_levels) {
      interaction_alpha[n_subjects, l] = -1 * sum(interaction_alpha[,l]);
    }
    for (sj in 1:n_subjects) {
      for (l in 1:n_levels) {
        m[sj,l] = mum + factor_alpha[l] + subject_alpha[sj] + interaction_alpha[sj, l];
        w[sj,l] = exp(muw + subject_beta[sj]); // width modeled on the log scale
      }
    }
    for (tr in 1:n_total) {
      threshold[tr] = m[subject[tr], level[tr]];
      width[tr] = w[subject[tr], level[tr]];
      // Lapse-rate psychometric function with per-subject width.
      psi[tr] = chance_performance
                + (1 - lapse[subject[tr]] - chance_performance)
                * inv_logit((intensity[tr] - threshold[tr]) / width[tr]);
      if (is_nan(psi[tr])) {
        print("lapse = ", lapse[subject[tr]]);
        print("width = ", width[tr]);
        print("threshold = ", threshold[tr]);
      }
    }
    mean_beta = 0.01;
    betaEta = 100;
    lapse_alpha = mean_beta * betaEta;
    lapse_beta = (1 - mean_beta) * betaEta;
  }
}
model {
  //mean_beta ~ beta(1,60);
  //betaEta ~ gamma(1,0.01);
  fsA ~ normal(0, inv(sqrt(1 - inv(n_levels_sA)))); // Produces a standard normal on interaction_alpha
  fA ~ normal(0, inv(sqrt(1 - inv(n_levels))));
  sA ~ normal(0, inv(sqrt(1 - inv(n_subjects)))); // Produces a standard normal on subject_alpha
  sB ~ normal(0, inv(sqrt(1 - inv(n_subjects)))); // Produces a standard normal on subject_beta
  mum ~ gamma(2,4);
  muw ~ gamma(2,4);
  lapse ~ beta(lapse_alpha, lapse_beta);
  correct ~ bernoulli(psi);
}