Sure, I actually just posted it on the modeling forum, but I can post it here for you as well. I have simulated data in a JSON file if you need it; the parameter recovery program itself is built in MATLAB.
functions {
  //custom function for lognormal inverse CDF
  matrix logncdfinv(vector x, vector m, vector s) {
    int szx;
    int szm;
    szx = size(x);
    szm = size(m);
    matrix[szx,szm] y;
    y = exp( rep_matrix(s',szx) * sqrt(2) .* rep_matrix(inv_erfc( -2 .* x + 2 ),szm) + rep_matrix(m',szx));
    return y;
  }
  //likelihood model for CASANDRE
  real casandre_log(matrix resps, real guess, vector sds, vector se, real conf, int nt, int sampn) {
    matrix[nt,4] llhC;
    matrix[nt,sampn] avgs;
    avgs = rep_matrix(se,sampn).*rep_matrix(sds',nt);
    for (tr in 1:nt) {
      matrix[3,sampn] raws;
      for (rws in 1:sampn) {
        raws[1,rws] = normal_cdf(-conf,avgs[tr,rws],sds[rws]);
      }
      for (rws in 1:sampn) {
        raws[2,rws] = normal_cdf(0,avgs[tr,rws],sds[rws]);
      }
      for (rws in 1:sampn) {
        raws[3,rws] = normal_cdf(conf,avgs[tr,rws],sds[rws]);
      }
      vector[3] ratiodist;
      ratiodist[1] = mean(raws[1,:]);
      ratiodist[2] = mean(raws[2,:]);
      ratiodist[3] = mean(raws[3,:]);
      llhC[tr,1] = ((guess/4) + ((1-guess)*ratiodist[1]));
      llhC[tr,2] = ((guess/4) + ((1-guess)*(ratiodist[2]-ratiodist[1])));
      llhC[tr,3] = ((guess/4) + ((1-guess)*(ratiodist[3]-ratiodist[2])));
      llhC[tr,4] = ((guess/4) + ((1-guess)*(1-ratiodist[3])));
    }
    real ll;
    ll = sum(columns_dot_product(resps, log(llhC)));
    return ll;
  }
}
data {
  int ns;                     //number of subjects
  int nt;                     //number of trials per subject
  int sampn;                  //number of sample points used to approximate the likelihood
  matrix[4,ns*nt] respslong;  //responses over the 4 categories, trials concatenated across subjects
  row_vector[ns*nt] orislong; //stimulus orientations, trials concatenated across subjects
  vector[sampn] sampx;        //quantiles in (0,1) at which the lognormal is sampled
}
transformed data {
  matrix[ns*nt,4] respstall;
  vector[ns*nt] oristall;
  int nth;
  respstall = respslong';
  oristall = orislong';
  array[ns] matrix[nt,4] resps;
  matrix[nt,ns] oris;
  nth = 1;
  //split the concatenated trials back into per-subject blocks
  for (sub in 1:ns) {
    resps[sub] = respstall[nth:(nth+nt-1),1:4];
    oris[:,sub] = oristall[nth:(nth+nt-1)];
    nth = nth + nt;
  }
}
parameters {
  //Parameters
  vector<lower=0,upper=1>[ns] guess;  //guess rate
  vector<lower=0>[ns] sens;           //stimulus sensitivity
  vector[ns] crit;                    //decision criterion
  vector<lower=0>[ns] meta;           //meta-uncertainty
  vector<lower=0>[ns] conf;           //confidence criterion
  //Hyperparameters (location and scale of the group-level priors below)
  real snm;
  real<lower=0> sns;
  real cm;
  real<lower=0> cs;
  real mm;
  real<lower=0> ms;
  real ccm;
  real<lower=0> ccs;
}
model {
  //Calculate local variables
  matrix[nt,ns] sm;
  vector[ns] sc;
  matrix[sampn,ns] xtrans;
  matrix[sampn,ns] sds;
  matrix[nt,ns] se;
  sm = oris.*rep_matrix(sens',nt);
  sc = crit.*sens;
  xtrans = logncdfinv(sampx,log(1 ./ sqrt(meta.^2+1)),sqrt(log(meta.^2+1)));
  sds = 1 ./ xtrans;
  se = sm-rep_matrix(sc',nt);
  //Hyperpriors
  snm ~ normal(0,1);
  sns ~ lognormal(0,1);
  cm ~ normal(0,1);
  cs ~ lognormal(0,1);
  mm ~ normal(0,1);
  ms ~ lognormal(0,1);
  ccm ~ normal(0,1);
  ccs ~ lognormal(0,1);
  //Priors
  guess ~ beta(1,193/3);
  sens ~ lognormal(snm,sns);
  crit ~ normal(cm,cs);
  meta ~ lognormal(mm,ms);
  conf ~ lognormal(ccm,ccs);
  //Loop through the participants
  for (i in 1:ns) {
    //Likelihood Model
    resps[i] ~ casandre(guess[i],sds[:,i],se[:,i],conf[i],nt,sampn); //likelihood model for this participant
  }
}
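My parameter recovery wrapper is all in MATLAB, so purely for illustration, here is a rough CmdStanPy sketch of the data structure the model expects. The field names and shapes come from the data block above; the sizes, file name, and placeholder arrays are made up and would be replaced by the simulated data in the JSON.

import numpy as np
from cmdstanpy import CmdStanModel

# Placeholder sizes, not the actual simulation settings
ns, nt, sampn = 10, 200, 30

stan_data = {
    "ns": ns,
    "nt": nt,
    "sampn": sampn,
    "respslong": np.zeros((4, ns * nt)),        # 4 response categories x (subjects*trials); placeholder
    "orislong": np.zeros(ns * nt),              # stimulus orientations, trials concatenated; placeholder
    "sampx": (np.arange(sampn) + 0.5) / sampn,  # quantiles in (0,1) at which the lognormal is sampled
}

model = CmdStanModel(stan_file="casandre.stan")  # placeholder file name
fit = model.sample(data=stan_data, chains=4)
print(fit.summary())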
The change is in the line

xtrans = logncdfinv(sampx,log(1 ./ sqrt(meta.^2+1)),sqrt(log(meta.^2+1)));

which used to read

xtrans = logncdfinv(sampx,(-1/2)*log1p(p.^2),sqrt(log1p(p.^2)));
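For anyone comparing the two versions: writing $m$ for meta (and assuming the $p$ in the old line is the same parameter), the two parameterizations are algebraically equivalent,

$$\log\frac{1}{\sqrt{m^2+1}} = -\tfrac{1}{2}\log\!\left(1+m^2\right), \qquad \sqrt{\log\!\left(m^2+1\right)} = \sqrt{\operatorname{log1p}\!\left(m^2\right)},$$

so both are meant to give the lognormal location $\mu = -\tfrac{1}{2}\log(1+m^2)$ and scale $\sigma = \sqrt{\log(1+m^2)}$, which make its mean $\exp(\mu + \sigma^2/2) = 1$ and its standard deviation $m$. One thing to watch in the old form is that, as far as I can tell, Stan treats (-1/2) as integer division and evaluates it to 0, so it would need to be written as -0.5 to give the intended location.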