functions {
  // Wrapper matching the integrand signature required by integrate_1d:
  // evaluates the normal CDF at x with mean theta[1] and sd theta[2].
  // xc, x_r, and x_i are unused here but required by the interface.
  // Uses the bar syntax normal_cdf(x | mu, sigma); the comma form is
  // deprecated and removed in recent Stan versions.
  real integrand(real x, real xc, array[] real theta,
                 array[] real x_r, array[] int x_i) {
    return normal_cdf(x | theta[1], theta[2]);
  }

  // Compute the log of the definite integral from 0 to each of the values in
  // x of the normal CDF with mean mu and std sigma.
  //
  // Instead of computing the full integral from 0 to each value in x, sort x
  // and integrate from 0 to x(1), then x(1) to x(2), etc., accumulating the
  // running total. Although this makes just as many calls to integrate_1d,
  // it is faster, because integrate_1d works faster on smaller chunks of the
  // integrand: in testing this took about half the time of integrating from
  // 0 to x[i] separately for every i.
  vector vec_log_ncdf_integrate(vector x, real mu, real sigma) {
    int N = num_elements(x);
    array[N] int x_idx = sort_indices_asc(x);
    vector[N] sx = x[x_idx];   // x in ascending order
    vector[N] sorted_m2;       // cumulative integral at each sorted point
    vector[N] result;
    for (i in 1:N) {
      // First segment starts from 0 with nothing accumulated; later segments
      // start where the previous one ended.
      real lb = (i == 1) ? 0 : sx[i - 1];
      real prev_val = (i == 1) ? 0 : sorted_m2[i - 1];
      // {0.0} and {0} are dummy data arguments required by integrate_1d.
      real increment = integrate_1d(integrand, lb, sx[i],
                                    {mu, sigma}, {0.0}, {0});
      sorted_m2[i] = prev_val + increment;
    }
    // Scatter the cumulative integrals back into the original order of x.
    result[x_idx] = log(sorted_m2); // this could all be re-written to use logs and log_sum_exp.
    return result;
  }

  // Partial log-likelihood for reduce_sum within-chain parallelization.
  // E is the slice of latent event times for trials start:stop. Uses
  // vec_log_ncdf_integrate to get the normalizer for the CDF-based
  // anticipation likelihood, then adds a lognormal term for the residual
  // reaction time drt = rt - E (assumes E[i] <= rt[i] so drt > 0).
  real slice_loglik(array[] real E, int start, int stop,
                    real mu, real sigma, real ln_mu, real ln_sigma,
                    vector stim_time, array[] real rt) {
    int N = stop - start + 1;
    real loglik = 0;
    vector[N] normalizer = vec_log_ncdf_integrate(stim_time[start:stop], mu, sigma);
    vector[N] drt; // reaction time measured from the latent event
    for (i in 1:N) {
      loglik += normal_lcdf(E[i] | mu, sigma) - normalizer[i];
      drt[i] = rt[i + start - 1] - E[i]; // slice is 1-based; rt is global
    }
    loglik += lognormal_lpdf(drt | ln_mu, ln_sigma);
    return loglik;
  }

  // Sample from a distribution proportional to a normal CDF.
// Used by generated quantities for posterior predictive checks. real rej_sample_ncdf_rng(real lb, real ub, real mu, real sig) { int accept = 0; int natt = 0; int max_att = 10000; real fmax = normal_cdf(ub, mu, sig); real v; while (accept == 0 && natt < max_att) { v = uniform_rng(lb, ub); real a = uniform_rng(0, fmax); if (normal_cdf(v, mu, sig) > a) { accept = 1; } else { natt += 1; } } if (accept==1) { return v; } else { print("Failed to find a sample. LB: ", lb, ", UB: ", ub, ", mu: ", mu, ", sig: ", sig); return not_a_number(); } } } data { int n_trial; real lb; array[n_trial] real rt; //relative to trial start vector[n_trial] x; // for converting starting position (x) to appearance time real speed; real offset; // tuning reduce sum int grainsize; } transformed data { vector[n_trial] t_stim = x*speed + offset; array[n_trial] real minval; for(i in 1:n_trial) { minval[i] = min([rt[i], t_stim[i]]); } } parameters { // parameters of the hazard function real mu_antic; real sig_antic; // parameters of the RT function real lmu_react; real lsig_react; // unobserved event array[n_trial] real E_raw; } transformed parameters { array[n_trial] real E; // scale to 0 to minval. No Jacobian required to multiply by data. for (i in 1:n_trial) { E[i] = E_raw[i]*minval[i]; } } model { // priors mu_antic ~ normal(1.35, 0.4); sig_antic ~ normal(0, 1); lmu_react ~ normal(log(0.350), 0.2); lsig_react ~ normal(0, 0.2); profile("Likelihood") { target += reduce_sum(slice_loglik, E, grainsize, mu_antic, sig_antic, lmu_react, lsig_react, t_stim, rt); } } generated quantities { vector[n_trial] rt_hat; profile("R Hat") { for (i in 1:n_trial) { rt_hat[i] = rej_sample_ncdf_rng(0, t_stim[i], mu_antic, sig_antic) + lognormal_rng(lmu_react, lsig_react); } } }