I’m trying to implement the lower, upper, and bounded constraint transformations as a single function. I feel like this is a simpler way to set up Albert and Chib (1993) style probit regression compared to the example in the manual. I’m pretty sure that I’m messing up the log density Jacobian adjustment.

```
functions {
vector ordinalToContinuous_lp(int[] outcome, vector outcomeRaw, vector truncPoints) {
int N = size(outcome);
vector[N] outcomeC;
for (rx in 1:N) {
real lb = truncPoints[outcome[rx]];
real ub = truncPoints[outcome[rx]+1];
if (lb == negative_infinity()) {
outcomeC[rx] = ub - exp(outcomeRaw[rx]);
target += log(1/(ub - outcomeC[rx])); // wrong?
} else if (ub == positive_infinity()) {
outcomeC[rx] = lb + exp(outcomeRaw[rx]);
target += log(1/(outcomeC[rx] - lb)); // wrong?
} else {
real width = ub - lb;
real ilogit = 1. / (1 + exp(outcomeRaw[rx]));
outcomeC[rx] = lb + width * ilogit;
target += width * ilogit * (1 - ilogit); // wrong?
}
}
return outcomeC;
}
}
data {
int<lower=0> numThresholds;
vector[numThresholds] cumThr;
int outcome[N]; // min(outcome)=1
...
}
transformed data {
vector[numThresholds+2] truncPoints;
truncPoints[1] = negative_infinity();
for (x in 1:numThresholds) truncPoints[x+1] = cumThr[x];
truncPoints[numThresholds+2] = positive_infinity();
}
parameters {
vector[N] outcomeRaw;
...
}
transformed parameters {
vector[N] outcomeC = ordinalToContinuous_lp(outcome, outcomeRaw, truncPoints);
...
}
model {
...
outcomeC ~ normal(pred, residScale);
}
```

If you can help me, I’d appreciate it!