Sure! Here is the one using my version of the function you suggested:
functions {
  // Extract the "diagonal" of an array of vectors: element i of vector i.
  vector indexBoth(vector[] vs, int N) {
    vector[N] out_vec;
    for (i in 1:N) {
      out_vec[i] = vs[i, i];
    }
    return out_vec;
  }
}
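For reference, indexBoth pulls out the "diagonal" of an array of vectors, i.e. out_vec[i] = vs[i, i]. So in the model below, element n of indexBoth({eps_Age_State[State], -eps_Age_State[State]}[Age], N) is eps_Age_State[State[n]] when Age[n] == 1 and -eps_Age_State[State[n]] when Age[n] == 2.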
data {
  int<lower=2> N_State;
  int<lower=1> N;
  int<lower=1> State[N];
  int<lower=2> N_Age;
  int<lower=1> Age[N];
  int<lower=0> T[N];
  int<lower=0> S[N];
}
transformed data {
  // Fraction of observations in each state, used for the soft centering below.
  vector<lower=0>[N_State] beta_State_weights = rep_vector(0, N_State);
  for (n in 1:N) {
    beta_State_weights[State[n]] += 1;
  }
  beta_State_weights /= N;
  vector<lower=0>[N_State] eps_Age_State_weights = rep_vector(0, N_State);
  for (n in 1:N) {
    eps_Age_State_weights[State[n]] += 1;
  }
  eps_Age_State_weights /= N;
}
parameters {
  real alpha;
  real eps_Age;
  real<lower=0> sigma_State;
  vector[N_State] beta_raw_State;
  vector[N_State] eps_Age_State_raw;
  real<lower=0> sigma_Age_State;
}
transformed parameters {
  // Non-centered parameterization of the state effects.
  vector[N_State] beta_State = sigma_State * beta_raw_State;
  vector[N_State] eps_Age_State = sigma_Age_State * eps_Age_State_raw;
}
model {
  alpha ~ normal(0, 2.0);
  eps_Age ~ normal(0, 2.0);
  sigma_State ~ normal(0, 2.0);
  beta_raw_State ~ normal(0, 1);
  // Soft sum-to-zero constraint on the weighted state effects.
  dot_product(beta_State, beta_State_weights) ~ normal(0, 1.0e-2);
  sigma_Age_State ~ normal(0, 2.0);
  eps_Age_State_raw ~ normal(0, 1);
  dot_product(eps_Age_State, eps_Age_State_weights) ~ normal(0, 1.0e-2);
  S ~ binomial_logit(T, alpha
                        + to_vector({eps_Age, -eps_Age}[Age])
                        + beta_State[State]
                        + indexBoth({eps_Age_State[State], -eps_Age_State[State]}[Age], N));
}
This is slow: it reports that it expects 1000 transitions to take 3679s! I've not yet let it run to completion. I suspect the culprit is that {eps_Age_State[State], -eps_Age_State[State]}[Age] materializes an N-element array of length-N vectors, so the work (and the autodiff stack) grows like N^2 rather than N.
Here’s the version with the “workaround”:
data {
  int<lower=2> N_State;
  int<lower=1> N;
  int<lower=1> State[N];
  int<lower=2> N_Age;
  int<lower=1> Age[N];
  int<lower=0> T[N];
  int<lower=0> S[N];
}
transformed data {
  vector<lower=0>[N_State] beta_State_weights = rep_vector(0, N_State);
  for (n in 1:N) {
    beta_State_weights[State[n]] += 1;
  }
  beta_State_weights /= N;
  vector<lower=0>[N_State] eps_Age_State_weights = rep_vector(0, N_State);
  for (n in 1:N) {
    eps_Age_State_weights[State[n]] += 1;
  }
  eps_Age_State_weights /= N;
}
parameters {
  real alpha;
  real eps_Age;
  real<lower=0> sigma_State;
  vector[N_State] beta_raw_State;
  vector[N_State] eps_Age_State_raw;
  real<lower=0> sigma_Age_State;
}
transformed parameters {
  vector[N_State] beta_State = sigma_State * beta_raw_State;
  vector[N_State] eps_Age_State = sigma_Age_State * eps_Age_State_raw;
  // Workaround: precompute the Age-signed state effect as a transformed parameter.
  vector[N] y_Age_State;
  for (n in 1:N) {
    y_Age_State[n] = {eps_Age_State[State[n]], -eps_Age_State[State[n]]}[Age[n]];
  }
}
model {
  alpha ~ normal(0, 2.0);
  eps_Age ~ normal(0, 2.0);
  sigma_State ~ normal(0, 2.0);
  beta_raw_State ~ normal(0, 1);
  dot_product(beta_State, beta_State_weights) ~ normal(0, 1.0e-2);
  sigma_Age_State ~ normal(0, 2.0);
  eps_Age_State_raw ~ normal(0, 1);
  dot_product(eps_Age_State, eps_Age_State_weights) ~ normal(0, 1.0e-2);
  S ~ binomial_logit(T, alpha
                        + to_vector({eps_Age, -eps_Age}[Age])
                        + beta_State[State]
                        + y_Age_State);
}
This runs as expected: it estimates 1000 transitions at 25s and completes 1000 warmup plus 1000 sampling iterations in about 150s.
The workaround is fine. But I am wondering whether there is an efficient way to put the “diagonally-indexed” vector[] directly in the vectorized sampling statement. I’m generating this code from Haskell, and while the workaround itself is easy enough, it requires some book-keeping to use a different expression when sampling than in post-stratification. Until I hit this, that book-keeping was simpler and revolved entirely around what gets put into the index arguments; this case requires a more complex bit of decision-making.
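For what it’s worth, one direction I may try, sketched below under the assumption that Age takes only the values 1 and 2 (which the {x, -x}[Age] construction already implies): precompute a ±1 sign vector in transformed data and replace the diagonal indexing with elementwise multiplication, keeping everything inside the one vectorized sampling statement. (age_sign is a hypothetical helper, not part of the models above.)

transformed data {
  vector[N] age_sign;  // +1 where Age[n] == 1, -1 where Age[n] == 2
  for (n in 1:N) {
    age_sign[n] = (Age[n] == 1) ? 1 : -1;
  }
}
model {
  // ... priors as above ...
  S ~ binomial_logit(T, alpha
                        + eps_Age * age_sign
                        + beta_State[State]
                        + age_sign .* eps_Age_State[State]);
}

I don’t know yet whether this would actually simplify the Haskell-side book-keeping, but it avoids both the N-array of length-N vectors and the separate transformed parameter.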