(Edit: removed unnecessary things from the model)

Thanks for the reply @jsocolar! Code is below. I am fitting two things at once, voter turnout and preference. Each binomial_logit sampling statement produces a contribution to the log-posterior on the order of -1e8.

The data has large numbers of trials and probabilities between .3 and .8, so I think the normal approximation is more appropriate than the Poisson one, right?

I’m thinking of shifting that way but then it will be more confusing to try and merge with the smaller surveys since I think there will be a relative weight issue or something.

Thanks!

```
data {
  int<lower=1> J_State;                       // number of states (hierarchy levels)

  // Turnout (T) elections
  int<lower=1> N_ElectionsT;                  // number of turnout observations
  array[N_ElectionsT] int<lower=1, upper=J_State> ElectionsT_State;  // state of each turnout row

  // Preference (P) elections
  int<lower=1> N_ElectionsP;                  // number of preference observations
  array[N_ElectionsP] int<lower=1, upper=J_State> ElectionsP_State;  // state of each preference row

  // Turnout outcome: binomial trials and successes per row
  array[N_ElectionsT] int<lower=0> CVAP_Elex;   // citizen voting-age population (trials)
  array[N_ElectionsT] int<lower=0> Voted_Elex;  // votes cast (successes; should be <= CVAP_Elex)

  int<lower=1> K_DM;                          // number of design-matrix columns
  matrix[N_ElectionsT, K_DM] DM_ElectionsT;   // turnout design matrix

  // Design-matrix column-group metadata.
  // NOTE(review): presumably group sizes and starting column indices for each
  // demographic factor — unused in this model; confirm against the data prep code.
  int DM_Sex;
  int Sex_DM_Index;
  int DM_Education;
  int Education_DM_Index;
  int DM_Race;
  int Race_DM_Index;
  int DM_Density;
  int Density_DM_Index;
  int DM_WhiteNonGrad;
  int WhiteNonGrad_DM_Index;

  matrix[N_ElectionsP, K_DM] DM_ElectionsP;   // preference design matrix

  // Preference outcome: binomial trials and successes per row
  array[N_ElectionsP] int<lower=0> VotesInRace_Elections;   // total votes in race (trials)
  array[N_ElectionsP] int<lower=0> DVotesInRace_Elections;  // Democratic votes (successes)
}
transformed data {
  // Column-center both design matrices so the state intercepts alphaT/alphaP
  // are interpretable at the average covariate value.
  vector[K_DM] mean_DM_ElectionsT;
  matrix[N_ElectionsT, K_DM] centered_DM_ElectionsT;
  vector[K_DM] mean_DM_ElectionsP;
  matrix[N_ElectionsP, K_DM] centered_DM_ElectionsP;

  // One pass over the columns handles both matrices (the original had two
  // identical loops; the unused constIndex_* arrays are removed).
  for (k in 1:K_DM) {
    mean_DM_ElectionsT[k] = mean(DM_ElectionsT[, k]);
    centered_DM_ElectionsT[, k] = DM_ElectionsT[, k] - mean_DM_ElectionsT[k];
    mean_DM_ElectionsP[k] = mean(DM_ElectionsP[, k]);
    centered_DM_ElectionsP[, k] = DM_ElectionsP[, k] - mean_DM_ElectionsP[k];
  }

  // Debug output: transformed data runs once, before sampling starts.
  print("CVAP_Elex=", CVAP_Elex);
  print("Voted_Elex=", Voted_Elex);
  print("VotesInRace_Elections=", VotesInRace_Elections);
  print("DVotesInRace_Elections=", DVotesInRace_Elections);
}
parameters {
// --- Turnout (T) model: non-centered hierarchical regression ---
vector[J_State] alpha_rawT;          // raw (standardized) state intercepts
real mu_alphaT;                      // population mean of state intercepts
real<lower=0> sigma_alphaT;          // between-state intercept scale
vector[K_DM] muT;                    // population-mean slopes
vector<lower=0>[K_DM] tauT;          // between-state slope scales
cholesky_factor_corr[K_DM] LT;       // Cholesky factor of slope correlation matrix
matrix[K_DM,J_State] betaT_raw;      // raw (standardized) per-state slope deviations
// --- Preference (P) model: mirrors the turnout parameters ---
vector[J_State] alpha_rawP;
real mu_alphaP;
real<lower=0> sigma_alphaP;
vector[K_DM] muP;
vector<lower=0>[K_DM] tauP;
cholesky_factor_corr[K_DM] LP;
matrix[K_DM,J_State] betaP_raw;
}
transformed parameters {
// Non-centered parameterization: actual per-state coefficients are the
// population mean plus scaled, correlated deviations of the raw draws.
// betaT[,j] ~ MVN(muT, diag(tauT) * LT * LT' * diag(tauT)) implicitly.
matrix[K_DM,J_State] betaT = rep_matrix(muT,J_State) + diag_pre_multiply(tauT,LT) * betaT_raw;
vector[J_State] alphaT = mu_alphaT + sigma_alphaT * alpha_rawT;
matrix[K_DM,J_State] betaP = rep_matrix(muP,J_State) + diag_pre_multiply(tauP,LP) * betaP_raw;
vector[J_State] alphaP = mu_alphaP + sigma_alphaP * alpha_rawP;
}
model {
  // --- Priors: standard normal on all raw/hyper parameters (half-normal on scales
  // via the <lower=0> constraints), weakly-informative LKJ on the correlations. ---
  to_vector(betaT_raw) ~ normal(0, 1);
  alpha_rawT ~ normal(0, 1);
  mu_alphaT ~ normal(0, 1);
  sigma_alphaT ~ normal(0, 1);
  muT ~ normal(0, 1);
  tauT ~ normal(0, 1);
  LT ~ lkj_corr_cholesky(4.0);
  to_vector(betaP_raw) ~ normal(0, 1);
  alpha_rawP ~ normal(0, 1);
  mu_alphaP ~ normal(0, 1);
  sigma_alphaP ~ normal(0, 1);
  muP ~ normal(0, 1);
  tauP ~ normal(0, 1);
  LP ~ lkj_corr_cholesky(4.0);

  // Turnout linear predictor, vectorized: row n is
  // alphaT[state[n]] + centered_DM_ElectionsT[n] . betaT[, state[n]]
  // (replaces the per-row dot_product loop with one rows_dot_product call).
  vector[N_ElectionsT] voteDataBetaT_v = alphaT[ElectionsT_State]
      + rows_dot_product(centered_DM_ElectionsT, (betaT[, ElectionsT_State])');
  print("voteDataBetaT_v=", voteDataBetaT_v);
  print("before T target=", target());
  // NOTE(review): with trial counts in the millions, each likelihood term is
  // enormous in magnitude, so a target() around -1e8 is expected and is not by
  // itself evidence of misfit — confirm against the fitted probabilities.
  Voted_Elex ~ binomial_logit(CVAP_Elex, voteDataBetaT_v);

  // Preference linear predictor, same vectorized construction.
  vector[N_ElectionsP] voteDataBetaP_v = alphaP[ElectionsP_State]
      + rows_dot_product(centered_DM_ElectionsP, (betaP[, ElectionsP_State])');
  print("voteDataBetaP_v=", voteDataBetaP_v);
  print("before P target=", target());
  DVotesInRace_Elections ~ binomial_logit(VotesInRace_Elections, voteDataBetaP_v);
  print("after P target=", target());
}
```