Rejecting initial value: Error evaluating the log probability at the initial value

Hi, this is my first post on here and I'm a newbie to Stan, so please let me know if it's not detailed enough or more complicated than it needs to be; I'm using a modified version of a template I found in the Metrum tutorial 'Introduction to Bayesian modelling using Stan'.

I'm attempting to fit a multiple-dose, one-compartment PK model. Here is my Stan model file:

functions{

  vector oneCptModel1(real dt, vector init, real amt, int cmt, int evid,
		    real CL, real V){
    vector[1] x;
    real alpha;
    alpha = CL / V;

    x = rep_vector(0.0, 1);
    
    x[1] = init[1] * exp(-alpha * dt);

    if (evid == 1) x[cmt] = x[cmt] + amt;

    return x;
  }

  matrix oneCptModel(real[] time, real[] amt, int[] cmt, int[] evid, 
		     real CL, real V){
    vector[1] init;
    real dt;
    real t0;
    matrix[size(time), 1] result;
    int nt;

    nt = size(time);

    init = rep_vector(0, 1);
    t0 = time[1];
    for(i in 1:nt){
      dt = time[i] - t0;
      init = oneCptModel1(dt, init, amt[i], cmt[i], evid[i],
			   CL, V);
      result[i, 1] = init[1];
      t0 = time[i];
    }
    
    return result;
  }
  
}

data{
  int nSubjects;
  int nt;
  int nObs;
  int iObs[nObs];
  real amt[nt];
  int cmt[nt];
  int evid[nt];
  int start[nSubjects];
  int end[nSubjects];
  real time[nt];
  vector[nObs] cObs;
  int weight[nSubjects];
}

transformed data{
  vector[nObs] logCObs;
  int nti[nSubjects];
  int nRandom;

  logCObs = log(cObs);

  nRandom = 2;
}

parameters{
  real<lower = 0.001> CLHat;
  real<lower = 0.001> VHat;
  vector<lower = 0>[nRandom] omega;
  real<lower = 0> sigma;
  vector[nRandom] coeff[nSubjects];
  vector<lower = 0> [nSubjects] eta1;
  vector<lower = 0> [nSubjects] eta2;

}

transformed parameters{
  vector<lower = 0.001>[nRandom] thetaHat;
  vector[nRandom] ltv;
  cov_matrix[nRandom] Omega;
  real<lower = 0> CL[nSubjects];
  real<lower = 0> V[nSubjects];
  vector [nt] cHat;
  vector [nObs] cHatObs;
  matrix [nt, 1] x;
  vector[nRandom] logtheta[nSubjects];
  
  
  thetaHat[1] = CLHat;
  thetaHat[2] = VHat;
  
  ltv[1] = log(thetaHat[1]);
  ltv[2] = log(thetaHat[2]);

  Omega = diag_matrix(omega); 

  for(j in 1:nSubjects){
    logtheta[j,1] = (ltv[1] + eta1[j]);
    logtheta[j,2] = (ltv[2] + eta2[j]);
    CL[j] = exp(logtheta[j, 1])*(weight[j]/1000);
    V[j] = exp(logtheta[j, 2])*(weight[j]/1000);

    x[start[j]:end[j],] = oneCptModel(time[start[j]:end[j]],
				       amt[start[j]:end[j]],
				       cmt[start[j]:end[j]],
				       evid[start[j]:end[j]],
				       CL[j], V[j]);
    cHat[start[j]:end[j]] = x[start[j]:end[j], 1] ./ V[j];
  }

  cHatObs = cHat[iObs];

}

model{
    CLHat ~ uniform(1, 20);
    VHat ~ uniform(1, 10);
    for (j in 1:nSubjects){
    eta1[j] ~ normal(0, omega[1]);
    eta2[j] ~ normal(0, omega[2]);
    }
    omega[1] ~ uniform(0.1, 1);
    omega[2] ~ uniform(0.1, 1);
    sigma ~ uniform(0.1, 10);

    // Inter-individual variability
    // logtheta ~ multi_normal(log(thetaHat), Omega);
    logtheta[1] ~ normal(ltv[1],omega[1]);
    logtheta[2] ~ normal(ltv[2],omega[2]);
//     for (i in 1:nObs)
//      if (evid[i] == 0){
//  logCObs[i] ~ normal(log(cHatObs[i]), sigma);
//     }
    for (i in 1: nObs){
    logCObs[i] ~ normal(log(cHatObs[i]), sigma);
    }

}

generated quantities{
  vector[nRandom] logthetaPred[nSubjects];
  vector<lower = 0>[nt] cHatPred;
  real <lower=0>cObsCond[nt];
  real <lower=0>cObsPred[nt];
  real<lower = 0> CLPred[nSubjects];
  real<lower = 0> VPred[nSubjects];
  real log_lik[nObs];
  matrix[nt, 2] xPred;

  for(j in 1:nSubjects){
    logthetaPred[j] = multi_normal_rng(log(thetaHat), Omega);
    CLPred[j] = exp(logthetaPred[j, 1]);
    VPred[j] = exp(logthetaPred[j, 2]);

    xPred[start[j]:end[j],] = oneCptModel(time[start[j]:end[j]],
				       amt[start[j]:end[j]],
				       cmt[start[j]:end[j]],
				       evid[start[j]:end[j]],
				       CLPred[j], VPred[j]);
    cHatPred[start[j]:end[j]] = xPred[start[j]:end[j], 2] ./ V[j];
  }

  for(i in 1:nt){
    if(time[i] == 0){
      cObsCond[i] = 0;
      cObsPred[i] = 0;
    }else{
      cObsCond[i] = exp(normal_rng(log(cHat[i]), sigma));
      cObsPred[i] = exp(normal_rng(log(cHatPred[i]), sigma));
    }
  }

}



This is my script file, with initial values:

NeoGent <- read.csv(file = "nGent_data7a-163.csv", header = TRUE, sep = ",")
library(data.table)
library(dplyr)
NeoGent <- as.data.table(NeoGent)
NeoGent$CMT <- 1
NeoGentEVID10 <- as.data.frame(NeoGent[NeoGent$EVID != "2"]) # training data set, incl 1+0

NeoGentEVID10 <- NeoGentEVID10%>%
mutate(DV = ifelse(EVID, NA, DV))%>%
arrange(ID, TIME, desc(EVID))

nt <- nrow(NeoGentEVID10)
start <- (1:nt)[!duplicated(NeoGentEVID10$ID)]
end <- c(start[-1] - 1, nt)

iObs <- with(NeoGentEVID10, (1:nrow(NeoGentEVID10))[!is.na(DV) & EVID == 0])
nObs <- length(iObs)

nSubjects = length(unique(NeoGentEVID10$ID))
attach(NeoGentEVID10)
data <- with(NeoGentEVID10,
list(
nSubjects = nSubjects,
nt = nt,
nObs = nObs,
iObs = iObs,
amt = AMT,
cmt = CMT,
evid = EVID,
start = start,
end = end,
time = TIME,
cObs = DV[iObs],
weight = WT[!duplicated(ID)]
))

init <- function(){
list(CLHat = 0.046,
VHat = 0.73,
omega = c(0.0576,0.0729),
sigma = runif(1, 0.09, 2),
eta1 = rep(0.5,nSubjects),
eta2 = rep(0.5,nSubjects)

   #coeff = matrix(rep(c(0.002,0.0004),ea = nObs),nrow = nObs)

)

}

parametersToPlot <- c("CLHat", "VHat",
"sigma", "omega")

otherRVs <- c("cObsCond", "cObsPred", "CL", "V") ##, "log_lik")

parameters <- c(parametersToPlot, otherRVs)

nChains <- 4
nPost <- 1000 ## Number of post-burn-in samples per chain after thinning
nBurn <- 1000 ## Number of burn-in samples per chain after thinning
nThin <- 1

nIter <- (nPost + nBurn) * nThin
nBurnin <- nBurn * nThin

fit <- stan('grasela.stan',
data = data,
pars = parameters,
iter = nIter,
warmup = nBurnin,
thin = nThin,
init = init,
chains = nChains)

This is the error message I get:

Rejecting initial value:
Error evaluating the log probability at the initial value.
Exception: normal_lpdf: Location parameter is nan, but must be finite! (in 'modelea8d5760c71b_grasela' at line 137)
Initialization between (-2, 2) failed after 100 attempts.
Try specifying initial values, reducing ranges of constrained values, or reparameterizing the model.
[1] "Error in sampler$call_sampler(args_list[[i]]) : Initialization failed."

FYI, lines 136-137 of the model correspond to:
for (i in 1: nObs){
logCObs[i] ~ normal(log(cHatObs[i]), sigma);

I have tried playing around with the bounds and with the parameters of the distributions, but no luck. Any help will be greatly appreciated!!

I had similar problems with ODE models. Without checking your code in detail (sorry, it's a lot and a bit chaotic), I can think of two things you should check:

  1. Is it possible that the values your model returns become zero or negative? You are taking the log of cHatObs[i], and if that number is ≤ 0 the log function returns -inf or nan, which would explain the error you are getting.
  2. One thing I have learned with ODE models (and it probably holds for any kind of system with exponential functions) is that the quality of the numerical solution can depend strongly on the parameters. The exponential nature of the solutions (which is what you have in your example) makes them grow, or shrink towards zero, extremely fast, even while the parameters themselves are still perfectly reasonable numbers around 20 or even lower. In my experience it helps a lot to have a rough idea of the magnitude of the parameters and to center the model around that magnitude; the sampler then only has to explore values around 0, which is much more numerically stable. E.g.

Replace logtheta[j,1] = (ltv[1] + eta1[j]); with logtheta[j,1] = (magn[1] + ltv[1] + eta1[j]);, where magn[1] is roughly the typical log value of your clearance parameter. This is similar to putting a prior with a non-zero mean on ltv[1], but Stan will have an easier time initialising ltv[1], since it tries values symmetrically around 0 (between -2 and 2 by default, as you can also see in your output).
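
If it helps, here is a rough back-of-the-envelope illustration in R of why the centering matters (the 0.046 is just the CLHat initial value from your own script, used as a stand-in for the typical clearance; nothing else is assumed):

## Stan draws default initial values uniformly in (-2, 2) on the unconstrained scale.
## Compare the implied clearance with and without centering on the typical log value.
exp(c(-2, 2))                ## uncentered: exp(ltv[1]) spans roughly 0.14 to 7.4
exp(log(0.046) + c(-2, 2))   ## centered on magn[1] = log(0.046): roughly 0.006 to 0.34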

Try these two things. If they don't help, then a closer investigation of the code might be necessary.

It is good to call rstan::expose_stan_functions("filename.stan") so that oneCptModel1, oneCptModel, and any other functions you may have defined in the Stan language get exported to the global environment of your R session. That way you can more easily check that they are behaving as expected.
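
For example, something along these lines (a minimal sketch; the dose schedule, the 800 g weight, and the eta value of 0.5 are all made up, only the function names and the CLHat/VHat initial values come from your files) lets you check whether the predicted concentrations can hit zero or go negative at your initial values, which ties back to point 1 above:

library(rstan)
expose_stan_functions("grasela.stan")  ## exports oneCptModel1() and oneCptModel() into R

## Hypothetical one-subject schedule: two doses with observations in between
time <- c(0, 1, 6, 12, 13, 18)
amt  <- c(5, 0, 0, 5, 0, 0)
evid <- c(1L, 0L, 0L, 1L, 0L, 0L)      ## 1 = dose record, 0 = observation
cmt  <- rep(1L, length(time))

## CL and V as the transformed parameters block computes them at the initial values
CL <- exp(log(0.046) + 0.5) * (800 / 1000)
V  <- exp(log(0.73)  + 0.5) * (800 / 1000)

x    <- oneCptModel(time, amt, cmt, evid, CL, V)
cHat <- x[, 1] / V
cHat        ## a zero or NaN here is what would make log(cHatObs[i]) non-finite
log(cHat)   ## -Inf or NaN would reproduce the normal_lpdf rejection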

Thanks for the reply!!! (I didn't think anyone would ACTUALLY REPLY!)

I had a look and nothing jumps out at me. I did initially think the same, that a log(0) was causing the error. I went back to the original template and at least managed to get my code to output dodgy results: it basically estimates all the parameters to be 0! Here is the output:

Inference for Stan model: grasela2.
4 chains, each with iter=2000; warmup=1000; thin=1;
post-warmup draws per chain=1000, total post-warmup draws=4000.

           mean se_mean sd 2.5% 25% 50% 75% 97.5% n_eff Rhat

CLHat 0 0 0 0 0 0 0 0 4000 NaN
VHat 0 0 0 0 0 0 0 0 4000 NaN
sigma 0 0 0 0 0 0 0 0 4000 NaN
omega[1] 0 0 0 0 0 0 0 0 4000 NaN
omega[2] 0 0 0 0 0 0 0 0 4000 NaN
cObsCond[1] 0 0 0 0 0 0 0 0 4000 NaN
cObsCond[2] 0 0 0 0 0 0 0 0 4000 NaN
... (cObsCond[3] through cObsCond[190] are identical: mean, sd, and all quantiles 0, n_eff 4000, Rhat NaN) ...

----- I deleted entries here to make it fit; it goes on and on ...
[ reached getOption("max.print") -- omitted 1384 rows ]

Samples were drawn using NUTS(diag_e) at Sat Jul 7 18:00:48 2018.
For each parameter, n_eff is a crude measure of effective sample size,
and Rhat is the potential scale reduction factor on split chains (at
convergence, Rhat=1).

and the model code:
"
functions{

vector oneCptModel1(real dt, vector init, real amt, int cmt, int evid,
real CL, real V){
vector[1] x;
vector[1] alpha;
alpha[1] = CL / V;

x = rep_vector(0.0, 1);

if(init[1] != 0.0){
  x[1] = init[1] * exp(-alpha[1] * dt);
}

if(evid == 1) x[cmt] = x[cmt] + amt;

return x;

}

matrix oneCptModel(real[] time, real[] amt, int[] cmt, int[] evid,
real CL, real V){
vector[1] init;
real dt;
real t0;
matrix[size(time), 1] result;
int nt;

nt = size(time);

init = rep_vector(0, 1);
t0 = time[1];
for(i in 1:nt){
  dt = time[i] - t0;
  init = oneCptModel1(dt, init, amt[i], cmt[i], evid[i],
		   CL, V);
		   
     result[i, 1] = init[1];
  t0 = time[i];
}

return result;

}

}

data{
int nSubjects;
int nt;
int nObs;
int iObs[nObs];
real amt[nt];
int cmt[nt];
int evid[nt];
int start[nSubjects];
int end[nSubjects];
real time[nt];
vector[nObs] cObs;
vector[nSubjects] weight;
}

transformed data{
vector[nObs] logCObs;
int nti[nSubjects];
int nRandom;

logCObs = log(cObs);

nRandom = 2;
}

parameters{
real<lower = 0> CLHat;
real<lower = 0> VHat;
corr_matrix[nRandom] rho;
vector<lower = 0>[nRandom] omega;
real<lower = 0> sigma;
vector[nRandom] eta;
}

transformed parameters{
vector<lower = 0>[nRandom] thetaHat;
cov_matrix[nRandom] Omega;
vector[nRandom] logtheta[nSubjects];
real<lower = 0> CL[nSubjects];
real<lower = 0> V[nSubjects];
vector<lower = 0>[nt] cHat;
vector<lower = 0>[nObs] cHatObs;
matrix<lower = 0>[nt, 1] x;

thetaHat[1] = CLHat;
thetaHat[2] = VHat;

Omega = diag_matrix(omega);

for(j in 1:nSubjects){
logtheta[j,1] = thetaHat[1] + eta[1];
logtheta[j,2] = thetaHat[2] + eta[2];
CL[j] = exp(logtheta[j, 1]) * (weight[j] / 1000);
V[j] = exp(logtheta[j, 2]) * (weight[j] / 1000);

x[start[j]:end[j],] = oneCptModel(time[start[j]:end[j]],
			       amt[start[j]:end[j]],
			       cmt[start[j]:end[j]],
			       evid[start[j]:end[j]],
			       CL[j], V[j]);
cHat[start[j]:end[j]] = x[start[j]:end[j], 1] ./ V[j];

}

cHatObs = cHat[iObs];

}

model{
CLHat ~ normal(0, 20);
VHat ~ normal(0, 100);
omega[1] ~ cauchy(0, 2);
omega[2] ~ cauchy(0, 2);
rho ~ lkj_corr(1);
sigma ~ cauchy(0, 2);

// Inter-individual variability
// logtheta ~ multi_normal(log(thetaHat), Omega);
eta[1] ~ normal(0,omega[1]);
eta[2] ~ normal(0,omega[2]);

logCObs ~ normal(log(cHatObs), sigma);

}

generated quantities{
vector[nRandom] logthetaPred[nSubjects];
vector<lower = 0>[nt] cHatPred;
real cObsCond[nt];
real cObsPred[nt];
real<lower = 0> CLPred[nSubjects];
real<lower = 0> VPred[nSubjects];
real log_lik[nObs];
matrix[nt, 1] xPred;

for(j in 1:nSubjects){
logthetaPred[j] = multi_normal_rng(log(thetaHat), Omega);
CLPred[j] = exp(logthetaPred[j, 1]);
VPred[j] = exp(logthetaPred[j, 2]);

xPred[start[j]:end[j],] = oneCptModel(time[start[j]:end[j]],
			       amt[start[j]:end[j]],
			       cmt[start[j]:end[j]],
			       evid[start[j]:end[j]],
			       CLPred[j], VPred[j]);
cHatPred[start[j]:end[j]] = xPred[start[j]:end[j], 1] ./ V[j];

}

for(i in 1:nt){
if(time[i] == 0){
cObsCond[i] = 0;
cObsPred[i] = 0;
}else{
cObsCond[i] = exp(normal_rng(log(cHat[i]), sigma));
cObsPred[i] = exp(normal_rng(log(cHatPred[i]), sigma));
}
}

}

"
and script containing initial values:

NeoGent <- read.csv(file = "nGent_data7a-163.csv", header = TRUE, sep = ",")
library(data.table)
library(dplyr)
NeoGent <- as.data.table(NeoGent)
NeoGent$CMT <- 1
NeoGentEVID10 <- as.data.frame(NeoGent[NeoGent$EVID != "2"]) # training data set, incl 1+0

NeoGentEVID10 <- NeoGentEVID10%>%
mutate(DV = ifelse(EVID, NA, DV))%>%
arrange(ID, TIME, desc(EVID))

nt <- nrow(NeoGentEVID10)
start <- (1:nt)[!duplicated(NeoGentEVID10$ID)]
end <- c(start[-1] - 1, nt)

iObs <- with(NeoGentEVID10, (1:nrow(NeoGentEVID10))[!is.na(DV) & EVID == 0])
nObs <- length(iObs)

nSubjects = length(unique(NeoGentEVID10$ID))
attach(NeoGentEVID10)
data <- with(NeoGentEVID10,
list(
nSubjects = nSubjects,
nt = nt,
nObs = nObs,
iObs = iObs,
amt = AMT,
cmt = CMT,
evid = EVID,
start = start,
end = end,
time = TIME,
cObs = DV[iObs],
weight = WT[!duplicated(ID)]
))

init <- function(){
list(CLHat = 0.046,
VHat = 0.73,
omega = c(0.0576,0.0729),
sigma = runif(1, 0.09, 2),
eta[1] = rep(0.5,nSubjects),
eta[2] = rep(0.5,nSubjects),
logtheta = matrix(rep(log(c(0.046, 0.73),nSubjects), nrow = nSubjects))

   #coeff = matrix(rep(c(0.002,0.0004),ea = nObs),nrow = nObs)

)

}

parametersToPlot <- c("CLHat", "VHat",
"sigma", "omega")

otherRVs <- c("cObsCond", "cObsPred", "CL", "V", "eta") ##, "log_lik")

parameters <- c(parametersToPlot, otherRVs)

nChains <- 4
nPost <- 1000 ## Number of post-burn-in samples per chain after thinning
nBurn <- 1000 ## Number of burn-in samples per chain after thinning
nThin <- 1

nIter <- (nPost + nBurn) * nThin
nBurnin <- nBurn * nThin

fit <- stan('grasela2.stan',
data = data,
pars = parameters,
iter = nIter,
warmup = nBurnin,
thin = nThin,
init = init,
chains = nChains)

Oh neat, I was meaning to ask this in another post!
~ Thank you