Rejecting initial value: Log probability evaluates to log(0), i.e. negative infinity

Hello!
The error message “Rejecting initial value: Log probability evaluates to log(0), i.e. negative infinity.” appeared when I tried to run the Stan code below. :(

Does anyone know the reason?

data {
  int<lower=1> N;                       // number of subjects
  int<lower=1> T;                       // maximum number of trials per subject
  int<lower=1, upper=T> Tsubj[N];       // number of valid trials for each subject
  int<lower=-1, upper=1> gamble[N, T];  // choice: 1 = gamble, 0 = safe; presumably -1 marks missing trials — TODO confirm
  int<lower=-2, upper=1> type[N, T];    // trial type; -1 selects the loss-framed safe-option branch in the model
  real cert[N, T];                      // certain (safe) amount; NOTE(review): unconstrained — a negative value makes pow(cert, non-integer) NaN, a likely source of the log(0) rejection — verify the data
  real<lower=0> gain[N, T];             // potential gain of the gamble
  real<lower=0> loss[N, T];             // potential loss of the gamble (entered as a positive number)
  int<lower=-5,upper=5> happy[N,T];     // happiness rating modulating value-function curvature
}
transformed data {
  // intentionally empty
}
parameters {
  // Group-level (hyper)parameters: one location/scale pair per subject-level
  // parameter, ordered [rho, lambda, tau, alpha, beta].
  vector[5] mu_p;
  vector<lower=0>[5] sigma;
  // Subject-level raw parameters on the standard-normal scale
  // (non-centered parameterization; transformed to bounded scales below).
  vector[N] rho_p;
  vector[N] lambda_p;
  vector[N] tau_p;
  vector[N] alpha_p;
  vector[N] beta_p;
}
transformed parameters {
  vector<lower=0, upper=2>[N] rho;
  vector<lower=0, upper=5>[N] lambda;
  vector<lower=0>[N] tau;
  vector<lower=0, upper=1>[N] alpha;
  vector<lower=0, upper=1>[N] beta;

  // Map each subject's standard-normal raw parameter onto its bounded scale
  // (non-centered parameterization): Phi_approx squashes into (0, 1), then a
  // constant stretches to the declared upper bound; tau uses exp for (0, inf).
  for (i in 1:N) {
    rho[i]    = 2 * Phi_approx(mu_p[1] + sigma[1] * rho_p[i]);
    lambda[i] = 5 * Phi_approx(mu_p[2] + sigma[2] * lambda_p[i]);
    tau[i]    = exp(mu_p[3] + sigma[3] * tau_p[i]);
    alpha[i]  = Phi_approx(mu_p[4] + sigma[4] * alpha_p[i]);
    beta[i]   = Phi_approx(mu_p[5] + sigma[5] * beta_p[i]);
  }
}
model {
  // Group-level (hyper)priors.
  mu_p  ~ normal(0, 1.0);
  sigma ~ normal(0, 0.2);

  // Subject-level raw priors (non-centered parameterization).
  rho_p    ~ normal(0, 1.0);
  lambda_p ~ normal(0, 1.0);
  tau_p    ~ normal(0, 1.0);
  alpha_p ~ normal(0, 1.0);
  beta_p ~ normal(0, 1.0);

  for (i in 1:N) {
    for (t in 1:Tsubj[i]) {
      real evSafe;    // subjective value of the certain option
      real evGamble;  // subjective value of the 50/50 gamble
      real pGamble;   // probability of choosing the gamble
      real expo_a;    // happiness-modulated exponent for the safe option
      real expo_b;    // happiness-modulated exponent for the gamble

      // BUG FIX: the original wrote "} if (...) {" instead of "} else if (...) {",
      // so the trailing else bound only to the second if; when happy < 0 BOTH the
      // first branch and the else branch ran, and the else overwrote the result
      // (numerically coincident here for negative happy, but latently wrong and
      // doing double work). Proper else-if ladder, exponents computed once.
      if (happy[i, t] < 0) {
        expo_a = rho[i] * (1 + (-happy[i, t] * alpha[i]));
        expo_b = rho[i] * (1 - (-happy[i, t] * beta[i]));
      } else if (happy[i, t] == 0) {
        expo_a = rho[i];
        expo_b = rho[i];
      } else {
        expo_a = rho[i] * (1 - (happy[i, t] * alpha[i]));
        expo_b = rho[i] * (1 + (happy[i, t] * beta[i]));
      }

      // NOTE(review): pow(cert, e) is NaN when cert < 0 and e is non-integer,
      // and pow(0, e) is +inf when e < 0 (possible here since happy*alpha can
      // exceed 1). Either produces the "log(0)" rejection — verify cert >= 0
      // and the exponent range against the data.
      if (type[i, t] == -1) {
        evSafe = -lambda[i] * pow(cert[i, t], expo_a);
      } else {
        evSafe = pow(cert[i, t], expo_a);
      }

      evGamble = 0.5 * (pow(gain[i, t], expo_b) - lambda[i] * pow(loss[i, t], expo_b));

      pGamble = inv_logit(tau[i] * (evGamble - evSafe));
      // NOTE(review): gamble is declared with lower=-1; if -1 marks missing
      // trials that fall inside 1:Tsubj[i], bernoulli() on -1 also rejects
      // the log probability — confirm against the data preparation.
      gamble[i, t] ~ bernoulli(pGamble);
    }
  }
}

generated quantities {
  // Group-level means mapped onto the interpretable scales
  // (same transforms as the subject-level parameters, with sigma = 0).
  real<lower=0, upper=2> mu_rho;
  real<lower=0, upper=5> mu_lambda;
  real<lower=0>         mu_tau;
  real<lower=0,upper=1> mu_alpha;
  real<lower=0,upper=1> mu_beta;

  // Per-subject log likelihood (for LOO / WAIC model comparison).
  real log_lik[N];

  // For posterior predictive check
  real y_pred[N, T];

  // Initialize all predictions to -1 so trials beyond Tsubj[i] are flagged
  // rather than left undefined.
  for (i in 1:N) {
    for (t in 1:T) {
      y_pred[i, t] = -1;
    }
  }

  mu_rho    = Phi_approx(mu_p[1]) * 2;
  mu_lambda = Phi_approx(mu_p[2]) * 5;
  mu_tau    = exp(mu_p[3]);
  mu_alpha  = Phi_approx(mu_p[4]);
  mu_beta = Phi_approx(mu_p[5]);

  { // local section, this saves time and space
    for (i in 1:N) {
      log_lik[i] = 0;
      for (t in 1:Tsubj[i]) {
        real evSafe;    // subjective value of the certain option
        real evGamble;  // subjective value of the 50/50 gamble
        real pGamble;   // probability of choosing the gamble
        real expo_a;    // happiness-modulated exponent for the safe option
        real expo_b;    // happiness-modulated exponent for the gamble

        // BUG FIX: same missing-else defect as the model block — the original
        // "} if (...) {" let the trailing else overwrite the happy < 0 result.
        // This mirrors the corrected model-block likelihood exactly.
        if (happy[i, t] < 0) {
          expo_a = rho[i] * (1 + (-happy[i, t] * alpha[i]));
          expo_b = rho[i] * (1 - (-happy[i, t] * beta[i]));
        } else if (happy[i, t] == 0) {
          expo_a = rho[i];
          expo_b = rho[i];
        } else {
          expo_a = rho[i] * (1 - (happy[i, t] * alpha[i]));
          expo_b = rho[i] * (1 + (happy[i, t] * beta[i]));
        }

        if (type[i, t] == -1) {
          evSafe = -lambda[i] * pow(cert[i, t], expo_a);
        } else {
          evSafe = pow(cert[i, t], expo_a);
        }

        evGamble = 0.5 * (pow(gain[i, t], expo_b) - lambda[i] * pow(loss[i, t], expo_b));

        pGamble    = inv_logit(tau[i] * (evGamble - evSafe));
        log_lik[i] = log_lik[i] + bernoulli_lpmf(gamble[i, t] | pGamble);

        // generate posterior prediction for current trial
        y_pred[i, t] = bernoulli_rng(pGamble);
      }
    }
  }
}

You can add print statements to check what is going wrong in your program.