Variance decomposition with Tweedie distribution – back transform necessary?

Hi there. We are running a hierarchical random-intercept model with a Tweedie-distributed dependent variable (see below) and three levels of hierarchy. Our aim is to estimate how much of the total variance (in percent) each level of the hierarchy explains. We do so by estimating the variance of the random intercepts at each level and dividing it by the total variance.
The dependent variable is Tweedie distributed (continuous, zero-inflated, right-skewed). The random intercepts are given normal distributions whose standard deviations we estimate. Given that we use a Tweedie distribution for the DV, do we need to back transform (inverse transform) the variance estimates of the individual random effects to correctly estimate each level's share of explained variance, or is no back transform necessary?
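
To make the target quantity concrete: writing $\sigma_k$ for the standard deviation of the random intercepts at level $k$ and $\sigma_\varepsilon^2$ for the residual variance, the share we are after for level $k$ is

$$\mathrm{VPC}_k = \frac{\sigma_k^2}{\sum_j \sigma_j^2 + \sigma_\varepsilon^2},$$

which presumes that all variance components are on the same scale, hence the question.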

Note that we are not using a log link function in the model, as is frequently done for Tweedie distributions; the model below uses an identity link.
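
(With the identity link, $\mu$ is the linear predictor itself, and the Tweedie residual variance is $\operatorname{Var}(Y \mid \mu) = \phi \mu^p$, with $p$ corresponding to mtheta in the code below, so the residual component varies with $\mu$.)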

The Stan code for the model is as follows:

functions {
  int num_non_zero_fun(vector y) {
    int A = 0;
    int N = num_elements(y);
    for (n in 1 : N) {
      if (y[n] != 0) {
        A += 1;
      }
    }
    return A;
  }
  
  array[] int non_zero_index_fun(vector y, int A) {
    int N = num_elements(y);
    array[A] int non_zero_index;
    int counter = 0;
    for (n in 1 : N) {
      if (y[n] != 0) {
        counter += 1;
        non_zero_index[counter] = n;
      }
    }
    return non_zero_index;
  }
  
  array[] int zero_index_fun(vector y, int Z) {
    int N = num_elements(y);
    array[Z] int zero_index;
    int counter = 0;
    for (n in 1 : N) {
      if (y[n] == 0) {
        counter += 1;
        zero_index[counter] = n;
      }
    }
    return zero_index;
  }
  
  void check_tweedie(real mu, real phi, real mtheta) {
    if (mu < 0) {
      reject("mu must be >= 0; found mu =", mu);
    }
    if (phi < 0) {
      reject("phi must be >= 0; found phi =", phi);
    }
    if (mtheta < 1 || mtheta > 2) {
      reject("mtheta must be in [1, 2]; found mtheta =", mtheta);
    }
  }
  
  void check_tweedie(vector mu, real phi, real mtheta) {
    int N = num_elements(mu);
    if (phi < 0) {
      reject("phi must be >= 0; found phi =", phi);
    }
    if (mtheta < 1 || mtheta > 2) {
      reject("mtheta must be in [1, 2]; found mtheta =", mtheta);
    }
    for (n in 1 : N) {
      if (mu[n] < 0) {
        reject("mu must be >= 0; found mu =", mu[n], "on element", n);
      }
    }
  }
  
  // Tweedie log density via the compound Poisson-gamma representation;
  // M truncates the otherwise infinite sum over the latent Poisson count
  real tweedie_lpdf(vector y, vector mu, real phi, real mtheta, int M) {
    check_tweedie(mu, phi, mtheta);
    int N = num_elements(y);
    int N_non_zero = num_non_zero_fun(y);
    int N_zero = N - N_non_zero;
    array[N_zero] int zero_index = zero_index_fun(y, N_zero);
    array[N_non_zero] int non_zero_index = non_zero_index_fun(y, N_non_zero);
    vector[N] lambda = 1 / phi * mu ^ (2 - mtheta) / (2 - mtheta); // Poisson rate
    real alpha = (2 - mtheta) / (mtheta - 1); // gamma shape per latent event
    vector[N] beta = 1 / phi * mu ^ (1 - mtheta) / (mtheta - 1); // gamma rate
    // exact zeros arise when the latent Poisson count is zero: log P(Y = 0) = -lambda
    real lp = -sum(lambda[zero_index]);
    // non-zeros: marginalize over the latent count m, truncating the sum at M
    for (n in 1 : N_non_zero) {
      int i = non_zero_index[n]; // index into the original data
      vector[M] ps;
      for (m in 1 : M) {
        ps[m] = poisson_lpmf(m | lambda[i])
                + gamma_lpdf(y[i] | m * alpha, beta[i]);
      }
      lp += log_sum_exp(ps);
    }
    return lp;
  }
  
  real tweedie_rng(real mu, real phi, real mtheta) {
    check_tweedie(mu, phi, mtheta);
    // handle the boundary cases before computing the compound parameters,
    // which are undefined (division by zero) at mtheta = 1 and mtheta = 2
    if (mtheta == 1) {
      return phi * poisson_rng(mu / phi); // scaled Poisson
    }
    if (mtheta == 2) {
      return gamma_rng(1 / phi, 1 / (phi * mu)); // gamma with mean mu
    }
    real lambda = 1 / phi * mu ^ (2 - mtheta) / (2 - mtheta);
    real alpha = (2 - mtheta) / (mtheta - 1);
    real beta = 1 / phi * mu ^ (1 - mtheta) / (mtheta - 1);
    int N = poisson_rng(lambda);
    if (N == 0) {
      return 0.; // no gamma jumps: an exact zero
    }
    return gamma_rng(N * alpha, beta);
  }
}
data {
  int<lower=1> N; // total number of observations
  vector[N] Y; // response variable
  int<lower=1> K; // number of population-level effects
  matrix[N, K] X; // population-level design matrix
  // data for group-level effects of ID 1
  int<lower=1> N_1; // number of grouping levels
  int<lower=1> M_1; // number of coefficients per level
  array[N] int<lower=1> J_1; // grouping indicator per observation
  // group-level predictor values
  vector[N] Z_1_1;
  // data for group-level effects of ID 2
  int<lower=1> N_2; // number of grouping levels
  int<lower=1> M_2; // number of coefficients per level
  array[N] int<lower=1> J_2; // grouping indicator per observation
  // group-level predictor values
  vector[N] Z_2_1;
  // data for group-level effects of ID 3
  int<lower=1> N_3; // number of grouping levels
  int<lower=1> M_3; // number of coefficients per level
  array[N] int<lower=1> J_3; // grouping indicator per observation
  // group-level predictor values
  vector[N] Z_3_1;
  // data for group-level effects of ID 4
  int<lower=1> N_4; // number of grouping levels
  int<lower=1> M_4; // number of coefficients per level
  array[N] int<lower=1> J_4; // grouping indicator per observation
  // group-level predictor values
  vector[N] Z_4_1;
  int prior_only; // should the likelihood be ignored?
  int<lower=1> M;
}
transformed data {
}

parameters {
  vector[K] b; // population-level effects
  real<lower=0> Intercept; // kept positive, since the identity-link mu must be nonnegative
  real<lower=0> phi; // precision parameter
  real<lower=1, upper=2> mtheta; // Tweedie power parameter

  vector<lower=0>[M_1] sd_1; // group-level standard deviations
  array[M_1] vector[N_1] z_1; // standardized group-level effects

  vector<lower=0>[M_2] sd_2; // group-level standard deviations
  array[M_2] vector[N_2] z_2; // standardized group-level effects

  vector<lower=0>[M_3] sd_3; // group-level standard deviations
  array[M_3] vector[N_3] z_3; // standardized group-level effects

  vector<lower=0>[M_4] sd_4; // group-level standard deviations
  array[M_4] vector[N_4] z_4; // standardized group-level effects
}
transformed parameters {
  vector[N_1] r_1_1; // actual group-level effects
  vector[N_2] r_2_1; // actual group-level effects
  vector[N_3] r_3_1; // actual group-level effects
  vector[N_4] r_4_1; // actual group-level effects
  real lprior = 0; // prior contributions to the log posterior
  r_1_1 = sd_1[1] * z_1[1];
  r_2_1 = sd_2[1] * z_2[1];
  r_3_1 = sd_3[1] * z_3[1];
  r_4_1 = sd_4[1] * z_4[1];
  lprior += student_t_lpdf(Intercept | 3, 0, 2.5);
  lprior += gamma_lpdf(phi | 0.01, 0.01);
  // b, sd_1 to sd_4, and mtheta are left on their implicit flat priors
}
model {
  // likelihood including constants
  if (!prior_only) {
    // initialize linear predictor term
    vector[N] mu = rep_vector(0.0, N);
    mu += Intercept + X * b; // Using original X instead of Xc
    for (n in 1 : N) {
      // add more terms to the linear predictor
      mu[n] += r_1_1[J_1[n]] * Z_1_1[n] + r_2_1[J_2[n]] * Z_2_1[n]
               + r_3_1[J_3[n]] * Z_3_1[n] + r_4_1[J_4[n]] * Z_4_1[n];
    }
    target += tweedie_lpdf(Y | mu, phi, mtheta, M);
  }
  // priors including constants
  target += lprior;
  target += std_normal_lpdf(z_1[1]);
  target += std_normal_lpdf(z_2[1]);
  target += std_normal_lpdf(z_3[1]);
  target += std_normal_lpdf(z_4[1]);
}


generated quantities {
  // actual population-level intercept
  real b_Intercept = Intercept;
  
  // Recompute mu based on the model
  vector[N] mu = rep_vector(0.0, N);
  mu += Intercept + X * b; // Adding the intercept and fixed effects
  for (n in 1 : N) {
    // Adding group-level effects
    mu[n] += r_1_1[J_1[n]] * Z_1_1[n] + r_2_1[J_2[n]] * Z_2_1[n]
             + r_3_1[J_3[n]] * Z_3_1[n] + r_4_1[J_4[n]] * Z_4_1[n];
  }

  // Generating simulated data based on the model
  vector[N] r_tweedie;
  for (n in 1:N) {
    r_tweedie[n] = tweedie_rng(mu[n], phi, mtheta);
  }
}
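
For reference, here is a sketch of the extra generated quantities we would use to read the shares off the posterior draws, under the working assumption that no back transform is needed because of the identity link. The residual term uses the Tweedie variance relation Var(Y | mu) = phi * mu^mtheta, averaged over the observations; the var_* and share_* names are ours, purely for illustration:

  // sketch: appended at the end of the generated quantities block above
  real var_1 = square(sd_1[1]); // variance of the level-1 random intercepts
  real var_2 = square(sd_2[1]);
  real var_3 = square(sd_3[1]);
  real var_4 = square(sd_4[1]);
  real var_resid = 0; // average Tweedie residual variance, E[phi * mu^mtheta]
  for (n in 1 : N) {
    var_resid += phi * mu[n] ^ mtheta;
  }
  var_resid /= N;
  real var_total = var_1 + var_2 + var_3 + var_4 + var_resid;
  real share_1 = var_1 / var_total; // multiply by 100 for percent explained
  real share_2 = var_2 / var_total;
  real share_3 = var_3 / var_total;
  real share_4 = var_4 / var_total;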