Using the onion method in a hierarchical occupancy model

I am trying to build a hierarchical, two-level occupancy model. The model estimates the correlation between species detections through a covariance matrix $\Sigma$, similar to the multivariate-priors model in section 1.13 of the Stan User's Guide (SUG).

The model works until I scale up to more than roughly 5 to 10 covariates. The forum post "-nan results with lkj_corr_cholesky" (Developers, mc-stan.org) describes why.

Looking through the answers, I see an example onion method file in "-nan results with lkj_corr_cholesky - #3 by bgoodri". However, I cannot figure out how to include the onion method within my model. For example, if I want to use a matrix similar to a cholesky_factor_corr, where do I create it: the transformed parameters block or the model block? The example uses the generated quantities block, but I use this matrix later in the model.

Here are my questions:

  1. Is the onion method the best approach?
  2. Is there another method that would work better?
  3. Are there any other ways to optimize my code?

One end application of this model is to estimate an intercept for each species and then compare site-occupancy correlations across species.

Here is my example model, which compiles and works until I add too many species:

data {
  // sampling unit-level occupancy covariates
  int<lower = 1> n_unit; // number of units (e.g., site and time combos)
  int<lower = 1> m_psi; // number of psi predictors
  matrix[n_unit, m_psi] x_psi; // psi predictors
  int<lower = 1> j_psi; // number of unit (psi) groups
  int<lower = 1,upper = j_psi> jj_psi[n_unit];  // group id for psi_level
  
  int<lower = 1> m_psi_star; // number of psi hyper-predictors
  matrix[j_psi, m_psi_star] x_psi_star; // psi hyper-predictor matrix
  
  // sample-level detection covariates
  int<lower = 1> total_observations; // total number of raw observations
  int<lower = 1> m_p; // number of p predictors
  matrix[total_observations, m_p] v_p; // p predictors
  int<lower = 1> j_p; // number of sample (p) groups
  int<lower = 1,upper = j_p> jj_p[total_observations];  // group id for p_level
  
  int<lower = 1> m_p_star; // number of p hyper-predictors
  matrix[j_p, m_p_star] v_p_star; // p hyper-predictor matrix
  
  // survey level information  
  int<lower = 0, upper = 1> y[total_observations]; // raw detection observations (0/1)
  int<lower = 0, upper = total_observations> start_idx[n_unit]; //observation index
  int<lower = 0, upper = total_observations> end_idx[n_unit]; //observation index
  
  // summary of whether species is known to be present at each unit
  int<lower = 0, upper = 1> any_seen[n_unit]; //detection at sample-level?

  // number of surveys at each unit
  int<lower = 0> n_samples[n_unit]; // number of samples per unit

  // priors
  matrix[m_psi_star, m_psi] beta_psi_star_prior;
  real<lower=0> beta_star_sd_prior;
  
  matrix[m_p_star, m_p] delta_p_star_prior;
  real<lower=0> delta_star_sd_prior;
}
parameters {
  matrix[m_psi, j_psi] z_psi;
  matrix[m_p, j_p] z_p;

  matrix[m_psi_star, m_psi] beta_psi_star;

  matrix[m_p_star, m_p] delta_p_star;
    
  real<lower=0> sigma_psi;
  real<lower=0> sigma_p;
  
  cholesky_factor_corr[m_psi] L_Omega_psi;
  vector<lower=0, upper= pi()/2>[m_psi] tau_psi_unif;

  cholesky_factor_corr[m_p] L_Omega_p;
  vector<lower=0, upper= pi()/2>[m_p] tau_p_unif;
  
  vector[n_unit] logit_psi;
  vector[total_observations] logit_p;
}
transformed parameters {
  vector<lower=0>[m_psi] tau_psi; // prior scale
  vector<lower=0>[m_psi] tau_p; // prior scale

  matrix[j_psi, m_psi] beta_psi; 
  matrix[j_p, m_p] delta_p; 

  for (k in 1:m_psi) tau_psi[k] = 2.5 * tan(tau_psi_unif[k]);
  for (k in 1:m_p) tau_p[k] = 2.5 * tan(tau_p_unif[k]);
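  // note: 2.5 * tan(U) with U ~ uniform(0, pi()/2) is equivalent to a
  // half-Cauchy(0, 2.5) prior on each tau, without sampling a heavy-tailed
  // parameter directly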

  beta_psi = x_psi_star * beta_psi_star + (diag_pre_multiply(tau_psi,  L_Omega_psi) * z_psi)';
  delta_p =  v_p_star * delta_p_star + (diag_pre_multiply(tau_p,  L_Omega_p) * z_p)';
}
model {
  vector[n_unit] log_psi = log_inv_logit(logit_psi);
  vector[n_unit] log1m_psi = log1m_inv_logit(logit_psi);

  to_vector(delta_p_star) ~ normal(to_vector(delta_p_star_prior), delta_star_sd_prior);
  to_vector(beta_psi_star) ~ normal(to_vector(beta_psi_star_prior), beta_star_sd_prior);

  logit_p ~ normal(rows_dot_product(delta_p[jj_p], v_p), sigma_p);
  logit_psi ~ normal(rows_dot_product(beta_psi[jj_psi], x_psi), sigma_psi);  

  to_vector(z_psi) ~ std_normal();
  L_Omega_psi ~ lkj_corr_cholesky(2);

  to_vector(z_p) ~ std_normal();
  L_Omega_p ~ lkj_corr_cholesky(2);
 
  for (idx in 1:n_unit) {
    if (n_samples[idx] > 0) {
      if (any_seen[idx]) {
        // unit is occupied
        target += log_psi[idx] 
                  + bernoulli_logit_lpmf(y[start_idx[idx]:end_idx[idx]] | 
                  logit_p[start_idx[idx]:end_idx[idx]]);
      } else {
        // unit may or may not be occupied
        target += log_sum_exp(
          log_psi[idx] + bernoulli_logit_lpmf(y[start_idx[idx]:end_idx[idx]] |
          logit_p[start_idx[idx]:end_idx[idx]]), 
          log1m_psi[idx]
        );
      }
    }
  }
}
generated quantities {
  matrix[m_psi, m_psi] Omega_psi;
  matrix[m_p, m_p]     Omega_p;

  Omega_psi = multiply_lower_tri_self_transpose(L_Omega_psi);
  Omega_p   = multiply_lower_tri_self_transpose(L_Omega_p);
}
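
For reference, the loop at the end of the model block marginalizes the latent occupancy state out of the likelihood, so each unit i contributes

$$
\Pr(y_i \mid \psi_i, p_i) =
\begin{cases}
\psi_i \prod_j p_{ij}^{y_{ij}} \, (1 - p_{ij})^{1 - y_{ij}} & \text{if the species was detected at unit } i,\\[4pt]
\psi_i \prod_j (1 - p_{ij}) + (1 - \psi_i) & \text{if it was never detected,}
\end{cases}
$$

which is what the log_sum_exp branch computes on the log scale.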

Here is example code to use the model (assuming you save the model as occupancy_2_corr.stan):

memory.limit(32497*5)
library(tidyverse)
library(GGally)
library(rstan)
options(mc.cores = parallel::detectCores())

set.seed(1234)

n_chain = 4

## Define function inputs
n_units = 5
n_unit_revisit = 5
n_spp = 3
n_samples = 6
  
unit_obs  <-
  tibble(unit_mean = rnorm(n_units, mean = 0, sd = 1)) %>%
  rowid_to_column("unit")

## simulate correlated species-level error for occupancy (psi)
beta_psi_variance <- abs(stats::rnorm(n_spp, mean = 0.20, sd = 0.1))
beta_psi_covariance <- stats::rnorm((n_spp^2 - n_spp)/2,
                                    mean = 0, sd = 0.25)

sigma_beta <- matrix(NA, nrow = n_spp, ncol = n_spp)
diag(sigma_beta) <- beta_psi_variance
sigma_beta[base::lower.tri(sigma_beta)] <- beta_psi_covariance
sigma_beta[base::upper.tri(sigma_beta)] <- beta_psi_covariance

## ensure matrix is positive def
sigma_beta <-
  t(sigma_beta) %*% sigma_beta
sigma_beta_sim <-
  data.frame(MASS::mvrnorm(n = n_units,
                           mu = rep(0, n_spp), Sigma = sigma_beta))
colnames(sigma_beta_sim) <- paste0("Spp_", seq_len(n_spp))

unit_obs_spp <- dplyr::bind_cols(unit_obs, sigma_beta_sim)

unit_obs_long <-
  tidyr::pivot_longer(unit_obs_spp, 
                      cols = starts_with("Spp_"),
                      names_to = "Species",
                      values_to = "spp_error") %>%
  dplyr::mutate(unit_response =  spp_error,
                psi = plogis(unit_response))

unit_subtable <-
  expand_grid(
    unit = seq_len(n_units),
    revisit_id = seq_len(n_unit_revisit)
  )

## data should include non-detects
unit_all <-
  unit_subtable %>%
  dplyr::full_join(unit_obs_long, by = "unit") %>%
  dplyr::mutate(z_sim = stats::rbinom(n(), size = 1, p = psi))

unit_data_summary <-
  unit_all %>%
  dplyr::group_by(unit, Species) %>%
  dplyr::summarize(n = dplyr::n(),
                   psi_sim = mean(psi),
                   psi_obs = mean(z_sim), .groups = "keep") %>%
  dplyr::arrange(unit, Species) %>%
  dplyr::mutate(unit_spp = factor(paste(unit, Species, sep = "-")),
                unit_spp_id = as.numeric(unit_spp))

# unit_data_group <-
#   unit_all %>%
#   dplyr::group_by(unit) %>%
#   dplyr::summarize(n = dplyr::n(), .groups = "keep")

#############################
## Simulate sample_level data
sample_obs <- expand_grid(unit = seq_len(n_units))

delta_p_variance <- abs(stats::rnorm(n_spp, mean = 0.25, sd = 0.1))
delta_p_covariance <- stats::rnorm((n_spp^2 - n_spp)/2,
                                    mean = 0, sd = 0.25)

sigma_delta <- matrix(NA, nrow = n_spp, ncol = n_spp)
diag(sigma_delta) <- delta_p_variance
sigma_delta[base::lower.tri(sigma_delta)] <- delta_p_covariance
sigma_delta[base::upper.tri(sigma_delta)] <- delta_p_covariance

## ensure matrix is positive def
sigma_delta <-
  t(sigma_delta) %*% sigma_delta
sigma_delta_sim <-
  data.frame(MASS::mvrnorm(n = n_units,
                           mu = rep(0, n_spp), Sigma = sigma_delta))
colnames(sigma_delta_sim) <- paste0("Spp_", seq_len(n_spp))

sample_obs_spp <- dplyr::bind_cols(sample_obs, sigma_delta_sim)

sample_obs_long <-
  tidyr::pivot_longer(sample_obs_spp, 
                      cols = starts_with("Spp_"),
                      names_to = "Species",
                      values_to = "spp_error") %>%
  dplyr::mutate(sample_response =  spp_error,
                p = plogis(sample_response))

sample_obs_longer <-
  expand_grid(unit = seq_len(n_units),
              revisit_id = seq_len(n_unit_revisit),
              sample_id = seq_len(n_samples)) %>%
  full_join(sample_obs_long, by = "unit") %>%
  full_join(unit_all %>% select(unit, revisit_id, Species, z_sim),
            by = c("unit", "revisit_id", "Species")) %>%
  mutate(y = z_sim * stats::rbinom(n(), size = 1, p))

sample_obs_longer_summary <-
  sample_obs_longer %>%
  dplyr::group_by(unit, Species) %>%
  dplyr::summarize(n = n(),
                   total_y = sum(y),
                   .groups = "keep") %>%
  dplyr::mutate(any_seen = ifelse(total_y > 0, 1, 0))


# sample_unit_summary <-
#   sample_obs_longer_summary %>%
#   dplyr::group_by(unit) %>%
#   dplyr::summarize(n = n(),
#                    total_y = sum(total_y),
#                    .groups = "keep")

## merge to get any_seen
unit_all <-
  unit_all %>%
  full_join(sample_obs_longer_summary %>%
              select(unit, Species, any_seen, n),
            by = c("unit", "Species")) %>%
  dplyr::arrange(unit, Species) %>%
  mutate(unit_spp = factor(paste(unit, Species, sep = "-")),
         unit_spp_id = as.numeric(unit_spp))

sample_obs_longer <-
  sample_obs_longer %>%
  arrange(unit, Species, revisit_id) %>%
  mutate(unit_spp = factor(paste(unit, Species, sep = "-")),
         unit_spp_revisit = factor(paste(unit, Species, revisit_id, sep = "-")),
         unit_spp_id = as.numeric(unit_spp),
         unit_spp_revisit_id = as.numeric(unit_spp_revisit)) %>%
  rowid_to_column("index")

sample_obs_idx <-
  sample_obs_longer %>%
  group_by(unit_spp_revisit, unit_spp_revisit_id) %>%
  summarize(n = n(),
            start_idx = min(index),
            end_idx = max(index), .groups = "keep")
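
## quick sanity check: each unit's index range should cover exactly its rows,
## with no gaps or overlaps
stopifnot(all(sample_obs_idx$end_idx - sample_obs_idx$start_idx + 1 == sample_obs_idx$n))
stopifnot(sum(sample_obs_idx$n) == nrow(sample_obs_longer))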

#---
n_unit = nrow(unit_all)
jj_psi = unit_all %>% pull(unit_spp_id)

x_psi = model.matrix(~ unit_spp  - 1, unit_all)
m_psi = ncol(x_psi)
j_psi = length(unique(jj_psi))
x_psi_star = model.matrix(~ 1, unit_data_summary)
m_psi_star = ncol(x_psi_star)

total_observations = nrow(sample_obs_longer)
v_p = model.matrix( ~ unit_spp - 1, sample_obs_longer)
m_p = ncol(v_p)
jj_p = sample_obs_longer %>% pull(unit_spp_id)
j_p = length(unique(jj_p))
v_p_star = model.matrix(~ 1, sample_obs_longer_summary)
m_p_star = ncol(v_p_star)

y = sample_obs_longer %>% pull(y)
start_idx = sample_obs_idx %>% pull(start_idx)
end_idx = sample_obs_idx %>% pull(end_idx)

any_seen = unit_all %>% pull(any_seen)
n_samples = unit_all %>% pull(n)

beta_psi_star_prior = matrix(0, nrow = m_psi_star, ncol = m_psi)
beta_star_sd_prior = 1
delta_p_star_prior = matrix(0, nrow = m_p_star, ncol = m_p)
delta_star_sd_prior = 1

## Stan inputs

stan_data <-
  list(n_unit = n_unit,
       jj_psi = jj_psi,
       x_psi = x_psi,
       m_psi = m_psi,
       j_psi = j_psi,
       x_psi_star = x_psi_star,
       m_psi_star = m_psi_star,
       total_observations = total_observations,
       v_p = v_p,
       m_p = m_p,
       jj_p = jj_p,
       j_p = j_p,
       v_p_star = v_p_star,
       m_p_star = m_p_star,
       y = y,
       start_idx = start_idx,
       end_idx = end_idx,
       any_seen = any_seen,
       n_samples = n_samples,
       beta_psi_star_prior = beta_psi_star_prior,
       beta_star_sd_prior = beta_star_sd_prior,
       delta_p_star_prior = delta_p_star_prior,
       delta_star_sd_prior = delta_star_sd_prior)


test_mod <- rstan::stan_model("occupancy_2_corr.stan")

stan_out_2 <- rstan::sampling(test_mod, stan_data,
                              iter = 100, chains = n_chain)
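
## the estimated correlation matrices from generated quantities can be
## inspected directly, which is the end goal here
print(stan_out_2, pars = c("Omega_psi", "Omega_p"))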

Also, unless I set init = "0", I get this error message when I scale up the model:

Chain 2: Rejecting initial value:
Chain 2: Log probability evaluates to log(0), i.e. negative infinity.
Chain 2: Stan can’t start sampling from this initial value.
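
For reference, instead of pinning everything at zero with init = "0", rstan can also shrink the window for the random initial values, which often avoids the same rejection:

stan_out_2 <- rstan::sampling(test_mod, stan_data,
                              iter = 100, chains = n_chain,
                              init_r = 0.5) # draw initial values from (-0.5, 0.5)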


I don’t have time to dig into your exact model, but I can show how to use the onion method. The linked thread only reconstructs the correlation matrix in generated quantities, but you can move that construction into transformed parameters (or the model block), e.g. as follows (nb, I haven’t tested this code, so there may be errors):

data {
  int<lower = 0> K;     // dimension of the correlation matrix
  real<lower = 0> eta;  // LKJ shape parameter
}
transformed data {
  real<lower = 0> alpha = eta + (K - 2) / 2.0;
  vector<lower = 0>[K - 1] shape1;
  vector<lower = 0>[K - 1] shape2;

  shape1[1] = alpha;
  shape2[1] = alpha;
  for (k in 2:(K - 1)) {
    alpha -= 0.5;
    shape1[k] = k / 2.0;
    shape2[k] = alpha;
  }
}
parameters {
  row_vector[choose(K, 2) - 1] l;          // do NOT init with 0 for all elements
  vector<lower = 0, upper = 1>[K - 1] R2;  // first element is not really an R^2 but is on (0, 1)
}
transformed parameters {
  // build L here (not in the model block) so it is visible to both the model
  // and generated quantities
  matrix[K, K] L = rep_matrix(0, K, K);  // Cholesky factor of the correlation matrix
  {
    int start = 1;
    int end = 2;

    L[1, 1] = 1.0;
    L[2, 1] = 2.0 * R2[1] - 1.0;
    L[2, 2] = sqrt(1.0 - square(L[2, 1]));
    for (k in 2:(K - 1)) {
      int kp1 = k + 1;
      row_vector[k] l_row = segment(l, start, k);
      real scale = sqrt(R2[k] / dot_self(l_row));
      L[kp1, 1:k] = l_row * scale;
      L[kp1, kp1] = sqrt(1.0 - R2[k]);
      start = end + 1;
      end = start + k - 1;
    }
  }
}
model {
  // priors that imply Sigma ~ lkj_corr(eta)
  l ~ std_normal();
  R2 ~ beta(shape1, shape2);
}
generated quantities {
  matrix[K, K] Sigma = multiply_lower_tri_self_transpose(L);
}
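
A quick way to sanity-check the construction (a sketch; assumes you save the snippet above as onion.stan): fit it with only K and eta as data and confirm the off-diagonal draws of Sigma match the lkj_corr(eta) prior.

onion_mod <- rstan::stan_model("onion.stan")
onion_fit <- rstan::sampling(onion_mod, data = list(K = 10, eta = 2),
                             iter = 2000, chains = 1)
print(onion_fit, pars = "Sigma")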

@spinkney thank you. I will try this as I have time and let people know how it goes.


Thank you for the help! Your solution worked.

The hard part for me was the indexing, but once I figured that out, the onion method worked like a charm! (Each row k + 1 of the Cholesky factor consumes the next k elements of the l vector, which is what the start/end bookkeeping walks through.)

For anyone following along, here is my working code:

data {
  // sampling unit-level occupancy covariates
  int<lower = 1> n_unit;         // number of groups
  int<lower = 1> n_psi;          // number of psi-level observations
  int<lower = 1> m_psi;          // number of psi predictors
  matrix[n_psi, m_psi] x_psi;    // psi predictors
  int<lower = 1> jj_psi[n_psi];  // group id for psi-level
  
  // sampling unit-level hyper-parameters
  int<lower = 1> n_psi_star;
  int<lower = 1> m_psi_star;
  matrix[n_psi_star, m_psi_star] x_psi_star;

  matrix[m_psi_star, m_psi] beta_psi_star_prior;     // prior values for beta_star
  real<lower=0> beta_psi_star_sd_prior; // prior values for beta_star's SD
  real<lower=0> eta_psi; // LKJ concentration for the Cholesky correlation matrix

  // sample-level detection covariates
  int<lower = 1> total_observations;        // total number of raw observations
  int<lower = 1> m_p;                       // number of p predictors
  matrix[total_observations, m_p] v_p;      // p predictors
  int<lower = 1> jj_p[total_observations];  // group id for p-level
  
  // sample-level hyper-parameters
  int<lower = 1> n_p_star;
  int<lower = 1> m_p_star;
  matrix[n_p_star, m_p_star] v_p_star;  

  // survey level information  
  int<lower = 0, upper = 1> y[total_observations]; // detections (0/1)
  int<lower = 0, upper = total_observations> start_idx[n_psi];
  int<lower = 0, upper = total_observations> end_idx[n_psi];
  
  // summary of whether species is known to be present at each unit
  int<lower = 0, upper = 1> any_seen[n_psi];
  
  // number of surveys at each unit
  int<lower = 0> n_samples[n_psi];

  matrix[m_p_star, m_p] delta_p_star_prior;     // prior values for delta_star
  real<lower=0> delta_p_star_sd_prior; // prior value for delta_star's SD
  real<lower=0> eta_p; // LKJ concentration for the Cholesky correlation matrix
}
parameters {
  matrix[m_psi, n_unit] z_psi;
  matrix[m_p, n_unit] z_p;

  matrix[m_psi_star, m_psi] beta_psi_star;
  real<lower=0> sigma_psi;
  vector<lower=0, upper= pi()/2>[m_psi] tau_psi_unif;
  vector[n_psi] logit_psi;

  row_vector[choose(m_psi, 2) - 1]  l_omega_psi;         // do NOT init with 0 for all elements
  vector<lower = 0,upper = 1>[m_psi - 1] r_2_psi; // first element is not really an R^2 but is on (0,1)

  matrix[m_p_star, m_p] delta_p_star;
  real<lower=0> sigma_p;
  vector<lower=0, upper= pi()/2>[m_p] tau_p_unif;
  vector[total_observations] logit_p; 
  
  row_vector[choose(m_p, 2) - 1]  l_omega_p;         // do NOT init with 0 for all elements
  vector<lower = 0,upper = 1>[m_p - 1] r_2_p; // first element is not really an R^2 but is on (0,1)


}
transformed parameters {
  real<lower = 0> alpha_p = eta_p + (m_p - 2) / 2.0;
  vector<lower = 0>[m_p-1] shape_1_p;
  vector<lower = 0>[m_p-1] shape_2_p;
  
  real<lower = 0> alpha_psi = eta_psi + (m_psi - 2) / 2.0;
  vector<lower = 0>[m_psi-1] shape_1_psi;
  vector<lower = 0>[m_psi-1] shape_2_psi;

  vector<lower=0>[m_psi] tau_psi; // prior scale
  matrix[n_unit, m_psi] beta_psi;
  
  vector<lower=0>[m_p] tau_p; // prior scale
  matrix[n_unit, m_p] delta_p; 

  matrix[m_p, m_p] L_Omega_p = rep_matrix(0, m_p, m_p); // Cholesky factor of the correlation matrix
  matrix[m_psi, m_psi] L_Omega_psi = rep_matrix(0, m_psi, m_psi); // Cholesky factor of the correlation matrix

   {
    int start = 1;
    int end = 2;

    L_Omega_p[1,1] = 1.0;
    L_Omega_p[2,1] = 2.0 * r_2_p[1] - 1.0;
    L_Omega_p[2,2] = sqrt(1.0 - square(L_Omega_p[2,1]));
    for(k in 2:(m_p - 1)) {
      int kp1 = k + 1;
      row_vector[k] l_row = segment(l_omega_p, start, k);
      real scale = sqrt(r_2_p[k] / dot_self(l_row));
      L_Omega_p[kp1, 1:k] = l_row[1:k] * scale;
      L_Omega_p[kp1, kp1] = sqrt(1.0 - r_2_p[k]);
      start = end + 1;
      end = start + k - 1;
    }
   }
    
  shape_1_p[1] = alpha_p;
  shape_2_p[1] = alpha_p;

  for(k in 2:(m_p-1)) {
    alpha_p -= 0.5;
    shape_1_p[k] = k / 2.0;
    shape_2_p[k] = alpha_p;
  }

  
   {
    int start = 1;
    int end = 2;

    L_Omega_psi[1,1] = 1.0;
    L_Omega_psi[2,1] = 2.0 * r_2_psi[1] - 1.0;
    L_Omega_psi[2,2] = sqrt(1.0 - square(L_Omega_psi[2,1]));
    for(k in 2:(m_psi - 1)) {
      int kp1 = k + 1;
      row_vector[k] l_row = segment(l_omega_psi, start, k);
      real scale = sqrt(r_2_psi[k] / dot_self(l_row));
      L_Omega_psi[kp1, 1:k] = l_row[1:k] * scale;
      L_Omega_psi[kp1, kp1] = sqrt(1.0 - r_2_psi[k]);
      start = end + 1;
      end = start + k - 1;
    }
   }
    
  shape_1_psi[1] = alpha_psi;
  shape_2_psi[1] = alpha_psi;

  for(k in 2:(m_psi-1)) {
    alpha_psi -= 0.5;
    shape_1_psi[k] = k / 2.0;
    shape_2_psi[k] = alpha_psi;
  }

  for (k in 1:m_psi) tau_psi[k] = 2.5 * tan(tau_psi_unif[k]);
  for (k in 1:m_p) tau_p[k]     = 2.5 * tan(tau_p_unif[k]);
  
  beta_psi = x_psi_star * beta_psi_star + (diag_pre_multiply(tau_psi,  L_Omega_psi) * z_psi)';
  delta_p =  v_p_star * delta_p_star + (diag_pre_multiply(tau_p,  L_Omega_p) * z_p)';
}
model {
  vector[n_psi] log_psi = log_inv_logit(logit_psi);
  vector[n_psi] log1m_psi = log1m_inv_logit(logit_psi);

  l_omega_p ~ std_normal();
  r_2_p ~ beta(shape_1_p, shape_2_p);
  
  l_omega_psi ~ std_normal();
  r_2_psi ~ beta(shape_1_psi, shape_2_psi);
  
  logit_psi ~ normal(rows_dot_product(beta_psi[jj_psi], x_psi), sigma_psi); 

  to_vector(beta_psi_star) ~ normal(to_vector(beta_psi_star_prior), beta_psi_star_sd_prior);
  to_vector(z_psi) ~ std_normal();

  logit_p ~ normal(rows_dot_product(delta_p[jj_p], v_p), sigma_p);
  to_vector(delta_p_star) ~ normal(to_vector(delta_p_star_prior), delta_p_star_sd_prior);
  to_vector(z_p) ~ std_normal();

  for (idx in 1:n_psi) {
    if (n_samples[idx] > 0) {
      if (any_seen[idx]) {
        // unit is occupied
        target += log_psi[idx] 
                  + bernoulli_logit_lpmf(y[start_idx[idx]:end_idx[idx]] |
                  logit_p[start_idx[idx]:end_idx[idx]]);
      } else {
        // unit may or may not be occupied
        target += log_sum_exp(
          log_psi[idx] + bernoulli_logit_lpmf(y[start_idx[idx]:end_idx[idx]] |
          logit_p[start_idx[idx]:end_idx[idx]]), 
          log1m_psi[idx]
        );
      }
    }
  }
}
generated quantities {
  matrix[m_psi, m_psi] Omega_psi;
  matrix[m_p, m_p]     Omega_p;

  Omega_psi = multiply_lower_tri_self_transpose(L_Omega_psi);
  Omega_p   = multiply_lower_tri_self_transpose(L_Omega_p);
}
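
For anyone adapting the earlier simulation script: this data block adds and renames a few entries relative to the first model. Something like the following (an untested sketch; map n_psi and n_unit onto your own tables as appropriate) should get you started:

stan_data_onion <- utils::modifyList(stan_data, list(
  n_psi = n_unit,                              ## rows of x_psi / length of jj_psi
  n_psi_star = nrow(x_psi_star),
  n_p_star = nrow(v_p_star),
  eta_psi = 2,                                 ## LKJ concentration, as in lkj_corr_cholesky(2)
  eta_p = 2,
  beta_psi_star_sd_prior = beta_star_sd_prior, ## renamed
  delta_p_star_sd_prior = delta_star_sd_prior  ## renamed
))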

This code currently lives on the corr branch of the UMESC / quant-ecology / occStan repository on GitLab (usgs.gov), but it should move to the main branch in the next few months as I write supporting functions.
