I understand the challenge of creating a predict
function for models fit using rstan::stan
. However it is a problem I run into constantly: needing to use a computationally intensive model to predict outcomes on lots of new data (where I only have observations of the independent data so can’t use the new data as new observations for model fitting). While it couldn’t be called a prediction method per se (since it requires the user to write the code that actually produces the predictions), would it be possible to extend the algorithm = "Fixed_param"
to allow say a list of fixed parameters corresponding to the posterior of the parameters from a fitted model? At the moment, my strategy is to sequentially pass each draw from the posterior to stan
using algorithm = "Fixed_param"
with new data, and then storing the posterior predictives for the new data for each run. See reprex below. Not terrible for users to write their own code for this, but seems like the Fixed_param
option is almost there as a way to get posterior predictives for new data without having to refit the model each time? Or am I missing something and can that already be done using Fixed_param
? If not, and if there’s any interest in developing that functionality, I’d be happy to help out as best I can.
library(tidyverse)
library(rstan)
#> Loading required package: StanHeaders
#> rstan (Version 2.19.2, GitRev: 2e1f913d3ca3)
#> For execution on a local, multicore CPU with excess RAM we recommend calling
#> options(mc.cores = parallel::detectCores()).
#> To avoid recompilation of unchanged Stan programs, we recommend calling
#> rstan_options(auto_write = TRUE)
#>
#> Attaching package: 'rstan'
#> The following object is masked from 'package:tidyr':
#>
#> extract
library(tidybayes)
# a toy model
# Stan model code for a simple linear regression with posterior predictive
# draws in generated quantities.
# NOTE(review): the name `stan_model` shadows rstan::stan_model(); harmless in
# this script but worth renaming in real code.
stan_model <- "
data {
  int<lower=0> n;          // number of observations
  int<lower=1> n_betas;    // number of betas
  vector[n] y;             // outcomes
  matrix[n, n_betas] x;    // predictors
}
parameters {
  vector[n_betas] betas;
  // FIX: a scale parameter must be positive. An unbounded `real` lets the
  // sampler propose sigma <= 0, which normal_lpdf rejects and normal_rng
  // treats as a fatal error.
  real<lower=0> sigma;
}
model {
  y ~ normal(x * betas, sigma);
  betas ~ normal(0, 10);
  sigma ~ cauchy(0, 2.5);
}
generated quantities {
  vector[n] y_pp;
  for (i in 1:n) {
    y_pp[i] = normal_rng(x[i, 1:n_betas] * betas, sigma);
  }
}
"
# Simulation settings for the toy model: y = 2 * x + 10 + Gaussian noise.
n <- 50            # number of training observations
betas <- c(2, 10)  # slope (on x) and intercept
sigma <- 10        # residual standard deviation
# NOTE(review): consider set.seed() here so the reprex is reproducible.
training_data <- data.frame(x = 1:n, intercept = 1)
# Simulate outcomes from the linear predictor plus noise.
# FIX: use `<-` rather than `=` for top-level assignment, matching the rest
# of the script.
y <- as.numeric(as.matrix(training_data) %*% betas + rnorm(n, 0, sigma))
# plot(training_data$x, y)
# fit the model
# Fit the toy model on the training data; a single short chain is enough
# for this demonstration.
stan_fit <- stan(
  model_code = stan_model,
  data = list(
    n = n,
    n_betas = length(betas),
    x = training_data,
    y = y
  ),
  chains = 1,
  iter = 1000,
  warmup = 500,
  cores = 1,
  refresh = 0 # silence per-iteration progress messages
)
# plot(stan_fit, pars = "betas")
# go through and get the individual draws for each parameter
tidy_posts <- tidybayes::gather_draws(stan_fit, betas[variable])
nested_posts <- tidy_posts %>%
group_by(.draw) %>%
nest()
# Create some new data partly outside of the range of the training data.
testing_data <- data.frame(x = 20 + (1:n), intercept = 1)
new_data <- list(
  # FIX: size everything from `testing_data` itself rather than the global
  # training-set `n` -- they are equal here only by coincidence.
  n = nrow(testing_data),
  n_betas = ncol(testing_data),
  y = rep(1, nrow(testing_data)), # dummy outcomes: required by the data
                                  # block, unused under Fixed_param
  x = testing_data
)
pred_foo <- function(params, stan_model, new_data) {
  # Posterior predictive draws for `new_data` given a single posterior draw.
  #
  # params:     long data frame with columns .variable and .value holding one
  #             posterior draw (e.g. one group from tidybayes::gather_draws).
  # stan_model: Stan model code as a string. NOTE(review): the model is
  #             recompiled on every call unless rstan_options(auto_write = TRUE)
  #             caches it -- this dominates the runtime of the loop.
  # new_data:   data list matching the model's data block.
  #
  # Returns a tibble of tidy draws from the single Fixed_param iteration.
  stopifnot(all(c(".variable", ".value") %in% names(params)))
  variables <- unique(params$.variable)
  # Build an init list with one named element per parameter, values kept in
  # draw order.
  # NOTE(review): any model parameter absent from `params` is left to Stan's
  # random initialization -- make sure every parameter used in generated
  # quantities (e.g. sigma) is supplied by the caller.
  inits <-
    purrr::map(variables, ~ params$.value[params$.variable == .x]) %>%
    purrr::set_names(variables)
  pp_samps <- stan(
    model_code = stan_model,
    data = new_data,
    chains = 1,
    warmup = 0,
    iter = 1,
    cores = 1,
    refresh = 0,
    init = list(inits),
    algorithm = "Fixed_param"
  )
  # FIX: return the result explicitly; the original ended with `out <- ...`,
  # which returns the value invisibly.
  tidybayes::tidy_draws(pp_samps)
}
# Run the Fixed_param prediction once per posterior draw (pretending we had
# "new" data), then flatten the per-draw prediction tibbles into one frame.
nested_posts <- nested_posts %>%
  mutate(
    preds = purrr::map(
      data,
      ~ pred_foo(.x, stan_model = stan_model, new_data = new_data)
    )
  )
unnested_posts <- nested_posts %>%
  select(-data) %>%
  rename(draw = .draw) %>%
  unnest(cols = preds)
# Reshape so each y_pp[i] column becomes one (observation, prediction) row.
y_pp <- unnested_posts %>%
  tidyr::pivot_longer(
    cols = contains("_pp"),
    names_to = "observation",
    values_to = "prediction",
    names_pattern = "y_pp\\[(.*)\\]",
    # FIX: `names_ptypes` stopped coercing in tidyr >= 1.1.0 (it now errors on
    # a character -> integer cast); `names_transform` is the supported way to
    # convert the captured index to an integer.
    names_transform = list(observation = as.integer)
  )
# Summarise the predictive distribution per new-data point and plot the mean
# with a 90% credible ribbon.
y_pp %>%
  mutate(x = observation + min(testing_data$x) - 1) %>%
  group_by(x) %>%
  summarise(
    mean_pred = mean(prediction),
    lower = quantile(prediction, 0.05),
    upper = quantile(prediction, 0.95)
  ) %>%
  ungroup() %>%
  ggplot() +
  geom_ribbon(aes(x = x, ymin = lower, ymax = upper), alpha = 0.5) +
  geom_line(aes(x = x, y = mean_pred), colour = "red") +
  ylab("y") +
  labs(caption = "Red line is mean posterior predictive, grey shaded area 90% credible interval")
Created on 2019-10-22 by the reprex package (v0.3.0)