Model compiles, then "Selection:" shows up with no further activity

Hello,

I am working on a model that I’ve already implemented in R (R version 4.1.2 (2021-11-01)), but that implementation is very inefficient, so I’m attempting to convert the code into Stan.

The code involves several user-defined functions, and many of these involve integrals. Ultimately, there is a quadruple integral when the functions are nested in the function fX. Initially, the code was not running due to error tolerances in the integrate_1d function, so I’ve swapped to using the integrate_ode_rk45 function in one place (based on discussion elsewhere Difference in behavior between integrate_1d and integrate - #7 by alescia). Now the code doesn’t give an integration error, but after stan prints out “SAMPLING FOR MODEL ‘anon_model’ NOW (CHAIN 1).”, there is a prompt “Selection:”, or the program crashes with a segmentation error. I wonder if this is due to the depth of the integrals or something else. I am not sure. The system monitor indicates there is plenty of memory. Well, when I replace all the integrate_1d with integrate_ode_rk45, then another error occurs: integrate_ode_rk45: initial time is 0, but must be less than 0.

I’m not quite sure where to go from here. Any help would be greatly appreciated. I’ve copied the stan code, the R code used to invoke stan, a snapshot of the data, and the diagnostics file contents below. There are several print statements in the code for debugging purposes:

functions {

real getBetaShape1(real mean, real variance, real m, real M) {
	// Moment-matched first (alpha) shape parameter of a Beta distribution
	// for a variable supported on [m, M] with the given mean and variance.
	// Returns -1.0 as an error sentinel when the requested variance is
	// infeasible for a Beta on that interval (v >= mu * (1 - mu)).
	real mu = (mean-m)/(M-m);
	real v = variance / ((M-m)*(M-m));
	real shape;
	if( v >= mu * (1 - mu) ) {
		// Infeasible moments: report the rescaled values and bail out.
		print("Error in getBetaShape1");
		print("M ", M);
		print("m ", m);
		print("mean1 ", mu);
		print("variance1 ", v);
		return -1.0;
	}
	shape = mu * ( -1*(mu*mu) + mu - v) / v;
	print("shape1: ", shape);
	return shape;
}

real getBetaShape2(real mean, real variance, real m, real M) {
	// Moment-matched second (beta) shape parameter of a Beta distribution
	// for a variable supported on [m, M] with the given mean and variance.
	// Returns -1.0 as an error sentinel when the requested variance is
	// infeasible for a Beta on that interval (v >= mu * (1 - mu)).
	real mu = (mean-m)/(M-m);
	real v = variance / ((M-m)*(M-m));
	real shape;
	if( v >= mu * (1 - mu) ) {
		// Infeasible moments: report the rescaled values and bail out.
		print("Error in getBetaShape2");
		print("M ", M);
		print("m ", m);
		print("mean1 ", mu);
		print("variance1 ", v);
		return -1.0;
	}
	shape = (mu*mu*mu - 2*mu*mu + mu*v + mu - v) / v;
	print("shape2: ", shape);
	return shape;
}

real beta_PDF(real x, real a, real b) {
	// Beta(a, b) density at x, evaluated on the log scale for numerical
	// stability: pow(x, a-1) * pow(1-x, b-1) / beta(a, b) overflows or
	// underflows easily for large shape parameters, whereas
	// exp((a-1)*log(x) + (b-1)*log1m(x) - lbeta(a, b)) does not.
	real p;
	if(x < 0 || x > 1) {
		// Outside the support. fD's rescaled argument can exceed 1 when
		// tD > M - tO, and pow()/log() would yield NaN here, silently
		// poisoning every enclosing integral — return 0 instead.
		return 0;
	}
	if(x==0) {
		// Density treated as 0 at the boundary (it diverges when a < 1).
		print("beta density x==0 0");
		return 0;
	}
	if(x==1) {
		print("beta density x==1 0");
		return 0;
	}
	p = exp((a-1) * log(x) + (b-1) * log1m(x) - lbeta(a, b));
	print("beta input ", x);
	print("beta density ", p);
	return p;
}

real fO(real tO, real m, real M, real sO1, real sO2) {
	// Onset-time density: a Beta(sO1, sO2) variable linearly rescaled from
	// [0, 1] to [m, M]; the trailing 1/(M - m) is the Jacobian of that map.
	real width = M - m;
	if(M==m) {
		// Degenerate support; cannot rescale.
		print("Error in fO");
		return 0;
	}
	return beta_PDF((tO - m) / width, sO1, sO2) / width;
}

real fO_CDF(real tO, real m, real M, real sO1, real sO2) {
	// CDF of the rescaled-Beta onset-time distribution.
	// BUG FIX: a CDF is invariant under a change of variables, so unlike
	// the density in fO it must NOT be divided by the Jacobian (M - m).
	// The old division capped this "CDF" at 1/(M - m), which in turn made
	// Pt's survival factor (1 - fC_CDF) wrong as well.
	real scaled;
	real cdf;
	if(M==m) {
		// Degenerate support; cannot rescale.
		print("Error in fO_CDF");
		return 0;
	}
	scaled  = (tO-m) / ( M - m );
	cdf = beta_cdf(scaled, sO1, sO2);
	print("fO cdf input ", tO);
	print("fO cdf value ", cdf);
	return cdf;
}

real fD(real tD, real tO, real M, real sD1, real sD2) {
	// Duration density given onset time tO: a Beta(sD1, sD2) variable
	// rescaled to the remaining window [0, M - tO], with its Jacobian.
	// NOTE(review): when tD > M - tO the rescaled value exceeds 1 —
	// presumably the integration limits prevent that; confirm.
	real window = M - tO;
	if(tO == M) {
		// No time left in the window; cannot rescale.
		print("Error in fD");
		return 0;
	}
	return beta_PDF(tD / window, sD1, sD2) / window;
}

real fC_integrand(real x, real xc, array[] real theta, array[] real x_r, array[] int x_i ) {
	// Integrand for fC: density that onset happens at x and the duration
	// then runs exactly until the cessation time tC = theta[7].
	// theta = { m, M, sO1, sO2, sD1, sD2, tC }; x_r / x_i are unused
	// placeholders required by the integrate_1d functor signature.
	real res = fO(x, theta[1], theta[2], theta[3], theta[4])
	           * fD(theta[7] - x, x, theta[2], theta[5], theta[6]);
	print("from fC_integrand result: ", res);
	print("from fC_integrand x: ", x);
	return res;
}

real[] fC_ODE(real x, array[] real y, array[] real theta, array[] real x_r, array[] int x_i ) {
	// The fC integrand wrapped as a one-state ODE right-hand side so the
	// integral can be accumulated with integrate_ode_rk45 instead of
	// integrate_1d. The state y is the running integral and is not read.
	// theta = { m, M, sO1, sO2, sD1, sD2, tC }; x_r / x_i are dummies.
	real m = theta[1];
	real M = theta[2];
	real sO1 = theta[3];
	real sO2 = theta[4];
	real sD1 = theta[5];
	real sD2 = theta[6];
	real tC = theta[7];
	real res = fO(x, m, M, sO1, sO2) * fD(tC-x, x, M, sD1, sD2);
	// BUG FIX: the debug labels previously said "fC_integrand", making the
	// ODE path's trace indistinguishable from the quadrature path's.
	print("from fC_ODE result: ", res);
	print("from fC_ODE x: ", x);
	return { res };
}

real fC(real tC,real m, real M, real sO1, real sO2, real sD1, real sD2) {
	// Cessation-time density: integral over onset times x in [m, tC] of
	// fO(x) * fD(tC - x), computed as a one-state ODE solve from m to tC.
	// not sure how to deal with the x_r and x_i arguments, so put in dummy values
	//real res = integrate_1d(fC_integrand, m, tC, { m, M, sO1, sO2, sD1, sD2, tC }, {1.1}, {1});
	real res;
	if(tC <= m) {
		// BUG FIX: integrate_ode_rk45 requires the initial time (m) to be
		// strictly less than the requested output time (tC); calling it
		// with tC <= m raises "initial time ... must be less than ..."
		// and aborts. The density is 0 at or below the minimum onset time.
		return 0;
	}
	res = integrate_ode_rk45(fC_ODE, { 0.0 }, m, { tC }, { m, M, sO1, sO2, sD1, sD2, tC }, {1.1}, {1})[1,1];
	print("from fC result: ", res);
	print("from fC tC: ", tC);
	return res;
}

real fC_CDF_integrand(real x, real xc, array[] real theta, array[] real x_r, array[] int x_i ) {
	// Integrand for fC_CDF: the cessation-time density evaluated at x.
	// theta = { m, M, sO1, sO2, sD1, sD2 }; x_r / x_i are unused
	// placeholders required by the integrate_1d functor signature.
	return fC(x, theta[1], theta[2], theta[3], theta[4], theta[5], theta[6]);
}

real fC_CDF(real tC, real m, real M, real sO1, real sO2, real sD1, real sD2) {
	// CDF of the cessation time: integral of fC from m to tC.
	// Each evaluation of the integrand itself performs an ODE solve (fC),
	// so this is a nested quadrature — a likely hotspot for the tolerance
	// failures and the long run times described in the post.
	// not sure how to deal with the x_r and x_i arguments, so put in dummy values
	real res = integrate_1d(fC_CDF_integrand, m, tC, { m, M, sO1, sO2, sD1, sD2 }, {1.1}, {1});
	print("from fC_CDF result: ", res);
	return res;
}

real Pt(real t, real m, real M, real sO1, real sO2, real sD1, real sD2) {
	// P(onset <= t and cessation > t): probability the process is still
	// active at time t.
	// BUG FIX: the original passed sO1 twice to fO_CDF (instead of
	// sO1, sO2) and sD1 twice to fC_CDF (instead of sD1, sD2) —
	// copy-paste typos that silently used the wrong Beta shapes.
	real res = fO_CDF(t, m, M, sO1, sO2) * (1 - fC_CDF(t, m, M, sO1, sO2, sD1, sD2));
	print("from Pt result: ", res);
	return res;
}

real Pt_integrand(real x, real xc, array[] real theta, array[] real x_r, array[] int x_i) {
	// Integrand used by fX to normalize Pt over [m, M].
	// theta = { m, M, sO1, sO2, sD1, sD2 }; x_r / x_i are unused
	// placeholders required by the integrate_1d functor signature.
	real res = Pt(x, theta[1], theta[2], theta[3], theta[4], theta[5], theta[6]);
	print("from Pt_integrand result: ", res);
	print("from Pt_integrand x: ", x);
	return res;
}

real fX(real t, real m, real M, real sO1, real sO2, real sD1, real sD2) {
	// Normalized density of observing the process active at time t:
	// Pt(t) divided by its integral over the whole support [m, M].
	// not sure how to deal with the x_r and x_i arguments, so put in dummy values
	real numer = Pt(t, m, M, sO1, sO2, sD1, sD2);
	real denom = integrate_1d( Pt_integrand, m, M, { m, M, sO1, sO2, sD1, sD2 }, {1.1}, {1});
	real res = numer / denom;
	print("from fX result: ", res);
	print("from fX t: ", t);
	return res;
}

real likelihoodXprior(real y, array[] real theta) {
	// Log of (likelihood x prior) for one observation y.
	// theta = { m, M, meanO, varO, meanD, varD }: the support bounds, the
	// onset mean/variance, and the duration mean/variance on the natural
	// scale. Returns negative_infinity() when a uniform prior bound or a
	// Beta feasibility constraint is violated (idiomatic replacement for
	// the original's log(0), which evaluates to the same -inf).
	real m = theta[1];
	real M = theta[2];
	real meanO = theta[3];
	real varO = theta[4];
	real meanD = theta[5];
	real varD = theta[6];
  	real sO1;
	real sO2;
	real sD1;
	real sD2;
	real val;

	print("In likelihood, m: ", m);
	print("In likelihood, M: ", M);
	print("In likelihood, meanO: ", meanO);
	print("In likelihood, varO: ", varO);
	print("In likelihood, meanD: ", meanD);
	print("In likelihood, varD: ", varD);

	//uniform priors: reject anything outside the hard bounds
	if(meanO <= 30) {
		print("meanO too small ", meanO);
		return negative_infinity();
	}
	if(meanO >= 200) {
		print("meanO too large ", meanO);
		return negative_infinity();
	}
	if(varO <= 25) {
		print("varO too small ", varO);
		return negative_infinity();
	}
	if(varO >= 1000) {
		print("varO too large ", varO);
		return negative_infinity();
	}
	if(meanD <= 5) {
		print("meanD too small ", meanD);
		return negative_infinity();
	}
	if(meanD >= 100) {
		print("meanD too large ", meanD);
		return negative_infinity();
	}
	if(varD <= 9) {
		print("varD too small ", varD);
		return negative_infinity();
	}
	if(varD >= 1000) {
		print("varD too large ", varD);
		return negative_infinity();
	}

	// Moment-match Beta shapes for the onset and duration distributions.
	// NOTE(review): the duration shapes are matched on [m, M], but fD
	// rescales durations to [0, M - tO] — confirm this is intended.
	sO1 = getBetaShape1(meanO, varO, m, M);
	sO2 = getBetaShape2(meanO, varO, m, M);
	sD1 = getBetaShape1(meanD, varD, m, M);
	sD2 = getBetaShape2(meanD, varD, m, M);

	//constraints: the shape helpers return -1.0 on infeasible moments
	if(sO1 <= 0) {
		print("sO1 too small ", sO1);
		return negative_infinity();
	}
	if(sO2 <= 0) {
		print("sO2 too small ", sO2);
		return negative_infinity();
	}
	if(sD1 <= 0) {
		print("sD1 too small ", sD1);
		return negative_infinity();
	}
	if(sD2 <= 0) {
		print("sD2 too small ", sD2);
		return negative_infinity();
	}

        //Occasionally, due to error, integration routines return very slightly negative values, so abs it
	val = abs( fX(y, m, M, sO1, sO2, sD1, sD2) );

	print("The prelog likelihood value is ", val);
	print("The likelihood input is ", y);

	return log( val ) ;
}

}

data { 
	int<lower=0> N;		//number of observations
	int<lower=0> K;		//number of predictor variables
	matrix[N, K] x;		//"design" matrix of observed predictor variable values
	vector[N] y;		// observed response variable values (one event time per row of x)
	real<lower=0> m;	//minimum onset time (lower end of the onset Beta support; 0 in the R driver)
	real<lower=0> M;	//maximum onset time (upper end of the support; 365 in the R driver; must exceed m)
}

parameters {
	real interceptO;	//intercept for onset model (linear predictor for the onset mean)
	real interceptD;	//intercept for duration model (linear predictor for the duration mean)
	vector[K] coeffsO;	//coefficients for onset predictor variables
	vector[K] coeffsD;	//coefficients for duration predictor variables
	real<lower=0> varO;	//variance for onset model (bounded to (25, 1000) inside likelihoodXprior)
	real<lower=0> varD;	//variance for duration model (bounded to (9, 1000) inside likelihoodXprior)
}

model {
	// One independent log-likelihood contribution per observation; the
	// priors and feasibility constraints are enforced inside
	// likelihoodXprior itself. A vectorized sampling statement was tried
	// first but did not work, hence the explicit loop.
	for(i in 1:N) {
		real etaO = dot_product(coeffsO, x[i]) + interceptO;  // onset mean for row i
		real etaD = dot_product(coeffsD, x[i]) + interceptD;  // duration mean for row i
		target += likelihoodXprior(y[i], { m, M, etaO, varO, etaD, varD } ) ;
		print("current target: ", target());
	}
}

I started the stan code in R:

       library(rstan)
        library(dplyr)

        # Project-local helpers (removeOutliers, get.stanInitFunction,
        # subsampleData) are defined in the sourced file.
        source(<various helper functions>)


        dataFileName="<data file with table>" 
        stanFile="<stan file with model specification>"
        initiationType = "mixed"
        initialParams = NULL
        responseVar = "<response variable>"
        outlierVars = c(<list of names of variables (column headers) to test for outliers>)
        predictorVars = c(<list of names of predictor variables>)
        # m and M must match the support assumed by the Stan model's
        # rescaled-Beta onset distribution (here: days of the year).
        m=0
        M=365
        nChains=1
        N=10	

        print("getting data")
        #get data
        data = read.table(dataFileName, header=T, sep='\t')

        print("removing outliers and standardizing data")
        #remove outliers
        data = removeOutliers(data,outlierVars)
        print(paste("There are now " , nrow(data), " data items.", collapse=" " ))

        data = select(data, c(responseVar, predictorVars))
        names(data)[names(data) == responseVar] = "Response"

        print("setting up initial parameter values function")
	#stan failed with default parameter selections
        initFunc = get.stanInitFunction(data=data, type=initiationType, initialParams=initialParams)
        print(initFunc())

	#test code with a few data values...
        print("subsampling data")
        data = subsampleData(data, N)

        print("setting up data structures for rstan")
        y = data[["Response"]]
        x = select(data, predictorVars)
        # N is recomputed from the subsample so the Stan data block agrees
        # with the actual number of rows passed in.
        N = nrow(x)
        K = length(predictorVars)
        data = list( x=as.matrix(x), y = y, N = N, K = K, m=m, M=M )

        print("Running stan")
        stan(file = stanFile, data = data, chains = nChains, init=initFunc, sample_file="stanMCMCResults.txt", diagnostic_file = "stanMCMCResults.txt.diagnostics", verbose=TRUE) 

Below is an example data set (unstandardized):

    Response Latitude Elevation Year
2        102 39.96659     34.25 1859
3        140 40.71360    318.79 1868
4        125 41.31140      8.46 1869
5        130 40.31011    126.96 1873
6        126 41.40700    125.03 1873
7        124 41.31140      8.46 1873
8        136 40.34089    353.53 1874
...

And here is the diagnostics file:

# Sample generated by Stan
# stan_version_major=2
# stan_version_minor=26
# stan_version_patch=1
# init=user
# enable_random_init=1
# seed=1819311177
# chain_id=1
# iter=2000
# warmup=1000
# save_warmup=1
# thin=1
# refresh=200
# stepsize=1
# stepsize_jitter=0
# adapt_engaged=1
# adapt_gamma=0.05
# adapt_delta=0.8
# adapt_kappa=0.75
# adapt_t0=10
# max_treedepth=10
# sampler_t=NUTS(diag_e)
# sample_file=stanMCMCResults.txt
# diagnostic_file=stanMCMCResults.txt.diagnostics
# append_samples=0
#
#

I wonder if the depth of integration (quadruple integral) or the number of print statements is causing issues. If anyone could please help to understand why the program crashes or why the program responds with a prompt “Selection:”, I would be very grateful. I’m new to stan, so if I’m not following good stan coding conventions, I apologize. Any advice would be helpful.

there is a prompt “Selection:”, or the program crashes with a segmentation error.

I started seeing the same error after upgrading to the latest R 4.3.1, but if you’re on an earlier version, maybe it isn’t related to R. What happens if you just run the test rstan example:

example(stan_model, package = "rstan", run.dontrun = TRUE)

Hi ssp3nc3r,

Thanks for getting back to me. Much appreciated.

When I ran your code snippet, rstan seemed to work for me. Sorry this isn’t much help! Below is the screen printout…


> example(stan_model, package = "rstan", run.dontrun = TRUE)

stn_md> stancode <- 'data {real y_mean;} parameters {real y;} model {y ~ normal(y_mean,1);}'

stn_md> mod <- stan_model(model_code = stancode, verbose = TRUE)

TRANSLATING MODEL '' FROM Stan CODE TO C++ CODE NOW.
COMPILING THE C++ CODE FOR MODEL 'anon_model' NOW.
OS: x86_64, linux-gnu; rstan: 2.26.22; Rcpp: 1.0.9; inline: 0.3.19 
 >> setting environment variables: 
PKG_LIBS =  '/home/david/R/x86_64-pc-linux-gnu-library/4.1/rstan/lib//libStanServices.a' -L'/home/david/R/x86_64-pc-linux-gnu-library/4.1/StanHeaders/lib/' -lStanHeaders -L'/home/david/R/x86_64-pc-linux-gnu-library/4.1/RcppParallel/lib/' -ltbb 
PKG_CPPFLAGS =   -I"/home/david/R/x86_64-pc-linux-gnu-library/4.1/Rcpp/include/"  -I"/home/david/R/x86_64-pc-linux-gnu-library/4.1/RcppEigen/include/"  -I"/home/david/R/x86_64-pc-linux-gnu-library/4.1/RcppEigen/include/unsupported"  -I"/home/david/R/x86_64-pc-linux-gnu-library/4.1/BH/include" -I"/home/david/R/x86_64-pc-linux-gnu-library/4.1/StanHeaders/include/src/"  -I"/home/david/R/x86_64-pc-linux-gnu-library/4.1/StanHeaders/include/"  -I"/home/david/R/x86_64-pc-linux-gnu-library/4.1/RcppParallel/include/"  -I"/home/david/R/x86_64-pc-linux-gnu-library/4.1/rstan/include" -DEIGEN_NO_DEBUG  -DBOOST_DISABLE_ASSERTS  -DBOOST_PENDING_INTEGER_LOG2_HPP  -DSTAN_THREADS  -DUSE_STANC3 -DSTRICT_R_HEADERS  -DBOOST_PHOENIX_NO_VARIADIC_EXPRESSION  -DBOOST_NO_AUTO_PTR  -include '/home/david/R/x86_64-pc-linux-gnu-library/4.1/StanHeaders/include/stan/math/prim/fun/Eigen.hpp'  -D_REENTRANT -DRCPP_PARALLEL_USE_TBB=1 
 >> Program source :

[...]

stn_md> fit <- sampling(mod, data = list(y_mean = 0))

SAMPLING FOR MODEL 'anon_model' NOW (CHAIN 1).
Chain 1: 
Chain 1: Gradient evaluation took 7e-06 seconds
Chain 1: 1000 transitions using 10 leapfrog steps per transition would take 0.07 seconds.
Chain 1: Adjust your expectations accordingly!
Chain 1: 
Chain 1: 
Chain 1: Iteration:    1 / 2000 [  0%]  (Warmup)
Chain 1: Iteration:  200 / 2000 [ 10%]  (Warmup)
Chain 1: Iteration:  400 / 2000 [ 20%]  (Warmup)
Chain 1: Iteration:  600 / 2000 [ 30%]  (Warmup)
Chain 1: Iteration:  800 / 2000 [ 40%]  (Warmup)
Chain 1: Iteration: 1000 / 2000 [ 50%]  (Warmup)
Chain 1: Iteration: 1001 / 2000 [ 50%]  (Sampling)
Chain 1: Iteration: 1200 / 2000 [ 60%]  (Sampling)
Chain 1: Iteration: 1400 / 2000 [ 70%]  (Sampling)
Chain 1: Iteration: 1600 / 2000 [ 80%]  (Sampling)
Chain 1: Iteration: 1800 / 2000 [ 90%]  (Sampling)
Chain 1: Iteration: 2000 / 2000 [100%]  (Sampling)
Chain 1: 
Chain 1:  Elapsed Time: 0.011 seconds (Warm-up)
Chain 1:                0.01 seconds (Sampling)
Chain 1:                0.021 seconds (Total)
Chain 1: 

SAMPLING FOR MODEL 'anon_model' NOW (CHAIN 2).
Chain 2: 
Chain 2: Gradient evaluation took 3e-06 seconds
Chain 2: 1000 transitions using 10 leapfrog steps per transition would take 0.03 seconds.
Chain 2: Adjust your expectations accordingly!
Chain 2: 
Chain 2: 
Chain 2: Iteration:    1 / 2000 [  0%]  (Warmup)
Chain 2: Iteration:  200 / 2000 [ 10%]  (Warmup)
Chain 2: Iteration:  400 / 2000 [ 20%]  (Warmup)
Chain 2: Iteration:  600 / 2000 [ 30%]  (Warmup)
Chain 2: Iteration:  800 / 2000 [ 40%]  (Warmup)
Chain 2: Iteration: 1000 / 2000 [ 50%]  (Warmup)
Chain 2: Iteration: 1001 / 2000 [ 50%]  (Sampling)
Chain 2: Iteration: 1200 / 2000 [ 60%]  (Sampling)
Chain 2: Iteration: 1400 / 2000 [ 70%]  (Sampling)
Chain 2: Iteration: 1600 / 2000 [ 80%]  (Sampling)
Chain 2: Iteration: 1800 / 2000 [ 90%]  (Sampling)
Chain 2: Iteration: 2000 / 2000 [100%]  (Sampling)
Chain 2: 
Chain 2:  Elapsed Time: 0.011 seconds (Warm-up)
Chain 2:                0.011 seconds (Sampling)
Chain 2:                0.022 seconds (Total)
Chain 2: 

SAMPLING FOR MODEL 'anon_model' NOW (CHAIN 3).
Chain 3: 
Chain 3: Gradient evaluation took 3e-06 seconds
Chain 3: 1000 transitions using 10 leapfrog steps per transition would take 0.03 seconds.
Chain 3: Adjust your expectations accordingly!
Chain 3: 
Chain 3: 
Chain 3: Iteration:    1 / 2000 [  0%]  (Warmup)
Chain 3: Iteration:  200 / 2000 [ 10%]  (Warmup)
Chain 3: Iteration:  400 / 2000 [ 20%]  (Warmup)
Chain 3: Iteration:  600 / 2000 [ 30%]  (Warmup)
Chain 3: Iteration:  800 / 2000 [ 40%]  (Warmup)
Chain 3: Iteration: 1000 / 2000 [ 50%]  (Warmup)
Chain 3: Iteration: 1001 / 2000 [ 50%]  (Sampling)
Chain 3: Iteration: 1200 / 2000 [ 60%]  (Sampling)
Chain 3: Iteration: 1400 / 2000 [ 70%]  (Sampling)
Chain 3: Iteration: 1600 / 2000 [ 80%]  (Sampling)
Chain 3: Iteration: 1800 / 2000 [ 90%]  (Sampling)
Chain 3: Iteration: 2000 / 2000 [100%]  (Sampling)
Chain 3: 
Chain 3:  Elapsed Time: 0.008 seconds (Warm-up)
Chain 3:                0.01 seconds (Sampling)
Chain 3:                0.018 seconds (Total)
Chain 3: 

SAMPLING FOR MODEL 'anon_model' NOW (CHAIN 4).
Chain 4: 
Chain 4: Gradient evaluation took 3e-06 seconds
Chain 4: 1000 transitions using 10 leapfrog steps per transition would take 0.03 seconds.
Chain 4: Adjust your expectations accordingly!
Chain 4: 
Chain 4: 
Chain 4: Iteration:    1 / 2000 [  0%]  (Warmup)
Chain 4: Iteration:  200 / 2000 [ 10%]  (Warmup)
Chain 4: Iteration:  400 / 2000 [ 20%]  (Warmup)
Chain 4: Iteration:  600 / 2000 [ 30%]  (Warmup)
Chain 4: Iteration:  800 / 2000 [ 40%]  (Warmup)
Chain 4: Iteration: 1000 / 2000 [ 50%]  (Warmup)
Chain 4: Iteration: 1001 / 2000 [ 50%]  (Sampling)
Chain 4: Iteration: 1200 / 2000 [ 60%]  (Sampling)
Chain 4: Iteration: 1400 / 2000 [ 70%]  (Sampling)
Chain 4: Iteration: 1600 / 2000 [ 80%]  (Sampling)
Chain 4: Iteration: 1800 / 2000 [ 90%]  (Sampling)
Chain 4: Iteration: 2000 / 2000 [100%]  (Sampling)
Chain 4: 
Chain 4:  Elapsed Time: 0.011 seconds (Warm-up)
Chain 4:                0.012 seconds (Sampling)
Chain 4:                0.023 seconds (Total)
Chain 4: 

stn_md> fit2 <- sampling(mod, data = list(y_mean = 5))

SAMPLING FOR MODEL 'anon_model' NOW (CHAIN 1).
Chain 1: 
Chain 1: Gradient evaluation took 4e-06 seconds
Chain 1: 1000 transitions using 10 leapfrog steps per transition would take 0.04 seconds.
Chain 1: Adjust your expectations accordingly!
Chain 1: 
Chain 1: 
Chain 1: Iteration:    1 / 2000 [  0%]  (Warmup)
Chain 1: Iteration:  200 / 2000 [ 10%]  (Warmup)
Chain 1: Iteration:  400 / 2000 [ 20%]  (Warmup)
Chain 1: Iteration:  600 / 2000 [ 30%]  (Warmup)
Chain 1: Iteration:  800 / 2000 [ 40%]  (Warmup)
Chain 1: Iteration: 1000 / 2000 [ 50%]  (Warmup)
Chain 1: Iteration: 1001 / 2000 [ 50%]  (Sampling)
Chain 1: Iteration: 1200 / 2000 [ 60%]  (Sampling)
Chain 1: Iteration: 1400 / 2000 [ 70%]  (Sampling)
Chain 1: Iteration: 1600 / 2000 [ 80%]  (Sampling)
Chain 1: Iteration: 1800 / 2000 [ 90%]  (Sampling)
Chain 1: Iteration: 2000 / 2000 [100%]  (Sampling)
Chain 1: 
Chain 1:  Elapsed Time: 0.009 seconds (Warm-up)
Chain 1:                0.009 seconds (Sampling)
Chain 1:                0.018 seconds (Total)
Chain 1: 

SAMPLING FOR MODEL 'anon_model' NOW (CHAIN 2).
Chain 2: 
Chain 2: Gradient evaluation took 3e-06 seconds
Chain 2: 1000 transitions using 10 leapfrog steps per transition would take 0.03 seconds.
Chain 2: Adjust your expectations accordingly!
Chain 2: 
Chain 2: 
Chain 2: Iteration:    1 / 2000 [  0%]  (Warmup)
Chain 2: Iteration:  200 / 2000 [ 10%]  (Warmup)
Chain 2: Iteration:  400 / 2000 [ 20%]  (Warmup)
Chain 2: Iteration:  600 / 2000 [ 30%]  (Warmup)
Chain 2: Iteration:  800 / 2000 [ 40%]  (Warmup)
Chain 2: Iteration: 1000 / 2000 [ 50%]  (Warmup)
Chain 2: Iteration: 1001 / 2000 [ 50%]  (Sampling)
Chain 2: Iteration: 1200 / 2000 [ 60%]  (Sampling)
Chain 2: Iteration: 1400 / 2000 [ 70%]  (Sampling)
Chain 2: Iteration: 1600 / 2000 [ 80%]  (Sampling)
Chain 2: Iteration: 1800 / 2000 [ 90%]  (Sampling)
Chain 2: Iteration: 2000 / 2000 [100%]  (Sampling)
Chain 2: 
Chain 2:  Elapsed Time: 0.011 seconds (Warm-up)
Chain 2:                0.01 seconds (Sampling)
Chain 2:                0.021 seconds (Total)
Chain 2: 

SAMPLING FOR MODEL 'anon_model' NOW (CHAIN 3).
Chain 3: 
Chain 3: Gradient evaluation took 3e-06 seconds
Chain 3: 1000 transitions using 10 leapfrog steps per transition would take 0.03 seconds.
Chain 3: Adjust your expectations accordingly!
Chain 3: 
Chain 3: 
Chain 3: Iteration:    1 / 2000 [  0%]  (Warmup)
Chain 3: Iteration:  200 / 2000 [ 10%]  (Warmup)
Chain 3: Iteration:  400 / 2000 [ 20%]  (Warmup)
Chain 3: Iteration:  600 / 2000 [ 30%]  (Warmup)
Chain 3: Iteration:  800 / 2000 [ 40%]  (Warmup)
Chain 3: Iteration: 1000 / 2000 [ 50%]  (Warmup)
Chain 3: Iteration: 1001 / 2000 [ 50%]  (Sampling)
Chain 3: Iteration: 1200 / 2000 [ 60%]  (Sampling)
Chain 3: Iteration: 1400 / 2000 [ 70%]  (Sampling)
Chain 3: Iteration: 1600 / 2000 [ 80%]  (Sampling)
Chain 3: Iteration: 1800 / 2000 [ 90%]  (Sampling)
Chain 3: Iteration: 2000 / 2000 [100%]  (Sampling)
Chain 3: 
Chain 3:  Elapsed Time: 0.011 seconds (Warm-up)
Chain 3:                0.01 seconds (Sampling)
Chain 3:                0.021 seconds (Total)
Chain 3: 

SAMPLING FOR MODEL 'anon_model' NOW (CHAIN 4).
Chain 4: 
Chain 4: Gradient evaluation took 2e-06 seconds
Chain 4: 1000 transitions using 10 leapfrog steps per transition would take 0.02 seconds.
Chain 4: Adjust your expectations accordingly!
Chain 4: 
Chain 4: 
Chain 4: Iteration:    1 / 2000 [  0%]  (Warmup)
Chain 4: Iteration:  200 / 2000 [ 10%]  (Warmup)
Chain 4: Iteration:  400 / 2000 [ 20%]  (Warmup)
Chain 4: Iteration:  600 / 2000 [ 30%]  (Warmup)
Chain 4: Iteration:  800 / 2000 [ 40%]  (Warmup)
Chain 4: Iteration: 1000 / 2000 [ 50%]  (Warmup)
Chain 4: Iteration: 1001 / 2000 [ 50%]  (Sampling)
Chain 4: Iteration: 1200 / 2000 [ 60%]  (Sampling)
Chain 4: Iteration: 1400 / 2000 [ 70%]  (Sampling)
Chain 4: Iteration: 1600 / 2000 [ 80%]  (Sampling)
Chain 4: Iteration: 1800 / 2000 [ 90%]  (Sampling)
Chain 4: Iteration: 2000 / 2000 [100%]  (Sampling)
Chain 4: 
Chain 4:  Elapsed Time: 0.011 seconds (Warm-up)
Chain 4:                0.008 seconds (Sampling)
Chain 4:                0.019 seconds (Total)
Chain 4: