This is the way it’s called on the cluster:
#!/bin/bash
#SBATCH --ntasks=128
#SBATCH --ntasks-per-core=1
#SBATCH --constraint=x2680
#SBATCH --exclusive
#SBATCH --partition=multinode
#SBATCH --mem-per-cpu=2g
module load openmpi/4.1.3/gcc-11.3.0
srun --mpi=pmix ./ModelName \
sample \
num_chains=4 \
num_samples=1000 \
num_warmup=1000 \
num_threads=-1 \
data file=filename.json \
output file=output.csv
This is saved as a .sh file, which we then submit from the model folder with a terminal command.
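For reference, a minimal sketch of the submission step, assuming the script above is saved as run_casandre.sh (that filename and the path are ours):

cd /path/to/model    # illustrative: the folder containing the compiled ModelName binary
sbatch run_casandre.sh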
I’ve been advised by the cluster administrator that they are not seeing any evidence of the model benefiting from within-chain parallelization.
This is what they said:
“Speaking of which. My scaling experiments aren’t complete but I already see that runtime DECREASES when you go from 1 node (28 tasks, 56 CPUs) to 4 nodes (112 tasks, 224 CPUs) for a single, relatively short chain. Your model is probably getting zero benefit from the added parallelism. That’s not good for cluster efficiency and negatively affects the priority of your future jobs.”
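As far as we can tell from the Stan documentation, reduce_sum parallelizes within a chain using threads inside a single process (via the TBB), not MPI ranks, and it only runs in parallel if CmdStan was built with STAN_THREADS=true; Stan uses MPI only for map_rect models built with STAN_MPI. If that is right, then with --ntasks=128 and --ntasks-per-core=1, srun is presumably launching 128 independent single-core copies of the sampler. Below is a sketch of a resource request that matches the threaded design; it reuses our partition and constraint, takes the administrator’s figure of 56 CPUs per node, and assumes the model has been recompiled with threading enabled:

#!/bin/bash
#SBATCH --nodes=1
#SBATCH --ntasks=1
#SBATCH --cpus-per-task=56
#SBATCH --constraint=x2680
#SBATCH --exclusive
#SBATCH --partition=multinode
#SBATCH --mem-per-cpu=2g
# requires a threaded build: add STAN_THREADS=true to CmdStan's make/local,
# run make clean-all, and recompile the model; otherwise reduce_sum runs serially
./ModelName \
sample \
num_chains=4 \
num_samples=1000 \
num_warmup=1000 \
num_threads=$SLURM_CPUS_PER_TASK \
data file=filename.json \
output file=output.csv

With num_chains=4 in a single process, those 56 threads are shared across the four chains; checking CPU utilization on the node (or a tool like seff, if it is available on the cluster) should then show whether the extra cores are actually being used.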
This is our model code:
// Modeling code: fit the CASANDRE parameters for (1) a perceptual task with multiple confidence conditions and sensitivity values, (2) a value-based decision-making task, and (3) the correlation between the two tasks' meta-uncertainty parameters, in a Bayesian hierarchical model
functions {
//Custom quantile function for lognormal distribution
matrix log_normal_qf(vector x, vector m, vector s) {
//takes a vector sampling the x-axis in terms of cumulative density, plus vectors of mu and sigma parameters
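//note: sqrt(2) * inv_erfc(-2*x + 2) = inv_Phi(x), so each entry of the result is
//exp(m + s * inv_Phi(x)), i.e. the lognormal quantile function evaluated at x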
return exp(rep_matrix(s' * sqrt(2), size(x)) .* rep_matrix(inv_erfc(-2 * x + 2), size(m)) + rep_matrix(m', size(x)));
}
//Custom copula function
real normal_copula(real u, real v, real rho) {
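//u and v are assumed to already be standard normal scores (e.g., inv_Phi of a CDF value);
//returns the log density of the bivariate Gaussian copula:
//(2*rho*u*v - rho^2*(u^2 + v^2)) / (2*(1 - rho^2)) - 0.5*log(1 - rho^2)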
real rho_sq = square(rho);
return (0.5 * rho * (-2. * u * v + square(u) * rho + square(v) * rho)) / (-1. + rho_sq) - 0.5 * log1m(rho_sq);
}
//Likelihood model for CASANDRE
real casandre_log(array[] matrix resps, vector guess, matrix sds, matrix sm, vector nconf, vector pconf) {
//declare and initialize log likelihood variable
real llhC;
llhC = 0;
//loop over participants to calculate log likelihood
for (n in 1:size(resps)) {
//declare the indicator array marking valid (non-padded) trials
array[rows(resps[n])] int m;
//mark each trial that contains a response (its row sums to one)
for (tt in 1:size(m)) {
m[tt] = sum(resps[n][tt]) == 1;
}
//declare the likelihood variable
matrix[rows(sm),4] lhC;
//check if subject has any non-zero data before running
if (sum(resps[n]) != 0) {
//declare incrementing and index variables
int t;
array[sum(m)] int ind;
//initialize incrementing variable
t = 1;
//declare and calculate mu parameters for normal cdfs
matrix[rows(sm),rows(sds)] avgs;
avgs = rep_matrix(sm[,n],rows(sds)).*rep_matrix(sds[,n]',rows(sm));
//loop over trials
for (tr in 1:rows(sm)){
//check if trial has any non-zero data before running
if (sum(resps[n][tr]) != 0) {
//declare the matrix of sampled cumulative densities
matrix[3,rows(sds)] raws;
//sample the cumulative density of normals along the transformed x-axis for each confidence bin
for (rws in 1:rows(sds)) {
raws[1,rws] = normal_cdf(-nconf[n] | avgs[tr,rws], sds[rws,n]);
raws[2,rws] = normal_cdf(0 | avgs[tr,rws], sds[rws,n]);
raws[3,rws] = normal_cdf(pconf[n] | avgs[tr,rws], sds[rws,n]);
}
//declare the cumulative likelihood variable
vector[3] ratiodist;
//take the mean of the sampled densities
ratiodist[1] = mean(raws[1]);
ratiodist[2] = mean(raws[2]);
ratiodist[3] = mean(raws[3]);
//implement a lapse rate parameter to absorb unmodeled noise, and calculate likelihoods of each choice possibility in the trial
lhC[tr,1] = ((guess[n]/4) + ((1-guess[n])*ratiodist[1]));
lhC[tr,2] = ((guess[n]/4) + ((1-guess[n])*(ratiodist[2]-ratiodist[1])));
lhC[tr,3] = ((guess[n]/4) + ((1-guess[n])*(ratiodist[3]-ratiodist[2])));
lhC[tr,4] = ((guess[n]/4) + ((1-guess[n])*(1-ratiodist[3])));
//add this trial to the index
ind[t] = tr;
//increment the index
t = t + 1;
}
}
//multiply the choice matrix by the log of the likelihood matrix
llhC += sum(columns_dot_product(resps[n][ind], log(lhC[ind])));
}
}
//return the log likelihood for all data given the sampled parameters
return llhC;
}
//Partial sum function
real partial_sum_casandre_log(array[] matrix slice_n_resps, int start, int end, vector guess, matrix sds, matrix sm, vector nconf, vector pconf) {
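//start and end are the 1-based indices of this slice within the full subject array,
//so the per-subject parameters are sliced with start:end to stay aligned with the data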
//dynamically slice data into the log likelihood function
return casandre_log(slice_n_resps, guess[start:end], sds[:,start:end], sm[:,start:end], nconf[start:end], pconf[start:end]);
}
}
data {
//declare the number of subjects
int ns;
//declare the number of trials in the largest data set for each experiment (smaller sets are zero-padded to this length)
int nt_p;
int nt_v;
//declare the number of conditions and number of contrast levels in the perceptual domain
int ncond;
int ncont;
//declare the response matrix (one subject per array) for perceptual domain
array[ncond,ncont,ns] matrix[nt_p,4] resps_p;
//declare the response matrix (one per subject) for the value-based domain
array[ns] matrix[nt_v,4] resps_v;
//declare the experimental values matrices (one column per subject in each matrix) for the perceptual domain
array[ncond,ncont] matrix[nt_p,ns] vals_p;
//declare the experimental values matrix (one column per subject) for the value-based domain
matrix[nt_v,ns] vals_v;
//declare the number of x-axis samples and the vector sampling the x-axis in terms of cumulative density
int sampn;
vector[sampn] sampx;
}
parameters {
//Perceptual parameters
vector<lower=0,upper=1>[ns] guess_p; //lapse rate
matrix<lower=0>[ncont,ns] sens_p; //stimulus sensitivities
matrix[ncont,ns] crit_p; //decision criteria
vector<lower=0>[ns] meta_p; //meta-uncertainties
matrix<lower=0>[ncond,ns] nconf_p; //negative confidence criteria
matrix<lower=0>[ncond,ns] pconf_p; //positive confidence criteria
//Value-based parameters
vector<lower=0,upper=1>[ns] guess_v; //lapse rate
vector<lower=0>[ns] sens_v; //stimulus sensitivities
vector<lower=0>[ns] meta_v; //meta-uncertainties
vector<lower=0>[ns] nconf_v; //negative confidence criteria
vector<lower=0>[ns] pconf_v; //positive confidence criteria
//Perceptual hyperparameters
vector[ncont] ssm_p; //mu hyperparameters of each contrast level's stimulus sensitivity lognormal prior
vector<lower=0>[ncont] sss_p; //sigma hyperparameters of each contrast level's stimulus sensitivity lognormal prior
vector[ncont] scm_p; //mu hyperparameters of each contrast level's decision criterion normal prior
vector<lower=0>[ncont] scs_p; //sigma hyperparameters of each contrast level's decision criterion normal prior
real mum_p; //mu hyperparameter of the meta-uncertainty lognormal prior (shared across conditions)
real<lower=0> mus_p; //sigma hyperparameter of the meta-uncertainty lognormal prior (shared across conditions)
vector[ncond] nccm_p; //mu hyperparameters of each condition's negative confidence criterion lognormal prior
vector<lower=0>[ncond] nccs_p; //sigma hyperparameters of each condition's negative confidence criterion lognormal prior
vector[ncond] pccm_p; //mu hyperparameters of each condition's positive confidence criterion lognormal prior
vector<lower=0>[ncond] pccs_p; //sigma hyperparameters of each condition's positive confidence criterion lognormal prior
//Value-based hyperparameters
real ssm_v; //mu hyperparameter of the stimulus sensitivity lognormal prior
real<lower=1> sss_v; //sigma hyperparameter of the stimulus sensitivity lognormal prior
real mum_v; //mu hyperparameter of the meta-uncertainty lognormal prior
real<lower=1> mus_v; //sigma hyperparameter of the meta-uncertainty lognormal prior
real nccm_v; //mu hyperparameter of the negative confidence criterion lognormal prior
real<lower=1> nccs_v; //sigma hyperparameter of the negative confidence criterion lognormal prior
real pccm_v; //mu hyperparameter of the positive confidence criterion lognormal prior
real<lower=1> pccs_v; //sigma hyperparameter of the positive confidence criterion lognormal prior
//Correlation parameter
real<lower=-1,upper=1> rho;
}
model {
//Perceptual hyperpriors
ssm_p ~ normal(0,1);
sss_p ~ lognormal(0,1);
scm_p ~ normal(0,1);
scs_p ~ lognormal(0,1);
mum_p ~ normal(0,1);
mus_p ~ lognormal(0,1);
nccm_p ~ normal(0,1);
nccs_p ~ lognormal(0,1);
pccm_p ~ normal(0,1);
pccs_p ~ lognormal(0,1);
//Value-based hyperpriors
ssm_v ~ normal(0,1);
sss_v ~ lognormal(0,1);
mum_v ~ normal(0,1);
mus_v ~ lognormal(0,1);
nccm_v ~ normal(0,1);
nccs_v ~ lognormal(0,1);
pccm_v ~ normal(0,1);
pccs_v ~ lognormal(0,1);
//Perceptual priors
guess_p ~ beta(1,197.0/3.0);
meta_p ~ lognormal(mum_p,mus_p);
for (cont in 1:ncont) {
sens_p[cont] ~ lognormal(ssm_p[cont],sss_p[cont]);
crit_p[cont] ~ normal(scm_p[cont],scs_p[cont]);
}
for (cond in 1:ncond) {
nconf_p[cond] ~ lognormal(nccm_p[cond],nccs_p[cond]);
pconf_p[cond] ~ lognormal(pccm_p[cond],pccs_p[cond]);
}
//Value-based priors
guess_v ~ beta(1,197.0/3.0);
sens_v ~ lognormal(ssm_v,sss_v);
meta_v ~ lognormal(mum_v,mus_v);
nconf_v ~ lognormal(nccm_v,nccs_v);
pconf_v ~ lognormal(pccm_v,pccs_v);
//Perceptual likelihood model (with local variable calculations)
for (cond in 1:ncond) {
//declare the transformed x-axis matrices
matrix[sampn,ns] xtrans_p;
matrix[sampn,ns] sds_p;
//calculate the transformed x-axis matrices
xtrans_p = log_normal_qf(sampx,-0.5*log1p(meta_p.^2),sqrt(log1p(meta_p.^2)));
sds_p = 1 ./ xtrans_p;
for (cont in 1:ncont) {
//check if condition and contrast level combination has non-zero data
if (sum(abs(vals_p[cond][cont])) != 0) {
//declare matrices for calculating this contrast level's transformed data
matrix[nt_p,ns] om_p;
row_vector[ns] cm_p;
matrix[nt_p,ns] sm_p;
//orientations and decision criteria in terms of standard deviations
om_p = vals_p[cond][cont].*rep_matrix(sens_p[cont],nt_p);
cm_p = crit_p[cont].*sens_p[cont];
//transform the data
sm_p = om_p-rep_matrix(cm_p,nt_p);
//hand the data and parameters to the reduce_sum wrapper for dynamic within-chain parallelization
target += reduce_sum(partial_sum_casandre_log, resps_p[cond][cont], 1, guess_p, sds_p, sm_p, nconf_p[cond]', pconf_p[cond]');
}
}
}
//Value-based likelihood model (with local variable calculations)
//declare the transformed x-axis matrices
matrix[sampn,ns] xtrans_v;
matrix[sampn,ns] sds_v;
//calculate the transformed x-axis matrices
xtrans_v = log_normal_qf(sampx,-0.5*log1p(meta_v.^2),sqrt(log1p(meta_v.^2)));
sds_v = 1 ./ xtrans_v;
//declare matrices for calculating transformed data
matrix[nt_v,ns] om_v;
matrix[nt_v,ns] sm_v;
//stimulus values in terms of standard deviations
om_v = vals_v.*rep_matrix(sens_v',nt_v);
//no decision criterion in the value-based task, so the transformed data are just the scaled values
sm_v = om_v;
//hand the data and parameters to the reduce_sum wrapper for dynamic within-chain parallelization
target += reduce_sum(partial_sum_casandre_log, resps_v, 1, guess_v, sds_v, sm_v, nconf_v, pconf_v);
//Correlation copula
real y_p;
real y_v;
for (n in 1:ns) {
y_p = inv_Phi(lognormal_cdf(meta_p[n] | mum_p, mus_p));
y_v = inv_Phi(lognormal_cdf(meta_v[n] | mum_v, mus_v));
target += normal_copula(y_p,y_v,rho);
}
}