After dealing with the recent Catalina upgrade on Mac and getting Rstan to work again, I’m getting a new error message that I haven’t seen before when using brms
“Error in close.connection(zz) : cannot close ‘message’ sink connection”
I’m wondering if anyone else has encountered this problem and if so, how they fixed the issue.
I did the standard googling to find an answer, but I have had no luck. I’ve also tried uninstalling and re-instaling R which didn’t solve the problem
This is missing its closing parenthesis. It should be target += normal_lpdf(b | 0, 10);. If you tell @paul.buerkner how you called brm(), he can fix it. Otherwise, you can just copy that to a .stan file, fix the closing parenthesis, and call stan yourself.
@bgoodri I get the same error:
Error in close.connection(zz) : cannot close ‘message’ sink connection
When I call debug(Stan::stand) and then writeLines(model_code) I get the following output. I believe that the parentheses are as they should be. Can you help?
// generated with brms 2.10.0
functions {
/* cumulative-logit log-PDF for a single response
Args:
y: response category
mu: linear predictor
thres: ordinal thresholds
disc: discrimination parameter
Returns:
a scalar to be added to the log posterior
*/
real cumulative_logit_lpmf(int y, real mu, vector thres, real disc) {
int ncat = num_elements(thres) + 1;
real p;
if (y == 1) {
p = inv_logit(disc * (thres[1] - mu));
} else if (y == ncat) {
p = 1 - inv_logit(disc * (thres[ncat - 1] - mu));
} else {
p = inv_logit(disc * (thres[y] - mu)) -
inv_logit(disc * (thres[y - 1] - mu));
}
return log§;
}
}
data {
int<lower=1> N; // number of observations
int<lower=2> ncat; // number of categories
int Y[N]; // response variable
int<lower=1> K; // number of population-level effects
matrix[N, K] X; // population-level design matrix
real<lower=0> disc; // discrimination parameters
// data for group-level effects of ID 1
int<lower=1> N_1; // number of grouping levels
int<lower=1> M_1; // number of coefficients per level
int<lower=1> J_1[N]; // grouping indicator per observation
// group-level predictor values
vector[N] Z_1_1;
// data for group-level effects of ID 2
int<lower=1> N_2; // number of grouping levels
int<lower=1> M_2; // number of coefficients per level
int<lower=1> J_2[N]; // grouping indicator per observation
// group-level predictor values
vector[N] Z_2_1;
int prior_only; // should the likelihood be ignored?
}
transformed data {
int Kc = K;
matrix[N, Kc] Xc; // centered version of X
vector[Kc] means_X; // column means of X before centering
for (i in 1:K) {
means_X[i] = mean(X[, i]);
Xc[, i] = X[, i] - means_X[i];
}
}
parameters {
vector<lower=0>[Kc] b; // population-level effects
// temporary thresholds for centered predictors
ordered[ncat - 1] Intercept;
vector<lower=0>[M_1] sd_1; // group-level standard deviations
// standardized group-level effects
vector[N_1] z_1[M_1];
vector<lower=0>[M_2] sd_2; // group-level standard deviations
// standardized group-level effects
vector[N_2] z_2[M_2];
}
transformed parameters {
// actual group-level effects
vector[N_1] r_1_1 = (sd_1[1] * (z_1[1]));
// actual group-level effects
vector[N_2] r_2_1 = (sd_2[1] * (z_2[1]));
}
model {
// initialize linear predictor term
vector[N] mu = Xc * b;
for (n in 1:N) {
// add more terms to the linear predictor
mu[n] += r_1_1[J_1[n]] * Z_1_1[n] + r_2_1[J_2[n]] * Z_2_1[n];
}
// priors including all constants
target += normal_lpdf(b[1] | 0,.1)
- 1 * normal_lccdf(0 | 0,.1);
target += normal_lpdf(b[2] | 0,.1)
- 1 * normal_lccdf(0 | 0,.1);
target += normal_lpdf(b[3] | 0,.1)
- 1 * normal_lccdf(0 | 0,.1);
target += normal_lpdf(b[4] | 0,.1)
- 1 * normal_lccdf(0 | 0,.1);
target += normal_lpdf(b[5] | 0,.1)
- 1 * normal_lccdf(0 | 0,.1);
target += normal_lpdf(b[6] | 0,.1)
- 1 * normal_lccdf(0 | 0,.1);
target += normal_lpdf(b[7] | 0,.1)
- 1 * normal_lccdf(0 | 0,.1);
target += normal_lpdf(b[8] | 0,.1)
- 1 * normal_lccdf(0 | 0,.1);
target += normal_lpdf(b[9] | 0,.1)
- 1 * normal_lccdf(0 | 0,.1);
target += normal_lpdf(b[10] | 0,.1)
- 1 * normal_lccdf(0 | 0,.1);
target += normal_lpdf(b[11] | 0,.1)
- 1 * normal_lccdf(0 | 0,.1);
target += normal_lpdf(b[12] | 0,.1)
- 1 * normal_lccdf(0 | 0,.1);
target += normal_lpdf(b[13] | 0,.1)
- 1 * normal_lccdf(0 | 0,.1);
target += normal_lpdf(b[14] | 0,.1)
- 1 * normal_lccdf(0 | 0,.1);
target += normal_lpdf(b[15] | 0,.1)
- 1 * normal_lccdf(0 | 0,.1);
target += normal_lpdf(b[16] | 0,.1)
- 1 * normal_lccdf(0 | 0,.1);
target += normal_lpdf(b[17] | 0,.1)
- 1 * normal_lccdf(0 | 0,.1);
target += normal_lpdf(b[18] | 0,.1)
- 1 * normal_lccdf(0 | 0,.1);
target += normal_lpdf(b[19] | 0,.1)
- 1 * normal_lccdf(0 | 0,.1);
target += normal_lpdf(b[20] | 0,.1)
- 1 * normal_lccdf(0 | 0,.1);
target += normal_lpdf(b[21] | 0,.1)
- 1 * normal_lccdf(0 | 0,.1);
target += normal_lpdf(b[22] | 0,.1)
- 1 * normal_lccdf(0 | 0,.1);
target += normal_lpdf(b[23] | 0,.1)
- 1 * normal_lccdf(0 | 0,.1);
target += normal_lpdf(b[24] | 0,.1)
- 1 * normal_lccdf(0 | 0,.1);
target += normal_lpdf(b[25] | 0,.1)
- 1 * normal_lccdf(0 | 0,.1);
target += normal_lpdf(b[26] | 0,.1)
- 1 * normal_lccdf(0 | 0,.1);
target += normal_lpdf(b[27] | 0,.1)
- 1 * normal_lccdf(0 | 0,.1);
target += normal_lpdf(b[28] | 0,.1)
- 1 * normal_lccdf(0 | 0,.1);
target += normal_lpdf(b[29] | 0,.1)
- 1 * normal_lccdf(0 | 0,.1);
target += normal_lpdf(b[30] | 0,.1)
- 1 * normal_lccdf(0 | 0,.1);
target += normal_lpdf(b[31] | 0,.1)
- 1 * normal_lccdf(0 | 0,.1);
target += normal_lpdf(b[32] | 0,.1)
- 1 * normal_lccdf(0 | 0,.1);
target += normal_lpdf(b[33] | 0,.1)
- 1 * normal_lccdf(0 | 0,.1);
target += normal_lpdf(b[34] | 0,.1)
- 1 * normal_lccdf(0 | 0,.1);
target += normal_lpdf(b[35] | 0,.1)
- 1 * normal_lccdf(0 | 0,.1);
target += normal_lpdf(Intercept | 0,.1;
target += normal_lpdf(sd_1 | 0,.1)
- 1 * normal_lccdf(0 | 0,.1);
target += normal_lpdf(z_1[1] | 0, 1);
target += normal_lpdf(sd_2 | 0,.1)
- 1 * normal_lccdf(0 | 0,.1);
target += normal_lpdf(z_2[1] | 0, 1);
// likelihood including all constants
if (!prior_only) {
for (n in 1:N) {
target += ordered_logistic_lpmf(Y[n] | mu[n], Intercept);
}
}
}
generated quantities {
// compute actual thresholds
vector[ncat - 1] b_Intercept = Intercept + dot_product(means_X, b);
}