m_par.stan (1.3 KB)

sim_data.r (940.8 KB)

Hello all,

Here’s my “Hello world” in parallel. It is just a standard normal linear regression, but with K explanatory variables. This means that I need to “flatten” Y[1:J] and X[1:J,1:K] into a vector, where 1:J are the observations sent to one shard, and then stack these vectors on top of each other. The `xi` int array is useful for storing indices and other bookkeeping information.

The model works in the sense that several cores are humming along for the chain, and parameter estimates are correct. Happy to hear if anyone has a more elegant way of doing this…!

But hey - it works! :-) :-) :-)

-op

```
functions {
  // Shard-level log likelihood for the normal linear regression.
  //
  // sigma_beta packs the shared parameters as [sigma, beta[1:K]].
  // theta carries shard-specific parameters (none for this model).
  // xs packs this shard's reals as [Y[1:J], X[,1], ..., X[,K]].
  // xi = {J, K}: observations per shard and number of covariates.
  // Returns a length-1 vector holding the shard's log likelihood,
  // as required by map_rect.
  vector bl_glm(vector sigma_beta, vector theta,
                real[] xs, int[] xi) {
    int J = xi[1];
    int K = xi[2];
    real sigma = sigma_beta[1];
    vector[K] beta = sigma_beta[2:(K + 1)];
    vector[J] Y = to_vector(xs[1:J]);
    matrix[J, K] X;
    // Unpack column j of X from its slab of the flattened array.
    for (j in 1:K)
      X[, j] = to_vector(xs[(J * j + 1):(J * (j + 1))]);
    return [normal_lpdf(Y | X * beta, sigma)]';
  }
}
data {
int<lower = 0> K;      // number of covariates
int<lower = 0> shards; // number of map_rect shards (should divide N evenly)
int<lower = 0> N;      // total number of observations
matrix[N,K] X;         // design matrix
vector[N] Y;           // response vector
}
transformed data {
// Shard-specific parameters: none needed for this model.
vector[0] theta[shards];
int<lower = 0> J = N / shards; // observations per shard (integer division)
// Per-shard packed reals: [Y[1:J], X[,1], ..., X[,K]].
real x_r[shards, J * (K+1)];
// Per-shard packed ints: {J, K}, so bl_glm can recover the dimensions.
int x_i[shards, 2];
// Integer division truncates: if shards does not divide N, the last
// N mod shards observations would be silently dropped. Fail loudly instead.
if (J * shards != N)
  reject("N (", N, ") must be divisible by shards (", shards, ")");
for (k in 1:shards) {
  // Contiguous block of observations belonging to shard k.
  int start = (k - 1) * J + 1;
  int end = k * J;
  x_r[k, 1:J] = to_array_1d(Y[start:end]);
  // Pack each covariate column after Y, one J-length slab per column.
  for (cov in 1:K)
    x_r[k, (1 + J*cov):(J + J*cov)] = to_array_1d(X[start:end, cov]);
  x_i[k, 1] = J;
  x_i[k, 2] = K;
}
}
parameters {
vector[K] beta;       // regression coefficients
real<lower=0> sigma;  // residual standard deviation
}
model {
// Weakly informative priors on the regression parameters.
beta ~ normal(0, 2);
sigma ~ normal(0, 2);
// Evaluate the shard log likelihoods in parallel and add their sum to
// the target. append_row packs sigma first, then beta — this order must
// match the unpacking done inside bl_glm.
target += sum(map_rect(bl_glm, append_row(sigma, beta),
theta, x_r, x_i));
}
```

edit: Uploaded wrong data file