There is this hacky way of doing it with `model.sample`

combined with the `inits` argument

If `step_size` and `inv_metric` are given, the results are wrong (?) — I'm not sure why.

```
# Demo: recompute the lp__ of every posterior draw by re-running the sampler
# for a single non-adapting iteration with `inits` set to that draw, then
# compare against the lp__ values reported for the original fit.
model_code = """
data {
int N;
int K;
vector[N] x[K];
vector[N] y[K];
}
parameters {
real b;
real m[K];
real sigma[K];
}
model {
for (k in 1:K) {
y[k] ~ normal(x[k] * m[k] + b, sigma[k]);
}
b ~ std_normal();
m ~ std_normal();
}
"""
stan_path = "regr_example.stan"
with open(stan_path, "w") as f:
    print(model_code, file=f)

import numpy as np

np.random.seed(3)
N = 35  # observations per group
K = 3   # number of groups

# Synthetic data: one straight line per group, y = m_k * x + b + noise.
x = np.sort(np.random.randn(K, N), axis=1)
m_true = np.c_[[2.3, 4.5, 6.6]]  # column vector -> broadcasts over (K, N)
b_true = 10
y_raw = x * m_true + b_true
noise = np.random.randn(K, N) / 10
y = y_raw + noise
data = dict(
    N=N,
    K=K,
    x=x,
    y=y,
)

import json
from cmdstanpy import CmdStanModel

model = CmdStanModel(stan_file=stan_path)
# sig_figs=16: write full precision to the CSVs so the round-trip through
# `inits` loses as little as possible.
fit = model.sample(iter_sampling=100, data=data, sig_figs=16)
draws = fit.stan_variables()

lp_values = []
for chain in range(fit.chains):
    lp_chain = []
    # NOTE(review): feeding step_size/metric back in gave wrong results
    # (see the note above the code block); left disabled on purpose.
    #inv_metric_path = f"metric_file_chain_{chain}.json"
    #inv_metric = fit.metric[chain].tolist()
    #with open(inv_metric_path, "w") as f:
    #    json.dump({"inv_metric": inv_metric}, f)
    for draw in range(fit.num_draws_sampling):
        # stan_variables() concatenates chains along the draw axis,
        # chain-major, so this is the flat index of (chain, draw).
        i = chain * fit.num_draws_sampling + draw
        init_values = {name: value[i] for name, value in draws.items()}
        fit_ = model.sample(
            iter_warmup=0,
            iter_sampling=1,
            chains=1,
            #step_size=fit.step_size[chain],
            #metric=inv_metric_path,
            adapt_engaged=False,
            data=data,
            inits=init_values,
            sig_figs=16,
        )
        lp_chain.append(fit_.sampler_variables()["lp__"].ravel()[0])
    lp_values.append(lp_chain)

# Transpose (chains, draws) -> (draws, chains) to match sampler_variables().
lp__new = np.array(lp_values).T
lp__original = fit.sampler_variables()["lp__"]
print("max absolute error", abs(lp__new - lp__original).max())
# max absolute error 3.19745083743328e-08
# Divide by |lp__| — lp__ can be negative, and a signed denominator would
# corrupt the relative-error statistic.
print("max relative error", (abs(lp__new - lp__original) / abs(lp__original)).max())
# max relative error 2.739793033513317e-10
```