data {
  int<lower=2> K;                      // num topics
  int<lower=2> V;                      // num words
  int<lower=1> M;                      // num docs
  int<lower=1> N;                      // total word instances
  array[N] int<lower=1, upper=V> w;    // word n
  array[N] int<lower=1, upper=M> doc;  // doc ID for word n
  vector<lower=0>[V] beta;             // word prior
}
parameters {
  vector[K] mu;              // topic mean
  corr_matrix[K] Omega;      // correlation matrix
  vector<lower=0>[K] sigma;  // scales
  array[M] vector[K] eta;    // logit topic dist for doc m
  array[K] simplex[V] phi;   // word dist for topic k
}
transformed parameters {
  array[M] simplex[K] theta;  // simplex topic dist for doc m
  cov_matrix[K] Sigma;        // covariance matrix
  for (m in 1:M)
    theta[m] = softmax(eta[m]);
  // Sigma is K x K, so loop over topics, not docs (the original looped over M);
  // the lower triangle must also be filled in explicitly to keep Sigma symmetric
  for (m in 1:K) {
    // diagonal: Omega[m, m] == 1, so this is the variance sigma[m]^2
    Sigma[m, m] = sigma[m] * sigma[m] * Omega[m, m];
    for (n in (m + 1):K) {
      Sigma[m, n] = sigma[m] * sigma[n] * Omega[m, n];
      Sigma[n, m] = Sigma[m, n];  // symmetric lower triangle
    }
  }
}
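
The scale-and-correlate loop above can be collapsed into one line with Stan's built-in quad_form_diag, which computes diag_matrix(sigma) * Omega * diag_matrix(sigma) directly; an equivalent sketch:

  // equivalent to the loop: Sigma = diag(sigma) * Omega * diag(sigma)
  Sigma = quad_form_diag(Omega, sigma);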
model {
  // priors
  for (k in 1:K)
    phi[k] ~ dirichlet(beta);
  mu ~ normal(0, 5);
  Omega ~ lkj_corr(2.0);
  sigma ~ cauchy(0, 5);
  // topic distribution for docs
  for (m in 1:M)
    eta[m] ~ multi_normal(mu, Sigma);
  // token probabilities: marginalize the discrete topic assignment for each token
  for (n in 1:N) {
    vector[K] gamma;
    for (k in 1:K)
      gamma[k] = log(theta[doc[n], k]) + log(phi[k, w[n]]);
    target += log_sum_exp(gamma);  // likelihood
  }
}
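
In practice the multivariate normal over eta is usually given a Cholesky-factor parameterization, which is faster and more numerically stable than building Sigma explicitly. A minimal sketch of the substitution, assuming everything not shown stays as above (this is a common Stan idiom, not part of this commit):

parameters {
  cholesky_factor_corr[K] L_Omega;  // replaces corr_matrix[K] Omega
  // ... mu, sigma, eta, phi unchanged ...
}
model {
  L_Omega ~ lkj_corr_cholesky(2.0);  // same LKJ(2) prior, in Cholesky form
  for (m in 1:M)
    eta[m] ~ multi_normal_cholesky(mu, diag_pre_multiply(sigma, L_Omega));
  // ... remaining priors and the token loop unchanged ...
}

With this version the transformed-parameter Sigma is no longer needed; if the covariance matrix itself is of interest, it can be recovered in generated quantities as multiply_lower_tri_self_transpose(diag_pre_multiply(sigma, L_Omega)).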
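
If pointwise log-likelihoods are wanted for model comparison (e.g., PSIS-LOO), a generated quantities block can recompute the same marginal token probability used in the model block; a minimal sketch under that assumption (log_lik is a name introduced here, not in the original):

generated quantities {
  vector[N] log_lik;  // hypothetical addition: marginal log-likelihood per token
  for (n in 1:N) {
    vector[K] gamma;
    for (k in 1:K)
      gamma[k] = log(theta[doc[n], k]) + log(phi[k, w[n]]);
    log_lik[n] = log_sum_exp(gamma);
  }
}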