
Commit ed04cb6

More md -> mdx
1 parent c695f6b commit ed04cb6

2 files changed: +47 -46 lines changed


.astro/types.d.ts

Lines changed: 27 additions & 27 deletions
```diff
@@ -136,13 +136,13 @@ declare module 'astro:content' {
 
 type ContentEntryMap = {
 "post": {
-"annealed-importance-sampling.md": {
-id: "annealed-importance-sampling.md";
+"annealed-importance-sampling.mdx": {
+id: "annealed-importance-sampling.mdx";
 slug: "annealed-importance-sampling";
 body: string;
 collection: "post";
 data: InferEntrySchema<"post">
-} & { render(): Render[".md"] };
+} & { render(): Render[".mdx"] };
 "autoencoders.md": {
 id: "autoencoders.md";
 slug: "autoencoders";
@@ -164,20 +164,20 @@ declare module 'astro:content' {
 collection: "post";
 data: InferEntrySchema<"post">
 } & { render(): Render[".md"] };
-"boundary-seeking-gan.md": {
-id: "boundary-seeking-gan.md";
+"boundary-seeking-gan.mdx": {
+id: "boundary-seeking-gan.mdx";
 slug: "boundary-seeking-gan";
 body: string;
 collection: "post";
 data: InferEntrySchema<"post">
-} & { render(): Render[".md"] };
-"brouwers-fixed-point.md": {
-id: "brouwers-fixed-point.md";
+} & { render(): Render[".mdx"] };
+"brouwers-fixed-point.mdx": {
+id: "brouwers-fixed-point.mdx";
 slug: "brouwers-fixed-point";
 body: string;
 collection: "post";
 data: InferEntrySchema<"post">
-} & { render(): Render[".md"] };
+} & { render(): Render[".mdx"] };
 "chentsov-theorem.mdx": {
 id: "chentsov-theorem.mdx";
 slug: "chentsov-theorem";
@@ -255,13 +255,13 @@ declare module 'astro:content' {
 collection: "post";
 data: InferEntrySchema<"post">
 } & { render(): Render[".md"] };
-"fisher-information.md": {
-id: "fisher-information.md";
+"fisher-information.mdx": {
+id: "fisher-information.mdx";
 slug: "fisher-information";
 body: string;
 collection: "post";
 data: InferEntrySchema<"post">
-} & { render(): Render[".md"] };
+} & { render(): Render[".mdx"] };
 "forward-reverse-kl.md": {
 id: "forward-reverse-kl.md";
 slug: "forward-reverse-kl";
@@ -290,13 +290,13 @@ declare module 'astro:content' {
 collection: "post";
 data: InferEntrySchema<"post">
 } & { render(): Render[".md"] };
-"gibbs-sampling.md": {
-id: "gibbs-sampling.md";
+"gibbs-sampling.mdx": {
+id: "gibbs-sampling.mdx";
 slug: "gibbs-sampling";
 body: string;
 collection: "post";
 data: InferEntrySchema<"post">
-} & { render(): Render[".md"] };
+} & { render(): Render[".mdx"] };
 "gleam-use.mdx": {
 id: "gleam-use.mdx";
 slug: "gleam-use";
@@ -346,13 +346,13 @@ declare module 'astro:content' {
 collection: "post";
 data: InferEntrySchema<"post">
 } & { render(): Render[".mdx"] };
-"lda-gibbs.md": {
-id: "lda-gibbs.md";
+"lda-gibbs.mdx": {
+id: "lda-gibbs.mdx";
 slug: "lda-gibbs";
 body: string;
 collection: "post";
 data: InferEntrySchema<"post">
-} & { render(): Render[".md"] };
+} & { render(): Render[".mdx"] };
 "least-squares-gan.md": {
 id: "least-squares-gan.md";
 slug: "least-squares-gan";
@@ -388,41 +388,41 @@ declare module 'astro:content' {
 collection: "post";
 data: InferEntrySchema<"post">
 } & { render(): Render[".mdx"] };
-"mayer-vietoris-sphere.md": {
-id: "mayer-vietoris-sphere.md";
+"mayer-vietoris-sphere.mdx": {
+id: "mayer-vietoris-sphere.mdx";
 slug: "mayer-vietoris-sphere";
 body: string;
 collection: "post";
 data: InferEntrySchema<"post">
-} & { render(): Render[".md"] };
+} & { render(): Render[".mdx"] };
 "metropolis-hastings.md": {
 id: "metropolis-hastings.md";
 slug: "metropolis-hastings";
 body: string;
 collection: "post";
 data: InferEntrySchema<"post">
 } & { render(): Render[".md"] };
-"minkowski-dirichlet.md": {
-id: "minkowski-dirichlet.md";
+"minkowski-dirichlet.mdx": {
+id: "minkowski-dirichlet.mdx";
 slug: "minkowski-dirichlet";
 body: string;
 collection: "post";
 data: InferEntrySchema<"post">
-} & { render(): Render[".md"] };
+} & { render(): Render[".mdx"] };
 "mle-vs-map.md": {
 id: "mle-vs-map.md";
 slug: "mle-vs-map";
 body: string;
 collection: "post";
 data: InferEntrySchema<"post">
 } & { render(): Render[".md"] };
-"natural-gradient.md": {
-id: "natural-gradient.md";
+"natural-gradient.mdx": {
+id: "natural-gradient.mdx";
 slug: "natural-gradient";
 body: string;
 collection: "post";
 data: InferEntrySchema<"post">
-} & { render(): Render[".md"] };
+} & { render(): Render[".mdx"] };
 "nn-optimization.md": {
 id: "nn-optimization.md";
 slug: "nn-optimization";
```

src/content/post/gibbs-sampling.mdx

Lines changed: 20 additions & 19 deletions
````diff
@@ -5,41 +5,43 @@ publishDate: 2015-10-09 17:01:00
 tags: [machine learning, programming, python]
 ---
 
+import BlogImage from '@/components/BlogImage.astro'
+
 After so many months struggling with Gibbs Sampling, now I conquered it! Well, kind of.
 
-This week's been a renaissance on MCMC. I decided to open up again those Machine Learning Summer School (MLSS) Cambridge 2009, and absent mindedly opened that lecture about MCMC by Iain Murray. Oh boy, did I hit the jackpot? That lecture was really great. Here, help yourself, watch the lecture <http://videolectures.net/mlss09uk_murray_mcmc/>.
+This week's been a renaissance on MCMC. I decided to open up again those Machine Learning Summer School (MLSS) Cambridge 2009 lectures, and absent-mindedly opened that lecture about MCMC by Iain Murray. Oh boy, did I hit the jackpot? That lecture was really great. Here, help yourself, watch the lecture: http://videolectures.net/mlss09uk_murray_mcmc/.
 
 So, full of confidence and revelation after watching that lecture, I decided to implement the Gibbs Sampler. Now, I won't dive deep into what Gibbs Sampling is and what not, but if you want to go deeper, I suggest you read this tutorial: Gibbs Sampling for the Uninitiated.
 
 Gibbs Sampling is an MCMC method to draw samples from a potentially really really complicated, high dimensional distribution that is hard to sample from analytically. The usual suspect would be those nasty integrals when computing the normalizing constant of the distribution, especially in Bayesian inference. The Gibbs Sampler can draw samples from any distribution, provided you can derive all of the conditional distributions of the joint distribution analytically.
 
 In this example I will use the Gibbs Sampler to draw samples from a Bivariate Gaussian with mu of `[5, 5]` and sigma/covariance matrix of `[[1, 0.9], [0.9, 1]]`. The exact distribution should look like this:
 
-![Gibbs]({{ site.baseurl }}/img/2015-10-09-gibbs-sampling/00.png)
+<BlogImage imagePath='/img/gibbs-sampling/00.png' altText='Target distribution.' />
 
 Now, pretend that this distribution is really complicated and very hard to sample (I know, I know, but please bear with me). We don't know how to sample from this directly, and we don't even know the shape of the distribution. However, because of some mathematical convenience, or maybe by just sheer luck, we know the conditional distributions: `P(X|Y)` and `P(Y|X)`. By now, it screams "Gibbs Sampling!".
 
-The derivation of conditional distribution of Multivariate Gaussian could be found here: <http://fourier.eng.hmc.edu/e161/lectures/gaussianprocess/node7.html>.
+The derivation of the conditional distribution of a Multivariate Gaussian can be found here: http://fourier.eng.hmc.edu/e161/lectures/gaussianprocess/node7.html.
 
 Let's inspect the Gibbs Sampler code, shall we?
 
 ```python
 import numpy as np
 import seaborn as sns
 
-def p*x_given_y(y, mus, sigmas):
-    mu = mus[0] + sigmas[1, 0] / sigmas[0, 0] * (y - mus[1])
-    sigma = sigmas[0, 0] - sigmas[1, 0] / sigmas[1, 1] \_ sigmas[1, 0]
-    return np.random.normal(mu, sigma)
+def p_x_given_y(y, mus, sigmas):
+    mu = mus[0] + sigmas[1, 0] / sigmas[0, 0] * (y - mus[1])
+    sigma = sigmas[0, 0] - sigmas[1, 0] / sigmas[1, 1] * sigmas[1, 0]
+    return np.random.normal(mu, sigma)
 
-def p*y_given_x(x, mus, sigmas):
-    mu = mus[1] + sigmas[0, 1] / sigmas[1, 1] * (x - mus[0])
-    sigma = sigmas[1, 1] - sigmas[0, 1] / sigmas[0, 0] \_ sigmas[0, 1]
-    return np.random.normal(mu, sigma)
+def p_y_given_x(x, mus, sigmas):
+    mu = mus[1] + sigmas[0, 1] / sigmas[1, 1] * (x - mus[0])
+    sigma = sigmas[1, 1] - sigmas[0, 1] / sigmas[0, 0] * sigmas[0, 1]
+    return np.random.normal(mu, sigma)
 
 def gibbs_sampling(mus, sigmas, iter=10000):
-    samples = np.zeros((iter, 2))
-    y = np.random.rand() \* 10
+    samples = np.zeros((iter, 2))
+    y = np.random.rand() * 10
 
     for i in range(iter):
         x = p_x_given_y(y, mus, sigmas)
@@ -48,13 +50,12 @@ y = np.random.rand() \* 10
 
     return samples
 
-if **name** == '**main**':
-    mus = np.array([5, 5])
-    sigmas = np.array([[1, .9], [.9, 1]])
+if __name__ == '__main__':
+    mus = np.array([5, 5])
+    sigmas = np.array([[1, .9], [.9, 1]])
 
     samples = gibbs_sampling(mus, sigmas)
     sns.jointplot(samples[:, 0], samples[:, 1])
-
 ```
 
 Really really really simple. The main algorithm is just what, 10 lines of code? Including whitespaces.
@@ -67,10 +68,10 @@ After a lot of iteration, it will then converge to approximately the exact distr
 
 Here's the result of that Gibbs Sampler:
 
-![Gibbs]({{ site.baseurl }}/img/2015-10-09-gibbs-sampling/01.png)
+<BlogImage imagePath='/img/gibbs-sampling/01.png' altText='Gibbs samples.' />
 
 Pretty good, huh?
 
 Gibbs Sampling is one hell of an algorithm. It's so simple, yet it took me a long time to get the intuition. It's an integral algorithm in the Bayesian Inference landscape. One of the popular implementations of Gibbs Sampling is in Mallet, where David Mimno uses a Gibbs Sampler to do inference for LDA. I haven't studied the Variational Bayes method, but based on my observation, LDA results using Gibbs Sampling are a lot better than those from the Variational method. I observed this in the case of the Mallet vs Gensim implementations of LDA.
 
-For closing note, I really really really suggest you to watch this lecture <http://videolectures.net/mlss09uk_murray_mcmc/>. What an excellent lecture, that is.
+As a closing note, I really really really suggest you watch this lecture: http://videolectures.net/mlss09uk_murray_mcmc/. What an excellent lecture, that is.
````
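For reference, the conditionals sampled by `p_x_given_y` and `p_y_given_x` in the post above are the standard bivariate Gaussian conditioning formulas; with the post's `mu = [5, 5]` and `sigma = [[1, 0.9], [0.9, 1]]` they reduce to a one-dimensional Gaussian with shifted mean and shrunken variance:

```latex
% Conditioning a bivariate Gaussian with mean (\mu_x, \mu_y) and
% covariance entries \sigma_{xx}, \sigma_{xy}, \sigma_{yy}:
X \mid Y = y \;\sim\; \mathcal{N}\!\left(
    \mu_x + \frac{\sigma_{xy}}{\sigma_{yy}}\,(y - \mu_y),\;
    \sigma_{xx} - \frac{\sigma_{xy}^2}{\sigma_{yy}}
\right)
% With the post's parameters (variance: 1 - 0.9^2 = 0.19):
X \mid Y = y \;\sim\; \mathcal{N}\bigl(5 + 0.9\,(y - 5),\; 0.19\bigr)
```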

0 commit comments
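And here is a minimal, self-contained sketch for sanity-checking the sampler against the target moments. This is not the post's exact code: the `cond_params` helper and the `default_rng` seeding are assumptions of this sketch, and it passes `np.sqrt` of the conditional variance, since NumPy's `normal` expects a standard deviation where the post's functions pass the variance itself.

```python
import numpy as np

def cond_params(z, i, j, mus, sigmas):
    """Mean and std dev of component i of a bivariate Gaussian, given component j = z."""
    mu = mus[i] + sigmas[i, j] / sigmas[j, j] * (z - mus[j])
    var = sigmas[i, i] - sigmas[i, j] ** 2 / sigmas[j, j]
    return mu, np.sqrt(var)  # normal() takes a std dev, not a variance

def gibbs_sampling(mus, sigmas, n_iter=10000, seed=0):
    rng = np.random.default_rng(seed)
    samples = np.zeros((n_iter, 2))
    y = rng.uniform(0, 10)  # arbitrary starting point
    for t in range(n_iter):
        # Alternate the two exact conditional draws: the Gibbs sweep.
        x = rng.normal(*cond_params(y, 0, 1, mus, sigmas))  # x ~ P(X | Y = y)
        y = rng.normal(*cond_params(x, 1, 0, mus, sigmas))  # y ~ P(Y | X = x)
        samples[t] = x, y
    return samples

if __name__ == '__main__':
    mus = np.array([5.0, 5.0])
    sigmas = np.array([[1.0, 0.9], [0.9, 1.0]])
    s = gibbs_sampling(mus, sigmas)
    # With 10,000 dependent draws these should land close to the targets
    # (discarding a short burn-in tightens them slightly).
    print(s.mean(axis=0))  # expect roughly [5, 5]
    print(np.cov(s.T))     # expect roughly [[1, 0.9], [0.9, 1]]
```

The moment check replaces the post's seaborn plot, so the script runs headless; the same check applied to the post's sampler is a quick way to see the effect of the std-dev-versus-variance distinction.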

Comments
 (0)