Skip to content

Commit 87c93b4

Browse files
committed
small edits
1 parent 13db4ac commit 87c93b4

File tree

1 file changed

+63
-63
lines changed

1 file changed

+63
-63
lines changed

probability/09_random_variables.py

Lines changed: 63 additions & 63 deletions
Original file line numberDiff line numberDiff line change
@@ -10,7 +10,7 @@
1010

1111
import marimo
1212

13-
__generated_with = "0.11.9"
13+
__generated_with = "0.11.10"
1414
app = marimo.App(width="medium", app_title="Random Variables")
1515

1616

@@ -72,7 +72,7 @@ def _(mo):
7272
r"""
7373
## Properties of Random Variables
7474
75-
Each random variable has several key properties that help us understand and work with it:
75+
Each random variable has several key properties:
7676
7777
| Property | Description | Example |
7878
|----------|-------------|---------|
@@ -81,7 +81,7 @@ def _(mo):
8181
| Support/Range | Possible values | $\{0,1,2,...,n\}$ for binomial |
8282
| Distribution | PMF or PDF | $p_X(x)$ or $f_X(x)$ |
8383
| Expectation | Weighted average | $E[X]$ |
84-
| Variance | Measure of spread | $Var(X)$ |
84+
| Variance | Measure of spread | $\text{Var}(X)$ |
8585
| Standard Deviation | Square root of variance | $\sigma_X$ |
8686
| Mode | Most likely value | argmax$_x$ $p_X(x)$ |
8787
@@ -121,14 +121,14 @@ def _(mo):
121121
def _(np, plt):
122122
def die_pmf(x):
123123
if x in [1, 2, 3, 4, 5, 6]:
124-
return 1/6
124+
return 1 / 6
125125
return 0
126126

127127
# Plot the PMF
128128
_x = np.arange(1, 7)
129129
probabilities = [die_pmf(i) for i in _x]
130130

131-
plt.figure(figsize=(8, 4))
131+
plt.figure(figsize=(8, 2))
132132
plt.bar(_x, probabilities)
133133
plt.title("PMF of Rolling a Fair Die")
134134
plt.xlabel("Outcome")
@@ -166,7 +166,7 @@ def _(np, plt, stats):
166166
_pdf = stats.norm.pdf(_x, loc=0, scale=1)
167167

168168
plt.figure(figsize=(8, 4))
169-
plt.plot(_x, _pdf, 'b-', label='PDF')
169+
plt.plot(_x, _pdf, "b-", label="PDF")
170170
plt.fill_between(_x, _pdf, where=(_x >= -1) & (_x <= 1), alpha=0.3)
171171
plt.title("Standard Normal Distribution")
172172
plt.xlabel("x")
@@ -214,7 +214,7 @@ def expected_value_discrete(x_values, probabilities):
214214

215215
@app.cell
216216
def _(E_X):
217-
print(E_X)
217+
E_X
218218
return
219219

220220

@@ -224,17 +224,17 @@ def _(mo):
224224
r"""
225225
## Variance
226226
227-
The variance $Var(X)$ measures the spread of a random variable around its mean:
227+
The variance $\text{Var}(X)$ measures the spread of a random variable around its mean:
228228
229-
$Var(X) = E[(X - E[X])^2]$
229+
$\text{Var}(X) = E[(X - E[X])^2]$
230230
231231
This can be computed as:
232-
$Var(X) = E[X^2] - (E[X])^2$
232+
$\text{Var}(X) = E[X^2] - (E[X])^2$
233233
234234
Properties:
235235
236-
1. $Var(aX) = a^2Var(X)$
237-
2. $Var(X + b) = Var(X)$
236+
1. $\text{Var}(aX) = a^2\text{Var}(X)$
237+
2. $\text{Var}(X + b) = \text{Var}(X)$
238238
"""
239239
)
240240
return
@@ -243,7 +243,7 @@ def _(mo):
243243
@app.cell
244244
def _(E_X, die_probs, die_values, np):
245245
def variance_discrete(x_values, probabilities, expected_value):
246-
squared_diff = [(x - expected_value)**2 for x in x_values]
246+
squared_diff = [(x - expected_value) ** 2 for x in x_values]
247247
return sum(d * p for d, p in zip(squared_diff, probabilities))
248248

249249
# Example: Variance of a fair die roll
@@ -305,12 +305,39 @@ def _(np, variance_discrete):
305305
@app.cell(hide_code=True)
306306
def _(coin_var, mo, normal_var, uniform_var):
307307
mo.md(
308-
f"""
308+
rf"""
309309
Let's look at some calculated variances:
310310
311-
- Fair coin (X = 0 or 1): Var(X) = {coin_var:.4f}
312-
- Standard normal distribution (discretized): Var(X) ≈ {normal_var:.4f}
313-
- Uniform distribution on [0,1] (discretized): Var(X) ≈ {uniform_var:.4f}
311+
- Fair coin (X = 0 or 1): $\text{{Var}}(X) = {coin_var:.4f}$
312+
- Standard normal distribution (discretized): $\text{{Var}}(X) \approx {normal_var:.4f}$
313+
- Uniform distribution on $[0,1]$ (discretized): $\text{{Var}}(X) \approx {uniform_var:.4f}$
314+
"""
315+
)
316+
return
317+
318+
319+
@app.cell(hide_code=True)
320+
def _(mo):
321+
mo.md(
322+
r"""
323+
## Common Distributions
324+
325+
1. Bernoulli Distribution
326+
- Models a single success/failure experiment
327+
- $P(X = 1) = p$, $P(X = 0) = 1-p$
328+
- $E[X] = p$, $\text{Var}(X) = p(1-p)$
329+
330+
2. Binomial Distribution
331+
332+
- Models number of successes in $n$ independent trials
333+
- $P(X = k) = \binom{n}{k}p^k(1-p)^{n-k}$
334+
- $E[X] = np$, $\text{Var}(X) = np(1-p)$
335+
336+
3. Normal Distribution
337+
338+
- Bell-shaped curve defined by mean $\mu$ and variance $\sigma^2$
339+
- PDF: $f_X(x) = \frac{1}{\sigma\sqrt{2\pi}}e^{-\frac{(x-\mu)^2}{2\sigma^2}}$
340+
- $E[X] = \mu$, $\text{Var}(X) = \sigma^2$
314341
"""
315342
)
316343
return
@@ -320,7 +347,7 @@ def _(coin_var, mo, normal_var, uniform_var):
320347
def _(mo):
321348
mo.md(
322349
r"""
323-
## Interactive Example: Comparing PMF and PDF
350+
### Example: Comparing Discrete and Continuous Distributions
324351
325352
This example shows the relationship between a Binomial distribution (discrete) and its Normal approximation (continuous).
326353
The parameters control both distributions:
@@ -334,7 +361,7 @@ def _(mo):
334361

335362
@app.cell
336363
def _(mo, n_trials, p_success):
337-
mo.hstack([n_trials, p_success], justify='space-around')
364+
mo.hstack([n_trials, p_success], justify="space-around")
338365
return
339366

340367

@@ -348,28 +375,28 @@ def _(mo):
348375

349376
@app.cell(hide_code=True)
350377
def _(n_trials, np, p_success, plt, stats):
351-
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(12, 5))
378+
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(12, 3))
352379

353380
# Discrete: Binomial PMF
354381
k = np.arange(0, n_trials.value + 1)
355382
pmf = stats.binom.pmf(k, n_trials.value, p_success.value)
356-
ax1.bar(k, pmf, alpha=0.8, color='#1f77b4', label='PMF')
357-
ax1.set_title(f'Binomial PMF (n={n_trials.value}, p={p_success.value})')
358-
ax1.set_xlabel('Number of Successes')
359-
ax1.set_ylabel('Probability')
383+
ax1.bar(k, pmf, alpha=0.8, color="#1f77b4", label="PMF")
384+
ax1.set_title(f"Binomial PMF (n={n_trials.value}, p={p_success.value})")
385+
ax1.set_xlabel("Number of Successes")
386+
ax1.set_ylabel("Probability")
360387
ax1.grid(True, alpha=0.3)
361388

362389
# Continuous: Normal PDF approx.
363390
mu = n_trials.value * p_success.value
364-
sigma = np.sqrt(n_trials.value * p_success.value * (1-p_success.value))
365-
x = np.linspace(max(0, mu - 4*sigma), min(n_trials.value, mu + 4*sigma), 100)
391+
sigma = np.sqrt(n_trials.value * p_success.value * (1 - p_success.value))
392+
x = np.linspace(max(0, mu - 4 * sigma), min(n_trials.value, mu + 4 * sigma), 100)
366393
pdf = stats.norm.pdf(x, mu, sigma)
367394

368-
ax2.plot(x, pdf, 'r-', linewidth=2, label='PDF')
369-
ax2.fill_between(x, pdf, alpha=0.3, color='red')
370-
ax2.set_title(f'Normal PDF (μ={mu:.1f}, σ={sigma:.1f})')
371-
ax2.set_xlabel('Continuous Approximation')
372-
ax2.set_ylabel('Density')
395+
ax2.plot(x, pdf, "r-", linewidth=2, label="PDF")
396+
ax2.fill_between(x, pdf, alpha=0.3, color="red")
397+
ax2.set_title(f"Normal PDF (μ={mu:.1f}, σ={sigma:.1f})")
398+
ax2.set_xlabel("Continuous Approximation")
399+
ax2.set_ylabel("Density")
373400
ax2.grid(True, alpha=0.3)
374401

375402
# Set consistent x-axis limits for better comparison
@@ -387,7 +414,7 @@ def _(mo, n_trials, np, p_success):
387414
**Current Distribution Properties:**
388415
389416
- Mean (μ) = {n_trials.value * p_success.value:.2f}
390-
- Standard Deviation (σ) = {np.sqrt(n_trials.value * p_success.value * (1-p_success.value)):.2f}
417+
- Standard Deviation (σ) = {np.sqrt(n_trials.value * p_success.value * (1 - p_success.value)):.2f}
391418
392419
Notice how the Normal distribution (right) approximates the Binomial distribution (left) better when:
393420
@@ -397,33 +424,6 @@ def _(mo, n_trials, np, p_success):
397424
return
398425

399426

400-
@app.cell(hide_code=True)
401-
def _(mo):
402-
mo.md(
403-
r"""
404-
## Common Distributions
405-
406-
1. Bernoulli Distribution
407-
- Models a single success/failure experiment
408-
- $P(X = 1) = p$, $P(X = 0) = 1-p$
409-
- $E[X] = p$, $Var(X) = p(1-p)$
410-
411-
2. Binomial Distribution
412-
413-
- Models number of successes in $n$ independent trials
414-
- $P(X = k) = \binom{n}{k}p^k(1-p)^{n-k}$
415-
- $E[X] = np$, $Var(X) = np(1-p)$
416-
417-
3. Normal Distribution
418-
419-
- Bell-shaped curve defined by mean $\mu$ and variance $\sigma^2$
420-
- PDF: $f_X(x) = \frac{1}{\sigma\sqrt{2\pi}}e^{-\frac{(x-\mu)^2}{2\sigma^2}}$
421-
- $E[X] = \mu$, $Var(X) = \sigma^2$
422-
"""
423-
)
424-
return
425-
426-
427427
@app.cell(hide_code=True)
428428
def _(mo):
429429
mo.md(
@@ -435,7 +435,7 @@ def _(mo):
435435
436436
1. The support of $X$
437437
2. The PMF $p_X(x)$
438-
3. $E[X]$ and $Var(X)$
438+
3. $E[X]$ and $\text{Var}(X)$
439439
440440
<details>
441441
<summary>Solution</summary>
@@ -457,7 +457,7 @@ def two_dice_pmf(x):
457457
458458
1. The PDF integrates to 1
459459
2. $E[X] = 1/2$
460-
3. $Var(X) = 1/12$
460+
3. $\text{Var}(X) = 1/12$
461461
462462
Try solving this yourself first, then check the solution below.
463463
"""
@@ -490,13 +490,13 @@ def _(mo):
490490
$E[X] = \int_0^1 x \cdot 1 \, dx = [\frac{x^2}{2}]_0^1 = \frac{1}{2} - 0 = \frac{1}{2}$
491491
492492
3. **Variance**:
493-
$Var(X) = E[X^2] - (E[X])^2$
493+
$\text{Var}(X) = E[X^2] - (E[X])^2$
494494
495495
First calculate $E[X^2]$:
496496
$E[X^2] = \int_0^1 x^2 \cdot 1 \, dx = [\frac{x^3}{3}]_0^1 = \frac{1}{3}$
497497
498498
Then:
499-
$Var(X) = \frac{1}{3} - (\frac{1}{2})^2 = \frac{1}{3} - \frac{1}{4} = \frac{1}{12}$
499+
$\text{Var}(X) = \frac{1}{3} - (\frac{1}{2})^2 = \frac{1}{3} - \frac{1}{4} = \frac{1}{12}$
500500
"""
501501
)
502502
return (mktext,)

0 commit comments

Comments
 (0)