|
# NOTE(review): presumably toggles real-valued vs complex-valued code paths in
# parametrized tests — confirm against the tests that consume this list.
reality_values_to_test = [False, True]
12 | 12 |
|
13 | 13 |
|
@pytest.mark.parametrize("size", (10, 100, 1000))
@pytest.mark.parametrize("var", (1, 2))
def test_complex_normal(rng, size, var):
    """Check complex_normal yields complex128 samples of the requested size
    whose sample mean and (unbiased) sample variance are statistically
    consistent with a zero-mean complex normal of variance `var`."""
    samples = gen.complex_normal(rng, size, var)
    assert samples.dtype == np.complex128
    assert samples.size == size
    # Each of the real and imaginary parts of the sample-mean estimate is
    # distributed ~ Normal(0, (var / 2) / size), so each should fall within
    # three standard deviations of zero with probability ~0.997.
    three_sigma_mean = 3 * (var / (2 * size)) ** 0.5
    sample_mean = samples.mean()
    assert abs(sample_mean.real) < three_sigma_mean
    assert abs(sample_mean.imag) < three_sigma_mean
    # If S is the unbiased sample-variance estimate then (size - 1) * S / var
    # is chi-squared with (size - 1) degrees of freedom. For size >> 1 this
    # gives S ~approx Normal(var, 2 * var**2 / (size - 1)), so the estimate
    # should deviate from var by less than 3 * sqrt(2 * var**2 / (size - 1))
    # with high probability.
    three_sigma_var = 3 * (2 * var**2 / (size - 1)) ** 0.5
    assert abs(samples.var(ddof=1) - var) < three_sigma_var
| 32 | + |
| 33 | + |
14 | 34 | @pytest.mark.parametrize("L", L_values_to_test) |
15 | 35 | @pytest.mark.parametrize("min_el", [0, 1]) |
16 | 36 | def test_complex_el_and_m_indices(L, min_el): |
|
0 commit comments