|
9 | 9 |
|
10 | 10 |
|
class TestModel(object):
    """Tests for the custom spectral models GeneralizedLorentz1D and SmoothBrokenPowerLaw."""

    @classmethod
    def setup_class(cls):
        # Shared, read-only model instances reused by the parameter tests below.
        # (classmethod first parameter renamed from `self` to the conventional `cls`)
        cls.lorentz1D = models.GeneralizedLorentz1D(x_0=3, fwhm=32, value=2.5, power_coeff=2)
        cls.smoothPowerlaw = models.SmoothBrokenPowerLaw(
            norm=1, gamma_low=-2, gamma_high=2, break_freq=10
        )

    def test_model_param(self):
        """Parameter values and names are exposed in the declared order."""
        lorentz1D = self.lorentz1D
        smoothPowerlaw = self.smoothPowerlaw

        assert np.allclose(smoothPowerlaw.parameters, np.array([1, -2, 2, 10]))
        assert np.allclose(lorentz1D.parameters, np.array([3.0, 32.0, 2.5, 2.0]))

        assert np.array_equal(
            lorentz1D.param_names, np.array(["x_0", "fwhm", "value", "power_coeff"])
        )
        assert np.array_equal(
            smoothPowerlaw.param_names, np.array(["norm", "gamma_low", "gamma_high", "break_freq"])
        )

    def test_power_coeff(self):
        """A non-positive power coefficient is rejected at construction time."""
        with pytest.raises(
            InputParameterError, match="The power coefficient should be greater than zero."
        ):
            models.GeneralizedLorentz1D(x_0=2, fwhm=100, value=3, power_coeff=-1)

    @pytest.mark.parametrize(
        "model_cls, yy_func, params",
        [
            (models.SmoothBrokenPowerLaw, models.smoothbknpo, [1, -2, 2, 10]),
            (models.GeneralizedLorentz1D, models.generalized_lorentzian, [3, 32, 2.5, 2]),
        ],
    )
    def test_model_evaluate(self, model_cls, yy_func, params):
        """Model __call__ agrees with the corresponding plain-function implementation."""
        model = model_cls(*params)
        xx = np.linspace(2, 4, 6)
        yy = model(xx)
        yy_ref = yy_func(xx, params)

        assert_allclose(yy, yy_ref, rtol=0, atol=1e-8)
        assert xx.shape == yy.shape == yy_ref.shape

    @pytest.mark.parametrize(
        "model, x_lim",
        [
            (models.SmoothBrokenPowerLaw(1, -2, 2, 10), [0.01, 70]),
            (models.GeneralizedLorentz1D(3, 32, 2.5, 2), [-10, 10]),
        ],
    )
    def test_model_fitting(self, model, x_lim):
        """Fitting with the analytic Jacobian agrees with a numerically estimated one."""
        # NOTE(review): x_lim entries are used as base-10 exponents here, so
        # [0.01, 70] spans 10**0.01 .. 10**70 — confirm this range is intended.
        x = np.logspace(x_lim[0], x_lim[1], 100)

        # BUGFIX: the parametrize argument is a single shared instance; binding
        # both names to the same object would not compare two independent fits
        # (and any in-place mutation would leak across parametrized runs).
        # Take independent copies instead.
        model_with_deriv = model.copy()
        model_no_deriv = model.copy()

        # add 10% noise to the amplitude, reproducibly seeded
        rng = np.random.default_rng(0)
        n = 0.1 * (rng.random(x.shape) - 0.5)

        data = model_with_deriv(x) + n
        fitter_with_deriv = fitting.LMLSQFitter()
        new_model_with_deriv = fitter_with_deriv(model_with_deriv, x, data)
        fitter_no_deriv = fitting.LMLSQFitter()
        new_model_no_deriv = fitter_no_deriv(model_no_deriv, x, data, estimate_jacobian=True)
        assert_allclose(new_model_with_deriv.parameters, new_model_no_deriv.parameters, atol=0.5)
0 commit comments