|
70 | 70 | np.array([t.min(), t.max()]) * x[1] + x[0], |
71 | 71 | "k", |
72 | 72 | lw=4, |
73 | | - label=r"true: $x_0$ = %.2f, $x_1$ = %.2f" % (x[0], x[1]), |
| 73 | + label=rf"true: $x_0$ = {x[0]:.2f}, $x_1$ = {x[1]:.2f}", |
74 | 74 | ) |
75 | 75 | plt.plot( |
76 | 76 | np.array([t.min(), t.max()]), |
77 | 77 | np.array([t.min(), t.max()]) * xest[1] + xest[0], |
78 | 78 | "--r", |
79 | 79 | lw=4, |
80 | | - label=r"est noise-free: $x_0$ = %.2f, $x_1$ = %.2f" % (xest[0], xest[1]), |
| 80 | + label=rf"est noise-free: $x_0$ = {xest[0]:.2f}, $x_1$ = {xest[1]:.2f}", |
81 | 81 | ) |
82 | 82 | plt.plot( |
83 | 83 | np.array([t.min(), t.max()]), |
84 | 84 | np.array([t.min(), t.max()]) * xnest[1] + xnest[0], |
85 | 85 | "--g", |
86 | 86 | lw=4, |
87 | | - label=r"est noisy: $x_0$ = %.2f, $x_1$ = %.2f" % (xnest[0], xnest[1]), |
| 87 | + label=rf"est noisy: $x_0$ = {xnest[0]:.2f}, $x_1$ = {xnest[1]:.2f}", |
88 | 88 | ) |
89 | 89 | plt.scatter(t, y, c="r", s=70) |
90 | 90 | plt.scatter(t, yn, c="g", s=70) |
91 | 91 | plt.legend() |
| 92 | +plt.tight_layout() |
92 | 93 |
|
93 | 94 | ############################################################################### |
94 | 95 | # Once we have estimated the best fitting coefficients :math:`\mathbf{x}`
|
103 | 104 | plt.scatter(t, y, c="k", s=70) |
104 | 105 | plt.scatter(t1, y1, c="r", s=40) |
105 | 106 | plt.legend() |
| 107 | +plt.tight_layout() |
106 | 108 |
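The step that actually produces y1 falls outside this hunk; most likely it simply evaluates the fitted line at the new sampling times t1 scattered above. A minimal sketch of that prediction, assuming the noise-free estimate xest is the one being reused (the original file may use a different variable):

# evaluate the estimated straight line at the new sampling times t1
# (assumes t1 and xest have already been defined earlier in the script)
y1 = xest[0] + xest[1] * t1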
|
107 | 109 | ############################################################################### |
108 | 110 | # We now consider the case where some of the observations have large errors.
|
133 | 135 | tolIRLS=tolIRLS, |
134 | 136 | returnhistory=True, |
135 | 137 | ) |
136 | | -print("IRLS converged at %d iterations..." % nouter) |
| 138 | +print(f"IRLS converged at {nouter} iterations...") |
137 | 139 |
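Only the tail of the IRLS solver call is visible in this hunk. As a rough guide to what such a solver does on this straight-line problem, here is a self-contained NumPy sketch of the L1 reweighting loop; every name in it (G, y_obs, xhist, eps) is illustrative and not taken from the commit:

import numpy as np

# illustrative data: a straight line plus small noise and a few outliers
rng = np.random.default_rng(0)
t_ = np.linspace(0.0, 1.0, 21)
x_true = np.array([1.0, 2.0])                      # intercept and slope
y_obs = x_true[0] + x_true[1] * t_ + 0.1 * rng.standard_normal(t_.size)
y_obs[::7] += 2.0                                  # large, outlier-like errors

G = np.column_stack([np.ones_like(t_), t_])        # regression matrix [1, t]
eps, tol, nouter_max = 1e-10, 1e-10, 50

xcur = np.linalg.lstsq(G, y_obs, rcond=None)[0]    # start from the L2 solution
xhist = [xcur]
for _ in range(nouter_max):
    r = y_obs - G @ xcur                           # current residual
    w = 1.0 / (np.abs(r) + eps)                    # L1-type reweighting
    Gw = G * w[:, None]
    xcur = np.linalg.solve(G.T @ Gw, Gw.T @ y_obs) # weighted least-squares step
    xhist.append(xcur)
    if np.linalg.norm(xhist[-1] - xhist[-2]) < tol:
        break
xhist = np.array(xhist)
print(f"illustrative IRLS estimate after {len(xhist) - 1} iterations: {xcur}")

Each pass solves a weighted least-squares problem whose weights 1/|r| make the quadratic cost mimic the L1 norm of the residual, which is what makes the fit far less sensitive to the outliers than the plain L2 solution.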
|
138 | 140 | plt.figure(figsize=(5, 7)) |
139 | 141 | plt.plot( |
140 | 142 | np.array([t.min(), t.max()]), |
141 | 143 | np.array([t.min(), t.max()]) * x[1] + x[0], |
142 | 144 | "k", |
143 | 145 | lw=4, |
144 | | - label=r"true: $x_0$ = %.2f, $x_1$ = %.2f" % (x[0], x[1]), |
| 146 | + label=rf"true: $x_0$ = {x[0]:.2f}, $x_1$ = {x[1]:.2f}", |
145 | 147 | ) |
146 | 148 | plt.plot( |
147 | 149 | np.array([t.min(), t.max()]), |
148 | 150 | np.array([t.min(), t.max()]) * xnest[1] + xnest[0], |
149 | 151 | "--r", |
150 | 152 | lw=4, |
151 | | - label=r"L2: $x_0$ = %.2f, $x_1$ = %.2f" % (xnest[0], xnest[1]), |
| 153 | + label=rf"L2: $x_0$ = {xnest[0]:.2f}, $x_1$ = {xnest[1]:.2f}", |
152 | 154 | ) |
153 | 155 | plt.plot( |
154 | 156 | np.array([t.min(), t.max()]), |
155 | 157 | np.array([t.min(), t.max()]) * xirls[1] + xirls[0], |
156 | 158 | "--g", |
157 | 159 | lw=4, |
158 | | - label=r"L1 - IRSL: $x_0$ = %.2f, $x_1$ = %.2f" % (xirls[0], xirls[1]), |
| 160 | + label=rf"L1 - IRLS: $x_0$ = {xirls[0]:.2f}, $x_1$ = {xirls[1]:.2f}",
159 | 161 | ) |
160 | 162 | plt.scatter(t, y, c="r", s=70) |
161 | 163 | plt.scatter(t, yn, c="g", s=70) |
162 | 164 | plt.legend() |
| 165 | +plt.tight_layout() |
163 | 166 |
|
164 | 167 | ############################################################################### |
165 | 168 | # Let's finally take a look at the convergence of IRLS. First we visualize |
|
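The convergence figure itself also lies outside this hunk; continuing the illustrative sketch above, the stored history xhist can be inspected with a few matplotlib lines (an assumption about what the original visualizes, using xhist and x_true from that sketch):

import matplotlib.pyplot as plt

plt.figure(figsize=(5, 4))
plt.plot(xhist[:, 0], "k", lw=2, label="$x_0$")
plt.plot(xhist[:, 1], "r", lw=2, label="$x_1$")
plt.axhline(x_true[0], color="k", ls=":", lw=1)   # true intercept for reference
plt.axhline(x_true[1], color="r", ls=":", lw=1)   # true slope for reference
plt.xlabel("IRLS iteration")
plt.legend()
plt.tight_layout()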