Skip to content

Commit 3529da6

Browse files
cakom and mrava87 authored
Convert all strings to fstrings when appropriate (#349)
* feat: fstring changes in optimization, signalprocessing, utils, waveeqprocessing
* feat: fstring changes in examples
* feat: fstring changes in tutorials
* minor: remove prints in tests
* Update solver.py

Co-authored-by: mrava87 <[email protected]>
1 parent 89b2043 commit 3529da6

34 files changed

+195
-244
lines changed

examples/plot_cgls.py

Lines changed: 5 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -43,8 +43,8 @@
4343
Aop, y, x0=np.zeros_like(x), niter=10, tol=1e-10, show=True
4444
)
4545

46-
print("x= %s" % x)
47-
print("cgls solution xest= %s" % xest)
46+
print(f"x= {x}")
47+
print(f"cgls solution xest= {xest}")
4848

4949
###############################################################################
5050
# And the lsqr solver to invert this matrix
@@ -64,8 +64,9 @@
6464
cost_lsqr,
6565
) = pylops.optimization.solver.lsqr(Aop, y, x0=np.zeros_like(x), niter=10, show=True)
6666

67-
print("x= %s" % x)
68-
print("lsqr solution xest= %s" % xest)
67+
print(f"x= {x}")
68+
print(f"lsqr solution xest= {xest}")
69+
6970

7071
###############################################################################
7172
# Finally we show that the L2 norm of the residual of the two solvers decays

examples/plot_diagonal.py

Lines changed: 8 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -76,22 +76,22 @@
7676
# simplest case where each element is multipled by a different value
7777
nx, ny = 3, 5
7878
x = np.ones((nx, ny))
79-
print("x =\n%s" % x)
79+
print(f"x =\n{x}")
8080

8181
d = np.arange(nx * ny).reshape(nx, ny)
8282
Dop = pylops.Diagonal(d)
8383

8484
y = Dop * x.ravel()
8585
y1 = Dop.H * x.ravel()
8686

87-
print("y = D*x =\n%s" % y.reshape(nx, ny))
88-
print("xadj = D'*x =\n%s " % y1.reshape(nx, ny))
87+
print(f"y = D*x =\n{y.reshape(nx, ny)}")
88+
print(f"xadj = D'*x =\n{y1.reshape(nx, ny)}")
8989

9090
###############################################################################
9191
# And we now broadcast
9292
nx, ny = 3, 5
9393
x = np.ones((nx, ny))
94-
print("x =\n%s" % x)
94+
print(f"x =\n{x}")
9595

9696
# 1st dim
9797
d = np.arange(nx)
@@ -100,8 +100,8 @@
100100
y = Dop * x.ravel()
101101
y1 = Dop.H * x.ravel()
102102

103-
print("1st dim: y = D*x =\n%s" % y.reshape(nx, ny))
104-
print("1st dim: xadj = D'*x =\n%s " % y1.reshape(nx, ny))
103+
print(f"1st dim: y = D*x =\n{y.reshape(nx, ny)}")
104+
print(f"1st dim: xadj = D'*x =\n{y1.reshape(nx, ny)}")
105105

106106
# 2nd dim
107107
d = np.arange(ny)
@@ -110,5 +110,5 @@
110110
y = Dop * x.ravel()
111111
y1 = Dop.H * x.ravel()
112112

113-
print("2nd dim: y = D*x =\n%s" % y.reshape(nx, ny))
114-
print("2nd dim: xadj = D'*x =\n%s " % y1.reshape(nx, ny))
113+
print(f"2nd dim: y = D*x =\n{y.reshape(nx, ny)}")
114+
print(f"2nd dim: xadj = D'*x =\n{y1.reshape(nx, ny)}")

examples/plot_identity.py

Lines changed: 6 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -70,9 +70,9 @@
7070
y = Iop * x
7171
xadj = Iop.H * y
7272

73-
print("x = %s " % x)
74-
print("I*x = %s " % y)
75-
print("I'*y = %s " % xadj)
73+
print(f"x = {x} ")
74+
print(f"I*x = {y} ")
75+
print(f"I'*y = {xadj} ")
7676

7777
###############################################################################
7878
# and model bigger than data
@@ -83,9 +83,9 @@
8383
y = Iop * x
8484
xadj = Iop.H * y
8585

86-
print("x = %s " % x)
87-
print("I*x = %s " % y)
88-
print("I'*y = %s " % xadj)
86+
print(f"x = {x} ")
87+
print(f"I*x = {y} ")
88+
print(f"I'*y = {xadj} ")
8989

9090
###############################################################################
9191
# Note that this operator can be useful in many real-life applications when for example

examples/plot_linearregr.py

Lines changed: 10 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -70,25 +70,26 @@
7070
np.array([t.min(), t.max()]) * x[1] + x[0],
7171
"k",
7272
lw=4,
73-
label=r"true: $x_0$ = %.2f, $x_1$ = %.2f" % (x[0], x[1]),
73+
label=rf"true: $x_0$ = {x[0]:.2f}, $x_1$ = {x[1]:.2f}",
7474
)
7575
plt.plot(
7676
np.array([t.min(), t.max()]),
7777
np.array([t.min(), t.max()]) * xest[1] + xest[0],
7878
"--r",
7979
lw=4,
80-
label=r"est noise-free: $x_0$ = %.2f, $x_1$ = %.2f" % (xest[0], xest[1]),
80+
label=rf"est noise-free: $x_0$ = {xest[0]:.2f}, $x_1$ = {xest[1]:.2f}",
8181
)
8282
plt.plot(
8383
np.array([t.min(), t.max()]),
8484
np.array([t.min(), t.max()]) * xnest[1] + xnest[0],
8585
"--g",
8686
lw=4,
87-
label=r"est noisy: $x_0$ = %.2f, $x_1$ = %.2f" % (xnest[0], xnest[1]),
87+
label=rf"est noisy: $x_0$ = {xnest[0]:.2f}, $x_1$ = {xnest[1]:.2f}",
8888
)
8989
plt.scatter(t, y, c="r", s=70)
9090
plt.scatter(t, yn, c="g", s=70)
9191
plt.legend()
92+
plt.tight_layout()
9293

9394
###############################################################################
9495
# Once that we have estimated the best fitting coefficients :math:`\mathbf{x}`
@@ -103,6 +104,7 @@
103104
plt.scatter(t, y, c="k", s=70)
104105
plt.scatter(t1, y1, c="r", s=40)
105106
plt.legend()
107+
plt.tight_layout()
106108

107109
###############################################################################
108110
# We consider now the case where some of the observations have large errors.
@@ -133,33 +135,34 @@
133135
tolIRLS=tolIRLS,
134136
returnhistory=True,
135137
)
136-
print("IRLS converged at %d iterations..." % nouter)
138+
print(f"IRLS converged at {nouter} iterations...")
137139

138140
plt.figure(figsize=(5, 7))
139141
plt.plot(
140142
np.array([t.min(), t.max()]),
141143
np.array([t.min(), t.max()]) * x[1] + x[0],
142144
"k",
143145
lw=4,
144-
label=r"true: $x_0$ = %.2f, $x_1$ = %.2f" % (x[0], x[1]),
146+
label=rf"true: $x_0$ = {x[0]:.2f}, $x_1$ = {x[1]:.2f}",
145147
)
146148
plt.plot(
147149
np.array([t.min(), t.max()]),
148150
np.array([t.min(), t.max()]) * xnest[1] + xnest[0],
149151
"--r",
150152
lw=4,
151-
label=r"L2: $x_0$ = %.2f, $x_1$ = %.2f" % (xnest[0], xnest[1]),
153+
label=rf"L2: $x_0$ = {xnest[0]:.2f}, $x_1$ = {xnest[1]:.2f}",
152154
)
153155
plt.plot(
154156
np.array([t.min(), t.max()]),
155157
np.array([t.min(), t.max()]) * xirls[1] + xirls[0],
156158
"--g",
157159
lw=4,
158-
label=r"L1 - IRSL: $x_0$ = %.2f, $x_1$ = %.2f" % (xirls[0], xirls[1]),
160+
label=rf"L1 - IRSL: $x_0$ = {xirls[0]:.2f}, $x_1$ = {xirls[1]:.2f}",
159161
)
160162
plt.scatter(t, y, c="r", s=70)
161163
plt.scatter(t, yn, c="g", s=70)
162164
plt.legend()
165+
plt.tight_layout()
163166

164167
###############################################################################
165168
# Let's finally take a look at the convergence of IRLS. First we visualize

examples/plot_matrixmult.py

Lines changed: 13 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -88,6 +88,7 @@
8888
ax.grid(linewidth=3, color="white")
8989
ax.xaxis.set_ticklabels([])
9090
ax.yaxis.set_ticklabels([])
91+
plt.tight_layout()
9192

9293
gs = pltgs.GridSpec(1, 6)
9394
fig = plt.figure(figsize=(7, 3))
@@ -126,6 +127,7 @@
126127
ax.grid(linewidth=3, color="white")
127128
ax.xaxis.set_ticklabels([])
128129
ax.yaxis.set_ticklabels([])
130+
plt.tight_layout()
129131

130132
###############################################################################
131133
# Let's also plot the matrix eigenvalues
@@ -156,6 +158,7 @@
156158
plt.plot(xnest, "--g", lw=2, label="Noisy")
157159
plt.title("Matrix inversion", size=16, fontweight="bold")
158160
plt.legend()
161+
plt.tight_layout()
159162

160163
###############################################################################
161164
# And we can also use a sparse matrix from the :obj:`scipy.sparse`
@@ -168,12 +171,12 @@
168171
y = Aop * x
169172
xest = Aop / y
170173

171-
print("A= %s" % Aop.A.todense())
172-
print("A^-1=", Aop.inv().todense())
173-
print("eigs=", Aop.eigs())
174-
print("x= %s" % x)
175-
print("y= %s" % y)
176-
print("lsqr solution xest= %s" % xest)
174+
print(f"A= {Aop.A.todense()}")
175+
print(f"A^-1= {Aop.inv().todense()}")
176+
print(f"eigs= {Aop.eigs()}")
177+
print(f"x= {x}")
178+
print(f"y= {y}")
179+
print(f"lsqr solution xest= {xest}")
177180

178181
###############################################################################
179182
# Finally, in several circumstances the input model :math:`\mathbf{x}` may
@@ -206,7 +209,7 @@
206209
xest, istop, itn, r1norm, r2norm = lsqr(Aop, y, damp=1e-10, iter_lim=10, show=0)[0:5]
207210
xest = xest.reshape(3, 2)
208211

209-
print("A= %s" % A)
210-
print("x= %s" % x)
211-
print("y= %s" % y)
212-
print("lsqr solution xest= %s" % xest)
212+
print(f"A= {A}")
213+
print(f"x= {x}")
214+
print(f"y={y}")
215+
print(f"lsqr solution xest= {xest}")

examples/plot_pad.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -23,9 +23,9 @@
2323
y = Pop * x
2424
xadj = Pop.H * y
2525

26-
print("x = %s " % x)
27-
print("P*x = %s " % y)
28-
print("P'*y = %s " % xadj)
26+
print(f"x = {x}")
27+
print(f"P*x = {y}")
28+
print(f"P'*y = {xadj}")
2929

3030
###############################################################################
3131
# We move now to a multi-dimensional case. We pad the input model

examples/plot_regr.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -122,7 +122,7 @@
122122
tolIRLS=tolIRLS,
123123
returnhistory=True,
124124
)
125-
print("IRLS converged at %d iterations..." % nouter)
125+
print(f"IRLS converged at {nouter} iterations...")
126126

127127
plt.figure(figsize=(5, 7))
128128
plt.plot(

examples/plot_stacking.py

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -253,12 +253,12 @@
253253
yop = ABop * x
254254
xinv = ABop / yop
255255

256-
print("AB = \n", AB)
256+
print(f"AB = \n {AB}")
257257

258-
print("x = ", x)
259-
print("y = ", y)
260-
print("yop = ", yop)
261-
print("xinv = ", x)
258+
print(f"x = {x}")
259+
print(f"y = {y}")
260+
print(f"yop = {yop}")
261+
print(f"xinv = {xinv}")
262262

263263
###############################################################################
264264
# We can also use :py:class:`pylops.Kronecker` to do something more

examples/plot_zero.py

Lines changed: 6 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -72,10 +72,9 @@
7272
y = Zop * x
7373
xadj = Zop.H * y
7474

75-
print("x = %s" % x)
76-
print("0*x = %s" % y)
77-
print("0'*y = %s" % xadj)
78-
75+
print(f"x = {x}")
76+
print(f"0*x = {y}")
77+
print(f"0'*y = {xadj}")
7978

8079
###############################################################################
8180
# and model bigger than data
@@ -86,9 +85,9 @@
8685
y = Zop * x
8786
xadj = Zop.H * y
8887

89-
print("x = %s" % x)
90-
print("0*x = %s" % y)
91-
print("0'*y = %s" % xadj)
88+
print(f"x = {x}")
89+
print(f"0*x = {y}")
90+
print(f"0'*y = {xadj}")
9291

9392
###############################################################################
9493
# Note that this operator can be useful in many real-life applications when for

0 commit comments

Comments
 (0)