
Convert all strings to fstrings when appropriate (#349)
* feat: fstring changes in optimization, signalprocessing, utils, waveeqprocessing

* feat: fstring changes in examples

* feat: fstring changes in tutorials

* minor: remove prints in tests

* Update solver.py

Co-authored-by: mrava87 <[email protected]>
cako and mrava87 authored Mar 19, 2022
1 parent 89b2043 commit 3529da6
Showing 34 changed files with 195 additions and 244 deletions.
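
The conversion applied throughout is mechanical: printf-style `%` interpolation is replaced by an f-string literal, with the interpolated value moving inside braces. A minimal sketch of the two most common cases in this commit (variable names borrowed from the diffs below; the values are stand-ins for illustration):

# stand-in values, for illustration only
x = [1.0, 2.0]
nouter = 5

# before: printf-style interpolation
print("x= %s" % x)
print("IRLS converged at %d iterations..." % nouter)

# after: equivalent f-strings (Python >= 3.6)
print(f"x= {x}")
print(f"IRLS converged at {nouter} iterations...")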
9 changes: 5 additions & 4 deletions examples/plot_cgls.py
@@ -43,8 +43,8 @@
     Aop, y, x0=np.zeros_like(x), niter=10, tol=1e-10, show=True
 )

-print("x= %s" % x)
-print("cgls solution xest= %s" % xest)
+print(f"x= {x}")
+print(f"cgls solution xest= {xest}")

 ###############################################################################
 # And the lsqr solver to invert this matrix
@@ -64,8 +64,9 @@
     cost_lsqr,
 ) = pylops.optimization.solver.lsqr(Aop, y, x0=np.zeros_like(x), niter=10, show=True)

-print("x= %s" % x)
-print("lsqr solution xest= %s" % xest)
+print(f"x= {x}")
+print(f"lsqr solution xest= {xest}")
+

 ###############################################################################
 # Finally we show that the L2 norm of the residual of the two solvers decays
16 changes: 8 additions & 8 deletions examples/plot_diagonal.py
@@ -76,22 +76,22 @@
 # simplest case where each element is multipled by a different value
 nx, ny = 3, 5
 x = np.ones((nx, ny))
-print("x =\n%s" % x)
+print(f"x =\n{x}")

 d = np.arange(nx * ny).reshape(nx, ny)
 Dop = pylops.Diagonal(d)

 y = Dop * x.ravel()
 y1 = Dop.H * x.ravel()

-print("y = D*x =\n%s" % y.reshape(nx, ny))
-print("xadj = D'*x =\n%s " % y1.reshape(nx, ny))
+print(f"y = D*x =\n{y.reshape(nx, ny)}")
+print(f"xadj = D'*x =\n{y1.reshape(nx, ny)}")

 ###############################################################################
 # And we now broadcast
 nx, ny = 3, 5
 x = np.ones((nx, ny))
-print("x =\n%s" % x)
+print(f"x =\n{x}")

 # 1st dim
 d = np.arange(nx)
@@ -100,8 +100,8 @@
 y = Dop * x.ravel()
 y1 = Dop.H * x.ravel()

-print("1st dim: y = D*x =\n%s" % y.reshape(nx, ny))
-print("1st dim: xadj = D'*x =\n%s " % y1.reshape(nx, ny))
+print(f"1st dim: y = D*x =\n{y.reshape(nx, ny)}")
+print(f"1st dim: xadj = D'*x =\n{y1.reshape(nx, ny)}")

 # 2nd dim
 d = np.arange(ny)
@@ -110,5 +110,5 @@
 y = Dop * x.ravel()
 y1 = Dop.H * x.ravel()

-print("2nd dim: y = D*x =\n%s" % y.reshape(nx, ny))
-print("2nd dim: xadj = D'*x =\n%s " % y1.reshape(nx, ny))
+print(f"2nd dim: y = D*x =\n{y.reshape(nx, ny)}")
+print(f"2nd dim: xadj = D'*x =\n{y1.reshape(nx, ny)}")
12 changes: 6 additions & 6 deletions examples/plot_identity.py
@@ -70,9 +70,9 @@
 y = Iop * x
 xadj = Iop.H * y

-print("x = %s " % x)
-print("I*x = %s " % y)
-print("I'*y = %s " % xadj)
+print(f"x = {x} ")
+print(f"I*x = {y} ")
+print(f"I'*y = {xadj} ")

 ###############################################################################
 # and model bigger than data
@@ -83,9 +83,9 @@
 y = Iop * x
 xadj = Iop.H * y

-print("x = %s " % x)
-print("I*x = %s " % y)
-print("I'*y = %s " % xadj)
+print(f"x = {x} ")
+print(f"I*x = {y} ")
+print(f"I'*y = {xadj} ")

 ###############################################################################
 # Note that this operator can be useful in many real-life applications when for example
17 changes: 10 additions & 7 deletions examples/plot_linearregr.py
@@ -70,25 +70,26 @@
     np.array([t.min(), t.max()]) * x[1] + x[0],
     "k",
     lw=4,
-    label=r"true: $x_0$ = %.2f, $x_1$ = %.2f" % (x[0], x[1]),
+    label=rf"true: $x_0$ = {x[0]:.2f}, $x_1$ = {x[1]:.2f}",
 )
 plt.plot(
     np.array([t.min(), t.max()]),
     np.array([t.min(), t.max()]) * xest[1] + xest[0],
     "--r",
     lw=4,
-    label=r"est noise-free: $x_0$ = %.2f, $x_1$ = %.2f" % (xest[0], xest[1]),
+    label=rf"est noise-free: $x_0$ = {xest[0]:.2f}, $x_1$ = {xest[1]:.2f}",
 )
 plt.plot(
     np.array([t.min(), t.max()]),
     np.array([t.min(), t.max()]) * xnest[1] + xnest[0],
     "--g",
     lw=4,
-    label=r"est noisy: $x_0$ = %.2f, $x_1$ = %.2f" % (xnest[0], xnest[1]),
+    label=rf"est noisy: $x_0$ = {xnest[0]:.2f}, $x_1$ = {xnest[1]:.2f}",
 )
 plt.scatter(t, y, c="r", s=70)
 plt.scatter(t, yn, c="g", s=70)
 plt.legend()
+plt.tight_layout()

 ###############################################################################
 # Once that we have estimated the best fitting coefficients :math:`\mathbf{x}`
@@ -103,6 +104,7 @@
 plt.scatter(t, y, c="k", s=70)
 plt.scatter(t1, y1, c="r", s=40)
 plt.legend()
+plt.tight_layout()

 ###############################################################################
 # We consider now the case where some of the observations have large errors.
@@ -133,33 +135,34 @@
     tolIRLS=tolIRLS,
     returnhistory=True,
 )
-print("IRLS converged at %d iterations..." % nouter)
+print(f"IRLS converged at {nouter} iterations...")

 plt.figure(figsize=(5, 7))
 plt.plot(
     np.array([t.min(), t.max()]),
     np.array([t.min(), t.max()]) * x[1] + x[0],
     "k",
     lw=4,
-    label=r"true: $x_0$ = %.2f, $x_1$ = %.2f" % (x[0], x[1]),
+    label=rf"true: $x_0$ = {x[0]:.2f}, $x_1$ = {x[1]:.2f}",
 )
 plt.plot(
     np.array([t.min(), t.max()]),
     np.array([t.min(), t.max()]) * xnest[1] + xnest[0],
     "--r",
     lw=4,
-    label=r"L2: $x_0$ = %.2f, $x_1$ = %.2f" % (xnest[0], xnest[1]),
+    label=rf"L2: $x_0$ = {xnest[0]:.2f}, $x_1$ = {xnest[1]:.2f}",
 )
 plt.plot(
     np.array([t.min(), t.max()]),
     np.array([t.min(), t.max()]) * xirls[1] + xirls[0],
     "--g",
     lw=4,
-    label=r"L1 - IRSL: $x_0$ = %.2f, $x_1$ = %.2f" % (xirls[0], xirls[1]),
+    label=rf"L1 - IRSL: $x_0$ = {xirls[0]:.2f}, $x_1$ = {xirls[1]:.2f}",
 )
 plt.scatter(t, y, c="r", s=70)
 plt.scatter(t, yn, c="g", s=70)
 plt.legend()
+plt.tight_layout()

 ###############################################################################
 # Let's finally take a look at the convergence of IRLS. First we visualize
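
Two details in the label conversions above are easy to miss: string prefixes combine, so a raw string used for a LaTeX label becomes an rf-string (raw + f-string), and a printf format spec such as %.2f moves after a colon inside the braces. A small self-contained sketch (the coefficient values are made up for illustration):

# stand-in coefficients, for illustration only
x = (1.234, 5.678)

# before: raw string with % interpolation
old = r"true: $x_0$ = %.2f, $x_1$ = %.2f" % (x[0], x[1])

# after: raw f-string; the format spec follows the colon
new = rf"true: $x_0$ = {x[0]:.2f}, $x_1$ = {x[1]:.2f}"

assert old == new  # both yield "true: $x_0$ = 1.23, $x_1$ = 5.68"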
23 changes: 13 additions & 10 deletions examples/plot_matrixmult.py
@@ -88,6 +88,7 @@
 ax.grid(linewidth=3, color="white")
 ax.xaxis.set_ticklabels([])
 ax.yaxis.set_ticklabels([])
+plt.tight_layout()

 gs = pltgs.GridSpec(1, 6)
 fig = plt.figure(figsize=(7, 3))
@@ -126,6 +127,7 @@
 ax.grid(linewidth=3, color="white")
 ax.xaxis.set_ticklabels([])
 ax.yaxis.set_ticklabels([])
+plt.tight_layout()

 ###############################################################################
 # Let's also plot the matrix eigenvalues
@@ -156,6 +158,7 @@
 plt.plot(xnest, "--g", lw=2, label="Noisy")
 plt.title("Matrix inversion", size=16, fontweight="bold")
 plt.legend()
+plt.tight_layout()

 ###############################################################################
 # And we can also use a sparse matrix from the :obj:`scipy.sparse`
@@ -168,12 +171,12 @@
 y = Aop * x
 xest = Aop / y

-print("A= %s" % Aop.A.todense())
-print("A^-1=", Aop.inv().todense())
-print("eigs=", Aop.eigs())
-print("x= %s" % x)
-print("y= %s" % y)
-print("lsqr solution xest= %s" % xest)
+print(f"A= {Aop.A.todense()}")
+print(f"A^-1= {Aop.inv().todense()}")
+print(f"eigs= {Aop.eigs()}")
+print(f"x= {x}")
+print(f"y= {y}")
+print(f"lsqr solution xest= {xest}")

 ###############################################################################
 # Finally, in several circumstances the input model :math:`\mathbf{x}` may
@@ -206,7 +209,7 @@
 xest, istop, itn, r1norm, r2norm = lsqr(Aop, y, damp=1e-10, iter_lim=10, show=0)[0:5]
 xest = xest.reshape(3, 2)

-print("A= %s" % A)
-print("x= %s" % x)
-print("y= %s" % y)
-print("lsqr solution xest= %s" % xest)
+print(f"A= {A}")
+print(f"x= {x}")
+print(f"y={y}")
+print(f"lsqr solution xest= {xest}")
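
Note that f-string replacement fields accept arbitrary expressions, which is what lets the comma-style prints above (e.g. print("A^-1=", Aop.inv().todense())) collapse into a single literal. A quick sketch with numpy standing in for the pylops operator, for illustration only:

import numpy as np

A = np.array([[1.0, 2.0], [3.0, 4.0]])

# method calls and chained attribute access are fine inside the braces
print(f"A^-1= {np.linalg.inv(A)}")
print(f"trace= {A.trace()}")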
6 changes: 3 additions & 3 deletions examples/plot_pad.py
@@ -23,9 +23,9 @@
 y = Pop * x
 xadj = Pop.H * y

-print("x = %s " % x)
-print("P*x = %s " % y)
-print("P'*y = %s " % xadj)
+print(f"x = {x}")
+print(f"P*x = {y}")
+print(f"P'*y = {xadj}")

 ###############################################################################
 # We move now to a multi-dimensional case. We pad the input model
2 changes: 1 addition & 1 deletion examples/plot_regr.py
@@ -122,7 +122,7 @@
     tolIRLS=tolIRLS,
     returnhistory=True,
 )
-print("IRLS converged at %d iterations..." % nouter)
+print(f"IRLS converged at {nouter} iterations...")

 plt.figure(figsize=(5, 7))
 plt.plot(
10 changes: 5 additions & 5 deletions examples/plot_stacking.py
@@ -253,12 +253,12 @@
 yop = ABop * x
 xinv = ABop / yop

-print("AB = \n", AB)
+print(f"AB = \n {AB}")

-print("x = ", x)
-print("y = ", y)
-print("yop = ", yop)
-print("xinv = ", x)
+print(f"x = {x}")
+print(f"y = {y}")
+print(f"yop = {yop}")
+print(f"xinv = {xinv}")

 ###############################################################################
 # We can also use :py:class:`pylops.Kronecker` to do something more
13 changes: 6 additions & 7 deletions examples/plot_zero.py
@@ -72,10 +72,9 @@
 y = Zop * x
 xadj = Zop.H * y

-print("x = %s" % x)
-print("0*x = %s" % y)
-print("0'*y = %s" % xadj)
-
+print(f"x = {x}")
+print(f"0*x = {y}")
+print(f"0'*y = {xadj}")

 ###############################################################################
 # and model bigger than data
@@ -86,9 +85,9 @@
 y = Zop * x
 xadj = Zop.H * y

-print("x = %s" % x)
-print("0*x = %s" % y)
-print("0'*y = %s" % xadj)
+print(f"x = {x}")
+print(f"0*x = {y}")
+print(f"0'*y = {xadj}")

 ###############################################################################
 # Note that this operator can be useful in many real-life applications when for