import numpy as np
import sympy
sympy.init_printing()
from scipy import linalg as la
from scipy import optimize
import matplotlib as mpl
import matplotlib.pyplot as plt
Appendix D — Equation Solving
- In this appendix, we use `sympy` for solving equations symbolically, when possible, and the `linalg` module from the `scipy` library for numerically solving systems of linear equations. For tackling nonlinear problems, we will use the root-finding functions in the `optimize` module of `scipy`.
D.1 Importing modules
print('numpy: ', np.__version__)
print('sympy: ', sympy.__version__)
from scipy import __version__
print('scipy: ', __version__)
print('matplotlib: ', mpl.__version__)
numpy: 1.23.5
sympy: 1.12
scipy: 1.10.1
matplotlib: 3.8.0
D.2 System of linear equations
\[ \begin{pmatrix} a_{11} & a_{12} & \cdots & a_{1n} \\ a_{21} & a_{22} & \cdots & a_{2n} \\ \vdots & \vdots & \ddots & \vdots \\ a_{m1} & a_{m2} & \cdots & a_{mn} \end{pmatrix} \begin{pmatrix} x_1\\ x_2\\ \vdots\\ x_n \end{pmatrix}= \begin{pmatrix} b_1\\ b_2\\ \vdots\\ b_m \end{pmatrix}\]
or simply
\[\mathbf{A}\mathbf{x}=\mathbf{b}\]
D.2.1 Square system
\[ \begin{aligned} 2 x_1 + 3 x_2 &= 4\\ 5 x_1 + 4 x_2 &= 3 \end{aligned} \]
A = np.array([[2, 3], [5, 4]])
b = np.array([4, 3])
x = la.solve(A, b)
x
array([-1., 2.])
x1 = np.linspace(-4, 2, 100)
x2_1 = (4 -2*x1) /3
x2_2 = (3 -5*x1) /4
fig, ax = plt.subplots(figsize=(6, 4))

ax.plot(x1, x2_1, 'r', lw=2, label=r"$2x_1+3x_2=4$")
ax.plot(x1, x2_2, 'b', lw=2, label=r"$5x_1+4x_2=3$")
ax.plot(x[0], x[1], 'ko', lw=2)
ax.annotate("""The intersection point of
the two lines is the solution
of the equation system""",
            xy=(x[0], x[1]),
            xycoords='data',
            xytext=(-120, -75),
            textcoords='offset points',
            arrowprops=dict(arrowstyle="->",
                            connectionstyle="arc3, rad=-0.3"))

ax.set_xlim([-4, 2])
ax.set_ylim([-2, 6])
ax.tick_params(which='both', direction='in')
ax.set_xlabel(r"$x_1$", fontsize=16)
ax.set_ylabel(r"$x_2$", fontsize=16)
ax.legend()
D.2.1.1 Symbolic approach
A = sympy.Matrix([[2, 3], [5, 4]])
b = sympy.Matrix([4, 3])
A.condition_number()
\(\displaystyle \frac{\sqrt{2 \sqrt{170} + 27}}{\sqrt{27 - 2 \sqrt{170}}}\)
sympy.N(_)
\(\displaystyle 7.58240137440151\)
L, U, _ = A.LUdecomposition()
L, U, L * U
\(\displaystyle \left( \left[\begin{matrix}1 & 0\\\frac{5}{2} & 1\end{matrix}\right], \ \left[\begin{matrix}2 & 3\\0 & - \frac{7}{2}\end{matrix}\right], \ \left[\begin{matrix}2 & 3\\5 & 4\end{matrix}\right]\right)\)
x = A.solve(b); x  # equivalent to A.LUsolve(b)
\(\displaystyle \left[\begin{matrix}-1\\2\end{matrix}\right]\)
D.2.1.2 Numerical approach
A = np.array([[2, 3], [5, 4]])
b = np.array([4, 3])
np.linalg.cond(A)
\(\displaystyle 7.58240137440152\)
P, L, U = la.lu(A)
P, L, U, P @ (L @ U)
(array([[0., 1.],
[1., 0.]]),
array([[1. , 0. ],
[0.4, 1. ]]),
array([[5. , 4. ],
[0. , 1.4]]),
array([[2., 3.],
[5., 4.]]))
la.solve(A, b)
array([-1., 2.])
The advantage of using `sympy` is of course that we may obtain exact results, and we can also include symbolic variables in the matrices. However, not all problems are solvable symbolically, or the symbolic solution may be exceedingly lengthy. The advantage of using a numerical approach with `numpy`/`scipy`, on the other hand, is that we are guaranteed to obtain a result, although it will be an approximate solution due to floating-point errors. See the code below for an example that illustrates the differences between the symbolic and numerical approaches, and for an example that shows that numerical approaches can be sensitive for equation systems with large condition numbers.
In this example, we solve the equation system
\[ \begin{pmatrix} 1 & \sqrt{p}\\ 1 & \frac{1}{\sqrt{p}} \end{pmatrix} \begin{pmatrix} x_1 \\ x_2 \end{pmatrix}= \begin{pmatrix} 1 \\ 2 \end{pmatrix} \]
which for \(p=1\) is singular and for \(p\) in the vicinity of one is ill-conditioned.
A comparison between this symbolic solution and the numerical solution is shown in the figure below. Here the errors in the numerical solution are due to floating-point errors, and the numerical errors are significantly larger in the vicinity of \(p=1\), where the system has a large condition number. Also, if there are other sources of errors in either \(\mathbf{A}\) or \(\mathbf{b}\), the corresponding errors in \(\mathbf{x}\) can be even more severe.
# Symbolic problem specification
p = sympy.symbols("p", positive=True)
A = sympy.Matrix([[1, sympy.sqrt(p)], [1, 1/sympy.sqrt(p)]])
b = sympy.Matrix([1, 2])

# Solve symbolically
x_sym_sol = A.solve(b)
x_sym_sol.simplify()
x_sym_sol
\(\displaystyle \left[\begin{matrix}\frac{2 p - 1}{p - 1}\\- \frac{\sqrt{p}}{p - 1}\end{matrix}\right]\)
Acond = A.condition_number().simplify()
Acond
\(\displaystyle \frac{\max\left(\frac{\sqrt{2} \sqrt{\left(p + 1\right)^{2} - \sqrt{p^{4} + 14 p^{2} + 1}}}{2 \sqrt{p}}, \frac{\sqrt{2} \sqrt{\left(p + 1\right)^{2} + \sqrt{p^{4} + 14 p^{2} + 1}}}{2 \sqrt{p}}\right)}{\min\left(\frac{\sqrt{2} \sqrt{\left(p + 1\right)^{2} - \sqrt{p^{4} + 14 p^{2} + 1}}}{2 \sqrt{p}}, \frac{\sqrt{2} \sqrt{\left(p + 1\right)^{2} + \sqrt{p^{4} + 14 p^{2} + 1}}}{2 \sqrt{p}}\right)}\)
# Function for solving numerically
AA = lambda p: np.array([[1, np.sqrt(p)], [1, 1/np.sqrt(p)]])
bb = np.array([1, 2])
x_num_sol = lambda p: np.linalg.solve(AA(p), bb)
# Graph the difference between the symbolic (exact)
# and numerical results.
p_vec = np.linspace(0.9, 1.1, 200)

fig, axes = plt.subplots(2, 1, figsize=(6, 8))

for n in range(2):
    x_sym = np.array([x_sym_sol[n].subs(p, pp).evalf()
                      for pp in p_vec])
    x_num = np.array([x_num_sol(pp)[n] for pp in p_vec])
    axes[0].plot(p_vec, (x_num - x_sym) /x_sym, 'k')

axes[0].set_title("Error in solution\n"
                  "(numerical - symbolic) /symbolic")
# axes[0].set_xlabel(r'$p$', fontsize=12)
axes[0].set_xlim([0.9, 1.1])
axes[0].set_ylim([-2.0e-13, 4.0e-13])
axes[0].tick_params(which='both', direction='in')
axes[0].tick_params(axis='x', pad=7)
axes[0].set_xticks(np.arange(0.9, 1.1, 0.05))

axes[1].plot(p_vec, [Acond.subs(p, pp).evalf()
                     for pp in p_vec])
axes[1].set_title("Condition number")
axes[1].set_xlabel(r'$p$', fontsize=12)
axes[1].set_xlim([0.9, 1.1])
axes[1].set_ylim([0, 9000])
axes[1].tick_params(which='both', direction='in')
axes[1].tick_params(axis='x', pad=7)
axes[1].set_xticks(np.arange(0.9, 1.1, 0.05))
axes[1].set_yticks([3000, 6000, 9000])
D.2.2 Rectangular system
D.2.2.1 Under-determined
Rectangular systems, where the \(m \times n\) coefficient matrix has \(m \neq n\), can be either under-determined or over-determined.
Under-determined systems have more variables than equations, so the solution cannot be fully determined. Therefore, for such a system, the solution must be given in terms of the remaining free variables. This makes it difficult to treat this type of problem numerically, but a symbolic approach can often be used instead. For example, consider the under-determined linear equation system
\[ \begin{pmatrix} 1 & 2 & 3\\ 4 & 5 & 6 \end{pmatrix} \begin{pmatrix} x_1 \\ x_2 \\ x_3 \end{pmatrix}= \begin{pmatrix} 7 \\ 8 \end{pmatrix} \]
= sympy.symbols("x_1, x_2, x_3")
x_vars = sympy.Matrix(x_vars)
x = sympy.Matrix([[1, 2, 3], [4, 5, 6]])
A = sympy.Matrix([7, 8])
b *x - b, x_vars) sympy.solve(A
\(\displaystyle \left\{ x_{1} : x_{3} - \frac{19}{3}, \ x_{2} : \frac{20}{3} - 2 x_{3}\right\}\)
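Numerically, a common convention for under-determined systems is to single out the minimum-norm solution from the infinite solution set, for example via the pseudoinverse. A minimal sketch, reusing the same system as above:

```{python}
A = np.array([[1, 2, 3], [4, 5, 6]])
b = np.array([7, 8])

# minimum-norm solution of the under-determined system
x_mn = np.linalg.pinv(A) @ b

# check that it satisfies all of the equations
np.allclose(A @ x_mn, b)
```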
D.2.2.2 Over-determined: Least squares
It is often interesting to find an approximate solution to an over-determined system. An example of when this situation arises is data fitting: say we have a model where a variable \(y\) is a quadratic polynomial in the variable \(x\), so that \(y = a_0 +a_1 x +a_2 x^2\), and we would like to fit this model to experimental data.
Here \(y\) is nonlinear in \(x\), but \(y\) is linear in the three unknown coefficients \(a_0\), \(a_1\) and \(a_2\), and this fact can be used to write the model as a linear equation system. If we collect data for \(m\) pairs \(\{ x_i, y_i \}_{i=1}^m\) of the variables \(x\) and \(y\), we can write the model as an \(m \times 3\) equation system:
\[ \begin{pmatrix} 1 & x_1 & x_1^2\\ \vdots & \vdots & \vdots \\ 1 & x_m & x_m^2 \end{pmatrix} \begin{pmatrix} a_0 \\[4pt] a_1 \\[4pt] a_3 \end{pmatrix}= \begin{pmatrix} y_1 \\ \vdots \\ y_m \end{pmatrix} \]
For \(m > 3\), there is in general no exact solution, and we need to introduce an approximate solution that give a best fit for the over-determined system
A natural definition of best fit for the over-determined system \(\mathbf{Ax} \approx \mathbf{b}\), is to minimize the sum of square error,
\[ \min_x \sum_{i=1}^m r_i^2 \]
where \(\mathbf{r} = \mathbf{b} -\mathbf{Ax}\) is the residual vector. This leads to the least-squares solution of the problem \(\mathbf{Ax} \approx \mathbf{b}\), which minimizes the distance between the model and the data points.
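Setting the gradient of \(\lVert \mathbf{b} -\mathbf{Ax} \rVert^2\) with respect to \(\mathbf{x}\) to zero shows that the least-squares solution satisfies the normal equations,
\[ \mathbf{A}^T\mathbf{A}\,\mathbf{x} = \mathbf{A}^T\mathbf{b}, \]
which determine \(\mathbf{x}\) uniquely whenever \(\mathbf{A}\) has full column rank.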
In `sympy`, we can solve for the least-squares solution of an over-determined system using the `solve_least_squares` method, and for numerical problems we can use the `scipy` function `la.lstsq`.
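A minimal sketch of the symbolic route, using a small made-up \(3 \times 2\) system (the data here is ours, chosen only for illustration):

```{python}
A = sympy.Matrix([[1, 2], [3, 4], [5, 6]])
b = sympy.Matrix([5, 11, 18])

# exact (rational) least-squares solution of the over-determined system
A.solve_least_squares(b)
```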
np.random.seed(1234)

# define true model parameters
m = 100
x = np.linspace(-1, 1, m)
a, b, c = 1, 2, 3
y_exact = a +b*x +c*x**2

# simulate noisy data points
X = 1 -2*np.random.rand(m)
Y = a +b*X +c*X**2 +np.random.randn(m)

# fit the data to the model using linear least square
# see np.vander for alternative
A = np.vstack([X**0, X**1, X**2])
sol, r, rank, sv = la.lstsq(A.T, Y)
y_fit = sol[0] +sol[1]*x +sol[2]*x**2
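As the comment above suggests, `np.vander` can build the same design matrix directly; a brief sketch reusing `X`, `Y`, and `sol`:

```{python}
# Vandermonde matrix with columns X**0, X**1, X**2
A_alt = np.vander(X, 3, increasing=True)
sol_alt, _, _, _ = la.lstsq(A_alt, Y)

# should agree with the coefficients obtained above
np.allclose(sol_alt, sol)
```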
fig, ax = plt.subplots(figsize=(6, 4))

ax.plot(X, Y, 'go', alpha=0.5,
        label='Simulated data')
ax.plot(x, y_exact, 'r', lw=2,
        label='True value $y = 1 + 2x + 3x^2$')
ax.plot(x, y_fit, 'b', lw=2,
        label='Least square fit')
ax.set_xlabel(r"$x$", fontsize=12)
ax.set_ylabel(r"$y$", fontsize=12)
ax.legend(loc=2)
# fit the data to the model using linear least square:
# 1st order polynomial
A = np.vstack([X**n for n in range(2)])
sol, r, rank, sv = la.lstsq(A.T, Y)
y_fit1 = sum([s*x**n for n, s in enumerate(sol)])

# 15th order polynomial
A = np.vstack([X**n for n in range(16)])
sol, r, rank, sv = la.lstsq(A.T, Y)
y_fit15 = sum([s*x**n for n, s in enumerate(sol)])
fig, ax = plt.subplots(figsize=(6, 4))

ax.plot(X, Y, 'go', alpha=0.5,
        label='Simulated data')
ax.plot(x, y_exact, 'r', lw=2,
        label='True value $y = 1 + 2x + 3x^2$')
ax.plot(x, y_fit1, 'b', lw=2,
        label='Least square fit [1st order]')
ax.plot(x, y_fit15, 'm', lw=2,
        label='Least square fit [15th order]')
ax.set_xlabel(r"$x$", fontsize=12)
ax.set_ylabel(r"$y$", fontsize=12)
ax.legend(loc=2)
D.3 Eigenvalue problems
= sympy.symbols("epsilon, Delta")
eps, delta = sympy.Matrix([[eps, delta], [delta, -eps]])
H H
\(\displaystyle \left[\begin{matrix}\epsilon & \Delta\\\Delta & - \epsilon\end{matrix}\right]\)
H.eigenvals()
\(\displaystyle \left\{ - \sqrt{\Delta^{2} + \epsilon^{2}} : 1, \ \sqrt{\Delta^{2} + \epsilon^{2}} : 1\right\}\)
H.eigenvects()[0]
\(\displaystyle \left( - \sqrt{\Delta^{2} + \epsilon^{2}}, \ 1, \ \left[ \left[\begin{matrix}\frac{\epsilon}{\Delta} - \frac{\sqrt{\Delta^{2} + \epsilon^{2}}}{\Delta}\\1\end{matrix}\right]\right]\right)\)
H.eigenvects()[1]
\(\displaystyle \left( \sqrt{\Delta^{2} + \epsilon^{2}}, \ 1, \ \left[ \left[\begin{matrix}\frac{\epsilon}{\Delta} + \frac{\sqrt{\Delta^{2} + \epsilon^{2}}}{\Delta}\\1\end{matrix}\right]\right]\right)\)
(_, _, evec1), (_, _, evec2) = H.eigenvects()
sympy.simplify(evec1[0].T*evec2[0])
\(\displaystyle \left[\begin{matrix}0\end{matrix}\right]\)
Obtaining analytical expressions for eigenvalues and eigenvectors using these methods is often very desirable indeed, but unfortunately it only works for small matrices.
Thus, for larger systems we must resort to a fully numerical approach. For this, we can use the `la.eigvals` and `la.eig` functions in the `scipy` linear algebra package. Matrices that are either Hermitian or real symmetric have real-valued eigenvalues, and for such matrices it is advantageous to instead use the functions `la.eigvalsh` and `la.eigh`, which guarantee that the eigenvalues are returned in a `numpy` array with real values.
A = np.array([[1, 3, 5], [3, 5, 3], [5, 3, 9]])
A
array([[1, 3, 5],
[3, 5, 3],
[5, 3, 9]])
la.eigvals(A)
array([13.35310908+0.j, -1.75902942+0.j, 3.40592034+0.j])
evals, evecs = la.eig(A)
evecs
array([[ 0.42663918, 0.90353276, -0.04009445],
[ 0.43751227, -0.24498225, -0.8651975 ],
[ 0.79155671, -0.35158534, 0.49982569]])
la.eigvalsh(A)
array([-1.75902942, 3.40592034, 13.35310908])
evals, evecs = la.eigh(A)
evecs
array([[ 0.90353276, -0.04009445, 0.42663918],
[-0.24498225, -0.8651975 , 0.43751227],
[-0.35158534, 0.49982569, 0.79155671]])
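As a quick consistency check, each column of `evecs` should be an eigenvector of `A` with the corresponding entry of `evals` as eigenvalue, i.e. \(\mathbf{A}\mathbf{v}_k = \lambda_k \mathbf{v}_k\):

```{python}
# broadcasting scales column k of evecs by evals[k]
np.allclose(A @ evecs, evecs * evals)
```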
D.4 Nonlinear equation
- A nonlinear equation can always be written in the form \(f(x)=0\), where \(f(x)\) is a nonlinear function and we seek the value of \(x\) (which can be a scalar or a vector) such that \(f(x)\) is zero. This \(x\) is called a root of \(f(x)=0\), and equation solving is therefore often referred to as root finding.
D.4.1 Univariate equation
- In `sympy`, we can solve many analytically solvable univariate nonlinear equations using the `sympy.solve` function. For example,
= sympy.symbols("x, a, b, c") x, a, b, c
sympy.solve(a + b*x + c*x**2, x)
\(\displaystyle \left[ \frac{- b - \sqrt{- 4 a c + b^{2}}}{2 c}, \ \frac{- b + \sqrt{- 4 a c + b^{2}}}{2 c}\right]\)
sympy.solve(a*sympy.cos(x) - b*sympy.sin(x), x)
\(\displaystyle \left[ - 2 \operatorname{atan}{\left(\frac{b - \sqrt{a^{2} + b^{2}}}{a} \right)}, \ - 2 \operatorname{atan}{\left(\frac{b + \sqrt{a^{2} + b^{2}}}{a} \right)}\right]\)
```{python}
sympy.solve(sympy.sin(x)-x, x)
```
NotImplementedError: multiple generators [x, sin(x)]
No algorithms are implemented to solve equation -x + sin(x)
- In this type of situation, we need to resort to various numerical techniques. As a first step, it is often very useful to graph the function. This can give important clues about the number of solutions to the equation and their approximate locations.
x = np.linspace(-2, 2, 400)

# four examples of nonlinear functions
f1 = x**2 -x -1
f2 = x**3 -3*np.sin(x)
f3 = np.exp(x) -2
f4 = 1 -x**2 +np.sin(50 /(1 +x**2))
# plot each function
fig, axes = plt.subplots(4, 1, figsize=(4, 12), sharex=True)

for n, f in enumerate([f1, f2, f3, f4]):
    axes[n].plot(x, f, lw=1.5)
    axes[n].axhline(0, ls=':', color='k')
    axes[n].set_xlim(-2, 2)
    axes[n].set_ylim(-5, 5)
    axes[n].set_xticks([-2, -1, 0, 1, 2])
    axes[n].tick_params(which='both', direction='in')
    axes[n].set_ylabel(r'$f(x)$', fontsize=12)

axes[0].set_xlabel(r'$x$', fontsize=12)

titles = [r'$f(x) = x^2 -x -1$',
          r'$f(x) = x^3 -3 \sin(x)$',
          r'$f(x) = \exp(x) -2$',
          r'$f(x) = \sin\left( 50/(1 +x^2) \right) +1 -x^2$']
for n, title in enumerate(titles):
    axes[n].set_title(title)

fig.tight_layout()
D.4.2 Bisection method
# define a function, desired tolerance
# and starting interval [a, b]
x = np.linspace(-2.1, 2.1, 1000)
f = lambda x: np.exp(x) - 2
tol = 0.1
a, b = -2, 2
# graph the function f
fig, ax = plt.subplots(1, 1, figsize=(6.8, 4))

ax.plot(x, f(x), lw=1.5)
ax.axhline(0, ls=':', color='k')
ax.set_xlabel(r'$x$', fontsize=12)
ax.set_ylabel(r'$f(x)$', fontsize=12)
ax.set_ylim(-3, 7)
ax.set_xticks([-2, -1, 0, 1, 2])
ax.tick_params(which='both', direction='in')

# find the root using the bisection method and visualize
# the steps in the method in the graph
fa, fb = f(a), f(b)

ax.plot(a, fa, 'ko'), ax.vlines(a, 0, fa, ls=':')
ax.plot(b, fb, 'ko'), ax.vlines(b, 0, fb, ls=':')
ax.text(a, fa -0.8, r"$a$", ha='center', fontsize=12)
ax.text(b, fb +0.5, r"$b$", ha='center', fontsize=12)

n = 1
while b - a > tol:
    m = a + (b - a)/2
    fm = f(m)

    ax.plot(m, fm, 'ko')
    ax.plot([m, m], [0, fm], color='g', ls=':')
    ax.text(m, fm -0.5, rf"$m_{n}$", ha='center', fontsize=8)
    n += 1

    if np.sign(fa) == np.sign(fm):
        a, fa = m, fm
    else:
        b, fb = m, fm

ax.plot(m, fm, 'r*', markersize=10)
ax.annotate(f"Root approximately at {m: .3f}",
            fontsize=12,
            family="serif",
            xy=(a, fm),
            xycoords='data',
            xytext=(-150, +50),
            textcoords='offset points',
            arrowprops=dict(arrowstyle="->",
                            connectionstyle="arc3, rad=-.5"))

ax.set_title("Bisection method")
plt.show()
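Stripped of the visualization code, the bisection algorithm itself fits in a few lines. A minimal sketch (the function name `bisect_root` is ours, not from `scipy`):

```{python}
def bisect_root(f, a, b, tol=1e-8):
    """Find a root of f in [a, b]; assumes f(a) and f(b) differ in sign."""
    fa = f(a)
    while b - a > tol:
        m = a + (b - a)/2      # midpoint of the current bracket
        fm = f(m)
        if np.sign(fa) == np.sign(fm):
            a, fa = m, fm      # the sign change, and hence the root, is in [m, b]
        else:
            b = m              # the sign change is in [a, m]
    return a + (b - a)/2

bisect_root(lambda x: np.exp(x) - 2, -2, 2)  # should approach log(2)
```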
D.4.3 Newton’s method
= sympy.symbols("x")
s_x = sympy.exp(s_x) -2
s_f
= lambda x: sympy.lambdify(s_x, s_f, 'numpy')(x)
f = lambda x: sympy.lambdify(s_x, sympy.diff(s_f, s_x), 'numpy')(x)
fp
= np.linspace(-1.9, 2.1, 1000)
x
# define a function, desired tolerance and starting point xk
= 0.01
tol = 2 xk
# setup a graph for visualizing the root finding steps
fig, ax = plt.subplots(1, 1, figsize=(6.8, 4))

ax.plot(x, f(x))
ax.axhline(0, ls=':', color='k')
ax.set_xticks([-2, -1, 0, 1, 2])
ax.set_ylim(-3, 7)
ax.set_xlabel(r'$x$', fontsize=12)
ax.set_ylabel(r'$f(x)$', fontsize=12)
ax.tick_params(which='both', direction='in')

# repeat Newton's method
# until convergence to the desired tolerance has been reached
n = 0
while f(xk) > tol:
    xk_new = xk -f(xk) /fp(xk)

    ax.plot([xk, xk], [0, f(xk)], color='k', ls=':')
    ax.plot(xk, f(xk), 'ko')
    ax.text(xk, -.5, rf'$x_{n}$', ha='center')
    ax.plot([xk, xk_new], [f(xk), 0], 'g:')

    xk = xk_new
    n += 1

ax.plot(xk, f(xk), 'ro', markersize=10)
ax.annotate(f"Root approximately at {xk: .3f}",
            fontsize=12,
            family="serif",
            xy=(xk, f(xk)),
            xycoords='data',
            xytext=(-150, +50),
            textcoords='offset points',
            arrowprops=dict(arrowstyle="->",
                            connectionstyle="arc3, rad=-.5"))

ax.set_title("Newton's method")
plt.show()
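The same iteration without the plotting, as a minimal sketch (`newton_root` is our name; it assumes the derivative does not vanish at the iterates):

```{python}
def newton_root(f, fp, x0, tol=1e-8, maxiter=50):
    """Newton's iteration: x_{k+1} = x_k - f(x_k)/f'(x_k)."""
    xk = x0
    for _ in range(maxiter):
        fx = f(xk)
        if abs(fx) < tol:      # converged to the desired tolerance
            break
        xk = xk - fx/fp(xk)
    return xk

newton_root(lambda x: np.exp(x) - 2, lambda x: np.exp(x), 2.0)
```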
- The `scipy` `optimize` module provides multiple functions for numerical root finding. The `optimize.bisect` and `optimize.newton` functions implement variants of the bisection and Newton methods.
f = lambda x: np.exp(x) -2
optimize.bisect(f, -2, 2)
\(\displaystyle 0.693147180560118\)
x_root_guess = 2
fprime = lambda x: np.exp(x)
optimize.newton(f, x_root_guess)
\(\displaystyle 0.693147180559946\)
optimize.newton(f, x_root_guess, fprime=fprime)
\(\displaystyle 0.693147180559945\)
- The `scipy` `optimize` module provides additional functions for root finding. In particular, the `optimize.brentq` and `optimize.brenth` functions are variants of the bisection method that also work on an interval where the function changes sign.
optimize.brentq(f, -2, 2)
\(\displaystyle 0.693147180559945\)
optimize.brenth(f, -2, 2)
\(\displaystyle 0.693147180559938\)
optimize.ridder(f, -2, 2)
\(\displaystyle 0.69314718055904\)
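Recent `scipy` versions also offer `optimize.root_scalar` as a unified interface to these methods; a brief sketch reusing `f` and `fprime` from above:

```{python}
# bracketing (Brent) method on an interval where f changes sign
optimize.root_scalar(f, bracket=[-2, 2], method='brentq').root

# Newton's method from an initial guess with an explicit derivative
optimize.root_scalar(f, x0=2, fprime=fprime, method='newton').root
```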
D.5 System of nonlinear equations
For example, consider the following system of two multivariate and nonlinear equations:
\[ \begin{aligned} y - x^3 -2x^2 +1 &= 0\\ y + x^2 -1 &= 0 \end{aligned} \]
def f(x):
    return [x[1] -x[0]**3 -2*x[0]**2 +1, x[1] +x[0]**2 -1]
optimize.fsolve(f, [1, 1])
array([0.73205081, 0.46410162])
optimize.broyden1(f, [1, 1])
array([0.73205046, 0.46410254])
optimize.broyden2(f, [1, 1])
array([0.73205083, 0.4641017 ])
= sympy.symbols("x, y")
x, y = sympy.Matrix([y - x**3 -2*x**2 + 1, y + x**2 - 1])
f_mat f_mat.jacobian(sympy.Matrix([x, y]))
\(\displaystyle \left[\begin{matrix}- 3 x^{2} - 4 x & 1\\2 x & 1\end{matrix}\right]\)
def f_jacobian(x):
    return [[-3*x[0]**2 - 4*x[0], 1], [2*x[0], 1]]
optimize.fsolve(f, [1, 1], fprime=f_jacobian)
array([0.73205081, 0.46410162])
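For systems, `scipy` likewise provides the unified `optimize.root` interface, which returns an `OptimizeResult`; a brief sketch with the same `f` and Jacobian:

```{python}
# 'hybr' is the same MINPACK algorithm that fsolve uses
sol = optimize.root(f, [1, 1], jac=f_jacobian, method='hybr')
sol.x
```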
#def f(x):
#    return [x[1] -x[0]**3 -2*x[0]**2 +1, x[1] +x[0]**2 -1]
x = np.linspace(-3, 2, 5000)
y1 = x**3 +2*x**2 -1
y2 = -x**2 +1
fig, ax = plt.subplots(figsize=(6, 4))

ax.plot(x, y1, 'b', lw=1.5, label=r'$y = x^3 +2x^2 -1$')
ax.plot(x, y2, 'g', lw=1.5, label=r'$y =-x^2 +1$')

x_guesses = [[-2, 2], [1, -1], [-2, -5]]
for x_guess in x_guesses:
    sol = optimize.fsolve(f, x_guess)
    ax.plot(sol[0], sol[1], 'r*', markersize=10)

    ax.plot(x_guess[0], x_guess[1], 'ko')
    ax.annotate("",
                xy=(sol[0], sol[1]),
                xytext=(x_guess[0], x_guess[1]),
                arrowprops=dict(arrowstyle="->",
                                linewidth=1.5, linestyle=':'))

ax.legend(loc=0)
ax.set_xlabel(r'$x$', fontsize=12)
ax.set_ylabel(r'$y$', fontsize=12)

plt.show()
import warnings
# treat warnings (e.g. fsolve convergence warnings) as errors,
# so that failed solves below can be caught with try/except
warnings.filterwarnings("error")
#def f(x):
#    return [x[1] -x[0]**3 -2*x[0]**2 +1, x[1] +x[0]**2 -1]
fig, ax = plt.subplots(figsize=(6, 4))

ax.plot(x, y1, 'k', lw=1.5, label=r'$y = x^3 +2x^2 -1$')
ax.plot(x, y2, 'k', lw=1.5, label=r'$y = -x^2 +1$')

sol1 = optimize.fsolve(f, [-2, 2])
sol2 = optimize.fsolve(f, [ 1, -1])
sol3 = optimize.fsolve(f, [-2, -5])

colors = ['r', 'b', 'g']
for m in np.linspace(-4, 3, 80):
    for n in np.linspace(-15, 15, 40):
        x_guess = [m, n]

        try:
            sol = optimize.fsolve(f, x_guess)

            for idx, s in enumerate([sol1, sol2, sol3]):
                if abs(s -sol).max() < 1e-8:
                    ax.plot(sol[0], sol[1],
                            colors[idx]+'*', markersize=10)
                    ax.plot(x_guess[0], x_guess[1],
                            colors[idx]+'.')
        except:
            pass

ax.set_xlim(-4, 3)
ax.set_ylim(-15, 15)
ax.set_xlabel(r'$x$', fontsize=12)
ax.set_ylabel(r'$y$', fontsize=12)

plt.show()