import numpy as np
from numpy import dot, round, eye, diag
from numpy.linalg import qr, eig, norm, inv
# Build a random symmetric matrix and compute its full eigendecomposition.
# Symmetrizing (A + A^T) guarantees real eigenvalues and an orthogonal
# eigenvector matrix V.
N = 4
A = np.random.rand(N, N) - 0.5
A = A + A.T
L, V = eig(A)
print(f'E-values: {round(L, 5)}')
print()  # fixed: bare `print` is a no-op expression in Python 3
print(f'E-vectors:\n{round(V, 5)}')
# Sample output:
# E-values: [ 1.09112  0.02704 -0.43916 -1.18786]
# E-vectors: [[ 0.48901  0.8447  -0.21743 -0.00886] [ 0.49137 -0.27815 -0.00914  0.82529]
#             [-0.69334  0.30092 -0.41106  0.50967] [-0.19676  0.34433  0.88525  0.24301]]
# Verify A v = lambda v for the first eigenpair.  The original notebook
# relied on bare expressions to display values; in a script they do
# nothing, so print them explicitly.
print(A @ V[:, 0])   # A times the eigenvector
# array([ 0.53356333,  0.53614047, -0.75650994, -0.21468721])
print(L[0] * V[:, 0])  # eigenvalue times the eigenvector
# array([ 0.53356333,  0.53614047, -0.75650994, -0.21468721])
# For symmetric A the eigenvector matrix V is orthogonal, so V^T A V
# should be the diagonal matrix of eigenvalues.
print(round(V.T @ (A @ V), 5))
# Sample output:
# [[ 1.09112  0.      -0.       0.     ] [ 0.       0.02704  0.      -0.     ]
#  [-0.       0.      -0.43916 -0.     ] [ 0.      -0.      -0.      -1.18786]]
# Use an eigenvector to find its eigenvalue
# using the Rayleigh quotient r(x) = x^T A x / (x^T x),
# which equals the eigenvalue exactly when x is an eigenvector.
x = V[:, 1]
r = x.T @ A @ x / (x.T @ x)
print(f'Rayleigh quotient: {r}')
print(f'Eigenvalue: {L[1]}')
# Sample output:
# Rayleigh quotient: 0.027036688784287468  Eigenvalue: 0.02703668878428746
# To find the largest eigenvalue (in magnitude).
def PowerIteration(M):
    """Estimate the largest-magnitude eigenvalue of M by power iteration.

    Runs a fixed 10 iterations starting from a random unit vector and
    returns (eigenvalue estimate, unit eigenvector estimate).  The
    eigenvalue is recovered with the Rayleigh quotient v^T (M v); no
    division by v^T v is needed because v is normalized each step.

    NOTE(review): indentation was lost in the original paste; bodies
    restored per the algorithm's standard structure.
    """
    N = len(M)
    v = np.random.random((N,))
    v = v / norm(v)
    for k in range(10):
        w = M @ v
        v = w / norm(w)
    el = v.T @ (M @ v)
    return el, v
# Demo on A.  NOTE(review): with only 10 iterations and two eigenvalues
# of similar magnitude (1.09112 vs -1.18786 above), the iteration may
# not yet have converged — the pasted sample output below is such a case
# (-0.74 is not an eigenvalue of A).
el, v = PowerIteration(A)
print(f'Largest e-value: {el}')
print(f'Corresponding e-vector:\n {v}')
# Sample output:
# Largest e-value: -0.7401366753549805
# Corresponding e-vector: [0.20880724 0.95758637 0.14957603 0.13059439]
# Fancier version of PowerIteration, with
# added bells and whistles!
def PowerIteration(M, iters=10, printout=False):
    """Power iteration with a configurable iteration count.

    Parameters
    ----------
    M : square array-like
        Matrix whose largest-magnitude eigenvalue is sought.
    iters : int, default 10
        Number of power-iteration steps.
    printout : bool, default False
        If True, print the Rayleigh-quotient estimate at every step.

    Returns (eigenvalue estimate, unit eigenvector estimate).
    """
    N = len(M)
    v = np.random.random((N,))
    v = v / norm(v)
    el = v.T @ (M @ v)  # robustness: defined even if iters == 0
    for k in range(iters):
        w = M @ v
        v = w / norm(w)
        el = v.T @ (M @ v)
        if printout:
            print(f'{k}: {el}')
    return el, v
# To find the smallest eigenvalue of A, find the largest eigenvalue of A^(-1), and reciprocate it.
# Inverse iteration: A^(-1) has eigenvalues 1/lambda_i with the same
# eigenvectors, so the largest eigenvalue of A^(-1) is the reciprocal of
# A's smallest-magnitude eigenvalue.
L, V = eig(A)
print(f'E-values of A: {round(L, 5)}\n')
print(f'E-vectors of A:\n{round(V, 5)}\n')
el, v = PowerIteration(inv(A), iters=20)
print(f'Largest e-value of A^(-1): {el}')
print(f'Smallest e-value of A: {1./el}')
print(f'Corresponding e-vector:\n {v}')
# Sample output (abridged):
# Largest e-value of A^(-1): 36.98677778105565
# Smallest e-value of A: 0.02703668878428746
# Corresponding e-vector: [ 0.84469895 -0.27815431  0.30091833  0.34432836]
# To find an eigenvalue close to mu (shift-and-invert).
# Shift-and-invert: B = A - mu*I has the eigenvalues of A shifted by
# -mu, so inverse iteration on B locates the eigenvalue of A closest
# to mu; unshift by adding mu back.
N = 6
A = np.random.normal(size=(N, N))
A = A + A.T  # symmetrize for a real spectrum
L_A, V_A = eig(A)
mu = 1.2
print(f'Shift A by {mu}')
B = A - mu*eye(N)  # Shift the matrix by mu
L_B, V_B = eig(B)
print(f'Matrix A:')
print(A)
print(f'E-values of A: {L_A}')
print(f'Matrix B:')
print(B)
print(f'E-values of B: {L_B}')  # e-vals of shifted matrix are simply shifted by mu
# Apply Inverse Iteration to get smallest e-value, then unshift
print('\nNow using Inverse Iteration')
Binv_largest, v = PowerIteration(inv(B))
print(f'Largest e-value of B^(-1): {Binv_largest}')  # fixed typo: "Largerst"
B_smallest = 1./Binv_largest
print(f'Thus, smallest e-value of B: {B_smallest}')
lam = B_smallest + mu
print(f'Unshifting, an e-value of A is: {lam}')
# Sample output (abridged):
# E-values of A: [-5.33358367 -3.25496128 -1.56580018  4.22304388  3.35925498  1.06643698]
# E-values of B: [-6.53358367 -4.45496128 -2.76580018  3.02304388  2.15925498 -0.13356302]
# Largest e-value of B^(-1): -7.487102383917762
# Thus, smallest e-value of B: -0.13356301927271522
# Unshifting, an e-value of A is: 1.0664369807272847
# One QR-based similarity transform: Q is orthogonal, so A0 = Q^T A Q
# has the same eigenvalues as A.
Q, R = qr(A)
A0 = Q.T @ A @ Q
print(round(A0, 4))
# Sample output (abridged):
# [[ 2.1967 -0.8466  1.7841  0.3954  0.904   1.9322] ... ]
# QR (eigenvalue) iteration: repeatedly factor Ak = QR and set Ak = RQ.
# For symmetric A, Ak converges toward (block-)diagonal form with the
# eigenvalues on the diagonal; accumulating the Q factors in V gives the
# eigenvectors.
L, V_eig = eig(A)  # reference answer from eig; fixed: original stored this
                   # in V and then clobbered it with np.eye(N) below, so
                   # the "Actual E-vectors" printout showed the QR result twice
Ak = A.copy()
V = np.eye(N)
iters = 200
for k in range(iters):
    Q, R = qr(Ak)
    Ak = R @ Q
    V = V @ Q  # accumulate the orthogonal factors
print(round(Ak, 4))
print('E-values:\n' + str(np.round(np.diag(Ak), 5)))
print('E-vectors:\n' + str(np.round(V, 5)))
print(f'Actual E-values: \n{round(L, 5)}')
print()  # fixed: bare `print` is a no-op expression in Python 3
print(f'Actual E-vectors:\n{round(V_eig, 5)}')
# Sample output (abridged): Ak is nearly diagonal apart from a 2x2 block
# for the close-magnitude pair (3.3565 / -3.2522); diagonal gives
# E-values: [-5.33358  4.22304  3.35651 -3.25222 -1.5658   1.06644]
# vs actual [-5.33358 -3.25496 -1.5658   4.22304  3.35925  1.06644]