Chapter 10: Eigenvalues and Eigenvectors
Learning Objectives
- Understand the definition of eigenvalues and eigenvectors
- Master methods for calculating eigenvalues and eigenvectors
- Understand the geometric meaning of eigenvalues
- Master properties of characteristic polynomials
- Understand applications of eigenvalues in practical problems
Definition of Eigenvalues and Eigenvectors
Basic Definition
For an $n \times n$ matrix $A$, if there exists a non-zero vector $v$ and a scalar $\lambda$ such that
$$Av = \lambda v,$$
then $\lambda$ is called an eigenvalue of the matrix $A$, and $v$ is called an eigenvector corresponding to $\lambda$.
Geometric Meaning
Eigenvectors are the vectors whose direction is preserved (or exactly reversed, for negative eigenvalues) by a linear transformation; the eigenvalue is the factor by which such a vector is scaled.
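As a quick concrete example, for the diagonal matrix
$$A = \begin{pmatrix} 2 & 0 \\ 0 & 3 \end{pmatrix},$$
the standard basis vectors are eigenvectors: $Ae_1 = 2e_1$ and $Ae_2 = 3e_2$, so the eigenvalues are $2$ and $3$.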
```python
import numpy as np
import matplotlib.pyplot as plt
from scipy.linalg import eig

def visualize_eigenvalues_2d():
    """Visualize eigenvalues and eigenvectors of a 2D matrix"""
    # Define a 2x2 symmetric matrix
    A = np.array([[3, 1],
                  [1, 2]])
    # Calculate eigenvalues and eigenvectors
    eigenvalues, eigenvectors = eig(A)
    # A is symmetric, so its spectrum is real; drop the zero imaginary parts
    eigenvalues, eigenvectors = eigenvalues.real, eigenvectors.real
    print("Matrix A:")
    print(A)
    print(f"\nEigenvalues: {eigenvalues}")
    print(f"Eigenvectors:\n{eigenvectors}")

    # Verify the eigenvalue equation Av = λv
    for i in range(len(eigenvalues)):
        lambda_i = eigenvalues[i]
        v_i = eigenvectors[:, i]
        Av = A @ v_i
        lambda_v = lambda_i * v_i
        print(f"\nVerifying eigenvalue {lambda_i:.3f}:")
        print(f"A × v = {Av}")
        print(f"λ × v = {lambda_v}")
        print(f"Equal: {np.allclose(Av, lambda_v)}")

    # Visualization
    fig, axes = plt.subplots(1, 3, figsize=(15, 5))

    # 1. Original vector space
    ax1 = axes[0]
    # Draw unit circle
    theta = np.linspace(0, 2*np.pi, 100)
    unit_circle = np.array([np.cos(theta), np.sin(theta)])
    ax1.plot(unit_circle[0], unit_circle[1], 'b--', alpha=0.5, label='Unit circle')
    # Draw eigenvectors
    for i, (val, vec) in enumerate(zip(eigenvalues, eigenvectors.T)):
        # Normalize eigenvector for display
        vec_normalized = vec / np.linalg.norm(vec) * 2
        ax1.arrow(0, 0, vec_normalized[0], vec_normalized[1],
                  head_width=0.1, head_length=0.1, fc=f'C{i}', ec=f'C{i}',
                  linewidth=3, label=f'Eigenvector {i+1}')
        # Display eigenvalue
        ax1.text(vec_normalized[0]*1.2, vec_normalized[1]*1.2,
                 f'λ={val:.2f}', fontsize=10, ha='center')
    ax1.set_xlim(-3, 3)
    ax1.set_ylim(-3, 3)
    ax1.grid(True, alpha=0.3)
    ax1.set_aspect('equal')
    ax1.legend()
    ax1.set_title('Eigenvectors (Original Space)')

    # 2. Transformed space
    ax2 = axes[1]
    # Transformed unit circle (an ellipse)
    transformed_circle = A @ unit_circle
    ax2.plot(transformed_circle[0], transformed_circle[1], 'r-', linewidth=2, label='Transformed circle')
    # Transformed eigenvectors
    for i, (val, vec) in enumerate(zip(eigenvalues, eigenvectors.T)):
        vec_normalized = vec / np.linalg.norm(vec) * 2
        transformed_vec = A @ vec_normalized
        ax2.arrow(0, 0, transformed_vec[0], transformed_vec[1],
                  head_width=0.1, head_length=0.1, fc=f'C{i}', ec=f'C{i}',
                  linewidth=3, label=f'A×Eigenvector {i+1}')
    ax2.set_xlim(-4, 8)
    ax2.set_ylim(-4, 6)
    ax2.grid(True, alpha=0.3)
    ax2.set_aspect('equal')
    ax2.legend()
    ax2.set_title('After Linear Transformation')

    # 3. Comparison view
    ax3 = axes[2]
    # Transform some general (non-eigen) vectors
    test_vectors = np.array([[1, 0, 1, 1],
                             [0, 1, 1, -1]])
    for i in range(test_vectors.shape[1]):
        original = test_vectors[:, i]
        transformed = A @ original
        # Original vector
        ax3.arrow(0, 0, original[0], original[1],
                  head_width=0.05, head_length=0.05, fc='blue', ec='blue',
                  alpha=0.5, linestyle='--')
        # Transformed vector
        ax3.arrow(0, 0, transformed[0], transformed[1],
                  head_width=0.05, head_length=0.05, fc='red', ec='red',
                  alpha=0.7)
    # Eigenvectors (direction unchanged)
    for i, (val, vec) in enumerate(zip(eigenvalues, eigenvectors.T)):
        vec_normalized = vec / np.linalg.norm(vec) * 1.5
        transformed_vec = A @ vec_normalized
        ax3.arrow(0, 0, vec_normalized[0], vec_normalized[1],
                  head_width=0.1, head_length=0.1, fc=f'C{i}', ec=f'C{i}',
                  linewidth=3, alpha=0.5)
        ax3.arrow(0, 0, transformed_vec[0], transformed_vec[1],
                  head_width=0.1, head_length=0.1, fc=f'C{i}', ec=f'C{i}',
                  linewidth=3)
    ax3.set_xlim(-2, 6)
    ax3.set_ylim(-2, 4)
    ax3.grid(True, alpha=0.3)
    ax3.set_aspect('equal')
    ax3.set_title('Eigenvector Direction Unchanged')

    plt.tight_layout()
    plt.show()

visualize_eigenvalues_2d()
```
Characteristic Polynomial and Characteristic Equation
Characteristic Polynomial
Eigenvalues satisfy the characteristic equation:
$$\det(A - \lambda I) = 0$$
$p(\lambda) = \det(A - \lambda I)$ is called the characteristic polynomial of the matrix $A$.
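NumPy can recover the characteristic polynomial directly; a minimal sketch (note that `np.poly` returns the coefficients of $\det(\lambda I - A)$ in descending powers of $\lambda$, and `np.roots` then recovers the eigenvalues):

```python
import numpy as np

A = np.array([[3, 1],
              [1, 2]])
# Coefficients of det(λI - A), highest power first: λ² - 5λ + 5
coeffs = np.poly(A)
print("Characteristic polynomial coefficients:", coeffs)
# Its roots are exactly the eigenvalues of A
print("Roots:", np.roots(coeffs))
print("np.linalg.eigvals:", np.linalg.eigvals(A))
```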
Algebraic and Geometric Multiplicity
- Algebraic multiplicity: the multiplicity of an eigenvalue as a root of the characteristic polynomial
- Geometric multiplicity: the dimension of the corresponding eigenspace, i.e. the number of linearly independent eigenvectors; it is always between 1 and the algebraic multiplicity (see the numeric check after this list)
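A minimal numeric check of the two multiplicities, using the repeated-eigenvalue matrix from the demo below; the geometric multiplicity is computed as the dimension of the null space of $A - \lambda I$:

```python
import numpy as np

A = np.array([[2, 1],
              [0, 2]])
lam = 2.0
# Algebraic multiplicity: multiplicity of lam as a root of det(λI - A) = (λ - 2)²
coeffs = np.poly(A)
print("Characteristic polynomial coefficients:", coeffs)  # [1, -4, 4]
# Geometric multiplicity: dim null(A - λI) = n - rank(A - λI)
n = A.shape[0]
geometric = n - np.linalg.matrix_rank(A - lam * np.eye(n))
print(f"Geometric multiplicity of λ = {lam}: {geometric}")  # 1, smaller than the algebraic multiplicity 2
```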
```python
def characteristic_polynomial_analysis():
    """Analyze the characteristic polynomial for several matrices"""
    # Define several different matrices
    matrices = {
        "Diagonal matrix": np.array([[3, 0], [0, 2]]),
        "Symmetric matrix": np.array([[2, 1], [1, 2]]),
        "Matrix with repeated eigenvalues": np.array([[2, 1], [0, 2]]),
        "Complex eigenvalue matrix": np.array([[0, -1], [1, 0]])
    }
    for name, A in matrices.items():
        print(f"\n{name}:")
        print(f"Matrix A =\n{A}")
        # Calculate eigenvalues and eigenvectors
        eigenvalues, eigenvectors = eig(A)
        print(f"Eigenvalues: {eigenvalues}")
        # Manually verify the characteristic equation
        print("\nVerifying characteristic equation det(A - λI) = 0:")
        for i, lambda_val in enumerate(eigenvalues):
            matrix_diff = A - lambda_val * np.eye(A.shape[0])
            det_val = np.linalg.det(matrix_diff)
            print(f"λ = {lambda_val:.3f}: det(A - λI) = {det_val:.2e}")
        # Analyze eigenvectors
        print("\nEigenvector analysis:")
        for i, (lambda_val, eigenvec) in enumerate(zip(eigenvalues, eigenvectors.T)):
            print(f"λ{i+1} = {lambda_val:.3f}, v{i+1} = {eigenvec}")
            # Verify Av = λv
            Av = A @ eigenvec
            lambda_v = lambda_val * eigenvec
            print(f"  Verification: ||Av - λv|| = {np.linalg.norm(Av - lambda_v):.2e}")
        print("-" * 50)

characteristic_polynomial_analysis()
```
Eigenvalues of Special Matrices
Eigenvalues of Symmetric Matrices
Eigenvalues of symmetric matrices are all real numbers, and eigenvectors corresponding to different eigenvalues are orthogonal.
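A one-line justification of the orthogonality: if $A^T = A$, $Av_1 = \lambda_1 v_1$, $Av_2 = \lambda_2 v_2$, and $\lambda_1 \neq \lambda_2$, then
$$\lambda_1 (v_1 \cdot v_2) = (A v_1) \cdot v_2 = v_1 \cdot (A v_2) = \lambda_2 (v_1 \cdot v_2),$$
which forces $v_1 \cdot v_2 = 0$.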
Eigenvalues of Orthogonal Matrices
The eigenvalues of an orthogonal matrix all have magnitude 1.
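This follows because an orthogonal matrix preserves lengths: if $Qv = \lambda v$ with $v \neq 0$, then $\|v\| = \|Qv\| = |\lambda|\,\|v\|$, so $|\lambda| = 1$.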
```python
def special_matrices_eigenvalues():
    """Properties of eigenvalues of special matrices"""
    # 1. Symmetric matrix
    # (for symmetric/Hermitian matrices, np.linalg.eigh is the preferred solver)
    symmetric_matrix = np.array([[4, 2, 1],
                                 [2, 3, 2],
                                 [1, 2, 4]])
    print("1. Symmetric matrix:")
    print(symmetric_matrix)
    sym_eigenvals, sym_eigenvecs = eig(symmetric_matrix)
    print(f"Eigenvalues: {sym_eigenvals}")
    print("All eigenvalues are real:", np.all(np.isreal(sym_eigenvals)))
    # Verify orthogonality of eigenvectors
    print("\nVerifying orthogonality of eigenvectors:")
    for i in range(len(sym_eigenvals)):
        for j in range(i+1, len(sym_eigenvals)):
            dot_product = np.dot(sym_eigenvecs[:, i], sym_eigenvecs[:, j])
            print(f"v{i+1} · v{j+1} = {dot_product:.2e}")

    # 2. Skew-symmetric matrix
    antisymmetric_matrix = np.array([[0, 2, -1],
                                     [-2, 0, 3],
                                     [1, -3, 0]])
    print("\n2. Skew-symmetric matrix:")
    print(antisymmetric_matrix)
    antisym_eigenvals, _ = eig(antisymmetric_matrix)
    print(f"Eigenvalues: {antisym_eigenvals}")
    print("Eigenvalues are purely imaginary or zero:", np.allclose(np.real(antisym_eigenvals), 0))

    # 3. Orthogonal matrix (rotation matrix)
    theta = np.pi/4
    rotation_matrix = np.array([[np.cos(theta), -np.sin(theta)],
                                [np.sin(theta), np.cos(theta)]])
    print("\n3. Rotation matrix (θ = 45°):")
    print(rotation_matrix)
    rot_eigenvals, rot_eigenvecs = eig(rotation_matrix)
    print(f"Eigenvalues: {rot_eigenvals}")
    print("Eigenvalue magnitudes:", np.abs(rot_eigenvals))
    print("All eigenvalue magnitudes are 1:", np.allclose(np.abs(rot_eigenvals), 1))

    # 4. Upper triangular matrix
    upper_triangular = np.array([[2, 3, 1],
                                 [0, 4, 2],
                                 [0, 0, 5]])
    print("\n4. Upper triangular matrix:")
    print(upper_triangular)
    upper_eigenvals, _ = eig(upper_triangular)
    diagonal_elements = np.diag(upper_triangular)
    print(f"Eigenvalues: {upper_eigenvals}")
    print(f"Diagonal elements: {diagonal_elements}")
    print("Eigenvalues equal diagonal elements:", np.allclose(np.sort(upper_eigenvals), np.sort(diagonal_elements)))

special_matrices_eigenvalues()
```
Methods for Computing Eigenvalues
Analytical Solution for 2×2 Matrices
For a $2 \times 2$ matrix
$$A = \begin{pmatrix} a & b \\ c & d \end{pmatrix},$$
the characteristic equation is $\lambda^2 - (a+d)\lambda + (ad - bc) = 0$, so the eigenvalues are:
$$\lambda_{1,2} = \frac{(a+d) \pm \sqrt{(a+d)^2 - 4(ad - bc)}}{2} = \frac{\operatorname{tr}(A) \pm \sqrt{\operatorname{tr}(A)^2 - 4\det(A)}}{2}.$$
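A minimal sketch of this closed form (the helper name `eig2x2` is ours, not a library function), cross-checked against `np.linalg.eigvals`:

```python
import numpy as np

def eig2x2(A):
    """Closed-form eigenvalues of a 2x2 matrix via trace and determinant."""
    tr = A[0, 0] + A[1, 1]
    det = A[0, 0] * A[1, 1] - A[0, 1] * A[1, 0]
    # A complex sqrt handles a negative discriminant (complex eigenvalues)
    disc = np.sqrt(complex(tr * tr - 4 * det))
    return (tr + disc) / 2, (tr - disc) / 2

A = np.array([[3.0, 1.0],
              [1.0, 2.0]])
print("Closed form:", eig2x2(A))
print("np.linalg.eigvals:", np.linalg.eigvals(A))
```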
Power Method for Dominant Eigenvalue
Repeatedly applying $A$ to a vector amplifies its component along the dominant eigenvector; normalizing at each step yields the power method, whose convergence rate is governed by the ratio $|\lambda_2/\lambda_1|$.
```python
def power_method_demo():
    """Demonstrate the power method for finding the dominant eigenvalue"""
    def power_method(A, max_iterations=100, tolerance=1e-6):
        """
        Power method for the dominant eigenvalue and corresponding eigenvector
        """
        n = A.shape[0]
        # Initialize with a random unit vector
        x = np.random.rand(n)
        x = x / np.linalg.norm(x)
        eigenvalue_estimates = []
        for iteration in range(max_iterations):
            # Matrix-vector multiplication
            y = A @ x
            # Estimate the eigenvalue (Rayleigh quotient, since ||x|| = 1)
            eigenvalue = x.T @ y
            # Normalize to get the next iterate
            x = y / np.linalg.norm(y)
            eigenvalue_estimates.append(eigenvalue)
            # Check convergence of the eigenvalue estimate
            if iteration > 0 and abs(eigenvalue_estimates[-1] - eigenvalue_estimates[-2]) < tolerance:
                break
        return eigenvalue, x, eigenvalue_estimates

    # Test matrix
    A = np.array([[4, 1, 2],
                  [1, 3, 1],
                  [2, 1, 5]], dtype=float)
    print("Test matrix A:")
    print(A)
    # Run the power method
    eigenval_power, eigenvec_power, estimates = power_method(A)
    # Verify against SciPy; A is symmetric, so the dominant eigenvalue is real
    eigenvals_true, eigenvecs_true = eig(A)
    max_eigenval_true = eigenvals_true[np.argmax(np.abs(eigenvals_true))].real
    print("\nPower method result:")
    print(f"Estimated dominant eigenvalue: {eigenval_power:.6f}")
    print(f"True dominant eigenvalue: {max_eigenval_true:.6f}")
    print(f"Error: {abs(eigenval_power - max_eigenval_true):.2e}")
    # Visualize the convergence process
    plt.figure(figsize=(10, 6))
    plt.subplot(1, 2, 1)
    plt.plot(estimates, 'b-o', markersize=4)
    plt.axhline(y=max_eigenval_true, color='r', linestyle='--', label='True value')
    plt.xlabel('Iteration')
    plt.ylabel('Eigenvalue estimate')
    plt.title('Power Method Convergence')
    plt.legend()
    plt.grid(True, alpha=0.3)
    # Error analysis
    plt.subplot(1, 2, 2)
    errors = [abs(est - max_eigenval_true) for est in estimates]
    plt.semilogy(errors, 'r-o', markersize=4)
    plt.xlabel('Iteration')
    plt.ylabel('Absolute error (log scale)')
    plt.title('Convergence Error')
    plt.grid(True, alpha=0.3)
    plt.tight_layout()
    plt.show()
    return eigenval_power, eigenvec_power

power_method_demo()
```
Application of Eigenvalues: Principal Component Analysis
Mathematical Principles of PCA
Principal Component Analysis (PCA) finds the main directions of variation in data by computing eigenvalues and eigenvectors of the covariance matrix.
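In symbols: for a centered data matrix $X \in \mathbb{R}^{n \times d}$, the sample covariance is $C = \frac{1}{n-1} X^T X$. Since $C$ is symmetric positive semi-definite, it diagonalizes as
$$C = V \Lambda V^T, \qquad \Lambda = \operatorname{diag}(\lambda_1 \ge \lambda_2 \ge \dots \ge 0),$$
where the orthonormal columns of $V$ are the principal directions; projecting the data as $Y = XV$ rotates it into those axes, and $\lambda_i$ is the variance captured along axis $i$.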
```python
def pca_eigenvalue_demo():
    """Use eigenvalues for principal component analysis"""
    # Generate correlated 2D data
    np.random.seed(42)
    n_samples = 200
    # Create correlated data
    X1 = np.random.randn(n_samples)
    X2 = 0.8 * X1 + 0.6 * np.random.randn(n_samples)
    X = np.column_stack([X1, X2])
    # Center the data
    X_centered = X - np.mean(X, axis=0)
    # Calculate the covariance matrix
    cov_matrix = np.cov(X_centered.T)
    print("Covariance matrix:")
    print(cov_matrix)
    # Calculate eigenvalues and eigenvectors
    eigenvals, eigenvecs = eig(cov_matrix)
    # The covariance matrix is symmetric, so its spectrum is real
    eigenvals, eigenvecs = eigenvals.real, eigenvecs.real
    # Sort by eigenvalue magnitude (descending)
    idx = np.argsort(eigenvals)[::-1]
    eigenvals = eigenvals[idx]
    eigenvecs = eigenvecs[:, idx]
    print(f"\nEigenvalues: {eigenvals}")
    print(f"Variance explained ratio: {eigenvals / np.sum(eigenvals)}")
    # Visualization
    fig, axes = plt.subplots(1, 3, figsize=(15, 5))
    # 1. Original data
    ax1 = axes[0]
    ax1.scatter(X[:, 0], X[:, 1], alpha=0.6, s=20)
    ax1.set_xlabel('X1')
    ax1.set_ylabel('X2')
    ax1.set_title('Original Data')
    ax1.grid(True, alpha=0.3)
    ax1.set_aspect('equal')
    # 2. Principal component directions
    ax2 = axes[1]
    ax2.scatter(X_centered[:, 0], X_centered[:, 1], alpha=0.6, s=20)
    # Draw principal component directions
    center = np.mean(X_centered, axis=0)
    for i, (val, vec) in enumerate(zip(eigenvals, eigenvecs.T)):
        # Arrow length proportional to the standard deviation along the axis
        length = 3 * np.sqrt(val)
        ax2.arrow(center[0], center[1], vec[0]*length, vec[1]*length,
                  head_width=0.1, head_length=0.1, fc=f'C{i}', ec=f'C{i}',
                  linewidth=3, label=f'PC{i+1} (λ={val:.2f})')
        ax2.arrow(center[0], center[1], -vec[0]*length, -vec[1]*length,
                  head_width=0.1, head_length=0.1, fc=f'C{i}', ec=f'C{i}',
                  linewidth=3)
    ax2.set_xlabel('X1 (centered)')
    ax2.set_ylabel('X2 (centered)')
    ax2.set_title('Principal Component Directions')
    ax2.legend()
    ax2.grid(True, alpha=0.3)
    ax2.set_aspect('equal')
    # 3. Data in the principal component coordinate system
    ax3 = axes[2]
    # Project data onto the principal axes
    X_pca = X_centered @ eigenvecs
    ax3.scatter(X_pca[:, 0], X_pca[:, 1], alpha=0.6, s=20)
    ax3.set_xlabel(f'PC1 (explains {eigenvals[0]/np.sum(eigenvals)*100:.1f}% variance)')
    ax3.set_ylabel(f'PC2 (explains {eigenvals[1]/np.sum(eigenvals)*100:.1f}% variance)')
    ax3.set_title('Principal Component Coordinate System')
    ax3.grid(True, alpha=0.3)
    ax3.set_aspect('equal')
    plt.tight_layout()
    plt.show()
    # Cumulative variance explained ratio
    explained_variance_ratio = eigenvals / np.sum(eigenvals)
    cumulative_variance = np.cumsum(explained_variance_ratio)
    print("\nCumulative variance explained:")
    for i, (individual, cumulative) in enumerate(zip(explained_variance_ratio, cumulative_variance)):
        print(f"PC{i+1}: {individual:.3f} (cumulative: {cumulative:.3f})")

pca_eigenvalue_demo()
```
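If scikit-learn is available (an assumption; it is not used elsewhere in this chapter), the covariance-eigenvector approach above can be cross-checked against its `PCA` implementation. Components may differ by a sign, which is immaterial:

```python
# Assumes scikit-learn is installed; cross-check of the eigendecomposition-based PCA
import numpy as np
from sklearn.decomposition import PCA

np.random.seed(42)
X1 = np.random.randn(200)
X2 = 0.8 * X1 + 0.6 * np.random.randn(200)
X = np.column_stack([X1, X2])

pca = PCA(n_components=2)
pca.fit(X)
# Rows of components_ are the principal directions (up to sign)
print("sklearn components:\n", pca.components_)
print("sklearn explained variance:", pca.explained_variance_)
print("sklearn explained variance ratio:", pca.explained_variance_ratio_)
```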
Eigenvalues in Dynamical Systems
Stability of Linear Dynamical Systems
For the continuous-time system $\dot{x} = Ax$, solutions are built from terms of the form $e^{\lambda t}$ for each eigenvalue $\lambda$ of $A$: the origin is asymptotically stable when all eigenvalues have negative real part, unstable when any has positive real part, and a saddle when real parts of both signs occur.
```python
def dynamical_system_stability():
    """Analyze stability of linear dynamical systems"""
    # Define several dynamical system matrices
    systems = {
        "Stable spiral": np.array([[-0.5, -0.8], [0.8, -0.5]]),
        "Unstable node": np.array([[1.2, 0.3], [0.3, 0.8]]),
        "Saddle point": np.array([[1, 0], [0, -0.5]]),
        "Center": np.array([[0, 1], [-1, 0]])
    }
    fig, axes = plt.subplots(2, 2, figsize=(12, 10))
    axes = axes.flatten()
    for i, (name, A) in enumerate(systems.items()):
        ax = axes[i]
        # Calculate eigenvalues
        eigenvals, eigenvecs = eig(A)
        print(f"\n{name}:")
        print(f"Matrix:\n{A}")
        print(f"Eigenvalues: {eigenvals}")
        # Classify stability from the real parts of the eigenvalues
        real_parts = np.real(eigenvals)
        if np.all(real_parts < 0):
            stability = "Stable"
        elif np.all(real_parts > 0):
            stability = "Unstable"
        elif np.any(real_parts > 0) and np.any(real_parts < 0):
            stability = "Saddle"
        else:
            stability = "Marginally stable"
        print(f"Stability: {stability}")
        # Draw the phase plane
        x = np.linspace(-2, 2, 20)
        y = np.linspace(-2, 2, 20)
        X, Y = np.meshgrid(x, y)
        # Vector field dx/dt = A x
        DX = A[0, 0] * X + A[0, 1] * Y
        DY = A[1, 0] * X + A[1, 1] * Y
        ax.quiver(X, Y, DX, DY, alpha=0.5)
        # Draw eigenvectors (only real eigenvalues give real invariant directions)
        for j, (val, vec) in enumerate(zip(eigenvals, eigenvecs.T)):
            if np.isreal(val):
                vec_real = np.real(vec)
                length = 1.5
                ax.arrow(0, 0, vec_real[0]*length, vec_real[1]*length,
                         head_width=0.1, head_length=0.1, fc=f'C{j}', ec=f'C{j}',
                         linewidth=3, alpha=0.8)
                ax.arrow(0, 0, -vec_real[0]*length, -vec_real[1]*length,
                         head_width=0.1, head_length=0.1, fc=f'C{j}', ec=f'C{j}',
                         linewidth=3, alpha=0.8)
        # Draw some trajectories
        for initial in [[-1, -1], [1, 1], [-1, 1], [1, -1]]:
            trajectory = simulate_trajectory(A, initial, steps=50, dt=0.1)
            ax.plot(trajectory[:, 0], trajectory[:, 1], 'r-', alpha=0.7, linewidth=1)
            ax.plot(initial[0], initial[1], 'ro', markersize=4)
        ax.set_xlim(-2, 2)
        ax.set_ylim(-2, 2)
        ax.set_xlabel('x')
        ax.set_ylabel('y')
        ax.set_title(f'{name}\nEigenvalues: {eigenvals[0]:.2f}, {eigenvals[1]:.2f}')
        ax.grid(True, alpha=0.3)
        ax.set_aspect('equal')
    plt.tight_layout()
    plt.show()

def simulate_trajectory(A, initial, steps=50, dt=0.1):
    """Simulate a trajectory of the linear system with the explicit Euler method"""
    trajectory = np.zeros((steps, 2))
    state = np.array(initial, dtype=float)
    for i in range(steps):
        trajectory[i] = state
        # Euler step: x(t+dt) ≈ x(t) + dt * A x(t)
        state = state + dt * (A @ state)
    return trajectory

dynamical_system_stability()
```
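As a sanity check on the Euler integration (the step size `dt` distorts trajectories somewhat), the exact solution of $\dot{x} = Ax$ is $x(t) = e^{At}x(0)$. A minimal sketch using `scipy.linalg.expm`, reusing the "Stable spiral" matrix from above:

```python
import numpy as np
from scipy.linalg import expm

A = np.array([[-0.5, -0.8],
              [0.8, -0.5]])   # the "Stable spiral" system from above
x0 = np.array([1.0, 1.0])
dt, steps = 0.1, 50

# Euler integration (mirrors simulate_trajectory)
x_euler = x0.copy()
for _ in range(steps):
    x_euler = x_euler + dt * (A @ x_euler)

# Exact solution x(t) = expm(A t) x0 at t = steps * dt
x_exact = expm(A * dt * steps) @ x0
print("Euler:", x_euler)
print("Exact:", x_exact)
print("Gap:  ", np.linalg.norm(x_euler - x_exact))
```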
Numerical Computation of Eigenvalues
Introduction to the QR Algorithm
Each step factors $A_k = Q_k R_k$ and forms $A_{k+1} = R_k Q_k = Q_k^T A_k Q_k$, so every iterate is similar to $A$ and has the same eigenvalues; under mild conditions the iterates converge to an upper triangular form whose diagonal carries the eigenvalues.
```python
def qr_algorithm_demo():
    """Demonstrate the QR algorithm for finding eigenvalues"""
    def qr_algorithm(A, max_iterations=50):
        """
        Simplified (unshifted) QR algorithm for eigenvalues
        """
        A_k = A.copy()
        eigenvalue_estimates = []
        for k in range(max_iterations):
            # QR decomposition
            Q, R = np.linalg.qr(A_k)
            # Update: A_{k+1} = R Q is similar to A_k
            A_k = R @ Q
            # Record diagonal elements (approximations of the eigenvalues)
            diagonal = np.diag(A_k)
            eigenvalue_estimates.append(diagonal.copy())
            # Check convergence (strictly lower triangular part close to zero)
            if k > 5:
                lower_triangular_norm = np.linalg.norm(np.tril(A_k, k=-1))
                if lower_triangular_norm < 1e-10:
                    break
        return A_k, eigenvalue_estimates

    # Test matrix
    A = np.array([[4, 1, 1],
                  [1, 3, 2],
                  [1, 2, 5]], dtype=float)
    print("Original matrix A:")
    print(A)
    # Run the QR algorithm
    A_final, estimates = qr_algorithm(A)
    print(f"\nMatrix after {len(estimates)} iterations:")
    print(A_final)
    # True eigenvalues
    true_eigenvals = np.linalg.eigvals(A)
    estimated_eigenvals = np.diag(A_final)
    print(f"\nTrue eigenvalues: {np.sort(true_eigenvals)}")
    print(f"QR algorithm estimate: {np.sort(estimated_eigenvals)}")
    # Visualize the convergence process
    estimates_array = np.array(estimates)
    # The diagonal settles in decreasing order of magnitude, so sort the
    # true eigenvalues the same way before drawing the reference lines
    true_sorted = np.sort(true_eigenvals)[::-1]
    plt.figure(figsize=(12, 4))
    for i in range(A.shape[0]):
        plt.plot(estimates_array[:, i], 'o-', label=f'Eigenvalue {i+1}')
        plt.axhline(y=true_sorted[i], color=f'C{i}', linestyle='--', alpha=0.7)
    plt.xlabel('Iteration')
    plt.ylabel('Eigenvalue estimate')
    plt.title('QR Algorithm Convergence')
    plt.legend()
    plt.grid(True, alpha=0.3)
    plt.show()

qr_algorithm_demo()
```
Geometric Interpretation of Eigenvalues
Principal Axes of Ellipse
The principal axes of the ellipse defined by the quadratic form $x^T A x = 1$ (with $A$ symmetric positive definite) point along the eigenvectors of $A$, and the semi-axis lengths are the reciprocal square roots of the eigenvalues, $1/\sqrt{\lambda_i}$.
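To see this, diagonalize $A = V \Lambda V^T$ with orthonormal eigenvectors as columns of $V$; substituting $x = Vy$ gives
$$x^T A x = y^T \Lambda y = \lambda_1 y_1^2 + \lambda_2 y_2^2 = 1,$$
an axis-aligned ellipse in the $y$-coordinates with semi-axes $1/\sqrt{\lambda_1}$ and $1/\sqrt{\lambda_2}$.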
```python
def ellipse_principal_axes():
    """Visualize the relationship between ellipse principal axes and eigenvectors"""
    # Define a positive definite symmetric matrix
    A = np.array([[5, 2],
                  [2, 2]])
    # Calculate eigenvalues and eigenvectors
    eigenvals, eigenvecs = eig(A)
    # A is symmetric positive definite, so the spectrum is real and positive
    eigenvals, eigenvecs = eigenvals.real, eigenvecs.real
    # Sort by eigenvalue magnitude (descending)
    idx = np.argsort(eigenvals)[::-1]
    eigenvals = eigenvals[idx]
    eigenvecs = eigenvecs[:, idx]
    print("Matrix A:")
    print(A)
    print(f"Eigenvalues: {eigenvals}")
    print(f"Eigenvectors:\n{eigenvecs}")
    # Semi-axis lengths of the ellipse x^T A x = 1
    semi_axes = 1 / np.sqrt(eigenvals)
    print(f"Ellipse semi-axis lengths: {semi_axes}")
    # Generate points on the ellipse
    theta = np.linspace(0, 2*np.pi, 1000)
    # Unit circle in the eigenvector coordinate system
    unit_circle = np.array([np.cos(theta), np.sin(theta)])
    # Scale to the ellipse
    ellipse_principal = semi_axes[:, np.newaxis] * unit_circle
    # Rotate back to the original coordinate system
    ellipse_points = eigenvecs @ ellipse_principal
    # Visualization
    fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(12, 5))
    # 1. Ellipse and eigenvectors
    ax1.plot(ellipse_points[0], ellipse_points[1], 'b-', linewidth=2, label='Ellipse $x^TAx = 1$')
    # Draw eigenvectors (principal axis directions)
    for i, (val, vec) in enumerate(zip(eigenvals, eigenvecs.T)):
        length = semi_axes[i]
        ax1.arrow(0, 0, vec[0]*length, vec[1]*length,
                  head_width=0.05, head_length=0.05, fc=f'C{i+1}', ec=f'C{i+1}',
                  linewidth=3, label=f'Principal axis {i+1} (λ={val:.2f})')
        ax1.arrow(0, 0, -vec[0]*length, -vec[1]*length,
                  head_width=0.05, head_length=0.05, fc=f'C{i+1}', ec=f'C{i+1}',
                  linewidth=3)
    ax1.set_xlim(-1, 1)
    ax1.set_ylim(-1, 1)
    ax1.set_aspect('equal')
    ax1.grid(True, alpha=0.3)
    ax1.legend()
    ax1.set_title('Principal Axes of Ellipse')
    # 2. Contour plot of the quadratic form
    x = np.linspace(-1, 1, 100)
    y = np.linspace(-1, 1, 100)
    X, Y = np.meshgrid(x, y)
    # Quadratic form values x^T A x
    Z = A[0, 0]*X**2 + 2*A[0, 1]*X*Y + A[1, 1]*Y**2
    contours = ax2.contour(X, Y, Z, levels=[0.5, 1, 2, 3, 4], colors='blue', alpha=0.7)
    ax2.clabel(contours, inline=True, fontsize=8)
    # Draw eigenvectors
    for i, (val, vec) in enumerate(zip(eigenvals, eigenvecs.T)):
        ax2.arrow(0, 0, vec[0]*0.8, vec[1]*0.8,
                  head_width=0.05, head_length=0.05, fc=f'C{i+1}', ec=f'C{i+1}',
                  linewidth=3, label=f'Eigenvector {i+1}')
    ax2.set_xlim(-1, 1)
    ax2.set_ylim(-1, 1)
    ax2.set_aspect('equal')
    ax2.grid(True, alpha=0.3)
    ax2.legend()
    ax2.set_title('Quadratic Form Contours')
    plt.tight_layout()
    plt.show()

ellipse_principal_axes()
```
Chapter Summary
Core Concept Summary
| Concept | Mathematical Expression | Geometric Meaning | Application |
|---|---|---|---|
| Eigenvalue | $Av = \lambda v$ | Scaling factor of the transformation | Stability analysis |
| Eigenvector | Non-zero $v$ with $Av = \lambda v$ | Direction unchanged by the transformation | Principal component analysis |
| Characteristic polynomial | $p(\lambda) = \det(A - \lambda I)$ | Polynomial whose roots are the eigenvalues | Theoretical analysis |
| Algebraic multiplicity | Multiplicity of $\lambda$ as a root of $p(\lambda)$ | Number of repeated eigenvalues | Diagonalization criterion |
| Geometric multiplicity | $\dim \ker(A - \lambda I)$ | Number of linearly independent eigenvectors | Diagonalization criterion |
Important Properties
- The sum of the eigenvalues equals the trace: $\sum_i \lambda_i = \operatorname{tr}(A)$
- The product of the eigenvalues equals the determinant: $\prod_i \lambda_i = \det(A)$
- $A$ is invertible if and only if $0$ is not an eigenvalue
- $A$ and $A^T$ have the same eigenvalues
- If $Av = \lambda v$, then $A^k v = \lambda^k v$ for every integer $k \ge 0$
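A quick numeric check of the first two properties (a minimal sketch on an arbitrary matrix):

```python
import numpy as np

A = np.array([[4.0, 1.0, 2.0],
              [1.0, 3.0, 1.0],
              [2.0, 1.0, 5.0]])
eigenvals = np.linalg.eigvals(A)
# Sum of the eigenvalues equals the trace
print(np.isclose(eigenvals.sum(), np.trace(A)))        # True
# Product of the eigenvalues equals the determinant
print(np.isclose(eigenvals.prod(), np.linalg.det(A)))  # True
```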
Summary of Computational Methods
```python
def computational_methods_summary():
    """Summary of eigenvalue computation methods"""
    methods = {
        "Analytical method": {
            "Applicable": "2×2 matrices",
            "Advantage": "Exact solution",
            "Disadvantage": "Limited to low dimensions"
        },
        "Power method": {
            "Applicable": "Dominant eigenvalue",
            "Advantage": "Simple, memory efficient",
            "Disadvantage": "Only finds one eigenvalue"
        },
        "QR algorithm": {
            "Applicable": "All eigenvalues",
            "Advantage": "Numerically stable",
            "Disadvantage": "Higher computational cost"
        },
        "Jacobi method": {
            "Applicable": "Symmetric matrices",
            "Advantage": "High precision",
            "Disadvantage": "Slow convergence"
        }
    }
    print("Summary of eigenvalue computation methods:")
    print("=" * 60)
    for method, properties in methods.items():
        print(f"\n{method}:")
        for prop, desc in properties.items():
            print(f"  {prop}: {desc}")

computational_methods_summary()
```
Eigenvalues and eigenvectors are among the most important concepts in linear algebra. They not only reveal the internal structure of linear transformations but also play a critical role in scientific computing, engineering applications, and data analysis. Mastering eigenvalue theory lays a solid foundation for understanding advanced topics such as matrix diagonalization, quadratic forms, and dynamical systems.