Chapter 2: Fundamental Theory of Vector Spaces

Learning Objectives
  • Deeply understand the definition and geometric meaning of vectors
  • Master basic vector operations (addition, scalar multiplication)
  • Understand the axiomatic definition of vector spaces
  • Master the concept and determination methods of subspaces
  • Understand the role of zero vectors and negative vectors

In-Depth Understanding of Vectors

Mathematical Definition of Vectors

A vector is an ordered array of numbers, typically represented as a column: $\mathbf{v} = \begin{bmatrix} v_1 \\ v_2 \\ \vdots \\ v_n \end{bmatrix}$

where $v_i$ is the $i$-th component of the vector.

import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D

# Examples of vectors in different dimensions
v_1d = np.array([5])                    # 1D vector
v_2d = np.array([3, 4])                 # 2D vector
v_3d = np.array([1, 2, 3])              # 3D vector
v_nd = np.array([1, -2, 0, 5, 3, -1])   # High-dimensional vector

print(f"1D vector: {v_1d}")
print(f"2D vector: {v_2d}")
print(f"3D vector: {v_3d}")
print(f"6D vector: {v_nd}")

Geometric Representation of Vectors

# Geometric representation of 2D vectors
fig, axes = plt.subplots(1, 2, figsize=(12, 5))

# Position vectors
v1 = np.array([3, 2])
v2 = np.array([-2, 3])
v3 = np.array([1, -2])

# First subplot: multiple vectors
axes[0].quiver(0, 0, v1[0], v1[1], angles='xy', scale_units='xy', scale=1,
               color='red', width=0.005, label='v1=(3,2)')
axes[0].quiver(0, 0, v2[0], v2[1], angles='xy', scale_units='xy', scale=1,
               color='blue', width=0.005, label='v2=(-2,3)')
axes[0].quiver(0, 0, v3[0], v3[1], angles='xy', scale_units='xy', scale=1,
               color='green', width=0.005, label='v3=(1,-2)')

axes[0].set_xlim(-3, 4)
axes[0].set_ylim(-3, 4)
axes[0].grid(True, alpha=0.3)
axes[0].legend()
axes[0].set_title('Vectors in Different Directions')
axes[0].set_xlabel('x')
axes[0].set_ylabel('y')

# Second subplot: vector magnitude
v = np.array([4, 3])
magnitude = np.linalg.norm(v)

axes[1].quiver(0, 0, v[0], v[1], angles='xy', scale_units='xy', scale=1,
               color='purple', width=0.008, label=f'v=({v[0]},{v[1]})')
axes[1].plot([0, v[0]], [0, v[1]], 'purple', linewidth=2)
axes[1].text(v[0]/2, v[1]/2 + 0.3, f'|v| = {magnitude:.2f}',
             fontsize=12, ha='center')

axes[1].set_xlim(-1, 5)
axes[1].set_ylim(-1, 4)
axes[1].grid(True, alpha=0.3)
axes[1].legend()
axes[1].set_title('Vector Magnitude')
axes[1].set_xlabel('x')
axes[1].set_ylabel('y')

plt.tight_layout()
plt.show()

Vector Magnitude Formula

For an $n$-dimensional vector $\mathbf{v} = [v_1, v_2, \ldots, v_n]$, its magnitude (norm) is defined as: $\|\mathbf{v}\| = \sqrt{v_1^2 + v_2^2 + \cdots + v_n^2}$
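
As a quick sanity check of this formula, the short sketch below (using an arbitrary example vector) computes the norm directly from the components and compares it with np.linalg.norm:

import numpy as np

# Compute the magnitude directly from the definition and compare with NumPy
v = np.array([4, 3])
manual_norm = np.sqrt(np.sum(v**2))   # sqrt(v1^2 + v2^2 + ... + vn^2)
numpy_norm = np.linalg.norm(v)        # built-in Euclidean norm

print(f"Manual computation: {manual_norm}")   # 5.0
print(f"np.linalg.norm:     {numpy_norm}")    # 5.0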

Basic Vector Operations

Vector Addition

The addition of two vectors is defined componentwise: $\mathbf{u} + \mathbf{v} = \begin{bmatrix} u_1 + v_1 \\ u_2 + v_2 \\ \vdots \\ u_n + v_n \end{bmatrix}$

# Vector addition example
u = np.array([2, 1])
v = np.array([1, 3])
result = u + v

print(f"Vector u: {u}")
print(f"Vector v: {v}")
print(f"u + v: {result}")

# Geometric representation of vector addition (parallelogram rule)
plt.figure(figsize=(8, 6))

# Draw vector u
plt.arrow(0, 0, u[0], u[1], head_width=0.1, head_length=0.1,
          fc='red', ec='red', label='u')
# Draw vector v
plt.arrow(0, 0, v[0], v[1], head_width=0.1, head_length=0.1,
          fc='blue', ec='blue', label='v')
# Draw vector v starting from the endpoint of u
plt.arrow(u[0], u[1], v[0], v[1], head_width=0.1, head_length=0.1,
          fc='blue', ec='blue', linestyle='--', alpha=0.7)
# Draw vector u starting from the endpoint of v
plt.arrow(v[0], v[1], u[0], u[1], head_width=0.1, head_length=0.1,
          fc='red', ec='red', linestyle='--', alpha=0.7)
# Draw result vector
plt.arrow(0, 0, result[0], result[1], head_width=0.15, head_length=0.15,
          fc='green', ec='green', linewidth=2, label='u + v')

plt.grid(True, alpha=0.3)
plt.legend()
plt.xlim(-0.5, 4)
plt.ylim(-0.5, 5)
plt.title('Geometric Representation of Vector Addition')
plt.xlabel('x')
plt.ylabel('y')
plt.show()

Scalar Multiplication

The multiplication of a scalar and a vector is defined componentwise: $c\mathbf{v} = c\begin{bmatrix} v_1 \\ v_2 \\ \vdots \\ v_n \end{bmatrix} = \begin{bmatrix} cv_1 \\ cv_2 \\ \vdots \\ cv_n \end{bmatrix}$

# Scalar multiplication example
v = np.array([2, 1])
scalars = [-2, -1, 0.5, 1, 2]

plt.figure(figsize=(10, 6))

colors = ['red', 'orange', 'yellow', 'blue', 'green']
for i, c in enumerate(scalars):
    scaled_v = c * v
    plt.arrow(0, 0, scaled_v[0], scaled_v[1], head_width=0.1, head_length=0.1,
              fc=colors[i], ec=colors[i], label=f'{c}v')

plt.grid(True, alpha=0.3)
plt.legend()
plt.xlim(-5, 5)
plt.ylim(-3, 3)
plt.title('Geometric Effect of Scalar Multiplication')
plt.xlabel('x')
plt.ylabel('y')
plt.show()

# Numerical computation
print("Scalar multiplication results:")
for c in scalars:
    result = c * v
    print(f"{c} * {v} = {result}")

Geometric Meaning of Scalar Multiplication

  • $c > 1$: the vector is stretched; its direction is unchanged
  • $0 < c < 1$: the vector is shortened; its direction is unchanged
  • $c = 0$: the vector becomes the zero vector
  • $c < 0$: the direction is reversed and the length is scaled by a factor of $|c|$ (verified numerically below)
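
The length claim in the last bullet can be checked numerically. The sketch below (scalars chosen arbitrarily for illustration) confirms that $\|c\mathbf{v}\| = |c|\,\|\mathbf{v}\|$ for several values of $c$:

import numpy as np

# Verify ||c v|| = |c| * ||v|| for a few scalars
v = np.array([2, 1])
for c in [-2, -0.5, 0, 0.5, 3]:
    lhs = np.linalg.norm(c * v)          # length of the scaled vector
    rhs = abs(c) * np.linalg.norm(v)     # |c| times the original length
    print(f"c = {c:5}: ||c v|| = {lhs:.3f}, |c|*||v|| = {rhs:.3f}, equal: {np.isclose(lhs, rhs)}")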

Axiomatic Definition of Vector Spaces

A vector space (linear space) is a set $V$, together with operations of addition and scalar multiplication, whose elements are called vectors and which satisfies the following axioms:

Addition Axioms

For all $\mathbf{u}, \mathbf{v}, \mathbf{w} \in V$:

  1. Closure: $\mathbf{u} + \mathbf{v} \in V$
  2. Commutativity: $\mathbf{u} + \mathbf{v} = \mathbf{v} + \mathbf{u}$
  3. Associativity: $(\mathbf{u} + \mathbf{v}) + \mathbf{w} = \mathbf{u} + (\mathbf{v} + \mathbf{w})$
  4. Zero element: there exists $\mathbf{0} \in V$ such that $\mathbf{v} + \mathbf{0} = \mathbf{v}$
  5. Negative element: for each $\mathbf{v} \in V$, there exists $-\mathbf{v} \in V$ such that $\mathbf{v} + (-\mathbf{v}) = \mathbf{0}$

Scalar Multiplication Axioms

Let $a, b$ be scalars and $\mathbf{u}, \mathbf{v} \in V$:

  1. Closure: $a\mathbf{v} \in V$
  2. Distributivity 1: $a(\mathbf{u} + \mathbf{v}) = a\mathbf{u} + a\mathbf{v}$
  3. Distributivity 2: $(a + b)\mathbf{v} = a\mathbf{v} + b\mathbf{v}$
  4. Associativity: $(ab)\mathbf{v} = a(b\mathbf{v})$
  5. Identity: $1\mathbf{v} = \mathbf{v}$

# Verify vector space axioms
def verify_vector_space_axioms():
    # Define some vectors
    u = np.array([1, 2, 3])
    v = np.array([4, 5, 6])
    w = np.array([7, 8, 9])
    zero = np.array([0, 0, 0])

    # Scalars
    a, b = 2, 3

    print("Verifying vector space axioms:")
    print("=" * 40)

    # Addition commutativity
    print(f"1. Commutativity: u + v = {u + v}")
    print(f"                  v + u = {v + u}")
    print(f"   Equal? {np.array_equal(u + v, v + u)}")

    # Addition associativity
    print(f"\n2. Associativity: (u + v) + w = {(u + v) + w}")
    print(f"                  u + (v + w) = {u + (v + w)}")
    print(f"   Equal? {np.array_equal((u + v) + w, u + (v + w))}")

    # Zero vector
    print(f"\n3. Zero vector: u + 0 = {u + zero}")
    print(f"                u = {u}")
    print(f"   Equal? {np.array_equal(u + zero, u)}")

    # Negative vector
    neg_u = -u
    print(f"\n4. Negative vector: u + (-u) = {u + neg_u}")
    print(f"   Is zero? {np.allclose(u + neg_u, zero)}")

    # Scalar multiplication distributivity
    print(f"\n5. Distributivity 1: a(u + v) = {a * (u + v)}")
    print(f"                     au + av = {a * u + a * v}")
    print(f"   Equal? {np.array_equal(a * (u + v), a * u + a * v)}")

    print(f"\n6. Distributivity 2: (a + b)u = {(a + b) * u}")
    print(f"                     au + bu = {a * u + b * u}")
    print(f"   Equal? {np.array_equal((a + b) * u, a * u + b * u)}")
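
    # Additional checks (a small extension, not in the original script): verify the
    # scalar-multiplication associativity and identity axioms with the same a, b, u
    print(f"\n7. Associativity: (ab)u = {(a * b) * u}")
    print(f"                  a(bu) = {a * (b * u)}")
    print(f"   Equal? {np.array_equal((a * b) * u, a * (b * u))}")

    print(f"\n8. Identity: 1u = {1 * u}")
    print(f"   Equal to u? {np.array_equal(1 * u, u)}")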

verify_vector_space_axioms()

Examples of Vector Spaces

Common Vector Spaces

# 1. R^n space - n-dimensional real vector space
print("1. R^n space examples:")
R2_vectors = [np.array([1, 2]), np.array([3, -1]), np.array([0, 5])]
R3_vectors = [np.array([1, 2, 3]), np.array([4, 5, 6]), np.array([7, 8, 9])]

print("Vectors in R^2:", R2_vectors)
print("Vectors in R^3:", R3_vectors)

# 2. Polynomial space
print("\n2. Polynomial space P_n:")
print("Polynomials of degree at most 2: {a + bx + cx^2 | a, b, c ∈ R}")
print("Examples: 3 + 2x - x^2, 5x + 4x^2, 7 - 3x")

# 3. Matrix space
print("\n3. Matrix space M_{m×n}:")
matrix_space_example = [
    np.array([[1, 2], [3, 4]]),
    np.array([[5, 6], [7, 8]]),
    np.array([[0, 1], [-1, 0]])
]
print("Elements in 2×2 matrix space:")
for i, matrix in enumerate(matrix_space_example):
    print(f"Matrix {i+1}:\n{matrix}")

# 4. Function space
print("\n4. Function space:")
print("Continuous function space C[0,1]: all continuous functions on interval [0,1]")
print("Examples: f(x) = x^2, g(x) = sin(x), h(x) = e^x")
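
To make the polynomial-space example more concrete, the following sketch (an illustrative addition, not part of the original examples) represents a polynomial $a + bx + cx^2$ by its coefficient vector $(a, b, c)$, so that polynomial addition and scalar multiplication become exactly the componentwise vector operations defined earlier:

import numpy as np

# Represent p(x) = a + b x + c x^2 by its coefficient vector [a, b, c]
p = np.array([3, 2, -1])    # 3 + 2x - x^2
q = np.array([0, 5, 4])     # 5x + 4x^2

# Adding polynomials corresponds to adding coefficient vectors
print("p + q ->", p + q)    # [3 7 3], i.e. 3 + 7x + 3x^2

# Scaling a polynomial corresponds to scaling its coefficient vector
print("2 * p ->", 2 * p)    # [6 4 -2], i.e. 6 + 4x - 2x^2

# Evaluate both sides at a point to confirm the correspondence
x = 1.5
poly_val = lambda coeffs, x: sum(c * x**k for k, c in enumerate(coeffs))
print(poly_val(p + q, x), "==", poly_val(p, x) + poly_val(q, x))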

Subspace Theory

Definition of Subspace

A subset $W$ of a vector space $V$ is called a subspace of $V$ if $W$ is itself a vector space under the same addition and scalar multiplication as $V$.

Subspace Criterion Theorem

$W$ is a subspace of $V$ if and only if:

  1. $\mathbf{0} \in W$ (contains the zero vector)
  2. For any $\mathbf{u}, \mathbf{v} \in W$, we have $\mathbf{u} + \mathbf{v} \in W$ (closed under addition)
  3. For any $c \in \mathbb{R}$ and $\mathbf{v} \in W$, we have $c\mathbf{v} \in W$ (closed under scalar multiplication)

def is_subspace(vectors, test_vectors=None):
    """
    Demonstrate the subspace criteria on a finite sample of vectors.
    Note: this only illustrates the three checks; it does not prove
    closure for every possible combination of vectors and scalars.
    """
    vectors = np.array(vectors)

    # Criterion 1: does the sample contain the zero vector?
    zero_vector = np.zeros(vectors.shape[1])
    contains_zero = any(np.allclose(v, zero_vector) for v in vectors)

    print(f"Contains zero vector: {contains_zero}")

    # Criterion 2: closure under addition (spot check with the first two vectors)
    if len(vectors) >= 2:
        v1, v2 = vectors[0], vectors[1]
        sum_vector = v1 + v2
        print(f"v1 + v2 = {sum_vector}")

    # Criterion 3: closure under scalar multiplication (spot check with c = 2)
    if len(vectors) >= 1:
        v1 = vectors[0]
        scaled = 2 * v1
        print(f"2 * v1 = {scaled}")

    return contains_zero

# Example 1: Line through origin in R^3
print("Example 1: Line through origin span{(1,2,3)}")
line_vectors = [np.array([0, 0, 0]), np.array([1, 2, 3]), np.array([2, 4, 6])]
is_subspace(line_vectors)

print("\n" + "="*50)

# Example 2: Plane through origin in R^3
print("Example 2: Plane through origin span{(1,0,0), (0,1,0)}")
plane_vectors = [np.array([0, 0, 0]), np.array([1, 0, 0]),
                 np.array([0, 1, 0]), np.array([1, 1, 0])]
is_subspace(plane_vectors)
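
For contrast, a minimal non-example (an illustrative addition to the two examples above): the plane $z = 1$ in $\mathbb{R}^3$ does not contain the zero vector and is not closed under addition or scalar multiplication, so it fails the criteria:

import numpy as np

# Example 3 (non-example): the plane z = 1 in R^3 is NOT a subspace
print("\n" + "="*50)
print("Example 3: Plane z = 1 (does not pass through the origin)")

in_plane = lambda w: np.isclose(w[2], 1)   # membership test for the plane z = 1

u = np.array([1, 0, 1])
v = np.array([0, 2, 1])

print(f"Zero vector in the set? {in_plane(np.zeros(3))}")                  # False
print(f"u in set: {in_plane(u)}, v in set: {in_plane(v)}")                 # True, True
print(f"u + v in set? {in_plane(u + v)}")                                  # False: z = 2
print(f"2 * u in set? {in_plane(2 * u)}")                                  # False: z = 2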

Common Types of Subspaces

[Diagram placeholder: the original page rendered a chart of common subspace types here.] Typical subspaces of $\mathbb{R}^3$ include the zero subspace $\{\mathbf{0}\}$, lines through the origin, planes through the origin, and $\mathbb{R}^3$ itself.

Importance of Zero and Negative Vectors

Properties of Zero Vector

The zero vector $\mathbf{0}$ is a special element of any vector space:

# Demonstration of zero vector properties
zero_2d = np.array([0, 0])
zero_3d = np.array([0, 0, 0])

v = np.array([3, 4])
w = np.array([1, 2, 3])

print("Properties of zero vector:")
print(f"1. v + 0 = {v} + {zero_2d} = {v + zero_2d}")
print(f"2. 0 + v = {zero_2d} + {v} = {zero_2d + v}")
print(f"3. 0 * v = 0 * {v} = {0 * v}")
print(f"4. ||0|| = {np.linalg.norm(zero_2d)}")

# Role of zero vector in linear combinations
a, b, c = 2, -3, 5
u1 = np.array([1, 0])
u2 = np.array([0, 1])
u3 = np.array([0, 0])  # Zero vector

linear_comb = a * u1 + b * u2 + c * u3
print(f"\nLinear combination: {a}*{u1} + {b}*{u2} + {c}*{u3} = {linear_comb}")

Properties of Negative Vector

# Demonstration of negative vector properties
v = np.array([3, -2, 1])
neg_v = -v

print("Properties of negative vector:")
print(f"Vector v: {v}")
print(f"Negative vector -v: {neg_v}")
print(f"v + (-v) = {v + neg_v}")
print(f"||v|| = {np.linalg.norm(v):.3f}")
print(f"||-v|| = {np.linalg.norm(neg_v):.3f}")

# Visualize positive and negative vectors
plt.figure(figsize=(8, 6))
v_2d = np.array([3, 2])
neg_v_2d = -v_2d

plt.arrow(0, 0, v_2d[0], v_2d[1], head_width=0.2, head_length=0.2,
          fc='blue', ec='blue', label='v')
plt.arrow(0, 0, neg_v_2d[0], neg_v_2d[1], head_width=0.2, head_length=0.2,
          fc='red', ec='red', label='-v')

plt.grid(True, alpha=0.3)
plt.axis('equal')
plt.legend()
plt.xlim(-4, 4)
plt.ylim(-3, 3)
plt.title('Vector and Its Negative')
plt.xlabel('x')
plt.ylabel('y')
plt.show()

Chapter Summary

This chapter explored the fundamental theory of vector spaces in depth:

Concept | Core Content | Important Properties
Vector operations | Addition, scalar multiplication | Satisfy commutativity, associativity, distributivity
Vector space | 10 axioms | Abstract linear structure
Subspace | 3 criteria | Contains the zero vector, closed under both operations
Zero vector | Additive identity | Unique; neutral element of addition
Negative vector | Additive inverse | Each vector has a unique negative

Important Reminders
  • The vector space axioms are abstract, but they apply to many concrete mathematical objects
  • A subspace must contain the zero vector; checking this is a quick necessary (though not sufficient) condition
  • Geometric intuition helps in mastering the abstract concepts

Through this chapter, we have established a rigorous foundation in vector space theory, preparing us for subsequent learning of linear dependence, basis and dimension, and other concepts.