Chapter 9: Plugin System and Extensions


Learning Objectives
  • Understand the pytest plugin ecosystem
  • Master the usage of common plugins
  • Learn custom plugin development
  • Understand the use of hook functions

Key Concepts

Pytest Plugin Architecture

pytest uses a plugin-based architecture with hook mechanisms for extensibility:

  • Core framework: Provides basic test execution capabilities
  • Plugin system: Extends functionality through hooks
  • Third-party plugins: Rich ecosystem
  • Custom plugins: Meet project-specific needs (a minimal example follows this list)
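
A minimal sketch of what "extends functionality through hooks" looks like in practice: a single hook function in conftest.py already acts as a local plugin. The hook name below is a real pytest hook; the header text is only illustrative.

# conftest.py - a one-function local plugin
def pytest_report_header(config):
    """Add an extra line to the header pytest prints at startup."""
    return "plugin demo: local conftest.py hook is active"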

Plugin Types

Type                | Description                    | Examples
--------------------|--------------------------------|----------------------------
Built-in plugins    | pytest core features           | parametrize, fixtures
Official plugins    | Maintained by the pytest team  | pytest-xdist, pytest-cov
Third-party plugins | Community contributions        | pytest-django, pytest-mock
Local plugins       | Project-specific               | Hooks in conftest.py
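
Before adding more plugins it helps to see what pytest has already loaded. The flags below are standard pytest options; the plugin name used with -p no: is only an example (it matches pytest-randomly):

# Inspecting and disabling plugins
"""
# Show active plugins and which conftest.py files were loaded
pytest --trace-config

# Disable one plugin for a single run (here: pytest-randomly)
pytest -p no:randomly

# Skip autoloading of all third-party plugins (useful when debugging conflicts)
PYTEST_DISABLE_PLUGIN_AUTOLOAD=1 pytest
"""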

Common Plugin Ecosystem

(Mermaid diagram: overview of the common pytest plugin ecosystem)

Example Code

Common Plugin Installation and Usage

# requirements-test.txt - Common plugin list
pytest>=7.0.0
pytest-cov>=4.0.0          # Code coverage
pytest-html>=3.1.0         # HTML reports
pytest-json-report>=1.5.0  # JSON reports
pytest-mock>=3.10.0        # Mock integration
pytest-xdist>=3.2.0        # Parallel execution
pytest-benchmark>=4.0.0    # Performance testing
pytest-timeout>=2.1.0      # Timeout control
pytest-rerunfailures>=11.0 # Failure retry
pytest-django>=4.5.0       # Django integration
pytest-asyncio>=0.21.0     # Async testing
pytest-freezegun>=0.4.0    # Time mocking
pytest-env>=0.8.0          # Environment variables
pytest-randomly>=3.12.0    # Test randomization
pytest-sugar>=0.9.0        # Beautify output
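
Installing the list and running a first combined session could look like the commands below; the src package path is a placeholder for your own code:

# Install and smoke-test the plugins
"""
pip install -r requirements-test.txt

# Check which pytest plugins were actually installed
pip list | grep pytest

# Example run combining coverage and parallel execution
pytest --cov=src --cov-report=term-missing -n auto
"""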

pytest-xdist Parallel Testing

# test_parallel_execution.py
import pytest
import time
import os  # used to show which worker process runs each test

# Compute-intensive tasks
def fibonacci(n):
    """Calculate Fibonacci sequence"""
    if n <= 1:
        return n
    return fibonacci(n-1) + fibonacci(n-2)

def prime_check(n):
    """Check if prime"""
    if n < 2:
        return False
    for i in range(2, int(n**0.5) + 1):
        if n % i == 0:
            return False
    return True

class TestParallelExecution:
    """Parallel execution test examples"""

    @pytest.mark.parametrize("n", [20, 21, 22, 23, 24])
    def test_fibonacci_computation(self, n):
        """Fibonacci calculation test"""
        start_time = time.time()
        result = fibonacci(n)
        duration = time.time() - start_time

        assert result > 0
        print(f"Thread {threading.current_thread().name}: fib({n}) = {result}, time: {duration:.3f}s")

    @pytest.mark.parametrize("n", [97, 101, 103, 107, 109])
    def test_prime_numbers(self, n):
        """Prime number test"""
        assert prime_check(n) is True
        print(f"Thread {threading.current_thread().name}: {n} is prime")

    @pytest.mark.parametrize("n", [95, 99, 100, 102, 104])
    def test_composite_numbers(self, n):
        """Composite number test"""
        assert prime_check(n) is False
        print(f"Thread {threading.current_thread().name}: {n} is composite")

# Parallel execution commands
"""
# Auto-detect CPU cores and run in parallel
pytest -n auto

# Run in parallel with specified number of processes
pytest -n 4

# Group all tests from the same file onto the same worker
pytest -n 4 --dist=loadfile

# Group tests by module/class scope onto the same worker
pytest -n 4 --dist=loadscope

# View distribution info
pytest -n 4 -v --tb=short
"""

pytest-benchmark Performance Testing

# test_performance_benchmark.py
import pytest
import time
import random

# Algorithms to test
def bubble_sort(arr):
    """Bubble sort"""
    n = len(arr)
    for i in range(n):
        for j in range(0, n - i - 1):
            if arr[j] > arr[j + 1]:
                arr[j], arr[j + 1] = arr[j + 1], arr[j]
    return arr

def quick_sort(arr):
    """Quick sort"""
    if len(arr) <= 1:
        return arr
    pivot = arr[len(arr) // 2]
    left = [x for x in arr if x < pivot]
    middle = [x for x in arr if x == pivot]
    right = [x for x in arr if x > pivot]
    return quick_sort(left) + middle + quick_sort(right)

def binary_search(arr, target):
    """Binary search"""
    left, right = 0, len(arr) - 1
    while left <= right:
        mid = (left + right) // 2
        if arr[mid] == target:
            return mid
        elif arr[mid] < target:
            left = mid + 1
        else:
            right = mid - 1
    return -1

class TestPerformanceBenchmark:
    """Performance benchmark tests"""

    @pytest.fixture
    def small_dataset(self):
        """Small dataset"""
        return [random.randint(1, 100) for _ in range(10)]

    @pytest.fixture
    def medium_dataset(self):
        """Medium dataset"""
        return [random.randint(1, 1000) for _ in range(100)]

    @pytest.fixture
    def large_dataset(self):
        """Large dataset"""
        return [random.randint(1, 10000) for _ in range(1000)]

    def test_bubble_sort_small(self, benchmark, small_dataset):
        """Bubble sort small dataset performance test"""
        result = benchmark(bubble_sort, small_dataset.copy())
        assert result == sorted(small_dataset)

    def test_quick_sort_small(self, benchmark, small_dataset):
        """Quick sort small dataset performance test"""
        result = benchmark(quick_sort, small_dataset.copy())
        assert result == sorted(small_dataset)

    def test_bubble_sort_medium(self, benchmark, medium_dataset):
        """Bubble sort medium dataset performance test"""
        result = benchmark(bubble_sort, medium_dataset.copy())
        assert result == sorted(medium_dataset)

    def test_quick_sort_medium(self, benchmark, medium_dataset):
        """Quick sort medium dataset performance test"""
        result = benchmark(quick_sort, medium_dataset.copy())
        assert result == sorted(medium_dataset)

    @pytest.mark.slow
    def test_quick_sort_large(self, benchmark, large_dataset):
        """Quick sort large dataset performance test"""
        result = benchmark(quick_sort, large_dataset.copy())
        assert result == sorted(large_dataset)

    def test_binary_search_performance(self, benchmark):
        """Binary search performance test"""
        arr = list(range(1000))
        target = 500

        result = benchmark(binary_search, arr, target)
        assert result == 500

    def test_benchmark_with_setup(self, benchmark):
        """Benchmark test with setup"""
        def setup():
            # pedantic() expects setup to return (args, kwargs) for the target
            data = [random.randint(1, 100) for _ in range(50)]
            return (data,), {}

        def sorting_algorithm(data):
            return sorted(data)

        result = benchmark.pedantic(
            sorting_algorithm,
            setup=setup,
            rounds=10,
            iterations=1  # pytest-benchmark requires iterations=1 when setup is used
        )
        assert len(result) == 50

# Performance test commands
"""
# Run only the benchmark tests
pytest --benchmark-only

# Write a machine-readable JSON report
pytest --benchmark-only --benchmark-json=benchmark_report.json

# Save a named baseline run
pytest --benchmark-only --benchmark-save=baseline

# Compare against a previously saved run (e.g. 0001)
pytest --benchmark-only --benchmark-compare=0001

# Limit how long each benchmark may run
pytest --benchmark-only --benchmark-max-time=2.0
"""

pytest-timeout Timeout Control

# test_timeout_control.py
import pytest
import time
import threading
from concurrent.futures import ThreadPoolExecutor

class TestTimeoutControl:
    """Timeout control tests"""

    @pytest.mark.timeout(5)
    def test_fast_operation(self):
        """Fast operation test"""
        time.sleep(1)
        assert True

    @pytest.mark.timeout(2)
    def test_timeout_failure(self):
        """Timeout failure test (intentionally fails by exceeding the limit)"""
        time.sleep(3)  # Exceeds 2 second limit
        assert True

    @pytest.mark.timeout(10, method="thread")
    def test_thread_timeout(self):
        """Thread timeout test"""
        def worker():
            time.sleep(5)
            return "completed"

        with ThreadPoolExecutor() as executor:
            future = executor.submit(worker)
            result = future.result()
            assert result == "completed"

    @pytest.mark.timeout(3, method="signal")
    def test_signal_timeout(self):
        """Signal timeout test (Unix only)"""
        time.sleep(1)
        assert True

    def test_no_timeout(self):
        """Test without timeout limit"""
        time.sleep(2)
        assert True

# Global timeout configuration in pytest.ini
"""
[pytest]
timeout = 300            # default per-test timeout in seconds
timeout_method = thread  # or "signal" (Unix only)
# A command line value such as --timeout=60 would override the ini setting
"""

pytest-rerunfailures Failure Retry

# test_retry_failures.py
import pytest
import random
import sys
import time

class TestRetryFailures:
    """Failure retry tests"""

    @pytest.mark.flaky(reruns=3)
    def test_flaky_network_operation(self):
        """Flaky network operation"""
        # Simulate 70% success rate network request
        success_rate = 0.7
        if random.random() < success_rate:
            assert True
        else:
            pytest.fail("Network request failed")

    @pytest.mark.flaky(reruns=2, reruns_delay=1)
    def test_with_retry_delay(self):
        """Test with retry delay"""
        # Simulate service that needs time to recover
        current_time = time.time()
        if int(current_time) % 3 == 0:
            assert True
        else:
            pytest.fail("Service temporarily unavailable")

    # condition must evaluate to True/False; a lambda object is always truthy
    @pytest.mark.flaky(reruns=5, condition=sys.platform.startswith("linux"))
    def test_conditional_retry(self):
        """Conditional retry test (reruns enabled only on Linux here)"""
        # Retries happen only when the condition above is true
        if random.random() < 0.8:
            assert True
        else:
            pytest.fail("Random failure")

# Run retry test commands
"""
# Enable retry for all failed tests
pytest --reruns 3

# Retry with delay
pytest --reruns 3 --reruns-delay 2

# Only retry tests with specific markers
pytest -m flaky --reruns 2
"""

Custom Plugin Development

# conftest.py - Project-level custom plugin
import pytest
import logging
import time
from datetime import datetime
from pathlib import Path

# Plugin configuration
class TestMetrics:
    """Test metrics collection"""
    def __init__(self):
        self.start_time = None
        self.test_results = {}
        self.slow_tests = []

    def start_session(self):
        self.start_time = time.time()

    def record_test(self, nodeid, duration, outcome):
        self.test_results[nodeid] = {
            'duration': duration,
            'outcome': outcome,
            'timestamp': datetime.now().isoformat()
        }

        if duration > 1.0:  # Record slow tests
            self.slow_tests.append({
                'nodeid': nodeid,
                'duration': duration
            })

# Global metrics instance
test_metrics = TestMetrics()

def pytest_configure(config):
    """Plugin configuration hook"""
    # Register custom markers
    config.addinivalue_line(
        "markers", "performance: Performance related tests"
    )
    config.addinivalue_line(
        "markers", "flaky: Flaky tests"
    )

    # Setup logging
    logging.basicConfig(
        level=logging.INFO,
        format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
    )

    # Read the custom option registered in pytest_addoption (defined further below)
    config.performance_threshold = config.getoption("--performance-threshold")

def pytest_sessionstart(session):
    """Test session start hook"""
    test_metrics.start_session()
    logger = logging.getLogger(__name__)
    logger.info("🚀 Test session starting")
    # session.items is not populated yet; the test count is logged after collection

def pytest_sessionfinish(session, exitstatus):
    """Test session finish hook"""
    logger = logging.getLogger(__name__)
    total_time = time.time() - test_metrics.start_time

    logger.info("📊 Test session finished")
    logger.info(f"Total time: {total_time:.2f} seconds")
    logger.info(f"Total tests: {len(test_metrics.test_results)}")

    # Analyze results
    passed = sum(1 for r in test_metrics.test_results.values() if r['outcome'] == 'passed')
    failed = sum(1 for r in test_metrics.test_results.values() if r['outcome'] == 'failed')
    skipped = sum(1 for r in test_metrics.test_results.values() if r['outcome'] == 'skipped')

    logger.info(f"Passed: {passed}, Failed: {failed}, Skipped: {skipped}")

    # Report slow tests
    if test_metrics.slow_tests:
        logger.warning("🐌 Slow tests detected:")
        for slow_test in sorted(test_metrics.slow_tests, key=lambda x: x['duration'], reverse=True)[:5]:
            logger.warning(f"  {slow_test['nodeid']}: {slow_test['duration']:.2f}s")

    # Generate metrics report
    generate_metrics_report()

@pytest.hookimpl(hookwrapper=True)
def pytest_runtest_makereport(item, call):
    """Generate test report hook"""
    outcome = yield
    report = outcome.get_result()

    if report.when == "call":
        test_metrics.record_test(
            item.nodeid,
            report.duration,
            report.outcome
        )

def pytest_runtest_setup(item):
    """Test setup hook"""
    # Check performance test markers
    if item.get_closest_marker("performance"):
        # Can setup special performance test environment
        logging.getLogger(__name__).info(f"⚡ Preparing performance test: {item.name}")

def pytest_runtest_teardown(item, nextitem):
    """Test teardown hook"""
    # Check for special cleanup requirements
    if item.get_closest_marker("flaky"):
        logging.getLogger(__name__).info(f"🔄 Flaky test completed: {item.name}")

def pytest_collection_modifyitems(config, items):
    """Modify test collection hook"""
    logging.getLogger(__name__).info(f"Discovered {len(items)} tests")

    # Sort tests by markers
    performance_tests = []
    regular_tests = []

    for item in items:
        if item.get_closest_marker("performance"):
            performance_tests.append(item)
        else:
            regular_tests.append(item)

    # Run regular tests first, then performance tests
    items[:] = regular_tests + performance_tests

def generate_metrics_report():
    """Generate metrics report"""
    report_path = Path("test_metrics.json")

    import json
    metrics_data = {
        'session_start': test_metrics.start_time,
        'test_results': test_metrics.test_results,
        'slow_tests': test_metrics.slow_tests,
        'summary': {
            'total_tests': len(test_metrics.test_results),
            'total_duration': time.time() - test_metrics.start_time,
            'slow_test_count': len(test_metrics.slow_tests)
        }
    }

    with open(report_path, 'w') as f:
        json.dump(metrics_data, f, indent=2)

    logging.getLogger(__name__).info(f"📄 Metrics report generated: {report_path}")

# Custom fixture
@pytest.fixture
def performance_monitor():
    """Performance monitoring fixture"""
    start_memory = get_memory_usage()
    start_time = time.time()

    yield

    end_time = time.time()
    end_memory = get_memory_usage()

    duration = end_time - start_time
    memory_diff = end_memory - start_memory

    if duration > 0.5:
        logging.getLogger(__name__).warning(
            f"⚠️  Test took long time: {duration:.2f}s"
        )

    if memory_diff > 10:  # 10MB
        logging.getLogger(__name__).warning(
            f"⚠️  Memory usage increased: {memory_diff:.2f}MB"
        )

def get_memory_usage():
    """Get memory usage (simplified version)"""
    try:
        import psutil
        process = psutil.Process()
        return process.memory_info().rss / 1024 / 1024  # MB
    except ImportError:
        return 0

# Command line option extension
def pytest_addoption(parser):
    """Add command line options"""
    parser.addoption(
        "--performance-threshold",
        action="store",
        type=float,
        default=1.0,
        help="Performance test time threshold (seconds)"
    )

    parser.addoption(
        "--generate-report",
        action="store_true",
        help="Generate detailed test report"
    )

# Note: the --performance-threshold option is read in pytest_configure above.
# A module may implement each hook only once; defining a second pytest_configure
# here would silently replace the first one.

@pytest.hookimpl(hookwrapper=True)
def pytest_runtest_call(pyfuncitem):
    """Test call hook (wrapper): time tests marked as performance tests"""
    is_performance = pyfuncitem.get_closest_marker("performance") is not None
    start_time = time.time()
    # Additional performance monitoring could be set up here

    yield  # a hookwrapper must yield exactly once, even for unmarked tests

    if is_performance:
        duration = time.time() - start_time
        threshold = pyfuncitem.config.performance_threshold
        if duration > threshold:
            logging.getLogger(__name__).warning(
                f"⚠️  Performance test exceeded threshold: {duration:.2f}s > {threshold}s"
            )
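
With this conftest.py in place, the custom options registered in pytest_addoption can be passed like built-in flags. A couple of sample invocations (how --generate-report is wired to the report function is left to the plugin code):

# Using the custom command line options
"""
# Raise the slow-test threshold to 2 seconds
pytest --performance-threshold=2.0

# Request the detailed report as well
pytest --generate-report -v
"""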

Publishing Custom Plugins

# setup.py - Plugin packaging configuration
from setuptools import setup, find_packages

setup(
    name="pytest-custom-metrics",
    version="1.0.0",
    description="Custom pytest metrics collection plugin",
    long_description=open("README.md").read(),
    long_description_content_type="text/markdown",
    author="Your Name",
    author_email="your.email@example.com",
    url="https://github.com/yourname/pytest-custom-metrics",
    packages=find_packages(),
    classifiers=[
        "Development Status :: 4 - Beta",
        "Framework :: Pytest",
        "Intended Audience :: Developers",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
        "Programming Language :: Python",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.8",
        "Programming Language :: Python :: 3.9",
        "Programming Language :: Python :: 3.10",
        "Programming Language :: Python :: 3.11",
        "Topic :: Software Development :: Quality Assurance",
        "Topic :: Software Development :: Testing",
        "Topic :: Utilities",
    ],
    python_requires=">=3.8",
    install_requires=[
        "pytest>=6.0.0",
    ],
    entry_points={
        "pytest11": [
            "custom_metrics = pytest_custom_metrics.plugin",
        ],
    },
)

# pyproject.toml - Modern packaging configuration
"""
[build-system]
requires = ["setuptools>=61.0", "wheel"]
build-backend = "setuptools.build_meta"

[project]
name = "pytest-custom-metrics"
version = "1.0.0"
description = "Custom pytest metrics collection plugin"
readme = "README.md"
requires-python = ">=3.8"
dependencies = [
    "pytest>=6.0.0",
]

[project.entry-points.pytest11]
custom_metrics = "pytest_custom_metrics.plugin"
"""
Plugin Development Best Practices

  1. Clear objective: A plugin should solve a specific testing problem
  2. Hook selection: Choose the appropriate hook functions for each feature
  3. Compatibility: Ensure the plugin works across the pytest versions you support
  4. Complete documentation: Provide clear usage documentation and examples
  5. Test coverage: Write tests for the plugin itself (see the pytester sketch after this list)
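
For point 5, pytest ships a pytester fixture made for exactly this. A minimal sketch of a plugin self-test; the header text is illustrative:

# test_plugin.py - testing a plugin with pytest's built-in pytester fixture
# Enable the fixture once, e.g. in the test suite's conftest.py:
#     pytest_plugins = ["pytester"]

def test_report_header_is_added(pytester):
    # Build a throwaway project that uses the hook under test
    pytester.makeconftest(
        '''
        def pytest_report_header(config):
            return "hello from the plugin under test"
        '''
    )
    pytester.makepyfile(
        '''
        def test_ok():
            assert True
        '''
    )
    result = pytester.runpytest()
    result.stdout.fnmatch_lines(["*hello from the plugin under test*"])
    result.assert_outcomes(passed=1)
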
Important Notes

  1. Performance impact: Plugins may slow down test execution
  2. Plugin conflicts: Different plugins may conflict with each other
  3. Maintenance cost: Custom plugins require ongoing maintenance
  4. Version dependencies: Watch compatibility when pytest itself is upgraded (the required_plugins sketch below helps enforce this)
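
Note 4 can be partially enforced in configuration: pytest's required_plugins ini option turns a missing or mismatched plugin version into an immediate, explicit error:

# pytest.ini - pin plugin expectations
"""
[pytest]
# pytest aborts with a clear error if these are not installed in a matching version
required_plugins = pytest-xdist>=3.2.0 pytest-cov>=4.0.0
"""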

The pytest plugin system makes the framework extensible at nearly every stage of a test run. Chosen and maintained deliberately, plugins can significantly improve both testing efficiency and test quality.