Chapter 5: Test Marks and Grouping

Learning Objectives
  • Master the creation and use of custom marks
  • Learn the application of built-in marks
  • Understand test grouping and selective execution
  • Master the combined use of marks

Knowledge Points

Test Mark Concept

Test marks (Markers) are a mechanism provided by pytest to add metadata to tests, allowing:

  • Test classification: Group tests by functionality, type, or priority
  • Selective execution: Run specific test subsets based on marks (a minimal sketch follows this list)
  • Conditional execution: Skip or run tests based on environment or conditions
  • Test management: Better organize and manage large test suites
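
A minimal sketch of the idea; the file name, mark name, and test body are placeholders, and the smoke mark is assumed to be registered in your configuration (see "Mark Configuration and Registration" below):

# test_mark_concept.py (hypothetical)
import pytest

@pytest.mark.smoke          # attaches metadata to this test
def test_homepage_loads():
    """A quick check we might include in every smoke run"""
    assert True

# Selective execution from the command line:
#   pytest -m smoke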

Built-in Marks

  • @pytest.mark.skip: Unconditionally skip a test
  • @pytest.mark.skipif: Conditionally skip a test
  • @pytest.mark.xfail: Mark a test as expected to fail
  • @pytest.mark.parametrize: Parameterize a test
  • @pytest.mark.filterwarnings: Filter warnings for a test
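
Of the built-in marks above, @pytest.mark.filterwarnings is the only one not shown in the later examples; here is a minimal sketch of how it is typically used (the helper function and its warning text are made up for illustration):

# test_filterwarnings.py
import warnings
import pytest

def legacy_helper():
    """Hypothetical helper that still emits a DeprecationWarning"""
    warnings.warn("legacy_helper is deprecated", DeprecationWarning)
    return 42

@pytest.mark.filterwarnings("ignore::DeprecationWarning")
def test_legacy_helper():
    # The mark suppresses DeprecationWarning for this test only
    assert legacy_helper() == 42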

Custom Marks

You can create custom marks to meet specific needs:

  • Feature module marks (e.g., @pytest.mark.auth, @pytest.mark.api)
  • Test type marks (e.g., @pytest.mark.unit, @pytest.mark.integration)
  • Priority marks (e.g., @pytest.mark.critical, @pytest.mark.low_priority)

Example Code

Basic Mark Usage

# test_basic_marks.py
import pytest
import sys

@pytest.mark.skip(reason="Feature not yet implemented")
def test_unimplemented_feature():
    """Skip unimplemented feature test"""
    assert False  # This test will be skipped

@pytest.mark.skipif(sys.version_info < (3, 8), reason="Requires Python 3.8+")
def test_python38_feature():
    """Conditionally skip test"""
    # Use a Python 3.8+ feature: the walrus operator
    assert (result := "hello") == "hello"

@pytest.mark.xfail(reason="Known bug, awaiting fix")
def test_known_bug():
    """Test expected to fail"""
    assert 1 == 2  # This test is expected to fail

@pytest.mark.xfail(sys.platform == "win32", reason="Fails on Windows")
def test_unix_specific():
    """Expected to fail on specific platform"""
    import os
    assert os.name == "posix"

# Conditional expected failure
@pytest.mark.xfail(
    condition=sys.version_info < (3, 9),
    reason="Bug fixed in Python 3.9+"
)
def test_version_specific_fix():
    """Version-specific fix test"""
    # Simulate functionality with bug in older versions
    result = "test"
    assert len(result) == 4

Custom Mark Examples

# test_custom_marks.py
import pytest

# Custom marks should be registered in pytest.ini, pyproject.toml, or conftest.py
# (see the "Mark Configuration and Registration" section below); unregistered
# marks trigger a warning during collection

# Feature module marks
@pytest.mark.auth
def test_user_login():
    """User login test"""
    assert True

@pytest.mark.auth
def test_user_logout():
    """User logout test"""
    assert True

@pytest.mark.api
def test_api_endpoint():
    """API endpoint test"""
    assert True

# Test type marks
@pytest.mark.unit
def test_calculate_sum():
    """Unit test"""
    assert 2 + 2 == 4

@pytest.mark.integration
def test_database_integration():
    """Integration test"""
    # Simulate database integration test
    assert True

@pytest.mark.e2e
def test_end_to_end_workflow():
    """End-to-end test"""
    # Simulate complete user workflow
    assert True

# Performance and priority marks
@pytest.mark.slow
def test_slow_operation():
    """Slow test"""
    import time
    time.sleep(0.1)  # Simulate slow operation
    assert True

@pytest.mark.critical
def test_critical_functionality():
    """Critical functionality test"""
    assert True

@pytest.mark.smoke
def test_basic_smoke():
    """Smoke test"""
    assert True

# Environment marks
@pytest.mark.dev
def test_development_feature():
    """Development environment test"""
    assert True

@pytest.mark.prod
def test_production_ready():
    """Production environment test"""
    assert True

Multiple Marks and Combined Marks

# test_multiple_marks.py
import pytest

@pytest.mark.auth
@pytest.mark.critical
@pytest.mark.unit
def test_authentication_core():
    """Core authentication unit test"""
    assert True

@pytest.mark.api
@pytest.mark.integration
@pytest.mark.slow
def test_api_integration_slow():
    """Slow API integration test"""
    import time
    time.sleep(0.05)
    assert True

@pytest.mark.smoke
@pytest.mark.critical
def test_smoke_critical():
    """Critical smoke test"""
    assert True

# Use pytestmark to add marks to the entire module
# (conventionally declared at the top of the file; it applies to every test
#  in this module, not only the tests defined after it)
pytestmark = [pytest.mark.database, pytest.mark.integration]

def test_user_creation():
    """User creation test (automatically inherits module marks)"""
    assert True

def test_user_deletion():
    """User deletion test (automatically inherits module marks)"""
    assert True

# Class-level marks
@pytest.mark.api
class TestAPIOperations:
    """API operations test class"""

    @pytest.mark.get
    def test_get_request(self):
        """GET request test"""
        assert True

    @pytest.mark.post
    @pytest.mark.slow
    def test_post_request(self):
        """POST request test"""
        assert True

    @pytest.mark.delete
    @pytest.mark.critical
    def test_delete_request(self):
        """DELETE request test"""
        assert True

Dynamic Marks and Conditional Marks

# test_dynamic_marks.py
import pytest
import os

def pytest_collection_modifyitems(config, items):
    """Dynamically add marks to collected test items.

    Note: pytest only calls this hook from conftest.py or a plugin,
    so in practice it belongs in conftest.py rather than a test module.
    """
    for item in items:
        # Add slow mark to all tests containing "slow"
        if "slow" in item.nodeid:
            item.add_marker(pytest.mark.slow)

        # Add marks to specific modules
        if "database" in str(item.fspath):
            item.add_marker(pytest.mark.database)

# Conditional mark decorator
def requires_network(func):
    """Decorator for tests requiring network connection"""
    return pytest.mark.skipif(
        not os.getenv("NETWORK_TESTS"),
        reason="Requires NETWORK_TESTS environment variable"
    )(func)

def requires_docker(func):
    """Decorator for tests requiring Docker"""
    import subprocess
    try:
        subprocess.run(["docker", "--version"],
                      capture_output=True, check=True)
        docker_available = True
    except (subprocess.CalledProcessError, FileNotFoundError):
        docker_available = False

    return pytest.mark.skipif(
        not docker_available,
        reason="Requires Docker environment"
    )(func)

@requires_network
def test_api_call():
    """API call test requiring network"""
    # Simulate network API call
    assert True

@requires_docker
def test_container_deployment():
    """Container deployment test requiring Docker"""
    # Simulate container deployment test
    assert True

# Configuration-based conditional skipping
# (the pytest.config global was removed in pytest 5.0; read the option
#  through the request fixture instead, or handle a custom mark in
#  conftest.py as shown in the "Mark Configuration and Registration" section)
def test_expensive_operation(request):
    """Expensive operation test, skipped unless --run-expensive is given"""
    if not request.config.getoption("--run-expensive"):
        pytest.skip("Requires --run-expensive option")
    assert True

Parameterization Combined with Marks

# test_params_with_marks.py
import pytest
import os

@pytest.mark.parametrize("input_val,expected", [
    pytest.param(1, 2, marks=pytest.mark.fast),
    pytest.param(2, 4, marks=pytest.mark.fast),
    pytest.param(1000, 2000, marks=pytest.mark.slow),
    pytest.param(10000, 20000, marks=[pytest.mark.slow, pytest.mark.heavy])
])
def test_double_function(input_val, expected):
    """Parameterized test combined with marks"""
    result = input_val * 2
    assert result == expected

@pytest.mark.parametrize("browser", [
    pytest.param("chrome", marks=pytest.mark.stable),
    pytest.param("firefox", marks=pytest.mark.stable),
    pytest.param("safari", marks=pytest.mark.skipif(
        os.name == 'nt', reason="Safari not available on Windows")),
    pytest.param("edge", marks=pytest.mark.experimental)
])
def test_browser_compatibility(browser):
    """Browser compatibility test"""
    assert browser in ["chrome", "firefox", "safari", "edge"]

# Stacked parametrize decorators: every env/feature combination is generated
@pytest.mark.parametrize("env", ["dev", "staging", "prod"])
@pytest.mark.parametrize("feature", ["auth", "payment", "reporting"])
def test_feature_across_environments(env, feature):
    """Cross-environment feature test"""
    # Test based on environment and feature combination
    if env == "prod" and feature == "experimental":
        pytest.skip("Don't run experimental features in production environment")

    assert True

Mark Configuration and Registration

# pytest.ini configuration example
"""
[pytest]
markers =
    auth: Authentication-related tests
    api: API-related tests
    unit: Unit tests
    integration: Integration tests
    e2e: End-to-end tests
    slow: Slow tests
    fast: Fast tests
    critical: Critical tests
    smoke: Smoke tests
    dev: Development environment tests
    prod: Production environment tests
    database: Database-related tests
    network: Tests requiring network
    experimental: Experimental feature tests
"""

# Register marks in conftest.py
def pytest_configure(config):
    """Register custom marks"""
    config.addinivalue_line(
        "markers", "auth: Authentication-related tests"
    )
    config.addinivalue_line(
        "markers", "api: API-related tests"
    )
    config.addinivalue_line(
        "markers", "slow: Slow tests that may take longer"
    )

# Add command-line options
def pytest_addoption(parser):
    """Add custom command-line options"""
    parser.addoption(
        "--run-slow",
        action="store_true",
        default=False,
        help="Run tests marked as slow"
    )
    parser.addoption(
        "--run-expensive",
        action="store_true",
        default=False,
        help="Run expensive tests"
    )

def pytest_collection_modifyitems(config, items):
    """Modify test collection based on command-line options"""
    if not config.getoption("--run-slow"):
        # If no --run-slow option, skip slow tests
        skip_slow = pytest.mark.skip(reason="Requires --run-slow option to run")
        for item in items:
            if "slow" in item.keywords:
                item.add_marker(skip_slow)
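
With the conftest.py above in place, tests marked slow are skipped unless the option is passed; a sketch of the resulting command-line usage (assuming the markers and options are registered exactly as shown):

# Default run: tests marked slow are reported as skipped
pytest

# Include slow tests
pytest --run-slow

# Include expensive tests
pytest --run-expensive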

Running Tests with Specific Marks

Command-line Usage Examples

# Run tests with specific marks
pytest -m "auth"                    # Run authentication tests
pytest -m "api and not slow"       # Run API tests but exclude slow tests
pytest -m "critical or smoke"      # Run critical or smoke tests
pytest -m "unit and not integration" # Run unit tests but exclude integration tests

# Run combination of multiple marks
pytest -m "(auth or api) and critical"  # Run critical authentication or API tests

# View all available marks
pytest --markers

# View which tests will run (without actually running)
pytest -m "smoke" --collect-only

# Exclude tests with a specific mark
pytest -m "not slow"

# Verbose output showing mark information
pytest -v -m "critical"

Mark Expression Syntax

  • auth: Tests with the auth mark
  • not slow: Tests without the slow mark
  • auth and critical: Tests with both the auth and critical marks
  • auth or api: Tests with either the auth or the api mark
  • (auth or api) and not slow: Tests with auth or api but not the slow mark

Real-world Usage Scenarios

# test_real_world_marks.py
import pytest

# Different stages in CI/CD pipeline
@pytest.mark.pr_check  # Pull Request check
@pytest.mark.fast
def test_basic_functionality():
    """Basic functionality test for PR checks"""
    assert True

@pytest.mark.nightly  # Nightly build
@pytest.mark.slow
@pytest.mark.integration
def test_comprehensive_integration():
    """Comprehensive integration test for nightly builds"""
    assert True

@pytest.mark.release  # Pre-release test
@pytest.mark.e2e
@pytest.mark.critical
def test_release_readiness():
    """Release readiness test"""
    assert True

# Environment-based tests
@pytest.mark.local_only
def test_local_development():
    """Run only in local development environment"""
    assert True

@pytest.mark.cloud_only
def test_cloud_infrastructure():
    """Run only in cloud environment"""
    assert True

# Feature flag tests
@pytest.mark.feature_flag("new_ui")
def test_new_ui_feature():
    """New UI feature test"""
    assert True

@pytest.mark.beta_feature
def test_beta_functionality():
    """Beta functionality test"""
    assert True
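
The feature_flag mark above carries an argument ("new_ui") that nothing reads yet. A minimal sketch of consuming it in conftest.py via request.node.get_closest_marker; the ENABLED_FLAGS set and the autouse fixture name are placeholders, not part of any real project:

# conftest.py (sketch)
import pytest

ENABLED_FLAGS = {"new_ui"}  # hypothetical source of truth for enabled flags

@pytest.fixture(autouse=True)
def _check_feature_flag(request):
    """Skip tests whose feature_flag mark names a flag that is not enabled"""
    marker = request.node.get_closest_marker("feature_flag")
    if marker is not None and marker.args[0] not in ENABLED_FLAGS:
        pytest.skip(f"Feature flag '{marker.args[0]}' is not enabled")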

Mark Usage Best Practices

  1. Consistent naming: Use a consistent mark naming convention across the suite
  2. Document marks: Register and document all marks in pytest.ini (or pyproject.toml)
  3. Reasonable grouping: Group marks sensibly by feature, type, priority, etc.
  4. Avoid over-marking: Don't pile too many marks onto a single test
  5. CI/CD integration: Use marks to control which tests run at each CI/CD stage (see the commands below)
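
A sketch of how these practices might map onto pipeline stages, reusing the pr_check, nightly, and release marks from the real-world example above (the exact expressions will depend on your own mark scheme):

# Pull request check: fast feedback only
pytest -m "pr_check and fast"

# Nightly build: include slow integration tests
pytest -m "nightly or integration" --run-slow

# Pre-release gate: critical end-to-end coverage
pytest -m "release and critical"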

Considerations

  1. Mark spelling: Ensure mark names are spelled correctly; a misspelled or unregistered mark silently fails to match -m selections (see the sketch after this list)
  2. Performance impact: A very large number of marks can slow down test collection
  3. Maintenance cost: A mark scheme needs regular maintenance and cleanup
  4. Team agreement: The team needs to agree on mark usage conventions
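
For the spelling concern in particular, pytest can turn unknown marks into collection errors instead of warnings; a sketch of the pytest.ini addition (merge it into your existing configuration):

# pytest.ini (excerpt)
[pytest]
addopts = --strict-markers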

Test marks are a powerful tool for organizing and managing large test suites. Proper use of marks can significantly improve testing efficiency and maintainability.