Chapter 8: Configuration Files and Command Line Options
Haiyue
Learning Objectives
- Master the use of pytest.ini configuration file
- Learn the role and configuration of conftest.py
- Understand the use of command line arguments
- Master test environment configuration management
Key Concepts
Configuration File Hierarchy
pytest searches for configuration files in the following priority order (the run header confirms which file was actually picked up, as shown after this list):
- pytest.ini - Dedicated configuration file (recommended)
- pyproject.toml - Modern Python project configuration
- tox.ini - Shared configuration with tox
- setup.cfg - Traditional setuptools configuration
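Whichever file wins, pytest reports it in the run header; the paths and version numbers below are illustrative, and the exact layout varies slightly across pytest versions:
$ pytest
============================= test session starts =============================
platform linux -- Python 3.11.4, pytest-7.4.0, pluggy-1.2.0
rootdir: /path/to/project
configfile: pytest.ini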
Configuration File Scope
- Project root directory: the first configuration file found determines the rootdir and applies to the entire test run
- Subdirectories: conftest.py files (not ini files) affect their own directory and its subdirectories
- Command line: Overrides file configuration (highest priority; see the example after this list)
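As a concrete illustration of that priority, `-o`/`--override-ini` replaces a single ini value for one run, and flags given on the command line beat the same flags baked into addopts (the coverage example assumes pytest-cov is installed):
# Override individual ini keys for a single run
pytest -o log_cli=true -o log_cli_level=DEBUG
# Relax a gate configured in addopts, e.g. a coverage threshold
pytest --cov-fail-under=0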
Example Code
Complete pytest.ini Configuration
# pytest.ini - Recommended configuration method
# (use [pytest] here; the [tool:pytest] header is only for setup.cfg)
[pytest]
# Test discovery
testpaths = tests
python_files = test_*.py *_test.py
python_functions = test_*
python_classes = Test*
# Minimum version requirement
minversion = 6.0
# Command line options (the --cov* flags require the pytest-cov plugin)
addopts =
    -ra
    --strict-markers
    --strict-config
    --cov=src
    --cov-branch
    --cov-report=term-missing:skip-covered
    --cov-report=html:htmlcov
    --cov-report=xml
    --cov-fail-under=80
    -n auto
# Marker registration
markers =
slow: Mark slow tests
integration: Integration tests
unit: Unit tests
smoke: Smoke tests
api: API tests
auth: Authentication related tests
database: Database related tests
network: Tests requiring network
external: Tests depending on external services
parametrize: Parametrized tests
# Filter warnings
filterwarnings =
error
ignore::UserWarning
ignore::DeprecationWarning
ignore::PendingDeprecationWarning
ignore:.*HTTPSConnection.*:urllib3.exceptions.InsecureRequestWarning
# Test timeout (requires the pytest-timeout plugin)
timeout = 300
timeout_method = thread
# Parallel execution: -n auto (pytest-xdist) is included in addopts above,
# because an INI file allows only one addopts key and a second would override the first
# Log configuration
log_cli = true
log_cli_level = INFO
log_cli_format = %(asctime)s [%(levelname)8s] %(name)s: %(message)s
log_cli_date_format = %Y-%m-%d %H:%M:%S
log_file = tests.log
log_file_level = DEBUG
log_file_format = %(asctime)s [%(levelname)8s] %(filename)s:%(lineno)d %(funcName)s(): %(message)s
log_file_date_format = %Y-%m-%d %H:%M:%S
# Auto-use fixtures
usefixtures = clean_database
# Doctest options
doctest_optionflags = NORMALIZE_WHITESPACE IGNORE_EXCEPTION_DETAIL
doctest_encoding = utf-8
# Output options
console_output_style = progress
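Because the file above enables --strict-markers, every marker used in a test must appear in the registered list; `pytest --markers` prints that list and is a cheap way to sanity-check the configuration:
# Show all registered markers (built-in plus the ones from pytest.ini)
pytest --markers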
pyproject.toml Configuration
# pyproject.toml - Modern configuration method
[build-system]
requires = ["setuptools>=61.0", "wheel"]
build-backend = "setuptools.build_meta"
[project]
name = "my-project"
version = "1.0.0"
dependencies = [
"requests>=2.25.0",
]
[project.optional-dependencies]
test = [
"pytest>=7.0.0",
"pytest-cov>=4.0.0",
"pytest-mock>=3.10.0",
]
[tool.pytest.ini_options]
testpaths = ["tests"]
python_files = ["test_*.py", "*_test.py"]
python_functions = ["test_*"]
python_classes = ["Test*"]
addopts = [
"-ra",
"--strict-markers",
"--strict-config",
"--cov=src",
"--cov-report=term-missing:skip-covered",
"--cov-report=html:htmlcov",
"--cov-fail-under=80",
]
markers = [
"slow: Slow tests",
"integration: Integration tests",
"unit: Unit tests",
]
filterwarnings = [
"error",
"ignore::UserWarning",
"ignore::DeprecationWarning",
]
[tool.coverage.run]
source = ["src"]
omit = [
"*/tests/*",
"*/test_*",
"*/__pycache__/*",
"*/venv/*",
"*/virtualenv/*",
]
[tool.coverage.report]
exclude_lines = [
"pragma: no cover",
"def __repr__",
"raise AssertionError",
"raise NotImplementedError",
"if __name__ == .__main__.:",
"if TYPE_CHECKING:",
]
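With the optional test dependencies declared above, a typical local setup is an editable install that pulls in the `test` extra (assuming a standard pip workflow):
# Install the project together with its test extras, then run the suite
pip install -e ".[test]"
pytest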
conftest.py Configuration Example
# conftest.py - Project root directory
import pytest
import os
import tempfile
import logging
from pathlib import Path
# Configure logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
def pytest_addoption(parser):
"""Add command line options"""
parser.addoption(
"--env",
action="store",
default="test",
help="Runtime environment: test, dev, staging, prod"
)
parser.addoption(
"--slow",
action="store_true",
default=False,
help="Run slow tests"
)
parser.addoption(
"--integration",
action="store_true",
default=False,
help="Run integration tests"
)
parser.addoption(
"--database-url",
action="store",
default="sqlite:///:memory:",
help="Database connection URL"
)
def pytest_configure(config):
"""pytest configuration hook"""
# Set environment variables
env = config.getoption("--env")
os.environ["TEST_ENV"] = env
# Register custom markers
config.addinivalue_line(
"markers", "env(name): Specify test environment"
)
    # Configure logging: pytest has no --log-cli flag, so key off the
    # built-in --log-cli-level option instead
    if config.getoption("--log-cli-level"):
        logging.getLogger().setLevel(logging.DEBUG)
def pytest_collection_modifyitems(config, items):
"""Modify test collection"""
# Skip tests based on command line options
if not config.getoption("--slow"):
skip_slow = pytest.mark.skip(reason="Requires --slow option")
for item in items:
if "slow" in item.keywords:
item.add_marker(skip_slow)
if not config.getoption("--integration"):
skip_integration = pytest.mark.skip(reason="Requires --integration option")
for item in items:
if "integration" in item.keywords:
item.add_marker(skip_integration)
# Skip tests based on environment
env = config.getoption("--env")
for item in items:
env_markers = [mark for mark in item.iter_markers(name="env")]
if env_markers:
supported_envs = env_markers[0].args
if env not in supported_envs:
item.add_marker(pytest.mark.skip(
reason=f"Test not supported in environment {env}"
))
@pytest.fixture(scope="session")
def config(request):
"""Configuration fixture"""
return {
"env": request.config.getoption("--env"),
"database_url": request.config.getoption("--database-url"),
"slow": request.config.getoption("--slow"),
"integration": request.config.getoption("--integration"),
}
@pytest.fixture(scope="session")
def temp_dir():
"""Temporary directory fixture"""
with tempfile.TemporaryDirectory() as tmpdir:
yield Path(tmpdir)
@pytest.fixture(autouse=True)
def setup_test_env(config):
"""Automatically setup test environment"""
original_env = os.environ.copy()
# Set test environment variables
os.environ.update({
"TESTING": "true",
"TEST_ENV": config["env"],
"DATABASE_URL": config["database_url"],
})
yield
# Restore original environment variables
os.environ.clear()
os.environ.update(original_env)
@pytest.fixture
def clean_database(config):
"""Clean database fixture"""
# Clean before test
logger.info(f"Cleaning database: {config['database_url']}")
yield
# Clean after test
logger.info("Test completed, cleaning database")
# Custom marker hooks
def pytest_runtest_setup(item):
"""Setup before test run"""
# Check environment markers
env_markers = list(item.iter_markers(name="env"))
if env_markers:
current_env = os.environ.get("TEST_ENV", "test")
supported_envs = env_markers[0].args
if current_env not in supported_envs:
pytest.skip(f"Test requires environment: {supported_envs}, current: {current_env}")
Multi-Environment Configuration Management
# config/settings.py
import os
from dataclasses import dataclass
from typing import Optional
@dataclass
class DatabaseConfig:
"""Database configuration"""
url: str
pool_size: int = 5
echo: bool = False
@dataclass
class APIConfig:
"""API configuration"""
base_url: str
timeout: int = 30
api_key: Optional[str] = None
@dataclass
class TestConfig:
"""Test configuration"""
database: DatabaseConfig
api: APIConfig
debug: bool = False
log_level: str = "INFO"
def get_test_config(env: str = "test") -> TestConfig:
"""Get configuration based on environment"""
configs = {
"test": TestConfig(
database=DatabaseConfig(
url="sqlite:///:memory:",
echo=False
),
api=APIConfig(
base_url="http://localhost:8000",
timeout=5
),
debug=True,
log_level="DEBUG"
),
"dev": TestConfig(
database=DatabaseConfig(
url=os.getenv("DEV_DATABASE_URL", "sqlite:///dev.db"),
echo=True
),
api=APIConfig(
base_url=os.getenv("DEV_API_URL", "http://dev-api.example.com"),
timeout=10,
api_key=os.getenv("DEV_API_KEY")
),
debug=True,
log_level="DEBUG"
),
"staging": TestConfig(
database=DatabaseConfig(
url=os.getenv("STAGING_DATABASE_URL"),
pool_size=10
),
api=APIConfig(
base_url=os.getenv("STAGING_API_URL"),
timeout=15,
api_key=os.getenv("STAGING_API_KEY")
),
debug=False,
log_level="INFO"
),
"prod": TestConfig(
database=DatabaseConfig(
url=os.getenv("PROD_DATABASE_URL"),
pool_size=20
),
api=APIConfig(
base_url=os.getenv("PROD_API_URL"),
timeout=30,
api_key=os.getenv("PROD_API_KEY")
),
debug=False,
log_level="WARNING"
)
}
if env not in configs:
raise ValueError(f"Unsupported environment: {env}")
return configs[env]
# tests/conftest.py - Environment configuration integration
import pytest
@pytest.fixture(scope="session")
def test_config(config):
"""Test configuration fixture"""
from config.settings import get_test_config
return get_test_config(config["env"])
Command Line Options Usage Example
# test_command_line_options.py
import pytest
import os
def test_environment_config(config):
"""Test environment configuration"""
assert config["env"] in ["test", "dev", "staging", "prod"]
assert os.environ["TEST_ENV"] == config["env"]
@pytest.mark.env("dev", "staging")
def test_development_feature(config):
"""Only run in dev and staging environments"""
assert config["env"] in ["dev", "staging"]
@pytest.mark.slow
def test_slow_operation():
"""Slow test"""
import time
time.sleep(1) # Simulate slow operation
assert True
@pytest.mark.integration
def test_database_integration(test_config):
"""Integration test"""
assert test_config.database.url is not None
def test_api_configuration(test_config):
"""API configuration test"""
assert test_config.api.base_url.startswith("http")
assert test_config.api.timeout > 0
# Command line usage examples
"""
# Basic run
pytest
# Specify environment
pytest --env=dev
# Run slow tests
pytest --slow
# Run integration tests
pytest --integration
# Combine options
pytest --env=staging --slow --integration
# Specify database
pytest --database-url=postgresql://user:pass@localhost/testdb
# Verbose live logging (there is no --log-cli flag; use the level option)
pytest --log-cli-level=DEBUG
# Parallel run (requires pytest-xdist)
pytest -n 4
# Run only specific markers
pytest -m "not slow"
pytest -m "integration and not slow"
"""
Advanced Configuration Tips
# advanced_config.py
import os
import pytest
import sys
import platform
from pathlib import Path
def pytest_report_header(config):
"""Customize report header"""
return [
f"Project path: {Path.cwd()}",
f"Python version: {sys.version}",
f"Platform: {platform.platform()}",
f"Test environment: {config.getoption('--env')}",
]
def pytest_sessionstart(session):
    """Hook at session start (runs before collection, so session.items does not exist yet)"""
    print("\n🚀 Starting test session")
def pytest_collection_finish(session):
    """Hook after collection - the collected items are available from here on"""
    print(f"📊 Discovered {len(session.items)} tests")
def pytest_sessionfinish(session, exitstatus):
"""Hook at session end"""
if exitstatus == 0:
print("\n✅ All tests passed!")
else:
print(f"\n❌ Tests failed, exit code: {exitstatus}")
@pytest.hookimpl(tryfirst=True, hookwrapper=True)
def pytest_runtest_makereport(item, call):
"""Hook for generating test reports"""
outcome = yield
rep = outcome.get_result()
# Add extra information for failed tests
if rep.when == "call" and rep.failed:
# Can add screenshots, environment info, etc.
setattr(item, "rep_call", rep)
def pytest_runtest_teardown(item, nextitem):
"""Hook in test teardown phase"""
if hasattr(item, "rep_call") and item.rep_call.failed:
print(f"\n🔍 Test failed: {item.name}")
# Can perform additional cleanup or logging
# Conditional configuration
# Note: a module may define each hook only once; a second pytest_configure
# definition would silently shadow the first, so everything is merged here.
def pytest_configure(config):
    """Dynamic configuration"""
    # Register custom markers unconditionally: --strict-markers requires every
    # marker to be known regardless of interpreter or platform
    config.addinivalue_line("markers", "py38plus: requires Python 3.8+")
    config.addinivalue_line("markers", "unix_only: Unix-only tests")
    # Disable certain plugins in CI environment
    if os.environ.get("CI"):
        config.pluginmanager.set_blocked("pytest-qt")
def pytest_collection_modifyitems(config, items):
    """Apply the version/platform conditions at collection time"""
    for item in items:
        if "py38plus" in item.keywords and sys.version_info < (3, 8):
            item.add_marker(pytest.mark.skip(reason="requires Python 3.8+"))
        if "unix_only" in item.keywords and platform.system() == "Windows":
            item.add_marker(pytest.mark.skip(reason="Unix-only test"))
# Plugin management (pytest_plugins is only honored in the top-level conftest.py)
pytest_plugins = [
    "pytest_html",
    "pytest_cov",
    "pytest_mock",
    "pytest_xdist",
]
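The same blocking is available without code via pytest's built-in -p no:<name> flag; the commands below are illustrative (the second assumes pytest-cov is installed):
# Disable one plugin for a single run
pytest -p no:cacheprovider
# Disable all entry-point plugin autoloading, then opt back in selectively
PYTEST_DISABLE_PLUGIN_AUTOLOAD=1 pytest -p pytest_cov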
Configuration File Best Practices
# Project structure best practices
"""
project/
├── pytest.ini # Main configuration file
├── pyproject.toml # Project configuration
├── conftest.py # Global configuration
├── config/
│ ├── __init__.py
│ ├── settings.py # Environment configuration
│ └── test_data.py # Test data
├── tests/
│ ├── conftest.py # Test-specific configuration
│ ├── unit/
│ │ └── conftest.py # Unit test configuration
│ └── integration/
│ └── conftest.py # Integration test configuration
└── src/
└── ...
"""
# Configuration validation
def validate_test_config():
"""Validate test configuration"""
required_env_vars = [
"TEST_ENV",
"DATABASE_URL"
]
missing_vars = []
for var in required_env_vars:
if not os.environ.get(var):
missing_vars.append(var)
if missing_vars:
raise EnvironmentError(
f"Missing required environment variables: {', '.join(missing_vars)}"
)
# Use in conftest.py
def pytest_sessionstart(session):
"""Validate configuration at session start"""
try:
validate_test_config()
print("✅ Configuration validation passed")
except EnvironmentError as e:
print(f"❌ Configuration validation failed: {e}")
pytest.exit("Configuration error", returncode=1)
Configuration Best Practices
- Use pytest.ini: Preferred method for pytest-specific configuration
- Environment separation: Use different configurations for different environments
- Configuration validation: Validate necessary configurations before tests start
- Document options: Provide clear documentation for custom options; their help strings surface in pytest --help (see the sketch after this list)
- Reasonable defaults: Provide sensible default values for configuration options
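A minimal sketch of the document-options point, assuming a hypothetical group name: parser.getgroup collects custom options under their own heading in pytest --help.
# conftest.py - custom options surface in `pytest --help` with their help text
def pytest_addoption(parser):
    group = parser.getgroup("myproject", "MyProject test options")  # group name is illustrative
    group.addoption(
        "--env",
        action="store",
        default="test",
        help="Runtime environment: test, dev, staging, prod",
    )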
Important Notes
- Configuration override order: Command line > Configuration file > Defaults
- Sensitive information: Don’t hardcode sensitive information in configuration files; read it from the environment instead (see the sketch after this list)
- Version compatibility: Be aware of configuration differences across pytest versions
- Performance impact: Some configuration options may affect test performance
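For the sensitive-information note, one common pattern is to read secrets from environment variables and skip cleanly when they are missing; the variable name below is made up for illustration:
# conftest.py - keep secrets out of version-controlled config files
import os
import pytest
@pytest.fixture(scope="session")
def api_key():
    key = os.environ.get("MY_SERVICE_API_KEY")  # hypothetical secret name
    if not key:
        pytest.skip("MY_SERVICE_API_KEY not set; skipping tests that require it")
    return key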
Proper configuration management is key to running large test projects successfully: systematic configuration greatly improves the maintainability and scalability of a test suite.