Chapter 7: YOLO Environment Setup and Tool Usage
Haiyue
Estimated reading time: 31 minutes
Chapter 7: YOLO Environment Setup and Tool Usage
Learning Objectives
- Set up YOLO development environment (Python, PyTorch/TensorFlow)
- Familiarize with common computer vision libraries (OpenCV, PIL, etc.)
- Master data preprocessing and visualization tools
- Understand GPU acceleration and model deployment tools
7.1 Basic Environment Setup
7.1.1 Python Environment Configuration
# Create virtual environment (option 1: conda, pins the Python version)
conda create -n yolo python=3.8
conda activate yolo
# Or use pip (option 2: the venv module shipped with Python)
python -m venv yolo_env
source yolo_env/bin/activate # Linux/Mac
# yolo_env\Scripts\activate # Windows
7.1.2 Core Dependency Installation
# PyTorch installation (CUDA 11.8 build; choose the index URL matching your driver)
pip install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cu118
# Other core dependencies
pip install ultralytics # YOLOv8 official package
pip install opencv-python
pip install matplotlib
pip install numpy
pip install pillow
pip install tensorboard
pip install wandb
7.2 YOLOv8 Environment Configuration
7.2.1 Ultralytics YOLO Installation
import ultralytics
from ultralytics import YOLO
import torch
import cv2
import numpy as np

# Check installation: report library versions and whether CUDA is usable.
print(f"Ultralytics version: {ultralytics.__version__}")
print(f"PyTorch version: {torch.__version__}")
print(f"CUDA available: {torch.cuda.is_available()}")

# Quick verification: requires network access on first run (weights + test image).
model = YOLO('yolov8n.pt')  # Auto-download pretrained model
results = model('https://ultralytics.com/images/bus.jpg')  # run inference on a remote sample image
results[0].show()  # display the first result with detections drawn
7.2.2 Configuration File Management
# Create configuration management system
import yaml
from pathlib import Path
class YOLOConfig:
    """Load, create, and query a YAML-backed YOLO training configuration.

    Reads ``config_path`` if it exists; otherwise writes a default config
    there and uses it. Values are retrieved with dotted keys, e.g.
    ``config.get('train.batch_size')``.
    """

    def __init__(self, config_path='config.yaml'):
        self.config_path = Path(config_path)
        self.config = self.load_config()

    def load_config(self):
        """Load the configuration file, creating defaults if it is missing."""
        if self.config_path.exists():
            with open(self.config_path, 'r', encoding='utf-8') as f:
                return yaml.safe_load(f)
        else:
            return self.create_default_config()

    def create_default_config(self):
        """Create, persist, and return the default configuration.

        Fix: the original embedded a literal ``...`` (Ellipsis) in the
        'names' list; PyYAML cannot represent Ellipsis, so save_config()
        raised RepresenterError before defaults were ever written.
        """
        default_config = {
            'model': {
                'name': 'yolov8n.pt',
                'task': 'detect',
                'device': 'auto'
            },
            'data': {
                'path': './datasets',
                'train': 'train/images',
                'val': 'val/images',
                'nc': 80,
                # Placeholder: replace with the full list of 80 COCO class names.
                'names': ['person', 'bicycle']
            },
            'train': {
                'epochs': 100,
                'batch_size': 16,
                'imgsz': 640,
                'lr0': 0.01,
                'optimizer': 'SGD'
            },
            'paths': {
                'project': './runs',
                'name': 'train',
                'weights': './weights',
                'logs': './logs'
            }
        }
        self.save_config(default_config)
        return default_config

    def save_config(self, config=None):
        """Write ``config`` (or the current config when None) to the YAML file."""
        config = config or self.config
        with open(self.config_path, 'w', encoding='utf-8') as f:
            yaml.dump(config, f, default_flow_style=False, allow_unicode=True)

    def get(self, key, default=None):
        """Return the value at dotted path ``key``, or ``default``.

        Fix: the original called ``.get`` on whatever the previous step
        returned, raising AttributeError whenever a path segment landed on
        a non-dict value (e.g. ``get('model.name.extra')``); it now falls
        back to ``default`` instead.
        """
        value = self.config
        for part in key.split('.'):
            if not isinstance(value, dict):
                return default
            value = value.get(part, default)
            if value is None:
                return default
        return value
# Usage example: creates config.yaml in the working directory on first run.
config = YOLOConfig()
print(f"Model name: {config.get('model.name')}")
print(f"Batch size: {config.get('train.batch_size')}")
7.3 Development Tool Integration
7.3.1 Jupyter Notebook Configuration
# Install Jupyter packages
# pip install jupyter ipywidgets
# Jupyter configuration file
%%writefile jupyter_setup.py
# NOTE(review): the %%writefile magic writes the rest of this cell to
# jupyter_setup.py instead of executing it, and line magics (%load_ext,
# %matplotlib) are not valid plain Python in the written file — confirm
# the intended workflow (write vs. run).
import matplotlib.pyplot as plt
import matplotlib

# Default plot appearance for notebook figures.
matplotlib.rcParams['figure.figsize'] = (12, 8)
matplotlib.rcParams['font.size'] = 12

# Auto-reload modules
%load_ext autoreload
%autoreload 2
# Inline image display
%matplotlib inline

# Progress bar support
from tqdm.notebook import tqdm

# Set random seed
import random
import numpy as np
import torch

def set_random_seed(seed=42):
    # Seed Python, NumPy, and PyTorch RNGs for reproducible runs.
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    if torch.cuda.is_available():
        # Seed the current CUDA device and all devices in multi-GPU setups.
        torch.cuda.manual_seed(seed)
        torch.cuda.manual_seed_all(seed)

set_random_seed(42)
print("Jupyter environment configured successfully!")
7.3.2 VS Code Configuration
// .vscode/settings.json
{
    // Fix: "python.interpreter" is not a setting the VS Code Python extension
    // reads; the correct key is "python.defaultInterpreterPath".
    "python.defaultInterpreterPath": "./yolo_env/bin/python",
    // NOTE(review): the "python.linting.*" keys are deprecated in recent
    // Python-extension versions (linting moved to the standalone Pylint
    // extension); kept for compatibility with older versions — confirm.
    "python.linting.enabled": true,
    "python.linting.pylintEnabled": true,
    "python.formatting.provider": "black",
    // Fix: isort 5 removed the "-rc" (recursive) flag; "--atomic" alone suffices.
    "python.sortImports.args": ["--atomic"],
    "editor.formatOnSave": true,
    "editor.rulers": [88],
    "files.associations": {
        "*.yaml": "yaml"
    }
}
// .vscode/launch.json
// Debug configuration for launching train.py from VS Code.
// NOTE(review): newer releases of the Python extension prefer
// "type": "debugpy"; "python" still works as a legacy alias — confirm
// against the installed extension version.
{
    "version": "0.2.0",
    "configurations": [
        {
            "name": "YOLO Training",
            "type": "python",
            "request": "launch",
            "program": "train.py",
            // Passed to train.py as command-line arguments.
            "args": ["--config", "config.yaml"],
            "console": "integratedTerminal",
            "env": {
                // Restrict training to GPU 0.
                "CUDA_VISIBLE_DEVICES": "0"
            }
        }
    ]
}
7.4 Data Processing Tools
7.4.1 Image Processing Utility Class
import cv2
import numpy as np
from PIL import Image, ImageDraw, ImageFont
import matplotlib.pyplot as plt
from typing import List, Tuple, Optional
class ImageProcessor:
    """Utilities for loading, resizing, augmenting, and annotating images.

    All methods operate on RGB ``np.ndarray`` images of shape (H, W, 3).
    """

    def __init__(self):
        # Informational list of handled extensions (not enforced by load_image).
        self.supported_formats = ['.jpg', '.jpeg', '.png', '.bmp', '.tiff']

    def load_image(self, image_path) -> np.ndarray:
        """Load an image from a path, or pass an already-loaded array through.

        Fix: the parameter was annotated ``str`` although the method also
        accepts (and returns unchanged) an ``np.ndarray``; the misleading
        annotation is removed and the contract documented instead.

        Args:
            image_path: filesystem path (str) or an image array.

        Raises:
            ValueError: if the path cannot be read by OpenCV.
        """
        if isinstance(image_path, str):
            image = cv2.imread(image_path)
            if image is None:
                raise ValueError(f"Cannot load image from {image_path}")
            # OpenCV loads BGR; convert to RGB for the rest of the pipeline.
            return cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        return image_path

    def resize_image(self, image: np.ndarray, size: Tuple[int, int],
                     keep_ratio: bool = True) -> np.ndarray:
        """Resize to ``size`` (width, height), optionally letterboxing."""
        if keep_ratio:
            return self._resize_keep_ratio(image, size)
        else:
            return cv2.resize(image, size)

    def _resize_keep_ratio(self, image: np.ndarray,
                           size: Tuple[int, int]) -> np.ndarray:
        """Resize preserving aspect ratio, centered on a black canvas."""
        h, w = image.shape[:2]
        target_w, target_h = size
        # Uniform scale that fits the whole image inside the target box.
        scale = min(target_w / w, target_h / h)
        new_w, new_h = int(w * scale), int(h * scale)
        resized = cv2.resize(image, (new_w, new_h))
        # Paste centered onto a zero-filled (black) canvas of the target size.
        result = np.zeros((target_h, target_w, 3), dtype=np.uint8)
        y_offset = (target_h - new_h) // 2
        x_offset = (target_w - new_w) // 2
        result[y_offset:y_offset + new_h, x_offset:x_offset + new_w] = resized
        return result

    def normalize_image(self, image: np.ndarray) -> np.ndarray:
        """Scale uint8 pixel values to float32 in [0, 1]."""
        return image.astype(np.float32) / 255.0

    def denormalize_image(self, image: np.ndarray) -> np.ndarray:
        """Inverse of normalize_image: [0, 1] floats back to uint8."""
        return (image * 255).astype(np.uint8)

    def apply_augmentation(self, image: np.ndarray,
                           augmentation_type: str) -> np.ndarray:
        """Apply a named augmentation; unknown names return the image unchanged."""
        if augmentation_type == 'flip_horizontal':
            return cv2.flip(image, 1)
        elif augmentation_type == 'flip_vertical':
            return cv2.flip(image, 0)
        elif augmentation_type == 'rotate_90':
            return cv2.rotate(image, cv2.ROTATE_90_CLOCKWISE)
        elif augmentation_type == 'blur':
            return cv2.GaussianBlur(image, (5, 5), 0)
        elif augmentation_type == 'brightness':
            # Fixed gain/offset; tune alpha/beta for different exposure shifts.
            return cv2.convertScaleAbs(image, alpha=1.2, beta=30)
        else:
            return image

    def draw_bboxes(self, image: np.ndarray, bboxes: List[List],
                    labels: List[str] = None,
                    colors: List[Tuple] = None) -> np.ndarray:
        """Draw ``[x1, y1, x2, y2]`` pixel boxes (and optional labels) on a copy."""
        result = image.copy()
        if colors is None:
            # Cycle red/green/blue; index is taken modulo len(colors) below.
            colors = [(255, 0, 0), (0, 255, 0), (0, 0, 255)] * 10
        for i, bbox in enumerate(bboxes):
            x1, y1, x2, y2 = map(int, bbox[:4])
            color = colors[i % len(colors)]
            # Draw bounding box outline.
            cv2.rectangle(result, (x1, y1), (x2, y2), color, 2)
            # Draw label with a filled background for readability.
            if labels and i < len(labels):
                label = labels[i]
                (text_width, text_height), _ = cv2.getTextSize(
                    label, cv2.FONT_HERSHEY_SIMPLEX, 0.6, 1
                )
                cv2.rectangle(result, (x1, y1 - text_height - 5),
                              (x1 + text_width, y1), color, -1)
                cv2.putText(result, label, (x1, y1 - 5),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.6, (255, 255, 255), 1)
        return result
# Usage example
# NOTE(review): 'sample.jpg' must exist in the working directory, otherwise
# load_image raises ValueError.
processor = ImageProcessor()
# Load and process image
image = processor.load_image('sample.jpg')
resized = processor.resize_image(image, (640, 640))
normalized = processor.normalize_image(resized)
# Data augmentation
flipped = processor.apply_augmentation(image, 'flip_horizontal')
# Draw detection results (boxes are [x1, y1, x2, y2] in pixels)
bboxes = [[100, 100, 200, 200], [300, 150, 400, 250]]
labels = ['person', 'car']
result = processor.draw_bboxes(image, bboxes, labels)
print("Image processing tools configured successfully!")
7.4.2 Dataset Tools
import os
import json
from pathlib import Path
from collections import defaultdict
import pandas as pd
class DatasetManager:
    """Dataset management tool for YOLO-format datasets.

    Expects the layout ``<root>/{train,val,test}/{images,labels}`` with one
    ``.txt`` label file per image (YOLO format: ``class x y w h`` per line,
    coordinates normalized to [0, 1]).

    Fix: this section's import block omitted ``cv2``, ``yaml``, and
    ``matplotlib`` although they are used below; the methods that need
    them now import them lazily so the class works standalone.
    """

    def __init__(self, dataset_path: str):
        self.dataset_path = Path(dataset_path)
        self.annotations = {}   # reserved for parsed annotations
        self.class_names = []   # populate before calling create_data_yaml()

    def scan_dataset(self) -> dict:
        """Scan the dataset tree; return image/label file lists per split."""
        structure = {
            'images': {'train': [], 'val': [], 'test': []},
            'labels': {'train': [], 'val': [], 'test': []},
            'statistics': {}
        }
        for split in ['train', 'val', 'test']:
            img_dir = self.dataset_path / split / 'images'
            label_dir = self.dataset_path / split / 'labels'
            if img_dir.exists():
                structure['images'][split] = (
                    list(img_dir.glob('*.jpg')) + list(img_dir.glob('*.png'))
                )
            if label_dir.exists():
                structure['labels'][split] = list(label_dir.glob('*.txt'))
        return structure

    def validate_dataset(self) -> dict:
        """Check integrity: images without labels, labels without images,
        and empty label files. Returns a dict of issue lists."""
        issues = {'missing_labels': [], 'missing_images': [], 'empty_labels': []}
        structure = self.scan_dataset()
        for split in ['train', 'val', 'test']:
            images = structure['images'][split]
            labels = structure['labels'][split]
            # Match image and label files by their stem (filename w/o extension).
            image_names = {img.stem for img in images}
            label_names = {label.stem for label in labels}
            # Images that have no corresponding label file.
            missing_labels = image_names - label_names
            if missing_labels:
                issues['missing_labels'].extend(
                    f"{split}/{name}" for name in missing_labels
                )
            # Label files that have no corresponding image.
            missing_images = label_names - image_names
            if missing_images:
                issues['missing_images'].extend(
                    f"{split}/{name}" for name in missing_images
                )
            # Zero-byte label files (no annotations at all).
            for label_file in labels:
                if label_file.stat().st_size == 0:
                    issues['empty_labels'].append(f"{split}/{label_file.name}")
        return issues

    def analyze_dataset(self) -> dict:
        """Collect statistics: image count, object count, class histogram,
        bbox sizes (normalized), and image sizes (pixels)."""
        import cv2  # lazy import: missing from this section's import block

        stats = {
            'total_images': 0,
            'total_objects': 0,
            'class_distribution': defaultdict(int),
            'bbox_sizes': [],
            'image_sizes': []
        }
        structure = self.scan_dataset()
        for split in ['train', 'val', 'test']:
            images = structure['images'][split]
            labels = structure['labels'][split]
            stats['total_images'] += len(images)
            # Read each image only to record its (width, height).
            for img_path in images:
                img = cv2.imread(str(img_path))
                if img is not None:
                    h, w = img.shape[:2]
                    stats['image_sizes'].append((w, h))
            # Parse YOLO-format annotation lines: class x y w h.
            for label_path in labels:
                try:
                    with open(label_path, 'r') as f:
                        lines = f.readlines()
                    for line in lines:
                        parts = line.strip().split()
                        if len(parts) >= 5:
                            class_id = int(parts[0])
                            x, y, w, h = map(float, parts[1:5])
                            stats['total_objects'] += 1
                            stats['class_distribution'][class_id] += 1
                            stats['bbox_sizes'].append((w, h))
                except Exception as e:
                    print(f"Error reading {label_path}: {e}")
        return stats

    def create_data_yaml(self, output_path: str = None):
        """Write a YOLO ``data.yaml`` describing this dataset.

        Requires ``self.class_names`` to be populated beforehand.
        """
        import yaml  # lazy import: missing from this section's import block

        if output_path is None:
            output_path = self.dataset_path / 'data.yaml'
        config = {
            'path': str(self.dataset_path.absolute()),
            'train': 'train/images',
            'val': 'val/images',
            'test': 'test/images',
            'nc': len(self.class_names),
            'names': self.class_names
        }
        with open(output_path, 'w') as f:
            yaml.dump(config, f, default_flow_style=False)
        print(f"Data config saved to {output_path}")

    def visualize_statistics(self, stats: dict):
        """Plot class distribution, bbox sizes, image sizes, and a summary."""
        import matplotlib.pyplot as plt  # lazy import (see class docstring)

        fig, axes = plt.subplots(2, 2, figsize=(15, 10))
        # Class distribution histogram.
        if stats['class_distribution']:
            classes = list(stats['class_distribution'].keys())
            counts = list(stats['class_distribution'].values())
            axes[0, 0].bar(classes, counts)
            axes[0, 0].set_title('Class Distribution')
            axes[0, 0].set_xlabel('Class ID')
            axes[0, 0].set_ylabel('Count')
        # Bounding box size scatter (normalized coordinates).
        if stats['bbox_sizes']:
            widths, heights = zip(*stats['bbox_sizes'])
            axes[0, 1].scatter(widths, heights, alpha=0.5)
            axes[0, 1].set_title('Bounding Box Sizes')
            axes[0, 1].set_xlabel('Width (normalized)')
            axes[0, 1].set_ylabel('Height (normalized)')
        # Image size scatter (pixels).
        if stats['image_sizes']:
            widths, heights = zip(*stats['image_sizes'])
            axes[1, 0].scatter(widths, heights, alpha=0.5)
            axes[1, 0].set_title('Image Sizes')
            axes[1, 0].set_xlabel('Width (pixels)')
            axes[1, 0].set_ylabel('Height (pixels)')
        # Text panel with overall statistics.
        axes[1, 1].text(0.1, 0.8, f"Total Images: {stats['total_images']}",
                        transform=axes[1, 1].transAxes, fontsize=12)
        axes[1, 1].text(0.1, 0.6, f"Total Objects: {stats['total_objects']}",
                        transform=axes[1, 1].transAxes, fontsize=12)
        axes[1, 1].text(0.1, 0.4, f"Classes: {len(stats['class_distribution'])}",
                        transform=axes[1, 1].transAxes, fontsize=12)
        axes[1, 1].set_title('Dataset Summary')
        axes[1, 1].axis('off')
        plt.tight_layout()
        plt.show()
# Usage example (calls commented out: they require a real dataset at ./dataset)
dataset_manager = DatasetManager('./dataset')
# Scan dataset
# structure = dataset_manager.scan_dataset()
# print("Dataset structure:", structure)
# Validate dataset
# issues = dataset_manager.validate_dataset()
# print("Dataset issues:", issues)
# Analyze statistics
# stats = dataset_manager.analyze_dataset()
# dataset_manager.visualize_statistics(stats)
7.5 GPU and Hardware Configuration
7.5.1 GPU Environment Detection
import torch
import subprocess
import psutil
from typing import Dict, List
class SystemInfo:
    """Static helpers that report GPU, RAM, and disk status for this machine."""

    @staticmethod
    def check_gpu_info() -> Dict:
        """Return CUDA availability plus per-device properties."""
        cuda_ok = torch.cuda.is_available()
        info = {
            'cuda_available': cuda_ok,
            'gpu_count': torch.cuda.device_count() if cuda_ok else 0,
            'current_device': torch.cuda.current_device() if cuda_ok else None,
            'gpu_details': []
        }
        if cuda_ok:
            for idx in range(torch.cuda.device_count()):
                props = torch.cuda.get_device_properties(idx)
                info['gpu_details'].append({
                    'device_id': idx,
                    'name': props.name,
                    'total_memory': f"{props.total_memory / 1024**3:.1f} GB",
                    'compute_capability': f"{props.major}.{props.minor}",
                    'multiprocessors': props.multi_processor_count
                })
        return info

    @staticmethod
    def check_memory_usage() -> Dict:
        """Return RAM totals and current utilization as display strings."""
        vm = psutil.virtual_memory()
        return {
            'total_ram': f"{vm.total / 1024**3:.1f} GB",
            'available_ram': f"{vm.available / 1024**3:.1f} GB",
            'used_ram': f"{vm.used / 1024**3:.1f} GB",
            'memory_percent': f"{vm.percent:.1f}%"
        }

    @staticmethod
    def check_disk_space(path: str = './') -> Dict:
        """Return disk usage for the filesystem containing ``path``."""
        usage = psutil.disk_usage(path)
        return {
            'total_space': f"{usage.total / 1024**3:.1f} GB",
            'free_space': f"{usage.free / 1024**3:.1f} GB",
            'used_space': f"{usage.used / 1024**3:.1f} GB",
            'disk_percent': f"{(usage.used / usage.total) * 100:.1f}%"
        }

    @staticmethod
    def print_system_info():
        """Print a human-readable summary of GPU, memory, and disk status."""
        print("=== System Information ===")
        gpu_info = SystemInfo.check_gpu_info()
        print("\nGPU Information:")
        print(f" CUDA Available: {gpu_info['cuda_available']}")
        print(f" GPU Count: {gpu_info['gpu_count']}")
        # Empty list when no CUDA devices, so the loop is simply skipped.
        for card in gpu_info['gpu_details']:
            print(f" GPU {card['device_id']}: {card['name']}")
            print(f" Memory: {card['total_memory']}")
            print(f" Compute Capability: {card['compute_capability']}")
        # Memory and disk sections share the same key→title formatting.
        sections = (
            ("Memory Information", SystemInfo.check_memory_usage()),
            ("Disk Information", SystemInfo.check_disk_space()),
        )
        for title, section in sections:
            print(f"\n{title}:")
            for label, val in section.items():
                print(f" {label.replace('_', ' ').title()}: {val}")
# GPU Performance Test
class GPUBenchmark:
    """Micro-benchmarks for raw GPU throughput and YOLO inference speed."""

    @staticmethod
    def test_gpu_speed(device_id: int = 0, iterations: int = 100):
        """Time repeated 1000x1000 matrix multiplications on one GPU."""
        if not torch.cuda.is_available():
            print("CUDA not available")
            return
        dev = torch.device(f'cuda:{device_id}')
        lhs = torch.randn(1000, 1000, device=dev)
        rhs = torch.randn(1000, 1000, device=dev)
        # Warm-up passes so kernel compilation/caching doesn't skew timing.
        for _ in range(10):
            torch.matmul(lhs, rhs)
        torch.cuda.synchronize()
        import time
        started = time.time()
        for _ in range(iterations):
            torch.matmul(lhs, rhs)
        # Kernels launch asynchronously; synchronize before reading the clock.
        torch.cuda.synchronize()
        avg_time = (time.time() - started) / iterations
        print(f"GPU {device_id} Matrix Multiplication Average Time: {avg_time*1000:.2f} ms")

    @staticmethod
    def test_model_inference_speed(model_name: str = 'yolov8n.pt'):
        """Time YOLO forward passes on a random 640x640 tensor."""
        try:
            import time
            net = YOLO(model_name)
            dummy = torch.randn(1, 3, 640, 640)
            # Warm-up passes before measuring.
            for _ in range(10):
                net(dummy, verbose=False)
            iterations = 100
            started = time.time()
            for _ in range(iterations):
                net(dummy, verbose=False)
            avg_time = (time.time() - started) / iterations
            fps = 1 / avg_time
            print(f"Model {model_name} Inference:")
            print(f" Average Time: {avg_time*1000:.2f} ms")
            print(f" FPS: {fps:.1f}")
        except Exception as e:
            print(f"Error testing model inference: {e}")
# Run system detection
SystemInfo.print_system_info()

# GPU performance test (only meaningful when CUDA is available)
if torch.cuda.is_available():
    print("\n=== GPU Performance Test ===")
    GPUBenchmark.test_gpu_speed()
    # GPUBenchmark.test_model_inference_speed()  # needs model weights download
7.6 Monitoring and Logging Tools
7.6.1 Training Monitor
import wandb
import tensorboard
from torch.utils.tensorboard import SummaryWriter
import logging
from datetime import datetime
class TrainingMonitor:
    """Unified training monitor: file logging + optional W&B and TensorBoard.

    Either backend is disabled automatically if its initialization fails,
    so logging calls stay safe to make unconditionally.
    """

    def __init__(self, project_name: str = "yolo_training",
                 use_wandb: bool = True, use_tensorboard: bool = True):
        self.project_name = project_name
        self.use_wandb = use_wandb
        self.use_tensorboard = use_tensorboard
        # File/console logging is always set up first so init failures below
        # can be reported through the logger.
        self.setup_logging()
        if self.use_wandb:
            self.init_wandb()
        if self.use_tensorboard:
            self.init_tensorboard()

    def setup_logging(self):
        """Configure a timestamped log file plus console output."""
        timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
        log_filename = f"training_{timestamp}.log"
        logging.basicConfig(
            level=logging.INFO,
            format='%(asctime)s - %(levelname)s - %(message)s',
            handlers=[
                logging.FileHandler(log_filename),
                logging.StreamHandler()
            ]
        )
        self.logger = logging.getLogger(__name__)
        self.logger.info(f"Training monitor initialized for project: {self.project_name}")

    def init_wandb(self):
        """Initialize Weights & Biases; disable it on failure."""
        try:
            wandb.init(project=self.project_name)
            self.logger.info("W&B initialized successfully")
        except Exception as e:
            self.logger.warning(f"Failed to initialize W&B: {e}")
            self.use_wandb = False

    def init_tensorboard(self):
        """Initialize a TensorBoard SummaryWriter; disable it on failure."""
        try:
            timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
            log_dir = f"runs/{self.project_name}_{timestamp}"
            self.writer = SummaryWriter(log_dir)
            self.logger.info(f"TensorBoard initialized: {log_dir}")
        except Exception as e:
            self.logger.warning(f"Failed to initialize TensorBoard: {e}")
            self.use_tensorboard = False

    def log_metrics(self, metrics: dict, step: int):
        """Log scalar metrics to file, W&B, and TensorBoard.

        Fix: the original formatted every value with ``:.4f``, which raised
        ValueError for non-numeric metric values (e.g. a stage name string);
        values that cannot take a float format are now logged verbatim.
        """
        def _fmt(value):
            try:
                return f"{value:.4f}"
            except (TypeError, ValueError):
                return str(value)

        metrics_str = ", ".join(f"{k}: {_fmt(v)}" for k, v in metrics.items())
        self.logger.info(f"Step {step} - {metrics_str}")
        # W&B logging
        if self.use_wandb:
            wandb.log(metrics, step=step)
        # TensorBoard logging (scalars only)
        if self.use_tensorboard:
            for key, value in metrics.items():
                self.writer.add_scalar(key, value, step)

    def log_images(self, images: dict, step: int):
        """Log a dict of name -> np.ndarray images to the active backends."""
        if self.use_wandb:
            wandb_images = {}
            for key, img in images.items():
                if isinstance(img, np.ndarray):
                    wandb_images[key] = wandb.Image(img)
            wandb.log(wandb_images, step=step)
        if self.use_tensorboard:
            for key, img in images.items():
                if isinstance(img, np.ndarray):
                    # TensorBoard expects channel-first (C, H, W); convert
                    # from the usual (H, W, 3) layout.
                    if len(img.shape) == 3 and img.shape[-1] == 3:
                        img = img.transpose(2, 0, 1)
                    self.writer.add_image(key, img, step, dataformats='CHW')

    def log_model(self, model_path: str):
        """Upload a model checkpoint to W&B (no-op when W&B is disabled)."""
        if self.use_wandb:
            wandb.save(model_path)
            self.logger.info(f"Model saved to W&B: {model_path}")

    def close(self):
        """Flush and shut down the active monitoring backends."""
        if self.use_tensorboard:
            self.writer.close()
        if self.use_wandb:
            wandb.finish()
        self.logger.info("Training monitor closed")
# Usage example (commented out so importing this file does not start
# W&B/TensorBoard sessions or create run directories)
# monitor = TrainingMonitor("yolo_experiment")
#
# # Log training metrics
# metrics = {
#     'train_loss': 0.5,
#     'val_loss': 0.6,
#     'mAP': 0.75
# }
# monitor.log_metrics(metrics, step=100)
#
# # Log images
# sample_image = np.random.randint(0, 255, (640, 640, 3), dtype=np.uint8)
# monitor.log_images({'sample': sample_image}, step=100)
print("Monitoring tools configured successfully!")
7.7 Chapter Summary
7.7.1 Environment Configuration Checklist
def environment_checklist():
    """Print the YOLO development environment configuration checklist."""
    sections = {
        "Python Environment": {
            "Python Version": "3.8+",
            "Virtual Environment": "Created and activated",
            "Package Manager": "pip or conda"
        },
        "Deep Learning Framework": {
            "PyTorch": "CUDA version installed",
            "TorchVision": "Installed",
            "CUDA": "Version matched"
        },
        "YOLO Related": {
            "Ultralytics": "Latest version installed",
            "Pretrained Models": "Can download normally",
            "Inference Test": "Runs successfully"
        },
        "Data Processing": {
            "OpenCV": "Installed",
            "PIL/Pillow": "Installed",
            "Matplotlib": "Installed",
            "NumPy": "Installed"
        },
        "Development Tools": {
            "Jupyter Notebook": "Optional installation",
            "VS Code": "Recommended configuration",
            "Git": "Version control"
        },
        "Monitoring Tools": {
            "TensorBoard": "Configured",
            "W&B": "Optional configuration",
            "Logging System": "Set up"
        },
        "Hardware Configuration": {
            "GPU": "CUDA compatible",
            "Memory": "8GB+ recommended",
            "Storage": "Sufficient disk space"
        }
    }
    # Build all output lines first, then print once.
    lines = ["YOLO Development Environment Configuration Checklist:", "=" * 50]
    for section, requirements in sections.items():
        lines.append(f"\n{section}:")
        lines.extend(f" ✓ {item}: {req}" for item, req in requirements.items())
    lines.append("\nAfter configuration, please run the following tests:")
    follow_up = [
        "System information detection",
        "GPU performance test",
        "YOLO model inference test",
        "Dataset tool test",
        "Monitoring tool test",
    ]
    lines.extend(f" {i}. {step}" for i, step in enumerate(follow_up, 1))
    print("\n".join(lines))

# Run checklist
environment_checklist()
7.7.2 Common Problem Solving
class TroubleShooting:
    """Printable catalogue of frequent YOLO environment problems and fixes."""

    @staticmethod
    def common_issues():
        """Print each known issue with its symptom and numbered solutions."""
        catalogue = {
            "CUDA Unavailable": {
                "Symptom": "torch.cuda.is_available() returns False",
                "Solutions": [
                    "Check if GPU driver is correctly installed",
                    "Confirm PyTorch CUDA version matches driver",
                    "Reinstall PyTorch CUDA version",
                    "Check environment variable CUDA_PATH"
                ]
            },
            "Out of Memory": {
                "Symptom": "CUDA out of memory error",
                "Solutions": [
                    "Reduce batch_size",
                    "Lower input image resolution",
                    "Use mixed precision training",
                    "Clear GPU cache torch.cuda.empty_cache()"
                ]
            },
            "Model Download Failed": {
                "Symptom": "Cannot download pretrained model",
                "Solutions": [
                    "Check network connection",
                    "Use proxy or mirror source",
                    "Download model file manually",
                    "Configure environment variable YOLO_CONFIG_DIR"
                ]
            },
            "Dependency Conflict": {
                "Symptom": "Package version conflict error",
                "Solutions": [
                    "Create new virtual environment",
                    "Install dependencies with recommended versions",
                    "Use pip check to verify dependencies",
                    "Check official documentation for latest requirements"
                ]
            }
        }
        # Assemble the report line by line, then emit it in one print call.
        report = ["YOLO Environment Common Issues and Solutions:", "=" * 50]
        for title, entry in catalogue.items():
            report.append(f"\nIssue: {title}")
            report.append(f"Symptom: {entry['Symptom']}")
            report.append("Solutions:")
            report.extend(
                f" {rank}. {fix}" for rank, fix in enumerate(entry['Solutions'], 1)
            )
        print("\n".join(report))
# Display troubleshooting guide (prints the full issue/solution catalogue)
TroubleShooting.common_issues()
print("\nEnvironment setup complete! Ready to start YOLO model training and deployment.")
After completing this chapter, you should be able to:
- Successfully set up YOLO development environment
- Configure GPU and deep learning frameworks
- Use data processing and visualization tools
- Set up training monitoring and logging systems
- Solve common environment configuration problems
- Prepare for actual model training
Key Points: Establish a complete YOLO development environment to lay the foundation for subsequent data preparation, model training, and deployment.