在前面的章节中,我们学习了各种机器学习算法和特征工程技巧。然而,如何正确评估模型性能是机器学习项目成功的关键。不同的评估指标适用于不同的场景,选择合适的评估方法能够帮助我们更好地理解模型表现并做出正确的决策。
本节将深入探讨模型评估的各个方面,从基础的准确率到高级的AUC、F1分数等指标,帮助你建立完整的模型评估体系。
为什么模型评估如此重要?
graph TD
A[模型评估] --> B[性能评估]
A --> C[泛化能力评估]
A --> D[业务价值评估]
B --> B1[准确率]
B --> B2[精确率/召回率]
B --> B3[AUC-ROC]
C --> C1[交叉验证]
C --> C2[学习曲线]
C --> C3[验证集表现]
D --> D1[业务指标]
D --> D2[成本效益]
D --> D3[可解释性]
style A fill:#ff6b6b
分类任务评估指标
1. 混淆矩阵(Confusion Matrix)
# --- Dependencies and global plotting configuration for all demos below ---
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.metrics import confusion_matrix, classification_report
from sklearn.datasets import make_classification
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
import pandas as pd
import warnings
warnings.filterwarnings('ignore')  # suppress library warnings for cleaner tutorial output
plt.rcParams['font.sans-serif'] = ['SimHei']  # SimHei font so Chinese labels render in figures
plt.rcParams['axes.unicode_minus'] = False  # render the minus sign correctly with CJK fonts
def confusion_matrix_analysis():
    """Train a random forest on an imbalanced dataset and visualize its confusion matrix.

    Returns:
        numpy.ndarray: the 2x2 confusion matrix computed on the held-out test split.
    """
    # Imbalanced binary problem: roughly 90% negatives / 10% positives.
    features, labels = make_classification(
        n_samples=1000, n_features=20, n_informative=10,
        n_classes=2, weights=[0.9, 0.1], random_state=42)
    X_tr, X_te, y_tr, y_te = train_test_split(
        features, labels, test_size=0.3, random_state=42)

    # Fit the classifier and predict on the held-out split.
    clf = RandomForestClassifier(n_estimators=100, random_state=42)
    clf.fit(X_tr, y_tr)
    predicted = clf.predict(X_te)

    cm = confusion_matrix(y_te, predicted)

    # Two heatmaps side by side: raw counts and row-normalized rates.
    fig, axes = plt.subplots(1, 2, figsize=(15, 5))
    sns.heatmap(cm, annot=True, fmt='d', cmap='Blues', ax=axes[0],
                xticklabels=['预测负类', '预测正类'],
                yticklabels=['实际负类', '实际正类'])
    axes[0].set_title('混淆矩阵(数值)')
    axes[0].set_ylabel('真实标签')
    axes[0].set_xlabel('预测标签')

    # Normalize each row by its class total so cells read as per-class rates.
    row_totals = cm.sum(axis=1)[:, np.newaxis]
    cm_rates = cm.astype('float') / row_totals
    sns.heatmap(cm_rates, annot=True, fmt='.2%', cmap='Blues', ax=axes[1],
                xticklabels=['预测负类', '预测正类'],
                yticklabels=['实际负类', '实际正类'])
    axes[1].set_title('混淆矩阵(归一化)')
    axes[1].set_ylabel('真实标签')
    axes[1].set_xlabel('预测标签')

    plt.tight_layout()
    plt.show()

    # Unpack the four cells for a textual summary.
    TN, FP, FN, TP = cm.ravel()
    print("混淆矩阵分析:")
    print(f"真阴性 (TN): {TN}")
    print(f"假阳性 (FP): {FP}")
    print(f"假阴性 (FN): {FN}")
    print(f"真阳性 (TP): {TP}")
    return cm


cm = confusion_matrix_analysis()
2. 准确率、精确率、召回率、F1分数
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score
def comprehensive_classification_metrics(y_true, y_pred, y_pred_proba=None):
    """Compute a comprehensive set of binary-classification metrics.

    Args:
        y_true: ground-truth binary labels.
        y_pred: hard (0/1) predictions from the model.
        y_pred_proba: optional predicted probabilities for the positive class;
            when given, threshold-independent ROC-AUC is added to the metrics.

    Returns:
        tuple: (dict mapping Chinese metric name -> value, confusion matrix).
    """
    # Threshold-dependent metrics from the hard predictions.
    accuracy = accuracy_score(y_true, y_pred)
    precision = precision_score(y_true, y_pred)
    recall = recall_score(y_true, y_pred)
    f1 = f1_score(y_true, y_pred)

    cm = confusion_matrix(y_true, y_pred)
    TN, FP, FN, TP = cm.ravel()

    # Derived rates; guard every denominator against an empty class.
    specificity = TN / (TN + FP) if (TN + FP) > 0 else 0  # true-negative rate
    FPR = FP / (FP + TN) if (FP + TN) > 0 else 0  # false-positive rate
    FNR = FN / (FN + TP) if (FN + TP) > 0 else 0  # false-negative rate

    metrics = {
        '准确率 (Accuracy)': accuracy,
        '精确率 (Precision)': precision,
        '召回率 (Recall/Sensitivity)': recall,
        '特异性 (Specificity)': specificity,
        'F1分数': f1,
        '假阳性率 (FPR)': FPR,
        '假阴性率 (FNR)': FNR
    }

    # Fix: `y_pred_proba` was previously accepted but silently ignored.
    # When probabilities are supplied, report ROC-AUC as well.
    # Local import: the module-level roc_auc_score import appears later in the file.
    if y_pred_proba is not None:
        from sklearn.metrics import roc_auc_score
        metrics['AUC (ROC)'] = roc_auc_score(y_true, y_pred_proba)

    return metrics, cm
# 使用示例: fit a random forest on a moderately imbalanced dataset and
# report/visualize the full set of classification metrics.
X, y = make_classification(n_samples=1000, n_features=20, n_informative=10,
                           n_classes=2, weights=[0.8, 0.2], random_state=42)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42)

model = RandomForestClassifier(n_estimators=100, random_state=42)
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
y_pred_proba = model.predict_proba(X_test)[:, 1]  # positive-class probabilities

metrics, cm = comprehensive_classification_metrics(y_test, y_pred, y_pred_proba)

print("\n分类评估指标:")
print("=" * 60)
for metric_name, value in metrics.items():
    print(f"{metric_name}: {value:.4f}")

# Left panel: all metrics as a horizontal bar chart.
fig, axes = plt.subplots(1, 2, figsize=(15, 5))
axes[0].barh(list(metrics.keys()), list(metrics.values()), alpha=0.7)
axes[0].set_xlabel('分数')
axes[0].set_title('分类评估指标')
axes[0].grid(True, alpha=0.3, axis='x')

# Right panel: where the model sits in precision-recall space.
# NOTE(review): the anti-diagonal is only a reference line, not a true
# "ideal" precision-recall curve — consider relabeling.
axes[1].scatter([metrics['精确率 (Precision)']], [metrics['召回率 (Recall/Sensitivity)']],
                s=200, alpha=0.7, label='当前模型')
axes[1].plot([0, 1], [1, 0], 'r--', alpha=0.5, label='理想曲线')
axes[1].set_xlabel('精确率')
axes[1].set_ylabel('召回率')
axes[1].set_title('精确率-召回率空间')
axes[1].legend()
axes[1].grid(True, alpha=0.3)
axes[1].set_xlim([0, 1])
axes[1].set_ylim([0, 1])

plt.tight_layout()
plt.show()
3. ROC曲线和AUC
from sklearn.metrics import roc_curve, auc, roc_auc_score
from sklearn.metrics import precision_recall_curve, average_precision_score
def roc_auc_analysis(y_true, y_pred_proba):
    """Plot ROC and precision-recall curves and report the Youden-optimal threshold.

    Args:
        y_true: ground-truth binary labels.
        y_pred_proba: predicted probabilities for the positive class.

    Returns:
        dict with keys 'fpr', 'tpr', 'thresholds', 'auc', 'optimal_threshold'.
    """
    # ROC curve and its area. Fix: the AUC is now computed once — the previous
    # version computed the identical value twice, via both auc(fpr, tpr) and
    # roc_auc_score(y_true, y_pred_proba).
    fpr, tpr, thresholds = roc_curve(y_true, y_pred_proba)
    roc_auc = auc(fpr, tpr)

    # Precision-recall curve and its summary statistic (average precision).
    precision_vals, recall_vals, pr_thresholds = precision_recall_curve(y_true, y_pred_proba)
    avg_precision = average_precision_score(y_true, y_pred_proba)

    fig, axes = plt.subplots(1, 2, figsize=(15, 5))

    # Left: ROC curve against the chance diagonal.
    axes[0].plot(fpr, tpr, color='darkorange', lw=2,
                 label=f'ROC曲线 (AUC = {roc_auc:.4f})')
    axes[0].plot([0, 1], [0, 1], color='navy', lw=2, linestyle='--', label='随机分类器')
    axes[0].set_xlim([0.0, 1.0])
    axes[0].set_ylim([0.0, 1.05])
    axes[0].set_xlabel('假阳性率 (FPR)')
    axes[0].set_ylabel('真阳性率 (TPR)')
    axes[0].set_title('ROC曲线')
    axes[0].legend(loc="lower right")
    axes[0].grid(True, alpha=0.3)

    # Right: precision-recall curve (more informative under class imbalance).
    axes[1].plot(recall_vals, precision_vals, color='darkorange', lw=2,
                 label=f'PR曲线 (AP = {avg_precision:.4f})')
    axes[1].set_xlabel('召回率')
    axes[1].set_ylabel('精确率')
    axes[1].set_title('精确率-召回率曲线')
    axes[1].legend(loc="lower left")
    axes[1].grid(True, alpha=0.3)

    plt.tight_layout()
    plt.show()

    # Youden's J statistic: pick the threshold maximizing TPR - FPR.
    optimal_idx = np.argmax(tpr - fpr)
    optimal_threshold = thresholds[optimal_idx]

    print("\nROC-AUC分析:")
    print(f"AUC分数: {roc_auc:.4f}")
    print(f"平均精确率 (AP): {avg_precision:.4f}")
    print(f"最优阈值: {optimal_threshold:.4f}")
    print(f"最优阈值下的FPR: {fpr[optimal_idx]:.4f}, TPR: {tpr[optimal_idx]:.4f}")

    return {
        'fpr': fpr,
        'tpr': tpr,
        'thresholds': thresholds,
        'auc': roc_auc,
        'optimal_threshold': optimal_threshold
    }


roc_results = roc_auc_analysis(y_test, y_pred_proba)
4. 多分类评估
from sklearn.metrics import classification_report
from sklearn.datasets import make_classification
from sklearn.multiclass import OneVsRestClassifier
def multiclass_evaluation():
    """Evaluate a 3-class random forest: report, confusion matrix, per-class metrics.

    Returns:
        pandas.DataFrame: precision / recall / F1 / support indexed by class name.
    """
    n_labels = 3
    class_tags = [f'类别{i}' for i in range(n_labels)]

    # Synthetic 3-class problem: 10 informative + 10 redundant = 20 features.
    data, target = make_classification(n_samples=1000, n_features=20, n_informative=10,
                                       n_classes=3, n_redundant=10, random_state=42)
    tr_X, te_X, tr_y, te_y = train_test_split(data, target, test_size=0.3, random_state=42)

    forest = RandomForestClassifier(n_estimators=100, random_state=42)
    forest.fit(tr_X, tr_y)
    predicted = forest.predict(te_X)
    predicted_proba = forest.predict_proba(te_X)

    # Overall per-class report (precision / recall / F1 / support + averages).
    print("多分类评估报告:")
    print("=" * 60)
    print(classification_report(te_y, predicted, target_names=class_tags))

    # 3x3 confusion matrix heatmap.
    cm = confusion_matrix(te_y, predicted)
    plt.figure(figsize=(10, 8))
    sns.heatmap(cm, annot=True, fmt='d', cmap='Blues',
                xticklabels=[f'预测类别{i}' for i in range(n_labels)],
                yticklabels=[f'实际类别{i}' for i in range(n_labels)])
    plt.title('多分类混淆矩阵')
    plt.ylabel('真实标签')
    plt.xlabel('预测标签')
    plt.tight_layout()
    plt.show()

    # Unaveraged per-class metrics collected into a DataFrame.
    from sklearn.metrics import precision_recall_fscore_support
    precision, recall, f1, support = precision_recall_fscore_support(te_y, predicted, average=None)
    metrics_df = pd.DataFrame({
        '精确率': precision,
        '召回率': recall,
        'F1分数': f1,
        '支持数': support
    }, index=class_tags)
    print("\n各类别详细指标:")
    print(metrics_df)
    return metrics_df


multiclass_metrics = multiclass_evaluation()
回归任务评估指标
from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score
from sklearn.linear_model import LinearRegression
from sklearn.datasets import make_regression
def regression_metrics_demo():
    """Fit linear regression on synthetic data and report/visualize regression metrics.

    Returns:
        dict mapping Chinese metric name -> value (MSE, RMSE, MAE, MedAE, R², MAPE).
    """
    X, y = make_regression(n_samples=1000, n_features=10, n_informative=5,
                           noise=10, random_state=42)
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42)

    model = LinearRegression()
    model.fit(X_train, y_train)
    y_pred = model.predict(X_test)

    # Core error metrics.
    mse = mean_squared_error(y_test, y_pred)
    rmse = np.sqrt(mse)
    mae = mean_absolute_error(y_test, y_pred)
    r2 = r2_score(y_test, y_pred)

    # Fix: MAPE is undefined when a target value is exactly 0 (and unstable
    # near 0); exclude zero targets so the mean cannot become inf/nan.
    nonzero = y_test != 0
    mape = np.mean(np.abs((y_test[nonzero] - y_pred[nonzero]) / y_test[nonzero])) * 100

    # Median absolute error — robust to outliers.
    median_ae = np.median(np.abs(y_test - y_pred))

    metrics = {
        '均方误差 (MSE)': mse,
        '均方根误差 (RMSE)': rmse,
        '平均绝对误差 (MAE)': mae,
        '中位数绝对误差 (MedAE)': median_ae,
        'R²分数': r2,
        '平均绝对百分比误差 (MAPE)': mape
    }

    print("回归评估指标:")
    print("=" * 60)
    for metric_name, value in metrics.items():
        print(f"{metric_name}: {value:.4f}")

    fig, axes = plt.subplots(1, 2, figsize=(15, 5))

    # Left: predicted vs. actual, with the perfect-prediction diagonal.
    axes[0].scatter(y_test, y_pred, alpha=0.6)
    axes[0].plot([y_test.min(), y_test.max()],
                 [y_test.min(), y_test.max()], 'r--', lw=2, label='完美预测')
    axes[0].set_xlabel('真实值')
    axes[0].set_ylabel('预测值')
    axes[0].set_title(f'预测vs真实值 (R² = {r2:.4f})')
    axes[0].legend()
    axes[0].grid(True, alpha=0.3)

    # Right: residuals vs. predictions; structure here would indicate bias.
    residuals = y_test - y_pred
    axes[1].scatter(y_pred, residuals, alpha=0.6)
    axes[1].axhline(y=0, color='r', linestyle='--', lw=2)
    axes[1].set_xlabel('预测值')
    axes[1].set_ylabel('残差')
    axes[1].set_title('残差图')
    axes[1].grid(True, alpha=0.3)

    plt.tight_layout()
    plt.show()

    return metrics


regression_metrics = regression_metrics_demo()
交叉验证
from sklearn.model_selection import cross_val_score, cross_validate, StratifiedKFold, KFold
def cross_validation_demo():
    """Demonstrate K-fold, multi-metric, stratified and regression cross-validation.

    Returns:
        tuple: (classification cross_validate results, regression cross_validate results).
    """

    def _fmt(scores):
        # Shared "mean (+/- 2*std)" formatting used by every report line below.
        return f"{scores.mean():.4f} (+/- {scores.std() * 2:.4f})"

    # ---- Classification task ----
    X_class, y_class = make_classification(n_samples=1000, n_features=20,
                                           n_informative=10, n_classes=2, random_state=42)
    model_class = RandomForestClassifier(n_estimators=100, random_state=42)

    # Plain 5-fold accuracy.
    cv_scores = cross_val_score(model_class, X_class, y_class, cv=5, scoring='accuracy')
    print("分类任务交叉验证:")
    print("=" * 60)
    print(f"各折准确率: {cv_scores}")
    print(f"平均准确率: {_fmt(cv_scores)}")

    # Several metrics in a single cross-validation pass.
    scoring = ['accuracy', 'precision', 'recall', 'f1', 'roc_auc']
    cv_results = cross_validate(model_class, X_class, y_class, cv=5, scoring=scoring)
    print("\n多指标交叉验证结果:")
    for metric in scoring:
        print(f"{metric}: {_fmt(cv_results[f'test_{metric}'])}")

    # Stratified folds keep the class ratio per fold (important for imbalance).
    skf = StratifiedKFold(n_splits=5, shuffle=True, random_state=42)
    stratified_scores = cross_val_score(model_class, X_class, y_class, cv=skf, scoring='roc_auc')
    print(f"\n分层K折交叉验证 (AUC): {_fmt(stratified_scores)}")

    # ---- Regression task ----
    X_reg, y_reg = make_regression(n_samples=1000, n_features=10, noise=10, random_state=42)
    model_reg = LinearRegression()

    reg_scoring = ['neg_mean_squared_error', 'neg_mean_absolute_error', 'r2']
    reg_cv_results = cross_validate(model_reg, X_reg, y_reg, cv=5, scoring=reg_scoring)
    print("\n回归任务交叉验证:")
    print("=" * 60)
    for metric in reg_scoring:
        scores = reg_cv_results[f'test_{metric}']
        if metric.startswith('neg_'):
            # sklearn reports losses as negated scores; flip the sign for display.
            scores = -scores
            display_name = metric.replace('neg_', '')
        else:
            display_name = metric
        print(f"{display_name}: {_fmt(scores)}")

    return cv_results, reg_cv_results


cv_results, reg_cv_results = cross_validation_demo()
学习曲线
from sklearn.model_selection import learning_curve
def learning_curve_demo():
    """Plot training/validation accuracy as a function of training-set size."""
    X, y = make_classification(n_samples=1000, n_features=20, n_informative=10,
                               n_classes=2, random_state=42)
    estimator = RandomForestClassifier(n_estimators=100, random_state=42)

    # Evaluate at 10 training-set sizes, from 10% to 100% of the data.
    sizes, tr_scores, va_scores = learning_curve(
        estimator, X, y, cv=5, n_jobs=-1,
        train_sizes=np.linspace(0.1, 1.0, 10),
        scoring='accuracy')

    tr_mean, tr_std = tr_scores.mean(axis=1), tr_scores.std(axis=1)
    va_mean, va_std = va_scores.mean(axis=1), va_scores.std(axis=1)

    plt.figure(figsize=(12, 6))
    # Shaded bands show +/- one standard deviation across the CV folds.
    plt.fill_between(sizes, tr_mean - tr_std, tr_mean + tr_std, alpha=0.1, color='r')
    plt.fill_between(sizes, va_mean - va_std, va_mean + va_std, alpha=0.1, color='g')
    plt.plot(sizes, tr_mean, 'o-', color='r', label='训练准确率')
    plt.plot(sizes, va_mean, 'o-', color='g', label='验证准确率')
    plt.xlabel('训练样本数')
    plt.ylabel('准确率')
    plt.title('学习曲线')
    plt.legend(loc='best')
    plt.grid(True, alpha=0.3)
    plt.tight_layout()
    plt.show()

    # The final train/validation gap is a simple overfitting indicator.
    print("学习曲线分析:")
    print(f"训练准确率: {tr_mean[-1]:.4f}")
    print(f"验证准确率: {va_mean[-1]:.4f}")
    print(f"过拟合程度: {tr_mean[-1] - va_mean[-1]:.4f}")


learning_curve_demo()
模型评估最佳实践
graph TD
A[模型评估流程] --> B[数据分割]
B --> C[训练模型]
C --> D[交叉验证]
D --> E[选择指标]
E --> F[评估性能]
F --> G{性能满足要求?}
G -->|否| H[调整模型]
H --> C
G -->|是| I[最终验证]
I --> J[部署模型]
style A fill:#ff6b6b
style J fill:#51cf66
评估指标选择指南
| 场景 | 推荐指标 | 原因 |
|---|---|---|
| 平衡二分类 | 准确率、AUC | 简单直观 |
| 不平衡二分类 | F1分数、AUC、PR曲线 | 关注少数类 |
| 多分类 | 宏平均F1、加权F1 | 考虑类别不平衡 |
| 回归 | RMSE、MAE、R² | 不同角度评估 |
| 排序任务 | NDCG、MAP | 关注排序质量 |
总结
本节深入探讨了模型评估的各个方面:
- 分类指标:混淆矩阵、准确率、精确率、召回率、F1、AUC
- 回归指标:MSE、RMSE、MAE、R²、MAPE
- 验证方法:交叉验证、学习曲线
- 最佳实践:指标选择指南和评估流程
掌握这些评估方法能够帮助你更好地理解模型性能,做出正确的模型选择和改进决策。
模型评估不是一次性的工作,而是贯穿整个机器学习项目生命周期的持续过程。选择合适的评估指标和方法,是确保项目成功的关键。