神经网络前向传播与反向传播:原理详解与代码实现

本文为深度学习基础系列文章,将深入解析神经网络中前向传播和反向传播的工作原理,包含详细的数学推导、代码实现和可视化分析。

1. 引言:神经网络的核心机制

前向传播(Forward Propagation)和反向传播(Backward Propagation)是神经网络训练的两个核心过程,它们共同构成了深度学习的基础:

  • 前向传播:输入数据通过网络层层传递,最终产生预测结果
  • 反向传播:根据预测误差计算梯度,从输出层向输入层反向传播,更新网络参数

这两个过程的交替进行使得神经网络能够从数据中学习复杂的模式。

2. 前向传播深度解析

2.1 前向传播的数学原理
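
在进入代码之前,先给出单层前向传播的核心公式(与下面实现的约定一致:样本按行排列,A[l] 的形状为 (m, n_l)):

  Z[l] = A[l-1] · W[l] + b[l]
  A[l] = g[l](Z[l]),其中 A[0] = X

W[l] 的形状为 (n_{l-1}, n_l),b[l] 为 (1, n_l),g[l] 是第 l 层的激活函数(如 ReLU、sigmoid);整个网络的前向传播就是从 l=1 到 l=L 逐层套用这两个式子。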

import numpy as np
import matplotlib.pyplot as plt
# 如需在图中正常显示中文标签,可按运行环境配置字体,例如:
# plt.rcParams['font.sans-serif'] = ['SimHei']
# plt.rcParams['axes.unicode_minus'] = False

class ForwardPropagation:
    """前向传播详细实现"""
    
    def __init__(self):
        self.activation_history = {}
        self.forward_pass_count = 0
    
    def linear_forward(self, A_prev, W, b):
        """
        线性部分的前向传播
        A_prev: 前一层的激活值 (m, n_prev)
        W: 权重矩阵 (n_prev, n_current)
        b: 偏置向量 (1, n_current)
        返回: Z = A_prev * W + b
        """
        Z = np.dot(A_prev, W) + b
        return Z
    
    def activation_forward(self, Z, activation="relu"):
        """
        激活函数的前向传播
        Z: 线性输出
        activation: 激活函数类型
        返回: 激活后的输出
        """
        if activation == "relu":
            A = np.maximum(0, Z)
        elif activation == "sigmoid":
            A = 1 / (1 + np.exp(-Z))
        elif activation == "tanh":
            A = np.tanh(Z)
        elif activation == "linear":
            A = Z
        else:
            raise ValueError("不支持的激活函数")
        
        return A
    
    def layer_forward(self, A_prev, W, b, activation):
        """
        单层前向传播
        """
        Z = self.linear_forward(A_prev, W, b)
        A = self.activation_forward(Z, activation)
        
        # 保存中间结果用于反向传播
        cache = {"A_prev": A_prev, "W": W, "b": b, "Z": Z}
        
        return A, cache
    
    def model_forward(self, X, parameters, activations):
        """
        完整模型前向传播
        X: 输入数据 (m, n_x)
        parameters: 参数字典 {'W1':..., 'b1':..., ...}
        activations: 各层激活函数列表
        返回: 最终输出和所有层的缓存
        """
        caches = []
        A = X
        L = len(parameters) // 2  # 层数
        
        # 隐藏层前向传播
        for l in range(1, L):
            A_prev = A
            W = parameters[f'W{l}']
            b = parameters[f'b{l}']
            A, cache = self.layer_forward(A_prev, W, b, activations[l-1])
            caches.append(cache)
            
            # 记录激活值分布
            self.activation_history[f'layer_{l}'] = A.flatten()
        
        # 输出层前向传播(通常使用线性或sigmoid激活)
        W = parameters[f'W{L}']
        b = parameters[f'b{L}']
        AL, cache = self.layer_forward(A, W, b, activations[-1])
        caches.append(cache)
        
        self.forward_pass_count += 1
        self.activation_history[f'output_layer'] = AL.flatten()
        
        return AL, caches
    
    def demonstrate_forward_pass(self):
        """演示前向传播过程"""
        np.random.seed(1)
        
        # 创建示例网络参数
        parameters = {
            'W1': np.random.randn(2, 4) * 0.01,
            'b1': np.zeros((1, 4)),
            'W2': np.random.randn(4, 3) * 0.01,
            'b2': np.zeros((1, 3)),
            'W3': np.random.randn(3, 1) * 0.01,
            'b3': np.zeros((1, 1))
        }
        
        activations = ['relu', 'relu', 'sigmoid']
        
        # 输入数据
        X = np.random.randn(3, 2)  # 3个样本,每个样本2个特征
        
        print("输入数据 X:")
        print(X)
        print(f"形状: {X.shape}")
        
        # 执行前向传播
        output, caches = self.model_forward(X, parameters, activations)
        
        print(f"\n网络输出:")
        print(output)
        print(f"形状: {output.shape}")
        
        # 显示各层激活值统计
        print(f"\n各层激活值统计:")
        for layer_name, acts in self.activation_history.items():
            print(f"{layer_name}: mean={np.mean(acts):.4f}, "
                  f"std={np.std(acts):.4f}, "
                  f"min={np.min(acts):.4f}, "
                  f"max={np.max(acts):.4f}")
        
        return output, caches, parameters

# 运行前向传播演示
forward_prop = ForwardPropagation()
output, caches, parameters = forward_prop.demonstrate_forward_pass()
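
作为对照,下面补充一个用 PyTorch 搭建同样结构(2 -> 4 -> 3 -> 1,ReLU/ReLU/Sigmoid)并执行前向传播的小例子,仅作示意;框架会自动完成参数初始化与中间结果缓存,初始化方式与上面的 NumPy 示例并不相同:

import torch
import torch.nn as nn

# 与上文 NumPy 示例结构等价的 PyTorch 前向传播草图
model = nn.Sequential(
    nn.Linear(2, 4), nn.ReLU(),
    nn.Linear(4, 3), nn.ReLU(),
    nn.Linear(3, 1), nn.Sigmoid()
)

X = torch.randn(3, 2)   # 3 个样本,每个样本 2 个特征
output = model(X)       # 前向传播由框架自动完成
print(output.shape)     # torch.Size([3, 1])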

2.2 前向传播可视化

class ForwardPropagationVisualization:
    """前向传播可视化"""
    
    @staticmethod
    def visualize_forward_process():
        """可视化前向传播过程"""
        
        # 创建示例网络
        class SimpleNetwork:
            def __init__(self):
                self.weights = [
                    np.random.randn(2, 3) * 0.1,  # 输入层 -> 隐藏层1
                    np.random.randn(3, 2) * 0.1,  # 隐藏层1 -> 隐藏层2
                    np.random.randn(2, 1) * 0.1   # 隐藏层2 -> 输出层
                ]
                self.biases = [
                    np.zeros((1, 3)),
                    np.zeros((1, 2)),
                    np.zeros((1, 1))
                ]
            
            def forward(self, x):
                activations = [x]
                z_values = []
                
                # 输入层
                print(f"输入层: {x}")
                
                # 隐藏层1
                z1 = np.dot(x, self.weights[0]) + self.biases[0]
                a1 = np.maximum(0, z1)  # ReLU
                activations.append(a1)
                z_values.append(z1)
                print(f"隐藏层1 - 线性: {z1}, 激活: {a1}")
                
                # 隐藏层2
                z2 = np.dot(a1, self.weights[1]) + self.biases[1]
                a2 = np.maximum(0, z2)  # ReLU
                activations.append(a2)
                z_values.append(z2)
                print(f"隐藏层2 - 线性: {z2}, 激活: {a2}")
                
                # 输出层
                z3 = np.dot(a2, self.weights[2]) + self.biases[2]
                a3 = 1 / (1 + np.exp(-z3))  # Sigmoid
                activations.append(a3)
                z_values.append(z3)
                print(f"输出层 - 线性: {z3}, 激活: {a3}")
                
                return activations, z_values
        
        # 创建网络并运行前向传播
        network = SimpleNetwork()
        input_data = np.array([[0.5, -0.2]])  # 单个样本
        activations, z_values = network.forward(input_data)
        
        # 可视化前向传播
        fig, axes = plt.subplots(2, 2, figsize=(15, 12))
        
        # 1. 网络结构图
        ax1 = axes[0, 0]
        layer_sizes = [2, 3, 2, 1]
        layer_names = ['输入层', '隐藏层1', '隐藏层2', '输出层']
        
        for i, size in enumerate(layer_sizes):
            # 绘制神经元
            for j in range(size):
                circle = plt.Circle((i, j - (size-1)/2), 0.3, 
                                  fill=True, color='lightblue', alpha=0.7)
                ax1.add_patch(circle)
                ax1.text(i, j - (size-1)/2, f'{activations[i][0][j]:.2f}', 
                        ha='center', va='center', fontsize=10)
            
            # 绘制层标签
            ax1.text(i, (size-1)/2 + 0.5, layer_names[i], 
                    ha='center', va='bottom', fontsize=12, fontweight='bold')
        
        # 绘制连接线(简化的表示)
        for i in range(len(layer_sizes)-1):
            for j in range(layer_sizes[i]):
                for k in range(layer_sizes[i+1]):
                    ax1.plot([i+0.3, i+1-0.3], 
                            [j - (layer_sizes[i]-1)/2, k - (layer_sizes[i+1]-1)/2], 
                            'gray', alpha=0.3)
        
        ax1.set_xlim(-0.5, len(layer_sizes)-0.5)
        ax1.set_ylim(-2, 2)
        ax1.set_aspect('equal')
        ax1.set_title('前向传播过程 - 网络结构', fontsize=14)
        ax1.axis('off')
        
        # 2. 激活值变化
        ax2 = axes[0, 1]
        layer_indices = range(len(activations))
        mean_activations = [np.mean(a) for a in activations]
        std_activations = [np.std(a) for a in activations]
        
        ax2.errorbar(layer_indices, mean_activations, yerr=std_activations, 
                    fmt='o-', linewidth=2, markersize=8, capsize=5)
        ax2.set_xlabel('网络层')
        ax2.set_ylabel('激活值')
        ax2.set_title('各层激活值统计')
        ax2.grid(True, alpha=0.3)
        ax2.set_xticks(layer_indices)
        ax2.set_xticklabels(layer_names)
        
        # 3. 线性输出分布
        ax3 = axes[1, 0]
        for i, z in enumerate(z_values):
            ax3.hist(z.flatten(), bins=10, alpha=0.6, 
                    label=f'层 {i+1}', density=True)
        ax3.set_xlabel('线性输出值')
        ax3.set_ylabel('密度')
        ax3.set_title('各层线性输出分布')
        ax3.legend()
        ax3.grid(True, alpha=0.3)
        
        # 4. 激活函数效果
        ax4 = axes[1, 1]
        x_vals = np.linspace(-5, 5, 100)
        activations_funcs = {
            'ReLU': np.maximum(0, x_vals),
            'Sigmoid': 1 / (1 + np.exp(-x_vals)),
            'Tanh': np.tanh(x_vals)
        }
        
        for name, y_vals in activations_funcs.items():
            ax4.plot(x_vals, y_vals, label=name, linewidth=2)
        
        ax4.set_xlabel('输入值')
        ax4.set_ylabel('输出值')
        ax4.set_title('激活函数比较')
        ax4.legend()
        ax4.grid(True, alpha=0.3)
        
        plt.tight_layout()
        plt.show()
        
        return activations, z_values

# 运行可视化
vis = ForwardPropagationVisualization()
activations, z_values = vis.visualize_forward_process()

3. 反向传播深度解析

3.1 反向传播的数学原理
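
反向传播逐层套用链式法则,与下面代码一一对应(m 为样本数,"∘"表示逐元素相乘):

  dZ[l] = dA[l] ∘ g[l]'(Z[l])
  dW[l] = (1/m) · A[l-1]^T · dZ[l]
  db[l] = (1/m) · Σ(按样本求和) dZ[l]
  dA[l-1] = dZ[l] · W[l]^T

对于二分类交叉熵损失,输出层的初始梯度为 dAL = -(Y/AL - (1-Y)/(1-AL));当输出层激活为 sigmoid 时,二者组合可化简为 dZ = AL - Y(后文 7.2 节会用到这一化简)。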

class BackwardPropagation:
    """反向传播详细实现"""
    
    def __init__(self):
        self.gradient_history = {}
        self.backward_pass_count = 0
    
    def linear_backward(self, dZ, cache):
        """
        线性部分的反向传播
        dZ: 当前层线性输出的梯度
        cache: 前向传播保存的缓存 (A_prev, W, b, Z)
        返回: dA_prev, dW, db
        """
        A_prev, W, b = cache["A_prev"], cache["W"], cache["b"]
        m = A_prev.shape[0]
        
        dW = (1/m) * np.dot(A_prev.T, dZ)
        db = (1/m) * np.sum(dZ, axis=0, keepdims=True)
        dA_prev = np.dot(dZ, W.T)
        
        return dA_prev, dW, db
    
    def activation_backward(self, dA, Z, activation):
        """
        激活函数的反向传播
        dA: 当前层激活值的梯度
        Z: 当前层线性输出
        activation: 激活函数类型
        返回: dZ
        """
        if activation == "relu":
            dZ = np.array(dA, copy=True)
            dZ[Z <= 0] = 0
        elif activation == "sigmoid":
            s = 1 / (1 + np.exp(-Z))
            dZ = dA * s * (1 - s)
        elif activation == "tanh":
            dZ = dA * (1 - np.tanh(Z)**2)
        elif activation == "linear":
            dZ = dA
        else:
            raise ValueError("不支持的激活函数")
        
        return dZ
    
    def layer_backward(self, dA, cache, activation):
        """
        单层反向传播
        """
        Z = cache["Z"]
        dZ = self.activation_backward(dA, Z, activation)
        dA_prev, dW, db = self.linear_backward(dZ, cache)
        
        return dA_prev, dW, db
    
    def model_backward(self, AL, Y, caches, activations):
        """
        完整模型反向传播
        AL: 前向传播的输出
        Y: 真实标签
        caches: 前向传播的缓存列表
        activations: 各层激活函数列表
        返回: 梯度字典
        """
        grads = {}
        L = len(caches)  # 层数
        m = AL.shape[0]
        Y = Y.reshape(AL.shape)  # 确保形状匹配
        
        # 初始化反向传播
        dAL = - (np.divide(Y, AL) - np.divide(1 - Y, 1 - AL))  # 二分类交叉熵损失导数
        
        # 输出层反向传播
        current_cache = caches[L-1]
        grads[f"dA{L}"], grads[f"dW{L}"], grads[f"db{L}"] = \
            self.layer_backward(dAL, current_cache, activations[L-1])
        
        # 记录梯度
        self.gradient_history[f'dW{L}'] = grads[f"dW{L}"].flatten()
        self.gradient_history[f'db{L}'] = grads[f"db{L}"].flatten()
        
        # 隐藏层反向传播
        for l in reversed(range(L-1)):
            current_cache = caches[l]
            dA_prev_temp, dW_temp, db_temp = \
                self.layer_backward(grads[f"dA{l+2}"], current_cache, activations[l])
            
            grads[f"dA{l+1}"] = dA_prev_temp
            grads[f"dW{l+1}"] = dW_temp
            grads[f"db{l+1}"] = db_temp
            
            # 记录梯度
            self.gradient_history[f'dW{l+1}'] = grads[f"dW{l+1}"].flatten()
            self.gradient_history[f'db{l+1}'] = grads[f"db{l+1}"].flatten()
        
        self.backward_pass_count += 1
        return grads
    
    def demonstrate_backward_pass(self, AL, Y, caches, activations):
        """演示反向传播过程"""
        print("开始反向传播演示...")
        print(f"预测输出 AL: {AL.flatten()}")
        print(f"真实标签 Y: {Y.flatten()}")
        
        # 计算初始损失
        loss = -np.mean(Y * np.log(AL) + (1 - Y) * np.log(1 - AL))
        print(f"初始损失: {loss:.4f}")
        
        # 执行反向传播
        grads = self.model_backward(AL, Y, caches, activations)
        
        print(f"\n计算得到的梯度:")
        for key in sorted(grads.keys()):
            if 'dW' in key:
                print(f"{key}: mean={np.mean(grads[key]):.6f}, "
                      f"std={np.std(grads[key]):.6f}")
        
        return grads, loss

# 创建示例数据并运行反向传播
# 使用之前前向传播的结果
Y = np.array([[1], [0], [1]])  # 真实标签

backward_prop = BackwardPropagation()
grads, loss = backward_prop.demonstrate_backward_pass(output, Y, caches, ['relu', 'relu', 'sigmoid'])

3.2 链式法则可视化

class ChainRuleVisualization:
    """链式法则可视化"""
    
    @staticmethod
    def visualize_chain_rule():
        """可视化反向传播中的链式法则"""
        
        # 创建一个简单的计算图: L = (y - sigmoid(w2 * relu(w1*x + b1) + b2))^2
        x = 2.0
        w1, b1 = 0.5, 0.1
        w2, b2 = 1.2, -0.2
        y_true = 1.0
        
        print("计算图: L = (y - sigmoid(w2 * relu(w1*x + b1) + b2))^2")
        print(f"参数: x={x}, w1={w1}, b1={b1}, w2={w2}, b2={b2}, y_true={y_true}")
        print("\n前向传播:")
        
        # 前向传播
        z1 = w1 * x + b1
        print(f"z1 = w1*x + b1 = {w1}*{x} + {b1} = {z1}")
        
        a1 = max(0, z1)  # ReLU
        print(f"a1 = relu(z1) = relu({z1}) = {a1}")
        
        z2 = w2 * a1 + b2
        print(f"z2 = w2*a1 + b2 = {w2}*{a1} + {b2} = {z2}")
        
        a2 = 1 / (1 + np.exp(-z2))  # Sigmoid
        print(f"a2 = sigmoid(z2) = sigmoid({z2}) = {a2}")
        
        L = (y_true - a2) ** 2  # 损失
        print(f"L = (y_true - a2)^2 = ({y_true} - {a2})^2 = {L}")
        
        print("\n反向传播 (链式法则):")
        
        # 反向传播 - 链式法则
        # dL/da2
        dL_da2 = -2 * (y_true - a2)
        print(f"dL/da2 = -2*(y_true - a2) = -2*({y_true} - {a2}) = {dL_da2}")
        
        # da2/dz2
        da2_dz2 = a2 * (1 - a2)
        print(f"da2/dz2 = a2*(1-a2) = {a2}*(1-{a2}) = {da2_dz2}")
        
        # dL/dz2
        dL_dz2 = dL_da2 * da2_dz2
        print(f"dL/dz2 = dL/da2 * da2/dz2 = {dL_da2} * {da2_dz2} = {dL_dz2}")
        
        # dz2/dw2
        dz2_dw2 = a1
        print(f"dz2/dw2 = a1 = {a1}")
        
        # dL/dw2
        dL_dw2 = dL_dz2 * dz2_dw2
        print(f"dL/dw2 = dL/dz2 * dz2/dw2 = {dL_dz2} * {dz2_dw2} = {dL_dw2}")
        
        # dz2/da1
        dz2_da1 = w2
        print(f"dz2/da1 = w2 = {w2}")
        
        # dL/da1
        dL_da1 = dL_dz2 * dz2_da1
        print(f"dL/da1 = dL/dz2 * dz2/da1 = {dL_dz2} * {dz2_da1} = {dL_da1}")
        
        # da1/dz1 (ReLU导数)
        da1_dz1 = 1 if z1 > 0 else 0
        print(f"da1/dz1 = relu_derivative(z1) = {da1_dz1} (因为 z1={z1} > 0)")
        
        # dL/dz1
        dL_dz1 = dL_da1 * da1_dz1
        print(f"dL/dz1 = dL/da1 * da1/dz1 = {dL_da1} * {da1_dz1} = {dL_dz1}")
        
        # dz1/dw1
        dz1_dw1 = x
        print(f"dz1/dw1 = x = {x}")
        
        # dL/dw1
        dL_dw1 = dL_dz1 * dz1_dw1
        print(f"dL/dw1 = dL/dz1 * dz1/dw1 = {dL_dz1} * {dz1_dw1} = {dL_dw1}")
        
        # 可视化计算图
        fig, ax = plt.subplots(figsize=(12, 8))
        
        # 节点位置
        nodes = {
            'x': (0, 3), 'w1': (0, 2), 'b1': (0, 1),
            'z1': (1, 2), 'a1': (2, 2),
            'w2': (2, 1), 'b2': (2, 0),
            'z2': (3, 1.5), 'a2': (4, 1.5),
            'y': (4, 0.5), 'L': (5, 1)
        }
        
        # 绘制节点
        for node, (nx, ny) in nodes.items():
            circle = plt.Circle((nx, ny), 0.15, fill=True, color='lightblue', alpha=0.8)
            ax.add_patch(circle)
            ax.text(nx, ny, node, ha='center', va='center', fontweight='bold')
        
        # 绘制边和前向传播值
        edges = [
            (('x', 'w1', 'b1'), 'z1', f'z1={z1:.2f}'),
            (('z1',), 'a1', f'a1={a1:.2f}'),
            (('a1', 'w2', 'b2'), 'z2', f'z2={z2:.2f}'),
            (('z2',), 'a2', f'a2={a2:.2f}'),
            (('a2', 'y'), 'L', f'L={L:.2f}')
        ]
        
        for sources, target, label in edges:
            for source in sources:
                start = nodes[source]
                end = nodes[target]
                ax.annotate("", xy=end, xytext=start,
                           arrowprops=dict(arrowstyle="->", color="blue", lw=1.5))
            
            # 显示前向传播值
            mid_x = (nodes[target][0] + nodes[sources[0]][0]) / 2
            mid_y = (nodes[target][1] + nodes[sources[0]][1]) / 2
            ax.text(mid_x, mid_y + 0.1, label, ha='center', va='bottom', 
                   fontsize=8, bbox=dict(boxstyle="round,pad=0.3", facecolor="yellow", alpha=0.7))
        
        # 绘制反向传播梯度
        gradients = [
            ('L', 'a2', f'dL/da2={dL_da2:.2f}'),
            ('a2', 'z2', f'da2/dz2={da2_dz2:.2f}'),
            ('z2', 'w2', f'dz2/dw2={dz2_dw2:.2f}'),
            ('z2', 'a1', f'dz2/da1={dz2_da1:.2f}'),
            ('a1', 'z1', f'da1/dz1={da1_dz1}'),
            ('z1', 'w1', f'dz1/dw1={dz1_dw1:.2f}')
        ]
        
        for source, target, label in gradients:
            start = nodes[source]
            end = nodes[target]
            
            # 计算中点位置(稍微偏移以避免重叠)
            mid_x = (start[0] + end[0]) / 2
            mid_y = (start[1] + end[1]) / 2 - 0.2
            
            ax.annotate("", xy=end, xytext=start,
                       arrowprops=dict(arrowstyle="->", color="red", lw=1.5, linestyle="--"))
            
            ax.text(mid_x, mid_y, label, ha='center', va='top', 
                   fontsize=8, bbox=dict(boxstyle="round,pad=0.3", facecolor="pink", alpha=0.7))
        
        ax.set_xlim(-0.5, 5.5)
        ax.set_ylim(-0.5, 3.5)
        ax.set_aspect('equal')
        ax.set_title('反向传播链式法则可视化\n蓝色: 前向传播, 红色虚线: 反向传播', fontsize=14)
        ax.axis('off')
        
        plt.tight_layout()
        plt.show()
        
        return dL_dw1, dL_dw2

# 运行链式法则可视化
chain_rule = ChainRuleVisualization()
dL_dw1, dL_dw2 = chain_rule.visualize_chain_rule()
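
同一个标量计算图也可以交给 PyTorch 的自动求导来核对上面手工推导的梯度。下面是一个最小的验证草图(假设环境中已安装 PyTorch),w1.grad、w2.grad 应与手算的 dL/dw1、dL/dw2 基本一致:

import torch

# 用自动求导验证链式法则手算结果
w1 = torch.tensor(0.5, requires_grad=True)
w2 = torch.tensor(1.2, requires_grad=True)
b1, b2 = torch.tensor(0.1), torch.tensor(-0.2)
x, y_true = torch.tensor(2.0), torch.tensor(1.0)

a1 = torch.relu(w1 * x + b1)
a2 = torch.sigmoid(w2 * a1 + b2)
loss = (y_true - a2) ** 2
loss.backward()                 # 自动反向传播

print(w1.grad, w2.grad)         # 与手工推导的 dL/dw1、dL/dw2 对比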

4. 完整训练流程实现

4.1 完整的神经网络训练类

class CompleteNeuralNetwork:
    """完整的神经网络实现"""
    
    def __init__(self, layer_dims, activations, learning_rate=0.01):
        """
        初始化神经网络
        layer_dims: 各层维度列表 [input_dim, hidden1_dim, ..., output_dim]
        activations: 各层激活函数列表
        learning_rate: 学习率
        """
        self.layer_dims = layer_dims
        self.activations = activations
        self.learning_rate = learning_rate
        self.parameters = {}
        self.history = {
            'losses': [],
            'accuracies': [],
            'gradients': []
        }
        
        self.initialize_parameters()
    
    def initialize_parameters(self):
        """初始化参数"""
        np.random.seed(1)
        L = len(self.layer_dims)
        
        for l in range(1, L):
            # He初始化(适合ReLU)
            self.parameters[f'W{l}'] = np.random.randn(
                self.layer_dims[l-1], self.layer_dims[l]) * np.sqrt(2 / self.layer_dims[l-1])
            self.parameters[f'b{l}'] = np.zeros((1, self.layer_dims[l]))
    
    def forward_propagation(self, X):
        """前向传播"""
        caches = []
        A = X
        L = len(self.parameters) // 2
        
        for l in range(1, L):
            A_prev = A
            W = self.parameters[f'W{l}']
            b = self.parameters[f'b{l}']
            Z = np.dot(A_prev, W) + b
            A = self.activate(Z, self.activations[l-1])
            cache = (A_prev, W, b, Z)
            caches.append(cache)
        
        # 输出层
        W = self.parameters[f'W{L}']
        b = self.parameters[f'b{L}']
        Z = np.dot(A, W) + b
        AL = self.activate(Z, self.activations[-1])
        cache = (A, W, b, Z)
        caches.append(cache)
        
        return AL, caches
    
    def backward_propagation(self, AL, Y, caches):
        """反向传播"""
        grads = {}
        L = len(caches)
        m = AL.shape[0]
        Y = Y.reshape(AL.shape)
        
        # 输出层梯度
        dAL = - (np.divide(Y, AL + 1e-8) - np.divide(1 - Y, 1 - AL + 1e-8))  # 加 1e-8 防止除零
        
        current_cache = caches[L-1]
        A_prev, W, b, Z = current_cache
        dZ = dAL * self.activate_derivative(Z, self.activations[-1])
        dW = (1/m) * np.dot(A_prev.T, dZ)
        db = (1/m) * np.sum(dZ, axis=0, keepdims=True)
        dA_prev = np.dot(dZ, W.T)
        
        grads[f'dW{L}'] = dW
        grads[f'db{L}'] = db
        
        # 隐藏层梯度
        for l in reversed(range(L-1)):
            current_cache = caches[l]
            A_prev, W, b, Z = current_cache
            
            dZ = dA_prev * self.activate_derivative(Z, self.activations[l])
            dW = (1/m) * np.dot(A_prev.T, dZ)
            db = (1/m) * np.sum(dZ, axis=0, keepdims=True)
            dA_prev = np.dot(dZ, W.T)
            
            grads[f'dW{l+1}'] = dW
            grads[f'db{l+1}'] = db
        
        return grads
    
    def update_parameters(self, grads):
        """更新参数"""
        L = len(self.parameters) // 2
        
        for l in range(1, L + 1):
            self.parameters[f'W{l}'] -= self.learning_rate * grads[f'dW{l}']
            self.parameters[f'b{l}'] -= self.learning_rate * grads[f'db{l}']
    
    def activate(self, Z, activation):
        """激活函数"""
        if activation == "relu":
            return np.maximum(0, Z)
        elif activation == "sigmoid":
            return 1 / (1 + np.exp(-Z))
        elif activation == "tanh":
            return np.tanh(Z)
        elif activation == "linear":
            return Z
        else:
            raise ValueError(f"不支持的激活函数: {activation}")
    
    def activate_derivative(self, Z, activation):
        """激活函数导数"""
        if activation == "relu":
            return (Z > 0).astype(float)
        elif activation == "sigmoid":
            s = 1 / (1 + np.exp(-Z))
            return s * (1 - s)
        elif activation == "tanh":
            return 1 - np.tanh(Z)**2
        elif activation == "linear":
            return np.ones_like(Z)
        else:
            raise ValueError(f"不支持的激活函数: {activation}")
    
    def compute_loss(self, AL, Y):
        """计算损失"""
        m = Y.shape[0]
        loss = -np.mean(Y * np.log(AL + 1e-8) + (1 - Y) * np.log(1 - AL + 1e-8))
        return loss
    
    def compute_accuracy(self, AL, Y):
        """计算准确率"""
        predictions = (AL > 0.5).astype(float)
        accuracy = np.mean(predictions == Y)
        return accuracy
    
    def train(self, X, Y, epochs=1000, verbose=True):
        """训练网络"""
        for epoch in range(epochs):
            # 前向传播
            AL, caches = self.forward_propagation(X)
            
            # 计算损失和准确率
            loss = self.compute_loss(AL, Y)
            accuracy = self.compute_accuracy(AL, Y)
            
            # 反向传播
            grads = self.backward_propagation(AL, Y, caches)
            
            # 更新参数
            self.update_parameters(grads)
            
            # 记录历史
            self.history['losses'].append(loss)
            self.history['accuracies'].append(accuracy)
            
            # 记录梯度范数
            grad_norms = {}
            for key, grad in grads.items():
                grad_norms[key] = np.linalg.norm(grad)
            self.history['gradients'].append(grad_norms)
            
            if verbose and epoch % 100 == 0:
                print(f"Epoch {epoch}, Loss: {loss:.4f}, Accuracy: {accuracy:.4f}")
    
    def predict(self, X):
        """预测"""
        AL, _ = self.forward_propagation(X)
        predictions = (AL > 0.5).astype(float)
        return predictions
    
    def plot_training_history(self):
        """绘制训练历史"""
        fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2, figsize=(15, 12))
        
        # 损失曲线
        ax1.plot(self.history['losses'])
        ax1.set_xlabel('Epoch')
        ax1.set_ylabel('Loss')
        ax1.set_title('Training Loss')
        ax1.grid(True, alpha=0.3)
        
        # 准确率曲线
        ax2.plot(self.history['accuracies'])
        ax2.set_xlabel('Epoch')
        ax2.set_ylabel('Accuracy')
        ax2.set_title('Training Accuracy')
        ax2.grid(True, alpha=0.3)
        
        # 梯度范数
        epochs = range(len(self.history['gradients']))
        layer_grads = {}
        
        for epoch_grads in self.history['gradients']:
            for key, norm in epoch_grads.items():
                if key not in layer_grads:
                    layer_grads[key] = []
                layer_grads[key].append(norm)
        
        for key, norms in layer_grads.items():
            ax3.plot(epochs, norms, label=key)
        
        ax3.set_xlabel('Epoch')
        ax3.set_ylabel('Gradient Norm')
        ax3.set_title('Gradient Norms by Layer')
        ax3.legend()
        ax3.grid(True, alpha=0.3)
        ax3.set_yscale('log')
        
        # 参数分布
        param_values = []
        param_names = []
        
        for key, param in self.parameters.items():
            param_values.extend(param.flatten())
            param_names.append(key)
        
        ax4.hist(param_values, bins=50, alpha=0.7, edgecolor='black')
        ax4.set_xlabel('Parameter Value')
        ax4.set_ylabel('Frequency')
        ax4.set_title('Parameter Distribution')
        ax4.grid(True, alpha=0.3)
        
        plt.tight_layout()
        plt.show()

# 创建和训练神经网络
def demonstrate_complete_training():
    """演示完整训练过程"""
    
    # 创建合成数据
    np.random.seed(1)
    X = np.random.randn(100, 2)
    Y = (X[:, 0] ** 2 + X[:, 1] ** 2 > 1).astype(float).reshape(-1, 1)
    
    print(f"数据形状: X {X.shape}, Y {Y.shape}")
    print(f"类别分布: {np.sum(Y)} 个正样本, {len(Y) - np.sum(Y)} 个负样本")
    
    # 创建网络
    layer_dims = [2, 4, 4, 1]  # 输入2维,2个隐藏层各4个神经元,输出1维
    activations = ['relu', 'relu', 'sigmoid']  # 隐藏层用ReLU,输出层用Sigmoid
    
    nn = CompleteNeuralNetwork(layer_dims, activations, learning_rate=0.1)
    
    # 训练网络
    print("开始训练...")
    nn.train(X, Y, epochs=1000, verbose=True)
    
    # 绘制训练历史
    nn.plot_training_history()
    
    # 测试预测
    test_X = np.random.randn(10, 2)
    predictions = nn.predict(test_X)
    print(f"\n测试预测: {predictions.flatten()}")
    
    return nn

# 运行完整训练演示
trained_nn = demonstrate_complete_training()
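
上面的 train() 使用的是全批量梯度下降:每个 epoch 用全部样本计算一次梯度。样本量较大时通常改用小批量(mini-batch)训练。下面是复用 CompleteNeuralNetwork 现有方法的一个小批量训练草图,train_minibatch 是本文补充的示意函数,并非原类的方法:

def train_minibatch(model, X, Y, epochs=200, batch_size=32):
    """小批量梯度下降训练草图:复用 model 的前向/反向/参数更新方法"""
    m = X.shape[0]
    for epoch in range(epochs):
        perm = np.random.permutation(m)            # 每个 epoch 打乱样本顺序
        for start in range(0, m, batch_size):
            idx = perm[start:start + batch_size]
            AL, caches = model.forward_propagation(X[idx])
            grads = model.backward_propagation(AL, Y[idx], caches)
            model.update_parameters(grads)

# 用法示例(X, Y 为上面 demonstrate_complete_training 中构造的合成数据):
# train_minibatch(trained_nn, X, Y, epochs=200, batch_size=16)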

5. 前向传播与反向传播流程图

graph TD
    A[输入数据 X] --> B[前向传播开始];
    
    B --> C[初始化参数 W, b];
    C --> D[层间计算 Z = W·A + b];
    D --> E[激活函数 A = activationZ];
    E --> F{是否输出层?};
    
    F -->|否| D;
    F -->|是| G[计算输出 Y_pred];
    G --> H[计算损失 L];
    
    H --> I[反向传播开始];
    I --> J[计算输出层梯度 dL/dY_pred];
    J --> K[层间反向传播];
    
    K --> L[计算梯度 dL/dW, dL/db];
    L --> M[传播梯度到前一层];
    M --> N{是否输入层?};
    
    N -->|否| K;
    N -->|是| O[参数更新 W = W - α·dL/dW];
    O --> P{训练完成?};
    
    P -->|否| B;
    P -->|是| Q[训练结束];
    
    style A fill:#e1f5fe
    style Q fill:#f1f8e9
    style B fill:#fff3e0
    style I fill:#fff3e0
    style H fill:#ffebee
    style O fill:#e8f5e8

6. 梯度流动分析

6.1 梯度检查与验证
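
梯度检查的思路是:对每个参数 θ_i 用中心差分近似数值梯度,再与反向传播得到的解析梯度比较相对差异(下面代码中 epsilon 取 1e-7):

  grad_approx_i ≈ (J(θ + ε·e_i) - J(θ - ε·e_i)) / (2ε)
  difference = ||grad - grad_approx|| / (||grad|| + ||grad_approx||)

difference 足够小(代码中以 1e-7 为阈值)时,通常可以认为反向传播的实现是正确的。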

class GradientAnalysis:
    """梯度分析和验证"""
    
    @staticmethod
    def gradient_check(parameters, grads, X, Y, activations, epsilon=1e-7):
        """
        梯度检查:比较解析梯度和数值梯度
        """
        print("开始梯度检查...")
        
        parameters_flat = GradientAnalysis.flatten_parameters(parameters)
        grads_flat = GradientAnalysis.flatten_gradients(grads)
        
        num_parameters = len(parameters_flat)
        grad_approx = np.zeros(num_parameters)
        
        # 计算数值梯度
        for i in range(num_parameters):
            # 正向扰动
            theta_plus = np.copy(parameters_flat)
            theta_plus[i] += epsilon
            parameters_plus = GradientAnalysis.reshape_parameters(theta_plus, parameters)
            J_plus = GradientAnalysis.compute_cost(X, Y, parameters_plus, activations)
            
            # 负向扰动
            theta_minus = np.copy(parameters_flat)
            theta_minus[i] -= epsilon
            parameters_minus = GradientAnalysis.reshape_parameters(theta_minus, parameters)
            J_minus = GradientAnalysis.compute_cost(X, Y, parameters_minus, activations)
            
            # 数值梯度
            grad_approx[i] = (J_plus - J_minus) / (2 * epsilon)
        
        # 比较梯度
        numerator = np.linalg.norm(grad_approx - grads_flat)
        denominator = np.linalg.norm(grad_approx) + np.linalg.norm(grads_flat)
        difference = numerator / denominator
        
        print(f"梯度检查结果: {difference}")
        if difference < 1e-7:
            print("✅ 梯度检查通过!解析梯度和数值梯度非常接近")
        else:
            print("⚠️  梯度检查警告:解析梯度和数值梯度有较大差异")
        
        return difference
    
    @staticmethod
    def flatten_parameters(parameters):
        """展平参数"""
        flattened = []
        for key in sorted(parameters.keys()):
            flattened.extend(parameters[key].flatten())
        return np.array(flattened)
    
    @staticmethod
    def flatten_gradients(grads):
        """展平梯度"""
        flattened = []
        for key in sorted(grads.keys()):
            if key.startswith('dW') or key.startswith('db'):
                flattened.extend(grads[key].flatten())
        return np.array(flattened)
    
    @staticmethod
    def reshape_parameters(flat_params, original_parameters):
        """重新构造参数"""
        parameters = {}
        start = 0
        
        for key in sorted(original_parameters.keys()):
            shape = original_parameters[key].shape
            size = np.prod(shape)
            parameters[key] = flat_params[start:start+size].reshape(shape)
            start += size
        
        return parameters
    
    @staticmethod
    def compute_cost(X, Y, parameters, activations):
        """计算损失"""
        # 简化的前向传播计算损失
        A = X
        L = len(parameters) // 2
        
        for l in range(1, L):
            W = parameters[f'W{l}']
            b = parameters[f'b{l}']
            Z = np.dot(A, W) + b
            A = GradientAnalysis.activate(Z, activations[l-1])
        
        # 输出层
        W = parameters[f'W{L}']
        b = parameters[f'b{L}']
        Z = np.dot(A, W) + b
        AL = GradientAnalysis.activate(Z, activations[-1])
        
        # 计算损失
        m = Y.shape[0]
        cost = -np.mean(Y * np.log(AL + 1e-8) + (1 - Y) * np.log(1 - AL + 1e-8))
        return cost
    
    @staticmethod
    def activate(Z, activation):
        """激活函数"""
        if activation == "relu":
            return np.maximum(0, Z)
        elif activation == "sigmoid":
            return 1 / (1 + np.exp(-Z))
        elif activation == "tanh":
            return np.tanh(Z)
        else:
            return Z

    @staticmethod
    def analyze_gradient_flow(network, X, Y):
        """分析梯度流动"""
        # 执行一次前向传播和反向传播
        AL, caches = network.forward_propagation(X)
        grads = network.backward_propagation(AL, Y, caches)
        
        # 分析各层梯度
        print("\n梯度流动分析:")
        layer_gradients = {}
        
        for key in sorted(grads.keys()):
            if key.startswith('dW'):
                layer_num = key[2:]
                grad_norm = np.linalg.norm(grads[key])
                layer_gradients[layer_num] = grad_norm
                print(f"层 {layer_num}: 梯度范数 = {grad_norm:.6f}")
        
        # 绘制梯度流动图
        plt.figure(figsize=(10, 6))
        layers = list(layer_gradients.keys())
        grad_norms = list(layer_gradients.values())
        
        plt.plot(range(len(layers)), grad_norms, 'ro-', linewidth=2, markersize=8)
        plt.xlabel('网络层 (从输入层到输出层)')
        plt.ylabel('梯度范数')
        plt.title('梯度流动分析')
        plt.grid(True, alpha=0.3)
        plt.xticks(range(len(layers)), layers)
        plt.yscale('log')
        
        # 检查梯度消失/爆炸
        max_grad = max(grad_norms)
        min_grad = min(grad_norms)
        
        if min_grad < 1e-8:
            print("⚠️  检测到可能的梯度消失问题")
        elif max_grad > 1e2 or (min_grad > 0 and max_grad / min_grad > 1e6):
            print("⚠️  梯度范数过大或各层差异悬殊,可能存在梯度爆炸问题")
        else:
            print("✅ 梯度流动正常")
        
        plt.show()
        
        return grads, layer_gradients

# 运行梯度分析
# 使用之前训练的网络进行梯度分析
sample_X = np.random.randn(5, 2)
sample_Y = (sample_X[:, 0] ** 2 + sample_X[:, 1] ** 2 > 1).astype(float).reshape(-1, 1)

grad_analysis = GradientAnalysis()
grads, layer_gradients = grad_analysis.analyze_gradient_flow(trained_nn, sample_X, sample_Y)
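
上面的 gradient_check 并没有被实际调用。下面补充一个在小网络上做梯度检查的示例草图(变量名均为示意),复用前文的 ForwardPropagation 与 BackwardPropagation:

# 在一个 2-3-1 的小网络上做一次梯度检查
np.random.seed(0)
check_X = np.random.randn(5, 2)
check_Y = np.random.randint(0, 2, (5, 1)).astype(float)

check_params = {
    'W1': np.random.randn(2, 3) * 0.1, 'b1': np.zeros((1, 3)),
    'W2': np.random.randn(3, 1) * 0.1, 'b2': np.zeros((1, 1))
}
check_activations = ['relu', 'sigmoid']

AL_chk, caches_chk = ForwardPropagation().model_forward(check_X, check_params, check_activations)
grads_chk = BackwardPropagation().model_backward(AL_chk, check_Y, caches_chk, check_activations)
GradientAnalysis.gradient_check(check_params, grads_chk, check_X, check_Y, check_activations)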

7. 性能优化技巧

7.1 前向传播优化

class ForwardPropagationOptimization:
    """前向传播优化技巧"""
    
    @staticmethod
    def optimized_forward_pass(X, parameters, activations):
        """
        优化的前向传播实现
        """
        # 使用更高效的计算方式
        A = X
        caches = []
        L = len(parameters) // 2
        
        for l in range(1, L + 1):
            A_prev = A
            W = parameters[f'W{l}']
            b = parameters[f'b{l}']
            
            # 使用einsum进行矩阵乘法(在某些情况下更高效)
            Z = np.einsum('ij,jk->ik', A_prev, W) + b
            
            # 激活函数
            if activations[l-1] == "relu":
                A = np.maximum(0, Z)
            elif activations[l-1] == "sigmoid":
                # 数值稳定的sigmoid计算
                A = np.where(Z >= 0, 
                           1 / (1 + np.exp(-Z)), 
                           np.exp(Z) / (1 + np.exp(Z)))
            else:
                A = Z
            
            cache = (A_prev, W, b, Z)
            caches.append(cache)
        
        return A, caches
    
    @staticmethod
    def benchmark_forward_pass():
        """前向传播性能基准测试"""
        import time
        
        # 创建大型网络
        layer_dims = [1000, 512, 256, 128, 64, 1]
        parameters = {}
        
        for i in range(1, len(layer_dims)):
            parameters[f'W{i}'] = np.random.randn(layer_dims[i-1], layer_dims[i]) * 0.01
            parameters[f'b{i}'] = np.zeros((1, layer_dims[i]))
        
        activations = ['relu'] * (len(layer_dims) - 2) + ['sigmoid']
        X = np.random.randn(100, 1000)  # 100个样本,1000个特征
        
        # 基准测试
        times_standard = []
        times_optimized = []
        
        for _ in range(10):
            # 标准实现
            start = time.time()
            AL1, caches1 = ForwardPropagation().model_forward(X, parameters, activations)
            times_standard.append(time.time() - start)
            
            # 优化实现
            start = time.time()
            AL2, caches2 = ForwardPropagationOptimization.optimized_forward_pass(X, parameters, activations)
            times_optimized.append(time.time() - start)
        
        print("前向传播性能基准测试:")
        print(f"标准实现平均时间: {np.mean(times_standard)*1000:.2f} ms")
        print(f"优化实现平均时间: {np.mean(times_optimized)*1000:.2f} ms")
        print(f"加速比: {np.mean(times_standard)/np.mean(times_optimized):.2f}x")
        
        # 验证结果一致性
        difference = np.max(np.abs(AL1 - AL2))
        print(f"结果差异: {difference:.6f}")
        
        return times_standard, times_optimized

# 运行性能测试
forward_times_std, forward_times_opt = ForwardPropagationOptimization.benchmark_forward_pass()

7.2 反向传播优化

class BackwardPropagationOptimization:
    """反向传播优化技巧"""
    
    @staticmethod
    def optimized_backward_pass(AL, Y, caches, activations):
        """
        优化的反向传播实现
        """
        grads = {}
        L = len(caches)
        m = AL.shape[0]
        Y = Y.reshape(AL.shape)
        
        # 输出层为 sigmoid + 二分类交叉熵时,组合导数可直接化简为 dZ = AL - Y,数值上更稳定
        dA = None  # 当前层激活值的梯度,输出层单独处理
        
        for l in reversed(range(L)):
            A_prev, W, b, Z = caches[l]
            activation = activations[l]
            
            # 激活函数梯度
            if l == L - 1 and activation == "sigmoid":
                dZ = AL - Y  # sigmoid + 交叉熵的简化梯度
            elif activation == "relu":
                dZ = dA * (Z > 0)
            elif activation == "sigmoid":
                s = 1 / (1 + np.exp(-Z))
                dZ = dA * s * (1 - s)
            elif activation == "tanh":
                dZ = dA * (1 - np.tanh(Z)**2)
            else:
                dZ = dA
            
            # 使用einsum进行矩阵乘法
            dW = np.einsum('ij,ik->jk', A_prev, dZ) / m
            db = np.sum(dZ, axis=0, keepdims=True) / m
            dA = np.einsum('ij,jk->ik', dZ, W.T)  # 传给前一层的梯度
            
            grads[f'dW{l+1}'] = dW
            grads[f'db{l+1}'] = db
        
        return grads
    
    @staticmethod
    def benchmark_backward_pass():
        """反向传播性能基准测试"""
        import time
        
        # 使用之前的前向传播结果
        layer_dims = [1000, 512, 256, 128, 64, 1]
        parameters = {}
        
        for i in range(1, len(layer_dims)):
            parameters[f'W{i}'] = np.random.randn(layer_dims[i-1], layer_dims[i]) * 0.01
            parameters[f'b{i}'] = np.zeros((1, layer_dims[i]))
        
        activations = ['relu'] * (len(layer_dims) - 2) + ['sigmoid']
        X = np.random.randn(100, 1000)
        Y = np.random.randint(0, 2, (100, 1))
        
        # 前向传播获取缓存:标准实现需要字典格式缓存,优化实现使用元组格式缓存
        AL, dict_caches = ForwardPropagation().model_forward(X, parameters, activations)
        tuple_caches = [(c["A_prev"], c["W"], c["b"], c["Z"]) for c in dict_caches]
        
        # 基准测试
        times_standard = []
        times_optimized = []
        
        for _ in range(10):
            # 标准实现
            start = time.time()
            grads1 = BackwardPropagation().model_backward(AL, Y, dict_caches, activations)
            times_standard.append(time.time() - start)
            
            # 优化实现
            start = time.time()
            grads2 = BackwardPropagationOptimization.optimized_backward_pass(AL, Y, tuple_caches, activations)
            times_optimized.append(time.time() - start)
        
        print("反向传播性能基准测试:")
        print(f"标准实现平均时间: {np.mean(times_standard)*1000:.2f} ms")
        print(f"优化实现平均时间: {np.mean(times_optimized)*1000:.2f} ms")
        print(f"加速比: {np.mean(times_standard)/np.mean(times_optimized):.2f}x")
        
        # 验证结果一致性
        max_diff = 0
        for key in grads2.keys():  # 只比较两种实现都计算的 dW/db 梯度
            diff = np.max(np.abs(grads1[key] - grads2[key]))
            max_diff = max(max_diff, diff)
        
        print(f"梯度结果最大差异: {max_diff:.6f}")
        
        return times_standard, times_optimized

# 运行反向传播性能测试
backward_times_std, backward_times_opt = BackwardPropagationOptimization.benchmark_backward_pass()

8. 总结与最佳实践

8.1 关键要点总结

class PropagationBestPractices:
    """前向传播和反向传播最佳实践"""
    
    @staticmethod
    def print_best_practices():
        """打印最佳实践"""
        
        practices = {
            '前向传播': [
                '使用数值稳定的激活函数实现',
                '监控各层激活值的分布',
                '定期检查梯度数值',
                '使用合适的参数初始化方法'
            ],
            '反向传播': [
                '实现梯度检查验证正确性',
                '监控梯度范数防止消失/爆炸',
                '使用梯度裁剪处理梯度爆炸',
                '选择合适的优化算法'
            ],
            '性能优化': [
                '使用向量化操作避免循环',
                '利用高效的矩阵乘法函数',
                '减少不必要的内存分配',
                '使用适当的数据类型'
            ],
            '调试技巧': [
                '从小网络开始验证正确性',
                '使用合成数据测试',
                '实现详细的日志记录',
                '可视化训练过程'
            ]
        }
        
        print("前向传播和反向传播最佳实践")
        print("=" * 60)
        
        for category, practice_list in practices.items():
            print(f"\n{category}:")
            for practice in practice_list:
                print(f"  ✅ {practice}")
    
    @staticmethod
    def common_pitfalls_and_solutions():
        """常见陷阱和解决方案"""
        
        pitfalls = {
            '梯度消失': {
                '症状': '深层网络训练困难,底层梯度接近0',
                '原因': '激活函数导数太小,网络太深',
                '解决方案': ['使用ReLU等梯度友好的激活函数', '添加残差连接', '使用BatchNorm']
            },
            '梯度爆炸': {
                '症状': '梯度值非常大,训练不稳定',
                '原因': '权重初始化不当,学习率太大',
                '解决方案': ['梯度裁剪', '合适的权重初始化', '减小学习率']
            },
            '数值不稳定': {
                '症状': '出现NaN或inf值',
                '原因': '数值计算溢出,激活函数输入过大',
                '解决方案': ['数值稳定的函数实现', '梯度裁剪', '权重正则化']
            },
            '训练震荡': {
                '症状': '损失值剧烈波动',
                '原因': '学习率太大,batch size太小',
                '解决方案': ['减小学习率', '增加batch size', '使用学习率调度']
            }
        }
        
        print("\n常见陷阱和解决方案")
        print("=" * 50)
        
        for pitfall, info in pitfalls.items():
            print(f"\n⚠️  {pitfall}:")
            print(f"   症状: {info['症状']}")
            print(f"   原因: {info['原因']}")
            print(f"   解决方案: {', '.join(info['解决方案'])}")

# 显示最佳实践
best_practices = PropagationBestPractices()
best_practices.print_best_practices()
best_practices.common_pitfalls_and_solutions()
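
上文多次把"梯度裁剪"列为应对梯度爆炸的手段。下面给出一个按全局范数裁剪梯度的简单草图(clip_gradients_by_global_norm 与阈值 max_norm 均为本文补充的示意,可在 update_parameters 之前调用):

def clip_gradients_by_global_norm(grads, max_norm=5.0):
    """按全局范数裁剪:所有 dW/db 梯度的整体范数超过 max_norm 时等比例缩小"""
    total_norm = np.sqrt(sum(np.sum(g ** 2) for k, g in grads.items()
                             if k.startswith('dW') or k.startswith('db')))
    if total_norm > max_norm:
        scale = max_norm / (total_norm + 1e-8)
        for k in grads:
            if k.startswith('dW') or k.startswith('db'):
                grads[k] = grads[k] * scale
    return grads

# 用法示例:grads = clip_gradients_by_global_norm(grads, max_norm=5.0)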

8.2 实用工具函数

class PropagationUtils:
    """传播工具函数"""
    
    @staticmethod
    def check_gradient_health(grads, threshold_low=1e-8, threshold_high=1e2):
        """检查梯度健康状态"""
        print("梯度健康检查:")
        
        healthy = True
        for key, grad in grads.items():
            grad_norm = np.linalg.norm(grad)
            
            if grad_norm < threshold_low:
                print(f"  ⚠️  {key}: 梯度消失 (范数: {grad_norm:.2e})")
                healthy = False
            elif grad_norm > threshold_high:
                print(f"  ⚠️  {key}: 梯度爆炸 (范数: {grad_norm:.2e})")
                healthy = False
            else:
                print(f"  ✅ {key}: 健康 (范数: {grad_norm:.2e})")
        
        return healthy
    
    @staticmethod
    def visualize_parameter_updates(parameters_before, parameters_after, grads):
        """可视化参数更新"""
        fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(15, 6))
        
        # 参数变化
        param_changes = []
        param_names = []
        
        for key in parameters_before.keys():
            if key.startswith('W'):
                change = np.mean(np.abs(parameters_after[key] - parameters_before[key]))
                param_changes.append(change)
                param_names.append(key)
        
        ax1.bar(param_names, param_changes, color='lightblue', alpha=0.7)
        ax1.set_xlabel('参数')
        ax1.set_ylabel('平均变化量')
        ax1.set_title('参数更新幅度')
        ax1.tick_params(axis='x', rotation=45)
        ax1.grid(True, alpha=0.3)
        
        # 梯度分布
        all_grads = []
        for key, grad in grads.items():
            all_grads.extend(grad.flatten())
        
        ax2.hist(all_grads, bins=50, alpha=0.7, color='lightcoral', edgecolor='black')
        ax2.set_xlabel('梯度值')
        ax2.set_ylabel('频率')
        ax2.set_title('梯度分布')
        ax2.grid(True, alpha=0.3)
        
        plt.tight_layout()
        plt.show()
    
    @staticmethod
    def create_propagation_report(forward_time, backward_time, grads, loss):
        """创建传播过程报告"""
        print("\n" + "="*50)
        print("传播过程分析报告")
        print("="*50)
        
        print(f"前向传播时间: {forward_time*1000:.2f} ms")
        print(f"反向传播时间: {backward_time*1000:.2f} ms")
        print(f"总传播时间: {(forward_time + backward_time)*1000:.2f} ms")
        print(f"最终损失: {loss:.4f}")
        
        # 梯度统计
        grad_norms = [np.linalg.norm(grad) for grad in grads.values()]
        print(f"梯度统计: 平均范数={np.mean(grad_norms):.2e}, "
              f"最大范数={np.max(grad_norms):.2e}, "
              f"最小范数={np.min(grad_norms):.2e}")
        
        # 健康检查
        is_healthy = PropagationUtils.check_gradient_health(grads)
        
        if is_healthy:
            print("总体评估: ✅ 传播过程健康")
        else:
            print("总体评估: ⚠️  传播过程存在问题,需要调整")

# 使用工具函数示例
def demonstrate_utils():
    """演示工具函数使用"""
    # 创建示例数据
    sample_X = np.random.randn(10, 2)
    sample_Y = np.random.randint(0, 2, (10, 1))
    
    # 创建简单网络
    layer_dims = [2, 4, 1]
    activations = ['relu', 'sigmoid']
    
    # 初始化参数
    parameters = {}
    for i in range(1, len(layer_dims)):
        parameters[f'W{i}'] = np.random.randn(layer_dims[i-1], layer_dims[i]) * 0.01
        parameters[f'b{i}'] = np.zeros((1, layer_dims[i]))
    
    # 保存初始参数
    parameters_before = {k: v.copy() for k, v in parameters.items()}
    
    import time
    
    # 前向传播
    start_time = time.time()
    AL, caches = ForwardPropagation().model_forward(sample_X, parameters, activations)
    forward_time = time.time() - start_time
    
    # 计算损失
    loss = -np.mean(sample_Y * np.log(AL + 1e-8) + (1 - sample_Y) * np.log(1 - AL + 1e-8))
    
    # 反向传播
    start_time = time.time()
    grads = BackwardPropagation().model_backward(AL, sample_Y, caches, activations)
    backward_time = time.time() - start_time
    
    # 参数更新(模拟)
    learning_rate = 0.1
    for key in parameters.keys():
        if key.startswith('W') or key.startswith('b'):
            grad_key = 'd' + key
            parameters[key] -= learning_rate * grads[grad_key]
    
    # 生成报告
    PropagationUtils.create_propagation_report(forward_time, backward_time, grads, loss)
    
    # 可视化更新
    PropagationUtils.visualize_parameter_updates(parameters_before, parameters, grads)

# 运行工具演示
demonstrate_utils()

前向传播和反向传播是神经网络训练的核心机制。通过深入理解这两个过程的工作原理、数学基础和实现细节,我们能够更好地设计、调试和优化神经网络模型。本文提供的详细解析、代码实现和可视化工具将帮助您在实际项目中更有效地应用这些知识。

注意:本文中的代码示例需要在合适的深度学习环境中运行,建议使用NumPy和Matplotlib库。在实际应用中,建议使用成熟的深度学习框架如PyTorch或TensorFlow,它们提供了更高效和稳定的实现。