在前面几节中,我们学习了监督学习、无监督学习以及知识表示与检索技术。今天,我们将进入强化学习的世界,这是一种让智能体通过与环境交互来学习最优行为策略的机器学习方法。我们将从经典的Q-Learning算法开始,逐步深入到深度强化学习。
强化学习概览
强化学习是机器学习的一个重要分支,它研究智能体(agent)在环境中如何通过试错来学习最优行为策略。
graph TD
A[强化学习] --> B[基本概念]
A --> C[经典算法]
A --> D[深度强化学习]
B --> E[智能体]
B --> F[环境]
B --> G[奖励]
B --> H[策略]
C --> I[Q-Learning]
C --> J[策略梯度]
D --> K[DQN]
D --> L[PPO]
强化学习基本元素
强化学习包含以下几个基本元素:
- 智能体(Agent) - 学习和决策的主体
- 环境(Environment) - 智能体交互的外部世界
- 状态(State) - 环境的描述
- 动作(Action) - 智能体可以执行的操作
- 奖励(Reward) - 环境对智能体动作的反馈
- 策略(Policy) - 智能体选择动作的规则
import numpy as np
import matplotlib.pyplot as plt
import random
# Example skeleton of a reinforcement-learning environment
class RLFramework:
    """Minimal reinforcement-learning environment skeleton.

    Tracks the current state and the cumulative reward of one episode.
    Subclasses are expected to override ``get_initial_state``, ``step``
    and ``get_actions`` with environment-specific logic.
    """

    def __init__(self):
        # No episode has started yet.
        self.state = None
        self.total_reward = 0

    def reset(self):
        """Start a fresh episode and return the initial state."""
        self.state = self.get_initial_state()
        self.total_reward = 0
        return self.state

    def get_initial_state(self):
        """Return the starting state (placeholder: always 0)."""
        return 0

    def step(self, action):
        """Apply *action*; return (next_state, reward, done, info).

        Placeholder dynamics: the state is a counter incremented by one
        per step, each step earns reward 1, and the episode terminates
        once the counter reaches 10.
        """
        successor = self.state + 1
        step_reward = 1
        finished = successor >= 10
        self.state = successor
        self.total_reward += step_reward
        return successor, step_reward, finished, {}

    def get_actions(self, state):
        """Return the actions available in *state* (placeholder: two)."""
        return [0, 1]
# Visualize the reinforcement-learning interaction loop
def visualize_rl_process():
    """Plot the state-action-reward sequence of RL interaction as a timeline.

    Draws six time steps on a horizontal axis and annotates each with its
    S_t / A_t / R_{t+1} triple, alternating labels above and below the line
    so neighbouring annotations do not overlap.
    """
    fig, ax = plt.subplots(figsize=(12, 6))
    # Draw the time axis as a row of connected black markers at y=0.
    time_steps = range(6)
    ax.plot(time_steps, [0]*len(time_steps), 'ko-', markersize=10)
    # Annotate each step; even indices go above the axis (yellow),
    # odd indices below (light blue).
    for i, t in enumerate(time_steps):
        ax.annotate(f'S{t}\nA{t}\nR{t+1}', (t, 0),
                    xytext=(0, 20 if i % 2 == 0 else -40),
                    textcoords='offset points',
                    ha='center', va='bottom' if i % 2 == 0 else 'top',
                    bbox=dict(boxstyle='round,pad=0.3', fc='yellow' if i % 2 == 0 else 'lightblue', alpha=0.7),
                    arrowprops=dict(arrowstyle='->', connectionstyle='arc3,rad=0'))
    ax.set_xlim(-0.5, 5.5)
    ax.set_ylim(-60, 60)
    ax.set_xlabel('时间步')
    ax.set_title('强化学习交互过程: 状态-动作-奖励序列')
    ax.axis('off')  # hide the frame; only the timeline and labels remain
    plt.tight_layout()
    plt.show()
# Render the interaction timeline, then summarise the loop in text.
visualize_rl_process()
print("强化学习交互过程说明:")
print("1. 智能体观察环境状态 S_t")
print("2. 根据策略选择动作 A_t")
print("3. 环境给出奖励 R_{t+1} 和新状态 S_{t+1}")
print("4. 重复上述过程")
Q-Learning算法
Q-Learning是一种经典的无模型强化学习算法,通过学习状态-动作价值函数Q(s,a)来找到最优策略。
Q-Learning原理
Q-Learning使用以下更新规则:
Q(s_t, a_t) ← Q(s_t, a_t) + α[r_{t+1} + γ max_{a} Q(s_{t+1}, a) - Q(s_t, a_t)]
其中:
- α 是学习率
- γ 是折扣因子
- r_{t+1} 是即时奖励
# A simple grid-world environment
class GridWorld:
    """A size x size grid where the agent walks from (0, 0) to the far corner."""

    def __init__(self, size=5):
        self.size = size
        self.state = (0, 0)                # agent starts in the top-left cell
        self.goal = (size-1, size-1)       # reaching this cell ends the episode
        self.actions = ['up', 'down', 'left', 'right']

    def reset(self):
        """Put the agent back at the start cell and return that position."""
        self.state = (0, 0)
        return self.state

    def step(self, action):
        """Move one cell in *action*'s direction, clipped at the grid walls.

        Returns (state, reward, done, info): reward is 10 on reaching the
        goal (episode ends), otherwise -1 per step to encourage short paths.
        """
        row, col = self.state
        if action == 'up':
            row = max(0, row - 1)
        elif action == 'down':
            row = min(self.size - 1, row + 1)
        elif action == 'left':
            col = max(0, col - 1)
        elif action == 'right':
            col = min(self.size - 1, col + 1)
        self.state = (row, col)
        # Reward shaping: goal bonus vs. per-step penalty.
        reached_goal = self.state == self.goal
        reward = 10 if reached_goal else -1
        return self.state, reward, reached_goal, {}

    def get_actions(self):
        """Return the list of the four movement actions."""
        return self.actions
# Q-Learning algorithm implementation
class QLearningAgent:
    """Tabular Q-Learning agent with an epsilon-greedy behaviour policy."""

    def __init__(self, actions, learning_rate=0.1, discount_factor=0.9, epsilon=0.1):
        self.actions = actions
        self.learning_rate = learning_rate      # alpha
        self.discount_factor = discount_factor  # gamma
        self.epsilon = epsilon                  # exploration probability
        self.q_table = {}                       # state -> {action: Q-value}

    def _ensure_state(self, state):
        # Lazily create a zero-initialised row for states never seen before.
        if state not in self.q_table:
            self.q_table[state] = {a: 0.0 for a in self.actions}

    def get_q_value(self, state, action):
        """Return Q(state, action), creating the table row on demand."""
        self._ensure_state(state)
        return self.q_table[state][action]

    def update_q_value(self, state, action, reward, next_state):
        """Apply the one-step Q-Learning update rule."""
        old_q = self.get_q_value(state, action)
        # Bootstrap from the best known next-state value (0 for unseen states).
        if next_state in self.q_table:
            bootstrap = max(self.get_q_value(next_state, a) for a in self.actions)
        else:
            bootstrap = 0.0
        td_target = reward + self.discount_factor * bootstrap
        self.q_table[state][action] = old_q + self.learning_rate * (td_target - old_q)

    def choose_action(self, state):
        """Pick an action epsilon-greedily, breaking Q-value ties at random."""
        if random.random() < self.epsilon:
            # Explore: uniformly random action.
            return random.choice(self.actions)
        # Exploit: best-valued action for this state.
        self._ensure_state(state)
        row = self.q_table[state]
        best = max(row.values())
        return random.choice([a for a, q in row.items() if q == best])
# Train the Q-Learning agent
def train_q_learning(episodes=1000):
    """Train a tabular Q-Learning agent on a 4x4 GridWorld.

    Returns (agent, per-episode total rewards, per-episode step counts)
    and prints a 100-episode moving average of both during training.
    """
    env = GridWorld(size=4)
    agent = QLearningAgent(env.get_actions(), learning_rate=0.1,
                           discount_factor=0.9, epsilon=0.1)
    episode_rewards, episode_lengths = [], []
    for episode in range(episodes):
        state = env.reset()
        total_reward, steps = 0, 0
        done = False
        while not done:
            action = agent.choose_action(state)
            next_state, reward, done, _ = env.step(action)
            # Learn from the transition before moving on.
            agent.update_q_value(state, action, reward, next_state)
            total_reward += reward
            steps += 1
            state = next_state
        episode_rewards.append(total_reward)
        episode_lengths.append(steps)
        # Progress report every 100 episodes.
        if (episode + 1) % 100 == 0:
            avg_reward = np.mean(episode_rewards[-100:])
            avg_length = np.mean(episode_lengths[-100:])
            print(f"Episode {episode+1}: 平均奖励 = {avg_reward:.2f}, 平均步数 = {avg_length:.2f}")
    return agent, episode_rewards, episode_lengths
# Train the agent
print("开始训练Q-Learning智能体...")
trained_agent, rewards, lengths = train_q_learning(episodes=1000)
# Plot the training curves: reward and episode length side by side.
plt.figure(figsize=(12, 5))
plt.subplot(1, 2, 1)
plt.plot(rewards)
plt.xlabel('Episode')
plt.ylabel('Total Reward')
plt.title('训练过程中的奖励变化')
plt.grid(True, alpha=0.3)
plt.subplot(1, 2, 2)
plt.plot(lengths)
plt.xlabel('Episode')
plt.ylabel('Episode Length')
plt.title('训练过程中的步数变化')
plt.grid(True, alpha=0.3)
plt.tight_layout()
plt.show()
# Evaluate the trained agent
def test_agent(agent, episodes=5):
    """Run the trained agent for a few episodes, printing each step and path.

    NOTE(review): the agent keeps its epsilon-greedy exploration while being
    tested, so occasional random moves are expected in the printed paths.
    """
    env = GridWorld(size=4)
    print("测试训练好的智能体:")
    for episode in range(episodes):
        state = env.reset()
        total_reward = 0
        steps = 0
        path = [state]  # visited cells, printed as the route afterwards
        print(f"\nEpisode {episode+1}:")
        print(f"起始位置: {state}")
        while True:
            action = agent.choose_action(state)
            next_state, reward, done, _ = env.step(action)
            total_reward += reward
            steps += 1
            path.append(next_state)
            state = next_state
            print(f" 步骤 {steps}: 动作={action}, 状态={state}, 奖励={reward}")
            if done or steps > 20:  # cap steps to avoid an endless episode
                break
        print(f" 总奖励: {total_reward}, 总步数: {steps}")
        print(f" 路径: {' -> '.join(map(str, path))}")


test_agent(trained_agent)
Q-Learning的改进:DQN
Deep Q-Network (DQN) 使用神经网络来近似Q函数,解决了Q-Learning在复杂环境中的维度灾难问题。
# A simplified DQN-style agent
class SimpleDQN:
    """Q-learning with a linear function approximator (a toy stand-in for DQN).

    The "network" is a single weight matrix mapping a state feature vector
    to one Q-value per action; exploration follows an epsilon-greedy
    schedule that decays after every update.
    """

    def __init__(self, state_size, action_size, learning_rate=0.001):
        self.state_size = state_size
        self.action_size = action_size
        self.learning_rate = learning_rate
        self.epsilon = 1.0           # start fully exploratory
        self.epsilon_min = 0.01      # exploration floor
        self.epsilon_decay = 0.995   # multiplicative decay per update
        # Small random weights play the role of the Q-network.
        self.weights = np.random.randn(state_size, action_size) * 0.01

    def q_values(self, state):
        """Return the vector of approximate Q-values for *state*."""
        return np.dot(state, self.weights)

    def choose_action(self, state):
        """Epsilon-greedy action selection over the approximated Q-values."""
        if np.random.rand() <= self.epsilon:
            return random.randrange(self.action_size)
        return np.argmax(self.q_values(state))

    def update(self, state, action, reward, next_state, done):
        """One TD(0) update of the weight column for the taken action."""
        prediction = self.q_values(state)[action]
        # Bootstrap from the best next-state value unless the episode ended.
        target = reward
        if not done:
            target += 0.95 * np.max(self.q_values(next_state))
        td_error = target - prediction
        self.weights[:, action] += self.learning_rate * td_error * state
        # Anneal exploration toward its floor.
        if self.epsilon > self.epsilon_min:
            self.epsilon *= self.epsilon_decay
# State-encoding helper
def encode_state(state, size=4):
    """One-hot encode grid position *state* = (x, y) as a flat size*size vector."""
    row, col = state
    vec = np.zeros(size * size)
    vec[row * size + col] = 1  # row-major flattening of the grid
    return vec
# DQN training example
def train_dqn(episodes=500):
    """Train the linear SimpleDQN agent on a 3x3 GridWorld.

    Returns (agent, per-episode total rewards) and prints the running
    100-episode average reward and current exploration rate.
    """
    env = GridWorld(size=3)  # smaller grid keeps the one-hot encoding compact
    state_size = 9           # 3x3 cells, one-hot encoded
    action_size = 4          # up / down / left / right
    agent = SimpleDQN(state_size, action_size)
    # BUG FIX: the original called agent.actions[action], but SimpleDQN has
    # no `actions` attribute (AttributeError). Map the agent's integer
    # action index to the environment's action names instead.
    action_names = env.get_actions()
    episode_rewards = []
    for episode in range(episodes):
        state = env.reset()
        state_encoded = encode_state(state, 3)
        total_reward = 0
        for step in range(50):  # cap episode length to bound wandering
            action = agent.choose_action(state_encoded)
            next_state, reward, done, _ = env.step(action_names[action])
            next_state_encoded = encode_state(next_state, 3)
            # Learn online from the single transition.
            agent.update(state_encoded, action, reward, next_state_encoded, done)
            total_reward += reward
            state_encoded = next_state_encoded
            state = next_state
            if done:
                break
        episode_rewards.append(total_reward)
        if (episode + 1) % 100 == 0:
            avg_reward = np.mean(episode_rewards[-100:])
            print(f"DQN Episode {episode+1}: 平均奖励 = {avg_reward:.2f}, 探索率 = {agent.epsilon:.4f}")
    return agent, episode_rewards
# Train the DQN agent
print("\n开始训练DQN智能体...")
dqn_agent, dqn_rewards = train_dqn(episodes=500)
# Plot the per-episode reward collected during DQN training.
plt.figure(figsize=(10, 6))
plt.plot(dqn_rewards)
plt.xlabel('Episode')
plt.ylabel('Total Reward')
plt.title('DQN训练过程中的奖励变化')
plt.grid(True, alpha=0.3)
plt.show()
策略梯度方法
策略梯度方法直接优化策略函数,而不是学习价值函数。
# A simple policy-gradient (REINFORCE) implementation
class SimplePolicyGradient:
    """REINFORCE with a linear softmax policy.

    The policy is softmax(state @ weights); updates follow the Monte Carlo
    policy-gradient rule using normalised discounted returns.
    """

    def __init__(self, state_size, action_size, learning_rate=0.01):
        self.state_size = state_size
        self.action_size = action_size
        self.learning_rate = learning_rate
        # Linear policy parameters: one column of logit weights per action.
        self.weights = np.random.randn(state_size, action_size) * 0.01

    def policy(self, state):
        """Return action probabilities via a numerically stable softmax."""
        logits = np.dot(state, self.weights)
        exp_logits = np.exp(logits - np.max(logits))  # subtract max for stability
        return exp_logits / np.sum(exp_logits)

    def choose_action(self, state):
        """Sample an action from the current policy distribution."""
        probs = self.policy(state)
        return np.random.choice(self.action_size, p=probs)

    def update(self, states, actions, rewards):
        """REINFORCE update over one episode's trajectory.

        BUG FIX vs the earlier version: the old code multiplied an
        (action_size,) softmax-gradient vector elementwise by a
        (state_size,) state vector — a shape/broadcast error whenever the
        two sizes differ (here 4 vs 9) — used the wrong sign
        (probs - onehot instead of onehot - probs), and updated only one
        weight column. The gradient of log softmax w.r.t. the weight
        matrix is outer(state, onehot(action) - probs), applied to the
        full matrix.
        """
        # Monte Carlo returns, normalised to reduce gradient variance.
        discounted_rewards = self.discount_rewards(rewards)
        discounted_rewards = (discounted_rewards - np.mean(discounted_rewards)) / (
            np.std(discounted_rewards) + 1e-8)
        for state, action, advantage in zip(states, actions, discounted_rewards):
            probs = self.policy(state)
            # d log pi(a|s) / d logits = onehot(a) - probs
            grad_logits = -probs
            grad_logits[action] += 1.0
            # Gradient ascent on the expected return.
            self.weights += self.learning_rate * advantage * np.outer(state, grad_logits)

    def discount_rewards(self, rewards, gamma=0.95):
        """Return the discounted reward-to-go for each time step."""
        discounted = np.zeros_like(rewards, dtype=float)
        running_add = 0
        for t in reversed(range(len(rewards))):
            running_add = running_add * gamma + rewards[t]
            discounted[t] = running_add
        return discounted
# Policy-gradient training example
def train_policy_gradient(episodes=300):
    """Train a REINFORCE agent on a 3x3 GridWorld.

    Collects one full episode, then performs a single policy update from
    its trajectory. Returns (agent, per-episode total rewards) and prints
    a 100-episode average reward during training.
    """
    env = GridWorld(size=3)
    state_size = 9   # 3x3 one-hot grid encoding
    action_size = 4  # up / down / left / right
    agent = SimplePolicyGradient(state_size, action_size)
    # BUG FIX: the original called agent.actions[action], but
    # SimplePolicyGradient has no `actions` attribute (AttributeError).
    # Translate the sampled index through the environment's action list.
    action_names = env.get_actions()
    episode_rewards = []
    for episode in range(episodes):
        state = env.reset()
        state_encoded = encode_state(state, 3)
        states, actions, rewards = [], [], []
        total_reward = 0
        for step in range(50):  # cap episode length
            action = agent.choose_action(state_encoded)
            states.append(state_encoded)
            actions.append(action)
            next_state, reward, done, _ = env.step(action_names[action])
            next_state_encoded = encode_state(next_state, 3)
            rewards.append(reward)
            total_reward += reward
            state_encoded = next_state_encoded
            state = next_state
            if done:
                break
        # One policy-gradient step per completed episode (Monte Carlo).
        agent.update(states, actions, rewards)
        episode_rewards.append(total_reward)
        if (episode + 1) % 100 == 0:
            avg_reward = np.mean(episode_rewards[-100:])
            print(f"Policy Gradient Episode {episode+1}: 平均奖励 = {avg_reward:.2f}")
    return agent, episode_rewards
# Train the policy-gradient agent
print("\n开始训练策略梯度智能体...")
pg_agent, pg_rewards = train_policy_gradient(episodes=300)
# Plot the per-episode reward collected during policy-gradient training.
plt.figure(figsize=(10, 6))
plt.plot(pg_rewards)
plt.xlabel('Episode')
plt.ylabel('Total Reward')
plt.title('策略梯度训练过程中的奖励变化')
plt.grid(True, alpha=0.3)
plt.show()
现代强化学习应用
强化学习在多个领域都有重要应用:
# Visualize application domains of reinforcement learning
def visualize_rl_applications():
    """Show RL application areas (pie + bar chart) and a milestone timeline."""
    # Illustrative share per application area (values sum to 1.0).
    applications = {
        '游戏AI': 0.3,
        '机器人控制': 0.25,
        '自动驾驶': 0.2,
        '金融交易': 0.1,
        '推荐系统': 0.1,
        '资源调度': 0.05
    }
    plt.figure(figsize=(12, 8))
    # Pie chart of the shares.
    plt.subplot(2, 2, 1)
    plt.pie(applications.values(), labels=applications.keys(), autopct='%1.1f%%')
    plt.title('强化学习应用领域分布')
    # Horizontal bar chart of the same data.
    plt.subplot(2, 2, 2)
    apps = list(applications.keys())
    values = list(applications.values())
    plt.barh(apps, values, color='skyblue')
    plt.xlabel('应用比例')
    plt.title('强化学习应用领域')
    plt.grid(True, alpha=0.3)
    # Timeline of RL milestones; labels alternate above/below the line.
    plt.subplot(2, 1, 2)
    years = [1992, 1997, 2013, 2015, 2016, 2017, 2020]
    events = ['TD-Gammon', '深蓝', 'DQN', 'AlphaGo', 'AlphaGo Zero', 'OpenAI Five', 'AlphaFold']
    plt.hlines(1, min(years)-1, max(years)+1, alpha=0.3)
    plt.scatter(years, [1]*len(years), s=100, color='red')
    for i, (year, event) in enumerate(zip(years, events)):
        plt.annotate(event, (year, 1),
                     xytext=(0, 20 if i % 2 == 0 else -40),
                     textcoords='offset points',
                     ha='center', va='bottom' if i % 2 == 0 else 'top',
                     bbox=dict(boxstyle='round,pad=0.3', fc='lightgreen', alpha=0.7),
                     arrowprops=dict(arrowstyle='->', connectionstyle='arc3,rad=0'))
    plt.xlim(min(years)-1, max(years)+1)
    plt.ylim(0.5, 1.5)
    plt.yticks([])  # the y axis carries no information on a timeline
    plt.xlabel('年份')
    plt.title('强化学习重要里程碑')
    plt.tight_layout()
    plt.show()
# Render the application charts, then list the key application areas.
visualize_rl_applications()
print("强化学习重要应用:")
print("1. 游戏AI: 从Atari游戏到围棋、星际争霸")
print("2. 机器人控制: 学习行走、抓取等技能")
print("3. 自动驾驶: 决策和控制策略")
print("4. 金融交易: 算法交易策略")
print("5. 推荐系统: 个性化推荐策略")
print("6. 资源调度: 云计算资源分配")
本节学习总结
今天我们深入学习了强化学习的核心概念和算法:
- 强化学习基础
- 理解了强化学习的基本元素和交互过程
- 学会了马尔可夫决策过程的概念
- Q-Learning算法
- 掌握了Q-Learning的原理和实现
- 实现了网格世界的智能体训练
- 深度强化学习
- 了解了DQN的基本思想
- 实现了简化的深度Q网络
- 策略梯度方法
- 学习了直接优化策略的方法
- 实现了简单的策略梯度算法
graph TD
A[强化学习] --> B[Q-Learning]
A --> C[深度强化学习]
A --> D[策略梯度]
B --> E[算法原理]
B --> F[代码实现]
C --> G[DQN]
C --> H[深度网络]
D --> I[策略优化]
D --> J[梯度计算]
课后练习
- 运行本节所有代码示例,理解各种算法的工作原理
- 修改GridWorld环境,增加障碍物,观察算法性能变化
- 调整Q-Learning的超参数(学习率、折扣因子、探索率),分析对训练效果的影响
- 研究Actor-Critic算法,理解其与Q-Learning和策略梯度的关系
下节预告
下一节我们将学习神经网络基础,包括感知机、多层感知机和反向传播算法,这些是深度学习的基石,敬请期待!
有任何疑问请在讨论区留言,我们会定期回复大家的问题。