深度学习笔记 - 使用LSTM实现时序预测

412 阅读4分钟

本任务使用torch.nn.LSTM实现时序预测,火灾温度数据集采用CSV格式存储。重点在于构建torch框架实现时序预测任务。

一. 数据预处理

调用data.describe()查看数据统计特征:

# Load the fire-temperature time-series dataset (CSV) into a DataFrame.
data = pd.read_csv('woodpine2.csv')
# In a notebook this displays count/mean/std/min/max per column.
data.describe()

共5948条时序数据,除时间戳外有三个变量。 image.png

使用seaborn做数据可视化

    import seaborn as sns
    import matplotlib.pyplot as plt

    # Raise figure resolution for both inline display and saved images.
    plt.rcParams['figure.dpi'] = 500
    plt.rcParams['savefig.dpi'] = 500

    fig, ax = plt.subplots(1, 3, constrained_layout=True, figsize=(14, 3))

    # One line plot per measured variable, left to right.
    for axis, column in zip(ax, ['Tem1', 'CO 1', 'Soot 1']):
        sns.lineplot(data=data[column], ax=axis)
    plt.show()

image.png

预处理方法通常采用MinMaxScaler,即利用原始数据的最大值和最小值把数据线性映射到某一范围(默认[0, 1])上。或是StandardScaler把数据标准化,转化成均值为0、方差为1的分布。

from sklearn.preprocessing import MinMaxScaler

# Drop the timestamp column; keep the three measured variables.
dataFrame = data.iloc[:, 1:]
print(dataFrame.shape)

# MinMaxScaler rescales each column to [0, 1] by default.
# NOTE(review): fitted on the full series here; a strict protocol would fit
# on the training split only to avoid leakage — confirm intent.
scaler = MinMaxScaler()  # fixed typo: was `scalar`
trained_data = scaler.fit_transform(dataFrame)  # ndarray, shape (T, 3)

二. 构建dataloader

使用当前窗口内的数据预测下一时间步的数据,相应的dataloader如下。需要将ndarray数据转化成Tensor。

import numpy as np

import torch
import torch.nn as nn
from torch.utils.data import DataLoader, Dataset, TensorDataset

class TimeSeriesDataset(Dataset):
    """Sliding-window dataset: each item pairs a window with the next step.

    Given a series of shape (T, n), item ``idx`` is
    (data[idx:idx+window_size], data[idx+window_size]).
    """

    def __init__(self, data, window_size=5):
        """
        :param data: time series, ndarray or Tensor of shape (T, n)
        :param window_size: number of past steps used as input (default 5)
        """
        self.data = data
        self.window_size = window_size

    def __len__(self):
        # One sample per position that still has a "next" step to predict.
        return len(self.data) - self.window_size

    def __getitem__(self, idx):
        x = self.data[idx:idx + self.window_size]  # (window_size, n)
        y = self.data[idx + self.window_size]      # (n,)
        # as_tensor avoids the copy-construct UserWarning that torch.tensor
        # raises when `data` is already a Tensor, and accepts ndarrays too.
        return (torch.as_tensor(x, dtype=torch.float32),
                torch.as_tensor(y, dtype=torch.float32))

# Build tensors from the *scaled* data.
# BUG FIX: the original passed the raw pandas DataFrame to torch.from_numpy,
# which both skips the MinMax scaling and fails outright (from_numpy requires
# an ndarray). Use the scaler output instead.
data = torch.from_numpy(trained_data)

# Use 10 past time steps to predict the next one.
window_size = 10
dataset = TimeSeriesDataset(data, window_size)

batch_size = 32
dataloader = DataLoader(dataset, batch_size=batch_size, shuffle=True)

# Smoke-test the loader's batch shapes.
for x_batch, y_batch in dataloader:
    print(x_batch.shape)  # (batch_size, window_size, n)
    print(y_batch.shape)  # (batch_size, n)
    break

三. 模型搭建

使用torch构建一个LSTM+FC的结构并且测试跑通.

nn.LSTM的输出包含两个部分:输出序列output(所有时间步最后一层的隐藏状态)和元组(h_n, c_n),其中h_n是最后一个时间步各层的隐藏状态,c_n是对应的细胞状态。h_n的形状是 (num_layers, batch_size, hidden_size),我们取最后一层的隐藏状态作为FC层的输入。

class net(nn.Module):
    """LSTM followed by a fully connected head for one-step prediction."""

    def __init__(self, input_size, hidden_size, output_size, num_layers):
        """
        :param input_size: number of features per time step
        :param hidden_size: LSTM hidden dimension
        :param output_size: dimension of the prediction
        :param num_layers: number of stacked LSTM layers
        """
        super(net, self).__init__()
        # BUG FIX: the parameter was named `nums_layer` while the body used
        # `num_layers`, silently resolving to a module-level global. Existing
        # callers pass this argument positionally, so the rename is safe.
        self.lstm = nn.LSTM(input_size, hidden_size, num_layers, batch_first=True)
        self.fc = nn.Linear(hidden_size, output_size)

    def forward(self, x):
        # x: (batch, seq_len, input_size) because batch_first=True.
        lstm_out, (h_n, c_n) = self.lstm(x)
        # h_n: (num_layers, batch, hidden_size); keep the last layer's state.
        final_hidden_state = h_n[-1]
        # Project the final hidden state to the output dimension.
        return self.fc(final_hidden_state)

测试:

input_size = 3    # features per time step (Tem1, CO 1, Soot 1)
hidden_size = 64  # LSTM hidden dimension
output_size = 1   # predicting a single value
num_layers = 1    # stacked LSTM layers

# FIX: fall back to CPU when CUDA is unavailable (was hard-coded 'cuda:0'),
# matching the guarded device selection used in the training section.
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')

# Smoke-test the model on a random batch (CPU tensors; `device` is used later).
model = net(input_size, hidden_size, output_size, num_layers)
x = torch.randn(32, 10, 3)

output = model(x)
print(output.shape)
# torch.Size([32, 1])

四. 模型训练

踩过的坑:

  • 使用notebook coding时,尽量不要原地覆盖变量,否则不便检查中间结果
  • 回归任务使用MSE作为loss

训练函数:

def train(dataloader, model, loss_fn, optimizer, device, scheduler):
    """Run one training epoch and return the mean per-batch loss.

    :param dataloader: yields (X, y) batches
    :param model: network to optimize (caller is expected to set .train())
    :param loss_fn: loss function, e.g. nn.MSELoss
    :param optimizer: optimizer updating the model's parameters
    :param device: device to move each batch to
    :param scheduler: LR scheduler, stepped once per epoch
    :return: average loss over all batches (float)
    """
    num_batches = len(dataloader)
    train_loss = 0.0  # accumulated batch losses

    for X, y in dataloader:
        X, y = X.to(device), y.to(device)

        # Forward pass and loss.
        pred = model(X)
        loss = loss_fn(pred, y)

        # Standard backward pass.
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        train_loss += loss.item()

    # Step the LR schedule once per epoch, after all batches.
    scheduler.step()

    # Average over batches (removed the unused dataset-size variable).
    return train_loss / num_batches

测试函数:

def test(dataloader, model, loss_fn, device):
    size = len(dataloader.dataset)  # 测试集的大小
    num_batches = len(dataloader)   # 批次数目
    
    test_loss = 0  # 初始化测试损失

    # 当不进行训练时,停止梯度更新,节省计算资源
    with torch.no_grad():
        for X, y in dataloader:
            X, y = X.to(device), y.to(device)
            
            # 计算模型的预测
            pred = model(X)
            
            # 计算损失
            loss = loss_fn(pred, y)
            
            # 累加损失
            test_loss += loss.item()

    # 计算平均损失
    test_loss /= num_batches

    return test_loss

主函数

import torch.optim

model = net(input_size, hidden_size, output_size, num_layers)
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')

model = model.to(device)
loss_fn = nn.MSELoss()  # regression task -> MSE

learn_rate = 1e-2
opt = torch.optim.SGD(model.parameters(), lr=learn_rate)
# Halve the learning rate every 5 epochs.
scheduler = torch.optim.lr_scheduler.StepLR(opt, step_size=5, gamma=0.5)

# BUG FIX: train_dl / test_dl were never defined anywhere in the article.
# Split the dataset chronologically (for time series, do not shuffle samples
# across the split boundary) and build one loader per split.
train_size = int(0.8 * len(dataset))
train_dl = DataLoader(torch.utils.data.Subset(dataset, range(train_size)),
                      batch_size=batch_size, shuffle=True)
test_dl = DataLoader(torch.utils.data.Subset(dataset, range(train_size, len(dataset))),
                     batch_size=batch_size, shuffle=False)

# NOTE(review): the dataset's y holds all 3 features while output_size is 1,
# so MSELoss will broadcast pred (B,1) against y (B,3) — confirm whether only
# the temperature column should be the target.

epochs = 20
train_loss = []
test_loss = []

for epoch in range(epochs):
    model.train()
    epoch_train_loss = train(train_dl, model, loss_fn, opt, device, scheduler)

    model.eval()
    epoch_test_loss = test(test_dl, model, loss_fn, device)

    train_loss.append(epoch_train_loss)
    test_loss.append(epoch_test_loss)

    template = ('Epoch:{:2d}, Train_loss:{:.3f}, Test_loss:{:.3f}')
    print(template.format(epoch+1, epoch_train_loss, epoch_test_loss))
print('Done')

训练日志

image.png