本文已参与「新人创作礼」活动,一起开启掘金创作之路。
一篇很好的博客:详解PyTorch中的ModuleList和Sequential
用 ModuleList 构建多层子层的例子:
import torch
import torch.nn as nn
class EncoderLayer(nn.Module):
    """A single encoder sub-layer: two 3x3 convolutions, each followed by ReLU."""

    def __init__(self):
        super().__init__()
        # 4 -> 10 channels, then back 10 -> 4; each unpadded 3x3 conv
        # shrinks the spatial dimensions (H, W) by 2.
        self.conv1 = nn.Conv2d(4, 10, (3, 3))
        self.conv2 = nn.Conv2d(10, 4, (3, 3))
        self.relu = nn.ReLU()

    def forward(self, x):
        hidden = self.relu(self.conv1(x))
        return self.relu(self.conv2(hidden))
class Encoder(nn.Module):
    """A stack of ``num_layers`` EncoderLayer sub-layers applied in sequence.

    Every sub-layer shares the same architecture, but each one holds its
    own (independently trained) parameters.
    """

    def __init__(self, num_layers):
        super().__init__()
        # nn.ModuleList registers each EncoderLayer as a sub-module so that
        # its parameters are visible to the optimizer / state_dict.
        self.ModelList = nn.ModuleList([EncoderLayer() for _ in range(num_layers)])

    def forward(self, x):
        # Unlike nn.Sequential, a ModuleList has no forward() of its own;
        # it behaves like a plain list, so we iterate it (for-loop or
        # indexing) and chain the sub-layers explicitly.
        for sublayer in self.ModelList:
            x = sublayer(x)
        return x
# Demo: a 4-layer Encoder. Each EncoderLayer applies two unpadded 3x3 convs,
# so 4 layers shrink the spatial size by 4 * 4 = 16: 30 -> 14.
# Renamed `input` -> `batch` to avoid shadowing the builtin input().
batch = torch.rand(5, 4, 30, 30)
model = Encoder(num_layers=4)
output = model(batch)
print(model)
print(output.shape)  # torch.Size([5, 4, 14, 14])
model:
Encoder(
(ModelList): ModuleList(
(0): EncoderLayer(
(conv1): Conv2d(4, 10, kernel_size=(3, 3), stride=(1, 1))
(conv2): Conv2d(10, 4, kernel_size=(3, 3), stride=(1, 1))
(relu): ReLU()
)
(1): EncoderLayer(
(conv1): Conv2d(4, 10, kernel_size=(3, 3), stride=(1, 1))
(conv2): Conv2d(10, 4, kernel_size=(3, 3), stride=(1, 1))
(relu): ReLU()
)
(2): EncoderLayer(
(conv1): Conv2d(4, 10, kernel_size=(3, 3), stride=(1, 1))
(conv2): Conv2d(10, 4, kernel_size=(3, 3), stride=(1, 1))
(relu): ReLU()
)
(3): EncoderLayer(
(conv1): Conv2d(4, 10, kernel_size=(3, 3), stride=(1, 1))
(conv2): Conv2d(10, 4, kernel_size=(3, 3), stride=(1, 1))
(relu): ReLU()
)
)
)