import torch
import torchvision
from d2l import torch as d2l
from torch import nn
from torch.nn import functional as F
from PIL import Image, ImageDraw
def cls_predictor(num_inputs, num_anchors, num_classes):
    """Class prediction layer: one score per anchor per class, plus background."""
    return nn.Conv2d(num_inputs, num_anchors * (num_classes + 1),
                     kernel_size=3, padding=1)
def bbox_predictor(num_inputs, num_anchors):
"""
边界框预测层
Args:
num_inputs:
num_anchors:
Returns:
"""
return nn.Conv2d(num_inputs, num_anchors * 4, kernel_size=3, padding=1)
def flatten_pred(pred):
    """Move channels last and flatten each example so multi-scale predictions can be concatenated."""
    return torch.flatten(pred.permute(0, 2, 3, 1), start_dim=1)
def concat_preds(preds):
return torch.cat([flatten_pred(p) for p in preds], dim=1)
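# Example (illustrative sketch, not part of the original script): prediction maps
# from different scales have different shapes, but after flattening they can be
# concatenated along dim 1.
#   Y1 = cls_predictor(8, 5, 10)(torch.zeros((2, 8, 20, 20)))    # (2, 55, 20, 20)
#   Y2 = cls_predictor(16, 3, 10)(torch.zeros((2, 16, 10, 10)))  # (2, 33, 10, 10)
#   concat_preds([Y1, Y2]).shape                                 # torch.Size([2, 25300])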
def down_sample_blk(in_channels, out_channels):
blk = []
for _ in range(2):
blk.append(nn.Conv2d(in_channels, out_channels,
kernel_size=3, padding=1))
blk.append(nn.BatchNorm2d(out_channels))
blk.append(nn.ReLU())
in_channels = out_channels
blk.append(nn.MaxPool2d(2))
return nn.Sequential(*blk)
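# Example (sketch): the two padded 3x3 convolutions keep the spatial size and the
# 2x2 max pooling halves it.
#   down_sample_blk(3, 10)(torch.zeros((2, 3, 20, 20))).shape  # torch.Size([2, 10, 10, 10])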
def base_net():
blk = []
num_filters = [3, 16, 32, 64]
for i in range(len(num_filters) - 1):
blk.append(down_sample_blk(num_filters[i], num_filters[i + 1]))
return nn.Sequential(*blk)
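# Example (sketch): three halving blocks turn a 256x256 input into a 32x32
# feature map with 64 channels.
#   base_net()(torch.zeros((2, 3, 256, 256))).shape  # torch.Size([2, 64, 32, 32])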
def get_blk(i):
"""
完整的单发多框检测模型由五个模块组成。
每个块生成的特征图既用于生成锚框,又用于预测这些锚框的类别和偏移量。
在这五个模块中,第一个是基本网络块,第二个到第四个是高和宽减半块,
最后一个模块使用全局最大池将高度和宽度都降到1
Args:
i:
Returns:
"""
if i == 0:
blk = base_net()
elif i == 1:
blk = down_sample_blk(64, 128)
elif i == 4:
blk = nn.AdaptiveMaxPool2d((1, 1))
else:
blk = down_sample_blk(128, 128)
return blk
def blk_forward(X, blk, size, ratio, cls_predictor, bbox_predictor):
Y = blk(X)
anchors = d2l.multibox_prior(Y, sizes=size, ratios=ratio)
cls_preds = cls_predictor(Y)
bbox_preds = bbox_predictor(Y)
return Y, anchors, cls_preds, bbox_preds
sizes = [[0.2, 0.272], [0.37, 0.447], [0.54, 0.619], [0.71, 0.79],
[0.88, 0.961]]
ratios = [[1, 2, 0.5]] * 5
num_anchors = len(sizes[0]) + len(ratios[0]) - 1
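# With 2 scales and 3 ratios per feature-map pixel, multibox_prior generates
# len(sizes[i]) + len(ratios[i]) - 1 = 4 anchors per pixel. Assuming 256x256
# inputs as in the banana dataset, the five blocks yield 32x32, 16x16, 8x8,
# 4x4 and 1x1 feature maps, i.e. (1024 + 256 + 64 + 16 + 1) * 4 = 5444 anchors
# in total.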
class TinySSD(nn.Module):
def __init__(self, num_classes, **kwargs):
super(TinySSD, self).__init__(**kwargs)
self.num_classes = num_classes
idx_to_in_channels = [64, 128, 128, 128, 128]
for i in range(5):
setattr(self, f'blk_{i}', get_blk(i))
setattr(self, f'cls_{i}', cls_predictor(idx_to_in_channels[i],
num_anchors, num_classes))
setattr(self, f'bbox_{i}', bbox_predictor(idx_to_in_channels[i],
num_anchors))
def forward(self, X):
anchors, cls_preds, bbox_preds = [None] * 5, [None] * 5, [None] * 5
for i in range(5):
X, anchors[i], cls_preds[i], bbox_preds[i] = blk_forward(
X, getattr(self, f'blk_{i}'), sizes[i], ratios[i],
getattr(self, f'cls_{i}'), getattr(self, f'bbox_{i}'))
anchors = torch.cat(anchors, dim=1)
cls_preds = concat_preds(cls_preds)
cls_preds = cls_preds.reshape(
cls_preds.shape[0], -1, self.num_classes + 1)
bbox_preds = concat_preds(bbox_preds)
return anchors, cls_preds, bbox_preds
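# Example (sketch): a forward pass on a batch of 32 images of size 256x256 with
# num_classes=1 (banana vs. background) produces the following shapes:
#   net = TinySSD(num_classes=1)
#   anchors, cls_preds, bbox_preds = net(torch.zeros((32, 3, 256, 256)))
#   anchors.shape     # torch.Size([1, 5444, 4])
#   cls_preds.shape   # torch.Size([32, 5444, 2])
#   bbox_preds.shape  # torch.Size([32, 21776])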
cls_loss = nn.CrossEntropyLoss(reduction='none')
bbox_loss = nn.L1Loss(reduction='none')
def calc_loss(cls_preds, cls_labels, bbox_preds, bbox_labels, bbox_masks):
batch_size, num_classes = cls_preds.shape[0], cls_preds.shape[2]
cls = cls_loss(cls_preds.reshape(-1, num_classes),
cls_labels.reshape(-1)).reshape(batch_size, -1).mean(dim=1)
bbox = bbox_loss(bbox_preds * bbox_masks,
bbox_labels * bbox_masks).mean(dim=1)
return cls + bbox
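# Loss layout: cls_preds is (batch, num_anchors, num_classes + 1) and is flattened
# to (batch * num_anchors, num_classes + 1) for the cross-entropy term, while the
# L1 term is masked so that anchors assigned to background do not contribute to
# the box-offset loss. Both terms are averaged per example and added.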
def cls_eval(cls_preds, cls_labels):
return float((cls_preds.argmax(dim=-1).type(
cls_labels.dtype) == cls_labels).sum())
def bbox_eval(bbox_preds, bbox_labels, bbox_masks):
return float((torch.abs((bbox_labels - bbox_preds) * bbox_masks)).sum())
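# cls_eval counts the anchors whose predicted class (argmax over the last dim)
# matches the label; bbox_eval sums the absolute error of the masked offsets.
# train() turns them into a class error rate and a mean absolute error.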
def train(net, train_data):
device = d2l.try_gpu()
num_epochs, timer = 20, d2l.Timer()
net = net.to(device)
trainer = torch.optim.SGD(net.parameters(), lr=0.2, weight_decay=5e-4)
for epoch in range(num_epochs):
metric = d2l.Accumulator(4)
net.train()
for features, target in train_data:
timer.start()
trainer.zero_grad()
X, Y = features.to(device), target.to(device)
anchors, cls_preds, bbox_preds = net(X)
bbox_labels, bbox_masks, cls_labels = d2l.multibox_target(anchors, Y)
l = calc_loss(cls_preds, cls_labels, bbox_preds, bbox_labels,
bbox_masks)
l.mean().backward()
trainer.step()
metric.add(cls_eval(cls_preds, cls_labels), cls_labels.numel(),
bbox_eval(bbox_preds, bbox_labels, bbox_masks),
bbox_labels.numel())
cls_err, bbox_mae = 1 - metric[0] / metric[1], metric[2] / metric[3]
print(f'{epoch + 1} ===>class err {cls_err:.2e}, bbox mae {bbox_mae:.2e}')
    print(f'{len(train_data.dataset) / timer.stop():.1f} examples/sec on '
          f'{str(device)}')
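# Usage sketch (assumes the banana detection loader from d2l and a fresh model;
# the checkpoint name "ssd.pt" matches the one loaded in __main__ below):
#   train_iter, _ = d2l.load_data_bananas(32)
#   net = TinySSD(num_classes=1)
#   train(net, train_iter)
#   torch.save(net.state_dict(), "ssd.pt")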
def predict(net, X):
    device = d2l.try_gpu()
    net = net.to(device)  # keep the model on the same device as the input
    net.eval()
    anchors, cls_preds, bbox_preds = net(X.to(device))
cls_probs = F.softmax(cls_preds, dim=2).permute(0, 2, 1)
output = d2l.multibox_detection(cls_probs, bbox_preds, anchors)
idx = [i for i, row in enumerate(output[0]) if row[0] != -1]
return output[0, idx]
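# Each row returned by predict() has the form
#   (class_id, confidence, xmin, ymin, xmax, ymax)
# with the corner coordinates given as fractions of the image width/height,
# which is why display() below rescales them by (w, h, w, h) before drawing.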
def display(img_tensor, outputs, threshold):
    # img_tensor is a (1, 3, H, W) float tensor with values in [0, 255]
    array = img_tensor.squeeze(0).permute(1, 2, 0)
    image = Image.fromarray(array.byte().numpy())
    draw = ImageDraw.Draw(image)
    h, w = img_tensor.shape[2:4]
    for row in outputs:
        score = float(row[1])
        if score < threshold:
            continue
        # scale the relative corner coordinates back to pixel coordinates
        bbox = (row[2:6] * torch.tensor((w, h, w, h), device=row.device)).tolist()
        draw.rectangle(bbox, outline='red', width=2)
    image.show()
if __name__ == '__main__':
    batch_size = 32
    train_iter, val_iter = d2l.load_data_bananas(batch_size)
    net = TinySSD(num_classes=1)
    # Load previously trained weights (see the usage sketch above train()).
    net.load_state_dict(torch.load("ssd.pt", weights_only=True))
    # Alternative: read a single validation image directly from disk.
    # Note that X is overwritten by the loop below, which uses the data loader.
    X = torchvision.io.read_image('../data/banana-detection/bananas_val/images/0.png').unsqueeze(0).float()
    # Run detection on the first image of the first validation batch.
    for features, target in val_iter:
        for i in range(batch_size):
            X = features[i].unsqueeze(0)
            outputs = predict(net, X)
            display(X, outputs.cpu(), threshold=0.9)
            break
        break