CenterNet Code Walkthrough: Network Architecture


Code: github.com/xingyizhou/…

Paper: arxiv.org/pdf/1904.07…

Network Architecture

This walkthrough uses the dla34 network as the example; the main code is in pose_dla_dcn.py.

Reading the network-architecture diagram and the code side by side:

1. The Root class

This corresponds to the green aggregation nodes in the diagram. It accepts multiple inputs and aggregates information from the different layers. (A small usage sketch follows the code.)

class Root(nn.Module):
    def __init__(self, in_channels, out_channels, kernel_size, residual):
        super(Root, self).__init__()
        self.conv = nn.Conv2d(
            in_channels, out_channels, 1,
            stride=1, bias=False, padding=(kernel_size - 1) // 2)
        self.bn = nn.BatchNorm2d(out_channels, momentum=BN_MOMENTUM)
        self.relu = nn.ReLU(inplace=True)
        self.residual = residual

    def forward(self, *x):
        children = x
        # concatenate all inputs along the channel axis, then fuse with a 1x1 conv
        x = self.conv(torch.cat(x, 1))
        x = self.bn(x)
        if self.residual:
            # optional skip connection from the first input
            x += children[0]
        x = self.relu(x)
        return x
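
A minimal shape check of the aggregation node (a sketch; it assumes the Root class above and the torch/nn imports from pose_dla_dcn.py are in scope, and defines BN_MOMENTUM because Root's BatchNorm2d reads it):

import torch

BN_MOMENTUM = 0.1                 # value used in the repo

x1 = torch.randn(2, 64, 32, 32)   # e.g. output of tree1
x2 = torch.randn(2, 64, 32, 32)   # e.g. output of tree2
root = Root(in_channels=128, out_channels=64, kernel_size=1, residual=False)
y = root(x2, x1)                  # cat along channels -> 1x1 conv -> BN -> ReLU
print(y.shape)                    # torch.Size([2, 64, 32, 32])

The only constraint is that in_channels equals the sum of the channel counts of all inputs passed to forward.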

2. The Tree class

This corresponds to the red hierarchical deep aggregation (HDA) boxes in the diagram. Its main cases (a shape-check sketch follows the code below):

  • When levels=1, self.tree1 and self.tree2 are both built from the BasicBlock module (two conv_bn_relu layers with a residual connection), which matches the first red box.

  • When levels=2, self.tree1 and self.tree2 recursively instantiate the Tree class, which matches the second red box.

class Tree(nn.Module):
    def __init__(self, levels, block, in_channels, out_channels, stride=1,
                 level_root=False, root_dim=0, root_kernel_size=1,
                 dilation=1, root_residual=False):
        super(Tree, self).__init__()
        if root_dim == 0:
            root_dim = 2 * out_channels
        if level_root:
            root_dim += in_channels
        if levels == 1:
            self.tree1 = block(in_channels, out_channels, stride,
                               dilation=dilation)
            self.tree2 = block(out_channels, out_channels, 1,
                               dilation=dilation)
        else:
            self.tree1 = Tree(levels - 1, block, in_channels, out_channels,
                              stride, root_dim=0,
                              root_kernel_size=root_kernel_size,
                              dilation=dilation, root_residual=root_residual)
            self.tree2 = Tree(levels - 1, block, out_channels, out_channels,
                              root_dim=root_dim + out_channels,
                              root_kernel_size=root_kernel_size,
                              dilation=dilation, root_residual=root_residual)
        if levels == 1:
            self.root = Root(root_dim, out_channels, root_kernel_size,
                             root_residual)
        self.level_root = level_root
        self.root_dim = root_dim
        self.downsample = None
        self.project = None
        self.levels = levels
        if stride > 1:
            self.downsample = nn.MaxPool2d(stride, stride=stride)
        if in_channels != out_channels:
            self.project = nn.Sequential(
                nn.Conv2d(in_channels, out_channels,
                          kernel_size=1, stride=1, bias=False),
                nn.BatchNorm2d(out_channels, momentum=BN_MOMENTUM)
            )

    def forward(self, x, residual=None, children=None):
        children = [] if children is None else children
        # downsample the input when stride > 1 so it matches the deeper branch
        bottom = self.downsample(x) if self.downsample else x
        # 1x1 conv + BN to match channels for the residual connection
        residual = self.project(bottom) if self.project else bottom
        if self.level_root:
            children.append(bottom)
        x1 = self.tree1(x, residual)
        if self.levels == 1:
            x2 = self.tree2(x1)
            # the Root node aggregates tree2, tree1 and any collected children
            x = self.root(x2, x1, *children)
        else:
            children.append(x1)
            x = self.tree2(x1, children=children)
        return x
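
As a quick sanity check, a single-level Tree with stride 2 halves the spatial size while changing the channel count, exactly as level2 of DLA-34 does below (a sketch; assumes Tree, Root, BasicBlock and BN_MOMENTUM from pose_dla_dcn.py are in scope):

import torch

# levels=1 Tree as used for level2 of DLA-34: 32 -> 64 channels, stride 2
tree = Tree(levels=1, block=BasicBlock, in_channels=32, out_channels=64,
            stride=2, level_root=False)
x = torch.randn(1, 32, 256, 256)
print(tree(x).shape)   # torch.Size([1, 64, 128, 128])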

3. DLA

With the Tree class in place, the full DLA architecture can be assembled (only the parts relevant here are shown; helpers such as _make_conv_level and load_pretrained_model are omitted from the excerpt):

def dla34(pretrained=True, **kwargs):  # DLA-34
    model = DLA([1, 1, 1, 2, 2, 1],
                [16, 32, 64, 128, 256, 512],
                block=BasicBlock, **kwargs)
    if pretrained:
        model.load_pretrained_model(data='imagenet', name='dla34', hash='ba72cf86')
    return model

DLA consists mainly of the following stages (output shapes for an n*3*512*512 input; a shape check follows the code below):

  • base_layer: conv_bn_relu -> n*16*512*512
  • level0: conv_bn_relu -> n*16*512*512
  • level1: conv_bn_relu -> n*32*256*256
  • level2: Tree(levels=1, level_root=False, 32→64) -> n*64*128*128
  • level3: Tree(levels=2, level_root=True, 64→128) -> n*128*64*64
  • level4: Tree(levels=2, level_root=True, 128→256) -> n*256*32*32
  • level5: Tree(levels=1, level_root=True, 256→512) -> n*512*16*16

class DLA(nn.Module):
    def __init__(self, levels, channels, num_classes=1000,
                 block=BasicBlock, residual_root=False, linear_root=False):
        super(DLA, self).__init__()
        self.channels = channels
        self.num_classes = num_classes
        self.base_layer = nn.Sequential(
            nn.Conv2d(3, channels[0], kernel_size=7, stride=1,
                      padding=3, bias=False),
            nn.BatchNorm2d(channels[0], momentum=BN_MOMENTUM),
            nn.ReLU(inplace=True))
        self.level0 = self._make_conv_level(
            channels[0], channels[0], levels[0])
        self.level1 = self._make_conv_level(
            channels[0], channels[1], levels[1], stride=2)
        self.level2 = Tree(levels[2], block, channels[1], channels[2], 2,
                           level_root=False,
                           root_residual=residual_root)
        self.level3 = Tree(levels[3], block, channels[2], channels[3], 2,
                           level_root=True, root_residual=residual_root)
        self.level4 = Tree(levels[4], block, channels[3], channels[4], 2,
                           level_root=True, root_residual=residual_root)
        self.level5 = Tree(levels[5], block, channels[4], channels[5], 2,
                           level_root=True, root_residual=residual_root)

    def forward(self, x):
        y = []
        x = self.base_layer(x)
        # run level0..level5 in turn and collect all six feature maps
        for i in range(6):
            x = getattr(self, 'level{}'.format(i))(x)
            y.append(x)
        return y
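
A quick way to confirm the shapes listed above is to push a dummy 512x512 input through dla34 (a sketch; pretrained=False avoids downloading the ImageNet weights):

import torch

model = dla34(pretrained=False)
feats = model(torch.randn(1, 3, 512, 512))
for i, f in enumerate(feats):
    print('level{}:'.format(i), tuple(f.shape))
# level0: (1, 16, 512, 512)   level1: (1, 32, 256, 256)   level2: (1, 64, 128, 128)
# level3: (1, 128, 64, 64)    level4: (1, 256, 32, 32)    level5: (1, 512, 16, 16)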

The final DLASeg model builds on DLA, combining deformable convolutions with upsampling layers to extract information while recovering spatial resolution. Its core components are DLAUp and IDAUp: each merge step applies two deformable convolutions (the proj and node DeformConv modules), with a ConvTranspose2d upsampling step in between. (A construction sketch follows the code below.)

class DLASeg(nn.Module):
    def __init__(self, base_name, heads, pretrained, down_ratio, final_kernel,
                 last_level, head_conv, out_channel=0):
        super(DLASeg, self).__init__()
        assert down_ratio in [2, 4, 8, 16]
        self.first_level = int(np.log2(down_ratio))
        self.last_level = last_level
        self.base = globals()[base_name](pretrained=pretrained)
        channels = self.base.channels
        scales = [2 ** i for i in range(len(channels[self.first_level:]))]
        self.dla_up = DLAUp(self.first_level, channels[self.first_level:], scales)

        if out_channel == 0:
            out_channel = channels[self.first_level]

        self.ida_up = IDAUp(out_channel, channels[self.first_level:self.last_level], 
                            [2 ** i for i in range(self.last_level - self.first_level)])
        
        self.heads = heads
        for head in self.heads:
            classes = self.heads[head]
            if head_conv > 0:
              fc = nn.Sequential(
                  nn.Conv2d(channels[self.first_level], head_conv,
                    kernel_size=3, padding=1, bias=True),
                  nn.ReLU(inplace=True),
                  nn.Conv2d(head_conv, classes, 
                    kernel_size=final_kernel, stride=1, 
                    padding=final_kernel // 2, bias=True))
              if 'hm' in head:
                fc[-1].bias.data.fill_(-2.19)
              else:
                fill_fc_weights(fc)
            else:
              fc = nn.Conv2d(channels[self.first_level], classes, 
                  kernel_size=final_kernel, stride=1, 
                  padding=final_kernel // 2, bias=True)
              if 'hm' in head:
                fc.bias.data.fill_(-2.19)
              else:
                fill_fc_weights(fc)
            self.__setattr__(head, fc)

    def forward(self, x):
        x = self.base(x)
        x = self.dla_up(x)

        y = []
        for i in range(self.last_level - self.first_level):
            y.append(x[i].clone())
        self.ida_up(y, 0, len(y))

        z = {}
        for head in self.heads:
            z[head] = self.__getattr__(head)(y[-1])
        return [z]
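
A sketch of how DLASeg is typically instantiated for COCO-style detection. The heads dict and head_conv=256 follow the usual CenterNet configuration (assumed here, not shown in this excerpt); running it requires the compiled DCNv2 extension, and pretrained=False skips the ImageNet download:

import torch

heads = {'hm': 80, 'wh': 2, 'reg': 2}          # heatmap, box size, center offset
net = DLASeg('dla34', heads, pretrained=False, down_ratio=4,
             final_kernel=1, last_level=5, head_conv=256)
out = net(torch.randn(1, 3, 512, 512))[0]      # forward returns a one-element list
for name, tensor in out.items():
    print(name, tuple(tensor.shape))
# hm (1, 80, 128, 128)   wh (1, 2, 128, 128)   reg (1, 2, 128, 128)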

4. IDAUp

IDAUp performs the decoding: it starts from the smaller scale and iteratively merges in the larger scales, corresponding to the orange arrows in the diagram.

  • layer_i = (layer_i->proj->up+layer_i-1)->node

  • proj and node are both DeformConv modules (deformable conv + BN + ReLU). proj reduces the channel dimension, up uses a transposed convolution to enlarge the spatial size so that it matches the previous layer, and node then aggregates the two feature maps. (A small sketch of the up module follows the code below.)

class IDAUp(nn.Module):
   def __init__(self, o, channels, up_f):
       super(IDAUp, self).__init__()
       for i in range(1, len(channels)):
           c = channels[i]
           f = int(up_f[i])  
           proj = DeformConv(c, o)
           node = DeformConv(o, o)
    
           up = nn.ConvTranspose2d(o, o, f * 2, stride=f, 
                                   padding=f // 2, output_padding=0,
                                   groups=o, bias=False)
           fill_up_weights(up)

           setattr(self, 'proj_' + str(i), proj)
           setattr(self, 'up_' + str(i), up)
           setattr(self, 'node_' + str(i), node)
                
       
   def forward(self, layers, startp, endp):
       for i in range(startp + 1, endp):
           upsample = getattr(self, 'up_' + str(i - startp))
           project = getattr(self, 'proj_' + str(i - startp))
           layers[i] = upsample(project(layers[i]))
           node = getattr(self, 'node_' + str(i - startp))
           layers[i] = node(layers[i] + layers[i - 1])
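
The up module is a depthwise transposed convolution whose kernel size, stride and padding are all derived from the scale factor f, so it multiplies the spatial size exactly by f; fill_up_weights in the repo initializes it as a bilinear kernel. A self-contained sketch for f = 2:

import torch
import torch.nn as nn

o, f = 64, 2
up = nn.ConvTranspose2d(o, o, f * 2, stride=f, padding=f // 2,
                        output_padding=0, groups=o, bias=False)
x = torch.randn(1, o, 32, 32)
print(up(x).shape)   # torch.Size([1, 64, 64, 64]), spatial size doubled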

5. DLAUp

DLAUp contains three IDAUp stages (a shape-check sketch follows the code below):

  • ida(layers, 4, 6)
    • layer5 = (layer5->proj->up+layer4)->node # n*512*16*16->n*256*32*32
  • ida(layers, 3, 6)
    • layer4 = (layer4->proj->up+layer3)->node # n*256*32*32->n*128*64*64
    • layer5 = (layer5->proj->up+layer4)->node # n*256*32*32->n*128*64*64
  • ida(layers, 2, 6)
    • layer3 = (layer3->proj->up+layer2)->node # n*128*64*64->n*64*128*128
    • layer4 = (layer4->proj->up+layer3)->node # n*128*64*64->n*64*128*128
    • layer5 = (layer5->proj->up+layer4)->node # n*128*64*64->n*64*128*128
  • The output list out has length 4: the original layer5 plus layer5 after each of the three transformations, ordered by descending spatial size.

class DLAUp(nn.Module):
   def __init__(self, startp, channels, scales, in_channels=None):
       super(DLAUp, self).__init__()
       self.startp = startp
       if in_channels is None:
           in_channels = channels
       self.channels = channels
       channels = list(channels)
       scales = np.array(scales, dtype=int)
       for i in range(len(channels) - 1):
           j = -i - 2
           setattr(self, 'ida_{}'.format(i),
                   IDAUp(channels[j], in_channels[j:],
                         scales[j:] // scales[j]))
           scales[j + 1:] = scales[j]
           in_channels[j + 1:] = [channels[j] for _ in channels[j + 1:]]

   def forward(self, layers):
       out = [layers[-1]] # start with 32
       for i in range(len(layers) - self.startp - 1):
           ida = getattr(self, 'ida_{}'.format(i))
           ida(layers, len(layers) -i - 2, len(layers))
           out.insert(0, layers[-1])
       return out
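
A sketch that reproduces the shape bookkeeping above (it needs the compiled DCNv2 extension, since IDAUp's proj/node are DeformConv modules; the channels and scales values are what DLASeg passes in for dla34 with down_ratio=4):

import torch

base = dla34(pretrained=False)
layers = base(torch.randn(1, 3, 512, 512))              # level0..level5 features
dla_up = DLAUp(startp=2, channels=[64, 128, 256, 512],  # channels[first_level:]
               scales=[1, 2, 4, 8])
for f in dla_up(layers):
    print(tuple(f.shape))
# (1, 64, 128, 128)  (1, 128, 64, 64)  (1, 256, 32, 32)  (1, 512, 16, 16)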

6. DLASeg

Back to DLASeg: it combines the DLA, DLAUp and IDAUp modules described above with three output heads. Its components are summarized below.

  • self.base is the DLA structure described above, consisting of base_layer plus the six stages level0 through level5
  • self.dla_up is the DLAUp structure described above, containing three IDAUp stages that fuse shallow and deep features
  • self.ida_up contains a single IDAUp:
    • ida(y, 0, 3)
    • y[1] = (y[1]->proj->up+y[0])->node # n*128*64*64->n*64*128*128
    • y[2] = (y[2]->proj->up+y[1])->node # n*256*32*32->n*64*128*128
  • The heads (self.hm / self.wh / self.reg) all use the conv_relu_conv structure. Each head takes y[2] as input, which has already fused information from the different levels. (A sketch of one head follows.)
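
A minimal sketch of one head as built in DLASeg above, here the 80-class heatmap head with head_conv=256 and final_kernel=1 (the typical CenterNet settings, assumed here; the input has 64 channels = channels[self.first_level]). The -2.19 bias makes the initial sigmoid heatmap output roughly 0.1, the usual focal-loss prior:

import torch
import torch.nn as nn

head_conv, classes, final_kernel = 256, 80, 1
hm = nn.Sequential(
    nn.Conv2d(64, head_conv, kernel_size=3, padding=1, bias=True),
    nn.ReLU(inplace=True),
    nn.Conv2d(head_conv, classes, kernel_size=final_kernel, stride=1,
              padding=final_kernel // 2, bias=True))
hm[-1].bias.data.fill_(-2.19)                # sigmoid(-2.19) ~= 0.10
print(torch.sigmoid(torch.tensor(-2.19)))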
