您的当前位置:首页正文

【YOLOv8创新升级2】:RepVGG模型结构与YOLOv8相结合 (代码实现)

来源:爱站旅游
导读【YOLOv8创新升级2】:RepVGG模型结构与YOLOv8相结合 (代码实现)

摘要

RepVGG绝对可以算得上2020年在backbone方面有很大影响力的工作,其核心思想是:通过结构重参数化思想,让训练网络的多路结构(多分支模型训练时的优势——性能高)转换为推理网络的单路结构(模型推理时的好处——速度快、省内存),结构中均为3x3的卷积核,同时,计算库(如CuDNN,Intel MKL)和硬件针对3x3卷积有深度的优化,最终可以使网络有着高效的推理速率(其实TensorRT在构建engine阶段,对模型进行重构,底层也是应用了卷积合并、多分支融合思想,来使得模型最终有着高性能的推理速率)。

代码实现

核心代码

import torch
import torch.nn as nn
import numpy as np
import torch.nn.functional as F
 
__all__ = ['RepVGGBlock']
 
 
def conv_bn(in_channels, out_channels, kernel_size, stride, padding, groups=1):
    """Build a bias-free Conv2d followed by BatchNorm2d as an nn.Sequential.

    The conv carries no bias because the following BatchNorm makes it
    redundant; the pair is later folded into a single conv by the RepVGG
    re-parameterization.  Submodules are registered under the names
    'conv' and 'bn', which the fusion code relies on.
    """
    conv = nn.Conv2d(in_channels, out_channels, kernel_size,
                     stride=stride, padding=padding, groups=groups, bias=False)
    block = nn.Sequential()
    block.add_module('conv', conv)
    block.add_module('bn', nn.BatchNorm2d(out_channels))
    return block
 
 
class SEBlock(nn.Module):
    """Squeeze-and-Excitation channel attention.

    Global-average-pools the input, passes the pooled vector through a
    1x1 down-projection, ReLU, 1x1 up-projection and sigmoid, then scales
    each input channel by the resulting gate in (0, 1).

    Args:
        input_channels: number of channels of the input (and output) map.
        internal_neurons: bottleneck width (callers use input_channels // 16).
    """

    def __init__(self, input_channels, internal_neurons):
        super(SEBlock, self).__init__()
        self.down = nn.Conv2d(in_channels=input_channels, out_channels=internal_neurons, kernel_size=1, stride=1,
                              bias=True)
        self.up = nn.Conv2d(in_channels=internal_neurons, out_channels=input_channels, kernel_size=1, stride=1,
                            bias=True)
        self.input_channels = input_channels

    def forward(self, inputs):
        # Global average pool to 1x1.  Generalized from the original
        # F.avg_pool2d(inputs, kernel_size=inputs.size(3)), which silently
        # assumed square (H == W) feature maps; adaptive pooling is identical
        # for square inputs and also correct for non-square ones.
        x = F.adaptive_avg_pool2d(inputs, output_size=1)
        x = self.down(x)
        x = F.relu(x)
        x = self.up(x)
        x = torch.sigmoid(x)
        # Per-channel gate, broadcast over the spatial dimensions.
        x = x.view(-1, self.input_channels, 1, 1)
        return inputs * x
 
 
class RepVGGBlock(nn.Module):
    """RepVGG basic block.

    Training-time structure is multi-branch: a kernel_size conv+BN branch
    (rbr_dense), a 1x1 conv+BN branch (rbr_1x1) and, when input/output
    shapes match, a bare-BN identity branch (rbr_identity).  The branches
    are summed, optionally gated by SE, and passed through SiLU.  For
    inference, switch_to_deploy() folds all branches into a single conv
    (rbr_reparam) with identical outputs.
    """

    def __init__(self, in_channels, out_channels, kernel_size=3,
                 stride=1, padding=1, dilation=1, groups=1, padding_mode='zeros', deploy=False, use_se=False):
        super(RepVGGBlock, self).__init__()
        self.deploy = deploy
        self.groups = groups
        self.in_channels = in_channels

        # Padding of the 1x1 branch so it stays spatially aligned with the
        # kernel_size branch (0 for the default 3x3 / padding=1 case).
        padding_11 = padding - kernel_size // 2

        self.nonlinearity = nn.SiLU()

        if use_se:
            # Squeeze-and-Excitation with a 1/16 bottleneck.
            self.se = SEBlock(out_channels, internal_neurons=out_channels // 16)
        else:
            self.se = nn.Identity()

        if deploy:
            # Inference-time structure: a single conv with bias.
            self.rbr_reparam = nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size,
                                         stride=stride,
                                         padding=padding, dilation=dilation, groups=groups, bias=True,
                                         padding_mode=padding_mode)

        else:
            # Identity branch only exists when it is shape-preserving.
            self.rbr_identity = nn.BatchNorm2d(
                num_features=in_channels) if out_channels == in_channels and stride == 1 else None
            self.rbr_dense = conv_bn(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size,
                                     stride=stride, padding=padding, groups=groups)
            self.rbr_1x1 = conv_bn(in_channels=in_channels, out_channels=out_channels, kernel_size=1, stride=stride,
                                   padding=padding_11, groups=groups)

    def get_equivalent_kernel_bias(self):
        """Fuse the three branches into one (kernel, bias) pair for a single conv."""
        kernel3x3, bias3x3 = self._fuse_bn_tensor(self.rbr_dense)
        kernel1x1, bias1x1 = self._fuse_bn_tensor(self.rbr_1x1)
        kernelid, biasid = self._fuse_bn_tensor(self.rbr_identity)
        return kernel3x3 + self._pad_1x1_to_3x3_tensor(kernel1x1) + kernelid, bias3x3 + bias1x1 + biasid

    def _pad_1x1_to_3x3_tensor(self, kernel1x1):
        """Zero-pad a 1x1 kernel to 3x3 so it can be summed with the dense kernel."""
        if kernel1x1 is None:
            return 0
        else:
            return torch.nn.functional.pad(kernel1x1, [1, 1, 1, 1])

    def _fuse_bn_tensor(self, branch):
        """Fold a conv+BN Sequential (or a bare BN identity branch) into (kernel, bias)."""
        if branch is None:
            return 0, 0
        if isinstance(branch, nn.Sequential):
            kernel = branch.conv.weight
            running_mean = branch.bn.running_mean
            running_var = branch.bn.running_var
            gamma = branch.bn.weight
            beta = branch.bn.bias
            eps = branch.bn.eps
        else:
            assert isinstance(branch, nn.BatchNorm2d)
            # Express the identity as a 3x3 conv whose center tap is 1,
            # respecting the grouped-conv layout; cached in self.id_tensor.
            if not hasattr(self, 'id_tensor'):
                input_dim = self.in_channels // self.groups
                kernel_value = np.zeros((self.in_channels, input_dim, 3, 3), dtype=np.float32)
                for i in range(self.in_channels):
                    kernel_value[i, i % input_dim, 1, 1] = 1
                self.id_tensor = torch.from_numpy(kernel_value).to(branch.weight.device)
            kernel = self.id_tensor
            running_mean = branch.running_mean
            running_var = branch.running_var
            gamma = branch.weight
            beta = branch.bias
            eps = branch.eps
        # Standard BN folding: w' = w * gamma/std, b' = beta - mean * gamma/std.
        std = (running_var + eps).sqrt()
        t = (gamma / std).reshape(-1, 1, 1, 1)
        return kernel * t, beta - running_mean * gamma / std

    def forward(self, inputs):
        # Fused path: present either when constructed with deploy=True or
        # after switch_to_deploy().  Bug fix: the original tested
        # `self.deploy` first and called self.rbr_dense, which does not
        # exist when the block is built with deploy=True (AttributeError),
        # and that branch also bypassed the SE module.
        if hasattr(self, 'rbr_reparam'):
            return self.nonlinearity(self.se(self.rbr_reparam(inputs)))

        if self.rbr_identity is None:
            id_out = 0
        else:
            id_out = self.rbr_identity(inputs)

        return self.nonlinearity(self.se(self.rbr_dense(inputs) + self.rbr_1x1(inputs) + id_out))

    def switch_to_deploy(self):
        """Re-parameterize the trained multi-branch block into one conv, in place."""
        if hasattr(self, 'rbr_1x1'):
            kernel, bias = self.get_equivalent_kernel_bias()
            self.rbr_reparam = nn.Conv2d(in_channels=self.rbr_dense.conv.in_channels, out_channels=self.rbr_dense.conv.out_channels,
                                    kernel_size=self.rbr_dense.conv.kernel_size, stride=self.rbr_dense.conv.stride,
                                    padding=self.rbr_dense.conv.padding, dilation=self.rbr_dense.conv.dilation, groups=self.rbr_dense.conv.groups, bias=True)
            self.rbr_reparam.weight.data = kernel
            self.rbr_reparam.bias.data = bias
            # Detach everything: the fused block is inference-only.
            for para in self.parameters():
                para.detach_()
            # rbr_dense is aliased to the fused conv rather than deleted,
            # presumably so framework code that expects the attribute still
            # works — TODO confirm against the ultralytics fuse path.
            self.rbr_dense = self.rbr_reparam
            self.__delattr__('rbr_1x1')
            if hasattr(self, 'rbr_identity'):
                self.__delattr__('rbr_identity')
            if hasattr(self, 'id_tensor'):
                self.__delattr__('id_tensor')
            self.deploy = True

实现

在 ultralytics/nn/modules/ 下新建 RepVGG.py,并将上述代码写入。

from .RepVGGimport *
        # ============== RepVGGBlock ==============
        elif m is RepVGGBlock:
            c1, c2 = ch[f], args[0]
            if c2 != nc:
                c2 = make_divisible(min(c2, max_channels) * width, 8)
            args = [c1, c2, *args[1:]]
        # =========================================

![在这里插入图片描述](https://i-blog.csdnimg.cn/direct/21fc90047e2d4163a1ce0441f64f1215.png)

添加头文件
![在这里插入图片描述](https://i-blog.csdnimg.cn/direct/8d966a5520574126bbe90d62afce1815.png)

# Ultralytics YOLO 🚀, AGPL-3.0 license
# YOLOv8 object detection model with P3-P5 outputs. For Usage examples see https://docs.ultralytics.com/tasks/detect
# NOTE: the stride-2 Conv downsampling layers of the stock backbone are replaced by RepVGGBlock;
# channel counts below are pre-scaling (each scale applies its width multiplier via make_divisible).
 
# Parameters
nc: 80  # number of classes
scales: # model compound scaling constants, i.e. 'model=yolov8n.yaml' will call yolov8.yaml with scale 'n'
  # [depth, width, max_channels]
  n: [0.33, 0.25, 1024] # YOLOv8n summary: 225 layers,  3157200 parameters,  3157184 gradients,   8.9 GFLOPs
  s: [0.33, 0.50, 1024] # YOLOv8s summary: 225 layers, 11166560 parameters, 11166544 gradients,  28.8 GFLOPs
  m: [0.67, 0.75, 768] # YOLOv8m summary: 295 layers, 25902640 parameters, 25902624 gradients,  79.3 GFLOPs
  l: [1.00, 1.00, 512] # YOLOv8l summary: 365 layers, 43691520 parameters, 43691504 gradients, 165.7 GFLOPs
  x: [1.00, 1.25, 512] # YOLOv8x summary: 365 layers, 68229648 parameters, 68229632 gradients, 258.5 GFLOPs
 
# YOLOv8.0n backbone
backbone:
  # [from, repeats, module, args]                   ·  640 × 640 × 3
  - [-1, 1, Conv, [64, 3, 2]]   # 0-P1/2            ·  320 × 320 × 64
  - [-1, 1, RepVGGBlock, [128, 3, 2]]  # 1-P2/4     ·  160 × 160 × 128
  - [-1, 3, C2f, [128, True]]   # 2                 ·  160 × 160 × 128
  - [-1, 1, RepVGGBlock, [256, 3, 2]]  # 3-P3/8     ·  80  × 80  × 256
  - [-1, 6, C2f, [256, True]]   # 4                 ·  80  × 80  × 256
  - [-1, 1, RepVGGBlock, [512, 3, 2]]  # 5-P4/16    ·  40  × 40  × 512
  - [-1, 6, C2f, [512, True]]   # 6                 ·  40  × 40  × 512
  - [-1, 1, RepVGGBlock, [1024, 3, 2]] # 7-P5/32    ·  20  × 20  × 1024
  - [-1, 3, C2f, [1024, True]]  # 8                 ·  20  × 20  × 1024
  - [-1, 1, SPPF, [1024, 5]]    # 9                 ·  20  × 20  × 1024
 
# YOLOv8.0n head
head:
  - [-1, 1, nn.Upsample, [None, 2, "nearest"]] #     ·  40 × 40 × 1024
  - [[-1, 6], 1, Concat, [1]] # cat backbone P4      ·  40 × 40 × 1536
  - [-1, 3, C2f, [512]] # 12                         ·  40 × 40 × 512
 
  - [-1, 1, nn.Upsample, [None, 2, "nearest"]] #     ·  80 × 80 × 512
  - [[-1, 4], 1, Concat, [1]] # cat backbone P3      ·  80 × 80 × 768
  - [-1, 3, C2f, [256]] # 15 (P3/8-small)            ·  80 × 80 × 256
 
  - [-1, 1, Conv, [256, 3, 2]] #                     ·  40 × 40 × 256
  - [[-1, 12], 1, Concat, [1]] # cat head P4         ·  40 × 40 × 768
  - [-1, 3, C2f, [512]] # 18 (P4/16-medium)          ·  40 × 40 × 512
 
  - [-1, 1, Conv, [512, 3, 2]] #                     ·  20 × 20 × 512
  - [[-1, 9], 1, Concat, [1]] # cat head P5          ·  20 × 20 × 1536
  - [-1, 3, C2f, [1024]] # 21 (P5/32-large)          ·  20 × 20 × 1024
 
  - [[15, 18, 21], 1, Detect, [nc]] # Detect(P3, P4, P5)

训练代码

import warnings
warnings.filterwarnings('ignore')
from ultralytics import YOLO
 
if __name__ == '__main__':
    # Build the model from the RepVGG-modified YOLOv8 config.
    model = YOLO(r'ultralytics\cfg\models\v8\yolov8-RepVgg.yaml')

    # Training hyper-parameters, gathered in one place for readability.
    train_kwargs = dict(
        data=r'myway.yaml',
        cache=False,
        imgsz=960,
        epochs=300,
        single_cls=False,   # whether this is single-class detection
        batch=4,
        close_mosaic=0,
        workers=0,
        device='0',
        optimizer='SGD',    # using SGD
        # resume='runs/train/exp/weights/last.pt',  # point at last.pt to resume training
        amp=False,          # disable AMP if the training loss becomes NaN
        project='runs/train',
        name='exp',
    )
    model.train(**train_kwargs)

因篇幅问题不能全部显示,请点此查看更多更全内容

Top