Optimizing a High-Resolution Network with Depthwise Separable Convolutions
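For background, a depthwise separable convolution factors a standard convolution into a depthwise step (one 3x3 filter per input channel, via `groups=in_channels`) and a pointwise step (a 1x1 convolution that mixes channels), shrinking a 3x3 layer's weight count from 9·C_in·C_out to 9·C_in + C_in·C_out. The following minimal sketch is illustrative only and is not part of the original code; the names `standard` and `separable` are ours:

```python
import torch
import torch.nn as nn

c_in, c_out = 64, 128

# Standard 3x3 convolution: 9 * c_in * c_out weights.
standard = nn.Conv2d(c_in, c_out, kernel_size=3, padding=1, bias=False)

# Depthwise separable equivalent: a per-channel 3x3 conv (9 * c_in weights)
# followed by a 1x1 pointwise conv (c_in * c_out weights).
separable = nn.Sequential(
    nn.Conv2d(c_in, c_in, kernel_size=3, padding=1, groups=c_in, bias=False),
    nn.Conv2d(c_in, c_out, kernel_size=1, bias=False),
)

params = lambda m: sum(p.numel() for p in m.parameters())
print(params(standard))   # 73728
print(params(separable))  # 8768

x = torch.randn(1, c_in, 32, 32)
assert standard(x).shape == separable(x).shape  # both (1, 128, 32, 32)
```

Both modules accept the same input and produce the same output shape, which is what makes the replacement below a drop-in change.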
Q: Please replace the convolutions in the following code with depthwise separable convolutions.

A: This example shows how to replace the standard convolution layers of a high-resolution network (HRNet) with depthwise separable convolutions in PyTorch, reducing the parameter count and computational cost. Note that `conv3x3` pairs the depthwise 3x3 convolution with a pointwise 1x1 convolution so that it still produces `out_planes` channels:

```python
# ------------------------------------------------------------------------------
# Copyright (c) Microsoft
# Licensed under the MIT License.
# Create by Bin Xiao (Bin.Xiao@microsoft.com)
# Modified by Tianheng Cheng(tianhengcheng@gmail.com), Yang Zhao
# ------------------------------------------------------------------------------

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import os
import logging

import torch
import torch.nn as nn
import torch.nn.functional as F


BatchNorm2d = nn.BatchNorm2d
BN_MOMENTUM = 0.01
logger = logging.getLogger(__name__)


def conv3x3(in_planes, out_planes, stride=1):
    """3x3 depthwise separable convolution with padding: a depthwise 3x3
    (groups=in_planes) followed by a pointwise 1x1 that maps the result
    to out_planes channels."""
    return nn.Sequential(
        nn.Conv2d(in_planes, in_planes, kernel_size=3, stride=stride,
                  padding=1, groups=in_planes, bias=False),
        nn.Conv2d(in_planes, out_planes, kernel_size=1, bias=False),
    )


class BasicBlock(nn.Module):
    expansion = 1

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(BasicBlock, self).__init__()
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = BatchNorm2d(planes, momentum=BN_MOMENTUM)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = BatchNorm2d(planes, momentum=BN_MOMENTUM)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        residual = x

        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)

        out = self.conv2(out)
        out = self.bn2(out)

        if self.downsample is not None:
            residual = self.downsample(x)

        out += residual
        out = self.relu(out)

        return out


class Bottleneck(nn.Module):
    expansion = 4

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(Bottleneck, self).__init__()
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
        self.bn1 = BatchNorm2d(planes, momentum=BN_MOMENTUM)
        # Depthwise 3x3; the surrounding 1x1 convs act as the pointwise steps.
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
                               padding=1, groups=planes, bias=False)
        self.bn2 = BatchNorm2d(planes, momentum=BN_MOMENTUM)
        self.conv3 = nn.Conv2d(planes, planes * self.expansion, kernel_size=1,
                               bias=False)
        self.bn3 = BatchNorm2d(planes * self.expansion,
                               momentum=BN_MOMENTUM)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        residual = x

        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)

        out = self.conv2(out)
        out = self.bn2(out)
        out = self.relu(out)

        out = self.conv3(out)
        out = self.bn3(out)

        if self.downsample is not None:
            residual = self.downsample(x)

        out += residual
        out = self.relu(out)

        return out


class HighResolutionModule(nn.Module):
    def __init__(self, num_branches, blocks, num_blocks, num_inchannels,
                 num_channels, fuse_method, multi_scale_output=True):
        super(HighResolutionModule, self).__init__()
        self._check_branches(
            num_branches, blocks, num_blocks, num_inchannels, num_channels)

        self.num_inchannels = num_inchannels
        self.fuse_method = fuse_method
        self.num_branches = num_branches

        self.multi_scale_output = multi_scale_output

        self.branches = self._make_branches(
            num_branches, blocks, num_blocks, num_channels)
        self.fuse_layers = self._make_fuse_layers()
        self.relu = nn.ReLU(inplace=True)

    def _check_branches(self, num_branches, blocks, num_blocks,
                        num_inchannels, num_channels):
        if num_branches != len(num_blocks):
            error_msg = 'NUM_BRANCHES({}) <> NUM_BLOCKS({})'.format(
                num_branches, len(num_blocks))
            logger.error(error_msg)
            raise ValueError(error_msg)

        if num_branches != len(num_channels):
            error_msg = 'NUM_BRANCHES({}) <> NUM_CHANNELS({})'.format(
                num_branches, len(num_channels))
            logger.error(error_msg)
            raise ValueError(error_msg)

        if num_branches != len(num_inchannels):
            error_msg = 'NUM_BRANCHES({}) <> NUM_INCHANNELS({})'.format(
                num_branches, len(num_inchannels))
            logger.error(error_msg)
            raise ValueError(error_msg)

    def _make_one_branch(self, branch_index, block, num_blocks, num_channels,
                         stride=1):
        downsample = None
        if stride != 1 or \
                self.num_inchannels[branch_index] != num_channels[branch_index] * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(self.num_inchannels[branch_index],
                          num_channels[branch_index] * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                BatchNorm2d(num_channels[branch_index] * block.expansion,
                            momentum=BN_MOMENTUM),
            )

        layers = []
        layers.append(block(self.num_inchannels[branch_index],
                            num_channels[branch_index], stride, downsample))
        self.num_inchannels[branch_index] = \
            num_channels[branch_index] * block.expansion
        for i in range(1, num_blocks[branch_index]):
            layers.append(block(self.num_inchannels[branch_index],
                                num_channels[branch_index]))

        return nn.Sequential(*layers)

    def _make_branches(self, num_branches, block, num_blocks, num_channels):
        branches = []

        for i in range(num_branches):
            branches.append(
                self._make_one_branch(i, block, num_blocks, num_channels))

        return nn.ModuleList(branches)

    def _make_fuse_layers(self):
        if self.num_branches == 1:
            return None

        num_branches = self.num_branches
        num_inchannels = self.num_inchannels
        fuse_layers = []
        for i in range(num_branches if self.multi_scale_output else 1):
            fuse_layer = []
            for j in range(num_branches):
                if j > i:
                    fuse_layer.append(nn.Sequential(
                        nn.Conv2d(num_inchannels[j],
                                  num_inchannels[i],
                                  1,
                                  1,
                                  0,
                                  bias=False),
                        BatchNorm2d(num_inchannels[i], momentum=BN_MOMENTUM)))
                    # nn.Upsample(scale_factor=2*(j-i), mode='nearest')))
                elif j == i:
                    fuse_layer.append(None)
                else:
                    conv3x3s = []
                    for k in range(i - j):
                        if k == i - j - 1:
                            num_outchannels_conv3x3 = num_inchannels[i]
                            conv3x3s.append(nn.Sequential(
                                nn.Conv2d(num_inchannels[j],
                                          num_outchannels_conv3x3,
                                          3, 2, 1, bias=False),
                                BatchNorm2d(num_outchannels_conv3x3,
                                            momentum=BN_MOMENTUM)))
                        else:
                            num_outchannels_conv3x3 = num_inchannels[j]
                            conv3x3s.append(nn.Sequential(
                                nn.Conv2d(num_inchannels[j],
                                          num_outchannels_conv3x3,
                                          3, 2, 1, bias=False),
                                BatchNorm2d(num_outchannels_conv3x3,
                                            momentum=BN_MOMENTUM),
                                nn.ReLU(inplace=True)))
                    fuse_layer.append(nn.Sequential(*conv3x3s))
            fuse_layers.append(nn.ModuleList(fuse_layer))

        return nn.ModuleList(fuse_layers)

    def get_num_inchannels(self):
        return self.num_inchannels

    def forward(self, x):
        if self.num_branches == 1:
            return [self.branches[0](x[0])]

        for i in range(self.num_branches):
            x[i] = self.branches[i](x[i])

        x_fuse = []
        for i in range(len(self.fuse_layers)):
            y = x[0] if i == 0 else self.fuse_layers[i][0](x[0])
            for j in range(1, self.num_branches):
                if i == j:
                    y = y + x[j]
                elif j > i:
                    y = y + F.interpolate(
                        self.fuse_layers[i][j](x[j]),
                        size=[x[i].shape[2], x[i].shape[3]],
                        mode='bilinear', align_corners=False)
                else:
                    y = y + self.fuse_layers[i][j](x[j])
            x_fuse.append(self.relu(y))

        return x_fuse


blocks_dict = {
    'BASIC': BasicBlock,
    'BOTTLENECK': Bottleneck
}


class HighResolutionNet(nn.Module):

    def __init__(self, config, **kwargs):
        self.inplanes = 64
        extra = config.MODEL.EXTRA
        super(HighResolutionNet, self).__init__()

        # stem net
        self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=2, padding=1,
                               bias=False)
        self.bn1 = BatchNorm2d(64, momentum=BN_MOMENTUM)
        self.conv2 = nn.Conv2d(64, 64, kernel_size=3, stride=2, padding=1,
                               bias=False)
        self.bn2 = BatchNorm2d(64, momentum=BN_MOMENTUM)
        self.relu = nn.ReLU(inplace=True)
        self.sf = nn.Softmax(dim=1)
        self.layer1 = self._make_layer(Bottleneck, 64, 64, 4)

        self.stage2_cfg = extra['STAGE2']
        num_channels = self.stage2_cfg['NUM_CHANNELS']
        block = blocks_dict[self.stage2_cfg['BLOCK']]
        num_channels = [
            num_channels[i] * block.expansion for i in range(len(num_channels))]
        self.transition1 = self._make_transition_layer(
            [256], num_channels)
        self.stage2, pre_stage_channels = self._make_stage(
            self.stage2_cfg, num_channels)

        self.stage3_cfg = extra['STAGE3']
        num_channels = self.stage3_cfg['NUM_CHANNELS']
        block = blocks_dict[self.stage3_cfg['BLOCK']]
        num_channels = [
            num_channels[i] * block.expansion for i in range(len(num_channels))]
        self.transition2 = self._make_transition_layer(
            pre_stage_channels, num_channels)
        self.stage3, pre_stage_channels = self._make_stage(
            self.stage3_cfg, num_channels)

        self.stage4_cfg = extra['STAGE4']
        num_channels = self.stage4_cfg['NUM_CHANNELS']
        block = blocks_dict[self.stage4_cfg['BLOCK']]
        num_channels = [
            num_channels[i] * block.expansion for i in range(len(num_channels))]
        self.transition3 = self._make_transition_layer(
            pre_stage_channels, num_channels)
        self.stage4, pre_stage_channels = self._make_stage(
            self.stage4_cfg, num_channels, multi_scale_output=True)

        final_inp_channels = sum(pre_stage_channels)

        self.head = nn.Sequential(
            nn.Conv2d(
                in_channels=final_inp_channels,
                out_channels=final_inp_channels,
                kernel_size=1,
                stride=1,
                padding=1 if extra.FINAL_CONV_KERNEL == 3 else 0),
            BatchNorm2d(final_inp_channels, momentum=BN_MOMENTUM),
            nn.ReLU(inplace=True),
            nn.Conv2d(
                in_channels=final_inp_channels,
                out_channels=config.MODEL.NUM_JOINTS,
                kernel_size=extra.FINAL_CONV_KERNEL,
                stride=1,
                padding=1 if extra.FINAL_CONV_KERNEL == 3 else 0)
        )

    def _make_transition_layer(
            self, num_channels_pre_layer, num_channels_cur_layer):
        num_branches_cur = len(num_channels_cur_layer)
        num_branches_pre = len(num_channels_pre_layer)

        transition_layers = []
        for i in range(num_branches_cur):
            if i < num_branches_pre:
                if num_channels_cur_layer[i] != num_channels_pre_layer[i]:
                    transition_layers.append(nn.Sequential(
                        nn.Conv2d(num_channels_pre_layer[i],
                                  num_channels_cur_layer[i],
                                  3,
                                  1,
                                  1,
                                  bias=False),
                        BatchNorm2d(
                            num_channels_cur_layer[i], momentum=BN_MOMENTUM),
                        nn.ReLU(inplace=True)))
                else:
                    transition_layers.append(None)
            else:
                conv3x3s = []
                for j in range(i + 1 - num_branches_pre):
                    inchannels = num_channels_pre_layer[-1]
                    outchannels = num_channels_cur_layer[i] \
                        if j == i - num_branches_pre else inchannels
                    conv3x3s.append(nn.Sequential(
                        nn.Conv2d(
                            inchannels, outchannels, 3, 2, 1, bias=False),
                        BatchNorm2d(outchannels, momentum=BN_MOMENTUM),
                        nn.ReLU(inplace=True)))
                transition_layers.append(nn.Sequential(*conv3x3s))

        return nn.ModuleList(transition_layers)

    def _make_layer(self, block, inplanes, planes, blocks, stride=1):
        downsample = None
        if stride != 1 or inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                BatchNorm2d(planes * block.expansion, momentum=BN_MOMENTUM),
            )

        layers = []
        layers.append(block(inplanes, planes, stride, downsample))
        inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(inplanes, planes))

        return nn.Sequential(*layers)

    def _make_stage(self, layer_config, num_inchannels,
                    multi_scale_output=True):
        num_modules = layer_config['NUM_MODULES']
        num_branches = layer_config['NUM_BRANCHES']
        num_blocks = layer_config['NUM_BLOCKS']
        num_channels = layer_config['NUM_CHANNELS']
        block = blocks_dict[layer_config['BLOCK']]
        fuse_method = layer_config['FUSE_METHOD']

        modules = []
        for i in range(num_modules):
            # multi_scale_output is only used in the last module
            if not multi_scale_output and i == num_modules - 1:
                reset_multi_scale_output = False
            else:
                reset_multi_scale_output = True
            modules.append(
                HighResolutionModule(num_branches,
                                     block,
                                     num_blocks,
                                     num_inchannels,
                                     num_channels,
                                     fuse_method,
                                     reset_multi_scale_output)
            )
            num_inchannels = modules[-1].get_num_inchannels()

        return nn.Sequential(*modules), num_inchannels

    def forward(self, x):
        # h, w = x.size(2), x.size(3)
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.conv2(x)
        x = self.bn2(x)
        x = self.relu(x)
        x = self.layer1(x)

        x_list = []
        for i in range(self.stage2_cfg['NUM_BRANCHES']):
            if self.transition1[i] is not None:
                x_list.append(self.transition1[i](x))
            else:
                x_list.append(x)
        y_list = self.stage2(x_list)

        x_list = []
        for i in range(self.stage3_cfg['NUM_BRANCHES']):
            if self.transition2[i] is not None:
                x_list.append(self.transition2[i](y_list[-1]))
            else:
                x_list.append(y_list[i])
        y_list = self.stage3(x_list)

        x_list = []
        for i in range(self.stage4_cfg['NUM_BRANCHES']):
            if self.transition3[i] is not None:
                x_list.append(self.transition3[i](y_list[-1]))
            else:
                x_list.append(y_list[i])
        x = self.stage4(x_list)

        # Head Part
        height, width = x[0].size(2), x[0].size(3)
        x1 = F.interpolate(x[1], size=(height, width), mode='bilinear', align_corners=False)
        x2 = F.interpolate(x[2], size=(height, width), mode='bilinear', align_corners=False)
        x3 = F.interpolate(x[3], size=(height, width), mode='bilinear', align_corners=False)
        x = torch.cat([x[0], x1, x2, x3], 1)
        x = self.head(x)

        return x

    def init_weights(self, pretrained=''):
        logger.info('=> init weights from normal distribution')
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                # nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
                nn.init.normal_(m.weight, std=0.001)
                # nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
        if os.path.isfile(pretrained):
            pretrained_dict = torch.load(pretrained)
            logger.info('=> loading pretrained model {}'.format(pretrained))
            model_dict = self.state_dict()
            pretrained_dict = {k: v for k, v in pretrained_dict.items()
                               if k in model_dict.keys()}
            for k, _ in pretrained_dict.items():
                logger.info(
                    '=> loading {} pretrained model {}'.format(k, pretrained))
            model_dict.update(pretrained_dict)
            self.load_state_dict(model_dict)


def get_face_alignment_net(config, **kwargs):

    model = HighResolutionNet(config, **kwargs)
    pretrained = config.MODEL.PRETRAINED if config.MODEL.INIT_WEIGHTS else ''
    model.init_weights(pretrained=pretrained)

    return model
```
Summary of the changes: the standard convolution inside `conv3x3` is replaced with a depthwise 3x3 convolution, `nn.Conv2d(in_planes, in_planes, kernel_size=3, stride=stride, padding=1, groups=in_planes, bias=False)`, followed by a pointwise 1x1 convolution so that the function still outputs `out_planes` channels. This turns the 3x3 convolutions in `BasicBlock` and `Bottleneck` into depthwise separable layers; in `Bottleneck`, the 1x1 convolutions before and after `conv2` already act as the pointwise steps, so `conv2` only needs `groups=planes`. In addition, the head of `HighResolutionNet` can be modified by replacing its `nn.Conv2d` layers as follows:

```python
# Modified head: the first 1x1 convolution becomes depthwise
self.head = nn.Sequential(
    nn.Conv2d(
        in_channels=final_inp_channels,
        out_channels=final_inp_channels,
        kernel_size=1,
        stride=1,
        padding=1 if extra.FINAL_CONV_KERNEL == 3 else 0,
        groups=final_inp_channels,
        bias=False
    ),
    BatchNorm2d(final_inp_channels, momentum=BN_MOMENTUM),
    nn.ReLU(inplace=True),
    nn.Conv2d(
        in_channels=final_inp_channels,
        out_channels=config.MODEL.NUM_JOINTS,
        kernel_size=extra.FINAL_CONV_KERNEL,
        stride=1,
        padding=1 if extra.FINAL_CONV_KERNEL == 3 else 0,
        bias=False
    )
)
```
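As a quick sanity check, one can compare the parameter counts of the original dense head and the depthwise variant. This snippet is not part of the original post; `final_inp_channels = 270` (18 + 36 + 72 + 144, an HRNet-W18-style width), `num_joints = 68`, and `kernel = 1` are assumed values chosen only for illustration:

```python
import torch
import torch.nn as nn

final_inp_channels = 270  # assumed: 18 + 36 + 72 + 144 (HRNet-W18-like)
num_joints = 68           # assumed landmark count
kernel = 1                # assumed FINAL_CONV_KERNEL

# Original head: dense 1x1 channel-mixing layer.
dense_head = nn.Sequential(
    nn.Conv2d(final_inp_channels, final_inp_channels, kernel_size=1),
    nn.BatchNorm2d(final_inp_channels),
    nn.ReLU(inplace=True),
    nn.Conv2d(final_inp_channels, num_joints, kernel_size=kernel),
)

# Modified head: depthwise 1x1 (one weight per channel), no biases.
depthwise_head = nn.Sequential(
    nn.Conv2d(final_inp_channels, final_inp_channels, kernel_size=1,
              groups=final_inp_channels, bias=False),
    nn.BatchNorm2d(final_inp_channels),
    nn.ReLU(inplace=True),
    nn.Conv2d(final_inp_channels, num_joints, kernel_size=kernel, bias=False),
)

count = lambda m: sum(p.numel() for p in m.parameters())
print(count(dense_head))      # 92138
print(count(depthwise_head))  # 19170

x = torch.randn(1, final_inp_channels, 64, 64)
assert dense_head(x).shape == depthwise_head(x).shape == (1, num_joints, 64, 64)
```

Note that a 1x1 convolution with `groups` equal to its channel count degenerates to a per-channel scale, so the savings come from removing the dense channel mixing; whether accuracy holds up after this change is an empirical question.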
Original source: https://www.cveoy.top/t/topic/otAw. Copyright belongs to the author; please do not repost or scrape.