# Import the required libraries.

import torch.nn as nn

# Define the gUNet class.

class gUNet(nn.Module):
    """Gated U-Net encoder-decoder for image restoration (e.g. dehazing).

    The network has ``len(depths)`` stages arranged symmetrically around a
    single bottleneck: ``half_num`` encoder stages, one bottleneck stage, and
    ``half_num`` decoder stages. Channel width doubles at every downsampling
    step and halves again on the way up; encoder features are passed to the
    decoder through 1x1 skip projections and fused with a learnable fusion
    layer. The final output is predicted as a residual added to the input.

    Args:
        kernel_size: Convolution kernel size used inside each ``BasicLayer``.
        base_dim: Channel width of the first (full-resolution) stage.
        depths: Number of blocks per stage; must have odd length so the
            architecture is symmetric around one bottleneck stage.
            NOTE: a mutable (list) default — it is only read, never mutated.
        conv_layer: Convolution block class used by ``BasicLayer``.
        norm_layer: Normalization layer class (default ``nn.BatchNorm2d``).
        gate_act: Activation class used for the gating branch.
        fusion_layer: Class fusing decoder features with skip features
            (e.g. ``SKFusion``); called as ``fusion([feat, skip])``.
    """

    def __init__(self, kernel_size=5, base_dim=32, depths=[4, 4, 4, 4, 4, 4, 4],
                 conv_layer=ConvLayer, norm_layer=nn.BatchNorm2d,
                 gate_act=nn.Sigmoid, fusion_layer=SKFusion):
        super(gUNet, self).__init__()
        # The stage list must be symmetric around a single bottleneck stage.
        assert len(depths) % 2 == 1
        stage_num = len(depths)
        half_num = stage_num // 2
        net_depth = sum(depths)  # total block count, passed to each BasicLayer
        # Channel widths: base, 2*base, 4*base, ... up to the bottleneck,
        # then mirrored back down for the decoder.
        embed_dims = [2 ** i * base_dim for i in range(half_num)]
        embed_dims = embed_dims + [2 ** half_num * base_dim] + embed_dims[::-1]

        # Inputs must be divisible by this factor (one halving per encoder stage).
        self.patch_size = 2 ** (stage_num // 2)
        self.stage_num = stage_num
        self.half_num = half_num

        # Input convolution: 3-channel image -> first-stage feature map.
        self.inconv = PatchEmbed(patch_size=1, in_chans=3, embed_dim=embed_dims[0], kernel_size=3)

        # Backbone containers.
        self.layers = nn.ModuleList()    # one BasicLayer per stage
        self.downs = nn.ModuleList()     # encoder downsamplers
        self.ups = nn.ModuleList()       # decoder upsamplers
        self.skips = nn.ModuleList()     # 1x1 skip projections
        self.fusions = nn.ModuleList()   # skip/decoder fusion modules

        for i in range(self.stage_num):
            self.layers.append(BasicLayer(dim=embed_dims[i], depth=depths[i], net_depth=net_depth, kernel_size=kernel_size,
                                          conv_layer=conv_layer, norm_layer=norm_layer, gate_act=gate_act))

        for i in range(self.half_num):
            self.downs.append(PatchEmbed(patch_size=2, in_chans=embed_dims[i], embed_dim=embed_dims[i + 1]))
            self.ups.append(PatchUnEmbed(patch_size=2, out_chans=embed_dims[i], embed_dim=embed_dims[i + 1]))
            self.skips.append(nn.Conv2d(embed_dims[i], embed_dims[i], 1))
            self.fusions.append(fusion_layer(embed_dims[i]))

        # Output convolution: last-stage feature map -> 3-channel residual.
        self.outconv = PatchUnEmbed(patch_size=1, out_chans=3, embed_dim=embed_dims[-1], kernel_size=3)

    def forward(self, x):
        """Run the U-Net and return the restored image (input + residual).

        Assumes spatial dims of ``x`` are divisible by ``self.patch_size``
        — TODO confirm against callers.
        """
        feat = self.inconv(x)

        skips = []

        # Encoder: process, record projected skip, then downsample.
        for i in range(self.half_num):
            feat = self.layers[i](feat)
            skips.append(self.skips[i](feat))
            feat = self.downs[i](feat)

        # Bottleneck stage.
        feat = self.layers[self.half_num](feat)

        # Decoder: upsample, fuse with the matching skip, then process.
        for i in range(self.half_num - 1, -1, -1):
            feat = self.ups[i](feat)
            feat = self.fusions[i]([feat, skips[i]])
            feat = self.layers[self.stage_num - i - 1](feat)

        # Global residual connection: predict a correction to the input.
        x = self.outconv(feat) + x

        return x

all = ['gUNet', 'gunet_t', 'gunet_s', 'gunet_b', 'gunet_d']

# NOTE: a normalization batch size of 16~32 may work well.

# Define the gunet_t factory function.

def gunet_t():
    """Build the tiny gUNet variant (trained on 4x 2080Ti cards)."""
    return gUNet(kernel_size=5, base_dim=24, depths=[2, 2, 2, 4, 2, 2, 2],
                 conv_layer=ConvLayer, norm_layer=nn.BatchNorm2d,
                 gate_act=nn.Sigmoid, fusion_layer=SKFusion)

# Define the gunet_s factory function.

def gunet_s():
    """Build the small gUNet variant (trained on 4x 3090 cards)."""
    return gUNet(kernel_size=5, base_dim=24, depths=[4, 4, 4, 8, 4, 4, 4],
                 conv_layer=ConvLayer, norm_layer=nn.BatchNorm2d,
                 gate_act=nn.Sigmoid, fusion_layer=SKFusion)

# Define the gunet_b factory function.

def gunet_b():
    """Build the base gUNet variant (trained on 4x 3090 cards)."""
    return gUNet(kernel_size=5, base_dim=24, depths=[8, 8, 8, 16, 8, 8, 8],
                 conv_layer=ConvLayer, norm_layer=nn.BatchNorm2d,
                 gate_act=nn.Sigmoid, fusion_layer=SKFusion)

# Define the gunet_d factory function.

def gunet_d():
    """Build the deep gUNet variant (trained on 4x 3090 cards)."""
    return gUNet(kernel_size=5, base_dim=24, depths=[16, 16, 16, 32, 16, 16, 16],
                 conv_layer=ConvLayer, norm_layer=nn.BatchNorm2d,
                 gate_act=nn.Sigmoid, fusion_layer=SKFusion)

# gUNet: an efficient deep-learning architecture for image processing.

# Source: https://www.cveoy.top/t/topic/nwZ4 — copyright belongs to the original author.