A list named __all__ is defined, containing the names of all exported functions.

__all__ = ['gunet_k3_t', 'gunet_k7_t', 'gunet_s5_t', 'gunet_s9_t', 'gunet_ln_t', 'gunet_in_t', 'gunet_nn_t', 'gunet_hsig_t', 'gunet_tanh_t', 'gunet_relu_t', 'gunet_gelu_t', 'gunet_idt_t', 'gunet_sum_t', 'gunet_cat_t', 'gunet_se_t', 'gunet_eca_t', 'gunet_rs_t', 'gunet_d2x_t', 'gunet_w2x_t', 'gunet_nb1_t', 'gunet_nb2_t', 'gunet_nb4_t', 'gunet_nb8_t', 'gunet_nb16_t', 'gunet_nb32_t', 'gunet_nb64_t', 'gunet_nb128_t', 'gunet_0wd_t', 'gunet_cwd_t', 'gunet_nf_t', 'gunet_nw_t', 'gunet_ni_t', 'gunet_nmp_t', 'gunet_t_0', 'gunet_t_1', 'gunet_t_2', 'gunet_t_3', 'gunet_t_4']
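Because __all__ is defined, a wildcard import exposes only these factory functions. A minimal usage sketch, assuming this file is saved as models.py (the file name is an assumption, not stated in the source):

# Hypothetical: 'models' as the module name is an assumption.
from models import *      # only the names listed in __all__ are imported
model = gunet_k3_t()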

Several functions are then defined, each of which returns an instance of a gUNet model.

Each function's name encodes the model configuration it builds.

Each implementation calls the gUNet constructor to create the model instance, passing in different arguments.

These arguments control the model configuration, for example the convolution kernel size, base dimension, per-stage depths, convolution-layer type, normalization-layer type, gating activation function, and fusion-layer type.
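As a concrete illustration, any of these factories can be called directly. The sketch below assumes gUNet follows the usual image-to-image restoration convention of mapping an (N, 3, H, W) input to an output of the same shape, which is not stated explicitly in this file:

import torch

# Hypothetical usage sketch; the (N, 3, H, W) -> (N, 3, H, W) shape assumption is ours.
model = gunet_k3_t()
num_params = sum(p.numel() for p in model.parameters())
print(f'gunet_k3_t parameters: {num_params / 1e6:.2f} M')

model.eval()
x = torch.randn(1, 3, 256, 256)      # dummy hazy image
with torch.no_grad():
    y = model(x)                      # restored image, expected to match the input shape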

def gunet_k3_t(): return gUNet(kernel_size=3, base_dim=24, depths=[2, 2, 2, 4, 2, 2, 2], conv_layer=ConvLayer, norm_layer=nn.BatchNorm2d, gate_act=nn.Sigmoid, fusion_layer=SKFusion)

def gunet_k7_t(): return gUNet(kernel_size=7, base_dim=24, depths=[2, 2, 2, 4, 2, 2, 2], conv_layer=ConvLayer, norm_layer=nn.BatchNorm2d, gate_act=nn.Sigmoid, fusion_layer=SKFusion)

def gunet_s5_t(): return gUNet(kernel_size=5, base_dim=24, depths=[2, 2, 4, 2, 2], conv_layer=ConvLayer, norm_layer=nn.BatchNorm2d, gate_act=nn.Sigmoid, fusion_layer=SKFusion)

def gunet_s9_t(): return gUNet(kernel_size=5, base_dim=24, depths=[2, 2, 2, 2, 4, 2, 2, 2, 2], conv_layer=ConvLayer, norm_layer=nn.BatchNorm2d, gate_act=nn.Sigmoid, fusion_layer=SKFusion)

def gunet_ln_t(): return gUNet(kernel_size=5, base_dim=24, depths=[2, 2, 2, 4, 2, 2, 2], conv_layer=ConvLayer, norm_layer=LayerNorm, gate_act=nn.Sigmoid, fusion_layer=SKFusion)

def gunet_in_t(): return gUNet(kernel_size=5, base_dim=24, depths=[2, 2, 2, 4, 2, 2, 2], conv_layer=ConvLayer, norm_layer=nn.InstanceNorm2d, gate_act=nn.Sigmoid, fusion_layer=SKFusion)

def gunet_nn_t(): return gUNet(kernel_size=5, base_dim=24, depths=[2, 2, 2, 4, 2, 2, 2], conv_layer=ConvLayer, norm_layer=nn.Identity, gate_act=nn.Sigmoid, fusion_layer=SKFusion)

def gunet_hsig_t(): return gUNet(kernel_size=5, base_dim=24, depths=[2, 2, 2, 4, 2, 2, 2], conv_layer=ConvLayer, norm_layer=nn.BatchNorm2d, gate_act=nn.Hardsigmoid, fusion_layer=SKFusion)

def gunet_tanh_t(): return gUNet(kernel_size=5, base_dim=24, depths=[2, 2, 2, 4, 2, 2, 2], conv_layer=ConvLayer, norm_layer=nn.BatchNorm2d, gate_act=nn.Tanh, fusion_layer=SKFusion)

def gunet_relu_t(): return gUNet(kernel_size=5, base_dim=24, depths=[2, 2, 2, 4, 2, 2, 2], conv_layer=ConvLayerWithoutGating, norm_layer=nn.BatchNorm2d, gate_act=nn.ReLU, fusion_layer=SKFusion)

def gunet_gelu_t(): return gUNet(kernel_size=5, base_dim=24, depths=[2, 2, 2, 4, 2, 2, 2], conv_layer=ConvLayerWithoutGating, norm_layer=nn.BatchNorm2d, gate_act=nn.GELU, fusion_layer=SKFusion)

def gunet_idt_t(): return gUNet(kernel_size=5, base_dim=24, depths=[2, 2, 2, 4, 2, 2, 2], conv_layer=ConvLayer, norm_layer=nn.BatchNorm2d, gate_act=nn.Identity, fusion_layer=SKFusion)

def gunet_sum_t(): return gUNet(kernel_size=5, base_dim=24, depths=[2, 2, 2, 4, 2, 2, 2], conv_layer=ConvLayer, norm_layer=nn.BatchNorm2d, gate_act=nn.Sigmoid, fusion_layer=SumFusion)

def gunet_cat_t(): return gUNet(kernel_size=5, base_dim=24, depths=[2, 2, 2, 4, 2, 2, 2], conv_layer=ConvLayer, norm_layer=nn.BatchNorm2d, gate_act=nn.Sigmoid, fusion_layer=CatFusion)

def gunet_se_t(): return gUNet(kernel_size=5, base_dim=24, depths=[2, 2, 2, 4, 2, 2, 2], conv_layer=ConvLayerSE, norm_layer=nn.BatchNorm2d, gate_act=nn.Sigmoid, fusion_layer=SKFusion)

def gunet_rs_t(): return gUNet(kernel_size=5, base_dim=24, depths=[2, 2, 2, 4, 2, 2, 2], conv_layer=ConvLayerRS, norm_layer=nn.BatchNorm2d, gate_act=nn.Sigmoid, fusion_layer=SKFusion)

def gunet_eca_t(): return gUNet(kernel_size=5, base_dim=24, depths=[2, 2, 2, 4, 2, 2, 2], conv_layer=ConvLayerECA, norm_layer=nn.BatchNorm2d, gate_act=nn.Sigmoid, fusion_layer=SKFusion)

def gunet_d2x_t(): return gUNet(kernel_size=5, base_dim=24, depths=[4, 4, 4, 8, 4, 4, 4], conv_layer=ConvLayer, norm_layer=nn.BatchNorm2d, gate_act=nn.Sigmoid, fusion_layer=SKFusion)

def gunet_w2x_t(): return gUNet(kernel_size=5, base_dim=32, depths=[2, 2, 2, 4, 2, 2, 2], conv_layer=ConvLayer, norm_layer=nn.BatchNorm2d, gate_act=nn.Sigmoid, fusion_layer=SKFusion)

def gunet_nb1_t(): return gUNet(kernel_size=5, base_dim=24, depths=[2, 2, 2, 4, 2, 2, 2], conv_layer=ConvLayer, norm_layer=GhostBN_1, gate_act=nn.Sigmoid, fusion_layer=SKFusion)

def gunet_nb2_t(): return gUNet(kernel_size=5, base_dim=24, depths=[2, 2, 2, 4, 2, 2, 2], conv_layer=ConvLayer, norm_layer=GhostBN_2, gate_act=nn.Sigmoid, fusion_layer=SKFusion)

def gunet_nb4_t(): return gUNet(kernel_size=5, base_dim=24, depths=[2, 2, 2, 4, 2, 2, 2], conv_layer=ConvLayer, norm_layer=GhostBN_4, gate_act=nn.Sigmoid, fusion_layer=SKFusion)

def gunet_nb8_t(): return gUNet(kernel_size=5, base_dim=24, depths=[2, 2, 2, 4, 2, 2, 2], conv_layer=ConvLayer, norm_layer=GhostBN_8, gate_act=nn.Sigmoid, fusion_layer=SKFusion)

def gunet_nb16_t(): return gUNet(kernel_size=5, base_dim=24, depths=[2, 2, 2, 4, 2, 2, 2], conv_layer=ConvLayer, norm_layer=GhostBN_16, gate_act=nn.Sigmoid, fusion_layer=SKFusion)

def gunet_nb32_t(): return gUNet(kernel_size=5, base_dim=24, depths=[2, 2, 2, 4, 2, 2, 2], conv_layer=ConvLayer, norm_layer=nn.BatchNorm2d, gate_act=nn.Sigmoid, fusion_layer=SKFusion)  # default

def gunet_nb64_t(): return gUNet(kernel_size=5, base_dim=24, depths=[2, 2, 2, 4, 2, 2, 2], conv_layer=ConvLayer, norm_layer=nn.BatchNorm2d, gate_act=nn.Sigmoid, fusion_layer=SKFusion)  # 2 cards 3090

def gunet_nb128_t(): return gUNet(kernel_size=5, base_dim=24, depths=[2, 2, 2, 4, 2, 2, 2], conv_layer=ConvLayer, norm_layer=nn.SyncBatchNorm, gate_act=nn.Sigmoid, fusion_layer=SKFusion)
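The GhostBN_1 through GhostBN_16 normalization layers used by the nb* configurations are not defined in this file; judging by their names, they presumably fix the virtual batch size over which normalization statistics are computed (ghost batch normalization). A minimal sketch of what such a wrapper could look like under that assumption; the class name, constructor signature, and partial-based aliases below are ours, not the repository's:

import torch
import torch.nn as nn
from functools import partial

class GhostBatchNorm2d(nn.BatchNorm2d):
    # BatchNorm2d that normalizes fixed-size virtual sub-batches during training.
    def __init__(self, num_features, virtual_batch_size=16, **kwargs):
        super().__init__(num_features, **kwargs)
        self.virtual_batch_size = virtual_batch_size

    def forward(self, x):
        if self.training and x.size(0) > self.virtual_batch_size:
            # Each virtual sub-batch gets its own batch statistics, while the
            # affine parameters and running statistics remain shared.
            chunks = x.split(self.virtual_batch_size, dim=0)
            return torch.cat([nn.BatchNorm2d.forward(self, c) for c in chunks], dim=0)
        return nn.BatchNorm2d.forward(self, x)

# Hypothetical aliases mirroring the names used above:
GhostBN_1 = partial(GhostBatchNorm2d, virtual_batch_size=1)
GhostBN_16 = partial(GhostBatchNorm2d, virtual_batch_size=16)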

For the ablation of other training strategies, the training code itself has to be modified; the configuration functions below are therefore identical to the default model.

def gunet_0wd_t(): return gUNet(kernel_size=5, base_dim=24, depths=[2, 2, 2, 4, 2, 2, 2], conv_layer=ConvLayer, norm_layer=nn.BatchNorm2d, gate_act=nn.Sigmoid, fusion_layer=SKFusion)

def gunet_cwd_t(): return gUNet(kernel_size=5, base_dim=24, depths=[2, 2, 2, 4, 2, 2, 2], conv_layer=ConvLayer, norm_layer=nn.BatchNorm2d, gate_act=nn.Sigmoid, fusion_layer=SKFusion)

def gunet_nf_t(): return gUNet(kernel_size=5, base_dim=24, depths=[2, 2, 2, 4, 2, 2, 2], conv_layer=ConvLayer, norm_layer=nn.BatchNorm2d, gate_act=nn.Sigmoid, fusion_layer=SKFusion)

def gunet_nw_t(): return gUNet(kernel_size=5, base_dim=24, depths=[2, 2, 2, 4, 2, 2, 2], conv_layer=ConvLayer, norm_layer=nn.BatchNorm2d, gate_act=nn.Sigmoid, fusion_layer=SKFusion)

def gunet_ni_t(): return gUNet(kernel_size=5, base_dim=24, depths=[2, 2, 2, 4, 2, 2, 2], conv_layer=ConvLayer, norm_layer=nn.BatchNorm2d, gate_act=nn.Sigmoid, fusion_layer=SKFusion)

def gunet_nmp_t(): return gUNet(kernel_size=5, base_dim=24, depths=[2, 2, 2, 4, 2, 2, 2], conv_layer=ConvLayer, norm_layer=nn.BatchNorm2d, gate_act=nn.Sigmoid, fusion_layer=SKFusion)

def gunet_t_0(): return gUNet(kernel_size=5, base_dim=24, depths=[2, 2, 2, 4, 2, 2, 2], conv_layer=ConvLayer, norm_layer=nn.BatchNorm2d, gate_act=nn.Sigmoid, fusion_layer=SKFusion)

def gunet_t_1(): return gUNet(kernel_size=5, base_dim=24, depths=[2, 2, 2, 4, 2, 2, 2], conv_layer=ConvLayer, norm_layer=nn.BatchNorm2d, gate_act=nn.Sigmoid, fusion_layer=SKFusion)

def gunet_t_2(): return gUNet(kernel_size=5, base_dim=24, depths=[2, 2, 2, 4, 2, 2, 2], conv_layer=ConvLayer, norm_layer=nn.BatchNorm2d, gate_act=nn.Sigmoid, fusion_layer=SKFusion)

def gunet_t_3(): return gUNet(kernel_size=5, base_dim=24, depths=[2, 2, 2, 4, 2, 2, 2], conv_layer=ConvLayer, norm_layer=nn.BatchNorm2d, gate_act=nn.Sigmoid, fusion_layer=SKFusion)

def gunet_t_4(): return gUNet(kernel_size=5, base_dim=24, depths=[2, 2, 2, 4, 2, 2, 2], conv_layer=ConvLayer, norm_layer=nn.BatchNorm2d, gate_act=nn.Sigmoid, fusion_layer=SKFusion)
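Since every configuration exposes the same zero-argument factory interface, a training script can select a model by its string name. A minimal sketch, assuming the file is importable as a module called models (the module name and the way the name is supplied are assumptions):

import importlib

model_name = 'gunet_nb8_t'                      # e.g. taken from a command-line flag
models = importlib.import_module('models')      # hypothetical module name
model = getattr(models, model_name)()           # look up the factory and call it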

gUNet model configuration functions: code commentary
