The modified code is as follows:

import torch
import torch.nn as nn

class Conv_Block(nn.Module):
    """Pre-activation convolution block: BN -> ReLU -> 3x3 conv."""
    def __init__(self, ch_in, ch_out):
        super(Conv_Block, self).__init__()
        self.conv = nn.Sequential(
            nn.BatchNorm2d(ch_in),
            nn.ReLU(inplace=True),
            nn.Conv2d(ch_in, ch_out, kernel_size=3, stride=1, padding=1)
        )

    def forward(self, x):
        return self.conv(x)

class Dense_Block(nn.Module):
    """Three Conv_Blocks with dense connectivity: each conv also sees the
    original input (and earlier outputs) via channel concatenation."""
    def __init__(self, ch_in, ch_out):
        super(Dense_Block, self).__init__()
        self.conv1 = Conv_Block(ch_in, ch_out)
        self.conv2 = Conv_Block(ch_out + ch_in, ch_out)
        self.conv3 = Conv_Block(ch_out * 2 + ch_in, ch_out)

    def forward(self, input_tensor):
        x1 = self.conv1(input_tensor)                    # ch_out channels
        cat1 = torch.cat([x1, input_tensor], dim=1)      # ch_out + ch_in
        x2 = self.conv2(cat1)                            # ch_out
        cat2 = torch.cat([x1, input_tensor, x2], dim=1)  # 2 * ch_out + ch_in
        x3 = self.conv3(cat2)                            # ch_out
        return x3

class Up_Conv(nn.Module):
    """2x nearest-neighbour upsampling followed by 3x3 conv, BN and ReLU."""
    def __init__(self, ch_in, ch_out):
        super(Up_Conv, self).__init__()
        self.up = nn.Sequential(
            nn.Upsample(scale_factor=2),
            nn.Conv2d(ch_in, ch_out, kernel_size=3, stride=1, padding=1, bias=True),
            nn.BatchNorm2d(ch_out),
            nn.ReLU(inplace=True)
        )

    def forward(self, x):
        return self.up(x)

class DenseU_Net(nn.Module):
    def __init__(self, img_ch, output_ch, filters=[64, 128, 256, 512, 1024]):
        super(DenseU_Net, self).__init__()

        self.Maxpool = nn.MaxPool2d(kernel_size=2, stride=2)
        self.relu = nn.ReLU(inplace=True)

        # Stem: 7x7 conv to filters[0], then one dense block per level
        # (64 -> 128 -> 256 -> 512 -> 1024 channels with the default filters).
        self.Conv0 = nn.Conv2d(img_ch, filters[0], kernel_size=7, padding=3, stride=1)
        self.dens_blocks = nn.ModuleList([
            Dense_Block(filters[i], filters[i + 1]) for i in range(len(filters) - 1)
        ])

        self.conv_center_1 = Conv_Block(filters[-1], filters[-1])
        self.conv_center_2 = Conv_Block(filters[-1], filters[-1])
        self.drop_center = nn.Dropout(0.5)

        self.up_convs = nn.ModuleList([
            Up_Conv(filters[i + 1], filters[i]) for i in range(len(filters) - 1)
        ])
        # Decoder blocks: after concatenating the upsampled features with the
        # skip connection (filters[i] + filters[i] channels), reduce back to filters[i].
        self.up_dense_blocks = nn.ModuleList([
            Dense_Block(filters[i] * 2, filters[i]) for i in range(len(filters) - 1)
        ])

        self.conv_final_1 = nn.Conv2d(filters[0], filters[0] // 2, 7, 1, 3)
        self.conv_final_2 = nn.Conv2d(filters[0] // 2, output_ch, 3, 1, 1)

    def forward(self, x):
        # Encoder: each level max-pools and then applies a dense block,
        # halving the spatial size and doubling the channel count.
        x = self.Conv0(x)
        down = [x]
        for block in self.dens_blocks:
            x = self.Maxpool(x)
            x = block(x)
            down.append(x)

        # Bottleneck on the deepest feature map.
        center = self.conv_center_1(down[-1])
        center = self.conv_center_2(center)
        center = self.drop_center(center)

        # Decoder: walk the levels back up, concatenating the matching
        # encoder feature map at each scale.
        x = center
        for i in reversed(range(len(self.up_convs))):
            x = self.up_convs[i](x)
            x = torch.cat([down[i], x], dim=1)
            x = self.up_dense_blocks[i](x)

        x = self.conv_final_1(x)
        x = self.relu(x)
        x = self.conv_final_2(x)

        return x

In this version, the default value of filters is [64, 128, 256, 512, 1024], which gives the requested channel progression. The encoder, bottleneck and decoder in DenseU_Net.forward are now driven by plain loops over ModuleLists, which makes the forward pass easier to follow; the decoder also gets its own fusion blocks (up_dense_blocks), so the channel counts line up after each skip-connection concatenation.
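As a quick sanity check, the sketch below runs a dummy batch through the network and prints the output shape. The values img_ch=3, output_ch=2 and the 256x256 input size are arbitrary example choices, not part of the original request; the only real constraint is that the spatial size must be divisible by 16, because the encoder downsamples four times.

model = DenseU_Net(img_ch=3, output_ch=2)
model.eval()  # use running BatchNorm statistics for the dummy pass
dummy = torch.randn(1, 3, 256, 256)  # N x C x H x W, H and W divisible by 16
with torch.no_grad():
    out = model(dummy)
print(out.shape)  # expected: torch.Size([1, 2, 256, 256])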
