Rebuild the DenseUnet network from the following code. (The original ResidualConv snippet quoted here was garbled during extraction; the cleaned implementation follows below.)
import torch
import torch.nn as nn
class DenseBlock(nn.Module):
    """Dense layer: a BN-ReLU-Conv(3x3) x2 main path whose output is
    concatenated with the input along the channel axis (DenseNet-style
    feature reuse).

    Args:
        input_dim:  number of input channels.
        output_dim: number of channels produced by the conv path; the
            forward output therefore has ``input_dim + output_dim`` channels.
        stride:  stride of the first 3x3 convolution.
        padding: padding of the first 3x3 convolution.

    NOTE(review): the concatenation in ``forward`` requires the conv path
    to preserve the spatial size, which only holds for ``stride == 1`` with
    ``padding == 1``; ``stride == 2`` (as passed by ``DenseUnet``) would
    make ``torch.cat`` fail at runtime — confirm the intended configuration.
    """

    def __init__(self, input_dim, output_dim, stride, padding):
        # Bug fix: original had ``def init`` / ``self).init()``, so
        # nn.Module.__init__ never ran and construction raised TypeError.
        super(DenseBlock, self).__init__()
        self.conv_block = nn.Sequential(
            nn.BatchNorm2d(input_dim),
            nn.ReLU(),
            nn.Conv2d(
                input_dim, output_dim, kernel_size=3, stride=stride, padding=padding
            ),
            nn.BatchNorm2d(output_dim),
            nn.ReLU(),
            nn.Conv2d(output_dim, output_dim, kernel_size=3, padding=1),
        )
        # NOTE(review): defined but never used in ``forward`` — a leftover
        # from the ResidualConv template this was adapted from; kept so the
        # parameter set / state-dict layout stays unchanged.
        self.conv_skip = nn.Sequential(
            nn.Conv2d(input_dim, output_dim, kernel_size=3, stride=stride, padding=1),
            nn.BatchNorm2d(output_dim),
        )

    def forward(self, x):
        # Dense connectivity: stack the input and the new features channel-wise.
        return torch.cat([x, self.conv_block(x)], dim=1)
class TransitionBlock(nn.Module):
    """DenseNet transition layer: BN-ReLU then a 1x1 convolution to change
    the channel count from ``input_dim`` to ``output_dim``, followed by 2x2
    average pooling that halves the spatial resolution.
    """

    def __init__(self, input_dim, output_dim):
        # Bug fix: original had ``def init`` / ``self).init()``, so
        # nn.Module.__init__ never ran and construction raised TypeError.
        super(TransitionBlock, self).__init__()
        self.transition = nn.Sequential(
            nn.BatchNorm2d(input_dim),
            nn.ReLU(),
            nn.Conv2d(input_dim, output_dim, kernel_size=1),
            nn.AvgPool2d(kernel_size=2, stride=2),
        )

    def forward(self, x):
        return self.transition(x)
class DenseUnet(nn.Module):
    """U-Net built from dense blocks: a stem with a parallel skip conv, a
    4-stage encoder (dense block + transition downsampling), a dense bridge,
    and a 4-stage decoder (transposed-conv upsample + encoder skip
    concatenation + dense block), finished by a 1x1 conv + sigmoid head.

    Args:
        channel:   number of input image channels.
        output_ch: number of output channels (sigmoid-activated).
        filters:   per-stage channel widths, shallow to deep.

    NOTE(review): the channel arithmetic looks internally inconsistent —
    ``_make_dense_block`` chains blocks assuming each outputs
    ``2 * output_dim`` channels (only true when ``input_dim == output_dim``),
    and the encoder builds dense blocks with stride=2, which breaks
    ``DenseBlock``'s channel concatenation. The forward pass is therefore
    unlikely to run as written; verify against the original DenseUnet
    reference before use.
    """

    def __init__(self, channel, output_ch, filters=(64, 128, 256, 512, 1024)):
        # Bug fix: original had ``def init`` / ``self).init()``, so
        # nn.Module.__init__ never ran and construction raised TypeError.
        # Also replaced the mutable list default for ``filters`` with a
        # tuple (read-only use; callers may still pass a list).
        super(DenseUnet, self).__init__()
        # Stem: two 3x3 convs plus a parallel 3x3 skip conv, summed in forward.
        self.input_layer = nn.Sequential(
            nn.Conv2d(channel, filters[0], kernel_size=3, padding=1),
            nn.BatchNorm2d(filters[0]),
            nn.ReLU(),
            nn.Conv2d(filters[0], filters[0], kernel_size=3, padding=1),
        )
        self.input_skip = nn.Sequential(
            nn.Conv2d(channel, filters[0], kernel_size=3, padding=1)
        )
        # Encoder: dense block, then 1x1-conv + 2x avg-pool transition.
        self.dense_block_1 = self._make_dense_block(filters[0], filters[1], 2, 1)
        self.transition_1 = TransitionBlock(filters[1] + filters[0], filters[1])
        self.dense_block_2 = self._make_dense_block(filters[1], filters[2], 2, 1)
        self.transition_2 = TransitionBlock(filters[2] + filters[1], filters[2])
        self.dense_block_3 = self._make_dense_block(filters[2], filters[3], 2, 1)
        self.transition_3 = TransitionBlock(filters[3] + filters[2], filters[3])
        self.bridge = self._make_dense_block(filters[3], filters[4], 2, 1)
        # Decoder: 2x transposed-conv upsample, concat encoder skip, dense block.
        self.upsample_1 = nn.ConvTranspose2d(
            filters[4], filters[4], kernel_size=2, stride=2
        )
        self.dense_block_4 = self._make_dense_block(
            filters[4] + filters[3], filters[3], 1, 1
        )
        self.upsample_2 = nn.ConvTranspose2d(
            filters[3], filters[3], kernel_size=2, stride=2
        )
        self.dense_block_5 = self._make_dense_block(
            filters[3] + filters[2], filters[2], 1, 1
        )
        self.upsample_3 = nn.ConvTranspose2d(
            filters[2], filters[2], kernel_size=2, stride=2
        )
        self.dense_block_6 = self._make_dense_block(
            filters[2] + filters[1], filters[1], 1, 1
        )
        self.upsample_4 = nn.ConvTranspose2d(
            filters[1], filters[1], kernel_size=2, stride=2
        )
        self.dense_block_7 = self._make_dense_block(
            filters[1] + filters[0], filters[0], 1, 1
        )
        # Head: 1x1 conv to the requested channel count, sigmoid-activated
        # (suited to per-pixel binary/soft masks).
        self.output_layer = nn.Sequential(
            nn.Conv2d(filters[0], output_ch, kernel_size=1, stride=1),
            nn.Sigmoid(),
        )

    def _make_dense_block(self, input_dim, output_dim, stride, padding):
        """Stack three DenseBlocks.

        NOTE(review): the ``output_dim * 2`` / ``output_dim * 3`` input widths
        assume each DenseBlock emits ``2 * output_dim`` channels, which only
        holds when ``input_dim == output_dim`` — not true for the widths this
        class passes in. Confirm against the intended architecture.
        """
        layers = []
        layers.append(DenseBlock(input_dim, output_dim, stride, padding))
        layers.append(DenseBlock(output_dim * 2, output_dim, stride, padding))
        layers.append(DenseBlock(output_dim * 3, output_dim, stride, padding))
        return nn.Sequential(*layers)

    def forward(self, x):
        # Encode
        x1 = self.input_layer(x) + self.input_skip(x)
        x2 = self.dense_block_1(x1)
        x3 = self.transition_1(x2)
        x4 = self.dense_block_2(x3)
        x5 = self.transition_2(x4)
        x6 = self.dense_block_3(x5)
        x7 = self.transition_3(x6)
        # Bridge
        x8 = self.bridge(x7)
        # Decode: upsample, fuse with the matching encoder feature, refine.
        x9 = self.upsample_1(x8)
        x10 = torch.cat([x9, x7], dim=1)
        x11 = self.dense_block_4(x10)
        x12 = self.upsample_2(x11)
        x13 = torch.cat([x12, x5], dim=1)
        x14 = self.dense_block_5(x13)
        x15 = self.upsample_3(x14)
        x16 = torch.cat([x15, x3], dim=1)
        x17 = self.dense_block_6(x16)
        x18 = self.upsample_4(x17)
        x19 = torch.cat([x18, x1], dim=1)
        x20 = self.dense_block_7(x19)
        output = self.output_layer(x20)
        # Bug fix: original returned the undefined name ``outpu`` (NameError).
        return output
Original source: https://www.cveoy.top/t/topic/iTtQ — copyright belongs to the author. Do not repost or scrape.