这段代码报错的原因是在DenseBlock的构造函数中,第一个卷积层的输出通道数应该是output_dim而不是input_dim。因此,需要将第一个卷积层的输出通道数从input_dim改为output_dim。修改后的代码如下:

class DenseBlock(nn.Module):
    """Dense block: two BN -> ReLU -> 3x3 Conv stages; the block input is
    concatenated with the conv output along the channel dimension.

    The result has ``input_dim + output_dim`` channels. Because ``torch.cat``
    requires matching spatial sizes, the block only works when the first conv
    preserves resolution (stride=1 with padding=1); with stride=2 the
    concatenation in ``forward`` raises a size-mismatch RuntimeError — this is
    the error reported below.
    """

    def __init__(self, input_dim, output_dim, stride, padding):
        super(DenseBlock, self).__init__()

        self.conv_block = nn.Sequential(
            nn.BatchNorm2d(input_dim),
            nn.ReLU(),
            nn.Conv2d(
                input_dim, output_dim, kernel_size=3, stride=stride, padding=padding
            ),
            nn.BatchNorm2d(output_dim),
            nn.ReLU(),
            nn.Conv2d(output_dim, output_dim, kernel_size=3, padding=1),
        )
        # NOTE(review): conv_skip is never used by forward(); it is kept only
        # so checkpoints saved with these parameter names still load. As
        # written it could not be applied to x anyway: its first conv expects
        # output_dim input channels while x has input_dim channels.
        self.conv_skip = nn.Sequential(
            nn.Conv2d(output_dim, output_dim, kernel_size=3, stride=stride, padding=1),
            nn.BatchNorm2d(output_dim),
        )

    def forward(self, x):
        # Fix: compute the conv branch once (the original ran conv_block(x)
        # twice — once for a debug print, once for the return) and drop the
        # leftover debug prints.
        out = self.conv_block(x)
        return torch.cat([x, out], dim=1)

另外,还需要注意在DenseUnet的构造函数中,输入层的第二个卷积层的输入通道数应该是filters[0]而不是filters[1]。因此,需要将第二个卷积层的输入通道数从filters[1]改为filters[0]。修改后的代码如下:

class DenseUnet(nn.Module):
    """U-Net-style encoder/decoder assembled from DenseBlocks.

    Args:
        channel: number of input image channels.
        output_ch: number of output channels (sigmoid-activated mask/logits).
        filters: channel widths per stage (default (64, 128, 256, 512, 1024)).

    Fix applied here: ``forward`` now returns its result — the original
    computed ``output`` on its last line and fell off the end, so callers
    received ``None``. Leftover debug prints were also removed.

    NOTE(review): as written the network still cannot run end to end —
    the encoder passes stride=2 into ``_make_dense_block``, which makes
    DenseBlock's ``torch.cat`` fail on mismatched spatial sizes (the
    RuntimeError quoted below), and the channel bookkeeping inside
    ``_make_dense_block`` only lines up when input_dim == output_dim.
    Those are architectural issues needing a design decision (e.g. move the
    downsampling into the transition blocks) and are deliberately not
    changed here.
    """

    # Default changed from a list to a tuple: immutable defaults avoid the
    # shared-mutable-default pitfall; the value is only ever indexed, so the
    # change is backward-compatible.
    def __init__(self, channel, output_ch, filters=(64, 128, 256, 512, 1024)):
        super(DenseUnet, self).__init__()

        # Stem: two 3x3 convs plus a 1-conv skip, summed in forward().
        self.input_layer = nn.Sequential(
            nn.Conv2d(channel, filters[0], kernel_size=3, padding=1),
            nn.BatchNorm2d(filters[0]),
            nn.ReLU(),
            nn.Conv2d(filters[0], filters[0], kernel_size=3, padding=1),
        )
        self.input_skip = nn.Sequential(
            nn.Conv2d(channel, filters[0], kernel_size=3, padding=1)
        )

        # Encoder: dense block + transition per stage.
        self.dense_block_1 = self._make_dense_block(filters[0], filters[1], 2, 1)
        self.transition_1 = TransitionBlock(filters[1] + filters[0], filters[1])

        self.dense_block_2 = self._make_dense_block(filters[1], filters[2], 2, 1)
        self.transition_2 = TransitionBlock(filters[2] + filters[1], filters[2])

        self.dense_block_3 = self._make_dense_block(filters[2], filters[3], 2, 1)
        self.transition_3 = TransitionBlock(filters[3] + filters[2], filters[3])

        self.bridge = self._make_dense_block(filters[3], filters[4], 2, 1)

        # Decoder: transposed-conv upsample, concat with encoder feature,
        # then a stride-1 dense block.
        self.upsample_1 = nn.ConvTranspose2d(
            filters[4], filters[4], kernel_size=2, stride=2
        )
        self.dense_block_4 = self._make_dense_block(
            filters[4] + filters[3], filters[3], 1, 1
        )

        self.upsample_2 = nn.ConvTranspose2d(
            filters[3], filters[3], kernel_size=2, stride=2
        )
        self.dense_block_5 = self._make_dense_block(
            filters[3] + filters[2], filters[2], 1, 1
        )

        self.upsample_3 = nn.ConvTranspose2d(
            filters[2], filters[2], kernel_size=2, stride=2
        )
        self.dense_block_6 = self._make_dense_block(
            filters[2] + filters[1], filters[1], 1, 1
        )

        self.upsample_4 = nn.ConvTranspose2d(
            filters[1], filters[1], kernel_size=2, stride=2
        )
        self.dense_block_7 = self._make_dense_block(
            filters[1] + filters[0], filters[0], 1, 1
        )

        self.output_layer = nn.Sequential(
            nn.Conv2d(filters[0], output_ch, kernel_size=1, stride=1),
            nn.Sigmoid(),
        )

    def _make_dense_block(self, input_dim, output_dim, stride, padding):
        """Stack three DenseBlocks.

        NOTE(review): each DenseBlock outputs in + out channels, so the
        2*output_dim / 3*output_dim inputs below only match when
        input_dim == output_dim — TODO confirm intended growth rate.
        """
        layers = [
            DenseBlock(input_dim, output_dim, stride, padding),
            DenseBlock(output_dim * 2, output_dim, stride, padding),
            DenseBlock(output_dim * 3, output_dim, stride, padding),
        ]
        return nn.Sequential(*layers)

    def forward(self, x):
        # Encode
        x1 = self.input_layer(x) + self.input_skip(x)
        x2 = self.dense_block_1(x1)
        x3 = self.transition_1(x2)

        x4 = self.dense_block_2(x3)
        x5 = self.transition_2(x4)

        x6 = self.dense_block_3(x5)
        x7 = self.transition_3(x6)

        # Bridge
        x8 = self.bridge(x7)

        # Decode
        x9 = self.upsample_1(x8)
        x10 = torch.cat([x9, x7], dim=1)
        x11 = self.dense_block_4(x10)

        x12 = self.upsample_2(x11)
        x13 = torch.cat([x12, x5], dim=1)
        x14 = self.dense_block_5(x13)

        x15 = self.upsample_3(x14)
        x16 = torch.cat([x15, x3], dim=1)
        x17 = self.dense_block_6(x16)

        x18 = self.upsample_4(x17)
        x19 = torch.cat([x18, x1], dim=1)
        x20 = self.dense_block_7(x19)

        output = self.output_layer(x20)
        # Fix: the original computed `output` but never returned it.
        return output
```
以下代码报错:RuntimeError: Sizes of tensors must match except in dimension 1. Expected size 256 but got size 128 for tensor number 1 in the list。(错误发生在 DenseBlock.forward 的 torch.cat 调用处;原提问随错误信息粘贴的代码片段已被截断,此处从略。)

原文地址: https://www.cveoy.top/t/topic/iTuu 著作权归作者所有。请勿转载和采集!

免费AI点我,无需注册和登录