# Task: optimize the following code to make it more concise and better integrated.
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
# Dense U-Net
# Dense U-Net: an encoder/decoder that predicts a per-pixel sigmoid mask
# and applies it multiplicatively to the single-channel input image.
#
# Fixes relative to the original:
#   * ``init`` -> ``__init__`` (the constructor was never actually called).
#   * The dense blocks declared growing input channels (32*(i+1), 32*(i+2),
#     32*(i+3)) inside a plain ``nn.Sequential``, which cannot concatenate
#     features; dense connectivity is now implemented in ``_DenseBlock``.
#   * The encoder computed ``downsampled`` and discarded it, and the decoder
#     reused encoder blocks with mismatched channel counts (including a
#     wrap-around ``dense_outputs[i-1]`` index at i=0); the forward pass is
#     now a coherent U-Net with skip connections.
#   * ``forward`` ended with a bare ``return``; it now returns the masked input.


class _DenseBlock(nn.Module):
    """Three 3x3 conv layers with dense (concatenative) connectivity.

    Layer ``j`` sees the block input concatenated with every previous
    layer's output, i.e. ``in_channels + j * growth`` channels, and emits
    ``growth`` channels; the block returns the last layer's output.
    """

    def __init__(self, in_channels, growth=32):
        super(_DenseBlock, self).__init__()
        self.layers = nn.ModuleList()
        channels = in_channels
        for _ in range(3):
            self.layers.append(nn.Sequential(
                nn.Conv2d(channels, growth, 3, 1, 1),
                nn.BatchNorm2d(growth),
                nn.LeakyReLU(0.2),
            ))
            channels += growth  # the next layer also sees this layer's output

    def forward(self, x):
        features = [x]
        for layer in self.layers:
            features.append(layer(torch.cat(features, dim=1)))
        return features[-1]


class model(nn.Module):
    """Dense U-Net mask predictor.

    Input:  ``x`` of shape (N, 1, H, W) with H and W divisible by 16
            (four stride-2 down/upsampling levels).
    Output: ``mask * x`` of the same shape, where ``mask`` is a per-pixel
            sigmoid activation in (0, 1).
    """

    def __init__(self):
        super(model, self).__init__()
        # 1x1 stem: lift the single input channel to 32 feature channels.
        self.first_conv = nn.Sequential(
            nn.Conv2d(1, 32, 1),
            nn.BatchNorm2d(32),
            nn.LeakyReLU(0.2),
        )
        # Encoder: 4 levels of (dense block -> stride-2 downsampling).
        self.dense_blocks = nn.ModuleList(_DenseBlock(32) for _ in range(4))
        self.downsamplings = nn.ModuleList(
            nn.Sequential(
                nn.Conv2d(32, 32, 2, 2),
                nn.BatchNorm2d(32),
                nn.LeakyReLU(0.2),
            )
            for _ in range(4)
        )
        # Decoder: 4 levels of (transposed-conv upsampling -> dense block).
        # Each decoder block sees 64 channels: the upsampled features
        # concatenated with the matching encoder skip connection.
        self.upsamplings = nn.ModuleList(
            nn.Sequential(
                nn.ConvTranspose2d(32, 32, 2, 2),
                nn.BatchNorm2d(32),
                nn.LeakyReLU(0.2),
            )
            for _ in range(4)
        )
        self.up_blocks = nn.ModuleList(_DenseBlock(64) for _ in range(4))
        # 1x1 head: collapse to a single-channel sigmoid mask.
        self.last_conv = nn.Sequential(
            nn.Conv2d(32, 1, 1),
            nn.Sigmoid(),
        )

    def forward(self, x):
        h = self.first_conv(x)
        # Encoder path: remember each level's features for the skips.
        skips = []
        for block, down in zip(self.dense_blocks, self.downsamplings):
            h = block(h)
            skips.append(h)
            h = down(h)
        # Decoder path: upsample, fuse with the matching skip, refine.
        for i in range(3, -1, -1):
            h = self.upsamplings[i](h)
            h = self.up_blocks[i](torch.cat([h, skips[i]], dim=1))
        mask = self.last_conv(h)
        return mask * x
# Source: http://www.cveoy.top/t/topic/iTaW — copyright belongs to the author; do not reproduce or scrape.