import torch
import torch.nn as nn
import torch.nn.functional as F

class MLP(nn.Module):
    """Three-layer fully connected classifier: 32*32 -> 2048 -> 512 -> 20.

    Expects input that flattens to 1024 features per sample, e.g. a
    (batch, 1, 32, 32) tensor.
    """

    def __init__(self):
        # NOTE: the scraped original had `init` / `self.init()` — the
        # dunder underscores were stripped; restored here.
        super(MLP, self).__init__()
        self.linear1 = nn.Linear(32 * 32, 2048)
        self.linear2 = nn.Linear(2048, 512)
        self.linear3 = nn.Linear(512, 20)

    def forward(self, x):
        """Flatten `x` to (batch, 1024) and return (batch, 20) logits."""
        x = x.view(x.size(0), -1)
        x = self.linear1(x)
        # F.sigmoid is deprecated in PyTorch; torch.sigmoid is the
        # supported equivalent with identical behavior.
        x = torch.sigmoid(x)
        x = self.linear2(x)
        x = F.relu(x)
        x = self.linear3(x)
        return x

if __name__ == '__main__':
    net = MLP()

    # Initialize weights and biases, printing each parameter's mean.
    for name, param in net.named_parameters():
        if 'bias' in name:
            nn.init.constant_(param, 0.0)
        elif 'weight' in name:
            nn.init.uniform_(param, 0.0, 1.0)
        # Original had f'{name}'s mean: ...' — the unescaped apostrophe
        # terminated the string; use double quotes around the f-string.
        print(f"{name}'s mean: \t {param.mean()}")

    # Smoke-test the forward pass with a random input.
    x = torch.randn(1, 1, 32, 32)
    out = net(x)
    flat = x.view(x.size(0), -1)
    print(f'Flatten output shape: \t {flat.shape}')
    # BUG FIX: the original passed `out` (shape (1, 20)) into linear1,
    # which expects 1024 input features and would raise a size-mismatch
    # error. The stage-by-stage probes must start from the flattened input.
    h1 = net.linear1(flat)
    print(f'Linear output shape: \t {h1.shape}')
    h2 = net.linear2(torch.sigmoid(h1))
    print(f'Sigmoid output shape: \t {h2.shape}')
    h3 = net.linear3(F.relu(h2))
    print(f'ReLU output shape: \t {h3.shape}')
    print(f'Linear output shape: \t {out.shape}')


# Original source: https://www.cveoy.top/t/topic/eOsq — copyright belongs to the author; do not repost or scrape.