Convert the following Keras training call to PyTorch:

autoencoder.fit(x_train, x_train, epochs=300, batch_size=128, shuffle=True,
                validation_data=(x_test, x_test),
                callbacks=[EarlyStopping(monitor='val_loss', min_delta=0.0001, patience=5)])
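PyTorch has no built-in fit() loop or EarlyStopping callback, so the epochs, batching, shuffling, and validation pass are written out by hand below. Note that the answer substitutes ReduceLROnPlateau for the EarlyStopping callback (it lowers the learning rate on a plateau rather than stopping training); a manual early-stopping sketch that matches the original min_delta/patience semantics is given after the training loop.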
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader, TensorDataset
from torch.optim.lr_scheduler import ReduceLROnPlateau
class Autoencoder(nn.Module):
    def __init__(self):
        super(Autoencoder, self).__init__()
        self.encoder = nn.Sequential(
            nn.Linear(784, 256),
            nn.ReLU(True),
            nn.Linear(256, 64),
            nn.ReLU(True),
            nn.Linear(64, 16))
        self.decoder = nn.Sequential(
            nn.Linear(16, 64),
            nn.ReLU(True),
            nn.Linear(64, 256),
            nn.ReLU(True),
            nn.Linear(256, 784),
            nn.Tanh())

    def forward(self, x):
        x = self.encoder(x)
        x = self.decoder(x)
        return x
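The 784-unit input and output assume flattened 28x28 images (e.g. MNIST), and the final nn.Tanh() assumes inputs scaled to [-1, 1]; for data in [0, 1], nn.Sigmoid() would be the matching output activation. A quick shape sanity check on a dummy batch (the random tensor is just a stand-in, not real data):

x = torch.randn(4, 784)         # dummy batch of 4 flattened 28x28 images
print(Autoencoder()(x).shape)   # torch.Size([4, 784])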
# Load the dataset
train_dataset = TensorDataset(torch.Tensor(x_train), torch.Tensor(x_train))
test_dataset = TensorDataset(torch.Tensor(x_test), torch.Tensor(x_test))
train_loader = DataLoader(train_dataset, batch_size=128, shuffle=True)
test_loader = DataLoader(test_dataset, batch_size=128, shuffle=False)
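Each TensorDataset pairs a sample with itself because an autoencoder's target is its own input; the training loop below actually ignores the second tensor and compares outputs against inputs directly.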
# Initialize the model
model = Autoencoder()
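Unlike Keras, PyTorch does not place anything on a GPU automatically. An optional device setup (an addition, not part of the original answer) would look like:

# Optional GPU support -- assumed addition, not in the original answer
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model = model.to(device)
# ...and inside the loops: inputs = inputs.to(device)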
# Define the loss function and optimizer
criterion = nn.MSELoss()
optimizer = optim.Adam(model.parameters(), lr=0.001)
# Train the model
# Reduce the learning rate when the validation loss plateaus
scheduler = ReduceLROnPlateau(optimizer, 'min', patience=5)

for epoch in range(300):
    # Training pass
    model.train()
    train_loss = 0
    for data in train_loader:
        inputs, _ = data
        optimizer.zero_grad()
        outputs = model(inputs)
        loss = criterion(outputs, inputs)  # reconstruction loss against the input itself
        loss.backward()
        optimizer.step()
        train_loss += loss.item() * inputs.size(0)
    train_loss /= len(train_loader.dataset)

    # Validation pass
    model.eval()
    test_loss = 0
    with torch.no_grad():
        for data in test_loader:
            inputs, _ = data
            outputs = model(inputs)
            loss = criterion(outputs, inputs)
            test_loss += loss.item() * inputs.size(0)
    test_loss /= len(test_loader.dataset)

    scheduler.step(test_loss)
    print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(epoch + 1, train_loss, test_loss))
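To reproduce the EarlyStopping behaviour from the original fit call rather than only reducing the learning rate, a minimal manual check can be added inside the epoch loop; best_loss and epochs_no_improve are bookkeeping names introduced here for illustration, not a PyTorch API:

# Manual early stopping, mirroring EarlyStopping(monitor='val_loss',
# min_delta=0.0001, patience=5); best_loss / epochs_no_improve are
# illustrative names, not part of PyTorch.
best_loss = float('inf')
epochs_no_improve = 0
# ...inside the epoch loop, after computing test_loss:
if best_loss - test_loss > 0.0001:  # improvement larger than min_delta
    best_loss = test_loss
    epochs_no_improve = 0
else:
    epochs_no_improve += 1
    if epochs_no_improve >= 5:      # patience exhausted
        print('Early stopping at epoch {}'.format(epoch + 1))
        break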