Convert the following Keras training call to PyTorch:

autoencoder.fit(x_train, x_train, epochs=300, batch_size=128, shuffle=True, validation_data=(x_test, x_test), callbacks=[EarlyStopping(monitor='val_loss', min_delta=0.0001, patience=5)])
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset
from torch.optim import Adam
# Define the autoencoder model
class Autoencoder(nn.Module):
    def __init__(self):
        super(Autoencoder, self).__init__()
        self.encoder = nn.Sequential(
            nn.Linear(784, 128),
            nn.ReLU(),
            nn.Linear(128, 64),
            nn.ReLU(),
            nn.Linear(64, 32),
            nn.ReLU(),
        )
        self.decoder = nn.Sequential(
            nn.Linear(32, 64),
            nn.ReLU(),
            nn.Linear(64, 128),
            nn.ReLU(),
            nn.Linear(128, 784),
            nn.Sigmoid(),
        )

    def forward(self, x):
        x = self.encoder(x)
        x = self.decoder(x)
        return x
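For reference, the network above expects each input as a flattened 784-dimensional vector (e.g. a 28x28 image). A quick shape check with a made-up batch (hypothetical values, not part of the original answer) looks like:

# Hypothetical shape check: a random batch of 16 flattened 28x28 inputs in [0, 1]
model = Autoencoder()
dummy = torch.rand(16, 784)
reconstruction = model(dummy)
print(reconstruction.shape)  # torch.Size([16, 784])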
# Prepare the data
x_train = torch.tensor(x_train).float()
x_test = torch.tensor(x_test).float()
train_dataset = TensorDataset(x_train, x_train)
test_dataset = TensorDataset(x_test, x_test)
train_loader = DataLoader(train_dataset, batch_size=128, shuffle=True)
test_loader = DataLoader(test_dataset, batch_size=128, shuffle=False)  # no need to shuffle the evaluation data
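The snippet above assumes x_train and x_test already exist as arrays of shape (N, 784) with values scaled to [0, 1], as the original Keras code implies. If they need to be produced from scratch, one possible way (an assumption using torchvision's MNIST, not something stated in the original) is:

# Assumption: flattened MNIST digits scaled to [0, 1]; the original arrays may come from elsewhere
from torchvision.datasets import MNIST

mnist_train = MNIST(root="data", train=True, download=True)
mnist_test = MNIST(root="data", train=False, download=True)
x_train = mnist_train.data.numpy().reshape(-1, 784).astype("float32") / 255.0
x_test = mnist_test.data.numpy().reshape(-1, 784).astype("float32") / 255.0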
# Create the autoencoder model and optimizer
autoencoder = Autoencoder()
optimizer = Adam(autoencoder.parameters())
# Define the loss function
criterion = nn.MSELoss()
# Train the autoencoder
for epoch in range(300):
    train_loss = 0
    for batch in train_loader:
        optimizer.zero_grad()
        x, y = batch
        output = autoencoder(x)
        loss = criterion(output, y)
        loss.backward()
        optimizer.step()
        train_loss += loss.item() * x.shape[0]  # accumulate the sum of per-sample losses
    train_loss /= len(train_loader.dataset)  # average over the full training set
    test_loss = 0
    with torch.no_grad():
        for batch in test_loader:
            x, y = batch
            output = autoencoder(x)
            loss = criterion(output, y)
            test_loss += loss.item() * x.shape[0]
    test_loss /= len(test_loader.dataset)

    print(f"Epoch {epoch + 1}, Train Loss: {train_loss:.4f}, Test Loss: {test_loss:.4f}")