import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader, TensorDataset

# Define the autoencoder model

class Autoencoder(nn.Module):
    """Fully connected autoencoder for flat 784-dim inputs (e.g. 28x28 MNIST).

    Encoder compresses 784 -> 256 -> 128 -> 64 -> 32; decoder mirrors it back
    to 784. The final Sigmoid keeps reconstructions in [0, 1], matching the
    BCE loss used for training.
    """

    def __init__(self):
        # Bug fix: the scraped source had `def init` / `self.init()`, which
        # never registers the submodules; it must be `__init__`/`super().__init__()`.
        super(Autoencoder, self).__init__()
        self.encoder = nn.Sequential(
            nn.Linear(784, 256), nn.ReLU(),
            nn.Linear(256, 128), nn.ReLU(),
            nn.Linear(128, 64), nn.ReLU(),
            nn.Linear(64, 32),
        )
        self.decoder = nn.Sequential(
            nn.Linear(32, 64), nn.ReLU(),
            nn.Linear(64, 128), nn.ReLU(),
            nn.Linear(128, 256), nn.ReLU(),
            nn.Linear(256, 784), nn.Sigmoid(),
        )

    def forward(self, x):
        """Encode `x` (shape (N, 784)) to 32 dims and reconstruct it.

        Returns a tensor of shape (N, 784) with values in [0, 1].
        """
        encoded = self.encoder(x)
        decoded = self.decoder(encoded)
        return decoded

# Convert data to tensors and create data loaders

# Wrap the train/test arrays as float tensors and build (input, target)
# loaders; for an autoencoder the target IS the input.
# NOTE(review): assumes x_train / x_test already exist, flattened to
# (N, 784) and scaled to [0, 1] — confirm against the code that loads them.
x_train = torch.Tensor(x_train)
x_test = torch.Tensor(x_test)
train_dataset = TensorDataset(x_train, x_train)
test_dataset = TensorDataset(x_test, x_test)
train_loader = DataLoader(train_dataset, batch_size=128, shuffle=True)
# Fix: no reason to shuffle the evaluation set; order does not affect the
# averaged loss and deterministic order aids debugging.
test_loader = DataLoader(test_dataset, batch_size=128, shuffle=False)

# Instantiate the autoencoder, loss, and optimizer

# Model, loss, and optimizer. BCELoss pairs with the Sigmoid output layer
# and requires inputs/targets in [0, 1]. Adam uses its default lr=1e-3.
autoencoder = Autoencoder()
criterion = nn.BCELoss()
optimizer = optim.Adam(autoencoder.parameters())

# Train the autoencoder

# Training loop: one optimization pass over train_loader per epoch, then a
# gradient-free evaluation pass over test_loader; reports mean BCE for both.
num_epochs = 300
for epoch in range(num_epochs):
    # --- training pass ---
    autoencoder.train()
    running_loss = 0.0
    for inputs, _ in train_loader:  # target == input for an autoencoder
        optimizer.zero_grad()
        outputs = autoencoder(inputs)
        loss = criterion(outputs, inputs)
        loss.backward()
        optimizer.step()
        running_loss += loss.item()
    train_loss = running_loss / len(train_loader)

    # --- evaluation pass ---
    # Fix: switch to eval mode for the test pass (and back to train above).
    # A no-op for this pure Linear/ReLU model, but correct practice and
    # future-proof if dropout/batch-norm layers are added.
    autoencoder.eval()
    with torch.no_grad():
        running_loss = 0.0
        for inputs, _ in test_loader:
            outputs = autoencoder(inputs)
            loss = criterion(outputs, inputs)
            running_loss += loss.item()
        test_loss = running_loss / len(test_loader)

    print(f"Epoch {epoch+1}/{num_epochs}, Train Loss: {train_loss:.6f}, Test Loss: {test_loss:.6f}")

# Original Keras call that the PyTorch loop above replaces:
#   autoencoder.fit(x_train, x_train, epochs=300, batch_size=128, shuffle=True,
#                   validation_data=(x_test, x_test),
#                   callbacks=[EarlyStopping(monitor='val_loss',
#                                            min_delta=0.0001, patience=5)])
# NOTE(review): the EarlyStopping behaviour is NOT reproduced — the PyTorch
# loop always runs the full 300 epochs; add a patience check on test_loss to
# match. (Trailing Chinese text was the conversion request: "convert the
# above code to PyTorch".)

# Source: https://www.cveoy.top/t/topic/bzTo — copyright remains with the
# original author. (Scraped page footer and ad text removed; the Chinese
# footer read: "Original address ... all rights reserved, do not repost.")