This network can be viewed as a heavily simplified stand-in for Informer: a full Informer stacks multiple encoder-decoder layers built on (ProbSparse) self-attention, whereas this model uses only plain fully connected layers. It is still useful as a baseline to improve and extend, for example with attention layers; a hedged sketch of one such extension follows the script.

import torch
import torch.nn as nn
import torch.optim as optim
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from torch.utils.data import Dataset, DataLoader

# Read the CSV file and extract the data we need
# (this assumes columns 1-4 hold the features and the last column the target)
data = pd.read_csv('./data/柳林.csv')
x = data.iloc[:, 1:5].values
y = data.iloc[:, -1].values

# Split the data into training and test sets (80/20 chronological split)
train_size = int(len(x) * 0.8)
train_x, test_x = x[:train_size, :], x[train_size:, :]
train_y, test_y = y[:train_size], y[train_size:]

# Scale features and target to [0, 1]; fit the scalers on the training
# set only, so no test-set statistics leak into training
scaler_x = MinMaxScaler()
train_x = scaler_x.fit_transform(train_x)
test_x = scaler_x.transform(test_x)

scaler_y = MinMaxScaler()
train_y = scaler_y.fit_transform(train_y.reshape(-1, 1))
test_y = scaler_y.transform(test_y.reshape(-1, 1))

# Dataset class wrapping the feature and target arrays
class MyDataset(Dataset):
    def __init__(self, x, y):
        self.x = x
        self.y = y

    def __getitem__(self, idx):
        return self.x[idx], self.y[idx]

    def __len__(self):
        return len(self.x)

# Build the simplified "Informer" model: an MLP encoder-decoder
class Informer(nn.Module):
    def __init__(self, input_size, output_size):
        super(Informer, self).__init__()
        self.encoder = nn.Sequential(
            nn.Linear(input_size, 64),
            nn.ReLU(),
            nn.Linear(64, 128),
            nn.ReLU(),
            nn.Linear(128, 256),
            nn.ReLU(),
            nn.Linear(256, 512),
            nn.ReLU(),
            nn.Linear(512, 1024)
        )
        self.decoder = nn.Sequential(
            nn.Linear(1024, 512),
            nn.ReLU(),
            nn.Linear(512, 256),
            nn.ReLU(),
            nn.Linear(256, 128),
            nn.ReLU(),
            nn.Linear(128, 64),
            nn.ReLU(),
            nn.Linear(64, output_size)
        )

    def forward(self, x):
        x = self.encoder(x)
        x = self.decoder(x)
        return x

# Model hyperparameters
input_size = 4
output_size = 1
batch_size = 16
learning_rate = 0.001
num_epochs = 100

# Build the datasets and data loaders
train_set = MyDataset(train_x, train_y)
train_loader = DataLoader(train_set, batch_size=batch_size, shuffle=True)
test_set = MyDataset(test_x, test_y)
test_loader = DataLoader(test_set, batch_size=batch_size, shuffle=False)
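
# (Optional sanity check, illustrative only: with the shapes assumed above,
# a training batch should come out as x of shape (batch_size, 4) and
# y of shape (batch_size, 1).)
# xb, yb = next(iter(train_loader))
# print(xb.shape, yb.shape)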

# Initialize the model, loss function, and optimizer
model = Informer(input_size, output_size)
criterion = nn.MSELoss()
optimizer = optim.Adam(model.parameters(), lr=learning_rate)

# Train the model and save its weights
for epoch in range(num_epochs):
    epoch_loss = 0.0
    for x, y in train_loader:
        x = x.float()
        y = y.float()
        optimizer.zero_grad()
        output = model(x)
        loss = criterion(output, y)
        loss.backward()
        optimizer.step()
        epoch_loss += loss.item()

    # Report the average loss over the epoch, not just the last batch
    print('Epoch [{}/{}], Loss: {:.4f}'.format(epoch + 1, num_epochs, epoch_loss / len(train_loader)))

torch.save(model.state_dict(), 'informer.pth')
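
# The saved weights can be restored later with the standard PyTorch
# pattern (shown as comments, since this script keeps the model in memory):
# model = Informer(input_size, output_size)
# model.load_state_dict(torch.load('informer.pth'))
# model.eval()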

# Evaluate the model: report the test loss and evaluation metrics
model.eval()
test_loss = 0
predictions, targets = [], []
with torch.no_grad():
    for x, y in test_loader:
        x = x.float()
        y = y.float()
        output = model(x)
        test_loss += criterion(output, y).item()
        predictions.append(output.numpy())
        targets.append(y.numpy())

test_loss /= len(test_loader)
print('Test Loss: {:.4f}'.format(test_loss))

# Inverse-transform the predictions and targets for the whole test set
predict = scaler_y.inverse_transform(np.concatenate(predictions))
target = scaler_y.inverse_transform(np.concatenate(targets))

# Report R-squared, MSE, MAPE, and MAE
# (using the residual-based definition R^2 = 1 - SSE/SST)
sse = ((target - predict) ** 2).sum()
sst = ((target - target.mean()) ** 2).sum()
r2_score = 1 - sse / sst
mse = np.mean((predict - target) ** 2)
mape = np.mean(np.abs((predict - target) / target))  # assumes no zero targets
mae = np.mean(np.abs(predict - target))

print('R2 Score: {:.4f}'.format(r2_score))
print('MSE: {:.4f}'.format(mse))
print('MAPE: {:.4f}'.format(mape))
print('MAE: {:.4f}'.format(mae))
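
As noted at the top, this MLP baseline leaves room for improvement and extension toward a real Informer. Below is a minimal sketch of one such step: swapping the fully connected encoder for a self-attention encoder. It uses PyTorch's built-in nn.TransformerEncoder (standard multi-head attention, not Informer's ProbSparse attention), and the class name, layer sizes, and head count are illustrative assumptions rather than anything from the original script.

import torch
import torch.nn as nn

class AttentionForecaster(nn.Module):
    def __init__(self, input_size, output_size, d_model=64, nhead=4, num_layers=2):
        super().__init__()
        # Project raw features up to the attention dimension
        self.input_proj = nn.Linear(input_size, d_model)
        # Standard multi-head self-attention encoder (batch-first tensors)
        encoder_layer = nn.TransformerEncoderLayer(d_model=d_model, nhead=nhead, batch_first=True)
        self.encoder = nn.TransformerEncoder(encoder_layer, num_layers=num_layers)
        # Linear head mapping the encoded sequence to the prediction
        self.head = nn.Linear(d_model, output_size)

    def forward(self, x):
        # x: (batch, seq_len, input_size)
        x = self.input_proj(x)
        x = self.encoder(x)
        # Predict from the representation of the last time step
        return self.head(x[:, -1, :])

Because the script above feeds one time step at a time, inputs would need a length-1 sequence dimension (for example, model(x.unsqueeze(1))); a fuller Informer-style model would instead consume windows of past time steps, which is where self-attention actually pays off.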