import os
import random

import numpy as np
import pandas as pd

import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torch.utils.data as Data

from sklearn.model_selection import train_test_split
from tqdm import tqdm

Set the device

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

Load the electric power consumption dataset

df = pd.read_csv('Electric_Power_Consumption.csv',
                 delimiter=';',
                 parse_dates={'dt': ['Date', 'Time']},
                 infer_datetime_format=True,
                 na_values=['nan', '?'],
                 index_col='dt')

Fill missing values

df.fillna(method='ffill', inplace=True)

Resample to hourly means

df = df.resample('H').mean()

Normalize the data

df = (df - df.mean()) / df.std()

Split the data into training and test sets

train_df, test_df = train_test_split(df, test_size=0.2, shuffle=False)

Convert the data into sliding-window sequences

seq_len = 24 * 7    # one week of hourly steps
timestep_pred = 24  # predict 24 hours ahead

train_data = []
for i in range(seq_len, len(train_df) - timestep_pred):
    train_data.append(train_df.iloc[i - seq_len:i].values.tolist())
train_data = np.array(train_data)
train_labels = train_df.iloc[seq_len + timestep_pred:].values

test_data = []
for i in range(seq_len, len(test_df) - timestep_pred):
    test_data.append(test_df.iloc[i - seq_len:i].values.tolist())
test_data = np.array(test_data)
test_labels = test_df.iloc[seq_len + timestep_pred:].values

Print dataset shapes

print('Training set shapes: train_data: %s, train_labels: %s' % (train_data.shape, train_labels.shape))
print('Test set shapes: test_data: %s, test_labels: %s' % (test_data.shape, test_labels.shape))

Convert the data to PyTorch tensors

train_y_ts = torch.from_numpy(train_labels).float().to(device)
train_X_ts = torch.from_numpy(train_data).float().to(device)

test_y_ts = torch.from_numpy(test_labels).float().to(device)
test_X_ts = torch.from_numpy(test_data).float().to(device)

Create the training and test datasets

train_set = Data.TensorDataset(train_X_ts, train_y_ts)
test_set = Data.TensorDataset(test_X_ts, test_y_ts)

Set the federated learning parameters

num_clients = 10   # number of clients
num_selected = 10  # number of clients selected per round
num_rounds = 29    # number of training rounds
epochs = 1         # local epochs per round
batch_size = 64    # batch size

Randomly split the training set across the clients

split_sizes = [int(train_labels.shape[0] / num_clients)] * (num_clients - 1)
split_sizes.append(train_labels.shape[0] - sum(split_sizes))
traindata_split = torch.utils.data.random_split(train_set, split_sizes)

Create the data loaders

train_loader = [torch.utils.data.DataLoader(x, batch_size=batch_size, shuffle=True) for x in traindata_split]
test_loader = torch.utils.data.DataLoader(test_set, batch_size=batch_size, shuffle=True)
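As an optional sanity check, the per-client split sizes and the shape of one training batch can be inspected; given the windowing above, each input batch should have shape [batch_size, seq_len, num_features] and each target batch [batch_size, num_features].

# Optional sanity check: per-client sample counts and batch shapes.
print([len(subset) for subset in traindata_split])

xb, yb = next(iter(train_loader[0]))
print(xb.shape)  # expected: [batch_size, seq_len, num_features]
print(yb.shape)  # expected: [batch_size, num_features]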

Define the LSTM model

class LSTM(nn.Module):
    def __init__(self, input_dim, hidden_dim, seq_len, num_layers=2):
        super(LSTM, self).__init__()
        self.input_dim = input_dim
        self.hidden_dim = hidden_dim
        self.seq_len = seq_len
        self.num_layers = num_layers

        self.lstm = nn.LSTM(input_dim, hidden_dim, num_layers, batch_first=True)
        self.fc = nn.Linear(hidden_dim * seq_len, 1)

    def forward(self, x):
        # Zero initial hidden and cell states
        h0 = torch.zeros(self.num_layers, x.size(0), self.hidden_dim).to(device)
        c0 = torch.zeros(self.num_layers, x.size(0), self.hidden_dim).to(device)

        out, (hn, cn) = self.lstm(x, (h0, c0))
        out = out.reshape(out.size(0), -1)  # flatten all time steps: [batch, seq_len * hidden_dim]
        out = self.fc(out)                  # one predicted value per sample: [batch, 1]

        return out
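As a quick shape check for this model (the batch of 4 random windows and the helper names num_features, check_model and dummy below are only illustrative), a one-week window with the dataset's feature count should map to a single predicted value per sample:

# Quick shape check with an arbitrary batch of 4 random windows.
num_features = train_data.shape[2]
check_model = LSTM(num_features, 128, seq_len).to(device)
dummy = torch.randn(4, seq_len, num_features).to(device)
print(check_model(dummy).shape)  # expected: torch.Size([4, 1])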

Define the particle

class Particle:
    def __init__(self, model):
        # Initialize the particle's position and velocity from the model parameters
        self.position = [param.data.detach().clone() for param in model.parameters()]
        self.velocity = [torch.zeros_like(param) for param in self.position]
        self.best_position = [param.data.detach().clone() for param in self.position]
        self.best_loss = float('inf')

Define the PSO algorithm

class PSO:
    def __init__(self, model, criterion, lr=0.01, momentum=0.8, weight_decay=0.001):
        # Initialize the PSO hyperparameters and the swarm
        self.model = model
        self.criterion = criterion
        self.lr = lr
        self.momentum = momentum
        self.weight_decay = weight_decay
        self.particles = [Particle(model) for _ in range(num_selected)]

    # Update particle velocities and positions
    def step(self):
        # global_best_position is set in the training loop before step() is first called
        for i in range(num_selected):
            # Update velocity: inertia term plus pulls towards the personal and global bests
            for j, param in enumerate(self.particles[i].position):
                self.particles[i].velocity[j] = (
                    self.momentum * self.particles[i].velocity[j]
                    + 2 * torch.rand_like(param) * (self.particles[i].best_position[j] - param)
                    + 2 * torch.rand_like(param) * (global_best_position[j] - param)
                )
                self.particles[i].velocity[j].clamp_(-1, 1)  # clamp the velocity to [-1, 1]

            # Update position
            for j, param in enumerate(self.particles[i].position):
                self.particles[i].position[j] += self.lr * self.particles[i].velocity[j]
                self.particles[i].position[j].clamp_(-1, 1)  # clamp the position to [-1, 1]

            # Update the particle's best position and loss
            self.update_best(i)

    # Update a particle's best position and loss
    def update_best(self, i):
        # Load the particle's position into the model parameters
        for param, position in zip(self.model.parameters(), self.particles[i].position):
            param.data.copy_(position)

        # Evaluate the model
        loss = self.evaluate_fitness()

        # Update the particle's best position and loss
        if loss < self.particles[i].best_loss:
            self.particles[i].best_position = [position.detach().clone() for position in self.particles[i].position]
            self.particles[i].best_loss = loss

    # Evaluate fitness as the MSE on the test set
    def evaluate_fitness(self):
        return self.evaluate(test_loader)

    # Get the best particle in the swarm
    def get_best(self):
        best_particle = min(self.particles, key=lambda x: x.best_loss)
        return best_particle.best_position, best_particle.best_loss

    # Evaluate the model on a data loader
    def evaluate(self, data_loader):
        total_loss = 0.0
        with torch.no_grad():
            for data, target in data_loader:
                output = self.model(data)
                # The model predicts one value per sample; compare against the
                # first feature column, the same target used during training
                loss = self.criterion(output, target[:, :1])
                total_loss += loss.item() * len(data)

        return total_loss / len(data_loader.dataset)
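The velocity update in step() is the standard PSO rule with inertia weight w taken from momentum and cognitive/social coefficients c1 = c2 = 2. For reference, a standalone per-tensor form of that update (a hypothetical helper, not called anywhere in this script) would look like:

# Reference form of the PSO update used in step() (hypothetical helper).
def pso_update(position, velocity, pbest, gbest, w=0.8, c1=2.0, c2=2.0, lr=0.01):
    r1, r2 = torch.rand_like(position), torch.rand_like(position)
    velocity = w * velocity + c1 * r1 * (pbest - position) + c2 * r2 * (gbest - position)
    velocity = velocity.clamp(-1, 1)                     # same velocity clamp as step()
    position = (position + lr * velocity).clamp(-1, 1)   # same position clamp as step()
    return position, velocity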

Train the model with PSO

input_dim = train_data.shape[2]
hidden_dim = 128
model = LSTM(input_dim, hidden_dim, seq_len).to(device)
criterion = nn.MSELoss()
optimizer_pso = PSO(model=model, criterion=criterion)

Initialize the global best position and loss

global_best_loss = float('inf')
global_best_position = None

Training loop

for r in range(num_rounds):
    # Randomly select the particles/clients participating in this round
    selected_idx = random.sample(range(len(optimizer_pso.particles)), num_selected)

    # Local training loop for each selected client
    for i, p_idx in enumerate(selected_idx):
        particle = optimizer_pso.particles[p_idx]

        # Local Adam optimizer for this client
        optimizer = optim.Adam(model.parameters(), lr=0.01, weight_decay=0.001)

        # Load the particle's position into the model
        for param, particle_param in zip(model.parameters(), particle.position):
            param.data.copy_(particle_param)

        # Local training
        for epoch in range(epochs):
            for data, target in tqdm(train_loader[i], desc='Round %d/%d, client %d/%d' % (r + 1, num_rounds, i + 1, num_selected)):
                # Use the first feature column as the regression target: shape [batch_size, 1]
                target = target[:, :1]
                optimizer.zero_grad()
                output = model(data)
                loss = criterion(output, target)
                loss.backward()
                optimizer.step()

        # Sync the particle's position with the locally trained weights
        for particle_param, param in zip(particle.position, model.parameters()):
            particle_param.copy_(param.data)

        # Update the particle's best position and loss
        optimizer_pso.update_best(p_idx)

        # Update the global best position and loss
        if particle.best_loss < global_best_loss:
            global_best_loss = particle.best_loss
            global_best_position = [param.detach().clone() for param in particle.best_position]

        # Evaluate this client's model on its training data and on the test set
        train_mse = optimizer_pso.evaluate(train_loader[i])
        test_mse = optimizer_pso.evaluate(test_loader)
        print('Round %d/%d, client %d/%d, train MSE: %.4f, test MSE: %.4f' % (r + 1, num_rounds, i + 1, num_selected, train_mse, test_mse))

    # Update particle velocities and positions with PSO
    optimizer_pso.step()

    # Evaluate at the end of the round
    train_mse = optimizer_pso.evaluate(train_loader[0])
    test_mse = optimizer_pso.evaluate(test_loader)
    print('Round evaluation, train MSE: %.4f, test MSE: %.4f' % (train_mse, test_mse))

    # Print progress
    print('Round %d/%d, global best loss: %.4f' % (r + 1, num_rounds, global_best_loss))
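The loop above tracks global_best_position but never loads it back into the model; as an optional final step, the global best weights can be restored and scored on the test set:

# Optional final step: restore the global best position and report its test MSE.
if global_best_position is not None:
    for param, best_param in zip(model.parameters(), global_best_position):
        param.data.copy_(best_param)
    print('Global best test MSE: %.4f' % optimizer_pso.evaluate(test_loader))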
