我想预测未来五天的温度数据,输入数据是前6天的温度数据。我的 outputs 哪个才是我的预测结果呢?根据我的代码,模型 outputs 应该是由 layer_output_list 和 last_state_list 组成的,并且由于 return_all_layers=False,所以应该只返回最后一层的参数。那么我想知道:我代码里输出的 outputs 是个元组,这个元组是不是就是我预测的结果?我可以直接用这个元组来计算损失并作为预测值吗?
根据您的代码,outputs 是由 layer_output_list 和 last_state_list 组成的元组,其中 layer_output_list 包含了每一层 LSTM 的输出,而 last_state_list 包含了每一层 LSTM 最后一个时间步的隐藏状态和细胞状态。
如果您只需要最后一个时间步的预测结果,可以使用 outputs[0][:,-1,:,:,:],这样可以获取最后一层 LSTM 在最后一个时间步的输出。如果您需要所有时间步的预测结果,可以使用 outputs[0],这样可以获取所有层 LSTM 的输出。不过需要注意的是,此时的输出维度为 (batch_size, num_time_steps-6, hidden_dim, num_lon, num_dep),需要进行 reshape 才能和标签数据进行比较计算损失函数。
修改后的代码如下:
import pandas as pd import numpy as np import torch import torch.nn as nn import torch.optim as optim from datetime import datetime from torch.utils.data import DataLoader from torch.utils.data import TensorDataset
# Select the compute device once at module level: first GPU if available, else CPU.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
class ConvLSTMCell(nn.Module):
    """A single ConvLSTM cell: an LSTM cell whose gate pre-activations are
    produced by one 2D convolution over the concatenated input and hidden state.
    """

    def __init__(self, input_dim, hidden_dim, kernel_size, bias):
        """
        Args:
            input_dim (int): number of channels of the input tensor.
            hidden_dim (int): number of channels of the hidden state.
            kernel_size (tuple[int, int]): (height, width) of the conv kernel.
            bias (bool): whether the convolution uses a bias term.
        """
        # BUG FIX: the original defined ``init``/called ``super().init()`` —
        # the constructor never ran as ``__init__``.
        super(ConvLSTMCell, self).__init__()
        self.input_dim = input_dim
        self.hidden_dim = hidden_dim
        self.kernel_size = kernel_size
        # "Same" padding so the spatial size of the hidden state is preserved.
        self.padding = kernel_size[0] // 2, kernel_size[1] // 2
        # BUG FIX: was hard-coded ``self.bias = True``, ignoring the argument.
        self.bias = bias
        # One convolution yields all four gate pre-activations (i, f, o, g).
        # BUG FIX: the original called ``.cuda()`` here, which breaks CPU runs;
        # device placement is now left to the caller (e.g. ``model.to(device)``).
        self.conv = nn.Conv2d(in_channels=self.input_dim + self.hidden_dim,
                              out_channels=4 * self.hidden_dim,
                              kernel_size=self.kernel_size,
                              padding=self.padding,
                              bias=self.bias)

    def forward(self, input_tensor, cur_state):
        """Advance the cell one time step.

        Args:
            input_tensor: (batch, input_dim, height, width) tensor.
            cur_state: pair (h_cur, c_cur), each (batch, hidden_dim, height, width).

        Returns:
            (h_next, c_next): the updated hidden and cell states.
        """
        h_cur, c_cur = cur_state
        # Keep the hidden state on the same device as the conv weights
        # (replaces the original unconditional ``h_cur.cuda()``).
        h_cur = h_cur.to(self.conv.weight.device)
        combined = torch.cat([input_tensor, h_cur], dim=1)
        combined_conv = self.conv(combined.float())
        cc_i, cc_f, cc_o, cc_g = torch.split(combined_conv, self.hidden_dim, dim=1)
        i = torch.sigmoid(cc_i)  # input gate
        f = torch.sigmoid(cc_f)  # forget gate
        o = torch.sigmoid(cc_o)  # output gate
        g = torch.tanh(cc_g)     # candidate cell update
        c_next = f * c_cur + i * g
        h_next = o * torch.tanh(c_next)
        return h_next, c_next

    def init_hidden(self, batch_size, image_size):
        """Return zero-initialized (h, c) on the same device as the conv weights."""
        height, width = image_size
        return (torch.zeros(batch_size, self.hidden_dim, height, width, device=self.conv.weight.device),
                torch.zeros(batch_size, self.hidden_dim, height, width, device=self.conv.weight.device))
class ConvLSTM(nn.Module):
    """Multi-layer ConvLSTM.

    ``forward`` returns ``(outputs, last_state_list)``.  With
    ``return_all_layers=False`` (the default), ``outputs`` is the LAST layer's
    output at the LAST time step, reshaped to
    ``(batch, 1, hidden_dim, height, width)`` — i.e. the prediction tensor.
    With ``return_all_layers=True``, ``outputs`` is a list holding every
    layer's full ``(batch, time, hidden_dim, height, width)`` sequence.
    """

    def __init__(self, input_dim, hidden_dim, kernel_size, num_layers,
                 batch_first=False, bias=True, return_all_layers=False):
        """
        Args:
            input_dim (int): channels of the input tensor.
            hidden_dim (int | list[int]): hidden channels per layer.
            kernel_size (tuple | list[tuple]): conv kernel per layer.
            num_layers (int): number of stacked ConvLSTM layers.
            batch_first (bool): True if input is (b, t, c, h, w).
            bias (bool): whether conv layers use a bias term.
            return_all_layers (bool): return every layer's sequence if True.
        """
        # BUG FIX: the original defined ``init``/called ``super().init()`` —
        # the constructor never ran as ``__init__``.
        super(ConvLSTM, self).__init__()
        self._check_kernel_size_consistency(kernel_size)
        # Broadcast scalar settings to one entry per layer.
        kernel_size = self._extend_for_multilayer(kernel_size, num_layers)
        hidden_dim = self._extend_for_multilayer(hidden_dim, num_layers)
        if not len(kernel_size) == len(hidden_dim) == num_layers:
            raise ValueError('Inconsistent list length.')
        self.input_dim = input_dim
        self.hidden_dim = hidden_dim
        self.kernel_size = kernel_size
        self.num_layers = num_layers
        self.batch_first = batch_first
        self.bias = bias
        self.return_all_layers = return_all_layers
        cell_list = []
        for i in range(0, self.num_layers):
            # Layer 0 consumes the raw input; deeper layers consume the
            # previous layer's hidden-state sequence.
            cur_input_dim = self.input_dim if i == 0 else self.hidden_dim[i - 1]
            cell_list.append(ConvLSTMCell(input_dim=cur_input_dim,
                                          hidden_dim=self.hidden_dim[i],
                                          kernel_size=self.kernel_size[i],
                                          bias=self.bias))
        self.cell_list = nn.ModuleList(cell_list)

    def forward(self, input_tensor, hidden_state=None):
        """
        Args:
            input_tensor: (t, b, c, h, w) tensor, or (b, t, c, h, w) if
                ``batch_first`` is True.
            hidden_state: must be None; stateful use is not implemented.

        Returns:
            (outputs, last_state_list) — see the class docstring.
        """
        if not self.batch_first:
            # (t, b, c, h, w) -> (b, t, c, h, w)
            input_tensor = input_tensor.permute(1, 0, 2, 3, 4)
        # BUG FIX: the original unpacked the sizes into ``h``/``w`` and later
        # rebound ``h`` to a hidden-state tensor inside the layer loop, so the
        # final reshape used a tensor as a dimension and crashed.  Distinct
        # names (``height``/``width``) avoid the shadowing.
        b, seq_len, _, height, width = input_tensor.size()
        if hidden_state is not None:
            raise NotImplementedError()
        hidden_state = self._init_hidden(batch_size=b, image_size=(height, width))
        layer_output_list = []
        last_state_list = []
        cur_layer_input = input_tensor
        for layer_idx in range(self.num_layers):
            h, c = hidden_state[layer_idx]
            output_inner = []
            for t in range(seq_len):
                h, c = self.cell_list[layer_idx](input_tensor=cur_layer_input[:, t, :, :, :],
                                                 cur_state=[h, c])
                output_inner.append(h)
            # Stack the per-step hidden states into (b, t, hidden, height, width).
            layer_output = torch.stack(output_inner, dim=1)
            cur_layer_input = layer_output
            layer_output_list.append(layer_output)
            last_state_list.append([h, c])
        if not self.return_all_layers:
            # Prediction path: last layer, last time step,
            # shaped (b, 1, hidden_dim, height, width).
            outputs = layer_output_list[-1][:, -1, :, :, :]
            outputs = torch.reshape(outputs, (b, -1, self.hidden_dim[-1], height, width))
        else:
            # BUG FIX: the original also ran torch.reshape on this Python
            # list, which raises; the list is returned as-is instead.
            outputs = layer_output_list
        return outputs, last_state_list

    def _init_hidden(self, batch_size, image_size):
        """Build zero-initialized (h, c) pairs, one per layer."""
        init_states = []
        for i in range(self.num_layers):
            init_states.append(self.cell_list[i].init_hidden(batch_size, image_size))
        return init_states

    @staticmethod
    def _check_kernel_size_consistency(kernel_size):
        """Require kernel_size to be a (h, w) tuple or a list of such tuples."""
        if not (isinstance(kernel_size, tuple) or
                (isinstance(kernel_size, list) and all([isinstance(elem, tuple) for elem in kernel_size]))):
            raise ValueError('`kernel_size` must be tuple or list of tuples')

    @staticmethod
    def _extend_for_multilayer(param, num_layers):
        """Repeat a scalar per-layer setting ``num_layers`` times; pass lists through."""
        if not isinstance(param, list):
            param = [param] * num_layers
        return param
#实例化对象 model = ConvLSTM(input_dim=1, hidden
原文地址: https://www.cveoy.top/t/topic/bsSz 著作权归作者所有。请勿转载和采集!