问题出在模型的输出部分,因为您设置了return_all_layers为False,但是在定义forward函数时,最后返回的是layer_output_list和last_state_list这两个列表,而不是layer_output_list[-1]和last_state_list[-1],所以最终输出的outputs是一个包含多个张量的列表。要解决这个问题,需要修改forward函数,将最后返回的部分改为只返回最后一层的输出和隐藏状态,即将layer_output_list和last_state_list改为layer_output_list[-1]和last_state_list[-1]。修改后的代码如下:

class ConvLSTM(nn.Module):
    """Multi-layer convolutional LSTM.

    Stacks ``num_layers`` ConvLSTMCell instances; the hidden-state sequence of
    layer ``i`` feeds layer ``i + 1``.  Input is a 5-D tensor, either
    ``(t, b, c, h, w)`` or ``(b, t, c, h, w)`` when ``batch_first=True``.

    Args:
        input_dim: number of channels of the input tensor.
        hidden_dim: hidden channels per layer (int or list of length num_layers).
        kernel_size: conv kernel per layer (tuple or list of tuples).
        num_layers: number of stacked ConvLSTM layers.
        batch_first: if True, input/output are ``(b, t, c, h, w)``.
        bias: whether the cells use a bias term.
        return_all_layers: if True, ``forward`` returns per-layer lists;
            otherwise only the last layer's output and state.
    """

    def __init__(self, input_dim, hidden_dim, kernel_size, num_layers,
                 batch_first=False, bias=True, return_all_layers=False):
        # NOTE: the original paste had `def init` / `super().init()`, so the
        # constructor was never invoked by nn.Module machinery; fixed here.
        super(ConvLSTM, self).__init__()

        self._check_kernel_size_consistency(kernel_size)

        # Make sure both `kernel_size` and `hidden_dim` are lists of length num_layers.
        kernel_size = self._extend_for_multilayer(kernel_size, num_layers)
        hidden_dim = self._extend_for_multilayer(hidden_dim, num_layers)
        if not len(kernel_size) == len(hidden_dim) == num_layers:
            raise ValueError('Inconsistent list length.')

        self.input_dim = input_dim
        self.hidden_dim = hidden_dim
        self.kernel_size = kernel_size
        self.num_layers = num_layers
        self.batch_first = batch_first
        self.bias = bias
        self.return_all_layers = return_all_layers

        cell_list = []
        for i in range(self.num_layers):
            # Layer 0 sees the raw input; deeper layers see the previous
            # layer's hidden channels.
            cur_input_dim = self.input_dim if i == 0 else self.hidden_dim[i - 1]
            cell_list.append(ConvLSTMCell(input_dim=cur_input_dim,
                                          hidden_dim=self.hidden_dim[i],
                                          kernel_size=self.kernel_size[i],
                                          bias=self.bias))

        # ModuleList so the cells' parameters are registered with this module.
        self.cell_list = nn.ModuleList(cell_list)

    def forward(self, input_tensor, hidden_state=None):
        """Run the stack over a sequence.

        Args:
            input_tensor: 5-D tensor, ``(t, b, c, h, w)`` or
                ``(b, t, c, h, w)`` if ``batch_first``.
            hidden_state: must be None; stateful use is not implemented.

        Returns:
            ``(layer_output, last_state)`` for the top layer when
            ``return_all_layers`` is False, otherwise the per-layer lists
            ``(layer_output_list, last_state_list)``.
        """
        if not self.batch_first:
            # (t, b, c, h, w) -> (b, t, c, h, w)
            input_tensor = input_tensor.permute(1, 0, 2, 3, 4)

        # Renamed from `b, t, _, h, w` — the old names were clobbered by the
        # state variable `h` and the time-loop index `t` below.
        b, _, _, height, width = input_tensor.size()

        # Stateful ConvLSTM (carrying state across calls) is not supported.
        if hidden_state is not None:
            raise NotImplementedError()
        else:
            # Init happens in forward so the image size can be passed here.
            hidden_state = self._init_hidden(batch_size=b,
                                             image_size=(height, width))

        layer_output_list = []
        last_state_list = []

        seq_len = input_tensor.size(1)
        cur_layer_input = input_tensor

        for layer_idx in range(self.num_layers):
            h, c = hidden_state[layer_idx]

            output_inner = []
            for t in range(seq_len):
                h, c = self.cell_list[layer_idx](
                    input_tensor=cur_layer_input[:, t, :, :, :],
                    cur_state=[h, c])
                output_inner.append(h)

            # (b, t, hidden_dim, height, width) for this layer.
            layer_output = torch.stack(output_inner, dim=1)
            cur_layer_input = layer_output

            layer_output_list.append(layer_output)
            last_state_list.append([h, c])

        if not self.return_all_layers:
            # Expose only the top layer's sequence output and final (h, c).
            return layer_output_list[-1], last_state_list[-1]
        return layer_output_list, last_state_list

    def _init_hidden(self, batch_size, image_size):
        """Return zero-initialised (h, c) pairs, one per layer."""
        return [cell.init_hidden(batch_size, image_size)
                for cell in self.cell_list]

    @staticmethod
    def _check_kernel_size_consistency(kernel_size):
        """Reject kernel_size values that are not a tuple or list of tuples."""
        if not (isinstance(kernel_size, tuple) or
                (isinstance(kernel_size, list) and
                 all(isinstance(elem, tuple) for elem in kernel_size))):
            raise ValueError('`kernel_size` must be tuple or list of tuples')

    @staticmethod
    def _extend_for_multilayer(param, num_layers):
        """Replicate a scalar hyper-parameter into a per-layer list."""
        if not isinstance(param, list):
            param = [param] * num_layers
        return param

# Instantiate the model: two ConvLSTM layers, each with 64 hidden channels
# and a (1, 55) kernel, over single-channel input.
model = ConvLSTM(input_dim=1,
                 hidden_dim=[64, 64],
                 kernel_size=[(1, 55), (1, 55)],
                 num_layers=2)

# Loss and optimizer.  NOTE(review): CrossEntropyLoss expects class logits;
# confirm this matches the regression-looking temperature data below.
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(model.parameters(), lr=0.001, momentum=0.9)

# Read the Excel file and sort by date / longitude / depth so the index
# computation that fills the tensor below is deterministic.
df = pd.read_excel(r'C:\Users\19738\Desktop\数据集\01.xlsx')
df = df.sort_values(by=['日期', '经度', '深度'])

# Convert to numpy and pull out the coordinate columns.
data = df.values
time = df.iloc[:, 0]       # date column
longitude = df.iloc[:, 1]  # longitude column
depth = df.iloc[:, 2]      # depth column

# Target tensor shape: (num_samples, num_time_steps, 1, num_lon, num_dep).
num_samples = 100
num_lon = 2
num_dep = 55
num_time_steps = 11

# Pre-allocate the data tensor with zeros; it is filled from the DataFrame
# in the nested loops that follow.
temp_data = np.zeros((num_samples, num_time_steps, 1, num_lon, num_dep))

for i in range(num_samples): #在这个实现中,我们使用了四个嵌套的循环来遍历数据张量中的所有位置。在每个位置上,我们根据当前时间、经度和深度信息计算对应的索引,然后从DataFrame中获取对应的温度值,并填充到数据张量中。注意,为了计算索引,我们需要将时间、经度和深度信息分别转换成字符串类型。 for j in range(num_lon): for k in range(num_dep): for t in range(num_time_steps): # 根据时间、经度、深度信息计算对应的索引 date_str = time

下面是我的整体代码,现在存在一个问题:我的 return_all_layers 设置为 False,可是打印模型的输出 outputs 后发现它包含多个张量,形如 tensor([0.0243, 0.0280, 0.0281, 0.0083, 0.0123, 0.0105, ..., -0.0117, ...])。

原文地址: https://www.cveoy.top/t/topic/brBN 著作权归作者所有。请勿转载和采集!

免费AI点我,无需注册和登录