对以下代码进行注释:下面是一个定义训练函数 train 的代码片段(接收训练/测试特征与标签,默认训练 400 个 epoch,使用无 reduction 的 MSELoss,并且线性层不设置偏置,因为偏置已经在多项式特征中实现)。
def train(train_features, test_features, train_labels, test_labels,
          num_epochs=400):
    """Train a one-layer linear model and plot train/test loss curves.

    Args:
        train_features: training inputs; last dim is the feature count.
        test_features: test inputs with the same feature dimensionality.
        train_labels: training targets; reshaped internally to (n, 1).
        test_labels: test targets; reshaped internally to (n, 1).
        num_epochs: number of training epochs (default 400).

    Side effects: updates a d2l.Animator plot every 20 epochs and prints
    the learned weight matrix when training finishes.
    """
    # Per-sample squared error; averaging is left to the training helpers.
    loss = nn.MSELoss(reduction='none')
    # Feature dimensionality of the training data.
    input_shape = train_features.shape[-1]
    # Single linear layer without a bias term, because the bias is already
    # implemented in the polynomial features.
    net = nn.Sequential(nn.Linear(input_shape, 1, bias=False))
    # Batch size is 10, or the number of training samples if smaller.
    batch_size = min(10, train_labels.shape[0])
    # Build data iterators; labels are reshaped into column vectors.
    train_iter = d2l.load_array((train_features, train_labels.reshape(-1, 1)),
                                batch_size)
    test_iter = d2l.load_array((test_features, test_labels.reshape(-1, 1)),
                               batch_size, is_train=False)
    # Plain stochastic gradient descent on the network parameters.
    trainer = torch.optim.SGD(net.parameters(), lr=0.01)
    # Animator for live train/test loss curves on a log-scaled y axis.
    animator = d2l.Animator(xlabel='epoch', ylabel='loss', yscale='log',
                            xlim=[1, num_epochs], ylim=[1e-3, 1e2],
                            legend=['train', 'test'])
    for epoch in range(num_epochs):
        # One full pass over the training data.
        d2l.train_epoch_ch3(net, train_iter, loss, trainer)
        # Record train/test loss at the first epoch and every 20th epoch.
        if epoch == 0 or (epoch + 1) % 20 == 0:
            animator.add(epoch + 1,
                         (evaluate_loss(net, train_iter, loss),
                          evaluate_loss(net, test_iter, loss)))
    # Print the learned weights. Fix: the original line was missing the
    # closing parenthesis, which made the whole block a SyntaxError.
    print('weight:', net[0].weight.data.numpy())
原文地址: https://www.cveoy.top/t/topic/ckVr 著作权归作者所有。请勿转载和采集!