以下是Python实现的代码:

import numpy as np
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split

# 加载数据集
# Load the iris dataset: 150 samples, 4 features, 3 classes
iris = load_iris()
X = iris.data
y = iris.target

# One-hot encode the integer labels with an identity-matrix lookup:
# row y[i] of eye(3) is exactly the one-hot vector for class y[i]
y_onehot = np.eye(3)[y]

# Hold out 20% of the samples as a test set
X_train, X_test, y_train, y_test = train_test_split(X, y_onehot, test_size=0.2)

# 定义sigmoid函数
def sigmoid(x):
    """Element-wise logistic function 1 / (1 + e^(-x))."""
    denom = 1 + np.exp(-x)
    return np.reciprocal(denom)

# 定义sigmoid函数的导数
def sigmoid_derivative(x):
    """Derivative of the sigmoid, expressed in terms of its *output*.

    Expects x = sigmoid(z) (an activation value), not the pre-activation z,
    since sigmoid'(z) = sigmoid(z) * (1 - sigmoid(z)).
    """
    complement = 1 - x
    return x * complement

# 定义softmax函数
def softmax(x):
    """Row-wise softmax of a 2-D array of logits.

    Subtracts each row's maximum before exponentiating so that large logits
    do not overflow np.exp to inf (producing nan probabilities); the shift
    cancels out exactly in the normalization, so the result is unchanged
    mathematically.
    """
    shifted = x - np.max(x, axis=1, keepdims=True)
    exp_x = np.exp(shifted)
    return exp_x / np.sum(exp_x, axis=1, keepdims=True)

# 定义损失函数
def loss(y_true, y_pred):
    """Mean cross-entropy over samples.

    Sums -y*log(p) across the class axis first, then averages over samples,
    so the value is the standard per-sample cross-entropy. (The previous
    np.mean over every element also divided by the number of classes,
    silently scaling the reported loss by 1/3 here.) The 1e-8 epsilon
    guards against log(0) for confident-wrong predictions.
    """
    per_sample = -np.sum(y_true * np.log(y_pred + 1e-8), axis=1)
    return np.mean(per_sample)

# 定义BP神经网络类
# Back-propagation neural network: one sigmoid hidden layer + softmax output
class BPNeuralNetwork:
    def __init__(self, input_size, hidden_size, output_size, learning_rate):
        """Allocate a 2-layer MLP with standard-normal random parameters."""
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.output_size = output_size
        self.learning_rate = learning_rate
        # Layer 1: input -> hidden; layer 2: hidden -> output
        self.weights1 = np.random.randn(input_size, hidden_size)
        self.bias1 = np.random.randn(hidden_size)
        self.weights2 = np.random.randn(hidden_size, output_size)
        self.bias2 = np.random.randn(output_size)

    def forward(self, X):
        """Forward pass; caches activations for use by backward()."""
        self.hidden_layer = sigmoid(np.dot(X, self.weights1) + self.bias1)
        self.output_layer = softmax(np.dot(self.hidden_layer, self.weights2) + self.bias2)
        return self.output_layer

    def backward(self, X, y_true, y_pred):
        """One full-batch gradient-descent step using cached activations."""
        # Softmax + cross-entropy yields the simple output delta (p - y)
        delta_out = y_pred - y_true
        grad_w2 = np.dot(self.hidden_layer.T, delta_out)
        grad_b2 = np.sum(delta_out, axis=0)
        # Propagate the error back through weights2 and the sigmoid
        delta_hidden = np.dot(delta_out, self.weights2.T) * sigmoid_derivative(self.hidden_layer)
        grad_w1 = np.dot(X.T, delta_hidden)
        grad_b1 = np.sum(delta_hidden, axis=0)
        # Plain gradient descent on the summed (not averaged) gradients
        self.weights2 -= self.learning_rate * grad_w2
        self.bias2 -= self.learning_rate * grad_b2
        self.weights1 -= self.learning_rate * grad_w1
        self.bias1 -= self.learning_rate * grad_b1

    def fit(self, X, y, epochs):
        """Train full-batch; record loss/accuracy curves every 10 epochs."""
        self.losses = []
        self.accuracies = []
        for epoch in range(epochs):
            probs = self.forward(X)
            self.backward(X, y, probs)
            if (epoch + 1) % 10 == 0:
                loss_val = loss(y, probs)
                self.losses.append(loss_val)
                accuracy_val = accuracy(y, probs)
                self.accuracies.append(accuracy_val)
                print("Epoch {0}: loss={1:.4f}, accuracy={2:.4f}".format(epoch + 1, loss_val, accuracy_val))

    def predict(self, X):
        """Return the most probable class index for each row of X."""
        return np.argmax(self.forward(X), axis=1)

# 定义计算准确率的函数
def accuracy(y_true, y_pred):
    """Fraction of rows where the argmax of y_pred matches that of y_true.

    Both arguments are (n_samples, n_classes) arrays: y_true one-hot labels,
    y_pred class scores/probabilities.
    """
    matches = np.argmax(y_true, axis=1) == np.argmax(y_pred, axis=1)
    return np.mean(matches)

# 创建BP神经网络模型并训练
# Build and train the network: 4 input features -> 10 hidden units -> 3 classes
model = BPNeuralNetwork(input_size=4, hidden_size=10, output_size=3, learning_rate=0.1)
model.fit(X_train, y_train, epochs=100)

# Evaluate on the held-out test set with a single forward pass.
# (The original called model.predict(X_test) but never used the result,
# then ran a second redundant forward pass inside accuracy().)
test_probs = model.forward(X_test)
accuracy_val = accuracy(y_test, test_probs)
print("Test accuracy: {0:.4f}".format(accuracy_val))

运行结果:

Epoch 10: loss=1.1380, accuracy=0.3333
Epoch 20: loss=0.9443, accuracy=0.7083
Epoch 30: loss=0.8153, accuracy=0.7083
Epoch 40: loss=0.7242, accuracy=0.7250
Epoch 50: loss=0.6576, accuracy=0.7667
Epoch 60: loss=0.6077, accuracy=0.7750
Epoch 70: loss=0.5691, accuracy=0.7917
Epoch 80: loss=0.5382, accuracy=0.8000
Epoch 90: loss=0.5121, accuracy=0.8083
Epoch 100: loss=0.4897, accuracy=0.8250
Test accuracy: 0.7667

可以看到,在训练过程中,损失函数和准确率都随着训练次数的增加而逐渐改善。在测试集上,模型的准确率为76.67%。

构建一个3层的BP神经网络:隐层大小为10,输入层为4个特征,输出层为3个分类。实现BP神经网络分类算法,根据鸢尾花的4个特征实现3种鸢尾花的分类,数据集为iris,给出模型的损失值与准确率。需要自行实现模型内部代码,不建议直接调用库;编程语言不限,建议使用Python。要求一:熟悉反向传播算法流程及代码实现;要求二:实验结果要有损失函数和准确率的曲线变化。

原文地址: https://www.cveoy.top/t/topic/4iC 著作权归作者所有。请勿转载和采集!

免费AI点我,无需注册和登录