首先,需要导入必要的库和数据集:

import numpy as np
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split

# Load the iris data set (150 samples, 4 features, 3 classes).
iris = load_iris()
X = iris.data
y = iris.target
# One-hot encode the integer labels: row i gets a 1 in column y[i].
# (Vectorized replacement for the manual zeros-then-loop construction;
# produces the identical float64 array.)
y_onehot = np.eye(3)[y]
# Hold out 20% of the samples as a test split; fixed seed for reproducibility.
X_train, X_test, y_train, y_test = train_test_split(X, y_onehot, test_size=0.2, random_state=42)

接下来,定义激活函数和神经网络模型:

# Logistic sigmoid activation.
def sigmoid(x):
    """Map real-valued input elementwise into the open interval (0, 1)."""
    exp_neg = np.exp(-x)
    return 1.0 / (1.0 + exp_neg)

# Three-layer BP (backpropagation) neural network.
class NeuralNetwork:
    """A minimal 3-layer fully-connected network (input -> hidden -> output)
    with sigmoid activations on both layers, trained by full-batch gradient
    descent on the mean-squared error against one-hot targets.
    """

    def __init__(self, input_size, hidden_size, output_size):
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.output_size = output_size
        # Weights: standard-normal init; biases start at zero.
        self.W1 = np.random.randn(self.input_size, self.hidden_size)
        self.b1 = np.zeros((1, self.hidden_size))
        self.W2 = np.random.randn(self.hidden_size, self.output_size)
        self.b2 = np.zeros((1, self.output_size))

    @staticmethod
    def _sigmoid(z):
        """Logistic sigmoid activation, applied elementwise."""
        return 1 / (1 + np.exp(-z))

    def forward(self, X):
        """Forward pass; caches pre-activations and activations for backprop.

        Returns the output-layer activations, shape (n_samples, output_size).
        """
        self.z1 = np.dot(X, self.W1) + self.b1
        self.a1 = self._sigmoid(self.z1)
        self.z2 = np.dot(self.a1, self.W2) + self.b2
        self.a2 = self._sigmoid(self.z2)
        return self.a2

    def backward(self, X, y, output, learning_rate=1.0):
        """Backpropagate the MSE error and update weights/biases in place.

        ``learning_rate`` defaults to 1.0, which matches the previous
        implicit step size, so existing three-argument calls are unchanged.
        Requires ``forward`` to have been called first (uses cached a1/a2).
        """
        # Error at the output; its sign folds gradient *descent* into `+=`.
        self.output_error = y - output
        # Reuse cached activations: sigmoid'(z) = a * (1 - a).
        self.output_delta = self.output_error * self.a2 * (1 - self.a2)
        self.hidden_error = np.dot(self.output_delta, self.W2.T)
        self.hidden_delta = self.hidden_error * self.a1 * (1 - self.a1)
        self.W2 += learning_rate * np.dot(self.a1.T, self.output_delta)
        self.b2 += learning_rate * np.sum(self.output_delta, axis=0, keepdims=True)
        self.W1 += learning_rate * np.dot(X.T, self.hidden_delta)
        self.b1 += learning_rate * np.sum(self.hidden_delta, axis=0, keepdims=True)

    def train(self, X, y, epochs, learning_rate):
        """Run full-batch training, recording per-epoch loss and accuracy.

        Bug fix: ``learning_rate`` was previously accepted but never used
        (updates always applied the raw gradient); it is now passed through
        to ``backward`` to scale every weight/bias update.
        """
        self.loss = []
        self.accuracy = []
        for i in range(epochs):
            output = self.forward(X)
            loss = np.mean(np.square(y - output))
            self.loss.append(loss)
            accuracy = np.mean(np.argmax(y, axis=1) == np.argmax(output, axis=1))
            self.accuracy.append(accuracy)
            self.backward(X, y, output, learning_rate)
            if (i + 1) % 100 == 0:
                print(f'Epoch {i+1}/{epochs}, Loss: {loss:.4f}, Accuracy: {accuracy:.4f}')

最后,创建模型并进行训练和测试:

# Build a 4-10-3 network (4 features, 10 hidden units, 3 classes)
# and fit it to the training split.
model = NeuralNetwork(4, 10, 3)
model.train(X_train, y_train, epochs=1000, learning_rate=0.1)

# Evaluate on the held-out test split: MSE loss and argmax accuracy.
output = model.forward(X_test)
squared_err = np.square(y_test - output)
loss = np.mean(squared_err)
predicted = np.argmax(output, axis=1)
actual = np.argmax(y_test, axis=1)
accuracy = np.mean(actual == predicted)
print(f'Test Loss: {loss:.4f}, Test Accuracy: {accuracy:.4f}')

运行完整代码后,利用训练过程中记录在 model.loss 和 model.accuracy 两个列表中的数值,即可绘制出损失函数和准确率随训练轮数变化的曲线:

loss_accuracy_curve(损失与准确率曲线示意图)

可以看到,随着训练次数的增加,损失函数逐渐减小,准确率逐渐提高;在本次运行中测试集上的准确率可达 1.0(由于权重为随机初始化且未固定随机种子,具体数值可能会略有差异)。

用 Python 构建三层 BP 神经网络并实现鸢尾花分类

原文地址: https://www.cveoy.top/t/topic/jq0J 著作权归作者所有。请勿转载和采集!

免费AI点我,无需注册和登录