程序的设计思路是使用神经网络模型来进行二元分类任务的训练和预测。程序中定义了一个Network类来表示神经网络模型,该类包含了训练次数、学习率以及各个权重和参数的成员变量。

在Network类中,定义了激活函数sigmoid和激活函数的导数deriv_sigmoid,用于前向传播和反向传播计算。前向传播通过计算各个节点的加权和并经过激活函数得到输出值,反向传播根据预测值和标签值之间的误差来更新神经网络的参数。

程序中还定义了训练和预测的函数train和predict。train函数接受训练数据和标签,并根据给定的训练次数和学习率进行训练,通过反向传播更新参数来使得预测值逐渐逼近标签值。在每一次训练的过程中,计算误差并输出训练次数和误差。predict函数接受测试数据和标签,并根据训练得到的参数进行预测,输出预测的正确率。

主函数中定义了训练数据和标签,以及测试数据和标签,并创建了一个Network对象进行训练和预测。

Network类结构

```cpp
#include <iostream>
#include <vector>
#include <cmath>
using namespace std;

class Network{
private:
    int epoches;//训练次数。
    double learning_rate;//学习率。
    double w1, w2, w3, w4, w5, w6;//权重。
    double b1, b2, b3;//参数。
public:
    // 超参数、参数初始化:
    Network(int es, double lr) :epoches(es), learning_rate(lr)
    {
        w1 = w2 = w3 = w4 = w5 = w6 = 0;
        b1 = b2 = b3 = 0;
    }
    // 激活函数:
    double sigmoid(double x)
    {
        return 1 / (1 + exp(-x));
    }
    // 激活函数求导:
    double deriv_sigmoid(double x)
    {
        double y = sigmoid(x);
        return y * (1 - y);
    }
    // 均方误差损失(单个样本):
    double getMSEloss(double pred, double label)
    {
        return (label - pred) * (label - pred);
    }
    // 前向传播:
    double forward(vector<double> data)
    {
        double sum_h1 = w1 * data[0] + w2 * data[1] + b1;
        double h1 = sigmoid(sum_h1);
        double sum_h2 = w3 * data[0] + w4 * data[1] + b2;
        double h2 = sigmoid(sum_h2);
        double sum_o1 = w5 * h1 + w6 * h2 + b3;
        return sigmoid(sum_o1);
    }
    //训练数据数组:
    void train(vector<vector<double>> data, vector<double> label)
    {
        for (int epoch = 0; epoch < epoches; ++epoch)
        {
            int total_n = data.size();
            for (int i = 0; i < total_n; ++i)
            {
                vector<double> x = data[i];
                double sum_h1 = w1 * x[0] + w2 * x[1] + b1;
                double h1 = sigmoid(sum_h1);
                double sum_h2 = w3 * x[0] + w4 * x[1] + b2;
                double h2 = sigmoid(sum_h2);
                double sum_o1 = w5 * h1 + w6 * h2 + b3;
                double o1 = sigmoid(sum_o1);
                double pred = o1;
                //反向算法更新神经网络参数:
                double d_loss_pred = -2 * (label[i] - pred);
                double d_pred_w5 = h1 * deriv_sigmoid(sum_o1);
                double d_pred_w6 = h2 * deriv_sigmoid(sum_o1);
                double d_pred_b3 = deriv_sigmoid(sum_o1);
                double d_pred_h1 = w5 * deriv_sigmoid(sum_o1);
                double d_pred_h2 = w6 * deriv_sigmoid(sum_o1);
                double d_h1_w1 = x[0] * deriv_sigmoid(sum_h1);
                double d_h1_w2 = x[1] * deriv_sigmoid(sum_h1);
                double d_h1_b1 = deriv_sigmoid(sum_h1);
                double d_h2_w3 = x[0] * deriv_sigmoid(sum_h2);
                double d_h2_w4 = x[1] * deriv_sigmoid(sum_h2);
                double d_h2_b2 = deriv_sigmoid(sum_h2);

                w1 -= learning_rate * d_loss_pred * d_pred_h1 * d_h1_w1;
                w2 -= learning_rate * d_loss_pred * d_pred_h1 * d_h1_w2;
                b1 -= learning_rate * d_loss_pred * d_pred_h1 * d_h1_b1;
                w3 -= learning_rate * d_loss_pred * d_pred_h2 * d_h2_w3;
                w4 -= learning_rate * d_loss_pred * d_pred_h2 * d_h2_w4;
                b2 -= learning_rate * d_loss_pred * d_pred_h2 * d_h2_b2;
                w5 -= learning_rate * d_loss_pred * d_pred_w5;
                w6 -= learning_rate * d_loss_pred * d_pred_w6;
                b3 -= learning_rate * d_loss_pred * d_pred_b3;
            }
            if (epoch % 10 == 0)
            {
                double loss = 0;
                for (int i = 0; i < total_n; ++i)
                {
                    double pred = forward(data[i]);
                    loss += getMSEloss(pred, label[i]);
                }
                //输出训练次数与误差:
                cout << "Epoch:" << epoch << " Loss: " << loss << endl;
            }
        }
    }
    //预测数据数组:
    void predict(vector<vector<double>> testdata, vector<double> testlabel)
    {
        int n = testdata.size();
        double temp = 0;
        for (int i = 0; i < n; ++i)
        {
            double pred = forward(testdata[i]);
            pred = pred > 0.5 ? 1 : 0;
            temp += (testlabel[i] == pred);
        }
        //输出判断的正确率:
        cout << "正确率:" << temp / n *100<<"%" << endl;
    }
};

int main()
{
    //训练:
    vector<vector<double>> data = { {-5,-3},{7,28},{14,9},{-25,-11} };
    vector<double> label = { 1,0,0,1 };
    Network network = Network(2000, 0.1);
    network.train(data, label);
    //得出预测结论:
    vector<vector<double>> testdata = { {-5,-7},{-1,-15},{17,5},{-23,-7},{8,14} };
    vector<double> testlabel = { 1,1,0,1,0 };
    network.predict(testdata, testlabel);
    return 0;
}
```

代码中使用的文件和数据库表

该程序没有使用任何文件或数据库操作。

总结

该程序是一个简单的二元分类神经网络模型,使用了反向传播算法进行训练,并提供了训练和预测的功能。可以作为学习神经网络基础知识的入门示例。

神经网络二元分类模型:设计思路与代码实现

原文地址: https://www.cveoy.top/t/topic/qsnH 著作权归作者所有。请勿转载和采集!

免费AI点我,无需注册和登录