Garbage Recognition and Classification with a Convolutional Neural Network in C - A Simple Example

Below is a minimal, self-contained example. For simplicity it replaces the convolutional layers with a two-layer fully connected network (784-100-2, sigmoid activations) trained by backpropagation with per-sample stochastic gradient descent.
```c
#include <stdio.h>
#include <stdlib.h>
#include <math.h>

#define INPUT_SIZE 784
#define HIDDEN_SIZE 100
#define OUTPUT_SIZE 2
#define LEARNING_RATE 0.01
#define EPOCHS 10
#define TRAIN_SIZE 60000
#define TEST_SIZE 10000

/* A plain row-major matrix; rows and cols are stored so the helper
 * functions below can work with any shape. */
typedef struct {
    int rows;
    int cols;
    double* data;
} Matrix;

/* One fully connected layer. `input` only borrows a pointer to the previous
 * layer's output (or to the sample itself) and does not own that memory. */
typedef struct {
    Matrix input;
    Matrix weight;   /* out x in */
    Matrix bias;     /* 1 x out */
    Matrix output;   /* 1 x out */
    Matrix error;    /* 1 x in: error propagated back to the layer's inputs */
    Matrix delta;    /* 1 x out */
} Layer;

double sigmoid(double x) {
    return 1.0 / (1.0 + exp(-x));
}

Matrix createMatrix(int rows, int cols) {
    Matrix mat;
    mat.rows = rows;
    mat.cols = cols;
    mat.data = (double*)malloc((size_t)rows * cols * sizeof(double));
    return mat;
}

void freeMatrix(Matrix* mat) {
    free(mat->data);
    mat->data = NULL;
}

/* Fill a matrix with small symmetric random values so the sigmoid
 * does not saturate at the start of training. */
void randomizeMatrix(Matrix* mat) {
    for (int i = 0; i < mat->rows * mat->cols; i++) {
        mat->data[i] = (rand() / (double)RAND_MAX - 0.5) * 0.1;
    }
}

/* output = sigmoid(weight * input + bias); sizes come from the weight matrix. */
void forward(Layer* layer) {
    for (int i = 0; i < layer->weight.rows; i++) {
        double sum = 0.0;
        for (int j = 0; j < layer->weight.cols; j++) {
            sum += layer->input.data[j] * layer->weight.data[i * layer->weight.cols + j];
        }
        layer->output.data[i] = sigmoid(sum + layer->bias.data[i]);
    }
}

/* error[i] = sum_j weight[j][i] * delta[j]: propagate deltas back to the inputs. */
void backward(Layer* layer) {
    for (int i = 0; i < layer->weight.cols; i++) {
        double sum = 0.0;
        for (int j = 0; j < layer->weight.rows; j++) {
            sum += layer->weight.data[j * layer->weight.cols + i] * layer->delta.data[j];
        }
        layer->error.data[i] = sum;
    }
}

/* Gradient step on weights and biases (delta already carries the sign). */
void update(Layer* layer) {
    for (int i = 0; i < layer->weight.rows; i++) {
        for (int j = 0; j < layer->weight.cols; j++) {
            layer->weight.data[i * layer->weight.cols + j] +=
                LEARNING_RATE * layer->delta.data[i] * layer->input.data[j];
        }
        layer->bias.data[i] += LEARNING_RATE * layer->delta.data[i];
    }
}

void train(Layer* hiddenLayer, Layer* outputLayer, Matrix* trainData, int* trainLabels) {
    for (int epoch = 0; epoch < EPOCHS; epoch++) {
        double error = 0.0;
        for (int i = 0; i < TRAIN_SIZE; i++) {
            /* Forward pass. */
            hiddenLayer->input.data = trainData[i].data;
            forward(hiddenLayer);
            outputLayer->input.data = hiddenLayer->output.data;
            forward(outputLayer);

            /* Output deltas and squared-error loss against the one-hot target. */
            for (int j = 0; j < OUTPUT_SIZE; j++) {
                double target = (j == trainLabels[i]) ? 1.0 : 0.0;
                double out = outputLayer->output.data[j];
                outputLayer->delta.data[j] = out * (1 - out) * (target - out);
                error += 0.5 * pow(target - out, 2);
            }

            /* Propagate the output deltas back to the hidden outputs. */
            backward(outputLayer);

            /* Hidden deltas: sigmoid derivative times the back-propagated error. */
            for (int j = 0; j < HIDDEN_SIZE; j++) {
                double out = hiddenLayer->output.data[j];
                hiddenLayer->delta.data[j] = out * (1 - out) * outputLayer->error.data[j];
            }

            update(outputLayer);
            update(hiddenLayer);
        }
        printf("Epoch %d, Error: %f\n", epoch + 1, error);
    }
}

int predict(Layer* hiddenLayer, Layer* outputLayer, Matrix* testData) {
    hiddenLayer->input.data = testData->data;
    forward(hiddenLayer);
    outputLayer->input.data = hiddenLayer->output.data;
    forward(outputLayer);

    /* The predicted class is the output unit with the largest activation. */
    int maxIndex = 0;
    for (int i = 1; i < OUTPUT_SIZE; i++) {
        if (outputLayer->output.data[i] > outputLayer->output.data[maxIndex]) {
            maxIndex = i;
        }
    }
    return maxIndex;
}

int main(void) {
    srand(0);

    /* Load the training data and labels (loading is omitted here; see the
     * sketch after the listing). Each sample is a 1 x INPUT_SIZE matrix. */
    Matrix* trainData = (Matrix*)malloc(TRAIN_SIZE * sizeof(Matrix));
    int* trainLabels = (int*)malloc(TRAIN_SIZE * sizeof(int));

    /* Load the test data and labels (also omitted). */
    Matrix* testData = (Matrix*)malloc(TEST_SIZE * sizeof(Matrix));
    int* testLabels = (int*)malloc(TEST_SIZE * sizeof(int));

    /* The input matrices are only borrowed pointers, so they are not
     * allocated (and must not be freed) by the layers themselves. */
    Layer hiddenLayer;
    hiddenLayer.input = (Matrix){0};
    hiddenLayer.weight = createMatrix(HIDDEN_SIZE, INPUT_SIZE);
    hiddenLayer.bias = createMatrix(1, HIDDEN_SIZE);
    hiddenLayer.output = createMatrix(1, HIDDEN_SIZE);
    hiddenLayer.error = createMatrix(1, INPUT_SIZE);
    hiddenLayer.delta = createMatrix(1, HIDDEN_SIZE);

    Layer outputLayer;
    outputLayer.input = (Matrix){0};
    outputLayer.weight = createMatrix(OUTPUT_SIZE, HIDDEN_SIZE);
    outputLayer.bias = createMatrix(1, OUTPUT_SIZE);
    outputLayer.output = createMatrix(1, OUTPUT_SIZE);
    outputLayer.error = createMatrix(1, HIDDEN_SIZE);
    outputLayer.delta = createMatrix(1, OUTPUT_SIZE);

    randomizeMatrix(&hiddenLayer.weight);
    randomizeMatrix(&hiddenLayer.bias);
    randomizeMatrix(&outputLayer.weight);
    randomizeMatrix(&outputLayer.bias);

    train(&hiddenLayer, &outputLayer, trainData, trainLabels);

    int correct = 0;
    for (int i = 0; i < TEST_SIZE; i++) {
        int prediction = predict(&hiddenLayer, &outputLayer, &testData[i]);
        if (prediction == testLabels[i]) {
            correct++;
        }
    }

    double accuracy = (double)correct / TEST_SIZE;
    printf("Accuracy: %f\n", accuracy);

    freeMatrix(&hiddenLayer.weight);
    freeMatrix(&hiddenLayer.bias);
    freeMatrix(&hiddenLayer.output);
    freeMatrix(&hiddenLayer.error);
    freeMatrix(&hiddenLayer.delta);

    freeMatrix(&outputLayer.weight);
    freeMatrix(&outputLayer.bias);
    freeMatrix(&outputLayer.output);
    freeMatrix(&outputLayer.error);
    freeMatrix(&outputLayer.delta);

    free(trainData);
    free(trainLabels);
    free(testData);
    free(testLabels);

    return 0;
}
```
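The listing leaves data loading as a placeholder. As one possible way to fill that gap, here is a minimal sketch of a loader, assuming a hypothetical binary layout: one file of raw 28x28 grayscale images (INPUT_SIZE bytes per image, back to back) and one file with a single label byte per image. The `loadDataset` helper and the file layout are illustrative assumptions, not part of the original code; pixels are scaled to [0, 1] before being stored in the `Matrix` samples.

```c
#include <stdio.h>
#include <stdlib.h>

/* Duplicated here so the sketch compiles on its own; in practice it would
 * live in the same file as the listing and share these definitions. */
#define INPUT_SIZE 784
typedef struct { int rows; int cols; double* data; } Matrix;

/* Hypothetical loader: reads up to `count` samples and returns how many
 * were actually loaded. */
int loadDataset(const char* imagePath, const char* labelPath,
                Matrix* samples, int* labels, int count) {
    FILE* imgFile = fopen(imagePath, "rb");
    FILE* lblFile = fopen(labelPath, "rb");
    if (!imgFile || !lblFile) {
        if (imgFile) fclose(imgFile);
        if (lblFile) fclose(lblFile);
        return 0;
    }

    unsigned char pixels[INPUT_SIZE];
    unsigned char label;
    int loaded = 0;

    for (int i = 0; i < count; i++) {
        if (fread(pixels, 1, INPUT_SIZE, imgFile) != INPUT_SIZE) break;
        if (fread(&label, 1, 1, lblFile) != 1) break;

        samples[i].rows = 1;
        samples[i].cols = INPUT_SIZE;
        samples[i].data = (double*)malloc(INPUT_SIZE * sizeof(double));
        for (int j = 0; j < INPUT_SIZE; j++) {
            samples[i].data[j] = pixels[j] / 255.0;  /* scale to [0, 1] */
        }
        labels[i] = label;  /* 0 or 1 for the two garbage classes */
        loaded++;
    }

    fclose(imgFile);
    fclose(lblFile);
    return loaded;
}
```

With such a helper, the placeholder comments in `main()` would become calls like `loadDataset("train-images.bin", "train-labels.bin", trainData, trainLabels, TRAIN_SIZE)` and the corresponding call for the test set; the file names here are likewise assumptions.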