Python Neural Network Implementation: Gao Class for Backpropagation
class Gao:
    """Simple 3-layer feed-forward network trained with backpropagation.

    Layers: input (n_i) -> hidden (n_h, tanh activation) -> output
    (n_o, sigmoid activation).  Relies on module-level helpers
    ``tan_h``, ``sigmoid`` and ``diff_sigmoid`` plus NumPy names
    (``ones``, ``zeros``, ``dot``, ``multiply``, ``random``) and the
    alias ``gg`` for NumPy, all assumed to be imported elsewhere in
    this file.
    """

    def __init__(self, n_i, n_h, n_o):
        # Node counts per layer (input / hidden / output).
        # NOTE(review): the original comment mentioned "plus one bias
        # node", but no bias node is actually added — confirm intent.
        self.n_i = n_i
        self.n_h = n_h
        self.n_o = n_o
        # Pre-activation weighted sums ("net") for each layer.
        self.data_i = ones(self.n_i)
        self.data_net_h = ones(self.n_h)
        self.data_net_o = ones(self.n_o)
        # Post-activation outputs, textbook names: y = hidden, z = output.
        self.data_y = ones(self.n_h)
        self.data_z = ones(self.n_o)
        # f'(net_k) and delta for the output layer.
        self.f0_net_k = ones(self.n_o)
        self.delta_k = ones(self.n_o)
        # Weight matrices: wi is (n_h, n_i), wo is (n_h, n_o), random in [0, 1).
        self.wi = random.random((self.n_h, self.n_i))
        self.wo = random.random((self.n_h, self.n_o))
        # Accumulators for pending weight updates.
        # BUG FIX: the original aliased the weight matrices themselves
        # (delta_wi_temp = self.wi), so the accumulators started from
        # the random weights instead of zero.  They are only ever
        # written to in this file, so zero-initializing is safe.
        self.delta_wi_temp = zeros((self.n_h, self.n_i))
        self.delta_wo_temp = zeros((self.n_h, self.n_o))

    def calculate_output(self, iggut):
        """Forward pass: compute and return the output-layer activations.

        :param iggut: input vector (callers pass a column vector of
            shape (n_i, 1) — see ``train``).
        :return: output activations z = sigmoid(net_o).
        """
        self.data_i = iggut
        # input -> hidden: net_h = wi . x, then y = tanh(net_h).
        self.data_net_h = dot(self.wi, self.data_i)
        self.data_y = gg.array(list(map(tan_h, self.data_net_h)))
        # hidden -> output: net_o = y . wo, then z = sigmoid(net_o).
        self.data_net_o = dot(self.data_y, self.wo)
        self.data_z = list(map(sigmoid, self.data_net_o))
        return self.data_z

    def BPBP(self, target, updata_flag, rate_1, rate_2):
        """One backpropagation step for the most recent forward pass.

        :param target: desired output vector t_k.
        :param updata_flag: when 1, apply the hidden->output weight
            update immediately.
        :param rate_1: learning rate intended for the input->hidden
            weights (currently unused — see NOTE below).
        :param rate_2: learning rate for the hidden->output weights.
        :return: 0.5 * squared error between target and actual output.
        """
        # Output-layer error and delta: delta_k = f'(net_k) * (t_k - z_k).
        error_t_k = target - self.data_z
        for i in range(self.n_o):
            self.f0_net_k[i] = diff_sigmoid(self.data_net_o[i])
        self.delta_k = gg.multiply(self.f0_net_k, error_t_k)
        # Gradient for hidden->output weights: outer product y * delta_k.
        # BUG FIX: the reshape length was hard-coded to 3; use n_o so
        # the network works for any output-layer size.
        data_y_temp = self.data_y.reshape(-1, 1)
        delta_wo = dot(data_y_temp, self.delta_k.reshape(1, self.n_o))
        # Backpropagated error for each hidden node:
        # epsilon_j = sum_k delta_k * wo[j, k].
        epsilon = zeros(self.n_h).reshape(-1, 1)
        for i in range(self.n_h):
            epsilon[i] = multiply(self.delta_k, self.wo[i:i + 1][0]).sum()
        # Gradient for input->hidden weights (note: already scaled by
        # rate_2 here, not rate_1 — looks inconsistent, kept as-is).
        delta_wi = rate_2 * dot(epsilon, self.data_i.reshape(1, -1))
        # Accumulate pending updates (only read outside this method, if at all).
        self.delta_wo_temp = self.delta_wo_temp + delta_wo
        self.delta_wi_temp = self.delta_wi_temp + delta_wi
        if updata_flag == 1:
            self.wo = self.wo + rate_2 * delta_wo
            # NOTE(review): the input->hidden update was commented out
            # in the original ("for testing"), so wi never learns:
            # self.wi = self.wi + rate_1 * delta_wi
            # Confirm whether it should be re-enabled.
        error = 0.5 * dot((target - self.data_z), (target - self.data_z).reshape(-1, 1))
        return error

    def train(self, patterns, iggut_data, rate_1, rate_2):
        """Train on all samples in `patterns` for 5000 epochs.

        Each pattern row is sliced as: columns 1..3 = inputs (taken as
        a column vector), columns 4.. = targets.  Accuracy is measured
        via ``self.test`` (defined elsewhere in this file) on
        `iggut_data`; error/accuracy are printed every 100 epochs.
        """
        stop_flag = 0          # reserved for an early-stop criterion (unused)
        error_set = []
        acc_set = []           # NOTE(review): never appended to — dead?
        step = 0
        sample_len = len(patterns)
        sample_num = 0
        rate_temp = 0
        for m in range(5000):
            step += 1
            # updata_flag is 1 from the start, so weights update on
            # every sample (the end-of-epoch re-set below is redundant
            # but kept to preserve the original control flow).
            updata_flag = 1
            for p in patterns:
                sample_num += 1
                igguts = p[1:4].reshape(-1, 1)
                targets = p[4:]
                if sample_num == sample_len:
                    updata_flag = 1
                self.calculate_output(igguts)
                error = self.BPBP(targets, updata_flag, rate_1, rate_2)
            rate = self.test(iggut_data)
            rate_temp = rate_temp + rate
            if step % 100 == 0:
                error_set.append(error)
                print('error', error, 'acc:', rate)
What each line of code means:
The given code is an implementation of a neural network class called 'Gao'. Here is a breakdown of what each line of code does:
-
class Gao:: Defines a class named 'Gao' for the neural network. -
def __init__(self, n_i, n_h, n_o):: Initializes the Gao class with the number of nodes in the input layer (n_i), hidden layer (n_h), and output layer (n_o). -
self.n_i = n_i: Stores the number of nodes in the input layer. -
self.n_h = n_h: Stores the number of nodes in the hidden layer. -
self.n_o = n_o: Stores the number of nodes in the output layer. -
self.data_i = ones(self.n_i): Creates an array of ones with the size of the input layer. -
self.data_net_h = ones(self.n_h): Creates an array of ones with the size of the hidden layer. -
self.data_net_o = ones(self.n_o): Creates an array of ones with the size of the output layer. -
self.data_y = ones(self.n_h): Creates an array of ones with the size of the hidden layer. -
self.data_z = ones(self.n_o): Creates an array of ones with the size of the output layer. -
self.f0_net_k = ones(self.n_o): Creates an array of ones with the size of the output layer. -
self.delta_k = ones(self.n_o): Creates an array of ones with the size of the output layer. -
self.wi = random.random((self.n_h, self.n_i)): Initializes the weight matrix between the input and hidden layers with random values. -
self.wo = random.random((self.n_h, self.n_o)): Initializes the weight matrix between the hidden and output layers with random values. -
self.delta_wi_temp = self.wi: Sets the initial value of the delta_wi_temp variable to the weight matrix between the input and hidden layers. -
self.delta_wo_temp = self.wo: Sets the initial value of the delta_wo_temp variable to the weight matrix between the hidden and output layers. -
def calculate_output(self, iggut):: Calculates the output of the neural network given an input. -
self.data_i = iggut: Sets the input layer data to the provided input. -
self.data_net_h = dot(self.wi, self.data_i): Calculates the weighted sum (net input) of the hidden layer from the input layer. -
self.data_y = gg.array(list(map(tan_h, self.data_net_h))): Applies the hyperbolic tangent activation function to the hidden layer's net input. -
self.data_net_o = dot(self.data_y, self.wo): Calculates the weighted sum (net input) of the output layer from the hidden-layer activations. -
self.data_z = list(map(sigmoid, self.data_net_o)): Applies the sigmoid activation function to the output layer's net input. -
return self.data_z: Returns the output of the neural network. -
def BPBP(self, target, updata_flag, rate_1, rate_2):: Performs the backpropagation algorithm to update the weights of the neural network. -
error_t_k = target - self.data_z: Calculates the error between the target output and the actual output. -
for i in range(self.n_o):: Iterates over each node in the output layer. -
self.f0_net_k[i] = diff_sigmoid(self.data_net_o[i]): Calculates the derivative of the sigmoid activation function for the output layer. -
self.delta_k = gg.multiply(self.f0_net_k, error_t_k): Calculates the delta value for the output layer. -
data_y_temp = self.data_y.reshape(-1, 1): Reshapes the hidden layer data. -
delta_wo = dot(data_y_temp, self.delta_k.reshape(1, 3)): Calculates the weight update for the weights between the hidden and output layers. -
epsilon = zeros(self.n_h).reshape(-1, 1): Creates an array of zeros with the size of the hidden layer. -
for i in range(self.n_h):: Iterates over each node in the hidden layer. -
epsilon[i] = multiply(self.delta_k, self.wo[i:i + 1][0]).sum(): Calculates the error contribution of each hidden layer node. -
delta_wi = rate_2 * dot(epsilon, self.data_i.reshape(1, -1)): Calculates the weight update for the weights between the input and hidden layers. -
self.delta_wo_temp = self.delta_wo_temp + delta_wo: Updates the temporary weight update for the weights between the hidden and output layers. -
self.delta_wi_temp = self.delta_wi_temp + delta_wi: Updates the temporary weight update for the weights between the input and hidden layers. -
if updata_flag == 1:: Checks if the weights should be updated. -
self.wo = self.wo + rate_2 * delta_wo: Updates the weights between the hidden and output layers. -
error = 0.5 * dot((target - self.data_z), (target - self.data_z).reshape(-1, 1)): Calculates the error of the neural network. -
return error: Returns the error of the neural network. -
def train(self, patterns, iggut_data, rate_1, rate_2):: Trains the neural network using the provided patterns and input data. -
for m in range(5000):: Iterates over a range of 5000 epochs. -
step += 1: Increments the step counter. -
updata_flag = 1: Sets the update flag to 1. -
for p in patterns:: Iterates over each pattern in the provided patterns. -
sample_num += 1: Increments the sample number. -
igguts = p[1:4].reshape(-1, 1): Retrieves the input data from the pattern. -
targets = p[4:]: Retrieves the target output from the pattern. -
if sample_num == sample_len:: Checks if all samples have been processed. -
updata_flag = 1: Sets the update flag to 1. -
self.calculate_output(igguts): Calculates the output of the neural network for the current input. -
error = self.BPBP(targets, updata_flag, rate_1, rate_2): Performs backpropagation and updates the weights of the neural network. -
rate = self.test(iggut_data): Tests the neural network on the provided input data. -
rate_temp = rate_temp + rate: Accumulates the testing rate. -
if step % 100 == 0:: Checks if the current step is a multiple of 100. -
error_set.append(error): Appends the error to the error set. -
print('error', error, 'acc:', rate): Prints the error and testing rate.
Overall, this code defines a neural network class and provides methods for calculating the output, performing backpropagation, and training the neural network.
原文地址: https://www.cveoy.top/t/topic/fM5F 著作权归作者所有。请勿转载和采集!