class Gao:
    """A simple three-layer (input / hidden / output) feed-forward network
    trained with back-propagation.

    Relies on module-level helpers defined elsewhere in this file:
    numpy names (``ones``, ``zeros``, ``dot``, ``multiply``, ``random``,
    plus numpy imported as ``gg``) and the activation functions
    ``tan_h``, ``sigmoid`` and ``diff_sigmoid``, as well as ``self.test``.
    """

    def __init__(self, n_i, n_h, n_o):
        """Store the layer sizes and allocate the network's buffers.

        n_i -- number of input-layer nodes (a bias node, if used, must be
               counted by the caller)
        n_h -- number of hidden-layer nodes
        n_o -- number of output-layer nodes
        """
        self.n_i = n_i
        self.n_h = n_h
        self.n_o = n_o
        # Pre-activation ("net") buffers, one per layer.
        self.data_i = ones(self.n_i)
        self.data_net_h = ones(self.n_h)
        self.data_net_o = ones(self.n_o)
        # Post-activation values: y = hidden output, z = network output
        # (the y / z notation follows the textbook this code is based on).
        self.data_y = ones(self.n_h)
        self.data_z = ones(self.n_o)
        self.f0_net_k = ones(self.n_o)  # f'(net_k) for the output layer
        self.delta_k = ones(self.n_o)   # output-layer delta
        # Weight matrices: wi is (n_h, n_i), wo is (n_h, n_o), uniform [0, 1).
        self.wi = random.random((self.n_h, self.n_i))
        self.wo = random.random((self.n_h, self.n_o))
        # Accumulators for batched weight updates.
        # NOTE(review): these start from the *weights* themselves instead of
        # zero (and alias them until the first `+` rebinds) -- looks
        # unintentional; confirm before relying on the accumulated values.
        self.delta_wi_temp = self.wi
        self.delta_wo_temp = self.wo

    def calculate_output(self, iggut):
        """Forward pass: compute and return the network output for *iggut*."""
        self.data_i = iggut
        # input -> hidden: weighted sum, then tanh activation.
        self.data_net_h = dot(self.wi, self.data_i)
        self.data_y = gg.array(list(map(tan_h, self.data_net_h)))
        # hidden -> output: weighted sum, then sigmoid activation.
        # (data_z is kept as an ndarray for consistency with data_y; the
        # original built a plain list here.)
        self.data_net_o = dot(self.data_y, self.wo)
        self.data_z = gg.array(list(map(sigmoid, self.data_net_o)))
        return self.data_z

    def BPBP(self, target, updata_flag, rate_1, rate_2):
        """One back-propagation step for the most recent forward pass.

        target      -- desired output vector
        updata_flag -- when 1, apply the update to the weights immediately
        rate_1      -- learning rate for the input->hidden weights
        rate_2      -- learning rate for the hidden->output weights
        Returns the squared error 0.5 * ||target - z||^2.
        """
        # Output-layer delta: delta_k = f'(net_k) * (t_k - z_k).
        error_t_k = target - self.data_z
        for k in range(self.n_o):
            self.f0_net_k[k] = diff_sigmoid(self.data_net_o[k])
        self.delta_k = gg.multiply(self.f0_net_k, error_t_k)
        data_y_temp = self.data_y.reshape(-1, 1)
        # BUGFIX: the original hard-coded reshape(1, 3); use n_o so any
        # output size works.
        delta_wo = dot(data_y_temp, self.delta_k.reshape(1, self.n_o))
        # Hidden-layer error term: epsilon_j = sum_k delta_k * w_jk.
        epsilon = zeros(self.n_h).reshape(-1, 1)
        for j in range(self.n_h):
            epsilon[j] = multiply(self.delta_k, self.wo[j:j + 1][0]).sum()
        # NOTE(review): delta_wi is scaled by rate_2, not rate_1 -- confirm
        # which learning rate was intended for the input->hidden layer.
        delta_wi = rate_2 * dot(epsilon, self.data_i.reshape(1, -1))
        self.delta_wo_temp = self.delta_wo_temp + delta_wo
        self.delta_wi_temp = self.delta_wi_temp + delta_wi
        if updata_flag == 1:
            self.wo = self.wo + rate_2 * delta_wo
            # NOTE(review): the input->hidden update was disabled in the
            # original ("self.wi = self.wi + rate_1 * delta_wi"), so the
            # wi matrix is never trained and rate_1 is effectively unused.
        error = 0.5 * dot((target - self.data_z), (target - self.data_z).reshape(-1, 1))
        return error

    def train(self, patterns, iggut_data, rate_1, rate_2):
        """Train the network on all samples in *patterns* for 5000 epochs.

        patterns   -- 2-D sample array; per row, columns 1:4 are the three
                      inputs and columns 4: are the targets
        iggut_data -- held-out data passed to self.test() for the accuracy
                      measure after every sample
        rate_1     -- learning rate for the input->hidden weights
        rate_2     -- learning rate for the hidden->output weights
        """
        error_set = []   # error samples, recorded every 100 epochs
        step = 0
        sample_len = len(patterns)
        sample_num = 0
        rate_temp = 0    # running sum of the per-sample test accuracy
        for epoch in range(5000):
            step += 1
            # NOTE(review): updata_flag is always 1 here, so the weights are
            # updated after every sample (pure on-line mode) and the
            # sample_num == sample_len check below is redundant.
            updata_flag = 1
            for p in patterns:
                sample_num += 1
                igguts = p[1:4].reshape(-1, 1)  # inputs as a column vector
                targets = p[4:]
                if sample_num == sample_len:
                    updata_flag = 1
                self.calculate_output(igguts)
                error = self.BPBP(targets, updata_flag, rate_1, rate_2)
                rate = self.test(iggut_data)
                rate_temp = rate_temp + rate
            # Log progress every 100 epochs.
            # NOTE(review): indentation was lost in the original paste; this
            # check is placed at epoch level, matching the per-epoch `step`.
            if step % 100 == 0:
                error_set.append(error)
                print('error', error, 'acc:', rate)

每行代码的含义如下：

The given code is an implementation of a neural network class called 'Gao'. Here is a breakdown of what each line of code does:

  1. class Gao:: Defines a class named 'Gao' for the neural network.

  2. def __init__(self, n_i, n_h, n_o):: Initializes the Gao class with the number of nodes in the input layer (n_i), hidden layer (n_h), and output layer (n_o).

  3. self.n_i = n_i: Stores the number of nodes in the input layer.

  4. self.n_h = n_h: Stores the number of nodes in the hidden layer.

  5. self.n_o = n_o: Stores the number of nodes in the output layer.

  6. self.data_i = ones(self.n_i): Creates an array of ones with the size of the input layer.

  7. self.data_net_h = ones(self.n_h): Creates an array of ones with the size of the hidden layer.

  8. self.data_net_o = ones(self.n_o): Creates an array of ones with the size of the output layer.

  9. self.data_y = ones(self.n_h): Creates an array of ones with the size of the hidden layer.

  10. self.data_z = ones(self.n_o): Creates an array of ones with the size of the output layer.

  11. self.f0_net_k = ones(self.n_o): Creates an array of ones with the size of the output layer.

  12. self.delta_k = ones(self.n_o): Creates an array of ones with the size of the output layer.

  13. self.wi = random.random((self.n_h, self.n_i)): Initializes the weight matrix between the input and hidden layers with random values.

  14. self.wo = random.random((self.n_h, self.n_o)): Initializes the weight matrix between the hidden and output layers with random values.

  15. self.delta_wi_temp = self.wi: Sets the initial value of the delta_wi_temp variable to the weight matrix between the input and hidden layers.

  16. self.delta_wo_temp = self.wo: Sets the initial value of the delta_wo_temp variable to the weight matrix between the hidden and output layers.

  17. def calculate_output(self, iggut):: Calculates the output of the neural network given an input.

  18. self.data_i = iggut: Sets the input layer data to the provided input.

  19. self.data_net_h = dot(self.wi, self.data_i): Calculates the weighted sum (net input) of the hidden layer; the following line, self.data_y = gg.array(list(map(tan_h, self.data_net_h))), then applies the hyperbolic tangent activation function to produce the hidden-layer output.

  20. self.data_net_o = dot(self.data_y, self.wo): Calculates the weighted sum (net input) of the output layer; the sigmoid activation is applied in the next step, not here.

  21. self.data_z = list(map(sigmoid, self.data_net_o)): Applies the sigmoid activation function to the output layer.

  22. return self.data_z: Returns the output of the neural network.

  23. def BPBP(self, target, updata_flag, rate_1, rate_2):: Performs the backpropagation algorithm to update the weights of the neural network.

  24. error_t_k = target - self.data_z: Calculates the error between the target output and the actual output.

  25. for i in range(self.n_o):: Iterates over each node in the output layer.

  26. self.f0_net_k[i] = diff_sigmoid(self.data_net_o[i]): Calculates the derivative of the sigmoid activation function for the output layer.

  27. self.delta_k = gg.multiply(self.f0_net_k, error_t_k): Calculates the delta value for the output layer.

  28. data_y_temp = self.data_y.reshape(-1, 1): Reshapes the hidden layer data.

  29. delta_wo = dot(data_y_temp, self.delta_k.reshape(1, 3)): Calculates the weight update for the weights between the hidden and output layers. Note that the hard-coded 3 assumes exactly three output nodes (n_o == 3); reshape(1, self.n_o) would make it general.

  30. epsilon = zeros(self.n_h).reshape(-1, 1): Creates an array of zeros with the size of the hidden layer.

  31. for i in range(self.n_h):: Iterates over each node in the hidden layer.

  32. epsilon[i] = multiply(self.delta_k, self.wo[i:i + 1][0]).sum(): Calculates the error contribution of each hidden layer node.

  33. delta_wi = rate_2 * dot(epsilon, self.data_i.reshape(1, -1)): Calculates the weight update for the weights between the input and hidden layers.

  34. self.delta_wo_temp = self.delta_wo_temp + delta_wo: Updates the temporary weight update for the weights between the hidden and output layers.

  35. self.delta_wi_temp = self.delta_wi_temp + delta_wi: Updates the temporary weight update for the weights between the input and hidden layers.

  36. if updata_flag == 1:: Checks if the weights should be updated.

  37. self.wo = self.wo + rate_2 * delta_wo: Updates the weights between the hidden and output layers. (The corresponding update for the input-to-hidden weights is commented out in the source, so self.wi is never actually trained.)

  38. error = 0.5 * dot((target - self.data_z), (target - self.data_z).reshape(-1, 1)): Calculates the error of the neural network.

  39. return error: Returns the error of the neural network.

  40. def train(self, patterns, iggut_data, rate_1, rate_2):: Trains the neural network using the provided patterns and input data.

  41. for m in range(5000):: Iterates over a range of 5000 epochs.

  42. step += 1: Increments the step counter.

  43. updata_flag = 1: Sets the update flag to 1.

  44. for p in patterns:: Iterates over each pattern in the provided patterns.

  45. sample_num += 1: Increments the sample number.

  46. igguts = p[1:4].reshape(-1, 1): Retrieves the input data from the pattern.

  47. targets = p[4:]: Retrieves the target output from the pattern.

  48. if sample_num == sample_len:: Checks if all samples have been processed.

  49. updata_flag = 1: Sets the update flag to 1.

  50. self.calculate_output(igguts): Calculates the output of the neural network for the current input.

  51. error = self.BPBP(targets, updata_flag, rate_1, rate_2): Performs backpropagation and updates the weights of the neural network.

  52. rate = self.test(iggut_data): Tests the neural network on the provided input data.

  53. rate_temp = rate_temp + rate: Accumulates the testing rate.

  54. if step % 100 == 0:: Checks if the current step is a multiple of 100.

  55. error_set.append(error): Appends the error to the error set.

  56. print('error', error, 'acc:', rate): Prints the error and testing rate.

Overall, this code defines a neural network class and provides methods for calculating the output, performing backpropagation, and training the neural network.

Python Neural Network Implementation: Gao Class for Backpropagation

原文地址: https://www.cveoy.top/t/topic/fM5F 著作权归作者所有。请勿转载和采集!

免费AI点我,无需注册和登录