import tensorflow as tf

# Number of neurons in the input layer and the hidden layer.
# (Original had both assignments fused onto one line — invalid Python.)
input_size = 6
hidden_size = 8

# Input layer: placeholder for a batch of feature vectors,
# shape [batch_size, input_size].
# NOTE(review): this is the TF 1.x graph API (tf.placeholder); it requires
# TensorFlow 1.x or tf.compat.v1 — confirm the target TF version.
x = tf.placeholder(tf.float32, shape=[None, input_size])

# Weights and biases for the hidden layer (W1, b1) and the output layer
# (W2, b2), initialized from a standard normal distribution.
# (Original had all four assignments fused onto one line — invalid Python.)
W1 = tf.Variable(tf.random_normal([input_size, hidden_size]), dtype=tf.float32)
b1 = tf.Variable(tf.random_normal([hidden_size]), dtype=tf.float32)
W2 = tf.Variable(tf.random_normal([hidden_size, 1]), dtype=tf.float32)
b2 = tf.Variable(tf.random_normal([1]), dtype=tf.float32)

# Forward pass: one ReLU-activated hidden layer followed by a linear
# (no activation) output layer producing a single scalar per sample.
hidden_layer = tf.nn.relu(tf.matmul(x, W1) + b1)
output_layer = tf.matmul(hidden_layer, W2) + b2

# Target placeholder, mean-squared-error loss, and a plain SGD optimizer
# with learning rate 0.01.
y = tf.placeholder(tf.float32, shape=[None, 1])
loss = tf.reduce_mean(tf.square(y - output_layer))
optimizer = tf.train.GradientDescentOptimizer(0.01).minimize(loss)

# Op that initializes every variable in the graph; must be run before training.
init = tf.global_variables_initializer()

# Example training data — the original referenced `input_data`/`output_data`
# without ever defining them (NameError at runtime). Replace these with real
# data of shape [num_samples, input_size] and [num_samples, 1].
input_data = [[1, 2, 3, 4, 5, 6],
              [6, 5, 4, 3, 2, 1]]
output_data = [[1.0], [0.0]]

# Launch a session and run the training loop.
with tf.Session() as sess:
    sess.run(init)
    for i in range(1000):  # train for 1000 steps
        # Feed inputs and targets into the model and take one SGD step.
        _, cost = sess.run([optimizer, loss],
                           feed_dict={x: input_data, y: output_data})
        if i % 100 == 0:
            print("Epoch:", '%04d' % (i+1), "cost=", "{:.9f}".format(cost))
    print("Optimization Finished!")

    # Evaluate the trained model. This must stay inside the `with` block:
    # the original ran it after the session had closed, which raises
    # RuntimeError("Attempted to use a closed Session").
    test_input = [[1, 2, 3, 4, 5, 6]]
    test_output = sess.run(output_layer, feed_dict={x: test_input})
    print("Test Output:", test_output)
# Spec: 6 input neurons, 8 hidden neurons, 1 output value, ReLU activation.

# Source: https://www.cveoy.top/t/topic/AHr — copyright belongs to the original author.