Code walkthrough (the steps for building a neural network)

#coding:utf-8
#0 Import modules and generate the simulated dataset.
#TensorFlow study notes (Peking University), tf3_6.py -- a full walkthrough of building a neural network
import tensorflow as tf
import numpy as np
BATCH_SIZE = 8
SEED = 23455

rdm = np.random.RandomState(SEED)
X = rdm.rand(32,2)
Y_ = [[int(x0 + x1 < 1)] for (x0, x1) in X]  # for (x0, x1) in X takes each sample's two features; int(x0 + x1 < 1) labels the sample 1 when x0 + x1 < 1, and 0 otherwise
print("X:
",X)
print("Y_:
",Y_)
x = tf.placeholder(tf.float32, shape=(None, 2))  # placeholder for the input features
y_ = tf.placeholder(tf.float32, shape=(None, 1))  # placeholder for the labels
w1 = tf.Variable(tf.random_normal([2, 3], stddev=1, seed=1))  # normally distributed random weights
w2 = tf.Variable(tf.random_normal([3, 1], stddev=1, seed=1))  # normally distributed random weights
a = tf.matmul(x, w1)  # matrix multiplication: hidden layer
y = tf.matmul(a, w2)  # matrix multiplication: output layer
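# shape check: x (None,2) x w1 (2,3) -> a (None,3); a (None,3) x w2 (3,1) -> y (None,1)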

#2 Define the loss function and the back-propagation method.
loss_mse = tf.reduce_mean(tf.square(y-y_)) 
train_step = tf.train.GradientDescentOptimizer(0.001).minimize(loss_mse)
#train_step = tf.train.MomentumOptimizer(0.001,0.9).minimize(loss_mse)
#train_step = tf.train.AdamOptimizer(0.001).minimize(loss_mse)
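# (The two commented lines above are alternative optimizers, Momentum and Adam;
# enable exactly one train_step at a time.)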

#3 Create a session and train for STEPS rounds.
with tf.Session() as sess:
    init_op = tf.global_variables_initializer()  # initialize all variables
    sess.run(init_op)
    # Print the (as yet untrained) parameter values.
    print("w1:
", sess.run(w1))
    print("w2:
", sess.run(w2))
    print("
")
    
    # Train the model.
    STEPS = 3000
    for i in range(STEPS):  # 3000 rounds
        start = (i*BATCH_SIZE) % 32 #i*8%32
        end = start + BATCH_SIZE    #i*8%32+8
        sess.run(train_step, feed_dict={x: X[start:end], y_: Y_[start:end]})
        if i % 500 == 0:
            total_loss = sess.run(loss_mse, feed_dict={x: X, y_: Y_})
            print("After %d training step(s), loss_mse on all data is %g" % (i, total_loss))
    
    # Print the parameter values after training.
    print("
")
    print("w1:
", sess.run(w1))
    print("w2:
", sess.run(w2))
    # Up to this point we have only built the computation graph that carries the
    # computation; nothing is evaluated until a Session runs it.
    # Session: executes the node operations in the computation graph.
    # Printing a variable without sess.run therefore shows only its tensor
    # metadata, not its trained values:
    print("w1:\n", w1)
    print("w2:\n", w2)


Learning material: "Artificial Intelligence Practice: TensorFlow Notes" on the MOOC app, a course by Cao Jian of Peking University.
SEED = 23455
rdm = np.random.RandomState(SEED)
X = rdm.rand(32,2)
rand(32, 2) generates a 32-row, 2-column array of random numbers in [0, 1).
rdm = np.random.RandomState(SEED) fixes the seed, so the same random numbers are produced on every run, which makes it easy to check your output against the course video.
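A minimal sketch of this reproducibility (standalone NumPy, not part of the course script):

import numpy as np
a = np.random.RandomState(23455).rand(32, 2)
b = np.random.RandomState(23455).rand(32, 2)
print(np.array_equal(a, b))           # True: the same seed reproduces the same numbers
print(a.min() >= 0.0, a.max() < 1.0)  # True True: rand samples uniformly from [0, 1)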
 start = (i*BATCH_SIZE) % 32 #i*8%32
 end = start + BATCH_SIZE    #i*8%32+8
 sess.run(train_step, feed_dict={x: X[start:end], y_: Y_[start:end]})
In other words: i=0 gives start=0, end=8; i=1 gives start=8, end=16; i=2 gives start=16, end=24; i=3 gives start=24, end=32; and at i=4 the index wraps around to start=0, end=8. So every step trains on a batch of 8 samples, for 3000 steps in total.
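You can verify the wrap-around with a small standalone sketch of the index arithmetic:

BATCH_SIZE = 8
for i in range(5):            # the first five training steps
    start = (i * BATCH_SIZE) % 32
    end = start + BATCH_SIZE
    print(i, start, end)      # prints 0 0 8 / 1 8 16 / 2 16 24 / 3 24 32 / 4 0 8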
sess.run(train_step, feed_dict={x: X[start:end], y_: Y_[start:end]})
Each step feeds X[start:end] and Y_[start:end] into the placeholders x and y_, then runs one optimizer update, which nudges w1 and w2 so as to reduce the loss.
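To make the loss concrete, here is a small NumPy sketch (with made-up predictions and labels, not the course data) of exactly what loss_mse computes:

import numpy as np
y_pred = np.array([[0.9], [0.2], [0.6]])     # hypothetical network outputs
y_true = np.array([[1.0], [0.0], [1.0]])     # hypothetical labels
print(np.mean(np.square(y_pred - y_true)))   # (0.01 + 0.04 + 0.16) / 3 = 0.07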
if i % 500 == 0:
    total_loss = sess.run(loss_mse, feed_dict={x: X, y_: Y_})
    print("After %d training step(s), loss_mse on all data is %g" % (i, total_loss))
Every 500 steps, print total_loss, the MSE evaluated on the full dataset.

 
Original post: https://www.cnblogs.com/fcfc940503/p/10931317.html