tensorflow(4): Neural Network Framework Summary

# Summary of the neural network framework

# 1. Build the network design structure (forward propagation): file forward.py
def forward(x,regularizer):  # input x and the regularization weight
    w=...  # define the weights
    b=...  # define the biases
    y=...  # compute the output
    return y

def get_weight(shape,regularizer):  # shape of w and the regularization weight
    w=tf.Variable(tf.random_normal(shape),dtype=tf.float32)  # initialize w
    # add the L2 regularization loss of each w to the total loss collection
    tf.add_to_collection('losses',tf.contrib.layers.l2_regularizer(regularizer)(w))
    return w

def get_bias(shape):  # number of biases in a given layer
    b=tf.Variable(tf.constant(0.01,shape=shape))  # initialize b
    return b

# 2. Backpropagation trains the model and optimizes the parameters: file backward.py
def backward():
    x=tf.placeholder(tf.float32,shape=(None,2))  # placeholder for the input
    y_=tf.placeholder(tf.float32,shape=(None,1))  # placeholder for the labels
    y=forward.forward(x,regularizer)
    global_step=tf.Variable(0,trainable=False)  # step counter
    loss=...  # define the loss (see the options below)
    
#    loss can be a custom loss or cross entropy:
#    the distance between y and y_: loss_mse=tf.reduce_mean(tf.square(y-y_))
#    or alternatively:
#    ce=tf.nn.sparse_softmax_cross_entropy_with_logits(logits=y,labels=tf.argmax(y_,1))
#    the mean distance between y and y_: cem=tf.reduce_mean(ce)
#    with regularization added, this becomes:
#    loss=<distance between y and y_>+tf.add_n(tf.get_collection('losses'))
    
    
    train_step=tf.train.GradientDescentOptimizer(learning_rate).minimize(loss,global_step=global_step)
    
    
#    to compute the learning rate dynamically, define it with exponential decay
#    (in runnable code this definition must come before the optimizer that uses it):
    learning_rate=tf.train.exponential_decay(LEARNING_RATE_BASE,
                                        global_step,
                                        LEARNING_RATE_STEP,  # = total training samples / batch_size
                                        LEARNING_RATE_DECAY,
                                        staircase=True)
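#    with staircase=True the rate drops in discrete steps; the underlying
#    formula is:
#    learning_rate = LEARNING_RATE_BASE * LEARNING_RATE_DECAY ** (global_step // LEARNING_RATE_STEP)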
    
#    if using an exponential moving average (EMA) of the weights, also add:
    ema=tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY,global_step)
    ema_op=ema.apply(tf.trainable_variables())
    with tf.control_dependencies([train_step,ema_op]):
        train_op=tf.no_op(name='train')
        
    with tf.Session() as sess:
        init_op=tf.global_variables_initializer()
        sess.run(init_op)
        
        for i in range(steps):
            sess.run(train_step,feed_dict={x: ,y_: })  # run train_op here instead if EMA is used
            if i % print_interval == 0:  # report every print_interval steps (placeholder)
                print('')
                
if __name__=='__main__':  # run only when executed as the main file
    backward()
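
To make the cross-entropy option above concrete, here is a minimal standalone sketch; the 3-class shapes and the dummy regularized weight are illustrative assumptions, not part of the original template:

import tensorflow as tf

# hypothetical 3-class setup, for illustration only
y = tf.placeholder(tf.float32, shape=(None, 3))   # logits produced by forward()
y_ = tf.placeholder(tf.float32, shape=(None, 3))  # one-hot labels

# a dummy regularized weight, standing in for what get_weight() collects
w = tf.Variable(tf.random_normal([2, 3]))
tf.add_to_collection('losses', tf.contrib.layers.l2_regularizer(0.01)(w))

ce = tf.nn.sparse_softmax_cross_entropy_with_logits(
    logits=y, labels=tf.argmax(y_, 1))              # per-example cross entropy
cem = tf.reduce_mean(ce)                            # mean over the batch
loss = cem + tf.add_n(tf.get_collection('losses'))  # data loss + regularization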
        

Use regularization to improve generalization, and an exponentially decaying learning rate to speed up optimization.
The example uses three modules:
1 generate the dataset: generateds.py
2 forward propagation: forward.py
3 backpropagation: backward.py

# Generate the dataset: generateds.py
import numpy as np
import matplotlib.pyplot as plt
seed=2
def generateds():
    rdm=np.random.RandomState(seed)
    X=rdm.randn(300,2)
    Y_=[int(x0*x0+x1*x1<2) for (x0,x1) in X]
    Y_c=[['red' if y else 'blue'] for y in Y_]  # 1 is red, 0 is blue
    X=np.vstack(X).reshape(-1,2)  # reshape to n rows, 2 columns, row-major order
    Y_=np.vstack(Y_).reshape(-1,1)  # reshape to n rows, 1 column
    return X,Y_,Y_c
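
Before training, it helps to eyeball the generated data; a quick sketch (the scatter call mirrors the one used later in backward.py):

import numpy as np
import matplotlib.pyplot as plt
import generateds

X, Y_, Y_c = generateds.generateds()
plt.scatter(X[:, 0], X[:, 1], c=np.squeeze(Y_c))  # red inside the circle, blue outside
plt.show()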
# Forward propagation: forward.py
import tensorflow as tf

def get_weight(shape,regularizer):  # shape of w and the regularization weight
    w=tf.Variable(tf.random_normal(shape),dtype=tf.float32)
    tf.add_to_collection('losses',tf.contrib.layers.l2_regularizer(regularizer)(w))
    return w

def get_bias(shape):  # number of biases in the layer
    b=tf.Variable(tf.constant(0.01,shape=shape))
    return b
    
def forward(x,regularizer):  # input x and the regularization weight
    w1=get_weight([2,11],regularizer)
    b1=get_bias([11])
    y1=tf.nn.relu(tf.matmul(x,w1)+b1)  # relu activation
    
    w2=get_weight([11,1],regularizer)
    b2=get_bias([1])
    y=tf.matmul(y1,w2)+b2  # no activation on the output layer
    return y
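
As a quick wiring check, a minimal sketch that runs the forward graph on dummy input (the 0.01 regularizer and the batch of 5 are arbitrary illustrative choices):

import numpy as np
import tensorflow as tf
import forward

x = tf.placeholder(tf.float32, shape=(None, 2))
y = forward.forward(x, 0.01)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    out = sess.run(y, feed_dict={x: np.random.randn(5, 2)})
    print(out.shape)  # expected: (5, 1)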
# Backpropagation: backward.py
import tensorflow as tf 
import numpy as np
import matplotlib.pyplot as plt
import generateds
import forward
steps=40000
batch_size=30
LEARNING_RATE_BASE=0.001
LEARNING_RATE_DECAY=0.999
regularizer=0.01

def backward(): 
    x=tf.placeholder(tf.float32,shape=(None,2))  # placeholder for the input
    y_=tf.placeholder(tf.float32,shape=(None,1))  # placeholder for the labels
    
    X,Y_,Y_c=generateds.generateds()
    y=forward.forward(x,regularizer)
    
    global_step=tf.Variable(0,trainable=False)  # step counter
    learning_rate=tf.train.exponential_decay(LEARNING_RATE_BASE,
                                        global_step, 300/batch_size,  # decay every (total samples / batch_size) steps
                                        LEARNING_RATE_DECAY,
                                        staircase=True)
    #define the loss function
    loss_mse=tf.reduce_mean(tf.square(y-y_))
    loss_total=loss_mse+tf.add_n(tf.get_collection('losses'))
    #define the training step, with regularization included in the loss
    #(pass global_step so the exponential decay actually advances)
    train_step=tf.train.AdamOptimizer(learning_rate).minimize(loss_total,global_step=global_step)
    with tf.Session() as sess:
        init_op=tf.global_variables_initializer()
        sess.run(init_op)
        for i in range(steps):
            start=(i*batch_size)%300  # cycle through the 300 training samples
            end=start+batch_size
            sess.run(train_step,feed_dict={x:X[start:end],y_:Y_[start:end]})
            if i%10000==0:
                loss_v=sess.run(loss_total,feed_dict={x:X,y_:Y_})
                print('after %d steps, loss is: %f'%(i,loss_v))

        # evaluate y over a dense grid to visualize the decision boundary
        xx,yy=np.mgrid[-3:3:0.01,-3:3:0.01]
        grid=np.c_[xx.ravel(),yy.ravel()]  # flatten into (n,2) points matching the input shape
        probs=sess.run(y,feed_dict={x:grid})
        probs=probs.reshape(xx.shape)  # reshape back to the grid shape

    plt.scatter(X[:,0],X[:,1],c=np.squeeze(Y_c))
    plt.contour(xx,yy,probs,levels=[.5])  # draw the contour where probs=0.5 (the decision boundary)
    plt.show()
              
if __name__=='__main__':  # run only when executed as the main file
    backward()

Finally, just run backward.py!

----END---- HAVE A GOOD ONE! The above is my compilation of notes from self-study reference books/blogs in my spare time; it is updated often and is not 100% original. Read and learn!
Original source: https://www.cnblogs.com/xuying-fall/p/8976328.html