TensorFlow Notes (Basics): Batch Normalization (batch_normalization)

CODE

# -*- coding: utf-8 -*-
#
# Author: 田丰 (FontTian)
# Created: 2017/8/2
# Email: fonttian@Gmaill.com
# CSDN: http://blog.csdn.net/fontthrone

import tensorflow as tf
import os

os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
# Compute the mean and variance of Wx_plus_b; axes=[0] selects the
# dimension to normalize over (the batch axis)
# Example pre-activation values; the original post left these as empty
# placeholders, so concrete shapes are assumed here: 3 samples x 2 features
Wx_plus_b = tf.constant([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]])
out_size = 2  # number of features (width of Wx_plus_b)

fc_mean, fc_var = tf.nn.moments(Wx_plus_b, axes=[0])
scale = tf.Variable(tf.ones([out_size]))   # gamma: learned scale, initialized to 1
shift = tf.Variable(tf.zeros([out_size]))  # beta: learned shift, initialized to 0
epsilon = 0.001                            # small constant to keep the division stable
Wx_plus_b = tf.nn.batch_normalization(Wx_plus_b, fc_mean, fc_var, shift, scale, epsilon)
# The tf.nn.batch_normalization call above is equivalent to doing the
# normalization by hand:
Wx_plus_b = (Wx_plus_b - fc_mean) / tf.sqrt(fc_var + epsilon)
Wx_plus_b = Wx_plus_b * scale + shift
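In a real network these same lines sit inside a layer, between the linear transform and the activation. Below is a minimal sketch of that wiring; the function name fc_layer_with_bn, the random-normal initializer, and the placeholder shapes are my own assumptions, not from the original post:

import tensorflow as tf

def fc_layer_with_bn(inputs, in_size, out_size, activation=tf.nn.relu):
    # Linear transform: Wx + b (hypothetical layer, for illustration only)
    W = tf.Variable(tf.random_normal([in_size, out_size]))
    b = tf.Variable(tf.zeros([out_size]) + 0.1)
    Wx_plus_b = tf.matmul(inputs, W) + b

    # Normalize with per-batch statistics (axis 0 = batch dimension)
    fc_mean, fc_var = tf.nn.moments(Wx_plus_b, axes=[0])
    scale = tf.Variable(tf.ones([out_size]))
    shift = tf.Variable(tf.zeros([out_size]))
    Wx_plus_b = tf.nn.batch_normalization(
        Wx_plus_b, fc_mean, fc_var, shift, scale, 0.001)

    # Activation is applied after normalization
    return activation(Wx_plus_b)

# Usage: a 4-feature input batch passed through one normalized layer
x = tf.placeholder(tf.float32, [None, 4])
h = fc_layer_with_bn(x, in_size=4, out_size=2)

Note that fc_mean and fc_var here are statistics of the current batch only; at inference time the usual practice is to replace them with moving averages accumulated during training (for example via tf.train.ExponentialMovingAverage), which this note does not cover.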

RUN


sigmoid
[[ 0.7310586   0.88079703]
 [ 0.7310586   0.88079703]
 [ 0.7310586   0.88079703]]

relu
Tensor("Relu:0", shape=(2,), dtype=float32)

dropout
[[-0.  0.  6.  0.]]
[[-0.  0.  0.  0.]]
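The run log above exercises sigmoid, relu, and dropout rather than the batch-normalization snippet; the code that produced it is not included in this note. A minimal sketch that yields output of the same shape (the input constants and the keep_prob value are assumptions of mine, chosen to match the printed values):

import tensorflow as tf

sess = tf.Session()

# sigmoid squashes each element into (0, 1); sigmoid(1)≈0.731, sigmoid(2)≈0.881
x = tf.constant([[1.0, 2.0], [1.0, 2.0], [1.0, 2.0]])
print('sigmoid')
print(sess.run(tf.nn.sigmoid(x)))

# Printing a tensor without sess.run() shows only its metadata, which is
# why the relu line in the log is a Tensor description, not values
relu_out = tf.nn.relu(tf.constant([-1.0, 1.0]))
print('relu')
print(relu_out)

# dropout zeroes elements at random and rescales survivors by 1/keep_prob,
# so two runs of the same op print different results
d = tf.constant([[-1.0, 2.0, 3.0, 4.0]])
print('dropout')
print(sess.run(tf.nn.dropout(d, keep_prob=0.5)))
print(sess.run(tf.nn.dropout(d, keep_prob=0.5)))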
Original article: https://www.cnblogs.com/fonttian/p/7294790.html