Introduction to Common TensorFlow Functions

What tf.train.Supervisor does

tf.train.Supervisor simplifies programming by sparing you from implementing the restore operation explicitly. Let's look at an example.

import tensorflow as tf
import numpy as np
import os
log_path = r"D:\Source\model\linear"
log_name = "linear.ckpt"
# Create 100 phony x, y data points in NumPy, y = x * 0.1 + 0.3
x_data = np.random.rand(100).astype(np.float32)
y_data = x_data * 0.1 + 0.3

# Try to find values for W and b that compute y_data = W * x_data + b
# (We know that W should be 0.1 and b 0.3, but TensorFlow will
# figure that out for us.)
W = tf.Variable(tf.random_uniform([1], -1.0, 1.0))
b = tf.Variable(tf.zeros([1]))
y = W * x_data + b

# Minimize the mean squared errors.
loss = tf.reduce_mean(tf.square(y - y_data))
optimizer = tf.train.GradientDescentOptimizer(0.5)
train = optimizer.minimize(loss)

# Before starting, initialize the variables.  We will 'run' this first.
saver = tf.train.Saver()
init = tf.global_variables_initializer()

# Launch the graph.
sess = tf.Session()
sess.run(init)

if len(os.listdir(log_path)) != 0:  # a model already exists, so load it
    saver.restore(sess, os.path.join(log_path, log_name))
for step in range(201):
    sess.run(train)
    if step % 20 == 0:
        print(step, sess.run(W), sess.run(b))
saver.save(sess, os.path.join(log_path, log_name))

This code is a small modification of the demo on the TensorFlow website: if a model already exists, it is loaded first and training continues from there. tf.train.Supervisor can simplify this step. See the code below.

import tensorflow as tf
import numpy as np
import os
log_path = r"D:\Source\model\supervisor"
log_name = "linear.ckpt"
x_data = np.random.rand(100).astype(np.float32)
y_data = x_data * 0.1 + 0.3

W = tf.Variable(tf.random_uniform([1], -1.0, 1.0))
b = tf.Variable(tf.zeros([1]))
y = W * x_data + b

loss = tf.reduce_mean(tf.square(y - y_data))
optimizer = tf.train.GradientDescentOptimizer(0.5)
train = optimizer.minimize(loss)

saver = tf.train.Saver()
init = tf.global_variables_initializer()

sv = tf.train.Supervisor(logdir=log_path, init_op=init)  # logdir is where checkpoints and summaries are saved
saver = sv.saver  # use the Supervisor's saver
with sv.managed_session() as sess:  # automatically looks for a checkpoint in logdir; if none exists, runs the init op
    for i in range(201):
        sess.run(train)
        if i % 20 == 0:
            print(i, sess.run(W), sess.run(b))
    saver.save(sess, os.path.join(log_path, log_name))

sv = tf.train.Supervisor(logdir=log_path, init_op=init) checks whether a model already exists. If it does, the model is loaded automatically, with no need to call restore explicitly.
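
As a side note, the Supervisor can also checkpoint on its own. Below is a minimal sketch reusing the graph built above; save_model_secs is a standard Supervisor argument that sets the autosave interval in seconds (600 by default):

# Minimal sketch: let the Supervisor checkpoint automatically.
# With save_model_secs=60, a checkpoint is written to logdir every
# 60 seconds, so the explicit saver.save() call becomes optional.
sv = tf.train.Supervisor(logdir=log_path, init_op=init, save_model_secs=60)
with sv.managed_session() as sess:
    for i in range(201):
        sess.run(train)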

tf.global_variables_initializer()

1. Problem description
In many sessions we add the line:
sess.run(tf.global_variables_initializer())
As for why, it often just feels like something you do to initialize variables; in fact, there is a concrete reason for this line.
2. Explaining it with a program

import tensorflow as tf
# Cases where global_variables_initializer is required:
# any graph containing tf.Variable: variables created in TF start out uninitialized, so before running the init op the object is (e.g. in a debugger) a Variable, not yet a concrete tensor value
size_out = 10
tensor = tf.Variable(tf.random_normal(shape=[size_out]))
init = tf.global_variables_initializer()
with tf.Session() as sess:
    sess.run(init)  # initialize the variables
    print(sess.run(tensor))
# Cases where initialization can be skipped:
# graphs containing no tf.Variable or tf.get_variable,
# e.g. only tf.random_normal or tf.constant tensors
size_out = 10
tensor = tf.random_normal(shape=[size_out])  # in a debugger this is already a tensor
init = tf.global_variables_initializer()
with tf.Session() as sess:
    # sess.run(init)  # not needed: there are no variables to initialize
    print(sess.run(tensor))
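
To see concretely why the first case needs the init op, here is a minimal sketch of what happens when initialization is skipped; FailedPreconditionError is what TF 1.x raises when an uninitialized variable is read:

import tensorflow as tf

v = tf.Variable(tf.zeros([3]))
with tf.Session() as sess:
    try:
        print(sess.run(v))  # fails: v was never initialized
    except tf.errors.FailedPreconditionError:
        print("attempted to use an uninitialized variable")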

tf.gradients()

In TensorFlow, the parameters of tf.gradients() are as follows:

tf.gradients(ys, xs,
             grad_ys=None,
             name='gradients',
             colocate_gradients_with_ops=False,
             gate_gradients=False,
             aggregation_method=None,
             stop_gradients=None)

We will not go into the meanings of the parameters just yet.

As a differentiation function, its main job is to compute the derivative $\frac{\partial y}{\partial x}$. In TensorFlow, both $y$ and $x$ are tensors.
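
A minimal single-tensor sketch of this (the constant 3.0 is an illustrative choice):

import tensorflow as tf

x = tf.constant(3.0)
y = x * x
dy_dx = tf.gradients(y, x)  # returns a list: [dy/dx] = [2x]
with tf.Session() as sess:
    print(sess.run(dy_dx))  # [6.0]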

Going further, the ys and xs that tf.gradients() accepts can be not only single tensors but also lists of the form [tensor1, tensor2, …, tensorn]. When both ys and xs are lists, the derivative relationship is:

gradients() adds ops to the graph to output the derivatives of ys with respect to xs. It returns a list of Tensor of length len(xs) where each tensor is the sum(dy/dx) for y in ys.

In other words:

  1. tf.gradients() differentiates ys with respect to xs.

  2. The return value is a list whose length equals len(xs).

  3. Suppose the return value is [grad1, grad2, grad3], with ys = [y1, y2] and xs = [x1, x2, x3]. Then the actual computation is (as the sketch below verifies):

    • $grad1 = \frac{\partial y_1}{\partial x_1} + \frac{\partial y_2}{\partial x_1}$
    • $grad2 = \frac{\partial y_1}{\partial x_2} + \frac{\partial y_2}{\partial x_2}$
    • $grad3 = \frac{\partial y_1}{\partial x_3} + \frac{\partial y_2}{\partial x_3}$
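
A quick sketch verifying point 3 with scalar tensors (the values x1 = 1, x2 = 2, x3 = 3 are illustrative choices, not from the original text):

import tensorflow as tf

x1 = tf.constant(1.0)
x2 = tf.constant(2.0)
x3 = tf.constant(3.0)
y1 = x1 * x2  # dy1/dx1 = x2, dy1/dx2 = x1, dy1/dx3 = 0
y2 = x2 * x3  # dy2/dx1 = 0,  dy2/dx2 = x3, dy2/dx3 = x2
grads = tf.gradients([y1, y2], [x1, x2, x3])
with tf.Session() as sess:
    print(sess.run(grads))  # [2.0, 4.0, 2.0]: each entry sums dy/dx over all ys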

Basic practice

Taking linear regression as an example, let's exercise the basic functionality of tf.gradients(). The linear regression model: $y = 3 \times x + 2$.

import numpy as np
import tensorflow as tf


sess = tf.Session()

x_input = tf.placeholder(tf.float32, name='x_input')
y_input = tf.placeholder(tf.float32, name='y_input')
w = tf.Variable(2.0, name='weight')
b = tf.Variable(1.0, name='biases')
y = tf.add(tf.multiply(x_input, w), b)
loss_op = tf.reduce_sum(tf.pow(y_input - y, 2)) / (2 * 32)
train_op = tf.train.GradientDescentOptimizer(0.01).minimize(loss_op)
gradients_node = tf.gradients(loss_op, w)
print(gradients_node)

'''tensorboard'''
# tf.summary.scalar('norm_grads', gradients_node)
# tf.summary.histogram('norm_grads', gradients_node)
# merged = tf.summary.merge_all()
# writer = tf.summary.FileWriter('log')

init = tf.global_variables_initializer()
sess.run(init)

'''Construct the dataset'''
x_pure = np.random.randint(-10, 100, 32)
x_train = x_pure + np.random.randn(32) / 10  # add noise to x
y_train = 3 * x_pure + 2 + np.random.randn(32) / 10  # add noise to y

for i in range(20):
	_, gradients, loss = sess.run([train_op, gradients_node, loss_op],
								  feed_dict={x_input: x_train[i], y_input: y_train[i]})
	print("epoch: {} 	 loss: {} 	 gradients: {}".format(i, loss, gradients))

sess.close()

Output:

epoch: 0 	 loss: 94.6083221436 	 gradients: [-187.66052]
epoch: 1 	 loss: 1.52120530605 	 gradients: [3.0984864]
epoch: 2 	 loss: 101.41834259 	 gradients: [241.91911]
...
epoch: 18 	 loss: 0.0215022582561 	 gradients: [-0.44370675]
epoch: 19 	 loss: 0.0189439821988 	 gradients: [-0.31349587]

You can see the gradients shrinking over time, which indicates that the model is converging. The parameter updates also mostly follow the direction of gradient descent (albeit with some oscillation).
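
Relatedly, optimizer.minimize() used above can be split into two steps, compute_gradients() and apply_gradients() (both standard TF 1.x optimizer methods), which is handy when you want to inspect or clip gradients before applying them. A minimal sketch reusing loss_op, w and b from the snippet above:

opt = tf.train.GradientDescentOptimizer(0.01)
grads_and_vars = opt.compute_gradients(loss_op, var_list=[w, b])
# the (gradient, variable) pairs could be logged or clipped here
train_op = opt.apply_gradients(grads_and_vars)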

tf.random_uniform

First, the signature given in the official TensorFlow documentation:

tf.random_uniform(
    shape,
    minval=0,
    maxval=None,
    dtype=tf.float32,
    seed=None,
    name=None
)

shape specifies the shape of the output: whether the array is 1-dimensional, 2-dimensional, or n-dimensional. minval and maxval bound the uniform distribution (lower bound inclusive, upper bound exclusive).
The examples below illustrate this:

import tensorflow as tf
res = tf.random_uniform([1], -1, 1)
with tf.Session() as sess:
    print(sess.run(res))
# Result: [0.8457055]
import tensorflow as tf
res = tf.random_uniform([2], -1, 1)
with tf.Session() as sess:
    print(sess.run(res))
# Result: [-0.20672345  0.6750064 ]
import tensorflow as tf
res = tf.random_uniform((4, 4), -1, 1)
with tf.Session() as sess:
    print(sess.run(res))
# Result:
# [[ 0.61043835 -0.35983467 -0.02590227  0.6653023 ]
#  [ 0.4215083   0.6378925  -0.5907881   0.94997907]
#  [ 0.02292204 -0.7329526   0.03539038 -0.63158727]
#  [ 0.15353537 -0.21372676 -0.5452025  -0.44808888]]
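
One more hedged example: with an integer dtype, maxval must be supplied explicitly, and seed makes the draw reproducible (both are standard tf.random_uniform parameters):

import tensorflow as tf
res = tf.random_uniform((2, 2), minval=0, maxval=10, dtype=tf.int32, seed=42)
with tf.Session() as sess:
    print(sess.run(res))  # a 2x2 matrix of ints drawn uniformly from [0, 10)
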
Original article: https://www.cnblogs.com/chenyameng/p/14148336.html