TensorFlow: TensorBoard

Reference: https://www.cnblogs.com/felixwang2/p/9184344.html

Learning and practicing at the same time.

# https://www.cnblogs.com/felixwang2/p/9184344.html
# TensorFlow (7): running the network with TensorBoard
# MNIST dataset, handwritten digits
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

# Summary statistics for a variable
def variable_summaries(var):
    with tf.name_scope('summaries'):
        mean = tf.reduce_mean(var)
        tf.summary.scalar('mean', mean)  # mean
        with tf.name_scope('stddev'):
            stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))
        tf.summary.scalar('stddev', stddev)  # standard deviation
        tf.summary.scalar('max', tf.reduce_max(var))  # maximum
        tf.summary.scalar('min', tf.reduce_min(var))  # minimum
        tf.summary.histogram('histogram', var)  # histogram


# Load the dataset
mnist = input_data.read_data_sets('MNIST_data', one_hot=True)

# Size of each batch
batch_size = 100
# Total number of batches
n_batch = mnist.train.num_examples // batch_size

# Name scopes
with tf.name_scope('input'):
    # Define two placeholders
    x = tf.placeholder(tf.float32, [None, 784], name='x-input')
    y = tf.placeholder(tf.float32, [None, 10], name='y-input')

with tf.name_scope('layer'):
    # Build a simple neural network
    with tf.name_scope('weights'):
        W = tf.Variable(tf.zeros([784, 10]), name='W')
        variable_summaries(W)
    with tf.name_scope('biases'):
        b = tf.Variable(tf.zeros([10]), name='b')
        variable_summaries(b)
    with tf.name_scope('wx_plus_b'):
        wx_plus_b = tf.matmul(x, W) + b
    with tf.name_scope('softmax'):
        prediction = tf.nn.softmax(wx_plus_b)

with tf.name_scope('loss'):
    # Quadratic (mean squared error) cost function
    loss = tf.reduce_mean(tf.square(y - prediction))
    tf.summary.scalar('loss', loss)  # a single scalar, so variable_summaries is not needed
with tf.name_scope('train'):
    # Train with gradient descent
    train_step = tf.train.GradientDescentOptimizer(0.2).minimize(loss)

# Initialize the variables
init = tf.global_variables_initializer()

with tf.name_scope('accuracy'):
    with tf.name_scope('correct_prediction'):
        # Compare the positions of the maxima; the result is a list of booleans
        correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(prediction, 1))  # argmax returns the index of the largest value in a 1-D tensor
    with tf.name_scope('accuracy'):
        # Compute the accuracy
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))  # cast converts the booleans to floats
        tf.summary.scalar('accuracy', accuracy)  # a single scalar, so variable_summaries is not needed

# Merge all summaries
merged = tf.summary.merge_all()

gpu_options = tf.GPUOptions(allow_growth=True)
with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)) as sess:
    sess.run(init)
    writer = tf.summary.FileWriter('logs/', sess.graph)  # write the graph and summaries to the log directory

    for epoch in range(10):
        for batch in range(n_batch):
            batch_xs, batch_ys = mnist.train.next_batch(batch_size)
            summary, _ = sess.run([merged, train_step], feed_dict={x: batch_xs, y: batch_ys})

        # Add one summary point per epoch (from the last batch of the epoch)
        writer.add_summary(summary, epoch)
        # Evaluate accuracy on the test set
        acc = sess.run(accuracy, feed_dict={x: mnist.test.images, y: mnist.test.labels})
        print('Iter:' + str(epoch) + ',Testing Accuracy:' + str(acc))
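After training finishes, the event file written to logs/ can be sanity-checked without opening TensorBoard. The sketch below is not part of the original post; it assumes the event file sits directly under logs/ and uses the TF 1.x tf.train.summary_iterator API to print the recorded scalar values.

import glob
import tensorflow as tf

# Assumption: the training script above wrote its events into ./logs/
event_files = sorted(glob.glob('logs/events.out.tfevents.*'))

for event in tf.train.summary_iterator(event_files[0]):
    # Each Event may carry a Summary with one or more scalar values
    for value in event.summary.value:
        if value.HasField('simple_value'):
            print(event.step, value.tag, value.simple_value)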

In a command window, run: tensorboard --logdir=F:\document\spyder-py3\study_tensor\logs

This prints an address at which the data recorded during training can be viewed.

PS F:\document\spyder-py3\study_tensor> tensorboard --logdir=F:\document\spyder-py3\study_tensor\logs
f:\programdata\anaconda3\envs\py35\lib\site-packages\h5py\__init__.py:36: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. In future, it will be treated as `np.float64 == np.dtype(float).type`.
  from ._conv import register_converters as _register_converters
TensorBoard 1.10.0 at http://KOTIN:6006 (Press CTRL+C to quit)
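If port 6006 is already in use on the machine, TensorBoard accepts a --port flag; for example (the port number below is only an illustration):

tensorboard --logdir=logs --port=6007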

In a browser, enter

http://KOTIN:6006

or

http://localhost:6006

and you can see the generated graph and the summaries recorded during training.
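A small optional variation (not from the original post): if each experiment writes to its own subdirectory under logs/ and TensorBoard is pointed at the parent logs/ directory, the SCALARS tab overlays the runs so they can be compared. A minimal sketch with a hypothetical timestamped run directory:

import os
import time
import tensorflow as tf

# Hypothetical run name; any unique subdirectory under logs/ works
run_dir = os.path.join('logs', time.strftime('run_%Y%m%d_%H%M%S'))

with tf.Session() as sess:
    # In the script above, only the FileWriter target would change;
    # the rest of the training loop stays the same
    writer = tf.summary.FileWriter(run_dir, sess.graph)
    writer.close()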




Original post: https://www.cnblogs.com/juluwangshier/p/11415254.html