Weekly Summary 3

Notes on deep learning practice

import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

# Load MNIST; read_data_sets downloads the data into ./data/ on first use,
# and one_hot=True makes each label a 10-dimensional one-hot vector
mnist = input_data.read_data_sets('./data/', one_hot=True)

# print("train data size:",mnist.train.num_examples)
# print("validation data size:",mnist.validation.num_examples)
# print("test data size:",mnist.test.num_examples)
#
# print(mnist.train.labels[0].shape)
# print(mnist.train.images[0].shape)

# BATCH_SIZE = 128
# xs,ys = mnist.train.next_batch(BATCH_SIZE)
# print("xs shape:",xs.shape)

# tf.get_collection("") fetches every variable in the named collection and returns them as a list
# tf.add_n([]) sums all the elements of a list
# tf.cast(x, dtype) casts x to type dtype
# tf.argmax(x, axis) returns the index of the largest value along axis
# os.path.join("data", "mnist") -> data/mnist
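
For later reference, here is a minimal sketch (my own illustration; the 'losses' collection name is an arbitrary choice, not something the program below uses) of how the first two helpers combine in the usual regularization idiom:

w = tf.Variable(tf.truncated_normal([784, 500], stddev=0.1))
# Put this layer's L2 penalty into a custom collection named 'losses'
tf.add_to_collection('losses', tf.contrib.layers.l2_regularizer(0.0001)(w))
# tf.get_collection returns the collection's entries as a list; tf.add_n sums them
regularization = tf.add_n(tf.get_collection('losses'))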

BATCH_SIZE = 128  # number of examples per training batch
LEARNING_RATE = 0.1  # initial learning rate
LEARNING_RATE_DECAY = 0.99  # learning-rate decay rate
REGULARIZER = 0.0001  # regularization strength
STEP = 50000  # maximum number of training steps
MODEL_SAVE_PATH = './model/'
MODEL_NAME = 'mnist_model'

def hidden_layer(input_tensor, weights1, biases1, weights2, biases2, layer_name):
    # One ReLU hidden layer followed by a linear output layer (layer_name is currently unused)
    layer1 = tf.nn.relu(tf.matmul(input_tensor, weights1) + biases1)
    return tf.matmul(layer1, weights2) + biases2

x = tf.placeholder(tf.float32,[None,784],name="x-input")
y_ = tf.placeholder(tf.float32,[None,10],name="y-input")

# Create the hidden-layer parameters
weights1 = tf.Variable(tf.truncated_normal([784,500],stddev=0.1))
biases1 = tf.Variable(tf.constant(0.1, shape=[500]))

# Create the output-layer parameters
weights2 = tf.Variable(tf.truncated_normal([500,10],stddev=0.1))
biases2 = tf.Variable(tf.constant(0.1, shape=[10]))

# Compute y, the output of the network's forward pass
y = hidden_layer(x,weights1,biases1,weights2,biases2,'y')

# A non-trainable counter of training steps; the optimizer below increments it
global_step = tf.Variable(0, trainable=False)
# Initialize a moving-average object with decay rate 0.99; passing global_step
# as num_updates lets the shadow variables update faster early in training
averages_class = tf.train.ExponentialMovingAverage(0.99, global_step)

# To create the op that updates the moving averages, pass the apply()
# method a list of the variables to track
averages_op = averages_class.apply(tf.trainable_variables())
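
For reference, tf.train.ExponentialMovingAverage keeps a shadow copy of every variable passed to apply(), updated each time the op runs as

shadow = decay * shadow + (1 - decay) * variable

and when num_updates is supplied, the effective decay is min(0.99, (1 + num_updates) / (10 + num_updates)), which is what makes the averages track the variables more closely early in training.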

# Forward pass that reads the shadow (moving-average) copies of the
# parameters; this version of the network is used for evaluation
average_y = hidden_layer(x, averages_class.average(weights1),
                         averages_class.average(biases1),
                         averages_class.average(weights2),
                         averages_class.average(biases2),
                         'average_y')

# Loss: softmax cross-entropy averaged over the batch, plus L2 regularization
# on the weights. The sparse op expects class indices, so the one-hot labels
# are converted with tf.argmax first
cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=y, labels=tf.argmax(y_, 1))
regularizer = tf.contrib.layers.l2_regularizer(REGULARIZER)
regularization = regularizer(weights1) + regularizer(weights2)
loss = tf.reduce_mean(cross_entropy) + regularization
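
Because the labels here are already one-hot vectors, an equivalent formulation (a sketch of the alternative API, not what this program uses) skips tf.argmax and feeds them to the dense op directly:

cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits=y, labels=y_)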

# Decay the learning rate exponentially with the step count so it keeps shrinking as training progresses
learning_rate = tf.train.exponential_decay(LEARNING_RATE, global_step,
                                           mnist.train.num_examples / BATCH_SIZE,
                                           LEARNING_RATE_DECAY)

# Passing global_step makes minimize() increment the counter on every step
train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss, global_step=global_step)

# Run the gradient update and the moving-average update as one training op
train_op = tf.group(train_step, averages_op)
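
A common equivalent idiom (shown only as a sketch; this program uses tf.group) expresses the same grouping with a control dependency:

with tf.control_dependencies([train_step, averages_op]):
    train_op = tf.no_op(name='train')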

# Evaluate accuracy with the moving-average model (average_y)
correct_prediction = tf.equal(tf.argmax(average_y, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

with tf.Session() as sess:
    tf.global_variables_initializer().run()
    validate_feed = {x: mnist.validation.images, y_: mnist.validation.labels}
    test_feed = {x: mnist.test.images, y_: mnist.test.labels}

    for i in range(STEP):
        if i % 1000 == 0:
            validate_accuracy = sess.run(accuracy,feed_dict=validate_feed)
            print("after %d training steps,validation accuracy=%g%%"%(i,validate_accuracy*100))

        xs,ys = mnist.train.next_batch(batch_size=BATCH_SIZE)
        sess.run(train_op,feed_dict={x:xs,y_:ys})

    test_accuracy = sess.run(accuracy,feed_dict=test_feed)
    print("after %d training steps,validation accuracy=%g%%" % (STEP, test_accuracy * 100))

Lessons so far: the early stage is really about memorizing code, understanding the code, and understanding the ideas behind it. I am in that early stage now, and it feels like deep learning will take a great deal of effort before I can truly get started.

Apart from that, the official-document workflow system is basically finished, but many parts still need optimization.

Next week: optimize the document workflow system and keep working on deep learning.

Problems encountered: I now understand the basic code framework for deep learning and what each step is supposed to do, but I can't remember the syntax, and when the problem changes I have no ideas of my own. It still feels like I haven't really gotten started.

Original post: https://www.cnblogs.com/xrj-/p/13699035.html