Eager Mode and Custom Training

Preface

TensorFlow's eager mode is an imperative programming environment: operations are evaluated immediately and return concrete values, without building a computational graph first.

Graph execution, by contrast, requires building the entire graph first and then feeding data through it to produce results; you cannot observe what is happening inside during the process.

Eager mode makes TensorFlow much more convenient to use and models much easier to debug, adds a great deal of flexibility to network debugging, and makes TensorFlow friendlier to beginners; it is also called TensorFlow's interactive mode.

Unlike TensorFlow 1.x, TensorFlow 2.0 uses eager mode by default: running tf.executing_eagerly() returns True.

Environment

TensorFlow 2.0

Python 3.6.12

CUDA 10.0.13

cuDNN 7.6.5

Advantages of eager mode:

1. It provides a flexible platform for machine-learning research and experimentation.

2. It provides an intuitive interface: code is structured naturally and uses Python data structures, allowing fast iteration on small models and small datasets.

3. It is easier to debug: you can inspect values, run models, and test changes directly in an interactive environment, and errors are reported as soon as they occur.

4. It allows natural control flow: Python control flow rather than graph control flow. Eager execution supports most TensorFlow operations and GPU acceleration (a quick device check is sketched after this list). In eager mode, TensorFlow operations execute immediately and return their values to Python; a tf.Tensor object references a concrete value rather than a symbolic handle to a node in a computational graph.

5. In eager mode TensorFlow cooperates well with NumPy: TensorFlow math operations convert Python objects and NumPy arrays to tf.Tensor objects, and the tf.Tensor.numpy() method returns the object's value as a NumPy ndarray.
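
As a quick illustration of the GPU point in item 4 (a minimal sketch, not from the original post; it uses the experimental tf.config API available in TensorFlow 2.0):

import tensorflow as tf

# list the physical GPUs TensorFlow can see (may be empty on a CPU-only machine)
print(tf.config.experimental.list_physical_devices('GPU'))

# in eager mode every tensor records the device it was produced on
x = tf.constant([[2.0]])
print(tf.matmul(x, x).device)
# e.g. /job:localhost/replica:0/task:0/device:GPU:0 when a GPU is available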

Code in eager mode

import tensorflow as tf

print(tf.executing_eagerly())
# True
x = [[2,]]
m = tf.matmul(x,x) # matrix multiplication
print(m)
#tf.Tensor([[4]], shape=(1, 1), dtype=int32)

#Tensor->numpy
print(m.numpy(),type(m.numpy()))
# [[4]] <class 'numpy.ndarray'>

# create a constant
a = tf.constant([[1,2],[3,4]])
print(a)
# tf.Tensor(
# [[1 2]
#  [3 4]], shape=(2, 2), dtype=int32)
print(a.numpy())
# [[1 2]
#  [3 4]]

# element-wise addition
print(tf.add(a,1).numpy())
# [[2 3]
#  [4 5]]

# element-wise multiplication
print(tf.multiply(a,2).numpy())
# [[2 4]
#  [6 8]]

# use Python control flow with TensorFlow operations
num = tf.convert_to_tensor(10)
for i in range(num.numpy()):
    i = tf.constant(i)
    if int(i) % 2 == 0:
        print("even")
    else:
        print("odd")
# even
# odd
# even
# odd
# even
# odd
# even
# odd
# even
# odd        

# interoperating with NumPy
import numpy as np
a1 = np.array([[1,2],
               [3,4]])
b1 = tf.constant([[1,5],
                  [7,4]])           
print(a1 + b1,type(a1 + b1),(a1 + b1).dtype)  
# tf.Tensor(
# [[ 2  7]
#  [10  8]], shape=(2, 2), dtype=int32) <class 'tensorflow.python.framework.ops.EagerTensor'> <dtype: 'int32'>    

f = tf.convert_to_tensor(2.0)
print(float(f))
# 2.0   

Variables

import tensorflow as tf

# create a variable; during training, gradient descent updates variables like this one
v = tf.Variable(0.0)
print(v + 1,(v + 1).numpy())
# tf.Tensor(1.0, shape=(), dtype=float32) 1.0

# assign a new value to the variable directly
v.assign(5.0)
print(v)
# <tf.Variable 'Variable:0' shape=() dtype=float32, numpy=5.0>

# add to the variable's value on each iteration
for i in range(tf.constant(3).numpy()):    
    v.assign_add(1.0)   
    print(v)
# <tf.Variable 'Variable:0' shape=() dtype=float32, numpy=6.0>
# <tf.Variable 'Variable:0' shape=() dtype=float32, numpy=7.0>
# <tf.Variable 'Variable:0' shape=() dtype=float32, numpy=8.0>

# subtract from the variable's value
v.assign_sub(3)
print(v)
# <tf.Variable 'Variable:0' shape=() dtype=float32, numpy=5.0>

# read the variable's value
print(v.read_value())
# tf.Tensor(5.0, shape=(), dtype=float32)

Automatic differentiation

import tensorflow as tf
# GradientTape records operations for later gradient computation; operations on variables are tracked automatically
w = tf.Variable([[1.0]])
with tf.GradientTape() as t:
    loss = w * w                     # loss function
grad = t.gradient(loss,w)            # gradient: derivative of loss with respect to w

print(grad.numpy())
# [[2.]]

w2 = tf.constant(3.0)
with tf.GradientTape() as t2:
    t2.watch(w2)                     # tell the tape to track operations on this constant
    loss2 = w2 * w2                  # loss function
dloss_dw = t2.gradient(loss2,w2)     # gradient: derivative of loss2 with respect to w2
print(dloss_dw.numpy())         
# 6.0
# with persistent=True the tape is kept, so gradient() can be called more than once
w = tf.constant(4.0)
with tf.GradientTape(persistent=True) as t:
    t.watch(w)
    y = w * w
    z = y * y
dy_dw = t.gradient(y,w)
print(dy_dw.numpy())                 #8.0
dz_dy = t.gradient(z,y)              # without persistent=True this second gradient() call would raise a RuntimeError, because a non-persistent tape is released after its first use
print(dz_dy.numpy())                 #32.0
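
Putting variables and the tape together gives the core of a training step. A minimal hand-written gradient-descent sketch (not from the original post; the learning rate 0.1 is an arbitrary illustrative value):

import tensorflow as tf

w3 = tf.Variable(5.0)
lr = 0.1                              # illustrative learning rate
with tf.GradientTape() as tape:
    loss3 = w3 * w3                   # loss = w^2, minimized at w = 0
grad3 = tape.gradient(loss3, w3)      # dloss/dw = 2*w = 10.0
w3.assign_sub(lr * grad3)             # gradient-descent update: w <- w - lr * grad
print(w3.numpy())
# 4.0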

Custom training (MNIST dataset)

import tensorflow as tf
from tensorflow.keras import layers,optimizers,losses

(train_image,train_label),(test_image,test_label) = tf.keras.datasets.mnist.load_data() 

# add a channel dimension (expand dims)
print(train_image.shape)
# (60000, 28, 28)
train_image = tf.expand_dims(train_image,-1)
train_image = tf.cast(train_image/255,tf.float32)

test_image = tf.expand_dims(test_image,-1)
test_image = tf.cast(test_image/255,tf.float32)

train_label = tf.cast(train_label,tf.int64)
test_label = tf.cast(test_label,tf.int64)

dataset = tf.data.Dataset.from_tensor_slices((train_image,train_label))
test_dataset = tf.data.Dataset.from_tensor_slices((test_image,test_label))

BUFFER_SIZE = 10000
BATCH_SIZE = 32

dataset = dataset.shuffle(BUFFER_SIZE).batch(BATCH_SIZE)   # one pass per epoch in the custom loop below
test_dataset = test_dataset.batch(BATCH_SIZE)

model = tf.keras.Sequential()
# to accept inputs of arbitrary height and width, use input_shape=(None,None,1)
model.add(layers.Conv2D(16,(3,3),input_shape=(28,28,1),activation='relu'))
model.add(layers.Conv2D(32,(3,3),activation='relu'))
model.add(layers.GlobalAveragePooling2D())
# model.add(layers.Dense(10,activation='softmax'))
model.add(layers.Dense(10))
model.summary()

optimizer = optimizers.Adam()
loss_func = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)   # from_logits=True because the last Dense layer has no softmax activation

features,labels = next(iter(dataset))

predictions = model.predict(features)
print(predictions.shape)
# (32, 10)
print(tf.argmax(predictions,1))
# tf.Tensor([3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3], shape=(32,), dtype=int64)
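
# Added sanity check (not in the original post): from_logits=True means the loss
# applies softmax internally, so it should match softmax followed by the default loss.
print(loss_func(labels, predictions).numpy())
print(losses.SparseCategoricalCrossentropy()(labels, tf.nn.softmax(predictions)).numpy())
# the two printed values should agree up to floating-point error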

train_loss = tf.keras.metrics.Mean(name='train_loss')
train_acc = tf.keras.metrics.SparseCategoricalAccuracy(name='train_acc')
test_loss = tf.keras.metrics.Mean(name='test_loss')
test_acc = tf.keras.metrics.SparseCategoricalAccuracy(name='test_acc')

# custom training
def loss(model,x,y):    # compute the loss; y is the true label, y_ is the prediction
    y_ = model(x)
    return loss_func(y,y_)

# one training step on a batch
def train_step(model,images,labels):
    with tf.GradientTape() as t:
        pred = model(images)                    # forward pass: get predictions
        loss_step = loss_func(labels,pred)
    # compute gradients of the loss with respect to the trainable variables
    grads = t.gradient(loss_step,model.trainable_variables)
    # apply the gradients with the optimizer
    optimizer.apply_gradients(zip(grads,model.trainable_variables))
    train_loss(loss_step)
    train_acc(labels,pred)
 
def test_step(model,images,labels):
    pred = model(images)                    # forward pass: get predictions
    loss_step = loss_func(labels,pred)
    test_loss(loss_step)
    test_acc(labels,pred)

EPOCH = 10
def train():
    for epoch in range(EPOCH):
        for(batch,(images,labels)) in enumerate(dataset):
            train_step(model,images,labels)
            print('.',end='')
        print('\n')
        print('epoch{} loss is {},acc is {}'.format(epoch,
                                                    train_loss.result(),
                                                    train_acc.result()))
        
        for(batch,(images,labels)) in enumerate(test_dataset):
            test_step(model,images,labels)
            print('.',end='')
        print('\n')
        print('epoch{} loss is {},acc is {}'.format(epoch,
                                                    test_loss.result(),
                                                    test_acc.result()))   
            
        # reset metric states before the next epoch
        train_loss.reset_states()
        train_acc.reset_states()
        test_loss.reset_states()
        test_acc.reset_states()
        
train()
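
A side note, not part of the original script: every step above runs eagerly. A common speed-up is to let TensorFlow trace the step into a graph with tf.function; a minimal sketch reusing the same body as train_step above:

@tf.function
def train_step_graph(model, images, labels):
    # inside a tf.function, tf.executing_eagerly() returns False: the body is traced into a graph once and reused
    with tf.GradientTape() as t:
        pred = model(images)
        loss_step = loss_func(labels, pred)
    grads = t.gradient(loss_step, model.trainable_variables)
    optimizer.apply_gradients(zip(grads, model.trainable_variables))
    train_loss(loss_step)
    train_acc(labels, pred)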

The tf.keras.metrics module

m = tf.keras.metrics.Mean('acc')
print(m(10))
# tf.Tensor(10.0, shape=(), dtype=float32)
print(m(20))
# tf.Tensor(15.0, shape=(), dtype=float32)
print(m([10,10]))
# tf.Tensor(12.5, shape=(), dtype=float32)
print(m.result().numpy())
# 12.5
 
# reset the metric's state
print(m.reset_states())
# None
print(m(20))
# tf.Tensor(20.0, shape=(), dtype=float32)
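
The accuracy metric used in the training loop works the same way: call it (or update_state) with labels and predictions, then read result(). A small sketch with made-up values:

acc = tf.keras.metrics.SparseCategoricalAccuracy()
acc.update_state([0, 1, 2], [[0.9, 0.05, 0.05],
                             [0.1, 0.8, 0.1],
                             [0.6, 0.3, 0.1]])   # third prediction is wrong
print(acc.result().numpy())
# 0.6666667  (2 of 3 predictions correct)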