TensorFlow 2.0: The Most Basic Linear Regression

import tensorflow as tf
import numpy as np

# Adam optimizer from the v1 compat module; it also works in TF2 eager mode.
opt = tf.compat.v1.train.AdamOptimizer(1e-1)

# Training data sampled from the target line y = 3*x + 0.217.
# Cast to float32 so the inputs match the dtype of the tf.Variables below;
# without the cast, tf.multiply raises a float64/float32 dtype mismatch.
input_xs = np.random.rand(1000).astype(np.float32)
input_ys = 3 * input_xs + 0.217

# Fit the linear function iteratively with TensorFlow and NumPy.

weight = tf.Variable(1.0, dtype=tf.float32, name="weight")
bias = tf.Variable(1.0, dtype=tf.float32, name="bias")

def model(xs):
    # Linear model: y_hat = weight * x + bias
    return tf.multiply(xs, weight) + bias

# One gradient step per sample (stochastic gradient descent).
for xs, ys in zip(input_xs, input_ys):
    xs = np.reshape(xs, [1])
    ys = np.reshape(ys, [1])
    with tf.GradientTape() as tape:
        # Per-sample squared error divided by 2*N (N = 1000), mirroring the
        # 1/(2N) factor of the usual least-squares cost.
        _loss = tf.reduce_mean(tf.pow(model(xs) - ys, 2)) / (2 * 1000)
    grads = tape.gradient(_loss, [weight, bias])
    # Note the trailing "s": the method is apply_gradients, not apply_gradient.
    opt.apply_gradients(zip(grads, [weight, bias]))
    print('Training loss is:', _loss.numpy())

print(weight)  # should approach 3.0
print(bias)    # should approach 0.217
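
As a quick sanity check (not part of the original post), NumPy's closed-form least-squares fit should recover the same coefficients directly from the generated data:

# Degree-1 polynomial fit returns [slope, intercept]
coeffs = np.polyfit(input_xs, input_ys, 1)
print(coeffs)  # expect approximately [3.0, 0.217]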
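
For comparison, the same fit can be written with the TF2-native optimizer API and one full-batch update per step instead of one sample at a time. This is a minimal sketch, not the original post's code; the step budget of 200 is an assumption chosen to let the parameters settle:

import tensorflow as tf
import numpy as np

xs = np.random.rand(1000).astype(np.float32)
ys = 3 * xs + 0.217

weight = tf.Variable(1.0, dtype=tf.float32, name="weight")
bias = tf.Variable(1.0, dtype=tf.float32, name="bias")
opt = tf.keras.optimizers.Adam(learning_rate=1e-1)

for step in range(200):  # assumed step budget, not from the original post
    with tf.GradientTape() as tape:
        # Mean squared error over the whole batch
        loss = tf.reduce_mean(tf.square(weight * xs + bias - ys))
    grads = tape.gradient(loss, [weight, bias])
    opt.apply_gradients(zip(grads, [weight, bias]))

print(weight.numpy(), bias.numpy())  # should approach 3.0 and 0.217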
~~Jason_liu O(∩_∩)O
Original post: https://www.cnblogs.com/JasonCow/p/14515252.html