TensorFlow study notes (1)

1. Simple linear regression

#Linear regression
import numpy as np
import tensorflow as tf  #needed here: tf is used below (TensorFlow 1.x API)
data_x=np.linspace(0,10,30)
data_y=data_x*3+7+np.random.normal(0,1,30)  #y=3x+7 plus Gaussian noise

import matplotlib.pyplot as plt
%matplotlib inline

plt.scatter(data_x,data_y)

w=tf.Variable(1.,name='weights')
b=tf.Variable(0.,name='bias')

x=tf.placeholder(tf.float32,shape=[None])
y=tf.placeholder(tf.float32,shape=[None])

pred=tf.multiply(x,w)+b

loss=tf.reduce_sum(tf.squared_difference(pred,y))#sum of squared errors

learning_rate=0.0001

train_step=tf.train.GradientDescentOptimizer(learning_rate).minimize(loss)#train with gradient descent

sess=tf.Session()
sess.run(tf.global_variables_initializer())

for i in range(5000):
    sess.run(train_step,feed_dict={x:data_x,y:data_y})
    if i%100==0:
        print(sess.run([loss,w,b],feed_dict={x:data_x,y:data_y}))
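As a quick check of the fit, a minimal sketch along these lines (reusing the session, w, and b defined above) pulls out the learned parameters and overlays the fitted line on the scatter plot:

#Sketch: read back the learned parameters and plot the fitted line
final_w,final_b=sess.run([w,b])
plt.scatter(data_x,data_y)
plt.plot(data_x,final_w*data_x+final_b,'r')#fitted line y=w*x+b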

2. Multi-class classification

#Multi-class classification
import tensorflow as tf


tf.__version__

import numpy as np
import requests

r=requests.get('http://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data')#fetch the data from the UCI repository

with open('iris.data','w') as f:
    f.write(r.text)#save the downloaded file locally

import pandas as pd
#data=pd.read_csv('iris.data',names=['e_cd','e_kd','b_cd','b_kd','cat'])#read the downloaded file, supplying column names
data=pd.read_csv('iris.csv',header=0,index_col=0)#header=0: first row gives the column names; index_col=0: first column is the row index

data

#Plot the pairwise relationships between all numeric features
import seaborn as sns
%matplotlib inline
sns.pairplot(data)

data.Species.unique()#inspect the distinct classes => array(['setosa', 'versicolor', 'virginica'], dtype=object)

#Turn the class labels into one-hot encoding
data['c1']=np.array(data['Species']=='setosa').astype(np.float32)
data['c2']=np.array(data['Species']=='versicolor').astype(np.float32)
data['c3']=np.array(data['Species']=='virginica').astype(np.float32)
target=np.stack([data.c1.values,data.c2.values,data.c3.values]).T

shuju=np.stack([data['Sepal.Length'],data['Sepal.Width'],data['Petal.Length'],data['Petal.Width']]).T

shuju.shape,target.shape
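As an aside, the same one-hot target could be built more compactly with pandas; a minimal sketch (the alphabetical column order setosa/versicolor/virginica matches c1/c2/c3 above):

#Sketch: equivalent one-hot encoding via pandas
onehot=pd.get_dummies(data['Species']).values.astype(np.float32)#columns: setosa, versicolor, virginica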

#Define the network: one fully connected layer mapping 4 features to 3 class logits
x=tf.placeholder('float',shape=[None,4])
y=tf.placeholder('float',shape=[None,3])
weight=tf.Variable(tf.truncated_normal([4,3]))
bias=tf.Variable(tf.truncated_normal([3]))
combine_input=tf.matmul(x,weight)+bias

pred=tf.nn.softmax(combine_input)

loss=tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y,logits=combine_input))#pass the raw logits, not pred: the op applies softmax internally

correct_pred=tf.equal(tf.argmax(pred,1),tf.argmax(y,1))
accuracy=tf.reduce_mean(tf.cast(correct_pred,tf.float32))

train_step=tf.train.AdamOptimizer(0.005).minimize(loss)

sess=tf.Session()
sess.run(tf.global_variables_initializer())

for i in range(1000):
    index=np.random.permutation(len(target))#shuffle the data each iteration
    shuju=shuju[index]
    target=target[index]
    sess.run(train_step,feed_dict={x:shuju,y:target})
    if i%100==0:
        print(sess.run((loss,accuracy),feed_dict={x:shuju,y:target}))
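Once training finishes, the same session can classify new measurements; a minimal sketch, reusing the pred tensor above (the sample values are purely illustrative):

#Sketch: classify a new sample with the trained model (values are illustrative)
new_sample=np.array([[5.1,3.5,1.4,0.2]],dtype=np.float32)#sepal length/width, petal length/width
probs=sess.run(pred,feed_dict={x:new_sample})
print(probs,probs.argmax(axis=1))#class index: 0=setosa, 1=versicolor, 2=virginica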

  

Original post: https://www.cnblogs.com/Turing-dz/p/13195406.html