[Python Machine Learning] Simple Machine Learning Examples: KNN, Decision Tree, Linear Regression, Logistic Regression

1. KNN

KNN looks at the known samples nearest to the point being classified and predicts the class that those neighbors belong to.

For example: Xiao Ming is in Beijing, Xiao Hong is in Beijing, and Xiao Gang is in Henan. If I am closer to Xiao Ming and Xiao Hong than to Xiao Gang, then I am most likely in Beijing rather than Henan.
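
To make the voting idea concrete, here is a minimal sketch of a nearest-neighbor vote written directly in NumPy (the toy points, labels, and query point are assumptions for illustration; the full scikit-learn example follows below):

import numpy as np
from collections import Counter

# Toy training points and their labels (assumed for illustration)
points = np.array([[1.0, 1.0], [1.2, 0.8], [5.0, 5.0]])  # Xiao Ming, Xiao Hong, Xiao Gang
labels = np.array(["Beijing", "Beijing", "Henan"])

query = np.array([1.1, 1.1])  # "me"
k = 2

# Euclidean distance to every known point, take the k closest, vote by majority
distances = np.linalg.norm(points - query, axis=1)
nearest = labels[np.argsort(distances)[:k]]
print(Counter(nearest).most_common(1)[0][0])  # -> Beijing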

#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @File  : KNN近邻算法.py
# @Author: 赵路仓
# @Date  : 2020/4/2
# @Desc  : Course: https://www.bilibili.com/video/BV1nt411r7tj?p=21
# @Contact : 398333404@qq.com

from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.preprocessing import StandardScaler
from sklearn.neighbors import KNeighborsClassifier
import numpy as np


def knn_iris():
    """
    Classify the iris dataset with the KNN algorithm
    :return:
    """
    # 1. Load the data
    iris = load_iris()
    print(iris)

    # 2. Split the dataset
    x_train, x_test, y_train, y_test = train_test_split(iris.data, iris.target, random_state=6)

    # 3. Feature engineering: standardization
    transfer = StandardScaler()
    x_train = transfer.fit_transform(x_train)
    x_test = transfer.transform(x_test)

    # 4. KNN estimator
    estimator = KNeighborsClassifier(n_neighbors=6)
    estimator.fit(x_train, y_train)

    # 5. Model evaluation
    # Method 1: compare the true values and the predictions directly
    y_predict = estimator.predict(x_test)
    print("y_predict:\n", y_predict)
    print("Comparison of true and predicted values:\n", y_test == y_predict)

    # Method 2: compute the accuracy
    score = estimator.score(x_test, y_test)
    print("Accuracy:\n", score)

    # Predict the species of a new iris sample
    x_new = np.array([[5, 2.9, 1, 0.2]])
    prediction = estimator.predict(x_new)
    print(prediction)
    return None


def knn_iris_gscv():
    """
    Classify the iris dataset with KNN, adding grid search and cross-validation
    :return:
    """
    # 1. Load the data
    iris = load_iris()
    print(iris)

    # 2. Split the dataset
    x_train, x_test, y_train, y_test = train_test_split(iris.data, iris.target, random_state=6)

    # 3. Feature engineering: standardization
    transfer = StandardScaler()
    x_train = transfer.fit_transform(x_train)
    x_test = transfer.transform(x_test)

    # 4. KNN estimator
    estimator = KNeighborsClassifier(n_neighbors=5)
    # Add grid search with cross-validation
    # Candidate values of n_neighbors to search over
    param_dict = {
        "n_neighbors": [1, 3, 5, 7, 9, 11]
    }
    estimator = GridSearchCV(estimator, param_grid=param_dict, cv=10)
    estimator.fit(x_train, y_train)

    # 5. Model evaluation
    # Method 1: compare the true values and the predictions directly
    y_predict = estimator.predict(x_test)
    print("y_predict:\n", y_predict)
    print("Comparison of true and predicted values:\n", y_test == y_predict)

    # Method 2: compute the accuracy
    score = estimator.score(x_test, y_test)
    print("Accuracy:\n", score)

    """
       最佳参数:best_params_
       最佳结果:best_score_
       最佳估计器:best_estimator_
       交叉验证结果:cv_results_
       """
    print("最佳参数:
", estimator.best_params_)
    print("最佳结果:
", estimator.best_score_)
    print("最佳估计器:
", estimator.best_estimator_)
    print("交叉验证结果:
", estimator.cv_results_)

    # Predict the species of a new iris sample
    x_new = np.array([[5, 2.9, 1, 0.2]])
    prediction = estimator.predict(x_new)
    print(prediction)
    return None


if __name__ == "__main__":
    # Example 1: classify the iris dataset with KNN
    # knn_iris()
    # Example 2: classify the iris dataset with KNN plus grid search and cross-validation
    knn_iris_gscv()

2. Decision Tree

A classification tree (decision tree) is a very common classification method. It is a form of supervised learning: given a set of samples, each with a group of attributes and a predetermined class label, learning produces a classifier that can assign the correct class to new, unseen objects.
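
The scikit-learn example below builds the tree with criterion="entropy". As a rough sketch of what that criterion measures, the snippet below (the toy labels and the candidate split are assumptions for illustration) computes the information gain of one split; the tree greedily prefers the split with the highest gain.

import numpy as np

def entropy(y):
    # Shannon entropy of a label array
    _, counts = np.unique(y, return_counts=True)
    p = counts / counts.sum()
    return -np.sum(p * np.log2(p))

# Toy labels and one candidate split (assumed for illustration)
y = np.array([0, 0, 0, 1, 1, 1, 1, 1])
left, right = y[:4], y[4:]
gain = entropy(y) - (len(left) / len(y) * entropy(left)
                     + len(right) / len(y) * entropy(right))
print("information gain:", gain)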

#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @File  : 决策树.py
# @Author: 赵路仓
# @Date  : 2020/4/3
# @Desc  : https://www.bilibili.com/video/BV1nt411r7tj?p=28
# @Contact : 398333404@qq.com

from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier, export_graphviz
import graphviz


def decision_iris():
    """
    Classify the iris data with a decision tree
    :return:
    """
    # 1. Load the dataset
    iris = load_iris()
    print(iris.data[1])
    print(iris.target[1])

    # 2. Split the dataset
    x_train, x_test, y_train, y_test = train_test_split(iris.data, iris.target, random_state=22)
    print(y_train)

    # 3. Decision tree estimator
    estimator = DecisionTreeClassifier(criterion="entropy")
    estimator.fit(x_train, y_train)

    # 4. Model evaluation
    # Method 1: compare the true values and the predictions directly
    y_predict = estimator.predict(x_test)
    print("y_predict:\n", y_predict)
    print("Comparison of true and predicted values:\n", y_test == y_predict)

    # Method 2: compute the accuracy
    score = estimator.score(x_test, y_test)
    print("Accuracy:\n", score)

    # Visualize the decision tree
    # Export the tree in DOT format and render it
    dot_data = export_graphviz(estimator, out_file=None)
    graph = graphviz.Source(dot_data)
    graph.render("tree")  # "tree" is the base name of the rendered PDF
    return None


if __name__ == "__main__":
    decision_iris()

3. Linear Regression

The task of linear regression is to find an optimal linear mapping from the feature space X to the output space Y.
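
The script below compares three ways of fitting that mapping: the closed-form normal equation (LinearRegression), gradient descent (SGDRegressor), and ridge regression (Ridge). As a minimal sketch of what the normal-equation approach computes, w = (X^T X)^(-1) X^T y, the snippet below fits synthetic data (the data and true weights are assumptions for illustration; note also that the load_boston dataset used in the script has been removed from recent scikit-learn releases, so an older version is needed to run the script as-is):

import numpy as np

# Synthetic data (assumed): y = 3*x1 - 2*x2 + 5 + noise
rng = np.random.default_rng(0)
X = rng.normal(size=(100, 2))
y = X @ np.array([3.0, -2.0]) + 5.0 + rng.normal(scale=0.1, size=100)

# Append a column of ones so the intercept is learned as an extra weight
Xb = np.hstack([X, np.ones((X.shape[0], 1))])

# Normal equation: solve (X^T X) w = X^T y in closed form
w = np.linalg.solve(Xb.T @ Xb, Xb.T @ y)
print("weights:", w[:-1], "intercept:", w[-1])  # approximately [3, -2] and 5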

#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @File  : 波士顿房价预测.py
# @Author: 赵路仓
# @Date  : 2020/4/11
# @Desc  :
# @Contact : 398333404@qq.com 

from sklearn.datasets import load_boston
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LinearRegression, SGDRegressor, Ridge
from sklearn.metrics import mean_squared_error


# Normal equation
def linear1():
    """
    Predict Boston house prices with the normal-equation (closed-form) solution
    :return:
    """
    # 1. Load the data
    boston = load_boston()

    # 2. Split the dataset
    x_train, x_test, y_train, y_test = train_test_split(boston.data, boston.target, random_state=22)

    # 3. Standardization
    transfer = StandardScaler()
    x_train = transfer.fit_transform(x_train)
    x_test = transfer.transform(x_test)

    # 4. Estimator: normal-equation solution, suited to datasets with fewer than ~100,000 samples
    estimator = LinearRegression()
    estimator.fit(x_train, y_train)

    # 5. Inspect the model
    print("Normal equation coefficients:", estimator.coef_)
    print("Normal equation intercept:", estimator.intercept_)

    # 6. Model evaluation
    y_predict = estimator.predict(x_test)
    print("Predicted prices:", y_predict)
    error = mean_squared_error(y_test, y_predict)
    print("Normal equation - mean squared error:", error)

    return None


# Gradient descent
def linear2():
    """
    Predict Boston house prices with gradient-descent optimization
    :return:
    """
    # 1. Load the data
    boston = load_boston()

    # 2. Split the dataset
    x_train, x_test, y_train, y_test = train_test_split(boston.data, boston.target, random_state=22)

    # 3. Standardization
    transfer = StandardScaler()
    x_train = transfer.fit_transform(x_train)
    x_test = transfer.transform(x_test)

    # 4. Estimator: gradient descent; eta0 is the learning rate, max_iter the iteration limit; recommended for large datasets
    estimator = SGDRegressor(learning_rate="constant", eta0=0.001, max_iter=10000)
    estimator.fit(x_train, y_train)

    # 5. Inspect the model
    print("Gradient descent coefficients:", estimator.coef_)
    print("Gradient descent intercept:", estimator.intercept_)

    # 6. Model evaluation
    y_predict = estimator.predict(x_test)
    print("Predicted prices:", y_predict)
    error = mean_squared_error(y_test, y_predict)
    print("Gradient descent - mean squared error:", error)
    return None


# Ridge regression
def linear3():
    """
    Predict Boston house prices with ridge regression
    :return:
    """
    # 1. Load the data
    boston = load_boston()

    # 2. Split the dataset
    x_train, x_test, y_train, y_test = train_test_split(boston.data, boston.target, random_state=22)

    # 3. Standardization
    transfer = StandardScaler()
    x_train = transfer.fit_transform(x_train)
    x_test = transfer.transform(x_test)

    # 4. Estimator: ridge regression (L2-regularized linear regression); max_iter is the iteration limit
    estimator = Ridge(max_iter=10000)
    estimator.fit(x_train, y_train)

    # 5. Inspect the model
    print("Ridge regression coefficients:", estimator.coef_)
    print("Ridge regression intercept:", estimator.intercept_)

    # 6. Model evaluation
    y_predict = estimator.predict(x_test)
    print("Predicted prices:", y_predict)
    error = mean_squared_error(y_test, y_predict)
    print("Ridge regression - mean squared error:", error)
    return None


if __name__ == "__main__":
    # Example 1: normal equation
    linear1()
    # Example 2: gradient descent
    linear2()
    # Example 3: ridge regression
    linear3()

4. Logistic Regression

Simply put, logistic regression is a machine learning method for solving binary (0 or 1) classification problems by estimating the probability of an event: for example, the probability that a user buys a product, that a patient has a certain disease, or that an advertisement gets clicked.
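
Internally, logistic regression passes a weighted sum of the features through the sigmoid function sigma(z) = 1 / (1 + e^(-z)) and thresholds the resulting probability at 0.5. Here is a minimal sketch of that decision rule (the weights, bias, and input below are assumptions for illustration, not values from the model trained in the script):

import numpy as np

def sigmoid(z):
    # Map a linear score to a probability in (0, 1)
    return 1.0 / (1.0 + np.exp(-z))

# Assumed toy weights, bias, and one input sample
w = np.array([0.8, -1.2])
b = 0.3
x = np.array([1.5, 0.4])

prob = sigmoid(w @ x + b)  # estimated probability of class 1
print(prob, "-> predicted class:", int(prob >= 0.5))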

#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @File  : 癌症逻辑回归.py
# @Author: 赵路仓
# @Date  : 2020/4/11
# @Desc  :
# @Contact : 398333404@qq.com 

from sklearn.datasets import load_breast_cancer
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import classification_report, roc_auc_score


def cancer_demo():
    """
    Binary classification of breast cancer data with logistic regression
    :return:
    """
    # Load the data
    cancer = load_breast_cancer()
    # print(cancer.feature_names)
    # print(cancer.data)
    # print(cancer.target)

    # Split the dataset
    x_train, x_test, y_train, y_test = train_test_split(cancer.data, cancer.target)

    # Standardization
    transfer = StandardScaler()
    x_train = transfer.fit_transform(x_train)
    x_test = transfer.transform(x_test)
    print(x_train)

    # Build the estimator
    estimator = LogisticRegression()
    estimator.fit(x_train, y_train)

    # Inspect the model
    print("Logistic regression coefficients:", estimator.coef_)
    print("Logistic regression intercept:", estimator.intercept_)

    # Model evaluation
    # Method 1: compare the true values and the predictions directly
    y_predict = estimator.predict(x_test)
    print("y_predict:\n", y_predict)
    print("Comparison of true and predicted values:\n", y_test == y_predict)

    # Method 2: compute the accuracy
    score = estimator.score(x_test, y_test)
    print("Accuracy:\n", score)

    # Precision, recall, and F1-score (in this dataset label 0 is malignant, label 1 is benign)
    report = classification_report(y_test, y_predict, labels=[0, 1], target_names=['malignant', 'benign'])
    print(report)
    auc = roc_auc_score(y_test, y_predict)
    print("AUC:", auc)


if __name__ == "__main__":
    cancer_demo()
Original article: https://www.cnblogs.com/zlc364624/p/12874078.html