Hand-building a multi-layer (multiple hidden layers) BP neural network


#!/usr/bin/python3
# Name: Network_Py
# Date: 2018/12/13 11:09
# Author:
# Email: 425776024@qq.com

import numpy as np
from src.mnist_loader import load_data_wrapper


def sigmoid(X):
    return 1 / (1 + np.exp(-X))


def sigmoid_prime(z):
    return sigmoid(z) * (1 - sigmoid(z))
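
# Note: np.exp(-X) can overflow for large negative X and numpy will print a RuntimeWarning.
# If that becomes a problem, scipy.special.expit is a numerically stable drop-in replacement
# for the sigmoid above (an optional suggestion, assuming SciPy is installed):
#
#     from scipy.special import expit
#     def sigmoid(X):
#         return expit(X)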


class NetWork(object):
    def __init__(self, sizes):
        # np.random.seed(0)
        self.num_layers = len(sizes)
        self.sizes = sizes
        # biases: one (y, 1) column vector per non-input layer, standard Gaussian init
        self.biases = [np.random.randn(y, 1) for y in sizes[1:]]
        # weights: one (next_layer, prev_layer) matrix per pair of adjacent layers
        self.weights = [np.random.randn(x, y) for x, y in zip(sizes[1:], sizes[:-1])]

    def feedforward(self, a):
        # a: input activation (column vector); propagate it through every layer
        for b, w in zip(self.biases, self.weights):
            a = sigmoid(np.dot(w, a) + b)
        return a

    def SGD(self, training_data, epochs, mini_batch_size, eta, test_data=None):
        '''
        Train the network with mini-batch stochastic gradient descent.
        :param training_data: list of (x, y) training pairs
        :param epochs: number of training epochs
        :param mini_batch_size: size of each mini-batch
        :param eta: learning rate
        :param test_data: optional list of (x, y) test pairs; if given, evaluate after every epoch
        :return:
        '''
        if test_data:
            n_test = len(test_data)
        n = len(training_data)
        for j in range(epochs):
            np.random.shuffle(training_data)
            mini_batchs = [training_data[k:k + mini_batch_size] for k in range(0, n, mini_batch_size)]
            for mini_batch in mini_batchs:
                # Update the weights and biases using this mini_batch
                self.update_mini_batch(mini_batch, eta)
            if test_data:
                print('Epoch {0} :success rate:{1}/{2}'.format(j, self.evaluate(test_data), n_test))
            else:
                print("Epoch {0} complete".format(j))

    def update_mini_batch(self, mini_batch, eta):
        '''
        Update the network's weights and biases by applying gradient descent
        via backpropagation to a single mini-batch.
        :param mini_batch: list of (x, y) training pairs
        :param eta: learning rate
        :return:
        '''
        nabla_b = [np.zeros(b.shape) for b in self.biases]  # accumulated gradients w.r.t. biases
        nabla_w = [np.zeros(w.shape) for w in self.weights]  # accumulated gradients w.r.t. weights
        for x, y in mini_batch:
            # gradients db, dw contributed by this single (x, y) sample
            delta_nabla_b, delta_nabla_w = self.backprop(x, y)
            nabla_b = [nb + dnb for nb, dnb in zip(nabla_b, delta_nabla_b)]
            nabla_w = [nw + dnw for nw, dnw in zip(nabla_w, delta_nabla_w)]

        # Update with the averaged gradients (m = mini-batch size)
        # w = w - eta * (1/m) * dw
        # b = b - eta * (1/m) * db
        self.weights = [w - (eta / len(mini_batch)) * nw
                        for w, nw in zip(self.weights, nabla_w)]
        self.biases = [b - (eta / len(mini_batch)) * nb
                       for b, nb in zip(self.biases, nabla_b)]

    def backprop(self, x, y):
        '''
        Backpropagation: compute the gradient of the cost for a single (x, y) pair.
        :param x: input column vector
        :param y: target output (one-hot column vector)
        :return: (nabla_b, nabla_w), the layer-by-layer gradients
        '''
        # per-layer gradients, filled in below
        nabla_b = [np.zeros(b.shape) for b in self.biases]
        nabla_w = [np.zeros(w.shape) for w in self.weights]
        # forward pass, starting from the input
        activation = x
        activations = [x]  # store the activations of every layer
        zs = []  # store the z vectors of every layer
        for b, w in zip(self.biases, self.weights):
            # z = w·a + b
            z = np.dot(w, activation) + b
            zs.append(z)
            # a = sigmoid(z)
            activation = sigmoid(z)
            # store the activation
            activations.append(activation)

        # Error of the output layer
        # delta = dC/da * sigma'(z)
        delta = self.cost_derivative(activations[-1], y) * sigmoid_prime(zs[-1])
        # db = delta; only the last layer here, earlier layers are filled in by the loop below
        nabla_b[-1] = delta
        # dw = delta · a[L-1]^T; only the last layer here, earlier layers are filled in by the loop below
        nabla_w[-1] = np.dot(delta, activations[-2].transpose())

        # Propagate the error backwards through the remaining layers
        for Li in range(2, self.num_layers):
            z = zs[-Li]
            sp = sigmoid_prime(z)
            delta = np.dot(self.weights[-Li + 1].transpose(), delta) * sp
            nabla_b[-Li] = delta
            nabla_w[-Li] = np.dot(delta, activations[-Li - 1].transpose())

        return (nabla_b, nabla_w)
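
    # For reference, the equations implemented by backprop() above:
    #   delta_L = (a_L - y) * sigma'(z_L)                    error of the output layer (elementwise *)
    #   delta_l = (W_{l+1}^T . delta_{l+1}) * sigma'(z_l)    error propagated backwards
    #   dC/db_l = delta_l
    #   dC/dW_l = delta_l . a_{l-1}^T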

    def evaluate(self, test_data):
        '''
        Evaluation: count how many test samples the network classifies correctly.
        :param test_data: list of (x, y) pairs with integer labels y
        :return: number of correct predictions
        '''
        test_results = [(np.argmax(self.feedforward(x)), y)
                        for (x, y) in test_data]
        return sum(int(x == y) for (x, y) in test_results)

    def cost_derivative(self, output_activations, y):
        '''Derivative of the quadratic cost with respect to the output activations.
        :param output_activations: final network output
        :param y: true label (one-hot column vector)
        :return: (a - y)
        '''
        return (output_activations - y)

    # Prediction: return the index of the largest output activation
    def predict(self, data):
        value = self.feedforward(data)
        return int(np.argmax(value))
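
# Optional sanity check (a sketch added here, not part of the original post): numerically
# verify the gradients from backprop() against central finite differences of the quadratic
# cost C = 0.5 * ||a_L - y||^2, whose derivative (a - y) is what cost_derivative() returns.
def gradient_check(net, x, y, eps=1e-5):
    def cost():
        return 0.5 * np.sum((net.feedforward(x) - y) ** 2)

    _, nabla_w = net.backprop(x, y)
    max_err = 0.0
    for layer, w in enumerate(net.weights):
        for idx in np.ndindex(w.shape):
            old = w[idx]
            w[idx] = old + eps
            c_plus = cost()
            w[idx] = old - eps
            c_minus = cost()
            w[idx] = old  # restore the weight
            numeric = (c_plus - c_minus) / (2 * eps)
            max_err = max(max_err, abs(numeric - nabla_w[layer][idx]))
    return max_err

# Example: on a tiny random network the analytic and numeric gradients should agree closely.
# tiny_net = NetWork([3, 4, 2])
# print(gradient_check(tiny_net, np.random.randn(3, 1), np.random.randn(2, 1)))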

INPUT = 28 * 28  # each image has 28x28 pixels
OUTPUT = 10  # ten classes, the digits 0-9

# Build a 5-layer network: 784 x 10 x 10 x 10 x 10
net = NetWork([INPUT, 10, 10, 10, OUTPUT])

# Load the MNIST data via the loader module
# (converted to lists in case load_data_wrapper returns iterators, since the data is shuffled and reused)
training_data, validation_data, test_data = load_data_wrapper()
training_data, test_data = list(training_data), list(test_data)

# Train: 30 epochs, mini-batches of 10, learning rate 3.0
net.SGD(training_data, 30, 10, 3.0, test_data=test_data)

# Compute the overall accuracy on the test set
correct = 0
for test_feature in test_data:
    if net.predict(test_feature[0]) == test_feature[1]:
        correct += 1
print("percent: ", correct / len(test_data))

Original article: https://www.cnblogs.com/onenoteone/p/12441806.html