Pattern Recognition: The BP (Back-Propagation) Algorithm

Neural network algorithms... there isn't much to say about them: the thing is a black box. Even the people who research this stuff can't fully explain it, let alone a junior-college guy like me!

Sometimes it just won't converge, and I don't know why...
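
For orientation: the code below is a plain three-layer BP network with 2 inputs, a 5-unit tanh hidden layer and 1 sigmoid output, trained on 15 two-feature samples by gradient descent with a momentum term (each weight step is delta = alpha*previous_delta + beta*local_gradient*input, where alpha is the momentum coefficient and beta the learning rate).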

// Standard headers for printf, rand and the math functions used below.
// (The code was apparently built as a Visual C++ / MFC console project, which is where
// _tmain and AfxMessageBox further down come from.)
#include <stdio.h>
#include <stdlib.h>
#include <math.h>

#define InputN (2)
#define HN (InputN*2 + 1)
#define OutN (1)
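
// Network topology: InputN input nodes, one hidden layer of HN nodes, OutN output nodes.
// HN = 2*InputN + 1 is a common rule-of-thumb choice for the hidden layer size.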


typedef struct __BP_DATA__
{
    double Input[InputN];   // one sample's feature values
    double Teach[OutN];     // the teaching (target) value for that sample
}BP_DATA_t;

typedef struct __BP_t__
{
    double y[OutN];             // teaching value of the current sample

    double xOut[InputN];        // input-layer outputs
    double hOut[HN];            // hidden-layer outputs (tanh)
    double yOut[OutN];          // output-layer outputs (sigmoid)

    double w[InputN][HN];       // input-to-hidden weights
    double v[HN][OutN];         // hidden-to-output weights

    double deltaw[InputN][HN];  // previous weight steps (momentum terms)
    double deltav[HN][OutN];    //

    double hd_delta[HN];        // hidden-layer local gradients
    double y_delta[OutN];       // output-layer local gradients

    double err;                 // squared-error sum of the most recent epoch (halved)
    double errLimit;            // stop training once err falls below this

    double alpha;               // momentum coefficient
    double beta;                // learning rate

    int maxLoopNum;             // maximum number of training epochs
}BP_t;

double sigmod(double val)
{
    return 1/(1 + exp(-val));
}
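
// Note for the back-propagation code below: hOut and yOut already hold the activated values,
// so the derivatives are taken as yOut*(1 - yOut) for the sigmoid and 1 - hOut*hOut for tanh.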


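// BP_XunLian ("train"): randomly initialise the weights, then run up to maxLoopNum epochs,
// each epoch doing a forward pass and a back-propagation weight update for every sample.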
void BP_XunLian(BP_t *pBp, BP_DATA_t *pData, int dataNum)
{
    // Randomly initialise all weights in [-1, 1] and clear the momentum terms.
    for (int i=0; i<InputN; i++)
    {
        for (int j = 0; j<HN; j++)
        {
            pBp->w[i][j] = (rand()/(double)RAND_MAX)*2 - 1;
            pBp->deltaw[i][j] = 0.0;
        }
    }

    for (int i=0; i<HN; i++)
    {
        for (int j = 0; j<OutN; j++)
        {
            pBp->v[i][j] = (rand()/(double)RAND_MAX)*2 - 1;
            pBp->deltav[i][j] = 0.0;
        }
    }


    int loop = 0;
    while( loop < pBp->maxLoopNum)
    {
        pBp->err = 0.0;

        for (int m = 0; m<dataNum; m++)
        {

            double maxVal = 0.0;
            double minVal = 0.0;

            // Copy the sample's features into the input layer and track their min/max
            // (note: maxVal/minVal are overwritten with fixed values below).
            for (int i=0; i<InputN; i++)
            {
                pBp->xOut[i] = pData[m].Input[i];
                if (i == 0)
                {
                    maxVal = pBp->xOut[i];
                    minVal = pBp->xOut[i];
                }
                else
                {
                    if (maxVal < pBp->xOut[i])
                    {
                        maxVal = pBp->xOut[i];
                    }

                    if (minVal > pBp->xOut[i])
                    {
                        minVal = pBp->xOut[i];
                    }
                }
            }

            for (int i=0; i<OutN; i++)
            {
                pBp->y[i] = pData[m].Teach[i];
            }

            // Fixed normalisation range [0, 1]; with these values the remapping
            // below leaves the inputs unchanged.
            maxVal = 1.0;
            minVal = 0.0;

            // Forward pass
            // 1) input layer: rescale the inputs to the chosen range
            for (int i=0; i<InputN; i++)
            {
                pBp->xOut[i] = (pBp->xOut[i] - minVal) / (maxVal - minVal);
            }

            // 2) hidden layer: weighted sum of the inputs through a tanh activation
            for (int i=0; i<HN; i++)
            {
                double sumTemp = 0.0;
                for (int j=0; j<InputN; j++)
                {
                    sumTemp += pBp->w[j][i] * pBp->xOut[j];
                }
                pBp->hOut[i] = tanh(sumTemp);
            }

            // 3) output layer: weighted sum of the hidden outputs through a sigmoid
            for (int i=0; i<OutN; i++)
            {
                double sumTemp = 0.0;
                for (int j=0; j<HN; j++)
                {
                    sumTemp += pBp->v[j][i] * pBp->hOut[j];
                }
                pBp->yOut[i] = sigmod(sumTemp);
            }


            // Error back-propagation
            // Output layer: local gradient = (target - output) * sigmoid'(net);
            // yOut already is sigmoid(net), so sigmoid'(net) = yOut*(1 - yOut).
            for (int i=0; i< OutN; i++)
            {
                double errTemp = pBp->y[i] - pBp->yOut[i];
                pBp->y_delta[i] = errTemp * pBp->yOut[i] * ( 1.0 - pBp->yOut[i]);
                pBp->err += errTemp * errTemp;
            }

            // Hidden layer: back-propagate the output deltas through v, then multiply by
            // tanh'(net); hOut already is tanh(net), so tanh'(net) = 1 - hOut*hOut.
            for (int i=0; i<HN; i++)
            {
                double errTemp = 0.0;
                for (int j=0; j<OutN; j++)
                {
                    errTemp += pBp->y_delta[j] * pBp->v[i][j];
                }

                pBp->hd_delta[i] = errTemp * (1.0 - pBp->hOut[i]*pBp->hOut[i]);
            }

            // Update the weights: each step is alpha*previous_step (momentum)
            // plus beta*local_gradient*input_to_that_weight (learning step).
            for (int i=0; i<OutN; i++)
            {
                for (int j = 0; j<HN; j++)
                {
                    pBp->deltav[j][i] = pBp->alpha * pBp->deltav[j][i] + pBp->beta * pBp->y_delta[i] * pBp->hOut[j];
                    pBp->v[j][i] += pBp->deltav[j][i];
                }
            }

            for (int i=0; i<HN; i++)
            {
                for (int j = 0; j<InputN; j++)
                {
                    pBp->deltaw[j][i] = pBp->alpha * pBp->deltaw[j][i] + pBp->beta * pBp->hd_delta[i] * pBp->xOut[j];
                    pBp->w[j][i] += pBp->deltaw[j][i];
                }
            }

        }

        // Halve the summed squared error (E = 1/2 * sum((t - y)^2)) and check for convergence.
        pBp->err = pBp->err/2;
        if (pBp->err < pBp->errLimit)
        {
            AfxMessageBox("Training succeeded: the network has converged!");
            break;
        }

        loop++;
    }


}

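// BP_JianYan ("test"): run one sample forward through the trained network.
// Note that the result is written back into pData->Teach, overwriting the original label.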
void BP_JianYan(BP_t *pBp, BP_DATA_t *pData)
{

    for (int i=0; i<InputN; i++)
    {
        pBp->xOut[i] = pData->Input[i];
    }

    // 2) hidden layer
    for (int i=0; i<HN; i++)
    {
        double sumTemp = 0.0;
        for (int j=0; j<InputN; j++)
        {
            sumTemp += pBp->w[j][i] * pBp->xOut[j];
        }
        pBp->hOut[i] = tanh(sumTemp);
    }

    // 3) output layer
    for (int i=0; i<OutN; i++)
    {
        double sumTemp = 0.0;
        for (int j=0; j<HN; j++)
        {
            sumTemp += pBp->v[j][i] * pBp->hOut[j];
        }
        pBp->yOut[i] = sigmod(sumTemp);
        pData->Teach[i] = pBp->yOut[i];   // store the prediction back into the sample
    }

}

int _tmain(int argc, TCHAR* argv[], TCHAR* envp[])
{
    int nRetCode = 0;

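    // 15 hand-labelled training samples: two measured features each,
    // with a 0/1 class label as the teaching value.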
    BP_DATA_t data[15] =
    {
        { {1.780, 1.140 },1},
        { {1.960, 1.180 },1},
        { {1.860, 1.200 },1},
        { {1.720, 1.240 },0},
        { {2.000, 1.260 },1},
        { {2.000, 1.280 },1},
        { {1.960, 1.300 },1},
        { {1.740, 1.360 },0},

        { {1.640, 1.380 },0},
        { {1.820, 1.380 },0},
        { {1.900, 1.380 },0},
        { {1.700, 1.400 },0},
        { {1.820, 1.480 },0},
        { {1.820, 1.540 },0},
        { {2.080, 1.560 },0},
    };


    BP_t *pBp = new BP_t;
    pBp->alpha = 0.1;          // momentum coefficient
    pBp->beta = 0.46;          // learning rate
    pBp->errLimit = 0.001;     // stop once the epoch error drops below this
    pBp->maxLoopNum = 55000;   // maximum number of training epochs

    BP_XunLian(pBp, data, 15);

    printf(" error %6.4f \n", pBp->err);


    // Print each sample: index, features, original label, then the network's prediction
    // (BP_JianYan overwrites Teach[0] with the network output).
    for (int i=0; i<15; i++)
    {
        printf("%3d %8.5f %8.5f %8.5f ", i, data[i].Input[0], data[i].Input[1], data[i].Teach[0]);
        BP_JianYan(pBp,&data[i]);
        printf(" %8.5f \n", data[i].Teach[0]);
    }



system("pause");
return nRetCode;
}

Author's WeChat ID: xh66i88
Original post: https://www.cnblogs.com/signal/p/2874332.html