Improved BP Neural Network Algorithm (C Source Code)

BP Neural Network: Detailed Step-by-Step Implementation in C

Only scattered fragments of this listing survive, and they are in fact C# rather than C (note the `[i, j]` indexing and `public double[]`). They are cleaned up and grouped by purpose below:

```csharp
// --- Find the normalization factor: largest absolute value over inputs and targets ---
if (Math.Abs(p[isamp, i]) > pMax)
    pMax = Math.Abs(p[isamp, i]);
for (int j = 0; j < outNum; j++)
    if (Math.Abs(t[isamp, j]) > pMax)
        pMax = Math.Abs(t[isamp, j]);

// --- Normalize one training sample (in_rate is the normalization factor) ---
for (int i = 0; i < inNum; i++)
    x[i] = p[isamp, i] / in_rate;
for (int i = 0; i < outNum; i++)
    yd[i] = t[isamp, i] / in_rate;

// --- Hidden-layer input and output ---
for (int j = 0; j < hideNum; j++)
{
    o1[j] = 0.0;
    // ... (accumulation and activation truncated in the original)
}

// --- Accumulate the squared output error ---
e += (yd[k] - x2[k]) * (yd[k] - x2[k]);

// --- Update V, the hidden-to-output weight matrix ---
for (int j = 0; j < hideNum; j++)
{
    v[j, k] += rate * qq[k] * x1[j];
}

// --- Hidden-layer error (the PP matrix) ---
for (int j = 0; j < hideNum; j++)
{
    // ... (truncated in the original)
}

// --- Apply an accumulated weight delta ---
w[i] += dw[i];

// --- Simulation (inference) function ---
public double[] sim(double[] psim)
{
    for (int i = 0; i < inNum; i++)
        x[i] = psim[i] / in_rate;   // in_rate is the normalization factor
    for (int j = 0; j < hideNum; j++)
    {
        o1[j] = 0.0;
        // ... (remainder truncated in the original)
    }
    // ...
}
```

BP Algorithm: Code Implementation

The backpropagation (BP) algorithm is a widely used neural-network training algorithm for supervised learning. Its core idea is to propagate the output error backwards through the network and use it to update the weights and biases so that the network output approaches the target output.

During back-propagation, the gradient of the loss with respect to each connection weight and bias is computed, and the parameters are updated along the negative gradient so as to minimize the loss function.
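In standard notation (not taken from any of the listings below), the update the two paragraphs above describe is gradient descent on the squared error, with learning rate $\eta$:

$$E = \frac{1}{2}\sum_{k}(y_k - \hat{y}_k)^2, \qquad w \leftarrow w - \eta\,\frac{\partial E}{\partial w}, \qquad b \leftarrow b - \eta\,\frac{\partial E}{\partial b}.$$

For a sigmoid output unit this yields the output-layer delta $\delta_k = (y_k - \hat{y}_k)\,\hat{y}_k(1 - \hat{y}_k)$, which is exactly the term the implementations below compute (`sout`, `output_layer_errors`).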

An example implementation of the BP algorithm:

```python
import numpy as np

class NeuralNetwork:
    def __init__(self, layers):
        # layers, e.g. [2, 3, 1]: sizes of the input, hidden and output layers
        self.layers = layers
        self.weights = []
        self.biases = []
        self.activations = []
        # initialize weights and biases
        for i in range(1, len(layers)):
            self.weights.append(np.random.randn(layers[i], layers[i - 1]))
            self.biases.append(np.random.randn(layers[i], 1))

    def sigmoid(self, z):
        return 1 / (1 + np.exp(-z))

    def sigmoid_derivative(self, a):
        # a is already a sigmoid activation, so sigma'(z) = a * (1 - a)
        return a * (1 - a)

    def forward_propagate(self, X):
        # X holds one column per sample: shape (n_inputs, n_samples)
        self.activations = [X]
        activation = X
        for w, b in zip(self.weights, self.biases):
            z = np.dot(w, activation) + b
            activation = self.sigmoid(z)
            self.activations.append(activation)
        return activation

    def backward_propagate(self, X, y, output):
        deltas = [None] * len(self.layers)
        deltas[-1] = output - y  # output-layer error
        # propagate the error back through the hidden layers
        for i in reversed(range(1, len(self.layers) - 1)):
            deltas[i] = np.dot(self.weights[i].T, deltas[i + 1]) \
                        * self.sigmoid_derivative(self.activations[i])
        # gradient-descent update of weights and biases (learning rate 0.1)
        for i in range(len(self.layers) - 1):
            self.weights[i] -= 0.1 * np.dot(deltas[i + 1], self.activations[i].T)
            self.biases[i] -= 0.1 * np.sum(deltas[i + 1], axis=1, keepdims=True)

    def train(self, X, y, epochs):
        for epoch in range(epochs):
            output = self.forward_propagate(X)
            self.backward_propagate(X, y, output)

    def predict(self, X):
        return np.round(self.forward_propagate(X))
```

The code above uses numpy to implement a simple multi-layer neural network that supports an arbitrary number of layers and an arbitrary number of neurons per layer. Inputs are arranged column-wise, so `X` has shape `(n_inputs, n_samples)`.

Improved BP Neural Network Algorithm

Below is a simple C implementation of an improved BP neural network, with comments in the code. It trains the network to fit y = 2x², i.e. twice the square of the input.

```c
#include <stdio.h>
#include <stdlib.h>
#include <math.h>

#define INPUT_LAYER_SIZE 1   // number of input nodes
#define HIDDEN_LAYER_SIZE 10 // number of hidden nodes
#define OUTPUT_LAYER_SIZE 1  // number of output nodes
#define LEARNING_RATE 0.1    // learning rate
#define MAX_EPOCHS 10000     // training iterations (used but never defined in the original listing)

double sigmoid(double x) { return 1 / (1 + exp(-x)); }

// derivative expressed in terms of the sigmoid output x
double sigmoid_derivative(double x) { return x * (1 - x); }

// training function: one forward/backward pass on a single sample
void train(double input[INPUT_LAYER_SIZE],
           double expected_output[OUTPUT_LAYER_SIZE],
           double w_ih[INPUT_LAYER_SIZE][HIDDEN_LAYER_SIZE],
           double w_ho[HIDDEN_LAYER_SIZE][OUTPUT_LAYER_SIZE],
           double *b_h, double *b_o)
{
    // forward pass
    double hidden_layer_activations[HIDDEN_LAYER_SIZE];
    double output_layer_activations[OUTPUT_LAYER_SIZE];
    for (int i = 0; i < HIDDEN_LAYER_SIZE; i++) {
        double activation = 0;
        for (int j = 0; j < INPUT_LAYER_SIZE; j++)
            activation += input[j] * w_ih[j][i];
        activation += *b_h;
        hidden_layer_activations[i] = sigmoid(activation);
    }
    for (int i = 0; i < OUTPUT_LAYER_SIZE; i++) {
        double activation = 0;
        for (int j = 0; j < HIDDEN_LAYER_SIZE; j++)
            activation += hidden_layer_activations[j] * w_ho[j][i];
        activation += *b_o;
        output_layer_activations[i] = sigmoid(activation);
    }
    // backward pass
    double output_layer_errors[OUTPUT_LAYER_SIZE];
    double hidden_layer_errors[HIDDEN_LAYER_SIZE];
    for (int i = 0; i < OUTPUT_LAYER_SIZE; i++)
        output_layer_errors[i] = (expected_output[i] - output_layer_activations[i])
                                 * sigmoid_derivative(output_layer_activations[i]);
    for (int i = 0; i < HIDDEN_LAYER_SIZE; i++) {
        double error = 0;
        for (int j = 0; j < OUTPUT_LAYER_SIZE; j++)
            error += output_layer_errors[j] * w_ho[i][j];
        hidden_layer_errors[i] = error * sigmoid_derivative(hidden_layer_activations[i]);
    }
    // update weights and biases
    for (int i = 0; i < HIDDEN_LAYER_SIZE; i++)
        for (int j = 0; j < OUTPUT_LAYER_SIZE; j++)
            w_ho[i][j] += LEARNING_RATE * output_layer_errors[j] * hidden_layer_activations[i];
    for (int i = 0; i < INPUT_LAYER_SIZE; i++)
        for (int j = 0; j < HIDDEN_LAYER_SIZE; j++)
            w_ih[i][j] += LEARNING_RATE * hidden_layer_errors[j] * input[i];
    *b_o += LEARNING_RATE * output_layer_errors[0];
    *b_h += LEARNING_RATE * hidden_layer_errors[0];
}

// test (inference) function
double test(double input[INPUT_LAYER_SIZE],
            double w_ih[INPUT_LAYER_SIZE][HIDDEN_LAYER_SIZE],
            double w_ho[HIDDEN_LAYER_SIZE][OUTPUT_LAYER_SIZE],
            double b_h, double b_o)
{
    double hidden_layer_activations[HIDDEN_LAYER_SIZE];
    double output_layer_activations[OUTPUT_LAYER_SIZE];
    for (int i = 0; i < HIDDEN_LAYER_SIZE; i++) {
        double activation = 0;
        for (int j = 0; j < INPUT_LAYER_SIZE; j++)
            activation += input[j] * w_ih[j][i];
        activation += b_h;
        hidden_layer_activations[i] = sigmoid(activation);
    }
    for (int i = 0; i < OUTPUT_LAYER_SIZE; i++) {
        double activation = 0;
        for (int j = 0; j < HIDDEN_LAYER_SIZE; j++)
            activation += hidden_layer_activations[j] * w_ho[j][i];
        activation += b_o;
        output_layer_activations[i] = sigmoid(activation);
    }
    return output_layer_activations[0];
}

int main(void)
{
    // initialize weights and biases
    double w_ih[INPUT_LAYER_SIZE][HIDDEN_LAYER_SIZE];
    double w_ho[HIDDEN_LAYER_SIZE][OUTPUT_LAYER_SIZE];
    double b_h = 0, b_o = 0;
    for (int i = 0; i < INPUT_LAYER_SIZE; i++)
        for (int j = 0; j < HIDDEN_LAYER_SIZE; j++)
            w_ih[i][j] = ((double) rand() / RAND_MAX) * 2 - 1; // random in [-1, 1]
    for (int i = 0; i < HIDDEN_LAYER_SIZE; i++)
        for (int j = 0; j < OUTPUT_LAYER_SIZE; j++)
            w_ho[i][j] = ((double) rand() / RAND_MAX) * 2 - 1; // random in [-1, 1]

    // train the model
    for (int epoch = 0; epoch < MAX_EPOCHS; epoch++) {
        double input = ((double) rand() / RAND_MAX) * 10; // random in [0, 10]
        double expected_output = 2 * pow(input, 2);       // y = 2x^2
        train(&input, &expected_output, w_ih, w_ho, &b_h, &b_o);
    }

    // test the model
    double input = 5;
    double output = test(&input, w_ih, w_ho, b_h, b_o);
    printf("Input: %.2f, Output: %.2f\n", input, output);
    return 0;
}
```

This code implements a BP network with one input layer, one hidden layer and one output layer, using the sigmoid activation and the mean-squared error. Note that a sigmoid output is confined to (0, 1), while the raw targets 2x² reach 200 for x in [0, 10]; the targets must be scaled into (0, 1) for the fit to succeed, a step this listing omits (see the sketch below).
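A minimal sketch of the missing target scaling, assuming a known output ceiling (the name OUT_MAX and both helpers are introduced here for illustration; they are not part of the original listing):

```c
#define OUT_MAX 200.0  /* hypothetical ceiling: 2 * 10^2 for inputs in [0, 10] */

/* scale a raw target into (0, 1) before calling train() */
double normalize_target(double y)   { return y / OUT_MAX; }

/* map a network output back to the original scale after test() */
double denormalize_output(double o) { return o * OUT_MAX; }
```

With these helpers the training loop would pass `normalize_target(2 * pow(input, 2))` to `train()` and print `denormalize_output(output)` after `test()`.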

BP Neural Network C Program

This BP neural network program uses file operations to record the training and test data. Initial weights are produced by a pseudo-random function in the range (0, 0.5). The network is built from structs and pointers, with a training-instance struct, a layer struct, and a network struct. The data-structure design, and the optimization of the learning algorithm, follow Principles of Artificial Neural Networks (Ma Rui, ed., Beijing: China Machine Press, 2010); in particular, an adaptive learning-rate adjustment is used to reduce the number of training iterations (isolated in the sketch after the listing).

Known limitations: the program still carries substantial error, and the adjustment coefficients for the learning rate are hard to tune. Keeping the initial-weight range moderate markedly reduces the number of training iterations; with random initial weights in (0, 0.5) the iteration count could be tuned down to 135, but the discrimination on test data was then unsatisfactory, so that setting was not adopted.

```c
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <malloc.h>

#define TRUE  1
#define FALSE 0
#define NUM_LAYERS 4
#define NUM 20   // number of training instances
#define N   2    // input-layer units
#define M   2    // output-layer units

int Units[NUM_LAYERS] = {N, 3, 3, M}; // units per layer
FILE *fp, *fb;

typedef struct {      // training instance
    float x[N];
    float y[M];
} TRAIN;

typedef struct {      // network layer
    int Units;        // number of units in this layer
    float *Output;    // output of unit i
    float *Error;     // correction error of unit i
    float **Weight;   // connection weights of unit i
} LAYER;

typedef struct {      // network
    LAYER **Layer;      // all layers
    LAYER *Inputlayer;  // input layer
    LAYER *Outputlayer; // output layer
    float Error;        // error tolerance
    float Eta;          // learning rate
} NET;

// seed the pseudo-random generator
void InitializeRandoms(void) { srand(4711); }

// random weight in [0, 0.5)
float RandomReal(void) { return (float)((rand() % 100) / 200.0); }

// read the training data from a user-named file
void InitializeTrainingData(TRAIN *training)
{
    int i, j;
    char filename[20];
    printf("\nEnter the training-data file name:\n");
    scanf("%19s", filename);   /* the original used gets(), removed in C11 */
    fb = fopen(filename, "r");
    fprintf(fp, "\n\n--Saving initialization training datas ...\n");
    for (i = 0; i < NUM; i++) {
        for (j = 0; j < N; j++) {
            fscanf(fb, "%f", &(training + i)->x[j]);
            fprintf(fp, "%10.4f", (training + i)->x[j]);
        }
        for (j = 0; j < M; j++) {
            fscanf(fb, "%f", &(training + i)->y[j]);
            fprintf(fp, "%10.4f", (training + i)->y[j]);
        }
        fprintf(fp, "\n");
    }
    fclose(fb);
}

// application setup: learning rate, tolerance, result file
void InitializeApplication(NET *Net)
{
    Net->Eta = (float)0.3;
    Net->Error = (float)0.0001;
    fp = fopen("BPResultData.txt", "w+");
}

// close the result file on shutdown
void FinalizeApplication(NET *Net) { fclose(fp); }

// allocate memory and build the network
void GenerateNetwork(NET *Net)
{
    int l, i;
    Net->Layer = (LAYER **)calloc(NUM_LAYERS, sizeof(LAYER *));
    for (l = 0; l < NUM_LAYERS; l++) {
        Net->Layer[l] = (LAYER *)malloc(sizeof(LAYER));
        Net->Layer[l]->Units  = Units[l];
        Net->Layer[l]->Output = (float *)calloc(Units[l] + 1, sizeof(float));
        Net->Layer[l]->Error  = (float *)calloc(Units[l] + 1, sizeof(float));
        Net->Layer[l]->Weight = (float **)calloc(Units[l] + 1, sizeof(float *));
        Net->Layer[l]->Output[0] = 1;
        if (l != 0)
            for (i = 1; i <= Units[l]; i++)  // indices start at 1
                Net->Layer[l]->Weight[i] = (float *)calloc(Units[l-1] + 1, sizeof(float));
    }
    Net->Inputlayer  = Net->Layer[0];
    Net->Outputlayer = Net->Layer[NUM_LAYERS - 1];
}

// assign random initial connection weights
void RandomWeights(NET *Net)
{
    int l, i, j;
    for (l = 1; l < NUM_LAYERS; l++)
        for (i = 1; i <= Net->Layer[l]->Units; i++)
            for (j = 0; j <= Net->Layer[l-1]->Units; j++)
                Net->Layer[l]->Weight[i][j] = RandomReal();
}

// copy input values into the input layer (identity activation u(x) = x)
void SetInput(NET *Net, float *Input)
{
    int i;
    for (i = 1; i <= Net->Inputlayer->Units; i++)
        Net->Inputlayer->Output[i] = Input[i-1];
}

// read the output layer (sigmoid f(x) = 1/(1+e^-x))
void GetOutput(NET *Net, float *Output)
{
    int i;
    for (i = 1; i <= Net->Outputlayer->Units; i++)
        Output[i-1] = (float)(1 / (1 + exp(-Net->Outputlayer->Output[i])));
}

// forward propagation between two adjacent layers
void PropagateLayer(NET *Net, LAYER *Lower, LAYER *Upper)
{
    int i, j;
    float sum;
    for (i = 1; i <= Upper->Units; i++) {
        sum = 0;
        for (j = 1; j <= Lower->Units; j++)
            sum += Upper->Weight[i][j] * Lower->Output[j];
        Upper->Output[i] = (float)(1 / (1 + exp(-sum)));
    }
}

// forward propagation through the whole network
void PropagateNet(NET *Net)
{
    int l;
    for (l = 0; l < NUM_LAYERS - 1; l++)
        PropagateLayer(Net, Net->Layer[l], Net->Layer[l+1]);
}

// output-layer error
void ComputeOutputError(NET *Net, float *target)
{
    int i;
    float Out, Err;
    for (i = 1; i <= Net->Outputlayer->Units; i++) {
        Out = Net->Outputlayer->Output[i];
        Err = target[i-1] - Out;
        Net->Outputlayer->Error[i] = Out * (1 - Out) * Err;
    }
}

// error back-propagation between two adjacent layers
void BackpropagateLayer(NET *Net, LAYER *Upper, LAYER *Lower)
{
    int i, j;
    float Out, Err;
    for (i = 1; i <= Lower->Units; i++) {
        Out = Lower->Output[i];
        Err = 0;
        for (j = 1; j <= Upper->Units; j++)
            Err += Upper->Weight[j][i] * Upper->Error[j];
        Lower->Error[i] = Out * (1 - Out) * Err;
    }
}

// error back-propagation through the whole network
void BackpropagateNet(NET *Net)
{
    int l;
    for (l = NUM_LAYERS - 1; l > 1; l--)
        BackpropagateLayer(Net, Net->Layer[l], Net->Layer[l-1]);
}

// weight adjustment
void AdjustWeights(NET *Net)
{
    int l, i, j;
    float Out, Err;
    for (l = 1; l < NUM_LAYERS; l++)
        for (i = 1; i <= Net->Layer[l]->Units; i++)
            for (j = 0; j <= Net->Layer[l-1]->Units; j++) {
                Out = Net->Layer[l-1]->Output[j];
                Err = Net->Layer[l]->Error[i];
                Net->Layer[l]->Weight[i][j] += Net->Eta * Err * Out;
            }
}

// one full network pass, optionally with training
void SimulateNet(NET *Net, float *Input, float *Output, float *target, int TrainOrNot)
{
    SetInput(Net, Input);            // load input
    PropagateNet(Net);               // forward pass
    GetOutput(Net, Output);          // produce output
    ComputeOutputError(Net, target); // compute output error
    if (TrainOrNot) {
        BackpropagateNet(Net);       // error back-propagation
        AdjustWeights(Net);          // weight update
    }
}

// training loop with adaptive learning rate
void TrainNet(NET *Net, TRAIN *training)
{
    int l, i, j, k;
    int count = 0, flag = 0;
    float Output[M], outputfront[M], ERR, err, sum;
    do {
        flag = 0;
        sum = 0;
        ERR = 0;
        if (count >= 1)
            for (j = 0; j < M; j++)
                outputfront[j] = Output[j];
        SimulateNet(Net, (training + (count % NUM))->x, Output,
                    (training + (count % NUM))->y, TRUE);
        if (count >= 1) {
            for (i = 1; i <= Net->Outputlayer->Units; i++) {
                sum += Net->Outputlayer->Error[i];
                err = (training + (count - 1) % NUM)->y[i-1] - outputfront[i-1];
                ERR += outputfront[i-1] * (1 - outputfront[i-1]) * err;
            }
            // adaptive learning rate: nudge Eta down or up by a small factor
            if (sum <= ERR)
                Net->Eta = (float)(0.9999 * Net->Eta);
            else
                Net->Eta = (float)(1.0015 * Net->Eta);
        }
        if (count >= NUM) {
            for (k = 1; k <= M; k++)
                if (Net->Outputlayer->Error[k] > Net->Error) { flag = 1; break; }
            if (k > M)
                flag = 0;
        }
        count++;
    } while (flag || count <= NUM);
    fprintf(fp, "\n\n\n--training results ...\ntraining times: %d\n", count);
    fprintf(fp, "\n*****the final weights*****\n");
    for (l = 1; l < NUM_LAYERS; l++) {
        for (i = 1; i <= Net->Layer[l]->Units; i++) {
            for (j = 1; j <= Net->Layer[l-1]->Units; j++)
                fprintf(fp, "%15.6f", Net->Layer[l]->Weight[i][j]);
            fprintf(fp, "\n");
        }
        fprintf(fp, "\n\n");
    }
}

// fuzzy class-membership phrases for outputs near 0.0, 0.1, ..., 1.0
static const char *Grade[] = {
    "definitely is not", "faintly resembles", "slightly resembles",
    "somewhat resembles", "rather resembles", "more or less is",
    "quite likely is", "very likely is", "most probably is",
    "almost certainly is", "definitely is"
};

// evaluation: print the input and a graded class judgement per output unit
void EvaluateNet(NET *Net)
{
    int i, g;
    printf("\n\n(");
    fprintf(fp, "\n\n(");
    for (i = 1; i <= Net->Inputlayer->Units; i++) {
        printf(" %.4f", Net->Inputlayer->Output[i]);
        fprintf(fp, "%10.4f", Net->Inputlayer->Output[i]);
    }
    printf(")\t");
    fprintf(fp, ")\t");
    for (i = 1; i <= Net->Outputlayer->Units; i++) {
        // the original is an 11-step if-ladder in 0.1 increments;
        // rounding to the nearest tenth is equivalent
        g = (int)(Net->Outputlayer->Output[i] * 10 + 0.5);
        printf("%s class %d, ", Grade[g], i);
        fprintf(fp, "%s class %d, ", Grade[g], i);
    }
    printf("\n\n");
    fprintf(fp, "\n\n\n");
}

// interactive test loop
void TestNet(NET *Net)
{
    TRAIN Testdata;
    float Output[M];
    int i, j, flag = 0;
    char select;
    fprintf(fp, "\n\n--Saving test datas ...\n");
    do {
        printf("\nEnter test data (x1 x2 ... y1 y2 ...):\n");
        for (j = 0; j < N; j++) {
            scanf("%f", &Testdata.x[j]);
            fprintf(fp, "%10.4f", Testdata.x[j]);
        }
        for (j = 0; j < M; j++) {
            scanf("%f", &Testdata.y[j]);
            fprintf(fp, "%10.4f", Testdata.y[j]);
        }
        fprintf(fp, "\n");
        SimulateNet(Net, Testdata.x, Output, Testdata.y, FALSE);
        fprintf(fp, "\n--NET Output and Error of the Test Data ....\n");
        for (i = 1; i <= Net->Outputlayer->Units; i++)
            fprintf(fp, "%10.6f %10.6f\n",
                    Net->Outputlayer->Output[i], Net->Outputlayer->Error[i]);
        EvaluateNet(Net);
        printf("\nContinue testing? (y/n):\n");
        getchar();
        scanf("%c", &select);
        printf("\n");
        flag = (select == 'y' || select == 'Y') ? 1 : 0;
    } while (flag);
}

int main(void)
{
    TRAIN TrainingData[NUM];
    NET Net;
    InitializeRandoms();                  // seed the PRNG
    GenerateNetwork(&Net);                // build the network
    RandomWeights(&Net);                  // assign initial weights
    InitializeApplication(&Net);          // open the result file
    InitializeTrainingData(TrainingData); // read the training data
    TrainNet(&Net, TrainingData);         // train
    TestNet(&Net);                        // test interactively
    FinalizeApplication(&Net);            // close files
    return 0;
}
```
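The learning-rate adaptation credited to Ma Rui's book reduces to the few lines in TrainNet that compare the current and previous error measures. Restated as a self-contained helper (the function name is chosen here for illustration):

```c
/* Adaptive learning-rate schedule as hard-coded in TrainNet above:
 * the rate is nudged down by a factor of 0.9999 on one branch and up by
 * 1.0015 on the other, so it drifts gradually rather than jumping. */
float adapt_eta(float eta, float current_err, float previous_err)
{
    return (current_err <= previous_err) ? 0.9999f * eta : 1.0015f * eta;
}
```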

Improved BP Neural Network Algorithm: C Source Code

```c
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <math.h>

/*
 * inpoints      number of input neurons (adjustable)
 * outpoints     number of output neurons
 * defaultpoints number of hidden neurons
 * datagrough    number of training samples
 * The definitions below may be modified.
 */
#define A 0
#define a 1
#define b 1
#define c 1
#define ALFA 0.85            /* momentum coefficient */
#define BETA 0.2             /* learning rate, 0~1 */
#define Total 20000
#define inpoints 9
#define outpoints 5
#define defaultpoints 28
#define datagrough 44
#define forecastdata 4

/* working variables */
double InpointData[datagrough][inpoints], OutpointData[datagrough][outpoints]; /* input/output data */
double InpointData_MAX[inpoints], InpointData_MIN[inpoints];     /* per-column input max/min */
double OutpointData_MAX[outpoints], OutpointData_MIN[outpoints]; /* per-column output max/min */
double w[defaultpoints][inpoints], limen[defaultpoints], v[outpoints][defaultpoints]; /* weights, thresholds */
double dlta_w[defaultpoints][inpoints], dlta_limen[defaultpoints], dlta_v[outpoints][defaultpoints]; /* deltas */
double defaultOutpoint[defaultpoints], Outpoint_dp[outpoints], Outpoint_ep[datagrough];

/* read the data files */
void ReadData(void)
{
    FILE *fp1, *fp2;
    int i, j;
    if ((fp1 = fopen("D:\\data\\训练输入.txt", "r")) == NULL) {
        printf("1can not open the file\n");
        exit(0);
    }
    for (i = 0; i < datagrough; i++)
        for (j = 0; j < inpoints; j++)
            fscanf(fp1, "%lf", &InpointData[i][j]);
    fclose(fp1);
    if ((fp2 = fopen("D:\\data\\训练输出.txt", "r")) == NULL) {
        printf("2can not open the file\n");
        exit(0);
    }
    for (i = 0; i < datagrough; i++)
        for (j = 0; j < outpoints; j++)
            fscanf(fp2, "%lf", &OutpointData[i][j]);
    fclose(fp2);
}

/* normalization: scale all data into [0,1] */
void unitary(void)
{
    int i, j;
    for (j = 0; j < inpoints; j++) {   /* per-column max/min of the inputs */
        InpointData_MAX[j] = InpointData[0][j];
        InpointData_MIN[j] = InpointData[0][j];
        for (i = 0; i < datagrough; i++)
            if (InpointData_MAX[j] < InpointData[i][j])
                InpointData_MAX[j] = InpointData[i][j];
            else if (InpointData_MIN[j] > InpointData[i][j])
                InpointData_MIN[j] = InpointData[i][j];
    }
    for (j = 0; j < outpoints; j++) {  /* per-column max/min of the outputs */
        OutpointData_MAX[j] = OutpointData[0][j];
        OutpointData_MIN[j] = OutpointData[0][j];
        for (i = 0; i < datagrough; i++)
            if (OutpointData_MAX[j] < OutpointData[i][j])
                OutpointData_MAX[j] = OutpointData[i][j];
            else if (OutpointData_MIN[j] > OutpointData[i][j])
                OutpointData_MIN[j] = OutpointData[i][j];
    }
    /* scale every column into [0,1] */
    for (j = 0; j < inpoints; j++)
        for (i = 0; i < datagrough; i++)
            if (InpointData_MAX[j] == 0)
                InpointData[i][j] = 0;
            else
                InpointData[i][j] = (InpointData[i][j] - InpointData_MIN[j] + A)
                                  / (InpointData_MAX[j] - InpointData_MIN[j] + A);
    for (j = 0; j < outpoints; j++)
        for (i = 0; i < datagrough; i++)
            if (OutpointData_MAX[j] == 0)
                OutpointData[i][j] = 0;
            else
                OutpointData[i][j] = (OutpointData[i][j] - OutpointData_MIN[j] + A)
                                   / (OutpointData_MAX[j] - OutpointData_MIN[j] + A);
}

/* initialization: random weights and thresholds in (-0.5, 0.5) */
void Initialization(void)
{
    int i, j;
    srand((unsigned)time(NULL));
    for (i = 0; i < defaultpoints; i++)    /* input-to-hidden weights */
        for (j = 0; j < inpoints; j++) {
            w[i][j] = (rand() * 2.0 / RAND_MAX - 1) / 2;
            dlta_w[i][j] = 0;
        }
    for (i = 0; i < defaultpoints; i++) {
        limen[i] = (rand() * 2.0 / RAND_MAX - 1) / 2;
        dlta_limen[i] = 0;
    }
    for (i = 0; i < outpoints; i++)        /* hidden-to-output weights */
        for (j = 0; j < defaultpoints; j++) {
            v[i][j] = (rand() * 2.0 / RAND_MAX - 1) / 2;
            dlta_v[i][j] = 0;
        }
}

/* forward pass: per-sample output error */
void out_sub1(int t)
{
    int i, j;
    double defaultInpoint[defaultpoints];
    double Outpoint_y[outpoints];
    Outpoint_ep[t] = 0;
    for (i = 0; i < defaultpoints; i++) {
        double sum = 0;
        for (j = 0; j < inpoints; j++)
            sum += w[i][j] * InpointData[t][j];
        defaultInpoint[i] = sum + limen[i];
        defaultOutpoint[i] = 1 / (a + b * exp(-1 * c * defaultInpoint[i])); /* O[i] */
    }
    for (j = 0; j < outpoints; j++) {      /* Y[j] */
        Outpoint_y[j] = 0;
        for (i = 0; i < defaultpoints; i++)
            Outpoint_y[j] += v[j][i] * defaultOutpoint[i];
        Outpoint_dp[j] = OutpointData[t][j] - Outpoint_y[j];
        Outpoint_ep[t] += Outpoint_dp[j] * Outpoint_dp[j] / 2;
    }
}

/* backward pass: weight correction with momentum */
void out_sub2(int t)
{
    int i, j, k;
    double s;
    for (i = 0; i < defaultpoints; i++) {
        s = 0;
        for (j = 0; j < outpoints; j++) {
            dlta_v[j][i] = ALFA * dlta_v[j][i] + BETA * Outpoint_dp[j] * defaultOutpoint[i];
            s += v[j][i] * Outpoint_dp[j];   /* accumulate before v is updated */
            v[j][i] += dlta_v[j][i];
        }
        dlta_limen[i] = ALFA * dlta_limen[i]
                      + BETA * defaultOutpoint[i] * (1 - defaultOutpoint[i]) * s;
        limen[i] += dlta_limen[i];
        for (k = 0; k < inpoints; k++) {
            dlta_w[i][k] = ALFA * dlta_w[i][k]
                         + BETA * defaultOutpoint[i] * (1 - defaultOutpoint[i]) * s * InpointData[t][k];
            w[i][k] += dlta_w[i][k];
        }
    }
}

/* train, then validate on the forecast set */
void forecast(void)
{
    int i, j, t, k = 0;
    double e, e1[forecastdata] = {0};  /* training / validation errors */
    double sss;                        /* mean validation error */
    double InputData_x[forecastdata][inpoints], tp[forecastdata][outpoints];
    double defInpoint, defOutpoint[defaultpoints], y[forecastdata][outpoints]; /* validation outputs */
    FILE *fp1, *fp3;
    if ((fp1 = fopen("D:\\data\\预测输入.txt", "r")) == NULL) { /* validation inputs */
        printf("3can not open the file\n");
        exit(0);
    }
    for (i = 0; i < forecastdata; i++)
        for (j = 0; j < inpoints; j++)
            fscanf(fp1, "%lf", &InputData_x[i][j]);
    fclose(fp1);
    if ((fp3 = fopen("D:\\data\\预测输出.txt", "r")) == NULL) { /* true validation outputs */
        printf("31can not open the file\n");
        exit(0);
    }
    for (i = 0; i < forecastdata; i++)
        for (j = 0; j < outpoints; j++)
            fscanf(fp3, "%lf", &tp[i][j]);
    fclose(fp3);
    /* normalize the validation data with the training min/max */
    for (j = 0; j < inpoints; j++)
        for (i = 0; i < forecastdata; i++)
            if (InpointData_MAX[j] == 0)
                InputData_x[i][j] = 0;
            else
                InputData_x[i][j] = (InputData_x[i][j] - InpointData_MIN[j] + A)
                                  / (InpointData_MAX[j] - InpointData_MIN[j] + A);
    for (j = 0; j < outpoints; j++)
        for (i = 0; i < forecastdata; i++)
            if (OutpointData_MAX[j] == 0)
                tp[i][j] = 0;
            else
                tp[i][j] = (tp[i][j] - OutpointData_MIN[j] + A)
                         / (OutpointData_MAX[j] - OutpointData_MIN[j] + A);
    do {
        Initialization();              /* re-initialize w[i][j], limen[i], v[k][i] */
        k = 0;
        do {                           /* train until the error or iteration cap is hit */
            e = 0;
            for (t = 0; t < datagrough; t++) {
                out_sub1(t);           /* forward pass */
                out_sub2(t);           /* backward pass, weight update */
                e += Outpoint_ep[t];   /* accumulate the output error */
            }
            k++;
        } while ((k < Total) && (e > 0.1));
        sss = 0;
        for (t = 0; t < forecastdata; t++) {
            e1[t] = 0;
            for (i = 0; i < defaultpoints; i++) {
                double sum = 0;
                for (j = 0; j < inpoints; j++)
                    sum += w[i][j] * InputData_x[t][j];
                defInpoint = sum + limen[i];
                defOutpoint[i] = 1 / (a + b * exp(-1 * c * defInpoint));
            }
            for (j = 0; j < outpoints; j++) {
                y[t][j] = 0;
                for (i = 0; i < defaultpoints; i++)
                    y[t][j] += v[j][i] * defOutpoint[i];
                e1[t] += (y[t][j] - tp[t][j]) * (y[t][j] - tp[t][j]) / 2;
                y[t][j] = y[t][j] * (OutpointData_MAX[j] - OutpointData_MIN[j] + A)
                        + OutpointData_MIN[j] - A;   /* de-normalize */
            }
            sss += e1[t];
        }
        sss = sss / forecastdata;
        printf(" %lf %lf\n", e, sss);
    } while (sss > 0.12);              /* retrain until the validation error is acceptable */
}

int main(void)
{
    int i, j, k;
    FILE *fp2;
    ReadData();   /* read training inputs and outputs */
    unitary();    /* normalize into [0,1] */
    forecast();   /* train and validate */
    if ((fp2 = fopen("D:\\data\\计算权值.txt", "w")) == NULL) { /* save the trained weights */
        printf("6can not open the file\n");
        exit(0);
    }
    for (i = 0; i < defaultpoints; i++) {
        for (k = 0; k < inpoints; k++)
            fprintf(fp2, " %lf ", w[i][k]);
        fprintf(fp2, "\n");
    }
    fprintf(fp2, "\n");
    for (i = 0; i < defaultpoints; i++)
        fprintf(fp2, " %lf ", limen[i]);
    fprintf(fp2, "\n\n");
    for (i = 0; i < defaultpoints; i++) {
        for (j = 0; j < outpoints; j++)
            fprintf(fp2, " %lf ", v[j][i]);
        fprintf(fp2, "\n");
    }
    fclose(fp2);
    return 0;
}
```
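The "improvement" in this listing is the momentum term: every delta blends the previous delta, weighted by ALFA, with a fresh gradient step, weighted by BETA, which damps oscillation between successive updates. Isolated as a helper (the name is chosen here for illustration):

```c
/* Momentum update as used in out_sub2 above:
 * new_delta = ALFA * old_delta + BETA * gradient, then weight += new_delta.
 * ALFA = 0.85 is the momentum coefficient, BETA = 0.2 the learning rate. */
double momentum_step(double *delta, double gradient)
{
    *delta = ALFA * (*delta) + BETA * gradient;
    return *delta;
}

/* e.g.: v[j][i] += momentum_step(&dlta_v[j][i], Outpoint_dp[j] * defaultOutpoint[i]); */
```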

Improving the BP Neural Network Algorithm
Huang Shangqing (School of Statistics and Mathematics, Zhejiang Gongshang University, Hangzhou 310018), Zhao Zhiyong (School of Automation, Huazhong University of Science and Technology, Wuhan 430074), Sun Libo (College of Information Science and Engineering, Northeastern University, Shenyang 110819)
This paper selects 50,000 samples from the training set for training and 10,000 samples from the test set for testing. Each sample is a 28×28 matrix (784 = 28 × 28), converted into a 784-dimensional feature vector and grouped according to the sample-grouping scheme described in the previous section. With min_batch = 20 and … = 5, the paper computes …
… such as batch gradient descent, stochastic gradient descent, and stochastic mini-batch gradient descent. Although all of these improve the convergence of the BP algorithm, none of them accounts for the redundancy among samples or their intrinsic correlations, both of which make the training process unwieldy.
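For reference, the three schemes named above differ only in how many per-sample gradients $\nabla E_n$ enter a single update step (standard definitions, not taken from the paper itself):

$$\text{batch: } w \leftarrow w - \eta\,\frac{1}{N}\sum_{n=1}^{N}\nabla E_n(w), \qquad \text{stochastic: } w \leftarrow w - \eta\,\nabla E_n(w), \qquad \text{mini-batch: } w \leftarrow w - \eta\,\frac{1}{|B|}\sum_{n\in B}\nabla E_n(w).$$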
Taking the above into account, this paper makes the following improvement. For a given input sample … [text missing in the excerpt]. For ease of discussion, the hidden-layer activation is chosen to be the sigmoid function f1(x) = 1/(1 + exp(-x));

Neural Network BP Algorithm: A C Implementation

```c
/************************************************
 * Back Propagation Algorithm
 ************************************************/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>

/* user data */
#define MAXINPUT  1
#define MAXHIDE   3
#define MAXOUTPUT 1
#define MAX  1      /* upper bound for random weight initialization */
#define MIN -1      /* lower bound for random weight initialization */
#define T  100      /* training epochs */
#define CA 4        /* number of training samples */

double a = 0.8;     /* momentum coefficient */
double b = 0.05;    /* learning rate */
double error = 0;   /* squared error of the current sample */
double sout[MAXOUTPUT];               /* output-layer deltas */
double shide[MAXHIDE];                /* hidden-layer deltas */
double howchange[MAXHIDE][MAXOUTPUT]; /* hidden-to-output weight deltas */
double ihwchange[MAXINPUT][MAXHIDE];  /* input-to-hidden weight deltas */
double CatalogueOut[CA][MAXOUTPUT];
double CatalogueIn[CA][MAXINPUT];

/* data structure */
struct theBP {
    double input[MAXINPUT];
    double hide[MAXHIDE];
    double output[MAXOUTPUT];
    double ihw[MAXINPUT][MAXHIDE];  /* input-to-hidden weights */
    double how[MAXHIDE][MAXOUTPUT]; /* hidden-to-output weights */
};
struct theBP bpa;

/* prototypes */
void WeightInitial(void);
void InPutCatalogue(void);
void CalculateOut(int k);
void CalculateError(int k);
void ReverseHideError(void);
void CalculateChange(void);
void CalculateNewWeight(void);
void Test(void);
void TestCalculateOut(void);
void camain(void);

int main(void)
{
    int m = 0;
    WeightInitial();
    InPutCatalogue();
    while (1) {
        printf("Choose an operation:\n");
        printf("0----------------train\n");
        printf("1----------------test\n");
        printf("2----------------quit\n");
        scanf("%d", &m);
        switch (m) {
        case 0: camain(); break;
        case 1: Test();   break;
        case 2: exit(0);
        }
    }
}

/* training loop: T epochs over the CA samples */
void camain(void)
{
    for (int t = 0; t < T; t++) {
        for (int k = 0; k < CA; k++) {
            CalculateOut(k);
            CalculateError(k);
            ReverseHideError();
            CalculateChange();
            CalculateNewWeight();
        }
        for (int k = 0; k < CA; k++)
            CalculateOut(k);
    }
}

/* initialize the weights */
void WeightInitial(void)
{
    for (int i = 0; i < MAXINPUT; i++)      /* input-to-hidden */
        for (int j = 0; j < MAXHIDE; j++)
            bpa.ihw[i][j] = 0.3; /* or ((double)rand()/RAND_MAX)*(MAX-MIN)+MIN */
    for (int i = 0; i < MAXHIDE; i++)       /* hidden-to-output */
        for (int j = 0; j < MAXOUTPUT; j++)
            bpa.how[i][j] = 0.2; /* or ((double)rand()/RAND_MAX)*(MAX-MIN)+MIN */
}

/* read the training catalogue */
void InPutCatalogue(void)
{
    for (int k = 0; k < CA; k++) {
        printf("Enter the input values of sample %d:\n", k);
        for (int i = 0; i < MAXINPUT; i++) {
            scanf("%lf", &bpa.input[i]);
            CatalogueIn[k][i] = bpa.input[i];
        }
        printf("Enter the output values of sample %d:\n", k);
        for (int i = 0; i < MAXOUTPUT; i++)
            scanf("%lf", &CatalogueOut[k][i]);
    }
}

/* forward pass on training sample k */
void CalculateOut(int k)
{
    for (int j = 0; j < MAXHIDE; j++) {      /* hidden layer */
        double sum2 = 0;
        for (int i = 0; i < MAXINPUT; i++) {
            bpa.input[i] = CatalogueIn[k][i];
            sum2 += bpa.ihw[i][j] * bpa.input[i];
        }
        bpa.hide[j] = 1 / (1 + exp(-sum2));
    }
    for (int j = 0; j < MAXOUTPUT; j++) {    /* output layer */
        double sum3 = 0;
        for (int i = 0; i < MAXHIDE; i++)
            sum3 += bpa.how[i][j] * bpa.hide[i];
        bpa.output[j] = 1 / (1 + exp(-sum3)); /* the dump also carried a linear m*sum3 variant */
        printf("final output of sample %d: %lf\n", k, bpa.output[j]);
    }
}

/* forward pass on a user-supplied test input */
void TestCalculateOut(void)
{
    for (int j = 0; j < MAXHIDE; j++) {
        double sum1 = 0;
        for (int i = 0; i < MAXINPUT; i++)
            sum1 += bpa.ihw[i][j] * bpa.input[i];
        bpa.hide[j] = 1 / (1 + exp(-sum1));
    }
    for (int j = 0; j < MAXOUTPUT; j++) {
        double sum2 = 0;
        for (int i = 0; i < MAXHIDE; i++)
            sum2 += bpa.how[i][j] * bpa.hide[i];
        bpa.output[j] = 1 / (1 + exp(-sum2));
        printf("final output: %lf\n", bpa.output[j]);
    }
}

/* output-layer squared error and deltas for sample k */
void CalculateError(int k)
{
    double temp = 0;
    error = 0;
    for (int i = 0; i < MAXOUTPUT; i++) {
        temp = (CatalogueOut[k][i] - bpa.output[i]) * (CatalogueOut[k][i] - bpa.output[i]);
        error = 0.5 * temp + error;
    }
    for (int i = 0; i < MAXOUTPUT; i++)
        sout[i] = (CatalogueOut[k][i] - bpa.output[i]) * bpa.output[i] * (1 - bpa.output[i]);
}

/* back-propagate the error to the hidden layer */
void ReverseHideError(void)
{
    for (int i = 0; i < MAXHIDE; i++) {
        double sum = 0;
        for (int j = 0; j < MAXOUTPUT; j++)
            sum += sout[j] * bpa.how[i][j];
        shide[i] = bpa.hide[i] * (1 - bpa.hide[i]) * sum;
    }
}

/* weight deltas with momentum: delta = a*old_delta + b*gradient */
void CalculateChange(void)
{
    for (int i = 0; i < MAXHIDE; i++)       /* hidden-to-output */
        for (int j = 0; j < MAXOUTPUT; j++)
            howchange[i][j] = a * howchange[i][j] + b * sout[j] * bpa.hide[i];
    for (int i = 0; i < MAXINPUT; i++)      /* input-to-hidden */
        for (int j = 0; j < MAXHIDE; j++)
            ihwchange[i][j] = a * ihwchange[i][j] + b * shide[j] * bpa.input[i];
}

/* apply the weight deltas */
void CalculateNewWeight(void)
{
    for (int i = 0; i < MAXHIDE; i++)
        for (int j = 0; j < MAXOUTPUT; j++)
            bpa.how[i][j] += howchange[i][j];
    for (int i = 0; i < MAXINPUT; i++)
        for (int j = 0; j < MAXHIDE; j++)
            bpa.ihw[i][j] += ihwchange[i][j];
}

void Test(void)
{
    printf("Enter the test input values:\n");
    for (int i = 0; i < MAXINPUT; i++)
        scanf("%lf", &bpa.input[i]);
    TestCalculateOut();
}
```
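For repeatable experiments one might preload the catalogue instead of typing it at the prompt; a hypothetical driver (LoadDemoCatalogue and its sample values are illustrations, not part of the original program):

```c
/* Fill the four-sample catalogue with the identity map on (0, 1),
 * which a sigmoid output can actually represent. */
void LoadDemoCatalogue(void)
{
    double xs[CA] = {0.1, 0.3, 0.6, 0.9};
    for (int k = 0; k < CA; k++) {
        CatalogueIn[k][0]  = xs[k];   /* single input (MAXINPUT == 1) */
        CatalogueOut[k][0] = xs[k];   /* target equals input */
    }
}
```

Calling `LoadDemoCatalogue()` in place of `InPutCatalogue()` then lets `camain()` run unattended.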

BP Neural Network C++ Source with Training and Test Data

Only fragments of this C++ listing survive; they are cleaned up and grouped below:

```cpp
#ifndef _BP_
#define _BP_
#include <iostream>
#include <cmath>
#include <cstdlib>
#include <string>
#include <ctime>
using namespace std;

class BP
{
    // ... (most members truncated in the original)
    double a = 1.7159;      // amplitude of the scaled tanh activation
    double b = 2.0 / 3.0;   // slope of the scaled tanh activation
};
#endif

// Derivative of the activation f(x) = a*tanh(b*x); only the body survives,
// so the function name here is hypothetical:
double BP::activationDerivative(double x)
{
    return -a * b * (pow(tanh(b * x), 2) - 1);   // = a*b*(1 - tanh^2(b*x))
}

// in: input data, out: expected output; returns the single-pass training error
double BP::turn(double* in, double* out)
{
    for (int i = 0; i < midNodeCount; i++)  // hidden-layer outputs
    {
        inMid[i] = 0.0;
        // ... (accumulation truncated in the original)
    }
    for (int i = 0; i < outNodeCount; i++)  // output-layer outputs
    {
        inOut[i] = 0.0;
        for (int j = 0; j < midNodeCount; j++)
            inOut[i] += outWeight[i][j] * Omid[j];
        Oout[i] = sigmoid(inOut[i] - midThreshold[i]);
    }
    double e = 0.0;
    // ... (error accumulation truncated in the original)

    // momentum update of the input-to-hidden weight deltas
    for (int i = 0; i < midNodeCount; i++)
        for (int j = 0; j < inNodeCount; j++)
            detaMidW[i][j] = detaMidW[i][j] * alpha + studyRate * detaMid[i] * in[j];
    // ...
}
```
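The constants in the fragment are not arbitrary: a = 1.7159 and b = 2/3 give the scaled hyperbolic-tangent activation recommended in LeCun's "Efficient BackProp", chosen so that f(±1) ≈ ±1, and the surviving derivative line follows directly from it:

$$f(x) = 1.7159\,\tanh\!\left(\tfrac{2}{3}x\right), \qquad f'(x) = 1.7159\cdot\tfrac{2}{3}\left(1 - \tanh^2\!\left(\tfrac{2}{3}x\right)\right) = -a\,b\left(\tanh^2(bx) - 1\right).$$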
