// Author: John McCullock
// Date: 10-15-05
// Description: Elman Network Example 1.
//http://www.mnemstudio.org/neural-networks-elman.htm
#include <iostream>
#include <iomanip>
#include <cmath>
#include <string>
#include <ctime>
#include <cstdlib>

using namespace std;

const int maxTests = 10000;
const int maxSamples = 4;
const int inputNeurons = 6;
const int hiddenNeurons = 3;
const int outputNeurons = 6;
const int contextNeurons = 3;
const double learnRate = 0.2; //Rho.
const int trainingReps = 2000;

//beVector is the symbol used to start or end a sequence.
double beVector[inputNeurons] = {1.0, 0.0, 0.0, 0.0, 0.0, 0.0};
//                                       0    1    2    3    4    5
double sampleInput[3][inputNeurons] = {{0.0, 0.0, 0.0, 1.0, 0.0, 0.0},
                                       {0.0, 0.0, 0.0, 0.0, 0.0, 1.0},
                                       {0.0, 0.0, 1.0, 0.0, 0.0, 0.0}};
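//Each row of sampleInput is the one-hot code for one symbol, so the
//training sequence (beVector, the three rows, then beVector again)
//is 0, 3, 5, 2, 0 -- the string used throughout this example.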
//Input to Hidden weights (with biases).
double wih[inputNeurons + 1][hiddenNeurons];

//Context to Hidden weights (with biases).
double wch[contextNeurons + 1][hiddenNeurons];

//Hidden to Output weights (with biases).
double who[hiddenNeurons + 1][outputNeurons];

//Hidden to Context weights (no biases).
double whc[outputNeurons + 1][contextNeurons];
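//Note: whc is given fixed values (0.5) in assignRandomWeights() but is
//never referenced in feedForward(); the context units simply receive a
//direct copy of the hidden activations, the standard Elman arrangement.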
//Activations.
double inputs[inputNeurons];
double hidden[hiddenNeurons];
double target[outputNeurons];
double actual[outputNeurons];
double context[contextNeurons];

//Unit errors.
double erro[outputNeurons];
double errh[hiddenNeurons];

void ElmanNetwork();
void testNetwork();
void feedForward();
void backPropagate();
void assignRandomWeights();
int getRandomNumber();
double sigmoid(double val);
double sigmoidDerivative(double val);

int main(){
cout << fixed << setprecision(3) << endl; //Format all the output.
srand((unsigned)time(0)); //Seed random number generator with system time.
ElmanNetwork();
testNetwork();
return 0;
}

void ElmanNetwork(){
double err;
int sample = 0;
int iterations = 0;
bool stopLoop = false;

assignRandomWeights();

//Train the network.
do {
if(sample == 0){
for(int i = 0; i <= (inputNeurons - 1); i++){
inputs[i] = beVector[i];
} // i
} else {
for(int i = 0; i <= (inputNeurons - 1); i++){
inputs[i] = sampleInput[sample - 1][i];
} // i
}

//After the samples are entered into the input units, the samples are
//then offset by one and entered into the target-output units for
//later comparison.
if(sample == maxSamples - 1){
for(int i = 0; i <= (inputNeurons - 1); i++){
target[i] = beVector[i];
} // i
} else {
for(int i = 0; i <= (inputNeurons - 1); i++){
target[i] = sampleInput[sample][i];
} // i
}

feedForward();

err = 0.0;
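//The loop below accumulates the squared output errors, so err ends up
//as half the sum of squared errors for this pattern. Note that in this
//example err is computed but never used: training stops purely on the
//iteration count (trainingReps).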
for(int i = 0; i <= (outputNeurons - 1); i++){
err += (target[i] - actual[i]) * (target[i] - actual[i]);
} // i
err = 0.5 * err;

if(iterations > trainingReps){
stopLoop = true;
}
iterations += 1;

backPropagate();

sample += 1;
if(sample == maxSamples){
sample = 0;
}
} while(stopLoop == false);

cout << "Iterations = " << iterations << endl;
}

void testNetwork(){
int index;
int randomNumber, predicted;
bool stopTest, stopSample, successful;

//Test the network with random input patterns.
stopTest = false;
for(int test = 0; test <= maxTests; test++){

//Enter the beginning string.
inputs[0] = 1.0;
inputs[1] = 0.0;
inputs[2] = 0.0;
inputs[3] = 0.0;
inputs[4] = 0.0;
inputs[5] = 0.0;
cout << "(0) "; feedForward(); stopSample = false;
successful = false;
index = 0; //Note: if a sequence fails, index starts from 0 again.

/*The nature of this kind of recurrent network is easier to understand
simply by referring to each unit's position in serial order (i.e.; Y0, Y1, Y2, Y3, ...).
So for the purpose of this illustration, I'll just use strings of numbers like: 0, 3, 5, 2, 0,
where 0 refers to Y0, 3 refers to Y3, 5 refers to Y5, etc.
Each string begins and ends with a terminal symbol; I'll use 0 for this example.*/
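//Test procedure: starting from the begin symbol, the network's output is
//compared against a randomly chosen next symbol. If the random symbol
//matches the unit the network predicted, the sequence continues; five
//matches in a row count as a success.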

randomNumber = 0;
predicted = 0;

do {
for(int i = 0; i <= 5; i++){
cout << actual[i] << " ";
if(actual[i] >= 0.3){
//The output unit with the highest value (here, over 0.3)
//is the network's predicted unit that it expects to appear
//in the next input vector.
//For example, if the 3rd output unit has the highest value,
//the network expects the 3rd unit in the next input to
//be 1.0
//If the actual value isn't what it expected, the random
//sequence has failed, and a new test sequence begins.
predicted = i;
}
} // i
cout << "\n"; randomNumber = getRandomNumber(); //Enter a random letter. index += 1; //Increment to the next position.
if(index == 5){
stopSample = true;
} else {
cout << "(" << randomNumber << ") ";
} for( i = 0; i <= 5; i++){
if(i == randomNumber){ //Note: the step succeeds when i == randomNumber and i == predicted.
inputs[i] = 1.0;
if(i == predicted){
successful = true;
} else {
//Failure. Reset the flag, stop this sample and try a new sample.
successful = false;
stopSample = true;
}
} else {
inputs[i] = 0.0;
}
} // i

feedForward();

//Enter another letter into this sample sequence.
} while(stopSample == false);

//Note: the test loop stops only once a full sequence of five symbols
//has matched the network's predictions.
if((index > 4) && (successful == true)){
//If the random sequence happens to be in the correct order,
//the network reports success.
cout << "Success." << endl;
cout << "Completed " << test << " tests." << endl;
stopTest = true;
break;
} else {
cout << "Failed." << endl;
if(test == maxTests){
stopTest = true;
cout << "Completed " << test << " tests with no success." << endl;
break;
}
}
} // Test
}

void feedForward(){
double sum;

//Calculate input and context connections to hidden layer.
for(int hid = 0; hid <= (hiddenNeurons - 1); hid++){
sum = 0.0;
//from input to hidden...
for(int inp = 0; inp <= (inputNeurons - 1); inp++){
sum += inputs[inp] * wih[inp][hid];
} // inp
//from context to hidden...
for(int con = 0; con <= (contextNeurons - 1); con++){
sum += context[con] * wch[con][hid];
} // con
//Add in bias.
sum += wih[inputNeurons][hid];
sum += wch[contextNeurons][hid];
hidden[hid] = sigmoid(sum);
} // hid

//Calculate the hidden to output layer.
for(int out = 0; out <= (outputNeurons - 1); out++){
sum = 0.0;
for(int hid = 0; hid <= (hiddenNeurons - 1); hid++){
sum += hidden[hid] * who[hid][out];
} // hid

//Add in bias.
sum += who[hiddenNeurons][out];
actual[out] = sigmoid(sum);
} // out

//Copy outputs of the hidden to context layer. This copy is what makes
//the network recurrent: on the next forward pass, the hidden layer sees
//its own previous activations through wch.
for(int con = 0; con <= (contextNeurons - 1); con++){
context[con] = hidden[con];
} // con
}

void backPropagate(){
//Calculate the output layer error (step 3 for output cell).
for(int out = 0; out <= (outputNeurons - 1); out++){
erro[out] = (target[out] - actual[out]) * sigmoidDerivative(actual[out]);
} // out

//Calculate the hidden layer error (step 3 for hidden cell).
for(int hid = 0; hid <= (hiddenNeurons - 1); hid++){
errh[hid] = 0.0;
for(int out = 0; out <= (outputNeurons - 1); out++){
errh[hid] += erro[out] * who[hid][out];
} // out
errh[hid] *= sigmoidDerivative(hidden[hid]);
} // hid
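//Both weight updates below follow the delta rule, w += rho * delta * activation,
//with delta = erro[] for the output layer and delta = errh[] for the hidden
//layer (learnRate is the rho named at the top of the file).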
//Update the weights for the output layer (step 4).
for(int out = 0; out <= (outputNeurons - 1); out++){
for(int hid = 0; hid <= (hiddenNeurons - 1); hid++){
who[hid][out] += (learnRate * erro[out] * hidden[hid]);
} // hid
//Update the bias.
who[hiddenNeurons][out] += (learnRate * erro[out]);
} // out

//Update the weights for the hidden layer (step 4).
for(int hid = 0; hid <= (hiddenNeurons - 1); hid++){
for(int inp = 0; inp <= (inputNeurons - 1); inp++){
wih[inp][hid] += (learnRate * errh[hid] * inputs[inp]);
} // inp
//Update the bias.
wih[inputNeurons][hid] += (learnRate * errh[hid]);
} // hid
}

void assignRandomWeights(){
for(int inp = 0; inp <= inputNeurons; inp++){
for(int hid = 0; hid <= (hiddenNeurons - 1); hid++){
//Assign a random weight value between -0.5 and 0.5
wih[inp][hid] = -0.5 + double(rand()/(RAND_MAX + 1.0));
} // hid
} // inp

for(int con = 0; con <= contextNeurons; con++){
for(int hid = 0; hid <= (hiddenNeurons - 1); hid++){
//Assign a random weight value between -0.5 and 0.5
wch[con][hid] = -0.5 + double(rand()/(RAND_MAX + 1.0));
} // hid
} // con

for(int hid = 0; hid <= hiddenNeurons; hid++){
for(int out = 0; out <= (outputNeurons - 1); out++){
//Assign a random weight value between -0.5 and 0.5
who[hid][out] = -0.5 + double(rand()/(RAND_MAX + 1.0));
} // out
} // hid

for(int out = 0; out <= outputNeurons; out++){
for(int con = 0; con <= (contextNeurons - 1); con++){
//These are all fixed weights set to 0.5
whc[out][con] = 0.5;
} // con
} // out
}

int getRandomNumber(){
//Generate a random value between 0 and 5.
return int(6.0 * rand() / (RAND_MAX + 1.0));
}

double sigmoid(double val){
return (1.0 / (1.0 + exp(-val)));
}

double sigmoidDerivative(double val){
//Note: val is expected to already be a sigmoid output, so this computes
//s'(x) = s(x) * (1 - s(x)) without recomputing s(x).
return (val * (1.0 - val));
}
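To build and run the example, a minimal sketch (assuming a standard C++ compiler such as g++, with the listing saved as elman.cpp):

g++ -o elman elman.cpp
./elman

The program prints the number of training iterations, then one line of output activations per test step, and finally either "Success." with the number of tests completed, or a failure report after the last test.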
