//+------------------------------------------------------------------+
//|                                               MLP_cannonball.mq4 |
//|                      Copyright © 2008, MetaQuotes Software Corp. |
//|                                        http://www.metaquotes.net |
//+------------------------------------------------------------------+
#property copyright "Copyright © 2008, MetaQuotes Software Corp."
#property link "http://www.metaquotes.net"
/* This program implements a simple multilayer perceptron (MLP) to solve the
   cannonball problem (regression).
   Adapted by Sebastien Marcel (2003-2004) from D. Collobert.
   The goal is to estimate the position X of a bullet at time t (fixed),
   given its initial speed (v) and angle (a):

   y
   ^
   |            . X
   |          .
   |        .
   |      .
   |     .
   |    .
   |   .
   |  .
   | .
   |. (v,a)
   +--------------------------> x
*/
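// The trajectory follows the standard projectile equations implemented below
// in xpos()/ypos():
//   x(t) = v*t*cos(a*PI/180)
//   y(t) = -0.5*g*t^2 + v*t*sin(a*PI/180)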
#define PI 3.14159265358979323846
#define RAND_MAX 32767.0
//
// Datasets
#define N_PATTERNS_TRAIN 500
#define N_PATTERNS_TEST 500
#define N_PATTERNS_TRAIN_TEST 1000 //N_PATTERNS_TRAIN + N_PATTERNS_TEST
// w1[N_HU1][IN1]=>w1[4][3]
//    H  In
//    |  |
// w1[0][0] w1[0][1] w1[0][2]
// w1[1][0] w1[1][1] w1[1][2]
// w1[2][0] w1[2][1] w1[2][2]
// w1[3][0] w1[3][1] w1[3][2]
// forward train:
// aHidden[0] = xBias;
// aHidden[1] = xBias*w1[1][0] + (In0)*w1[1][1] + (In1)*w1[1][2];
// aHidden[2] = xBias*w1[2][0] + (In0)*w1[2][1] + (In1)*w1[2][2];
// aHidden[3] = xBias*w1[3][0] + (In0)*w1[3][1] + (In1)*w1[3][2];
// yHidden[0] = xBias;
// yHidden[1] = f(aHidden[1]);
// yHidden[2] = f(aHidden[2]);
// yHidden[3] = f(aHidden[3]);
//
// w2[OUT][N_HU1]=>w2[2][4]
//
// w2[0][0] w2[0][1] w2[0][2] w2[0][3]
// w2[1][0] w2[1][1] w2[1][2] w2[1][3]
//    |  |
//   Out H
// forward train:
// aOutput[0] = 0.0;
// aOutput[1] = 0.0;
// aOutput[0] = aOutput[0] + yHidden[0]*w2[0][0];
// aOutput[1] = aOutput[1] + yHidden[0]*w2[1][0];
// aOutput[0] = aOutput[0] + yHidden[1]*w2[0][1];
// aOutput[1] = aOutput[1] + yHidden[1]*w2[1][1];
// aOutput[0] = aOutput[0] + yHidden[2]*w2[0][2];
// aOutput[1] = aOutput[1] + yHidden[2]*w2[1][2];
// aOutput[0] = aOutput[0] + yHidden[3]*w2[0][3];
// aOutput[1] = aOutput[1] + yHidden[3]*w2[1][3];
//
// out0 = f(aOutput[0]);
// out1 = f(aOutput[1]);
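//
// (The worked example above uses 2 inputs and 3 hidden units for readability;
// the defines below configure the actual network with IN = 8 inputs and
// N_HU = 12 hidden units, each layer extended by one constant bias unit.)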
#define IN 8
#define IN1 9 //IN+1
#define OUT 2
#define N_HU 12
#define N_HU1 13 //N_HU + 1
//*****************************
// Inputs
//
double DataIn0[N_PATTERNS_TRAIN_TEST]; // speed of bullet (used for the target)
double DataIn1[N_PATTERNS_TRAIN_TEST]; // angle of bullet (used for the target)
double DataIn2[N_PATTERNS_TRAIN_TEST]; // random speed (distractor input)
double DataIn3[N_PATTERNS_TRAIN_TEST]; // random angle (distractor input)
double DataIn4[N_PATTERNS_TRAIN_TEST]; // random speed (distractor input)
double DataIn5[N_PATTERNS_TRAIN_TEST]; // random angle (distractor input)
double DataIn6[N_PATTERNS_TRAIN_TEST]; // random speed (distractor input)
double DataIn7[N_PATTERNS_TRAIN_TEST]; // random angle (distractor input)
// X[N_PATTERNS_TRAIN + N_PATTERNS_TEST];
//*****************************
// Targets
//
double DataOut0[N_PATTERNS_TRAIN_TEST]; // x position at time t
double DataOut1[N_PATTERNS_TRAIN_TEST]; // y position at time t
// Y[N_PATTERNS_TRAIN + N_PATTERNS_TEST];
//*****************************
// to create the data
//
double vmax;
double gravity;
//*****************************
// to train the MLP
//
double lambda = 0.01; // learning rate
double mu = 0.6; // momentum rate
double mse_min = 0.001; // target mean squared error (training stops below this)
int max_iterations = 500; // maximum number of iterations
//*****************************
// MLP data
// weights between hidden neurons and inputs
double w1[N_HU1][IN1];
double w1old[N_HU1][IN1];
// weights between outputs and hidden neurons
double w2[OUT][N_HU1];
double w2old[OUT][N_HU1];
// values of the integration function (weighted sums / pre-activations)
double aHidden[N_HU1];
double aOutput[OUT];
// values of transfer function
double yHidden[N_HU1];
// constant value of bias
double xBias = 1.0;
int init()
{
   vmax = 1.0/10;     // maximum initial speed of the bullet
   gravity = 1.0/200; // gravity constant used by ypos()
return(0);
}
//+------------------------------------------------------------------+
//| script program start function |
//+------------------------------------------------------------------+
int start()
{
//----
initrand();
main();
//----
return(0);
}
//+------------------------------------------------------------------+
//*****************************
// Sigmoid transfer function
//
double f(double x)
{
return (1.0 /(1.0 + MathExp(-x)));
}
//*****************************
// Derivative of Sigmoid
//
double f_prime(double x)
{
double z = MathExp(-x);
double one_plus_z = 1.0 + z;
return (z / (one_plus_z * one_plus_z));
}
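//*****************************
// Note: the same derivative can be computed from the sigmoid output itself,
// using the identity f'(x) = f(x)*(1 - f(x)). A minimal alternative sketch,
// assuming the caller passes the already-computed sigmoid output rather than
// the pre-activation (this helper is an illustration only; the code below
// keeps calling f_prime() on aOutput/aHidden):
double f_prime_from_output(double y)
{
   // y is assumed to be f(x); then f'(x) = y*(1 - y), saving a MathExp() call
   return (y * (1.0 - y));
}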
//*****************************
// Compute the x position of bullet at time t
//
double xpos(double t, double v, double teta)
{
return( v * t * MathCos(teta*PI/180.0));
}
//*****************************
// Compute the y position of bullet at time t
//
double ypos(double t, double v, double teta)
{
return( -0.5 * gravity * t * t + v * t * MathSin(teta*PI/180.0));
}
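// Worked example with the values set in init() (vmax = 0.1, gravity = 0.005):
// for v = 0.1, teta = 45 and the fixed time t = 10,
//   xpos = 0.1*10*cos(45 deg)                   ~ 0.707
//   ypos = -0.5*0.005*10^2 + 0.1*10*sin(45 deg) ~ -0.25 + 0.707 ~ 0.457
// so the (x,y) targets fall inside (0,1), the output range of the sigmoid.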
//*****************************
// Compute the MSE
//
double MSE(
double a0,
double a1,
double b0,
double b1
)
{
return (0.5*((a0 - b0)*(a0 - b0) + (a1 - b1)*(a1 - b1)));
}
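// The 0.5 factor makes the derivative of this error w.r.t. each estimate
// simply (estimate - target), which is exactly the term backward() uses
// when it forms Goutput[].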
//*****************************
// Create the MLP
//
void createMLP()
{
int i, j;
// weights between hidden neurons and inputs
for (i = 0; i<IN1; i++)
{
for (j = 0; j< N_HU1; j++)
{
w1[j][i] = Random(-1.0, 1.0);
w1old[j][i] = w1[j][i];
}
}
// weights between outputs and hidden neurons
for (i = 0; i<OUT; i++)
{
for (j = 0; j< N_HU1; j++)
{
w2[i][j] = Random(-1.0, 1.0);
w2old[i][j] = w2[i][j];
}
}
}
//*****************************
// Create the datasets
//
void createDatasets()
{
int i;
i = 0;
while (i < (N_PATTERNS_TRAIN + N_PATTERNS_TEST))
{
DataIn0[i] = (Random(0.0, vmax)) / vmax;
DataIn1[i] = (Random(0.0, 90.0)) / 90.0;
DataIn2[i] = (Random(0.0, vmax)) / vmax;
DataIn3[i] = (Random(0.0, 90.0)) / 90.0;
DataIn4[i] = (Random(0.0, vmax)) / vmax;
DataIn5[i] = (Random(0.0, 90.0)) / 90.0;
DataIn6[i] = (Random(0.0, vmax)) / vmax;
DataIn7[i] = (Random(0.0, 90.0)) / 90.0;
DataOut0[i] = xpos(10, DataIn0[i] * vmax, (DataIn1[i])*90.0);
DataOut1[i] = ypos(10, DataIn0[i] * vmax, (DataIn1[i])*90.0);
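      // Rejection sampling: keep the pattern only if the bullet is still
      // above the ground (y > 0) at the fixed time t = 10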
if (DataOut1[i] > 0.0) i = i + 1;
}
}
//*****************************
// Forward the input in the MLP
//
void forward(
double In0,
double In1,
double In2,
double In3,
double In4,
double In5,
double In6,
double In7,
double &out0,
double &out1
)
{
int i;
   aHidden[0] = xBias; // index 0 is the bias unit: constant output, no transfer function applied
   yHidden[0] = xBias;
for (i = 1; i <= N_HU; i++)
{
aHidden[i] = xBias*w1[i][0] + (In0)*w1[i][1] +
(In1)*w1[i][2] +
(In2)*w1[i][3] +
(In3)*w1[i][4] +
(In4)*w1[i][5] +
(In5)*w1[i][6] +
(In6)*w1[i][7] +
(In7)*w1[i][8] ;
yHidden[i] = f(aHidden[i]);
}
aOutput[0] = 0.0;
aOutput[1] = 0.0;
for (i = 0; i <= N_HU; i++)
{
aOutput[0] = aOutput[0] + yHidden[i]*w2[0][i];
aOutput[1] = aOutput[1] + yHidden[i]*w2[1][i];
}
out0 = f(aOutput[0]);
out1 = f(aOutput[1]);
}
//*****************************
// Backward (back-propagate the gradient of error)
//
void backward(
double Yestimate0,
double Yestimate1,
double Outwant0,
double Outwant1,
double In0,
double In1,
double In2,
double In3,
double In4,
double In5,
double In6,
double In7,
double lambda,
double mu
)
{
double wnew, Goutput[OUT], Ghidden[N_HU1];
int i,j;
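   // Back-propagation of E = 0.5*((y0-t0)^2 + (y1-t1)^2):
   //   Goutput[k] = dE/daOutput[k] = (Yestimate_k - Outwant_k)*f_prime(aOutput[k])
   //   Ghidden[i] = dE/daHidden[i] = (Goutput[0]*w2[0][i] + Goutput[1]*w2[1][i])*f_prime(aHidden[i])
   // Each weight then takes a gradient-descent step with momentum:
   //   w_new = w - lambda*(input feeding the weight)*G + mu*(w - w_old)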
//
Goutput[0] = (Yestimate0 - Outwant0) * f_prime(aOutput[0]);
Goutput[1] = (Yestimate1 - Outwant1) * f_prime(aOutput[1]);
//
for (i = 0; i<=N_HU; i++)
Ghidden[i] = (Goutput[0]*w2[0][i] + Goutput[1]*w2[1][i])*f_prime(aHidden[i]);
//
for (j = 0; j<OUT;j++)
{
for (i = 0; i<=N_HU; i++)
{
wnew = w2[j][i] - lambda * yHidden[i] * Goutput[j] + mu * (w2[j][i] - w2old[j][i]);
w2old[j][i] = w2[j][i];
w2[j][i] = wnew;
}
}
//
for (i = 0; i<=N_HU; i++)
{
wnew = w1[i][0] - lambda * xBias * Ghidden[i] + mu * (w1[i][0] - w1old[i][0]);
w1old[i][0] = w1[i][0];
w1[i][0] = wnew;
wnew = w1[i][1] - lambda * In0 * Ghidden[i] + mu * (w1[i][1] - w1old[i][1]);
w1old[i][1] = w1[i][1];
w1[i][1] = wnew;
wnew = w1[i][2] - lambda * In1 * Ghidden[i] + mu * (w1[i][2] - w1old[i][2]);
w1old[i][2] = w1[i][2];
w1[i][2] = wnew;
wnew = w1[i][3] - lambda * In2 * Ghidden[i] + mu * (w1[i][3] - w1old[i][3]);
w1old[i][3] = w1[i][3];
w1[i][3] = wnew;
wnew = w1[i][4] - lambda * In3 * Ghidden[i] + mu * (w1[i][4] - w1old[i][4]);
w1old[i][4] = w1[i][4];
w1[i][4] = wnew;
wnew = w1[i][5] - lambda * In4 * Ghidden[i] + mu * (w1[i][5] - w1old[i][5]);
w1old[i][5] = w1[i][5];
w1[i][5] = wnew;
wnew = w1[i][6] - lambda * In5 * Ghidden[i] + mu * (w1[i][6] - w1old[i][6]);
w1old[i][6] = w1[i][6];
w1[i][6] = wnew;
wnew = w1[i][7] - lambda * In6 * Ghidden[i] + mu * (w1[i][7] - w1old[i][7]);
w1old[i][7] = w1[i][7];
w1[i][7] = wnew;
wnew = w1[i][8] - lambda * In7 * Ghidden[i] + mu * (w1[i][8] - w1old[i][8]);
w1old[i][8] = w1[i][8];
w1[i][8] = wnew;
}
}
//*****************************
// Stochastic gradient training (one call = one epoch over the first P patterns)
//
double train(int P)
{
double Yestimate0;
double Yestimate1;
double mse_;
double mse_total;
mse_total = 0.0;
// For each train patterns
for(int p = 0 ; p < P ; p++)
{
// Forward current train pattern into the MLP
forward(
//X[p],
DataIn0[p],
DataIn1[p],
DataIn2[p],
DataIn3[p],
DataIn4[p],
DataIn5[p],
DataIn6[p],
DataIn7[p],
//&Yestimate
Yestimate0,
Yestimate1
);
// Computes the MSE
mse_ = MSE(
//Y[p],
DataOut0[p],
DataOut1[p],
//Yestimate
Yestimate0,
Yestimate1
);
// Accumulate the MSE
mse_total = mse_total + mse_;
// Backward MSE gradient
backward(
//Yestimate,
Yestimate0,
Yestimate1,
//Y[p],
DataOut0[p], //Outwant
DataOut1[p], //Outwant
//X[p],
DataIn0[p],
DataIn1[p],
DataIn2[p],
DataIn3[p],
DataIn4[p],
DataIn5[p],
DataIn6[p],
DataIn7[p],
lambda,
mu
);
}
   // Return the train MSE normalized by the number of patterns
   return (mse_total / P);
}
//
int main()
{
double mse_train;
double mse_test;
double mse_;
double Yestimate0;
double Yestimate1;
//*****************************
createMLP();
//*****************************
createDatasets();
//*****************************
//for (int i = 0; i< N_PATTERNS_TRAIN; i++) printf(" TRN: x=[%f %f] y=[%f %f]\n", X[i].v, X[i].a, Y[i].x, Y[i].y);
//for (int i = N_PATTERNS_TRAIN; i<(N_PATTERNS_TRAIN + N_PATTERNS_TEST); i++) printf(" TST: x=[%f %f] y=[%f %f]\n", X[i].v, X[i].a, Y[i].x, Y[i].y);
Print("Stochastic gradient training:");
//
int pf_train = FileOpen("mse_train_mt4_in8.txt",FILE_CSV|FILE_WRITE,' ');//FileOpen("mse_train.txt", "w");
int pf_test = FileOpen("mse_test_mt4_in8.txt",FILE_CSV|FILE_WRITE,' ');//FileOpen("mse_test.txt", "w");
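   // (In MetaTrader 4, FileOpen() creates these files inside the terminal's
   // sandboxed files directory, e.g. MQL4\Files in current builds.)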
int iter;
for(iter = 1 ; iter <= max_iterations ; iter++)
{
//*****************************
// train
//*****************************
mse_train = train(N_PATTERNS_TRAIN);
FileWrite(pf_train,mse_train);
//*****************************
// test
//*****************************
mse_test = 0.0;
// For each test pattern
      // 0                  N_PATTERNS_TRAIN       (N_PATTERNS_TRAIN + N_PATTERNS_TEST)
      // +------------------+--------------------------------------------+
      //      train set                      test set
for(int i = N_PATTERNS_TRAIN ; i < (N_PATTERNS_TRAIN + N_PATTERNS_TEST) ; i++)
{
Yestimate0 = 0.0;
Yestimate1 = 0.0;
// forward current pattern into the MLP
//forward(X[i], &Yestimate);
forward(
//X[p],
DataIn0[i],
DataIn1[i],
DataIn2[i],
DataIn3[i],
DataIn4[i],
DataIn5[i],
DataIn6[i],
DataIn7[i],
//&Yestimate
Yestimate0,
Yestimate1
);
// computes MSE
//mse_ = MSE(Y[i], Yestimate);
mse_ = MSE(
//Y[p],
DataOut0[i],
DataOut1[i],
//Yestimate
Yestimate0,
Yestimate1
);
// accumulate MSE
mse_test += mse_;
}
// Normalize the MSE
mse_test /= N_PATTERNS_TEST;
FileWrite(pf_test,mse_test);
Print("->",iter);
//fflush(stdout);
      if (mse_train < mse_min) break; // early stop once the target train MSE is reached
}
//Print("\n");
//
FileClose(pf_test);
FileClose(pf_train);
//*****************************
// Print info about training
//
Print("Number of iterations = ", iter);
Print("Final MSE train = ", mse_train);
Print("Final MSE test = ", mse_test);
Print("End of program.");
return (0);
}
//use this first function to seed the random number generator,
//call this before any of the other functions
void initrand()
{
//srand((unsigned)(time(0)));
MathSrand(TimeLocal());
}
//generates a pseudo-random double between 0.0 and 0.999...
double randdouble0()
{
return (MathRand()/((RAND_MAX)+1));
}
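// (MathRand() returns an integer in [0, 32767], so dividing by RAND_MAX + 1
// maps the result into [0, 1).)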
//generates a pseudo-random double between min and max
double randdouble2(double min, double max)
{
if (min>max)
{
return (randdouble0()*(min-max)+max);
}
else
{
return (randdouble0()*(max-min)+min);
}
}
// Random generator
double Random(double inf, double sup)
{
return(randdouble2(inf,sup));
}