iCub-main
lmlibindirect.cpp
Go to the documentation of this file.
1 /*
2  * Copyright (C) 2007-2009 Arjan Gijsberts
3  * CopyPolicy: Released under the terms of the GNU GPL v2.0.
4  *
5  * Example code how to use the learningMachine library in an indirect manner.
6  */
#include <algorithm>
#include <cmath>
#include <iostream>
#include <stdexcept>

#include <yarp/sig/Vector.h>
#include <yarp/os/Property.h>
#include <yarp/math/Math.h>
#include <yarp/math/Rand.h>
16 
// Fully parenthesize both macro arguments and the whole expansion: the
// original `((a < b) ? a : b)` breaks when an argument is an expression
// whose operators bind more loosely than `<` (classic MIN-macro pitfall).
// Note the usual double-evaluation caveat still applies: each argument is
// evaluated twice, so avoid side-effecting arguments.
#define MIN(a, b) (((a) < (b)) ? (a) : (b))

// number of training and test samples
#define NO_TRAIN 1000
#define NO_TEST 1000
// bounds of the uniform noise added to training outputs
// (parenthesized so the negative literal composes safely in expressions)
#define NOISE_MIN (-0.05)
#define NOISE_MAX 0.05
23 
24 
25 using namespace iCub::learningmachine;
26 using namespace yarp::os;
27 using namespace yarp::sig;
28 using namespace yarp::math;
29 
// taken from LWPR example code
/**
 * Synthetic 2D "cross" benchmark function: the pointwise maximum of two
 * axis-aligned Gaussian ridges and a scaled isotropic Gaussian bump.
 *
 * @param x1 first input coordinate
 * @param x2 second input coordinate
 * @return max(exp(-10*x1^2), exp(-50*x2^2), 1.25*exp(-5*(x1^2+x2^2)))
 */
double cross(double x1, double x2) {
    x1 *= x1;
    x2 *= x2;
    const double a = std::exp(-10 * x1);
    const double b = std::exp(-50 * x2);
    const double c = 1.25 * std::exp(-5 * (x1 + x2));
    // std::max replaces the original hand-rolled nested ternaries
    return std::max(a, std::max(b, c));
}
39 
/**
 * Synthetic 2D benchmark target: the sine of the sum of the two inputs.
 *
 * @param x1 first input coordinate
 * @param x2 second input coordinate
 * @return sin(x1 + x2)
 */
double sin2d(double x1, double x2) {
    const double sum = x1 + x2;
    return std::sin(sum);
}
43 
44 void elementProd(const Vector& v1, Vector& v2) {
45  for(size_t i = 0; i < MIN(v1.size(), v2.size()); i++) {
46  v2[i] = v1[i] * v2[i];
47  }
48 }
49 
50 Vector elementDiv(const Vector& v, double d) {
51  Vector ret(v.size());
52  for(size_t i = 0; i < v.size(); i++) {
53  ret[i] = (d == 0.) ? v[i] : v[i] / d;
54  }
55  return ret;
56 }
57 
58 
59 std::pair<Vector, Vector> createSample() {
60  std::pair<Vector, Vector> sample;
61  sample.first.resize(2);
62  sample.second.resize(2);
63  sample.first[0] = Rand::scalar(-1, +1);
64  sample.first[1] = Rand::scalar(-1, +1);
65  sample.second[0] = sin2d(sample.first[0], sample.first[1]);
66  sample.second[1] = cross(sample.first[0], sample.first[1]);
67  return sample;
68 }
69 
70 /*
 * This example shows how LearningMachine classes can be used in an indirect
72  * manner in your code. In this context, this means that the YARP configuration
73  * mechanism is used and instances are of the abstract base type. This
74  * facilitates easy migration to other learning methods. Please see all
75  * direct/indirect/portable examples to have an idea which method suits your
76  * application best.
77  *
78  * Please keep in mind that the purpose is to demonstrate how to interface with
79  * the learningMachine library. The synthetic data used in this example is
80  * utterly useless.
81  */
82 
// Entry point: trains an RLS learner on RandomFeature-transformed synthetic
// samples, then reports train/test mean squared error per output dimension.
int main(int argc, char** argv) {
    // per-dimension MSE accumulators for the two outputs (sin2d, cross)
    Vector trainMSE(2);
    Vector testMSE(2);
    // elementwise bounds for the uniform noise added to training outputs
    Vector noise_min(2);
    Vector noise_max(2);

    std::cout << "LearningMachine library example (indirect)" << std::endl;

    // create Regularized Least Squares learner
    // we need pointers here!
    // (instance held through the abstract IMachineLearner interface, so the
    //  learning method can be swapped without touching the rest of the code)
    IMachineLearner* rls = new RLSLearner();
    // learner input dimension (250) must equal the transformer's cod below
    Property p("(dom 250) (cod 2) (lambda 0.5)");
    rls->configure(p);
    std::cout << "Learner:" << std::endl << rls->getInfo() << std::endl;

    // create Random Feature transformer
    // maps the 2D raw input into a 250-dimensional random feature space
    ITransformer* rf = new RandomFeature();
    p.fromString("(dom 2) (cod 250) (gamma 16.0)", true);
    rf->configure(p);
    std::cout << "Transformer:" << std::endl << rf->getInfo() << std::endl;

    // create and feed training samples
    // scalar assignment broadcasts the bound to both vector elements
    noise_min = NOISE_MIN;
    noise_max = NOISE_MAX;

    trainMSE = 0.0;
    for(int i = 0; i < NO_TRAIN; i++) {
        // create a new training sample
        std::pair<Vector, Vector> sample = createSample();

        // add some noise to output for training
        Vector noisyOutput = sample.second + Rand::vector(noise_min, noise_max);

        // transform input using RF
        Vector transInput = rf->transform(sample.first);

        // make prediction before feeding full sample
        // (i.e. online evaluation: error is measured on the not-yet-seen sample)
        Prediction prediction = rls->predict(transInput);
        //std::cout << "Predict: " << prediction.toString() << std::endl;
        // squared error per output dimension, accumulated into trainMSE
        Vector diff = prediction.getPrediction() - sample.second;
        elementProd(diff, diff);
        trainMSE = trainMSE + diff;

        // train on complete sample with noisy output
        rls->feedSample(transInput, noisyOutput);
    }
    // normalize accumulated squared error by the number of samples
    trainMSE = elementDiv(trainMSE, NO_TRAIN);
    std::cout << "Train MSE: " << trainMSE.toString() << std::endl;

    // predict test samples (no feedSample here: the machine is frozen)
    testMSE = 0.;
    for(int i = 0; i < NO_TEST; i++) {
        // create a new testing sample
        std::pair<Vector, Vector> sample = createSample();

        // transform input using RF
        Vector transInput = rf->transform(sample.first);

        // make prediction
        Prediction prediction = rls->predict(transInput);
        Vector diff = prediction.getPrediction() - sample.second;
        elementProd(diff, diff);
        //std::cout << "Sample: " << sample.input <<
        testMSE = testMSE + diff;
    }
    testMSE = elementDiv(testMSE, NO_TEST);
    std::cout << "Test MSE: " << testMSE.toString() << std::endl;

    // raw owning pointers were allocated with new above; release them here
    delete rls;
    delete rf;
}
155 
156 
A generalized interface for a learning machine for offline and online learning machines (e....
virtual bool configure(yarp::os::Searchable &config)
Change parameters.
virtual Prediction predict(const yarp::sig::Vector &input)=0
Ask the learning machine to predict the output for a given input.
virtual std::string getInfo()
Asks the learning machine to return a string containing information on its operation so far.
virtual void feedSample(const yarp::sig::Vector &input, const yarp::sig::Vector &output)=0
Provide the learning machine with an example of the desired mapping.
A class that provides a preprocessing interface, which can be used to preprocess the data samples tha...
Definition: ITransformer.h:57
virtual bool configure(yarp::os::Searchable &config)
Definition: ITransformer.h:168
virtual yarp::sig::Vector transform(const yarp::sig::Vector &input)
Transforms an input vector.
Definition: ITransformer.h:104
virtual std::string getInfo()
Asks the transformer to return a string containing statistics on its operation so far.
Definition: ITransformer.h:115
A class that represents a prediction result.
Definition: Prediction.h:44
yarp::sig::Vector getPrediction()
Accessor for the expected value of the prediction.
Definition: Prediction.h:106
Recursive Regularized Least Squares (a.k.a.
Definition: RLSLearner.h:46
Implementation of Random Feature preprocessing.
Definition: RandomFeature.h:45
exp(-x3 *T)]
#define NO_TEST
#define MIN(a, b)
int main(int argc, char **argv)
#define NOISE_MAX
double cross(double x1, double x2)
Vector elementDiv(const Vector &v, double d)
std::pair< Vector, Vector > createSample()
#define NOISE_MIN
void elementProd(const Vector &v1, Vector &v2)
double sin2d(double x1, double x2)
#define NO_TRAIN