OpenNN  2.2
Open Neural Networks Library
inverse_sum_squared_error.cpp
/****************************************************************************************************************/
/*                                                                                                              */
/*   OpenNN: Open Neural Networks Library                                                                      */
/*   www.artelnics.com/opennn                                                                                   */
/*                                                                                                              */
/*   I N V E R S E   S U M   S Q U A R E D   E R R O R   C L A S S                                              */
/*                                                                                                              */
/*   Roberto Lopez                                                                                              */
/*   Artelnics - Making intelligent use of data                                                                 */
/*                                                                                                              */
/****************************************************************************************************************/


// OpenNN includes

#include "inverse_sum_squared_error.h"


namespace OpenNN
{

// DEFAULT CONSTRUCTOR

/// Default constructor.
/// It creates an inverse sum squared error performance term not associated to any neural network, mathematical model or data set.
/// It also initializes all the rest of class members to their default values.

InverseSumSquaredError::InverseSumSquaredError(void) : PerformanceTerm()
{
   set_default();
}

// NEURAL NETWORK CONSTRUCTOR

/// Neural network constructor.
/// It creates an inverse sum squared error performance term associated to a neural network object, but not to a mathematical model or a data set.
/// It also initializes all the rest of class members to their default values.

InverseSumSquaredError::InverseSumSquaredError(NeuralNetwork* new_neural_network_pointer)
 : PerformanceTerm(new_neural_network_pointer)
{
   set_default();
}


// GENERAL CONSTRUCTOR

/// General constructor.
/// It creates an inverse sum squared error performance term associated to a neural network, a mathematical model and a data set.
/// It also initializes all the rest of class members to their default values.

InverseSumSquaredError::InverseSumSquaredError(NeuralNetwork* new_neural_network_pointer, MathematicalModel* new_mathematical_model_pointer, DataSet* new_data_set_pointer)
 : PerformanceTerm(new_neural_network_pointer, new_mathematical_model_pointer, new_data_set_pointer)
{
   set_default();
}

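// Usage sketch (illustrative, not part of the original source): constructing this performance
// term from application objects. The variable names below and the MyMathematicalModel class
// are hypothetical; any concrete MathematicalModel subclass suited to the inverse problem
// would take its place.
//
//    NeuralNetwork neural_network(1, 3, 1);
//    MyMathematicalModel mathematical_model;
//    DataSet data_set;
//
//    InverseSumSquaredError isse(&neural_network, &mathematical_model, &data_set);
//
//    const double performance = isse.calculate_performance();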

// XML CONSTRUCTOR

/// XML constructor.
/// It creates an inverse sum squared error performance term and loads its members from a XML document.
/// @param inverse_sum_squared_error_document TinyXML document with the object members.

InverseSumSquaredError::InverseSumSquaredError(const tinyxml2::XMLDocument& inverse_sum_squared_error_document)
 : PerformanceTerm(inverse_sum_squared_error_document)
{
   set_default();
}


// DESTRUCTOR

/// Destructor.

InverseSumSquaredError::~InverseSumSquaredError(void)
{
}


// METHODS

// const UnknownsMethod& get_unknowns_method(void) const method

/// Returns the method for entering the unknown values or functions into the mathematical model.

const InverseSumSquaredError::UnknownsMethod& InverseSumSquaredError::get_unknowns_method(void) const
{
   return(unknowns_method);
}


// std::string write_unknowns_method(void) const method

/// Returns a string with the name of the method for entering the unknown values or functions into the mathematical model.

std::string InverseSumSquaredError::write_unknowns_method(void) const
{
   switch(unknowns_method)
   {
      case LookUpTable:
      {
         return("LookUpTable");
      }
      break;

      case IndependentParametersValues:
      {
         return("IndependentParametersValues");
      }
      break;

      default:
      {
         std::ostringstream buffer;

         buffer << "OpenNN Exception: InverseSumSquaredError class.\n"
                << "std::string write_unknowns_method(void) const method.\n"
                << "Unknown unknowns method.\n";

         throw std::logic_error(buffer.str());
      }
      break;
   }
}


// void set_unknowns_method(const UnknownsMethod&) method

/// Sets a new method for entering the unknown values or functions into the mathematical model.
/// @param new_unknowns_method Unknowns method (LookUpTable or IndependentParametersValues).

void InverseSumSquaredError::set_unknowns_method(const UnknownsMethod& new_unknowns_method)
{
   unknowns_method = new_unknowns_method;
}


// void set_unknowns_method(const std::string&) method

/// Sets a new method for entering the unknown values or functions into the mathematical model from a string.
/// @param new_unknowns_method String with the name of the unknowns method ("LookUpTable" or "IndependentParametersValues").

void InverseSumSquaredError::set_unknowns_method(const std::string& new_unknowns_method)
{
   if(new_unknowns_method == "LookUpTable")
   {
      set_unknowns_method(LookUpTable);
   }
   else if(new_unknowns_method == "IndependentParametersValues")
   {
      set_unknowns_method(IndependentParametersValues);
   }
   else
   {
      std::ostringstream buffer;

      buffer << "OpenNN Exception: InverseSumSquaredError class.\n"
             << "void set_unknowns_method(const std::string&) method.\n"
             << "Unknown unknowns method: " << new_unknowns_method << ".\n";

      throw std::logic_error(buffer.str());
   }
}

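// Usage sketch (illustrative, not part of the original source; isse denotes a hypothetical
// InverseSumSquaredError object): both setter overloads are equivalent, and the string form
// is convenient when the method name comes from an XML document or user input.
//
//    isse.set_unknowns_method(InverseSumSquaredError::LookUpTable);
//    isse.set_unknowns_method("IndependentParametersValues");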

// void set_default(void) method

/// Sets the default values of this object:
/// <ul>
/// <li> Unknowns method: IndependentParametersValues.
/// <li> Display: true.
/// </ul>

void InverseSumSquaredError::set_default(void)
{
   unknowns_method = IndependentParametersValues;

   display = true;
}


// void check(void) const method

/// Checks that there are a neural network, a mathematical model and a data set associated to this performance term.
/// If any of these conditions does not hold, an exception is thrown.

void InverseSumSquaredError::check(void) const
{
   std::ostringstream buffer;

   // Neural network stuff

   if(!neural_network_pointer)
   {
      buffer << "OpenNN Exception: InverseSumSquaredError class.\n"
             << "void check(void) const method.\n"
             << "Pointer to neural network is NULL.\n";

      throw std::logic_error(buffer.str());
   }

   const MultilayerPerceptron* multilayer_perceptron_pointer = neural_network_pointer->get_multilayer_perceptron_pointer();

   if(!multilayer_perceptron_pointer)
   {
      buffer << "OpenNN Exception: InverseSumSquaredError class.\n"
             << "void check(void) const method.\n"
             << "Pointer to multilayer perceptron is NULL.\n";

      throw std::logic_error(buffer.str());
   }

   const size_t inputs_number = multilayer_perceptron_pointer->get_inputs_number();
   const size_t outputs_number = multilayer_perceptron_pointer->get_outputs_number();

   if(inputs_number == 0)
   {
      buffer << "OpenNN Exception: InverseSumSquaredError class.\n"
             << "void check(void) const method.\n"
             << "Number of inputs in multilayer perceptron object is zero.\n";

      throw std::logic_error(buffer.str());
   }

   if(outputs_number == 0)
   {
      buffer << "OpenNN Exception: InverseSumSquaredError class.\n"
             << "void check(void) const method.\n"
             << "Number of outputs in multilayer perceptron object is zero.\n";

      throw std::logic_error(buffer.str());
   }

   // Mathematical model stuff

   if(!mathematical_model_pointer)
   {
      buffer << "OpenNN Exception: InverseSumSquaredError class.\n"
             << "void check(void) const method.\n"
             << "Pointer to mathematical model is NULL.\n";

      throw std::logic_error(buffer.str());
   }

   // Data set stuff

   if(!data_set_pointer)
   {
      buffer << "OpenNN Exception: InverseSumSquaredError class.\n"
             << "void check(void) const method.\n"
             << "Pointer to data set is NULL.\n";

      throw std::logic_error(buffer.str());
   }

   // Final solutions error stuff
}


// double calculate_performance(void) const method

/// Returns the performance value of this term.
/// It is the sum squared error between the dependent variables calculated by the mathematical model (driven by the neural network) and the targets in the data set, divided by the number of training instances.

double InverseSumSquaredError::calculate_performance(void) const
{
   // Control sentence

   #ifndef NDEBUG

   check();

   #endif

   // Data set stuff

   const Matrix<double> training_input_data = data_set_pointer->arrange_training_input_data();
   const Matrix<double> training_target_data = data_set_pointer->arrange_training_target_data();

   const size_t training_instances_number = training_input_data.get_rows_number();

   // Mathematical model stuff

   const Matrix<double> training_solution_data = mathematical_model_pointer->calculate_dependent_variables(*neural_network_pointer, training_input_data);

   return(training_solution_data.calculate_sum_squared_error(training_target_data)/(double)training_instances_number);
}

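// Illustrative note (not part of the original source): the value returned above can be written as
//
//    E = (1/N) * sum_{i=1}^{N} || y(x_i) - t_i ||^2
//
// where N is the number of training instances, x_i and t_i are the training inputs and targets
// stored in the data set, and y(x_i) are the dependent variables that the mathematical model
// produces when it is driven by the neural network with its current parameters.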

// double calculate_performance(const Vector<double>&) const method

/// Returns which would be the performance of this term for a hypothetical vector of parameters.
/// It does not set that vector of parameters to the neural network.
/// @param potential_parameters Vector of potential parameters for the neural network associated to this performance term.

double InverseSumSquaredError::calculate_performance(const Vector<double>& potential_parameters) const
{
   // Control sentence (if debug)

   #ifndef NDEBUG

   check();

   #endif

   #ifndef NDEBUG

   const size_t size = potential_parameters.size();

   const size_t parameters_number = neural_network_pointer->count_parameters_number();

   if(size != parameters_number)
   {
      std::ostringstream buffer;

      buffer << "OpenNN Exception: InverseSumSquaredError class.\n"
             << "double calculate_performance(const Vector<double>&) const method.\n"
             << "Size (" << size << ") must be equal to number of parameters (" << parameters_number << ").\n";

      throw std::logic_error(buffer.str());
   }

   #endif

   // Evaluate the term on a copy of the neural network so that the original parameters are left untouched.

   NeuralNetwork neural_network_copy(*neural_network_pointer);

   neural_network_copy.set_parameters(potential_parameters);

   InverseSumSquaredError inverse_sum_squared_error_copy(*this);

   inverse_sum_squared_error_copy.set_neural_network_pointer(&neural_network_copy);

   return(inverse_sum_squared_error_copy.calculate_performance());
}


// double calculate_generalization_performance(void) const method

/// Returns the sum squared error of the neural network measured on the generalization instances of the data set.

double InverseSumSquaredError::calculate_generalization_performance(void) const
{
   // Control sentence (if debug)

   #ifndef NDEBUG

   check();

   #endif

   // Neural network

   const MultilayerPerceptron* multilayer_perceptron_pointer = neural_network_pointer->get_multilayer_perceptron_pointer();

   const size_t inputs_number = multilayer_perceptron_pointer->get_inputs_number();
   const size_t outputs_number = multilayer_perceptron_pointer->get_outputs_number();

   // Data set

   const Instances& instances = data_set_pointer->get_instances();

   const size_t generalization_instances_number = instances.count_generalization_instances_number();

   const Vector<size_t> generalization_indices = instances.arrange_generalization_indices();

   size_t generalization_index;

   const Variables& variables = data_set_pointer->get_variables();

   const Vector<size_t> inputs_indices = variables.arrange_inputs_indices();
   const Vector<size_t> targets_indices = variables.arrange_targets_indices();

   const MissingValues& missing_values = data_set_pointer->get_missing_values();

   // Performance functional

   Vector<double> inputs(inputs_number);
   Vector<double> outputs(outputs_number);
   Vector<double> targets(outputs_number);

   double generalization_objective = 0.0;

   int i = 0;

   #pragma omp parallel for private(i, generalization_index, inputs, outputs, targets) reduction(+ : generalization_objective)

   for(i = 0; i < (int)generalization_instances_number; i++)
   {
      generalization_index = generalization_indices[i];

      if(missing_values.has_missing_values(generalization_index))
      {
         continue;
      }

      // Input vector

      inputs = data_set_pointer->get_instance(generalization_index, inputs_indices);

      // Output vector

      outputs = multilayer_perceptron_pointer->calculate_outputs(inputs);

      // Target vector

      targets = data_set_pointer->get_instance(generalization_index, targets_indices);

      // Sum of squares error

      generalization_objective += outputs.calculate_sum_squared_error(targets);
   }

   return(generalization_objective);
}

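// Illustrative note (not part of the original source): unlike calculate_performance(void), which
// evaluates the mathematical model on the training instances and divides by their number, the
// value returned above is the plain (unnormalized) sum of squared errors between the multilayer
// perceptron outputs and the targets over the generalization instances, with instances that
// contain missing values skipped.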

// std::string write_performance_term_type(void) const method

/// Returns a string with the name of the inverse sum squared error performance type, "INVERSE_SUM_SQUARED_ERROR".

std::string InverseSumSquaredError::write_performance_term_type(void) const
{
   return("INVERSE_SUM_SQUARED_ERROR");
}


// tinyxml2::XMLDocument* to_XML(void) const method

/// Serializes the inverse sum squared error object into a XML document of the TinyXML library.

tinyxml2::XMLDocument* InverseSumSquaredError::to_XML(void) const
{
   std::ostringstream buffer;

   tinyxml2::XMLDocument* document = new tinyxml2::XMLDocument;

   // Inverse sum squared error

   tinyxml2::XMLElement* inverse_sum_squared_error_element = document->NewElement("InverseSumSquaredError");

   document->InsertFirstChild(inverse_sum_squared_error_element);

   // Numerical differentiation

   if(numerical_differentiation_pointer)
   {
      tinyxml2::XMLElement* element = numerical_differentiation_pointer->to_XML()->FirstChildElement();
      inverse_sum_squared_error_element->LinkEndChild(element);
   }

   // Unknowns method
   {
      tinyxml2::XMLElement* element = document->NewElement("UnknownsMethod");
      inverse_sum_squared_error_element->LinkEndChild(element);

      tinyxml2::XMLText* text = document->NewText(write_unknowns_method().c_str());
      element->LinkEndChild(text);
   }

   // Display

   {
      tinyxml2::XMLElement* display_element = document->NewElement("Display");
      inverse_sum_squared_error_element->LinkEndChild(display_element);

      buffer.str("");
      buffer << display;

      tinyxml2::XMLText* display_text = document->NewText(buffer.str().c_str());
      display_element->LinkEndChild(display_text);
   }

   return(document);
}

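// Usage sketch (illustrative, not part of the original source; the file name is hypothetical):
// the caller receives ownership of the document created with new above and should delete it.
//
//    tinyxml2::XMLDocument* document = isse.to_XML();
//
//    document->SaveFile("inverse_sum_squared_error.xml");
//
//    delete document;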

// void from_XML(const tinyxml2::XMLDocument&) method

/// Loads the inverse sum squared error object members from a XML document.
/// Note that the body is currently empty, so calling this method has no effect.

void InverseSumSquaredError::from_XML(const tinyxml2::XMLDocument&)
{
}

}


// OpenNN: Open Neural Networks Library.
// Copyright (c) 2005-2015 Roberto Lopez.
//
// This library is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
// License as published by the Free Software Foundation; either
// version 2.1 of the License, or any later version.
//
// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
// Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public
// License along with this library; if not, write to the Free Software
// Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA