OpenNN  2.2
Open Neural Networks Library
normalized_squared_error.cpp
1 /****************************************************************************************************************/
2 /* */
3 /* OpenNN: Open Neural Networks Library */
4 /* www.artelnics.com/opennn */
5 /* */
6 /* N O R M A L I Z E D S Q U A R E D E R R O R C L A S S */
7 /* */
8 /* Roberto Lopez */
9 /* Artelnics - Making intelligent use of data */
11 /* */
12 /****************************************************************************************************************/
13 
14 // OpenNN includes
15 
16 #include "normalized_squared_error.h"
17 
18 namespace OpenNN
19 {
20 
// DEFAULT CONSTRUCTOR

/// Default constructor. It creates a normalized squared error performance
/// term not associated to any neural network and not measured on any data set.
/// (NOTE(review): the defining declarator line is not visible in this listing.)

{
}
31 
32 
// NEURAL NETWORK CONSTRUCTOR

/// Neural network constructor. It creates a normalized squared error term
/// associated to a neural network object, delegating to the PerformanceTerm base.
/// @param new_neural_network_pointer Pointer to a neural network object.
/// (NOTE(review): the defining declarator line is not visible in this listing.)

: PerformanceTerm(new_neural_network_pointer)
{
}
44 
45 
// DATA SET CONSTRUCTOR

/// Data set constructor. It creates a normalized squared error term to be
/// measured on a data set object, delegating to the PerformanceTerm base.
/// @param new_data_set_pointer Pointer to a data set object.
/// (NOTE(review): the defining declarator line is not visible in this listing.)

: PerformanceTerm(new_data_set_pointer)
{
}
58 
59 
// NEURAL NETWORK AND DATA SET CONSTRUCTOR

/// Neural network and data set constructor. It creates a normalized squared
/// error term associated to a neural network and to be measured on a data set.
/// @param new_neural_network_pointer Pointer to a neural network object.
/// @param new_data_set_pointer Pointer to a data set object.

NormalizedSquaredError::NormalizedSquaredError(NeuralNetwork* new_neural_network_pointer, DataSet* new_data_set_pointer)
: PerformanceTerm(new_neural_network_pointer, new_data_set_pointer)
{
}
72 
73 
// XML CONSTRUCTOR

/// XML constructor. It creates a normalized squared error term and loads its
/// members from a TinyXML document via the PerformanceTerm base constructor.
/// @param normalized_squared_error_document XML document with the member data.

NormalizedSquaredError::NormalizedSquaredError(const tinyxml2::XMLDocument& normalized_squared_error_document)
 : PerformanceTerm(normalized_squared_error_document)
{
}
85 
86 
// DESTRUCTOR

/// Destructor. The owned resources are released by the PerformanceTerm base.
/// (NOTE(review): the defining declarator line is not visible in this listing.)

{
}
94 
95 
// METHODS

// double calculate_normalization_coefficient(const Matrix<double>&, const Vector<double>&) const method

/// Returns the normalization coefficient of the error: the sum of squared
/// differences between each row of the target data matrix and the given mean
/// vector of the target variables.
/// @param target_data Matrix with the target values (one row per instance).
/// @param target_data_mean Mean values of the target variables.

double NormalizedSquaredError::calculate_normalization_coefficient(const Matrix<double>& target_data, const Vector<double>& target_data_mean) const
{
    return(target_data.calculate_sum_squared_error(target_data_mean));
}
107 
108 
109 // void check(void) const method
110 
114 
116 {
117  std::ostringstream buffer;
118 
119  // Neural network stuff
120 
122  {
123  buffer << "OpenNN Exception: NormalizedquaredError class.\n"
124  << "void check(void) const method.\n"
125  << "Pointer to neural network is NULL.\n";
126 
127  throw std::logic_error(buffer.str());
128  }
129 
130  const MultilayerPerceptron* multilayer_perceptron_pointer = neural_network_pointer->get_multilayer_perceptron_pointer();
131 
132  if(!multilayer_perceptron_pointer)
133  {
134  buffer << "OpenNN Exception: NormalizedquaredError class.\n"
135  << "void check(void) const method.\n"
136  << "Pointer to multilayer perceptron is NULL.\n";
137 
138  throw std::logic_error(buffer.str());
139  }
140 
141  const size_t multilayer_perceptron_inputs_number = multilayer_perceptron_pointer->get_inputs_number();
142  const size_t multilayer_perceptron_outputs_number = multilayer_perceptron_pointer->get_outputs_number();
143 
144  if(multilayer_perceptron_inputs_number == 0)
145  {
146  buffer << "OpenNN Exception: NormalizedquaredError class.\n"
147  << "void check(void) const method.\n"
148  << "Number of inputs in multilayer perceptron object is zero.\n";
149 
150  throw std::logic_error(buffer.str());
151  }
152 
153  if(multilayer_perceptron_outputs_number == 0)
154  {
155  buffer << "OpenNN Exception: NormalizedquaredError class.\n"
156  << "void check(void) const method.\n"
157  << "Number of outputs in multilayer perceptron object is zero.\n";
158 
159  throw std::logic_error(buffer.str());
160  }
161 
162  // Data set stuff
163 
164  if(!data_set_pointer)
165  {
166  buffer << "OpenNN Exception: NormalizedquaredError class.\n"
167  << "void check(void) const method.\n"
168  << "Pointer to data set is NULL.\n";
169 
170  throw std::logic_error(buffer.str());
171  }
172 
173  // Sum squared error stuff
174 
175  const Variables& variables = data_set_pointer->get_variables();
176 
177  const size_t data_set_inputs_number = variables.count_inputs_number();
178  const size_t data_set_targets_number = variables.count_targets_number();
179 
180  if(multilayer_perceptron_inputs_number != data_set_inputs_number)
181  {
182  buffer << "OpenNN Exception: NormalizedquaredError class.\n"
183  << "void check(void) const method.\n"
184  << "Number of inputs in multilayer perceptron (" << multilayer_perceptron_inputs_number << ") must be equal to number of inputs in data set (" << data_set_inputs_number << ").\n";
185 
186  throw std::logic_error(buffer.str());
187  }
188 
189  if(multilayer_perceptron_outputs_number != data_set_targets_number)
190  {
191  buffer << "OpenNN Exception: NormalizedquaredError class.\n"
192  << "void check(void) const method.\n"
193  << "Number of outputs in multilayer perceptron (" << multilayer_perceptron_outputs_number << ") must be equal to number of targets in data set (" << data_set_targets_number << ").\n";
194 
195  throw std::logic_error(buffer.str());
196  }
197 }
198 
199 
200 // double calculate_performance(void) const method
201 
203 
205 {
206  // Control sentence
207 
208  #ifndef NDEBUG
209 
210  check();
211 
212  #endif
213 
214  // Neural network stuff
215 
216  const MultilayerPerceptron* multilayer_perceptron_pointer = neural_network_pointer->get_multilayer_perceptron_pointer();
217 
218  const size_t inputs_number = multilayer_perceptron_pointer->get_inputs_number();
219  const size_t outputs_number = multilayer_perceptron_pointer->get_outputs_number();
220 
221  // Data set stuff
222 
223  const Instances& instances = data_set_pointer->get_instances();
224 
225  const size_t training_instances_number = instances.count_training_instances_number();
226 
227  const Vector<size_t> training_indices = instances.arrange_training_indices();
228 
229  size_t training_index;
230 
231  const Variables& variables = data_set_pointer->get_variables();
232 
233  const Vector<size_t> inputs_indices = variables.arrange_inputs_indices();
234  const Vector<size_t> targets_indices = variables.arrange_targets_indices();
235 
236  const MissingValues& missing_values = data_set_pointer->get_missing_values();
237 
238  const Vector<size_t> missing_instances = missing_values.arrange_missing_instances();
239 
240  const Vector<double> training_target_data_mean = data_set_pointer->calculate_training_target_data_mean();
241 
242  // Normalized squared error stuff
243 
244  Vector<double> inputs(inputs_number);
245  Vector<double> outputs(outputs_number);
246  Vector<double> targets(outputs_number);
247 
248  int i = 0;
249 
250  double sum_squared_error = 0.0;
251  double normalization_coefficient = 0.0;
252 
253  #pragma omp parallel for private(i, training_index, inputs, outputs, targets) reduction(+ : sum_squared_error, normalization_coefficient)
254 
255  for(i = 0; i < (int)training_instances_number; i++)
256  {
257  training_index = training_indices[i];
258 
259  if(missing_values.has_missing_values(training_index))
260  {
261  continue;
262  }
263 
264  // Input vector
265 
266  inputs = data_set_pointer->get_instance(training_index, inputs_indices);
267 
268  // Output vector
269 
270  outputs = multilayer_perceptron_pointer->calculate_outputs(inputs);
271 
272  // Target vector
273 
274  targets = data_set_pointer->get_instance(training_index, targets_indices);
275 
276  // Sum squared error
277 
278  sum_squared_error += outputs.calculate_sum_squared_error(targets);
279 
280  // Normalization coefficient
281 
282  normalization_coefficient += targets.calculate_sum_squared_error(training_target_data_mean);
283  }
284 
285  if(normalization_coefficient < 1.0e-99)
286  {
287  std::ostringstream buffer;
288 
289  buffer << "OpenNN Exception: NormalizedSquaredError class.\n"
290  << "double calculate_performance(void) const method.\n"
291  << "Normalization coefficient is zero.\n";
292 
293  throw std::logic_error(buffer.str());
294  }
295 
296  return(sum_squared_error/normalization_coefficient);
297 }
298 
299 
// double calculate_performance(const Vector<double>&) const method

/// Returns the normalized squared error on the training instances for a given
/// vector of neural network parameters, without modifying the parameters
/// stored in this object's neural network. Instances with missing values are
/// skipped.
/// @param parameters Vector of parameters for the multilayer perceptron.
/// @throws std::logic_error In debug builds, if the parameters size does not
/// match the network; always, if the normalization coefficient is zero.

{
    // Control sentence (if debug)

    #ifndef NDEBUG

    check();

    #endif

    #ifndef NDEBUG

    std::ostringstream buffer;

    const size_t size = parameters.size();

    const size_t parameters_number = neural_network_pointer->count_parameters_number();

    if(size != parameters_number)
    {
        buffer << "OpenNN Exception: NormalizedSquaredError class.\n"
               << "double calculate_performance(const Vector<double>&) method.\n"
               << "Size (" << size << ") must be equal to number of parameters (" << parameters_number << ").\n";

        throw std::logic_error(buffer.str());
    }

    #endif

    // Neural network stuff

    const MultilayerPerceptron* multilayer_perceptron_pointer = neural_network_pointer->get_multilayer_perceptron_pointer();

    const size_t inputs_number = multilayer_perceptron_pointer->get_inputs_number();
    const size_t outputs_number = multilayer_perceptron_pointer->get_outputs_number();

    // Data set stuff

    const Instances& instances = data_set_pointer->get_instances();

    const size_t training_instances_number = instances.count_training_instances_number();

    const Vector<size_t> training_indices = instances.arrange_training_indices();

    size_t training_index;

    const Variables& variables = data_set_pointer->get_variables();

    const Vector<size_t> inputs_indices = variables.arrange_inputs_indices();
    const Vector<size_t> targets_indices = variables.arrange_targets_indices();

    const MissingValues& missing_values = data_set_pointer->get_missing_values();

    const Vector<double> training_target_data_mean = data_set_pointer->calculate_training_target_data_mean();

    // Normalized squared error stuff

    Vector<double> inputs(inputs_number);
    Vector<double> outputs(outputs_number);
    Vector<double> targets(outputs_number);

    double sum_squared_error = 0.0;
    double normalization_coefficient = 0.0;

    int i = 0;

    // Both accumulators are combined across threads by the reduction clause.

    #pragma omp parallel for private(i, training_index, inputs, outputs, targets) reduction(+ : sum_squared_error, normalization_coefficient)

    for(i = 0; i < (int)training_instances_number; i++)
    {
        training_index = training_indices[i];

        // Skip instances with missing values

        if(missing_values.has_missing_values(training_index))
        {
            continue;
        }

        // Input vector

        inputs = data_set_pointer->get_instance(training_index, inputs_indices);

        // Output vector (computed with the given parameters, not the stored ones)

        outputs = multilayer_perceptron_pointer->calculate_outputs(inputs, parameters);

        // Target vector

        targets = data_set_pointer->get_instance(training_index, targets_indices);

        // Sum squared error

        sum_squared_error += outputs.calculate_sum_squared_error(targets);

        // Normalization coefficient

        normalization_coefficient += targets.calculate_sum_squared_error(training_target_data_mean);
    }

    if(normalization_coefficient < 1.0e-99)
    {
        std::ostringstream buffer;

        buffer << "OpenNN Exception: NormalizedSquaredError class.\n"
               << "double calculate_performance(const Vector<double>&) const method.\n"
               << "Normalization coefficient is zero.\n";

        throw std::logic_error(buffer.str());
    }

    return(sum_squared_error/normalization_coefficient);
}
417 
418 
// double calculate_generalization_performance(void) const method

/// Returns the normalized squared error on the generalization instances of
/// the data set: the sum of squared output-target errors divided by the sum
/// of squared deviations of the targets from the generalization target mean.
/// Returns 0.0 when there are fewer than two generalization instances.
/// Instances with missing values are skipped.
/// @throws std::logic_error If the normalization coefficient is zero.

{
    // Control sentence

    #ifndef NDEBUG

    check();

    #endif

    // Neural network stuff

    const MultilayerPerceptron* multilayer_perceptron_pointer = neural_network_pointer->get_multilayer_perceptron_pointer();

    const size_t inputs_number = multilayer_perceptron_pointer->get_inputs_number();
    const size_t outputs_number = multilayer_perceptron_pointer->get_outputs_number();

    // Data set stuff

    const Instances& instances = data_set_pointer->get_instances();

    const size_t generalization_instances_number = instances.count_generalization_instances_number();

    // With fewer than two generalization instances the error is defined as zero.

    if(generalization_instances_number < 2)
    {
        return(0.0);
    }

    const Vector<size_t> generalization_indices = instances.arrange_generalization_indices();

    size_t generalization_index;

    const Variables& variables = data_set_pointer->get_variables();

    const Vector<size_t> inputs_indices = variables.arrange_inputs_indices();
    const Vector<size_t> targets_indices = variables.arrange_targets_indices();

    const MissingValues& missing_values = data_set_pointer->get_missing_values();

    const Vector<double> generalization_target_data_mean = data_set_pointer->calculate_generalization_target_data_mean();

    Vector<double> inputs(inputs_number);
    Vector<double> outputs(outputs_number);
    Vector<double> targets(outputs_number);

    double sum_squared_error = 0.0;
    double normalization_coefficient = 0.0;

    int i = 0;

    #pragma omp parallel for private(i, generalization_index, inputs, outputs, targets) reduction(+ : sum_squared_error, normalization_coefficient)

    for(i = 0; i < (int)generalization_instances_number; i++)
    {
        generalization_index = generalization_indices[i];

        // Skip instances with missing values

        if(missing_values.has_missing_values(generalization_index))
        {
            continue;
        }

        // Input vector

        inputs = data_set_pointer->get_instance(generalization_index, inputs_indices);

        // Output vector

        outputs = multilayer_perceptron_pointer->calculate_outputs(inputs);

        // Target vector

        targets = data_set_pointer->get_instance(generalization_index, targets_indices);

        // Sum squared error

        sum_squared_error += outputs.calculate_sum_squared_error(targets);

        // Normalization coefficient

        normalization_coefficient += targets.calculate_sum_squared_error(generalization_target_data_mean);
    }

    if(normalization_coefficient < 1.0e-99)
    {
        std::ostringstream buffer;

        buffer << "OpenNN Exception: NormalizedSquaredError class.\n"
               << "double calculate_generalization_performance(void) const method.\n"
               << "Normalization coefficient is zero.\n";

        throw std::logic_error(buffer.str());
    }

    return(sum_squared_error/normalization_coefficient);
}
516 
517 
// Vector<double> calculate_gradient(void) const method

/// Returns the gradient of the normalized squared error with respect to the
/// neural network parameters, computed by back-propagation over the training
/// instances. The accumulated point gradients are divided by the
/// normalization coefficient (sum of squared target deviations from the
/// training target mean). Instances with missing values are skipped.
/// @throws std::logic_error If the normalization coefficient is zero.

{
    // Control sentence (if debug)

    #ifndef NDEBUG

    check();

    #endif

    // Neural network stuff

    const size_t parameters_number = neural_network_pointer->count_parameters_number();

    const MultilayerPerceptron* multilayer_perceptron_pointer = neural_network_pointer->get_multilayer_perceptron_pointer();

    const size_t inputs_number = multilayer_perceptron_pointer->get_inputs_number();
    const size_t outputs_number = multilayer_perceptron_pointer->get_outputs_number();

    const size_t layers_number = multilayer_perceptron_pointer->get_layers_number();

    // Element [0] holds the layers activations, element [1] their derivatives
    // (see the references into it inside the loop below).

    Vector< Vector< Vector<double> > > first_order_forward_propagation(2);

    Vector< Vector<double> > layers_inputs(layers_number);

    Vector< Matrix<double> > layers_combination_parameters_Jacobian;

    const bool has_conditions_layer = neural_network_pointer->has_conditions_layer();

    const ConditionsLayer* conditions_layer_pointer = has_conditions_layer ? neural_network_pointer->get_conditions_layer_pointer() : NULL;

    Vector<double> particular_solution;
    Vector<double> homogeneous_solution;

    // Data set stuff

    const Instances& instances = data_set_pointer->get_instances();

    const size_t training_instances_number = instances.count_training_instances_number();

    const Vector<size_t> training_indices = instances.arrange_training_indices();

    size_t training_index;

    const Variables& variables = data_set_pointer->get_variables();

    const Vector<size_t> inputs_indices = variables.arrange_inputs_indices();
    const Vector<size_t> targets_indices = variables.arrange_targets_indices();

    const MissingValues& missing_values = data_set_pointer->get_missing_values();

    const Vector<double> training_target_data_mean = data_set_pointer->calculate_training_target_data_mean();

    Vector<double> inputs(inputs_number);
    Vector<double> targets(outputs_number);

    // Normalized squared error stuff

    Vector<double> output_gradient(outputs_number);

    Vector< Vector<double> > layers_delta;

    Vector<double> point_gradient(parameters_number, 0.0);

    double normalization_coefficient = 0.0;

    // Main loop

    Vector<double> gradient(parameters_number, 0.0);

    int i = 0;

    #pragma omp parallel for private(i, training_index, inputs, targets, first_order_forward_propagation, layers_inputs, layers_combination_parameters_Jacobian,\
     output_gradient, layers_delta, particular_solution, homogeneous_solution, point_gradient) \
     reduction(+ : normalization_coefficient)

    for(i = 0; i < (int)training_instances_number; i++)
    {
        training_index = training_indices[i];

        // Skip instances with missing values

        if(missing_values.has_missing_values(training_index))
        {
            continue;
        }

        // Data set

        inputs = data_set_pointer->get_instance(training_index, inputs_indices);

        targets = data_set_pointer->get_instance(training_index, targets_indices);

        // Multilayer perceptron

        first_order_forward_propagation = multilayer_perceptron_pointer->calculate_first_order_forward_propagation(inputs);

        const Vector< Vector<double> >& layers_activation = first_order_forward_propagation[0];
        const Vector< Vector<double> >& layers_activation_derivative = first_order_forward_propagation[1];

        layers_inputs = multilayer_perceptron_pointer->arrange_layers_input(inputs, layers_activation);

        layers_combination_parameters_Jacobian = multilayer_perceptron_pointer->calculate_layers_combination_parameters_Jacobian(layers_inputs);

        // Performance functional

        if(!has_conditions_layer)
        {
            // Derivative of the squared error with respect to the network outputs.

            output_gradient = (layers_activation[layers_number-1]-targets)*2.0;

            layers_delta = calculate_layers_delta(layers_activation_derivative, output_gradient);
        }
        else
        {
            particular_solution = conditions_layer_pointer->calculate_particular_solution(inputs);
            homogeneous_solution = conditions_layer_pointer->calculate_homogeneous_solution(inputs);

            output_gradient = (particular_solution+homogeneous_solution*layers_activation[layers_number-1] - targets)*2.0;

            layers_delta = calculate_layers_delta(layers_activation_derivative, homogeneous_solution, output_gradient);
        }

        point_gradient = calculate_point_gradient(layers_combination_parameters_Jacobian, layers_delta);

        // The shared gradient vector is not covered by the reduction clause,
        // so each thread accumulates into it inside a critical section.

        #pragma omp critical

        gradient += point_gradient;

        normalization_coefficient += targets.calculate_sum_squared_error(training_target_data_mean);
    }

    if(normalization_coefficient < 1.0e-99)
    {
        std::ostringstream buffer;

        buffer << "OpenNN Exception: NormalizedSquaredError class.\n"
               << "Vector<double> calculate_gradient(void) const method.\n"
               << "Normalization coefficient is zero.\n";

        throw std::logic_error(buffer.str());
    }

    return(gradient/normalization_coefficient);
}
665 
666 
// Matrix<double> calculate_Hessian(void) const method

/// Placeholder: the Hessian of the normalized squared error is not
/// implemented. An empty (default-constructed) matrix is returned.

{
    Matrix<double> Hessian;

    return(Hessian);
}
679 
680 
// Vector<double> calculate_terms(void) const method

/// Returns the vector of performance terms, one per training instance: the
/// Euclidean distance between outputs and targets, divided by the square root
/// of the normalization coefficient. Instances with missing values are
/// skipped, so their entries keep the vector's initial value.
/// @throws std::logic_error If the normalization coefficient is zero.

{
    // Control sentence (if debug)

    #ifndef NDEBUG

    check();

    #endif

    // Neural network stuff

    const MultilayerPerceptron* multilayer_perceptron_pointer = neural_network_pointer->get_multilayer_perceptron_pointer();

    const size_t inputs_number = multilayer_perceptron_pointer->get_inputs_number();
    const size_t outputs_number = multilayer_perceptron_pointer->get_outputs_number();

    // Data set stuff

    const Instances& instances = data_set_pointer->get_instances();

    const size_t training_instances_number = instances.count_training_instances_number();

    const Vector<size_t> training_indices = instances.arrange_training_indices();

    size_t training_index;

    const Variables& variables = data_set_pointer->get_variables();

    const Vector<size_t> inputs_indices = variables.arrange_inputs_indices();
    const Vector<size_t> targets_indices = variables.arrange_targets_indices();

    const MissingValues& missing_values = data_set_pointer->get_missing_values();

    const Vector<double> training_target_data_mean = data_set_pointer->calculate_training_target_data_mean();

    // Calculate

    Vector<double> performance_terms(training_instances_number);

    Vector<double> inputs(inputs_number);
    Vector<double> outputs(outputs_number);
    Vector<double> targets(outputs_number);

    double normalization_coefficient = 0.0;

    int i = 0;

    #pragma omp parallel for private(i, training_index, inputs, outputs, targets) reduction(+ : normalization_coefficient)

    for(i = 0; i < (int)training_instances_number; i++)
    {
        training_index = training_indices[i];

        // Skip instances with missing values

        if(missing_values.has_missing_values(training_index))
        {
            continue;
        }

        // Input vector

        inputs = data_set_pointer->get_instance(training_index, inputs_indices);

        // Output vector

        outputs = multilayer_perceptron_pointer->calculate_outputs(inputs);

        // Target vector

        targets = data_set_pointer->get_instance(training_index, targets_indices);

        // Error term: Euclidean distance between outputs and targets

        performance_terms[i] = outputs.calculate_distance(targets);

        // Normalization coefficient

        normalization_coefficient += targets.calculate_sum_squared_error(training_target_data_mean);
    }

    if(normalization_coefficient < 1.0e-99)
    {
        std::ostringstream buffer;

        buffer << "OpenNN Exception: NormalizedSquaredError class.\n"
               << "Vector<double> calculate_terms(void) const method.\n"
               << "Normalization coefficient is zero.\n";

        throw std::logic_error(buffer.str());
    }

    return(performance_terms/sqrt(normalization_coefficient));
}
779 
780 
// Vector<double> calculate_terms(const Vector<double>&) const method

/// Returns the performance terms evaluated for a given vector of neural
/// network parameters. A copy of the neural network is created and its
/// parameters are set, so neither this object nor its neural network is
/// modified.
/// @param network_parameters Vector of parameters for the multilayer perceptron.
/// @throws std::logic_error In debug builds, if the parameters size does not
/// match the multilayer perceptron.

{
    // Control sentence (if debug)

    #ifndef NDEBUG

    check();

    #endif


    #ifndef NDEBUG

    const size_t size = network_parameters.size();

    const MultilayerPerceptron* multilayer_perceptron_pointer = neural_network_pointer->get_multilayer_perceptron_pointer();

    const size_t neural_parameters_number = multilayer_perceptron_pointer->count_parameters_number();

    if(size != neural_parameters_number)
    {
        std::ostringstream buffer;

        buffer << "OpenNN Exception: NormalizedSquaredError class.\n"
               << "double calculate_terms(const Vector<double>&) const method.\n"
               << "Size (" << size << ") must be equal to number of multilayer perceptron parameters (" << neural_parameters_number << ").\n";

        throw std::logic_error(buffer.str());
    }

    #endif

    // Evaluate on a copy of the network so this object stays unmodified.

    NeuralNetwork neural_network_copy(*neural_network_pointer);

    neural_network_copy.set_parameters(network_parameters);

    NormalizedSquaredError normalized_squared_error_copy(*this);

    normalized_squared_error_copy.set_neural_network_pointer(&neural_network_copy);

    return(normalized_squared_error_copy.calculate_terms());
}
829 
830 
831 // Matrix<double> calculate_terms_Jacobian(void) const method
832 
836 
838 {
839  // Control sentence
840 
841  #ifndef NDEBUG
842 
843  check();
844 
845  #endif
846 
847  // Neural network stuff
848 
849  const MultilayerPerceptron* multilayer_perceptron_pointer = neural_network_pointer->get_multilayer_perceptron_pointer();
850 
851  const size_t inputs_number = multilayer_perceptron_pointer->get_inputs_number();
852  const size_t outputs_number = multilayer_perceptron_pointer->get_outputs_number();
853  const size_t layers_number = multilayer_perceptron_pointer->get_layers_number();
854 
855  const size_t parameters_number = multilayer_perceptron_pointer->count_parameters_number();
856 
857  Vector< Vector< Vector<double> > > first_order_forward_propagation(2);
858 
859  Vector< Matrix<double> > layers_combination_parameters_Jacobian;
860 
861  Vector< Vector<double> > layers_inputs(layers_number);
862 
863  Vector<double> particular_solution;
864  Vector<double> homogeneous_solution;
865 
866  const bool has_conditions_layer = neural_network_pointer->has_conditions_layer();
867 
868  const ConditionsLayer* conditions_layer_pointer = has_conditions_layer ? neural_network_pointer->get_conditions_layer_pointer() : NULL;
869 
870  // Data set stuff
871 
872  const Instances& instances = data_set_pointer->get_instances();
873 
874  const size_t training_instances_number = instances.count_training_instances_number();
875 
876  const Vector<size_t> training_indices = instances.arrange_training_indices();
877 
878  size_t training_index;
879 
880  const Variables& variables = data_set_pointer->get_variables();
881 
882  const Vector<size_t> inputs_indices = variables.arrange_inputs_indices();
883  const Vector<size_t> targets_indices = variables.arrange_targets_indices();
884 
885  const MissingValues& missing_values = data_set_pointer->get_missing_values();
886 
887  const Vector<double> training_target_data_mean = data_set_pointer->calculate_training_target_data_mean();
888 
889  Vector<double> inputs(inputs_number);
890  Vector<double> targets(outputs_number);
891 
892  // Normalized squared error
893 
894  Vector<double> term(outputs_number);
895  double term_norm;
896 
897  Vector<double> output_gradient(outputs_number);
898 
899  Vector< Vector<double> > layers_delta(layers_number);
900  Vector<double> point_gradient(parameters_number);
901 
902  Matrix<double> terms_Jacobian(training_instances_number, parameters_number);
903 
904  double normalization_coefficient = 0.0;
905 
906  // Main loop
907 
908  int i = 0;
909 
910  #pragma omp parallel for private(i, training_index, inputs, targets, first_order_forward_propagation, layers_inputs, \
911  layers_combination_parameters_Jacobian, term, term_norm, output_gradient, layers_delta, particular_solution, homogeneous_solution, point_gradient)
912 
913  for(i = 0; i < (int)training_instances_number; i++)
914  {
915  training_index = training_indices[i];
916 
917  if(missing_values.has_missing_values(training_index))
918  {
919  continue;
920  }
921 
922  // Data set
923 
924  inputs = data_set_pointer->get_instance(training_index, inputs_indices);
925 
926  targets = data_set_pointer->get_instance(training_index, targets_indices);
927 
928  // Neural network
929 
930  first_order_forward_propagation = multilayer_perceptron_pointer->calculate_first_order_forward_propagation(inputs);
931 
932  const Vector< Vector<double> >& layers_activation = first_order_forward_propagation[0];
933  const Vector< Vector<double> >& layers_activation_derivative = first_order_forward_propagation[1];
934 
935  layers_inputs = multilayer_perceptron_pointer->arrange_layers_input(inputs, layers_activation);
936 
937  layers_combination_parameters_Jacobian = multilayer_perceptron_pointer->calculate_layers_combination_parameters_Jacobian(layers_inputs);
938 
939  // Performance functional
940 
941  if(!has_conditions_layer) // No conditions
942  {
943  const Vector<double>& outputs = layers_activation[layers_number-1];
944 
945  term = outputs-targets;
946  term_norm = term.calculate_norm();
947 
948  if(term_norm == 0.0)
949  {
950  output_gradient.initialize(0.0);
951  }
952  else
953  {
954  output_gradient = term/term_norm;
955  }
956 
957  layers_delta = calculate_layers_delta(layers_activation_derivative, output_gradient);
958  }
959  else // Conditions
960  {
961  particular_solution = conditions_layer_pointer->calculate_particular_solution(inputs);
962  homogeneous_solution = conditions_layer_pointer->calculate_homogeneous_solution(inputs);
963 
964  const Vector<double>& output_layer_activation = layers_activation[layers_number-1];
965 
966  term = (particular_solution+homogeneous_solution*output_layer_activation - targets);
967  term_norm = term.calculate_norm();
968 
969  if(term_norm == 0.0)
970  {
971  output_gradient.initialize(0.0);
972  }
973  else
974  {
975  output_gradient = term/term_norm;
976  }
977 
978  layers_delta = calculate_layers_delta(layers_activation_derivative, homogeneous_solution, output_gradient);
979  }
980 
981  normalization_coefficient += targets.calculate_sum_squared_error(training_target_data_mean);
982 
983  point_gradient = calculate_point_gradient(layers_combination_parameters_Jacobian, layers_delta);
984 
985  terms_Jacobian.set_row(i, point_gradient);
986 
987  }
988 
989  if(normalization_coefficient < 1.0e-99)
990  {
991  std::ostringstream buffer;
992 
993  buffer << "OpenNN Exception: NormalizedSquaredError class.\n"
994  << "Matrix<double> calculate_terms_Jacobian(void) const method.\n"
995  << "Normalization coefficient is zero.\n";
996 
997  throw std::logic_error(buffer.str());
998  }
999 
1000  return(terms_Jacobian/sqrt(normalization_coefficient));
1001 }
1002 
1003 
// FirstOrderTerms calculate_first_order_terms(void) const method

/// Returns a first order terms structure containing both the vector of
/// performance terms and their Jacobian matrix.

{
    FirstOrderTerms first_order_terms;

    first_order_terms.terms = calculate_terms();

    first_order_terms.Jacobian = calculate_terms_Jacobian();

    return(first_order_terms);
}
1019 
1020 
1021 // Vector<double> calculate_squared_errors(void) const method
1022 
1024 
1026 {
1027  // Control sentence (if debug)
1028 
1029  #ifndef NDEBUG
1030 
1031  check();
1032 
1033  #endif
1034 
1035  // Neural network stuff
1036 
1037  const MultilayerPerceptron* multilayer_perceptron_pointer = neural_network_pointer->get_multilayer_perceptron_pointer();
1038 
1039  const size_t inputs_number = multilayer_perceptron_pointer->get_inputs_number();
1040  const size_t outputs_number = multilayer_perceptron_pointer->get_outputs_number();
1041 
1042  // Data set stuff
1043 
1044  const Instances& instances = data_set_pointer->get_instances();
1045 
1046  const size_t training_instances_number = instances.count_training_instances_number();
1047 
1048  const Vector<size_t> training_indices = instances.arrange_training_indices();
1049 
1050  size_t training_index;
1051 
1052  const Variables& variables = data_set_pointer->get_variables();
1053 
1054  const Vector<size_t> inputs_indices = variables.arrange_inputs_indices();
1055  const Vector<size_t> targets_indices = variables.arrange_targets_indices();
1056 
1057  const MissingValues& missing_values = data_set_pointer->get_missing_values();
1058 
1059  // Calculate
1060 
1061  Vector<double> squared_errors(training_instances_number);
1062 
1063  Vector<double> inputs(inputs_number);
1064  Vector<double> outputs(outputs_number);
1065  Vector<double> targets(outputs_number);
1066 
1067  // Main loop
1068 
1069  int i = 0;
1070 
1071  #pragma omp parallel for private(i, training_index, inputs, outputs, targets)
1072 
1073  for(i = 0; i < (int)training_instances_number; i++)
1074  {
1075  training_index = training_indices[i];
1076 
1077  if(missing_values.has_missing_values(training_index))
1078  {
1079  continue;
1080  }
1081 
1082  // Input vector
1083 
1084  inputs = data_set_pointer->get_instance(training_index, inputs_indices);
1085 
1086  // Output vector
1087 
1088  outputs = multilayer_perceptron_pointer->calculate_outputs(inputs);
1089 
1090  // Target vector
1091 
1092  targets = data_set_pointer->get_instance(training_index, targets_indices);
1093 
1094  // Error
1095 
1096  squared_errors[i] = outputs.calculate_sum_squared_error(targets);
1097  }
1098 
1099  return(squared_errors);
1100 }
1101 
1102 
1103 // Vector<size_t> calculate_maximal_errors(void) const method
1104 
1107 
1108 Vector<size_t> NormalizedSquaredError::calculate_maximal_errors(const size_t& maximal_errors_number) const
1109 {
1110  // Control sentence (if debug)
1111 
1112  #ifndef NDEBUG
1113 
1114  check();
1115 
1116  const Instances& instances = data_set_pointer->get_instances();
1117 
1118  const size_t training_instances_number = instances.count_training_instances_number();
1119 
1120  if(maximal_errors_number > training_instances_number)
1121  {
1122  std::ostringstream buffer;
1123 
1124  buffer << "OpenNN Exception: NormalizedquaredError class.\n"
1125  << "Vector<size_t> calculate_maximal_errors(void) const method.\n"
1126  << "Number of maximal errors (" << maximal_errors_number << ") must be equal or less than number of training instances (" << training_instances_number << ").\n";
1127 
1128  throw std::logic_error(buffer.str());
1129  }
1130 
1131  #endif
1132 
1133  return(calculate_squared_errors().calculate_maximal_indices(maximal_errors_number));
1134 }
1135 
1136 
1137 // std::string write_performance_term_type(void) const method
1138 
1140 
1142 {
1143  return("NORMALIZED_SQUARED_ERROR");
1144 }
1145 
1146 
1147 // tinyxml2::XMLDocument* to_XML(void) const method
1148 
1151 
1152 tinyxml2::XMLDocument* NormalizedSquaredError::to_XML(void) const
1153 {
1154  std::ostringstream buffer;
1155 
1156  tinyxml2::XMLDocument* document = new tinyxml2::XMLDocument;
1157 
1158  // Normalized squared error
1159 
1160  tinyxml2::XMLElement* normalized_squared_error_element = document->NewElement("NormalizedSquaredError");
1161 
1162  document->InsertFirstChild(normalized_squared_error_element);
1163 
1164  // Display
1165  {
1166  tinyxml2::XMLElement* display_element = document->NewElement("Display");
1167  normalized_squared_error_element->LinkEndChild(display_element);
1168 
1169  buffer.str("");
1170  buffer << display;
1171 
1172  tinyxml2::XMLText* display_text = document->NewText(buffer.str().c_str());
1173  display_element->LinkEndChild(display_text);
1174  }
1175 
1176  return(document);
1177 }
1178 
1179 
1180 // void from_XML(const tinyxml2::XMLDocument&) method
1181 
1184 
1185 void NormalizedSquaredError::from_XML(const tinyxml2::XMLDocument& document)
1186 {
1187  const tinyxml2::XMLElement* root_element = document.FirstChildElement("NormalizedSquaredError");
1188 
1189  if(!root_element)
1190  {
1191  return;
1192  }
1193 
1194  const tinyxml2::XMLElement* display_element = root_element->FirstChildElement("Display");
1195 
1196  if(display_element)
1197  {
1198  const std::string new_display_string = display_element->GetText();
1199 
1200  try
1201  {
1202  set_display(new_display_string != "0");
1203  }
1204  catch(const std::logic_error& e)
1205  {
1206  std::cout << e.what() << std::endl;
1207  }
1208  }
1209 }
1210 
1211 
1212 // std::string write_information(void) const method
1213 
1215 {
1216  std::ostringstream buffer;
1217 
1218  buffer << "Normalized squared error: " << calculate_performance() << "\n";
1219 
1220  return(buffer.str());
1221 
1222 }
1223 
1224 }
1225 
1226 // OpenNN: Open Neural Networks Library.
1227 // Copyright (c) 2005-2015 Roberto Lopez.
1228 //
1229 // This library is free software; you can redistribute it and/or
1230 // modify it under the terms of the GNU Lesser General Public
1231 // License as published by the Free Software Foundation; either
1232 // version 2.1 of the License, or any later version.
1233 //
1234 // This library is distributed in the hope that it will be useful,
1235 // but WITHOUT ANY WARRANTY; without even the implied warranty of
1236 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
1237 // Lesser General Public License for more details.
1238 
1239 // You should have received a copy of the GNU Lesser General Public
1240 // License along with this library; if not, write to the Free Software
1241 // Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
double calculate_normalization_coefficient(const Matrix< double > &, const Vector< double > &) const
size_t count_parameters_number(void) const
Vector< size_t > calculate_maximal_errors(const size_t &=10) const
void initialize(const T &)
Definition: vector.h:753
Matrix< double > calculate_Hessian(void) const
const Variables & get_variables(void) const
Returns a constant reference to the variables object composing this data set object.
Definition: data_set.cpp:202
Vector< size_t > arrange_missing_instances(void) const
Returns a vector with the indices of those instances with missing values.
Vector< double > terms
Subterms performance vector.
double calculate_sum_squared_error(const Matrix< double > &) const
Definition: matrix.h:4723
size_t count_training_instances_number(void) const
Returns the number of instances in the data set which will be used for training.
Definition: instances.cpp:387
size_t get_inputs_number(void) const
Returns the number of inputs to the multilayer perceptron.
double calculate_generalization_performance(void) const
Returns a performance value of the performance term for generalization purposes.
tinyxml2::XMLDocument * to_XML(void) const
virtual ~NormalizedSquaredError(void)
Destructor.
Vector< double > calculate_point_gradient(const Vector< double > &, const Vector< Vector< double > > &, const Vector< Vector< double > > &) const
size_t get_layers_number(void) const
Returns the number of layers in the multilayer perceptron.
Vector< size_t > calculate_maximal_indices(const size_t &) const
Definition: vector.h:1700
bool has_missing_values(void) const
size_t get_outputs_number(void) const
Returns the number of outputs neurons in the multilayer perceptron.
Vector< double > calculate_outputs(const Vector< double > &) const
Matrix< double > Jacobian
Subterms Jacobian matrix.
const MissingValues & get_missing_values(void) const
Returns a reference to the missing values object in the data set.
Definition: data_set.cpp:275
Vector< double > calculate_training_target_data_mean(void) const
Returns the mean values of the target variables on the training instances.
Definition: data_set.cpp:1719
void set_display(const bool &)
void from_XML(const tinyxml2::XMLDocument &)
virtual void set_neural_network_pointer(NeuralNetwork *)
Vector< size_t > arrange_targets_indices(void) const
Returns the indices of the target variables.
Definition: variables.cpp:519
Vector< size_t > arrange_training_indices(void) const
Returns the indices of the instances which will be used for training.
Definition: instances.cpp:489
size_t count_generalization_instances_number(void) const
Returns the number of instances in the data set which will be used for generalization.
Definition: instances.cpp:409
PerformanceTerm::FirstOrderTerms calculate_first_order_terms(void) const
Vector< double > get_instance(const size_t &) const
Definition: data_set.cpp:684
bool has_conditions_layer(void) const
MultilayerPerceptron * get_multilayer_perceptron_pointer(void) const
Returns a pointer to the multilayer perceptron composing this neural network.
double calculate_norm(void) const
Returns the vector norm.
Definition: vector.h:2358
Matrix< double > calculate_terms_Jacobian(void) const
double calculate_sum_squared_error(const Vector< double > &) const
Definition: vector.h:2569
virtual Vector< double > calculate_homogeneous_solution(const Vector< double > &) const
Returns the homogeneous solution for applying boundary conditions.
Vector< Matrix< double > > calculate_layers_combination_parameters_Jacobian(const Vector< Vector< double > > &) const
NeuralNetwork * neural_network_pointer
Pointer to a multilayer perceptron object.
double calculate_performance(void) const
Returns the performance value of a neural network according to the normalized squared error on a data...
virtual Vector< double > calculate_particular_solution(const Vector< double > &) const
Returns the particular solution for applying boundary conditions.
bool display
Display messages to screen.
double calculate_distance(const Vector< double > &) const
Definition: vector.h:2557
ConditionsLayer * get_conditions_layer_pointer(void) const
Returns a pointer to the conditions layer composing this neural network.
Vector< double > calculate_squared_errors(void) const
Returns the squared errors of the training instances.
size_t count_inputs_number(void) const
Returns the number of input variables of the data set.
Definition: variables.cpp:249
std::string write_performance_term_type(void) const
Returns a string with the name of the normalized squared error performance type, "NORMALIZED_SQUARED_...
Vector< Vector< Vector< double > > > calculate_first_order_forward_propagation(const Vector< double > &) const
Vector< double > calculate_generalization_target_data_mean(void) const
Returns the mean values of the target variables on the generalization instances.
Definition: data_set.cpp:1735
DataSet * data_set_pointer
Pointer to a data set object.
size_t count_targets_number(void) const
Returns the number of target variables of the data set.
Definition: variables.cpp:271
Vector< double > calculate_gradient(void) const
void set_row(const size_t &, const Vector< T > &)
Definition: matrix.h:1691
Vector< Vector< double > > calculate_layers_delta(const Vector< Vector< double > > &, const Vector< double > &) const
Vector< size_t > arrange_generalization_indices(void) const
Returns the indices of the instances which will be used for generalization.
Definition: instances.cpp:516
Vector< double > calculate_terms(void) const
Vector< Vector< double > > arrange_layers_input(const Vector< double > &, const Vector< Vector< double > > &) const
void set_parameters(const Vector< double > &)
Vector< size_t > arrange_inputs_indices(void) const
Returns the indices of the input variables.
Definition: variables.cpp:493
const Instances & get_instances(void) const
Returns a constant reference to the instances object composing this data set object.
Definition: data_set.cpp:222
size_t count_parameters_number(void) const
Returns the number of parameters (biases and synaptic weights) in the multilayer perceptron.