OpenNN 2.2
Open Neural Networks Library
minkowski_error.cpp
/****************************************************************************************************************/
/*                                                                                                              */
/*   OpenNN: Open Neural Networks Library                                                                       */
/*   www.artelnics.com/opennn                                                                                   */
/*                                                                                                              */
/*   M I N K O W S K I   E R R O R   C L A S S                                                                  */
/*                                                                                                              */
/*   Roberto Lopez                                                                                              */
/*   Artelnics - Making intelligent use of data                                                                 */
/*                                                                                                              */
/****************************************************************************************************************/

// OpenNN includes

#include "minkowski_error.h"

namespace OpenNN
{

// DEFAULT CONSTRUCTOR

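/// Default constructor.
/// It creates a Minkowski error performance term not associated with any neural network
/// and not measured on any data set.
/// It also initializes all the rest of class members to their default values.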
MinkowskiError::MinkowskiError(void) : PerformanceTerm()
{
    set_default();
}


// NEURAL NETWORK CONSTRUCTOR

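/// Neural network constructor.
/// It creates a Minkowski error performance term associated with a neural network object,
/// but not measured on any data set.
/// It also initializes all the rest of class members to their default values.
/// @param new_neural_network_pointer Pointer to a neural network object.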
MinkowskiError::MinkowskiError(NeuralNetwork* new_neural_network_pointer)
    : PerformanceTerm(new_neural_network_pointer)
{
    set_default();
}


// DATA SET CONSTRUCTOR

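/// Data set constructor.
/// It creates a Minkowski error performance term not associated with any neural network,
/// but to be measured on a data set object.
/// It also initializes all the rest of class members to their default values.
/// @param new_data_set_pointer Pointer to a data set object.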
MinkowskiError::MinkowskiError(DataSet* new_data_set_pointer)
    : PerformanceTerm(new_data_set_pointer)
{
    set_default();
}


// NEURAL NETWORK AND DATA SET CONSTRUCTOR

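/// Neural network and data set constructor.
/// It creates a Minkowski error performance term associated with a neural network object
/// and measured on a data set object.
/// It also initializes all the rest of class members to their default values.
/// @param new_neural_network_pointer Pointer to a neural network object.
/// @param new_data_set_pointer Pointer to a data set object.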
MinkowskiError::MinkowskiError(NeuralNetwork* new_neural_network_pointer, DataSet* new_data_set_pointer)
    : PerformanceTerm(new_neural_network_pointer, new_data_set_pointer)
{
    set_default();
}


// XML CONSTRUCTOR

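/// XML constructor.
/// It creates a Minkowski error performance term with default values,
/// and then loads its members from a TinyXML document.
/// @param Minkowski_error_document TinyXML document containing the members of the object.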
MinkowskiError::MinkowskiError(const tinyxml2::XMLDocument& Minkowski_error_document)
    : PerformanceTerm(Minkowski_error_document)
{
    set_default();

    from_XML(Minkowski_error_document);
}


// DESTRUCTOR

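/// Destructor.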
MinkowskiError::~MinkowskiError(void)
{
}


// METHODS

// double get_Minkowski_parameter(void) const method

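/// Returns the Minkowski exponent value used to calculate the error.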
double MinkowskiError::get_Minkowski_parameter(void) const
{
    return(Minkowski_parameter);
}


// void set_default(void) method

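/// Sets the default values to this Minkowski error object:
/// Minkowski parameter = 1.5, display = true.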
void MinkowskiError::set_default(void)
{
    Minkowski_parameter = 1.5;

    display = true;
}


// void set_Minkowski_parameter(const double&) method

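/// Sets a new Minkowski exponent value to be used in order to calculate the error.
/// This value must be comprised between 1 and 2; otherwise an exception is thrown.
/// @param new_Minkowski_parameter Minkowski exponent value.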
void MinkowskiError::set_Minkowski_parameter(const double& new_Minkowski_parameter)
{
    // Control sentence

    if(new_Minkowski_parameter < 1.0 || new_Minkowski_parameter > 2.0)
    {
        std::ostringstream buffer;

        buffer << "OpenNN Error. MinkowskiError class.\n"
               << "void set_Minkowski_parameter(const double&) method.\n"
               << "The Minkowski parameter must be comprised between 1 and 2.\n";

        throw std::logic_error(buffer.str());
    }

    // Set Minkowski parameter

    Minkowski_parameter = new_Minkowski_parameter;
}


// void check(void) const method

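/// Checks that there are a neural network and a data set associated to the Minkowski error,
/// and that the numbers of inputs and outputs in the neural network are equal to the numbers
/// of inputs and targets in the data set.
/// If any of those conditions does not hold, an exception is thrown.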
void MinkowskiError::check(void) const
{
    std::ostringstream buffer;

    // Neural network stuff

    if(!neural_network_pointer)
    {
        buffer << "OpenNN Exception: MinkowskiError class.\n"
               << "void check(void) const method.\n"
               << "Pointer to neural network is NULL.\n";

        throw std::logic_error(buffer.str());
    }

    const MultilayerPerceptron* multilayer_perceptron_pointer = neural_network_pointer->get_multilayer_perceptron_pointer();

    if(!multilayer_perceptron_pointer)
    {
        buffer << "OpenNN Exception: MinkowskiError class.\n"
               << "void check(void) const method.\n"
               << "Pointer to multilayer perceptron is NULL.\n";

        throw std::logic_error(buffer.str());
    }

    const size_t inputs_number = multilayer_perceptron_pointer->get_inputs_number();
    const size_t outputs_number = multilayer_perceptron_pointer->get_outputs_number();

    if(inputs_number == 0)
    {
        buffer << "OpenNN Exception: MinkowskiError class.\n"
               << "void check(void) const method.\n"
               << "Number of inputs in multilayer perceptron object is zero.\n";

        throw std::logic_error(buffer.str());
    }

    if(outputs_number == 0)
    {
        buffer << "OpenNN Exception: MinkowskiError class.\n"
               << "void check(void) const method.\n"
               << "Number of outputs in multilayer perceptron object is zero.\n";

        throw std::logic_error(buffer.str());
    }

    // Data set stuff

    if(!data_set_pointer)
    {
        buffer << "OpenNN Exception: MinkowskiError class.\n"
               << "void check(void) const method.\n"
               << "Pointer to data set is NULL.\n";

        throw std::logic_error(buffer.str());
    }

    // Minkowski error stuff

    const Variables& variables = data_set_pointer->get_variables();

    const size_t data_set_inputs_number = variables.count_inputs_number();
    const size_t targets_number = variables.count_targets_number();

    if(data_set_inputs_number != inputs_number)
    {
        buffer << "OpenNN Exception: MinkowskiError class.\n"
               << "void check(void) const method.\n"
               << "Number of inputs in neural network must be equal to number of inputs in data set.\n";

        throw std::logic_error(buffer.str());
    }

    if(outputs_number != targets_number)
    {
        buffer << "OpenNN Exception: MinkowskiError class.\n"
               << "void check(void) const method.\n"
               << "Number of outputs in neural network must be equal to number of targets in data set.\n";

        throw std::logic_error(buffer.str());
    }
}


// double calculate_performance(void) const method

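/// Returns the Minkowski error performance of the neural network measured on the training
/// instances of the data set. For each training instance, the error term is the Minkowski
/// norm of the difference between outputs and targets,
/// (sum_j |output_j - target_j|^p)^(1/p), where p is the Minkowski parameter.
/// Instances with missing values are skipped.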
double MinkowskiError::calculate_performance(void) const
{
    // Control sentence (if debug)

    #ifndef NDEBUG

    check();

    #endif

    // Neural network stuff

    const MultilayerPerceptron* multilayer_perceptron_pointer = neural_network_pointer->get_multilayer_perceptron_pointer();

    const size_t inputs_number = multilayer_perceptron_pointer->get_inputs_number();
    const size_t outputs_number = multilayer_perceptron_pointer->get_outputs_number();

    // Data set

    const Instances& instances = data_set_pointer->get_instances();

    const size_t training_instances_number = instances.count_training_instances_number();

    const Vector<size_t> training_indices = instances.arrange_training_indices();

    size_t training_index;

    const Variables& variables = data_set_pointer->get_variables();

    const Vector<size_t> inputs_indices = variables.arrange_inputs_indices();
    const Vector<size_t> targets_indices = variables.arrange_targets_indices();

    const MissingValues& missing_values = data_set_pointer->get_missing_values();

    Vector<double> inputs(inputs_number);
    Vector<double> outputs(outputs_number);
    Vector<double> targets(outputs_number);

    double Minkowski_error = 0.0;

    int i = 0;

    #pragma omp parallel for private(i, training_index, inputs, outputs, targets) reduction(+ : Minkowski_error)

    for(i = 0; i < (int)training_instances_number; i++)
    {
        training_index = training_indices[i];

        if(missing_values.has_missing_values(training_index))
        {
            continue;
        }

        // Input vector

        inputs = data_set_pointer->get_instance(training_index, inputs_indices);

        // Output vector

        outputs = multilayer_perceptron_pointer->calculate_outputs(inputs);

        // Target vector

        targets = data_set_pointer->get_instance(training_index, targets_indices);

        // Minkowski error

        Minkowski_error += (outputs - targets).calculate_p_norm(Minkowski_parameter);
    }

    return(Minkowski_error);
}


// double calculate_performance(const Vector<double>&) const method

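/// Returns the Minkowski error of the neural network measured on the training instances of
/// the data set, computed for a given vector of parameters rather than the current ones.
/// The parameters of the neural network are not modified.
/// @param parameters Vector of parameters for the neural network.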
double MinkowskiError::calculate_performance(const Vector<double>& parameters) const
{
    // Control sentence (if debug)

    #ifndef NDEBUG

    check();

    #endif

    #ifndef NDEBUG

    const size_t size = parameters.size();

    const size_t parameters_number = neural_network_pointer->count_parameters_number();

    if(size != parameters_number)
    {
        std::ostringstream buffer;

        buffer << "OpenNN Exception: MinkowskiError class.\n"
               << "double calculate_performance(const Vector<double>&) const method.\n"
               << "Size (" << size << ") must be equal to number of parameters (" << parameters_number << ").\n";

        throw std::logic_error(buffer.str());
    }

    #endif

    // Neural network stuff

    const MultilayerPerceptron* multilayer_perceptron_pointer = neural_network_pointer->get_multilayer_perceptron_pointer();

    const size_t inputs_number = multilayer_perceptron_pointer->get_inputs_number();
    const size_t outputs_number = multilayer_perceptron_pointer->get_outputs_number();

    // Data set

    const Instances& instances = data_set_pointer->get_instances();
    const size_t training_instances_number = instances.count_training_instances_number();

    const Vector<size_t> training_indices = instances.arrange_training_indices();

    size_t training_index;

    const Variables& variables = data_set_pointer->get_variables();

    const Vector<size_t> inputs_indices = variables.arrange_inputs_indices();
    const Vector<size_t> targets_indices = variables.arrange_targets_indices();

    const MissingValues& missing_values = data_set_pointer->get_missing_values();

    Vector<double> inputs(inputs_number);
    Vector<double> outputs(outputs_number);
    Vector<double> targets(outputs_number);

    double Minkowski_error = 0.0;

    int i = 0;

    #pragma omp parallel for private(i, training_index, inputs, outputs, targets) reduction(+ : Minkowski_error)

    for(i = 0; i < (int)training_instances_number; i++)
    {
        training_index = training_indices[i];

        if(missing_values.has_missing_values(training_index))
        {
            continue;
        }

        // Input vector

        inputs = data_set_pointer->get_instance(training_index, inputs_indices);

        // Output vector

        outputs = multilayer_perceptron_pointer->calculate_outputs(inputs, parameters);

        // Target vector

        targets = data_set_pointer->get_instance(training_index, targets_indices);

        // Minkowski error

        Minkowski_error += (outputs - targets).calculate_p_norm(Minkowski_parameter);
    }

    return(Minkowski_error);
}


// double calculate_generalization_performance(void) const method

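/// Returns the Minkowski error of the neural network measured on the generalization
/// instances of the data set.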
double MinkowskiError::calculate_generalization_performance(void) const
{
    // Control sentence (if debug)

    #ifndef NDEBUG

    check();

    #endif

    // Neural network

    const MultilayerPerceptron* multilayer_perceptron_pointer = neural_network_pointer->get_multilayer_perceptron_pointer();

    const size_t inputs_number = multilayer_perceptron_pointer->get_inputs_number();
    const size_t outputs_number = multilayer_perceptron_pointer->get_outputs_number();

    // Data set

    const Instances& instances = data_set_pointer->get_instances();

    const size_t generalization_instances_number = instances.count_generalization_instances_number();

    const Vector<size_t> generalization_indices = instances.arrange_generalization_indices();

    size_t generalization_index;

    const Variables& variables = data_set_pointer->get_variables();

    const Vector<size_t> inputs_indices = variables.arrange_inputs_indices();
    const Vector<size_t> targets_indices = variables.arrange_targets_indices();

    const MissingValues& missing_values = data_set_pointer->get_missing_values();

    // Performance functional

    Vector<double> inputs(inputs_number);
    Vector<double> outputs(outputs_number);
    Vector<double> targets(outputs_number);

    double generalization_performance = 0.0;

    int i = 0;

    #pragma omp parallel for private(i, generalization_index, inputs, outputs, targets) reduction(+ : generalization_performance)

    for(i = 0; i < (int)generalization_instances_number; i++)
    {
        generalization_index = generalization_indices[i];

        if(missing_values.has_missing_values(generalization_index))
        {
            continue;
        }

        // Input vector

        inputs = data_set_pointer->get_instance(generalization_index, inputs_indices);

        // Output vector

        outputs = multilayer_perceptron_pointer->calculate_outputs(inputs);

        // Target vector

        targets = data_set_pointer->get_instance(generalization_index, targets_indices);

        // Minkowski error

        generalization_performance += (outputs - targets).calculate_p_norm(Minkowski_parameter);
    }

    return(generalization_performance);
}


// Vector<double> calculate_gradient(void) const method

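/// Returns the gradient of the Minkowski error with respect to the neural network
/// parameters, computed by back-propagating the error over the training instances
/// of the data set.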
Vector<double> MinkowskiError::calculate_gradient(void) const
{
    // Control sentence (if debug)

    #ifndef NDEBUG

    check();

    #endif

    // Neural network stuff

    const MultilayerPerceptron* multilayer_perceptron_pointer = neural_network_pointer->get_multilayer_perceptron_pointer();

    const bool has_conditions_layer = neural_network_pointer->has_conditions_layer();

    const ConditionsLayer* conditions_layer_pointer = has_conditions_layer ? neural_network_pointer->get_conditions_layer_pointer() : NULL;

    const size_t inputs_number = multilayer_perceptron_pointer->get_inputs_number();
    const size_t outputs_number = multilayer_perceptron_pointer->get_outputs_number();

    const size_t layers_number = multilayer_perceptron_pointer->get_layers_number();

    const size_t neural_parameters_number = multilayer_perceptron_pointer->count_parameters_number();

    Vector< Vector< Vector<double> > > first_order_forward_propagation(2);

    Vector<double> particular_solution;
    Vector<double> homogeneous_solution;

    // Data set stuff

    const Instances& instances = data_set_pointer->get_instances();

    const size_t training_instances_number = instances.count_training_instances_number();

    const Vector<size_t> training_indices = instances.arrange_training_indices();

    size_t training_index;

    const Variables& variables = data_set_pointer->get_variables();

    const Vector<size_t> inputs_indices = variables.arrange_inputs_indices();
    const Vector<size_t> targets_indices = variables.arrange_targets_indices();

    const MissingValues& missing_values = data_set_pointer->get_missing_values();

    Vector<double> inputs(inputs_number);
    Vector<double> targets(outputs_number);

    // Minkowski error stuff

    Vector<double> output_gradient(outputs_number);

    Vector< Matrix<double> > layers_combination_parameters_Jacobian;

    Vector< Vector<double> > layers_inputs(layers_number);
    Vector< Vector<double> > layers_delta;

    Vector<double> point_gradient(neural_parameters_number, 0.0);

    Vector<double> gradient(neural_parameters_number, 0.0);

    int i = 0;

    #pragma omp parallel for private(i, training_index, inputs, targets, first_order_forward_propagation, layers_inputs, layers_combination_parameters_Jacobian, \
     output_gradient, layers_delta, particular_solution, homogeneous_solution, point_gradient)

    for(i = 0; i < (int)training_instances_number; i++)
    {
        training_index = training_indices[i];

        if(missing_values.has_missing_values(training_index))
        {
            continue;
        }

        // Data set

        inputs = data_set_pointer->get_instance(training_index, inputs_indices);

        targets = data_set_pointer->get_instance(training_index, targets_indices);

        // Neural network

        first_order_forward_propagation = multilayer_perceptron_pointer->calculate_first_order_forward_propagation(inputs);

        const Vector< Vector<double> >& layers_activation = first_order_forward_propagation[0];
        const Vector< Vector<double> >& layers_activation_derivative = first_order_forward_propagation[1];

        layers_inputs = multilayer_perceptron_pointer->arrange_layers_input(inputs, layers_activation);

        layers_combination_parameters_Jacobian = multilayer_perceptron_pointer->calculate_layers_combination_parameters_Jacobian(layers_inputs);

        // Performance functional

        if(!has_conditions_layer)
        {
            output_gradient = (layers_activation[layers_number-1] - targets).calculate_p_norm_gradient(Minkowski_parameter);

            layers_delta = calculate_layers_delta(layers_activation_derivative, output_gradient);
        }
        else
        {
            particular_solution = conditions_layer_pointer->calculate_particular_solution(inputs);
            homogeneous_solution = conditions_layer_pointer->calculate_homogeneous_solution(inputs);

            output_gradient = (particular_solution + homogeneous_solution*layers_activation[layers_number-1] - targets).calculate_pow(Minkowski_parameter-1.0)*Minkowski_parameter;

            layers_delta = calculate_layers_delta(layers_activation_derivative, homogeneous_solution, output_gradient);
        }

        point_gradient = calculate_point_gradient(layers_combination_parameters_Jacobian, layers_delta);

        #pragma omp critical

        gradient += point_gradient;
    }

    return(gradient);
}


// Matrix<double> calculate_Hessian(void) const method

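/// Returns the Hessian matrix of the Minkowski error.
/// This method is not yet implemented, and an empty matrix is returned.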
Matrix<double> MinkowskiError::calculate_Hessian(void) const
{
    // Control sentence (if debug)

    #ifndef NDEBUG

    check();

    #endif

    Matrix<double> Hessian;

    return(Hessian);
}


// std::string write_performance_term_type(void) const method

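/// Returns a string with the name of the Minkowski error performance type, "MINKOWSKI_ERROR".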
std::string MinkowskiError::write_performance_term_type(void) const
{
    return("MINKOWSKI_ERROR");
}


// tinyxml2::XMLDocument* to_XML(void) const method

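/// Serializes the Minkowski error object into an XML document of the TinyXML library.
/// The document contains the Minkowski parameter and the display flag.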
tinyxml2::XMLDocument* MinkowskiError::to_XML(void) const
{
    std::ostringstream buffer;

    tinyxml2::XMLDocument* document = new tinyxml2::XMLDocument;

    // Minkowski error

    tinyxml2::XMLElement* Minkowski_error_element = document->NewElement("MinkowskiError");

    document->InsertFirstChild(Minkowski_error_element);

    // Minkowski parameter
    {
        tinyxml2::XMLElement* Minkowski_parameter_element = document->NewElement("MinkowskiParameter");
        Minkowski_error_element->LinkEndChild(Minkowski_parameter_element);

        buffer.str("");
        buffer << Minkowski_parameter;

        tinyxml2::XMLText* Minkowski_parameter_text = document->NewText(buffer.str().c_str());
        Minkowski_parameter_element->LinkEndChild(Minkowski_parameter_text);
    }

    // Display
    {
        tinyxml2::XMLElement* display_element = document->NewElement("Display");
        Minkowski_error_element->LinkEndChild(display_element);

        buffer.str("");
        buffer << display;

        tinyxml2::XMLText* display_text = document->NewText(buffer.str().c_str());
        display_element->LinkEndChild(display_text);
    }

    return(document);
}


// void from_XML(const tinyxml2::XMLDocument&) method

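/// Loads a Minkowski error object from an XML document of the TinyXML library.
/// @param document TinyXML document containing the members of the object.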
void MinkowskiError::from_XML(const tinyxml2::XMLDocument& document)
{
    const tinyxml2::XMLElement* root_element = document.FirstChildElement("MinkowskiError");

    if(!root_element)
    {
        std::ostringstream buffer;

        buffer << "OpenNN Exception: MinkowskiError class.\n"
               << "void from_XML(const tinyxml2::XMLDocument&) method.\n"
               << "Minkowski error element is NULL.\n";

        throw std::logic_error(buffer.str());
    }

    // Minkowski parameter
    {
        const tinyxml2::XMLElement* element = root_element->FirstChildElement("MinkowskiParameter");

        if(element)
        {
            const double new_Minkowski_parameter = atof(element->GetText());

            try
            {
                set_Minkowski_parameter(new_Minkowski_parameter);
            }
            catch(const std::logic_error& e)
            {
                std::cout << e.what() << std::endl;
            }
        }
    }

    // Display
    {
        const tinyxml2::XMLElement* display_element = root_element->FirstChildElement("Display");

        if(display_element)
        {
            const std::string new_display_string = display_element->GetText();

            try
            {
                set_display(new_display_string != "0");
            }
            catch(const std::logic_error& e)
            {
                std::cout << e.what() << std::endl;
            }
        }
    }
}

}

// OpenNN: Open Neural Networks Library.
// Copyright (c) 2005-2015 Roberto Lopez.
//
// This library is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
// License as published by the Free Software Foundation; either
// version 2.1 of the License, or any later version.
//
// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
// Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public
// License along with this library; if not, write to the Free Software
// Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA