#include "root_mean_squared_error.h"

/// Checks that there are a neural network and a data set associated to the root mean squared error,
/// and that the numbers of inputs and outputs in the neural network match the numbers of inputs and
/// targets in the data set. If any condition fails, the method throws a std::logic_error.

void RootMeanSquaredError::check(void) const
{
   std::ostringstream buffer;

   // Neural network stuff

   if(!neural_network_pointer)
   {
      buffer << "OpenNN Exception: RootMeanSquaredError class.\n"
             << "void check(void) const method.\n"
             << "Pointer to neural network is NULL.\n";

      throw std::logic_error(buffer.str());
   }

   const MultilayerPerceptron* multilayer_perceptron_pointer = neural_network_pointer->get_multilayer_perceptron_pointer();

   if(!multilayer_perceptron_pointer)
   {
      buffer << "OpenNN Exception: RootMeanSquaredError class.\n"
             << "void check(void) const method.\n"
             << "Pointer to multilayer perceptron is NULL.\n";

      throw std::logic_error(buffer.str());
   }

   const size_t inputs_number = multilayer_perceptron_pointer->get_inputs_number();
   const size_t outputs_number = multilayer_perceptron_pointer->get_outputs_number();

   if(inputs_number == 0)
   {
      buffer << "OpenNN Exception: RootMeanSquaredError class.\n"
             << "void check(void) const method.\n"
             << "Number of inputs in multilayer perceptron object is zero.\n";

      throw std::logic_error(buffer.str());
   }

   if(outputs_number == 0)
   {
      buffer << "OpenNN Exception: RootMeanSquaredError class.\n"
             << "void check(void) const method.\n"
             << "Number of outputs in multilayer perceptron object is zero.\n";

      throw std::logic_error(buffer.str());
   }

   // Data set stuff

   if(!data_set_pointer)
   {
      buffer << "OpenNN Exception: RootMeanSquaredError class.\n"
             << "void check(void) const method.\n"
             << "Pointer to data set is NULL.\n";

      throw std::logic_error(buffer.str());
   }

   const Variables& variables = data_set_pointer->get_variables();

   const size_t data_set_inputs_number = variables.count_inputs_number();
   const size_t targets_number = variables.count_targets_number();

   if(data_set_inputs_number != inputs_number)
   {
      buffer << "OpenNN Exception: RootMeanSquaredError class.\n"
             << "void check(void) const method.\n"
             << "Number of inputs in neural network must be equal to number of inputs in data set.\n";

      throw std::logic_error(buffer.str());
   }

   if(outputs_number != targets_number)
   {
      buffer << "OpenNN Exception: RootMeanSquaredError class.\n"
             << "void check(void) const method.\n"
             << "Number of outputs in neural network must be equal to number of targets in data set.\n";

      throw std::logic_error(buffer.str());
   }
}

double RootMeanSquaredError::calculate_performance(void) const
{
   // ... gather the training indices and the inputs/outputs/targets working vectors (omitted in this listing) ...

   size_t training_index;
   double sum_squared_error = 0.0;
   int i = 0;

   #pragma omp parallel for private(i, training_index, inputs, outputs, targets) reduction(+:sum_squared_error)
   for(i = 0; i < (int)training_instances_number; i++)
   {
      training_index = training_indices[i];
      // ... compute the outputs for this instance and accumulate its squared error ...
   }

   return(sqrt(sum_squared_error/(double)training_instances_number));
}
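
The loop above sums the squared differences between the network outputs and the data set targets over the training instances, and the error is the square root of that sum divided by the number of training instances. A minimal standalone sketch of the same computation, using plain std::vector containers and made-up data rather than the OpenNN Vector and DataSet types:

#include <cmath>
#include <cstddef>
#include <iostream>
#include <vector>

// Illustrative only: each row of predictions/targets plays the role of the
// outputs/targets of one training instance.
double root_mean_squared_error(const std::vector< std::vector<double> >& predictions,
                               const std::vector< std::vector<double> >& targets)
{
    double sum_squared_error = 0.0;

    for(std::size_t i = 0; i < predictions.size(); i++)
    {
        for(std::size_t j = 0; j < predictions[i].size(); j++)
        {
            const double error = predictions[i][j] - targets[i][j];

            sum_squared_error += error*error;   // accumulate the squared error
        }
    }

    return std::sqrt(sum_squared_error/(double)predictions.size());
}

int main(void)
{
    const std::vector< std::vector<double> > predictions = {{1.0}, {2.0}, {3.0}};
    const std::vector< std::vector<double> > targets     = {{1.5}, {2.0}, {2.0}};

    std::cout << root_mean_squared_error(predictions, targets) << std::endl;

    return 0;
}

Note that, as in the method above, the normalization is by the number of instances, not by the number of instances times the number of outputs.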

double RootMeanSquaredError::calculate_performance(const Vector<double>& parameters) const
{
   // Control sentence (if debug)

   #ifndef NDEBUG

   std::ostringstream buffer;

   const size_t size = parameters.size();
   const size_t parameters_number = neural_network_pointer->count_parameters_number();

   if(size != parameters_number)
   {
      buffer << "OpenNN Exception: RootMeanSquaredError class.\n"
             << "double calculate_performance(const Vector<double>&) const method.\n"
             << "Size (" << size << ") must be equal to number of parameters (" << parameters_number << ").\n";

      throw std::logic_error(buffer.str());
   }

   #endif

   // ... gather the training indices and the inputs/outputs/targets working vectors (omitted in this listing) ...

   size_t training_index;
   double sum_squared_error = 0.0;
   int i = 0;

   #pragma omp parallel for private(i, training_index, inputs, outputs, targets) reduction(+:sum_squared_error)
   for(i = 0; i < (int)training_instances_number; i++)
   {
      training_index = training_indices[i];
      // ... compute the outputs for the given parameters and accumulate the squared error ...
   }

   return(sqrt(sum_squared_error/(double)training_instances_number));
}
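
This overload evaluates the error for an explicit parameter vector instead of the parameters currently stored in the network, which is what a training algorithm needs during a line search. A hedged usage sketch, assuming a term rmse already associated with a neural_network and a data set, and assuming the NeuralNetwork::arrange_parameters() accessor (not part of this listing):

// Evaluate the error at the current parameters and at a perturbed copy.
Vector<double> parameters = neural_network.arrange_parameters();

const double current_performance = rmse.calculate_performance(parameters);

parameters[0] += 1.0e-3;   // hypothetical perturbation of a single weight

const double perturbed_performance = rmse.calculate_performance(parameters);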

// From Vector<double> RootMeanSquaredError::calculate_gradient(void) const:

const double performance = calculate_performance();

size_t training_index;

#pragma omp parallel for private(i, training_index, inputs, targets, first_order_forward_propagation, output_gradient, \
layers_delta, particular_solution, homogeneous_solution, point_gradient)
for(i = 0; i < (int)training_instances_number; i++)
{
   training_index = training_indices[i];

   // ... forward-propagate this training instance (omitted in this listing) ...

   if(!has_conditions_layer)
   {
      output_gradient = (layers_activation[layers_number-1]-targets)/(training_instances_number*performance);
      layers_delta = calculate_layers_delta(layers_activation_derivative, output_gradient);
   }
   else
   {
      output_gradient = (particular_solution+homogeneous_solution*layers_activation[layers_number-1] - targets)/(training_instances_number*performance);
      layers_delta = calculate_layers_delta(layers_activation_derivative, homogeneous_solution, output_gradient);
   }

   // ... assemble point_gradient from layers_delta (omitted in this listing) ...

   gradient += point_gradient;
}
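
The 1/(training_instances_number*performance) factor in output_gradient comes from differentiating the root mean squared error with respect to the network outputs. With N training instances, outputs y_i and targets t_i:

E = \sqrt{\frac{1}{N} \sum_{i=1}^{N} \lVert y_i - t_i \rVert^2}
\qquad \Longrightarrow \qquad
\frac{\partial E}{\partial y_i} = \frac{y_i - t_i}{N E}

which is exactly the expression assigned to output_gradient in the branch without a conditions layer; the conditions-layer branch applies the same factor after composing the outputs with the particular and homogeneous solutions.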

// From double RootMeanSquaredError::calculate_generalization_performance(void) const:

if(generalization_instances_number == 0)
{
   return(0.0);
}

size_t generalization_index;
double generalization_performance = 0.0;
int i = 0;

#pragma omp parallel for private(i, generalization_index, inputs, outputs, targets) reduction(+ : generalization_performance)
for(i = 0; i < (int)generalization_instances_number; i++)
{
   generalization_index = generalization_indices[i];
   // ... accumulate the squared error of this generalization instance ...
}

return(sqrt(generalization_performance/(double)generalization_instances_number));

std::string RootMeanSquaredError::write_performance_term_type(void) const
{
   return("ROOT_MEAN_SQUARED_ERROR");
}

tinyxml2::XMLDocument* RootMeanSquaredError::to_XML(void) const
{
   std::ostringstream buffer;

   tinyxml2::XMLDocument* document = new tinyxml2::XMLDocument;

   // Root element

   tinyxml2::XMLElement* root_mean_squared_error_element = document->NewElement("RootMeanSquaredError");
   document->InsertFirstChild(root_mean_squared_error_element);

   // Display flag

   tinyxml2::XMLElement* display_element = document->NewElement("Display");
   root_mean_squared_error_element->LinkEndChild(display_element);

   buffer.str("");
   buffer << display;

   tinyxml2::XMLText* display_text = document->NewText(buffer.str().c_str());
   display_element->LinkEndChild(display_text);

   return(document);
}
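
For reference, the document assembled above holds a single Display flag under the root element; with the display member set to true, an illustrative rendering of the serialized output (not captured from an actual run) would be:

<RootMeanSquaredError>
    <Display>1</Display>
</RootMeanSquaredError>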

void RootMeanSquaredError::from_XML(const tinyxml2::XMLDocument& document)
{
   const tinyxml2::XMLElement* root_element = document.FirstChildElement("RootMeanSquaredError");

   if(!root_element)
   {
      std::ostringstream buffer;

      buffer << "OpenNN Exception: RootMeanSquaredError class.\n"
             << "void from_XML(const tinyxml2::XMLDocument&) method.\n"
             << "Root mean squared error element is NULL.\n";

      throw std::logic_error(buffer.str());
   }

   // Display flag

   const tinyxml2::XMLElement* element = root_element->FirstChildElement("Display");

   if(element)
   {
      const std::string new_display_string = element->GetText();

      try
      {
         set_display(new_display_string != "0");
      }
      catch(const std::logic_error& e)
      {
         std::cout << e.what() << std::endl;
      }
   }
}
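
A hedged round-trip sketch of the two serialization methods above, using the default constructor documented below; the document returned by to_XML() is heap-allocated, so the caller is responsible for deleting it:

RootMeanSquaredError root_mean_squared_error;

// Serialize the term into a TinyXML-2 document, then restore the term from that document.
tinyxml2::XMLDocument* document = root_mean_squared_error.to_XML();

root_mean_squared_error.from_XML(*document);

delete document;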
const Variables & get_variables(void) const
Returns a constant reference to the variables object composing this data set object.
double calculate_generalization_performance(void) const
Returns the root mean squared error of the multilayer perceptron measured on the generalization instances of the data set.
RootMeanSquaredError(void)
Default constructor.
size_t count_training_instances_number(void) const
Returns the number of instances in the data set which will be used for training.
size_t get_inputs_number(void) const
Returns the number of inputs to the multilayer perceptron.
size_t get_layers_number(void) const
Returns the number of layers in the multilayer perceptron.
tinyxml2::XMLDocument * to_XML(void) const
bool has_missing_values(void) const
size_t get_outputs_number(void) const
Returns the number of output neurons in the multilayer perceptron.
Vector< double > calculate_outputs(const Vector< double > &) const
const MissingValues & get_missing_values(void) const
Returns a reference to the missing values object in the data set.
virtual ~RootMeanSquaredError(void)
Destructor.
Vector< size_t > arrange_targets_indices(void) const
Returns the indices of the target variables.
Vector< size_t > arrange_training_indices(void) const
Returns the indices of the instances which will be used for training.
size_t count_generalization_instances_number(void) const
Returns the number of instances in the data set which will be used for generalization.
Vector< double > get_instance(const size_t &) const
bool has_conditions_layer(void) const
MultilayerPerceptron * get_multilayer_perceptron_pointer(void) const
Returns a pointer to the multilayer perceptron composing this neural network.
double calculate_sum_squared_error(const Vector< double > &) const
virtual Vector< double > calculate_homogeneous_solution(const Vector< double > &) const
Returns the homogeneous solution for applying boundary conditions.
Vector< double > calculate_gradient(void) const
Calculates the gradient of the root mean squared error function by means of the back-propagation algorithm.
std::string write_performance_term_type(void) const
Returns a string with the name of the root mean squared error performance type, "ROOT_MEAN_SQUARED_ERROR".
virtual Vector< double > calculate_particular_solution(const Vector< double > &) const
Returns the particular solution for applying boundary conditions.
ConditionsLayer * get_conditions_layer_pointer(void) const
Returns a pointer to the conditions layer composing this neural network.
double calculate_performance(void) const
size_t count_inputs_number(void) const
Returns the number of input variables of the data set.
Vector< Vector< Vector< double > > > calculate_first_order_forward_propagation(const Vector< double > &) const
size_t count_targets_number(void) const
Returns the number of target variables of the data set.
Matrix< double > calculate_Hessian(void) const
Vector< size_t > arrange_generalization_indices(void) const
Returns the indices of the instances which will be used for generalization.
void from_XML(const tinyxml2::XMLDocument &)
Vector< size_t > arrange_inputs_indices(void) const
Returns the indices of the input variables.
const Instances & get_instances(void) const
Returns a constant reference to the instances object composing this data set object.
size_t count_parameters_number(void) const
Returns the number of parameters (biases and synaptic weights) in the multilayer perceptron.
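
Putting the pieces together, a hedged end-to-end sketch of how this performance term is typically wired up. The set_neural_network_pointer and set_data_set_pointer setters are assumed to be inherited from the PerformanceTerm base class (they do not appear in this listing), and the data set and network setup are elided:

#include "data_set.h"
#include "neural_network.h"
#include "root_mean_squared_error.h"

using namespace OpenNN;

int main(void)
{
    DataSet data_set;              // ... load the data and mark training/generalization instances (omitted) ...
    NeuralNetwork neural_network;  // ... define the multilayer perceptron architecture (omitted) ...

    RootMeanSquaredError root_mean_squared_error;

    // Assumed setters from the PerformanceTerm base class.
    root_mean_squared_error.set_neural_network_pointer(&neural_network);
    root_mean_squared_error.set_data_set_pointer(&data_set);

    // Error on the training instances and its gradient with respect to the network parameters.
    const double performance = root_mean_squared_error.calculate_performance();
    const Vector<double> gradient = root_mean_squared_error.calculate_gradient();

    return 0;
}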