16 #include "mean_squared_error.h"
120 std::ostringstream buffer;
126 buffer <<
"OpenNN Exception: MeanSquaredError class.\n"
127 <<
"void check(void) const method.\n"
128 <<
"Pointer to neural network is NULL.\n";
130 throw std::logic_error(buffer.str());
135 if(!multilayer_perceptron_pointer)
137 buffer <<
"OpenNN Exception: MeanSquaredError class.\n"
138 <<
"void check(void) const method.\n"
139 <<
"Pointer to multilayer perceptron is NULL.\n";
141 throw std::logic_error(buffer.str());
147 if(inputs_number == 0)
149 buffer <<
"OpenNN Exception: MeanSquaredError class.\n"
150 <<
"void check(void) const method.\n"
151 <<
"Number of inputs in multilayer perceptron object is zero.\n";
153 throw std::logic_error(buffer.str());
156 if(outputs_number == 0)
158 buffer <<
"OpenNN Exception: MeanSquaredError class.\n"
159 <<
"void check(void) const method.\n"
160 <<
"Number of outputs in multilayer perceptron object is zero.\n";
162 throw std::logic_error(buffer.str());
169 buffer <<
"OpenNN Exception: MeanSquaredError class.\n"
170 <<
"void check(void) const method.\n"
171 <<
"Pointer to data set is NULL.\n";
173 throw std::logic_error(buffer.str());
183 if(inputs_number != data_set_inputs_number)
185 buffer <<
"OpenNN Exception: MeanSquaredError class.\n"
186 <<
"void check(void) const method.\n"
187 <<
"Number of inputs in multilayer perceptron must be equal to number of inputs in data set.\n";
189 throw std::logic_error(buffer.str());
192 if(outputs_number != data_set_targets_number)
194 buffer <<
"OpenNN Exception: MeanSquaredError class.\n"
195 <<
"void check(void) const method.\n"
196 <<
"Number of outputs in multilayer perceptron must be equal to number of targets in data set.\n";
198 throw std::logic_error(buffer.str());
232 size_t training_index;
249 double sum_squared_error = 0.0;
251 #pragma omp parallel for private(i, training_index, inputs, outputs, targets) reduction(+:sum_squared_error)
253 for(i = 0; i < (int)training_instances_number; i++)
255 training_index = training_indices[i];
279 return(sum_squared_error/(
double)training_instances_number);
301 const size_t size = parameters.size();
305 if(size != parameters_number)
307 std::ostringstream buffer;
309 buffer <<
"OpenNN Exception: MeanSquaredError class.\n"
310 <<
"double calculate_performance(const Vector<double>&) const method.\n"
311 <<
"Size (" << size <<
") must be equal to number of parameters (" << parameters_number <<
").\n";
313 throw std::logic_error(buffer.str());
333 size_t training_index;
348 double sum_squared_error = 0.0;
352 #pragma omp parallel for private(i, training_index, inputs, outputs, targets) reduction(+:sum_squared_error)
354 for(i = 0; i < (int)training_instances_number; i++)
356 training_index = training_indices[i];
380 return(sum_squared_error/(
double)training_instances_number);
408 if(generalization_instances_number == 0)
415 size_t generalization_index;
429 double generalization_performance = 0.0;
433 #pragma omp parallel for private(i, generalization_index, inputs, outputs, targets) reduction(+:generalization_performance)
435 for(i = 0; i < (int)generalization_instances_number; i++)
437 generalization_index = generalization_indices[i];
461 return(generalization_performance/(
double)generalization_instances_number);
508 size_t training_index;
532 #pragma omp parallel for private(i, training_index, inputs, targets, first_order_forward_propagation, \
533 output_gradient, layers_delta, particular_solution, homogeneous_solution, point_gradient)
535 for(i = 0; i < (int)training_instances_number; i++)
537 training_index = training_indices[i];
553 if(!has_conditions_layer)
555 output_gradient = (layers_activation[layers_number-1]-targets)*(2.0/(
double)training_instances_number);
564 output_gradient = (particular_solution+homogeneous_solution*layers_activation[layers_number-1] - targets)*(2.0/(
double)training_instances_number);
566 layers_delta =
calculate_layers_delta(layers_activation_derivative, homogeneous_solution, output_gradient);
572 gradient += point_gradient;
610 return(first_order_performance);
634 return(second_order_performance);
668 size_t training_index;
687 #pragma omp parallel for private(i, training_index, inputs, outputs, targets)
689 for(i = 0; i < (int)training_instances_number; i++)
691 training_index = training_indices[i];
715 return(performance_terms/sqrt((
double)training_instances_number));
737 std::ostringstream buffer;
739 const size_t size = network_parameters.size();
743 if(size != parameters_number)
745 buffer <<
"OpenNN Exception: MeanSquaredError class.\n"
746 <<
"double calculate_terms(const Vector<double>&) const method.\n"
747 <<
"Size (" << size <<
") must be equal to number of multilayer perceptron parameters (" << parameters_number <<
").\n";
749 throw std::logic_error(buffer.str());
809 size_t training_index;
831 Matrix<double> terms_Jacobian(training_instances_number, neural_parameters_number);
837 #pragma omp parallel for private(i, training_index, inputs, targets, first_order_forward_propagation, \
838 term, term_norm, output_gradient, layers_delta, particular_solution, homogeneous_solution, point_gradient)
840 for(i = 0; i < (int)training_instances_number; i++)
842 training_index = training_indices[i];
858 if(!has_conditions_layer)
860 const Vector<double>& outputs = first_order_forward_propagation[0][layers_number-1];
862 term = (outputs-targets);
867 output_gradient.
set(outputs_number, 0.0);
871 output_gradient = term/term_norm;
881 term = (particular_solution+homogeneous_solution*layers_activation[layers_number-1] - targets)/sqrt((
double)training_instances_number);
886 output_gradient.
set(outputs_number, 0.0);
890 output_gradient = term/term_norm;
893 layers_delta =
calculate_layers_delta(layers_activation_derivative, homogeneous_solution, output_gradient);
898 terms_Jacobian.
set_row(i, point_gradient);
901 return(terms_Jacobian/sqrt((
double)training_instances_number));
927 return(first_order_terms);
937 return(
"MEAN_SQUARED_ERROR");
948 std::ostringstream buffer;
950 tinyxml2::XMLDocument* document =
new tinyxml2::XMLDocument;
954 tinyxml2::XMLElement* mean_squared_error_element = document->NewElement(
"MeanSquaredError");
956 document->InsertFirstChild(mean_squared_error_element);
960 tinyxml2::XMLElement* element = document->NewElement(
"Display");
961 mean_squared_error_element->LinkEndChild(element);
966 tinyxml2::XMLText* text = document->NewText(buffer.str().c_str());
967 element->LinkEndChild(text);
virtual ~MeanSquaredError(void)
Destructor.
size_t count_parameters_number(void) const
Vector< double > calculate_terms(void) const
const Variables & get_variables(void) const
Returns a constant reference to the variables object composing this data set object.
Vector< double > calculate_gradient(void) const
size_t count_training_instances_number(void) const
Returns the number of instances in the data set which will be used for training.
size_t get_inputs_number(void) const
Returns the number of inputs to the multilayer perceptron.
void set(void)
Sets the size of a vector to zero.
size_t get_layers_number(void) const
Returns the number of layers in the multilayer perceptron.
bool has_missing_values(void) const
size_t get_outputs_number(void) const
Returns the number of output neurons in the multilayer perceptron.
Vector< double > calculate_outputs(const Vector< double > &) const
tinyxml2::XMLDocument * to_XML(void) const
FirstOrderTerms calculate_first_order_terms(void) const
Returns a first order terms performance structure, which contains the values and the Jacobian of the performance terms function.
const MissingValues & get_missing_values(void) const
Returns a reference to the missing values object in the data set.
Vector< size_t > arrange_targets_indices(void) const
Returns the indices of the target variables.
Vector< size_t > arrange_training_indices(void) const
Returns the indices of the instances which will be used for training.
size_t count_generalization_instances_number(void) const
Returns the number of instances in the data set which will be used for generalization.
Vector< double > get_instance(const size_t &) const
bool has_conditions_layer(void) const
MultilayerPerceptron * get_multilayer_perceptron_pointer(void) const
Returns a pointer to the multilayer perceptron composing this neural network.
double calculate_norm(void) const
Returns the vector norm.
Matrix< double > calculate_Hessian(void) const
double calculate_generalization_performance(void) const
double calculate_sum_squared_error(const Vector< double > &) const
std::string write_performance_term_type(void) const
Returns a string with the name of the mean squared error performance type, "MEAN_SQUARED_ERROR".
virtual Vector< double > calculate_homogeneous_solution(const Vector< double > &) const
Returns the homogeneous solution for applying boundary conditions.
double calculate_performance(void) const
Returns the mean squared error of a neural network on a data set.
virtual Vector< double > calculate_particular_solution(const Vector< double > &) const
Returns the particular solution for applying boundary conditions.
double calculate_distance(const Vector< double > &) const
ConditionsLayer * get_conditions_layer_pointer(void) const
Returns a pointer to the conditions layer composing this neural network.
size_t count_inputs_number(void) const
Returns the number of input variables of the data set.
FirstOrderPerformance calculate_first_order_performance(void) const
Vector< Vector< Vector< double > > > calculate_first_order_forward_propagation(const Vector< double > &) const
size_t count_targets_number(void) const
Returns the number of target variables of the data set.
SecondOrderPerformance calculate_second_order_performance(void) const
void set_row(const size_t &, const Vector< T > &)
Matrix< double > calculate_terms_Jacobian(void) const
Vector< size_t > arrange_generalization_indices(void) const
Returns the indices of the instances which will be used for generalization.
void set_parameters(const Vector< double > &)
Vector< size_t > arrange_inputs_indices(void) const
Returns the indices of the input variables.
const Instances & get_instances(void) const
Returns a constant reference to the instances object composing this data set object.
size_t count_parameters_number(void) const
Returns the number of parameters (biases and synaptic weights) in the multilayer perceptron.