16 #include "normalized_squared_error.h"
117 std::ostringstream buffer;
123 buffer <<
"OpenNN Exception: NormalizedquaredError class.\n"
124 <<
"void check(void) const method.\n"
125 <<
"Pointer to neural network is NULL.\n";
127 throw std::logic_error(buffer.str());
132 if(!multilayer_perceptron_pointer)
134 buffer <<
"OpenNN Exception: NormalizedquaredError class.\n"
135 <<
"void check(void) const method.\n"
136 <<
"Pointer to multilayer perceptron is NULL.\n";
138 throw std::logic_error(buffer.str());
141 const size_t multilayer_perceptron_inputs_number = multilayer_perceptron_pointer->
get_inputs_number();
142 const size_t multilayer_perceptron_outputs_number = multilayer_perceptron_pointer->
get_outputs_number();
144 if(multilayer_perceptron_inputs_number == 0)
146 buffer <<
"OpenNN Exception: NormalizedquaredError class.\n"
147 <<
"void check(void) const method.\n"
148 <<
"Number of inputs in multilayer perceptron object is zero.\n";
150 throw std::logic_error(buffer.str());
153 if(multilayer_perceptron_outputs_number == 0)
155 buffer <<
"OpenNN Exception: NormalizedquaredError class.\n"
156 <<
"void check(void) const method.\n"
157 <<
"Number of outputs in multilayer perceptron object is zero.\n";
159 throw std::logic_error(buffer.str());
166 buffer <<
"OpenNN Exception: NormalizedquaredError class.\n"
167 <<
"void check(void) const method.\n"
168 <<
"Pointer to data set is NULL.\n";
170 throw std::logic_error(buffer.str());
180 if(multilayer_perceptron_inputs_number != data_set_inputs_number)
182 buffer <<
"OpenNN Exception: NormalizedquaredError class.\n"
183 <<
"void check(void) const method.\n"
184 <<
"Number of inputs in multilayer perceptron (" << multilayer_perceptron_inputs_number <<
") must be equal to number of inputs in data set (" << data_set_inputs_number <<
").\n";
186 throw std::logic_error(buffer.str());
189 if(multilayer_perceptron_outputs_number != data_set_targets_number)
191 buffer <<
"OpenNN Exception: NormalizedquaredError class.\n"
192 <<
"void check(void) const method.\n"
193 <<
"Number of outputs in multilayer perceptron (" << multilayer_perceptron_outputs_number <<
") must be equal to number of targets in data set (" << data_set_targets_number <<
").\n";
195 throw std::logic_error(buffer.str());
229 size_t training_index;
250 double sum_squared_error = 0.0;
251 double normalization_coefficient = 0.0;
253 #pragma omp parallel for private(i, training_index, inputs, outputs, targets) reduction(+ : sum_squared_error, normalization_coefficient)
255 for(i = 0; i < (int)training_instances_number; i++)
257 training_index = training_indices[i];
285 if(normalization_coefficient < 1.0e-99)
287 std::ostringstream buffer;
289 buffer <<
"OpenNN Exception: NormalizedSquaredError class.\n"
290 <<
"double calculate_performance(void) const method.\n"
291 <<
"Normalization coefficient is zero.\n";
293 throw std::logic_error(buffer.str());
296 return(sum_squared_error/normalization_coefficient);
318 std::ostringstream buffer;
320 const size_t size = parameters.size();
324 if(size != parameters_number)
326 buffer <<
"OpenNN Exception: NormalizedSquaredError class.\n"
327 <<
"double calculate_performance(const Vector<double>&) method.\n"
328 <<
"Size (" << size <<
") must be equal to number of parameters (" << parameters_number <<
").\n";
330 throw std::logic_error(buffer.str());
350 size_t training_index;
367 double sum_squared_error = 0.0;
368 double normalization_coefficient = 0.0;
372 #pragma omp parallel for private(i, training_index, inputs, outputs, targets) reduction(+ : sum_squared_error, normalization_coefficient)
374 for(i = 0; i < (int)training_instances_number; i++)
376 training_index = training_indices[i];
404 if(normalization_coefficient < 1.0e-99)
406 std::ostringstream buffer;
408 buffer <<
"OpenNN Exception: NormalizedSquaredError class.\n"
409 <<
"double calculate_performance(const Vector<double>&) const method.\n"
410 <<
"Normalization coefficient is zero.\n";
412 throw std::logic_error(buffer.str());
415 return(sum_squared_error/normalization_coefficient);
444 if(generalization_instances_number < 2)
451 size_t generalization_index;
466 double sum_squared_error = 0.0;
467 double normalization_coefficient = 0.0;
471 #pragma omp parallel for private(i, generalization_index, inputs, outputs, targets) reduction(+ : sum_squared_error, normalization_coefficient)
473 for(i = 0; i < (int)generalization_instances_number; i++)
475 generalization_index = generalization_indices[i];
503 if(normalization_coefficient < 1.0e-99)
505 std::ostringstream buffer;
507 buffer <<
"OpenNN Exception: NormalizedSquaredError class.\n"
508 <<
"double calculate_generalization_performance(void) const method.\n"
509 <<
"Normalization coefficient is zero.\n";
511 throw std::logic_error(buffer.str());
514 return(sum_squared_error/normalization_coefficient);
565 size_t training_index;
587 double normalization_coefficient = 0.0;
595 #pragma omp parallel for private(i, training_index, inputs, targets, first_order_forward_propagation, layers_inputs, layers_combination_parameters_Jacobian,\
596 output_gradient, layers_delta, particular_solution, homogeneous_solution, point_gradient) \
597 reduction(+ : normalization_coefficient)
599 for(i = 0; i < (int)training_instances_number; i++)
601 training_index = training_indices[i];
627 if(!has_conditions_layer)
629 output_gradient = (layers_activation[layers_number-1]-targets)*2.0;
638 output_gradient = (particular_solution+homogeneous_solution*layers_activation[layers_number-1] - targets)*2.0;
640 layers_delta =
calculate_layers_delta(layers_activation_derivative, homogeneous_solution, output_gradient);
647 gradient += point_gradient;
652 if(normalization_coefficient < 1.0e-99)
654 std::ostringstream buffer;
656 buffer <<
"OpenNN Exception: NormalizedSquaredError class.\n"
657 <<
"Vector<double> calculate_gradient(void) const method.\n"
658 <<
"Normalization coefficient is zero.\n";
660 throw std::logic_error(buffer.str());
663 return(gradient/normalization_coefficient);
711 size_t training_index;
730 double normalization_coefficient = 0.0;
734 #pragma omp parallel for private(i, training_index, inputs, outputs, targets) reduction(+ : normalization_coefficient)
736 for(i = 0; i < (int)training_instances_number; i++)
738 training_index = training_indices[i];
766 if(normalization_coefficient < 1.0e-99)
768 std::ostringstream buffer;
770 buffer <<
"OpenNN Exception: NormalizedSquaredError class.\n"
771 <<
"Vector<double> calculate_terms(void) const method.\n"
772 <<
"Normalization coefficient is zero.\n";
774 throw std::logic_error(buffer.str());
777 return(performance_terms/sqrt(normalization_coefficient));
800 const size_t size = network_parameters.size();
806 if(size != neural_parameters_number)
808 std::ostringstream buffer;
810 buffer <<
"OpenNN Exception: NormalizedSquaredError class.\n"
811 <<
"double calculate_terms(const Vector<double>&) const method.\n"
812 <<
"Size (" << size <<
") must be equal to number of multilayer perceptron parameters (" << neural_parameters_number <<
").\n";
814 throw std::logic_error(buffer.str());
878 size_t training_index;
902 Matrix<double> terms_Jacobian(training_instances_number, parameters_number);
904 double normalization_coefficient = 0.0;
910 #pragma omp parallel for private(i, training_index, inputs, targets, first_order_forward_propagation, layers_inputs, \
911 layers_combination_parameters_Jacobian, term, term_norm, output_gradient, layers_delta, particular_solution, homogeneous_solution, point_gradient)
913 for(i = 0; i < (int)training_instances_number; i++)
915 training_index = training_indices[i];
941 if(!has_conditions_layer)
943 const Vector<double>& outputs = layers_activation[layers_number-1];
945 term = outputs-targets;
954 output_gradient = term/term_norm;
964 const Vector<double>& output_layer_activation = layers_activation[layers_number-1];
966 term = (particular_solution+homogeneous_solution*output_layer_activation - targets);
975 output_gradient = term/term_norm;
978 layers_delta =
calculate_layers_delta(layers_activation_derivative, homogeneous_solution, output_gradient);
985 terms_Jacobian.
set_row(i, point_gradient);
989 if(normalization_coefficient < 1.0e-99)
991 std::ostringstream buffer;
993 buffer <<
"OpenNN Exception: NormalizedSquaredError class.\n"
994 <<
"Matrix<double> calculate_terms_Jacobian(void) const method.\n"
995 <<
"Normalization coefficient is zero.\n";
997 throw std::logic_error(buffer.str());
1000 return(terms_Jacobian/sqrt(normalization_coefficient));
1017 return(first_order_terms);
1039 const size_t inputs_number = multilayer_perceptron_pointer->
get_inputs_number();
1050 size_t training_index;
1071 #pragma omp parallel for private(i, training_index, inputs, outputs, targets)
1073 for(i = 0; i < (int)training_instances_number; i++)
1075 training_index = training_indices[i];
1099 return(squared_errors);
1120 if(maximal_errors_number > training_instances_number)
1122 std::ostringstream buffer;
1124 buffer <<
"OpenNN Exception: NormalizedquaredError class.\n"
1125 <<
"Vector<size_t> calculate_maximal_errors(void) const method.\n"
1126 <<
"Number of maximal errors (" << maximal_errors_number <<
") must be equal or less than number of training instances (" << training_instances_number <<
").\n";
1128 throw std::logic_error(buffer.str());
1143 return(
"NORMALIZED_SQUARED_ERROR");
1154 std::ostringstream buffer;
1156 tinyxml2::XMLDocument* document =
new tinyxml2::XMLDocument;
1160 tinyxml2::XMLElement* normalized_squared_error_element = document->NewElement(
"NormalizedSquaredError");
1162 document->InsertFirstChild(normalized_squared_error_element);
1166 tinyxml2::XMLElement* display_element = document->NewElement(
"Display");
1167 normalized_squared_error_element->LinkEndChild(display_element);
1172 tinyxml2::XMLText* display_text = document->NewText(buffer.str().c_str());
1173 display_element->LinkEndChild(display_text);
1187 const tinyxml2::XMLElement* root_element = document.FirstChildElement(
"NormalizedSquaredError");
1194 const tinyxml2::XMLElement* display_element = root_element->FirstChildElement(
"Display");
1198 const std::string new_display_string = display_element->GetText();
1204 catch(
const std::logic_error& e)
1206 std::cout << e.what() << std::endl;
1216 std::ostringstream buffer;
1220 return(buffer.str());
double calculate_normalization_coefficient(const Matrix< double > &, const Vector< double > &) const
size_t count_parameters_number(void) const
Vector< size_t > calculate_maximal_errors(const size_t &=10) const
void initialize(const T &)
NormalizedSquaredError(void)
std::string write_information(void) const
Matrix< double > calculate_Hessian(void) const
const Variables & get_variables(void) const
Returns a constant reference to the variables object composing this data set object.
Vector< size_t > arrange_missing_instances(void) const
Returns a vector with the indices of those instances with missing values.
double calculate_sum_squared_error(const Matrix< double > &) const
size_t count_training_instances_number(void) const
Returns the number of instances in the data set which will be used for training.
size_t get_inputs_number(void) const
Returns the number of inputs to the multilayer perceptron.
double calculate_generalization_performance(void) const
Returns the performance of the performance term, evaluated for generalization purposes.
tinyxml2::XMLDocument * to_XML(void) const
virtual ~NormalizedSquaredError(void)
Destructor.
size_t get_layers_number(void) const
Returns the number of layers in the multilayer perceptron.
Vector< size_t > calculate_maximal_indices(const size_t &) const
bool has_missing_values(void) const
size_t get_outputs_number(void) const
Returns the number of outputs neurons in the multilayer perceptron.
Vector< double > calculate_outputs(const Vector< double > &) const
const MissingValues & get_missing_values(void) const
Returns a reference to the missing values object in the data set.
Vector< double > calculate_training_target_data_mean(void) const
Returns the mean values of the target variables on the training instances.
void from_XML(const tinyxml2::XMLDocument &)
Vector< size_t > arrange_targets_indices(void) const
Returns the indices of the target variables.
Vector< size_t > arrange_training_indices(void) const
Returns the indices of the instances which will be used for training.
size_t count_generalization_instances_number(void) const
Returns the number of instances in the data set which will be used for generalization.
PerformanceTerm::FirstOrderTerms calculate_first_order_terms(void) const
Vector< double > get_instance(const size_t &) const
bool has_conditions_layer(void) const
MultilayerPerceptron * get_multilayer_perceptron_pointer(void) const
Returns a pointer to the multilayer perceptron composing this neural network.
double calculate_norm(void) const
Returns the vector norm.
Matrix< double > calculate_terms_Jacobian(void) const
double calculate_sum_squared_error(const Vector< double > &) const
virtual Vector< double > calculate_homogeneous_solution(const Vector< double > &) const
Returns the homogeneous solution for applying boundary conditions.
Vector< Matrix< double > > calculate_layers_combination_parameters_Jacobian(const Vector< Vector< double > > &) const
double calculate_performance(void) const
Returns the performance value of a neural network according to the normalized squared error on a data set.
virtual Vector< double > calculate_particular_solution(const Vector< double > &) const
Returns the particular solution for applying boundary conditions.
double calculate_distance(const Vector< double > &) const
ConditionsLayer * get_conditions_layer_pointer(void) const
Returns a pointer to the conditions layer composing this neural network.
Vector< double > calculate_squared_errors(void) const
Returns the squared errors of the training instances.
size_t count_inputs_number(void) const
Returns the number of input variables of the data set.
std::string write_performance_term_type(void) const
Returns a string with the name of the normalized squared error performance type, "NORMALIZED_SQUARED_ERROR".
Vector< Vector< Vector< double > > > calculate_first_order_forward_propagation(const Vector< double > &) const
Vector< double > calculate_generalization_target_data_mean(void) const
Returns the mean values of the target variables on the generalization instances.
size_t count_targets_number(void) const
Returns the number of target variables of the data set.
Vector< double > calculate_gradient(void) const
void set_row(const size_t &, const Vector< T > &)
Vector< size_t > arrange_generalization_indices(void) const
Returns the indices of the instances which will be used for generalization.
Vector< double > calculate_terms(void) const
Vector< Vector< double > > arrange_layers_input(const Vector< double > &, const Vector< Vector< double > > &) const
void set_parameters(const Vector< double > &)
Vector< size_t > arrange_inputs_indices(void) const
Returns the indices of the input variables.
const Instances & get_instances(void) const
Returns a constant reference to the instances object composing this data set object.
size_t count_parameters_number(void) const
Returns the number of parameters (biases and synaptic weights) in the multilayer perceptron.