#include "conjugate_gradient.h"
from_XML(conjugate_gradient_document);
std::ostringstream buffer;

buffer << "OpenNN Exception: ConjugateGradient class.\n"
       << "std::string write_training_direction_method(void) const method.\n"
       << "Unknown training direction method.\n";

throw std::logic_error(buffer.str());
training_direction_method = new_training_direction_method;
if(new_training_direction_method_name == "PR")
{
   training_direction_method = PR;
}
else if(new_training_direction_method_name == "FR")
{
   training_direction_method = FR;
}
else
{
   std::ostringstream buffer;

   buffer << "OpenNN Exception: ConjugateGradient class.\n"
          << "void set_training_direction_method(const std::string&) method.\n"
          << "Unknown training direction method: " << new_training_direction_method_name << ".\n";

   throw std::logic_error(buffer.str());
}
if(new_warning_parameters_norm < 0.0)
{
   std::ostringstream buffer;

   buffer << "OpenNN Exception: ConjugateGradient class.\n"
          << "void set_warning_parameters_norm(const double&) method.\n"
          << "Warning parameters norm must be greater than or equal to 0.\n";

   throw std::logic_error(buffer.str());
}
if(new_warning_gradient_norm < 0.0)
{
   std::ostringstream buffer;

   buffer << "OpenNN Exception: ConjugateGradient class.\n"
          << "void set_warning_gradient_norm(const double&) method.\n"
          << "Warning gradient norm must be greater than or equal to 0.\n";

   throw std::logic_error(buffer.str());
}
if(new_warning_training_rate < 0.0)
{
   std::ostringstream buffer;

   buffer << "OpenNN Exception: ConjugateGradient class.\n"
          << "void set_warning_training_rate(const double&) method.\n"
          << "Warning training rate must be greater than or equal to 0.\n";

   throw std::logic_error(buffer.str());
}
if(new_error_parameters_norm < 0.0)
{
   std::ostringstream buffer;

   buffer << "OpenNN Exception: ConjugateGradient class.\n"
          << "void set_error_parameters_norm(const double&) method.\n"
          << "Error parameters norm must be greater than or equal to 0.\n";

   throw std::logic_error(buffer.str());
}
if(new_error_gradient_norm < 0.0)
{
   std::ostringstream buffer;

   buffer << "OpenNN Exception: ConjugateGradient class.\n"
          << "void set_error_gradient_norm(const double&) method.\n"
          << "Error gradient norm must be greater than or equal to 0.\n";

   throw std::logic_error(buffer.str());
}
if(new_error_training_rate < 0.0)
{
   std::ostringstream buffer;

   buffer << "OpenNN Exception: ConjugateGradient class.\n"
          << "void set_error_training_rate(const double&) method.\n"
          << "Error training rate must be greater than or equal to 0.\n";

   throw std::logic_error(buffer.str());
}
if(new_minimum_parameters_increment_norm < 0.0)
{
   std::ostringstream buffer;

   buffer << "OpenNN Exception: ConjugateGradient class.\n"
          << "void set_minimum_parameters_increment_norm(const double&) method.\n"
          << "Minimum parameters increment norm must be greater than or equal to 0.\n";

   throw std::logic_error(buffer.str());
}
if(new_minimum_performance_increase < 0.0)
{
   std::ostringstream buffer;

   buffer << "OpenNN Exception: ConjugateGradient class.\n"
          << "void set_minimum_performance_increase(const double&) method.\n"
          << "Minimum performance improvement must be greater than or equal to 0.\n";

   throw std::logic_error(buffer.str());
}
if(new_gradient_norm_goal < 0.0)
{
   std::ostringstream buffer;

   buffer << "OpenNN Exception: ConjugateGradient class.\n"
          << "void set_gradient_norm_goal(const double&) method.\n"
          << "Gradient norm goal must be greater than or equal to 0.\n";

   throw std::logic_error(buffer.str());
}
if(new_maximum_time < 0.0)
{
   std::ostringstream buffer;

   buffer << "OpenNN Exception: ConjugateGradient class.\n"
          << "void set_maximum_time(const double&) method.\n"
          << "Maximum time must be greater than or equal to 0.\n";

   throw std::logic_error(buffer.str());
}
if(new_display_period <= 0)
{
   std::ostringstream buffer;

   buffer << "OpenNN Exception: ConjugateGradient class.\n"
          << "void set_display_period(const size_t&) method.\n"
          << "Display period must be greater than 0.\n";

   throw std::logic_error(buffer.str());
}
if(new_save_period <= 0)
{
   std::ostringstream buffer;

   buffer << "OpenNN Exception: ConjugateGradient class.\n"
          << "void set_save_period(const size_t&) method.\n"
          << "Save period must be greater than 0.\n";

   throw std::logic_error(buffer.str());
}
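// A note on the thresholds above (a sketch of the convention, inferred from
// perform_training() below): warning_* values only cause a message to be written
// to the screen, while error_* values make training abort with a std::logic_error.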
std::ostringstream buffer;

if(!performance_functional_pointer)
{
   buffer << "OpenNN Exception: ConjugateGradient class.\n"
          << "double calculate_FR_parameter(const Vector<double>&, const Vector<double>&) const method.\n"
          << "Performance functional pointer is NULL.\n";

   throw std::logic_error(buffer.str());
}

const NeuralNetwork* neural_network_pointer = performance_functional_pointer->get_neural_network_pointer();

const size_t parameters_number = neural_network_pointer->count_parameters_number();

const size_t old_gradient_size = old_gradient.size();

if(old_gradient_size != parameters_number)
{
   buffer << "OpenNN Exception: ConjugateGradient class.\n"
          << "double calculate_FR_parameter(const Vector<double>&, const Vector<double>&) const method.\n"
          << "Size of old gradient (" << old_gradient_size << ") is not equal to number of parameters (" << parameters_number << ").\n";

   throw std::logic_error(buffer.str());
}

const size_t gradient_size = gradient.size();

if(gradient_size != parameters_number)
{
   buffer << "OpenNN Exception: ConjugateGradient class.\n"
          << "double calculate_FR_parameter(const Vector<double>&, const Vector<double>&) const method.\n"
          << "Size of gradient (" << gradient_size << ") is not equal to number of parameters (" << parameters_number << ").\n";

   throw std::logic_error(buffer.str());
}

double FR_parameter = 0.0;

const double numerator = gradient.dot(gradient);
const double denominator = old_gradient.dot(old_gradient);

if(denominator == 0.0)
{
   FR_parameter = 0.0;
}
else
{
   FR_parameter = numerator/denominator;
}

if(FR_parameter < 0.0)
{
   FR_parameter = 0.0;
}

if(FR_parameter > 1.0)
{
   FR_parameter = 1.0;
}

return(FR_parameter);
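// Fletcher-Reeves conjugate parameter computed above:
//
//    FR_parameter = (g . g) / (g_old . g_old)
//
// Clamping to [0,1] is a safeguard: a value of 0 falls back to pure gradient descent,
// and the upper bound keeps the old direction's contribution from dominating.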
std::ostringstream buffer;

if(!performance_functional_pointer)
{
   buffer << "OpenNN Exception: ConjugateGradient class.\n"
          << "double calculate_PR_parameter(const Vector<double>&, const Vector<double>&) const method.\n"
          << "Performance functional pointer is NULL.\n";

   throw std::logic_error(buffer.str());
}

const NeuralNetwork* neural_network_pointer = performance_functional_pointer->get_neural_network_pointer();

const size_t parameters_number = neural_network_pointer->count_parameters_number();

const size_t old_gradient_size = old_gradient.size();

if(old_gradient_size != parameters_number)
{
   buffer << "OpenNN Exception: ConjugateGradient class.\n"
          << "double calculate_PR_parameter(const Vector<double>&, const Vector<double>&) const method.\n"
          << "Size of old gradient (" << old_gradient_size << ") is not equal to number of parameters (" << parameters_number << ").\n";

   throw std::logic_error(buffer.str());
}

const size_t gradient_size = gradient.size();

if(gradient_size != parameters_number)
{
   buffer << "OpenNN Exception: ConjugateGradient class.\n"
          << "double calculate_PR_parameter(const Vector<double>&, const Vector<double>&) const method.\n"
          << "Size of gradient (" << gradient_size << ") is not equal to number of parameters (" << parameters_number << ").\n";

   throw std::logic_error(buffer.str());
}

double PR_parameter = 0.0;

const double numerator = (gradient-old_gradient).dot(gradient);
const double denominator = old_gradient.dot(old_gradient);

if(denominator == 0.0)
{
   PR_parameter = 0.0;
}
else
{
   PR_parameter = numerator/denominator;
}

if(PR_parameter < 0.0)
{
   PR_parameter = 0.0;
}

if(PR_parameter > 1.0)
{
   PR_parameter = 1.0;
}

return(PR_parameter);
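// Polak-Ribiere conjugate parameter computed above:
//
//    PR_parameter = ((g - g_old) . g) / (g_old . g_old)
//
// Clamping at 0 amounts to the usual PR+ rule, which restarts towards gradient
// descent whenever the raw parameter turns negative.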
std::ostringstream buffer;

if(!performance_functional_pointer)
{
   buffer << "OpenNN Exception: ConjugateGradient class.\n"
          << "Vector<double> calculate_PR_training_direction(const Vector<double>&, const Vector<double>&, const Vector<double>&) const method.\n"
          << "Performance functional pointer is NULL.\n";

   throw std::logic_error(buffer.str());
}

const NeuralNetwork* neural_network_pointer = performance_functional_pointer->get_neural_network_pointer();

const size_t parameters_number = neural_network_pointer->count_parameters_number();

const size_t old_gradient_size = old_gradient.size();

if(old_gradient_size != parameters_number)
{
   buffer << "OpenNN Exception: ConjugateGradient class.\n"
          << "Vector<double> calculate_PR_training_direction(const Vector<double>&, const Vector<double>&, const Vector<double>&) const method.\n"
          << "Size of old gradient (" << old_gradient_size << ") is not equal to number of parameters (" << parameters_number << ").\n";

   throw std::logic_error(buffer.str());
}

const size_t gradient_size = gradient.size();

if(gradient_size != parameters_number)
{
   buffer << "OpenNN Exception: ConjugateGradient class.\n"
          << "Vector<double> calculate_PR_training_direction(const Vector<double>&, const Vector<double>&, const Vector<double>&) const method.\n"
          << "Size of gradient (" << gradient_size << ") is not equal to number of parameters (" << parameters_number << ").\n";

   throw std::logic_error(buffer.str());
}

const size_t old_training_direction_size = old_training_direction.size();

if(old_training_direction_size != parameters_number)
{
   buffer << "OpenNN Exception: ConjugateGradient class.\n"
          << "Vector<double> calculate_PR_training_direction(const Vector<double>&, const Vector<double>&, const Vector<double>&) const method.\n"
          << "Size of old training direction (" << old_training_direction_size << ") is not equal to number of parameters (" << parameters_number << ").\n";

   throw std::logic_error(buffer.str());
}

const double PR_parameter = calculate_PR_parameter(old_gradient, gradient);

const Vector<double> gradient_descent_term = calculate_gradient_descent_training_direction(gradient);
const Vector<double> conjugate_direction_term = old_training_direction*PR_parameter;

const Vector<double> PR_training_direction = gradient_descent_term + conjugate_direction_term;

const double PR_training_direction_norm = PR_training_direction.calculate_norm();

return(PR_training_direction/PR_training_direction_norm);
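// Direction recurrence implemented above, and mirrored by the FR version that follows:
//
//    d = normalize(-g/||g|| + beta * d_old)
//
// that is, the gradient-descent term plus the conjugate term, renormalized to unit length.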
std::ostringstream buffer;

if(!performance_functional_pointer)
{
   buffer << "OpenNN Exception: ConjugateGradient class.\n"
          << "Vector<double> calculate_FR_training_direction(const Vector<double>&, const Vector<double>&, const Vector<double>&) const method.\n"
          << "Performance functional pointer is NULL.\n";

   throw std::logic_error(buffer.str());
}

const NeuralNetwork* neural_network_pointer = performance_functional_pointer->get_neural_network_pointer();

const size_t parameters_number = neural_network_pointer->count_parameters_number();

const size_t old_gradient_size = old_gradient.size();

if(old_gradient_size != parameters_number)
{
   buffer << "OpenNN Exception: ConjugateGradient class.\n"
          << "Vector<double> calculate_FR_training_direction(const Vector<double>&, const Vector<double>&, const Vector<double>&) const method.\n"
          << "Size of old gradient (" << old_gradient_size << ") is not equal to number of parameters (" << parameters_number << ").\n";

   throw std::logic_error(buffer.str());
}

const size_t gradient_size = gradient.size();

if(gradient_size != parameters_number)
{
   buffer << "OpenNN Exception: ConjugateGradient class.\n"
          << "Vector<double> calculate_FR_training_direction(const Vector<double>&, const Vector<double>&, const Vector<double>&) const method.\n"
          << "Size of gradient (" << gradient_size << ") is not equal to number of parameters (" << parameters_number << ").\n";

   throw std::logic_error(buffer.str());
}

const size_t old_training_direction_size = old_training_direction.size();

if(old_training_direction_size != parameters_number)
{
   buffer << "OpenNN Exception: ConjugateGradient class.\n"
          << "Vector<double> calculate_FR_training_direction(const Vector<double>&, const Vector<double>&, const Vector<double>&) const method.\n"
          << "Size of old training direction (" << old_training_direction_size << ") is not equal to number of parameters (" << parameters_number << ").\n";

   throw std::logic_error(buffer.str());
}

const double FR_parameter = calculate_FR_parameter(old_gradient, gradient);

const Vector<double> gradient_descent_term = calculate_gradient_descent_training_direction(gradient);
const Vector<double> conjugate_direction_term = old_training_direction*FR_parameter;

const Vector<double> FR_training_direction = gradient_descent_term + conjugate_direction_term;

const double FR_training_direction_norm = FR_training_direction.calculate_norm();

return(FR_training_direction/FR_training_direction_norm);
std::ostringstream buffer;

if(!performance_functional_pointer)
{
   buffer << "OpenNN Exception: ConjugateGradient class.\n"
          << "Vector<double> calculate_training_direction(const Vector<double>&, const Vector<double>&, const Vector<double>&) const method.\n"
          << "Performance functional pointer is NULL.\n";

   throw std::logic_error(buffer.str());
}

const NeuralNetwork* neural_network_pointer = performance_functional_pointer->get_neural_network_pointer();

const size_t parameters_number = neural_network_pointer->count_parameters_number();

const size_t old_gradient_size = old_gradient.size();

if(old_gradient_size != parameters_number)
{
   buffer << "OpenNN Exception: ConjugateGradient class.\n"
          << "Vector<double> calculate_training_direction(const Vector<double>&, const Vector<double>&, const Vector<double>&) const method.\n"
          << "Size of old gradient (" << old_gradient_size << ") is not equal to number of parameters (" << parameters_number << ").\n";

   throw std::logic_error(buffer.str());
}

const size_t gradient_size = gradient.size();

if(gradient_size != parameters_number)
{
   buffer << "OpenNN Exception: ConjugateGradient class.\n"
          << "Vector<double> calculate_training_direction(const Vector<double>&, const Vector<double>&, const Vector<double>&) const method.\n"
          << "Size of gradient (" << gradient_size << ") is not equal to number of parameters (" << parameters_number << ").\n";

   throw std::logic_error(buffer.str());
}

const size_t old_training_direction_size = old_training_direction.size();

if(old_training_direction_size != parameters_number)
{
   buffer << "OpenNN Exception: ConjugateGradient class.\n"
          << "Vector<double> calculate_training_direction(const Vector<double>&, const Vector<double>&, const Vector<double>&) const method.\n"
          << "Size of old training direction (" << old_training_direction_size << ") is not equal to number of parameters (" << parameters_number << ").\n";

   throw std::logic_error(buffer.str());
}

switch(training_direction_method)
{
   case FR:
   {
      return(calculate_FR_training_direction(old_gradient, gradient, old_training_direction));
   }

   case PR:
   {
      return(calculate_PR_training_direction(old_gradient, gradient, old_training_direction));
   }

   default:
   {
      std::ostringstream buffer;

      buffer << "OpenNN Exception: ConjugateGradient class.\n"
             << "Vector<double> calculate_training_direction(const Vector<double>&, const Vector<double>&, const Vector<double>&) const method.\n"
             << "Unknown training direction method: " << training_direction_method << ".\n";

      throw std::logic_error(buffer.str());
   }
}
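// Typical call pattern inside perform_training() below: the direction for the current
// iteration is derived from the previous gradient, the current gradient, and the
// previous direction.
//
//    training_direction = calculate_training_direction(old_gradient, gradient, old_training_direction);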
std::ostringstream buffer;

if(!performance_functional_pointer)
{
   buffer << "OpenNN Exception: ConjugateGradient class.\n"
          << "Vector<double> calculate_gradient_descent_training_direction(const Vector<double>&) const method.\n"
          << "Performance functional pointer is NULL.\n";

   throw std::logic_error(buffer.str());
}

const NeuralNetwork* neural_network_pointer = performance_functional_pointer->get_neural_network_pointer();

const size_t parameters_number = neural_network_pointer->count_parameters_number();

const size_t gradient_size = gradient.size();

if(gradient_size != parameters_number)
{
   buffer << "OpenNN Exception: ConjugateGradient class.\n"
          << "Vector<double> calculate_gradient_descent_training_direction(const Vector<double>&) const method.\n"
          << "Size of gradient (" << gradient_size << ") is not equal to number of parameters (" << parameters_number << ").\n";

   throw std::logic_error(buffer.str());
}

return(gradient.calculate_normalized()*(-1.0));
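// Convention used above (a sketch, consistent with calculate_normalized() returning
// the vector divided by its norm): the descent direction is the unit-length negative
// gradient, -g/||g||, so the training rate alone determines the step size.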
std::ostringstream buffer;

buffer << "OpenNN Exception: ConjugateGradientResults structure.\n"
       << "void resize_training_history(const size_t&) method.\n"
       << "Conjugate gradient pointer is NULL.\n";

throw std::logic_error(buffer.str());
std::ostringstream buffer;

if(!parameters_history.empty())
{
   if(!parameters_history[0].empty())
   {
      buffer << "% Parameters history:\n"
             << parameters_history << "\n";
   }
}

if(!parameters_norm_history.empty())
{
   buffer << "% Parameters norm history:\n"
          << parameters_norm_history << "\n";
}

if(!performance_history.empty())
{
   buffer << "% Performance history:\n"
          << performance_history << "\n";
}

if(!generalization_performance_history.empty())
{
   buffer << "% Generalization performance history:\n"
          << generalization_performance_history << "\n";
}

if(!gradient_history.empty())
{
   if(!gradient_history[0].empty())
   {
      buffer << "% Gradient history:\n"
             << gradient_history << "\n";
   }
}

if(!gradient_norm_history.empty())
{
   buffer << "% Gradient norm history:\n"
          << gradient_norm_history << "\n";
}

if(!training_direction_history.empty())
{
   if(!training_direction_history[0].empty())
   {
      buffer << "% Training direction history:\n"
             << training_direction_history << "\n";
   }
}

if(!training_rate_history.empty())
{
   buffer << "% Training rate history:\n"
          << training_rate_history << "\n";
}

if(!elapsed_time_history.empty())
{
   buffer << "% Elapsed time history:\n"
          << elapsed_time_history << "\n";
}

return(buffer.str());
std::ostringstream buffer;

Vector<std::string> names;
Vector<std::string> values;

names.push_back("Final parameters norm");

buffer.str("");
buffer << std::setprecision(precision) << final_parameters_norm;

values.push_back(buffer.str());

names.push_back("Final performance");

buffer.str("");
buffer << std::setprecision(precision) << final_performance;

values.push_back(buffer.str());

names.push_back("Final generalization performance");

buffer.str("");
buffer << std::setprecision(precision) << final_generalization_performance;

values.push_back(buffer.str());

names.push_back("Final gradient norm");

buffer.str("");
buffer << std::setprecision(precision) << final_gradient_norm;

values.push_back(buffer.str());

names.push_back("Iterations number");

buffer.str("");
buffer << iterations_number;

values.push_back(buffer.str());

names.push_back("Elapsed time");

buffer.str("");
buffer << elapsed_time;

values.push_back(buffer.str());

const size_t rows_number = names.size();
const size_t columns_number = 2;

Matrix<std::string> final_results(rows_number, columns_number);

final_results.set_column(0, names);
final_results.set_column(1, values);

return(final_results);
if(display)
{
   std::cout << "Training with conjugate gradient...\n";
}

time_t beginning_time, current_time;
time(&beginning_time);
double elapsed_time;

double parameters_norm;

double performance = 0.0;
double old_performance = 0.0;
double performance_increase = 0.0;

double gradient_norm;

double generalization_performance = 0.0;
double old_generalization_performance = 0.0;

std::string information;

const double first_training_rate = 0.01;

double parameters_increment_norm;

double training_slope;

double initial_training_rate = 0.0;
double training_rate = 0.0;
double old_training_rate = 0.0;

bool stop_training = false;

size_t generalization_failures = 0;
if(parameters_norm >= error_parameters_norm)
{
   std::ostringstream buffer;

   buffer << "OpenNN Exception: ConjugateGradient class.\n"
          << "ConjugateGradientResults* perform_training(void) method.\n"
          << "Parameters norm is greater than error parameters norm.\n";

   throw std::logic_error(buffer.str());
}
else if(parameters_norm >= warning_parameters_norm)
{
   std::cout << "OpenNN Warning: Parameters norm is " << parameters_norm << ".\n";
}
if(iteration == 0)
{
   performance_increase = 0.0;
}
else
{
   performance = directional_point[1];
   performance_increase = old_performance - performance;
}
if(gradient_norm >= warning_gradient_norm)
{
   std::cout << "OpenNN Warning: Gradient norm is " << gradient_norm << ".\n";
}
if(iteration != 0 && generalization_performance > old_generalization_performance)
{
   generalization_failures++;
}
if(iteration == 0 || iteration % parameters_number == 0)
{
   training_direction = calculate_gradient_descent_training_direction(gradient);
}
else if(fabs(old_gradient.dot(gradient)) >= 0.2*gradient.dot(gradient))
{
   training_direction = calculate_gradient_descent_training_direction(gradient);
}
else
{
   training_direction = calculate_training_direction(old_gradient, gradient, old_training_direction);
}
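// Restart policy sketched above: the direction is reset to plain gradient descent every
// parameters_number iterations, and also when Powell's criterion |g_old . g| >= 0.2 (g . g)
// signals that consecutive gradients are far from orthogonal, i.e. conjugacy has been lost.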
training_slope = (gradient/gradient_norm).dot(training_direction);

if(training_slope >= 0.0)
{
   training_direction = calculate_gradient_descent_training_direction(gradient);
}
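// Descent check: training_slope = (g/||g||) . d is the directional derivative of the
// performance along d; a non-negative slope means d is not a descent direction, so the
// direction is reset to plain gradient descent.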
if(iteration == 0)
{
   initial_training_rate = first_training_rate;
}
else
{
   initial_training_rate = old_training_rate;
}

directional_point = training_rate_algorithm.calculate_directional_point(performance, training_direction, initial_training_rate);

training_rate = directional_point[0];

if(iteration != 0 && training_rate < 1.0e-99)
{
   directional_point = training_rate_algorithm.calculate_directional_point(performance, training_direction, first_training_rate);

   training_rate = directional_point[0];
}
parameters_increment = training_direction*training_rate;
parameters_increment_norm = parameters_increment.calculate_norm();

time(&current_time);
elapsed_time = difftime(current_time, beginning_time);
if(parameters_increment_norm <= minimum_parameters_increment_norm)
{
   if(display)
   {
      std::cout << "Iteration " << iteration << ": Minimum parameters increment norm reached.\n";
      std::cout << "Parameters increment norm: " << parameters_increment_norm << std::endl;
   }

   stop_training = true;
}
else if(iteration != 0 && performance_increase <= minimum_performance_increase)
{
   if(display)
   {
      std::cout << "Iteration " << iteration << ": Minimum performance increase reached.\n";
      std::cout << "Performance increase: " << performance_increase << std::endl;
   }

   stop_training = true;
}
else if(performance <= performance_goal)
{
   if(display)
   {
      std::cout << "Iteration " << iteration << ": Performance goal reached.\n";
   }

   stop_training = true;
}
else if(gradient_norm <= gradient_norm_goal)
{
   if(display)
   {
      std::cout << "Iteration " << iteration << ": Gradient norm goal reached.\n";
   }

   stop_training = true;
}
else if(generalization_failures > maximum_generalization_performance_decreases)
{
   if(display)
   {
      std::cout << "Iteration " << iteration << ": Maximum generalization failures reached.\n"
                << "Generalization failures: " << generalization_failures << std::endl;
   }

   stop_training = true;
}
else if(iteration == maximum_iterations_number)
{
   if(display)
   {
      std::cout << "Iteration " << iteration << ": Maximum number of iterations reached.\n";
   }

   stop_training = true;
}
else if(elapsed_time >= maximum_time)
{
   if(display)
   {
      std::cout << "Iteration " << iteration << ": Maximum training time reached.\n";
   }

   stop_training = true;
}
if(iteration != 0 && iteration % save_period == 0)
{
   neural_network_pointer->save(neural_network_file_name);
}
std::cout << "Parameters norm: " << parameters_norm << "\n"
          << "Performance: " << performance << "\n"
          << "Gradient norm: " << gradient_norm << "\n"
          << "Training rate: " << training_rate << "\n"
          << "Elapsed time: " << elapsed_time << std::endl;

if(generalization_performance != 0.0)
{
   std::cout << "Generalization performance: " << generalization_performance << std::endl;
}
std::cout << "Iteration " << iteration << ":\n"
          << "Parameters norm: " << parameters_norm << "\n"
          << "Performance: " << performance << "\n"
          << "Gradient norm: " << gradient_norm << "\n"
          << "Training rate: " << training_rate << "\n"
          << "Elapsed time: " << elapsed_time << std::endl;

if(generalization_performance != 0.0)
{
   std::cout << "Generalization performance: " << generalization_performance << std::endl;
}
parameters += parameters_increment;

old_performance = performance;
old_gradient = gradient;
old_generalization_performance = generalization_performance;

old_training_direction = training_direction;
old_training_rate = training_rate;

return(results_pointer);
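// --- Illustrative usage (a sketch, not part of this file; assumes a PerformanceFunctional
// --- object pf already wired to a NeuralNetwork, and a ConjugateGradient constructor
// --- taking a PerformanceFunctional*, as in the OpenNN examples) ---
//
//    ConjugateGradient conjugate_gradient(&pf);
//
//    conjugate_gradient.set_training_direction_method("PR");
//    conjugate_gradient.set_maximum_iterations_number(1000);
//    conjugate_gradient.set_display_period(10);
//
//    ConjugateGradientResults* results = conjugate_gradient.perform_training();
//
//    std::cout << results->to_string() << std::endl;
//
//    delete results;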
return("CONJUGATE_GRADIENT");
std::ostringstream buffer;

Vector<std::string> labels;
Vector<std::string> values;

labels.push_back("Training direction method");

const std::string training_direction_method_string = write_training_direction_method();

values.push_back(training_direction_method_string);

labels.push_back("Training rate method");

const std::string training_rate_method = training_rate_algorithm.write_training_rate_method();

values.push_back(training_rate_method);

labels.push_back("Training rate tolerance");
buffer.str(""); buffer << training_rate_algorithm.get_training_rate_tolerance();
values.push_back(buffer.str());

labels.push_back("Minimum parameters increment norm");
buffer.str(""); buffer << minimum_parameters_increment_norm;
values.push_back(buffer.str());

labels.push_back("Minimum performance increase");
buffer.str(""); buffer << minimum_performance_increase;
values.push_back(buffer.str());

labels.push_back("Performance goal");
buffer.str(""); buffer << performance_goal;
values.push_back(buffer.str());

labels.push_back("Gradient norm goal");
buffer.str(""); buffer << gradient_norm_goal;
values.push_back(buffer.str());

labels.push_back("Maximum generalization failures");
buffer.str(""); buffer << maximum_generalization_performance_decreases;
values.push_back(buffer.str());

labels.push_back("Maximum iterations number");
buffer.str(""); buffer << maximum_iterations_number;
values.push_back(buffer.str());

labels.push_back("Maximum time");
buffer.str(""); buffer << maximum_time;
values.push_back(buffer.str());

labels.push_back("Reserve parameters norm history");
buffer.str(""); buffer << reserve_parameters_norm_history;
values.push_back(buffer.str());

labels.push_back("Reserve performance history");
buffer.str(""); buffer << reserve_performance_history;
values.push_back(buffer.str());

labels.push_back("Reserve gradient norm history");
buffer.str(""); buffer << reserve_gradient_norm_history;
values.push_back(buffer.str());

labels.push_back("Reserve generalization performance history");
buffer.str(""); buffer << reserve_generalization_performance_history;
values.push_back(buffer.str());

labels.push_back("Reserve elapsed time history");
buffer.str(""); buffer << reserve_elapsed_time_history;
values.push_back(buffer.str());

const size_t rows_number = labels.size();
const size_t columns_number = 2;

Matrix<std::string> string_matrix(rows_number, columns_number);

string_matrix.set_column(0, labels);
string_matrix.set_column(1, values);

return(string_matrix);
std::ostringstream buffer;

tinyxml2::XMLDocument* document = new tinyxml2::XMLDocument;

tinyxml2::XMLElement* root_element = document->NewElement("ConjugateGradient");

document->InsertFirstChild(root_element);

tinyxml2::XMLElement* element = NULL;
tinyxml2::XMLText* text = NULL;

element = document->NewElement("TrainingDirectionMethod");
root_element->LinkEndChild(element);

text = document->NewText(write_training_direction_method().c_str());
element->LinkEndChild(text);
element = document->NewElement("TrainingRateAlgorithm");
root_element->LinkEndChild(element);

tinyxml2::XMLDocument* training_rate_algorithm_document = training_rate_algorithm.to_XML();

const tinyxml2::XMLElement* training_rate_algorithm_element = training_rate_algorithm_document->FirstChildElement("TrainingRateAlgorithm");

DeepClone(element, training_rate_algorithm_element, document, NULL);

delete training_rate_algorithm_document;
element = document->NewElement("WarningParametersNorm");
root_element->LinkEndChild(element);

buffer.str(""); buffer << warning_parameters_norm;

text = document->NewText(buffer.str().c_str());
element->LinkEndChild(text);

element = document->NewElement("WarningGradientNorm");
root_element->LinkEndChild(element);

buffer.str(""); buffer << warning_gradient_norm;

text = document->NewText(buffer.str().c_str());
element->LinkEndChild(text);

element = document->NewElement("WarningTrainingRate");
root_element->LinkEndChild(element);

buffer.str(""); buffer << warning_training_rate;

text = document->NewText(buffer.str().c_str());
element->LinkEndChild(text);

element = document->NewElement("ErrorParametersNorm");
root_element->LinkEndChild(element);

buffer.str(""); buffer << error_parameters_norm;

text = document->NewText(buffer.str().c_str());
element->LinkEndChild(text);

element = document->NewElement("ErrorGradientNorm");
root_element->LinkEndChild(element);

buffer.str(""); buffer << error_gradient_norm;

text = document->NewText(buffer.str().c_str());
element->LinkEndChild(text);

element = document->NewElement("ErrorTrainingRate");
root_element->LinkEndChild(element);

buffer.str(""); buffer << error_training_rate;

text = document->NewText(buffer.str().c_str());
element->LinkEndChild(text);

element = document->NewElement("MinimumParametersIncrementNorm");
root_element->LinkEndChild(element);

buffer.str(""); buffer << minimum_parameters_increment_norm;

text = document->NewText(buffer.str().c_str());
element->LinkEndChild(text);

element = document->NewElement("MinimumPerformanceIncrease");
root_element->LinkEndChild(element);

buffer.str(""); buffer << minimum_performance_increase;

text = document->NewText(buffer.str().c_str());
element->LinkEndChild(text);

element = document->NewElement("PerformanceGoal");
root_element->LinkEndChild(element);

buffer.str(""); buffer << performance_goal;

text = document->NewText(buffer.str().c_str());
element->LinkEndChild(text);

element = document->NewElement("GradientNormGoal");
root_element->LinkEndChild(element);

buffer.str(""); buffer << gradient_norm_goal;

text = document->NewText(buffer.str().c_str());
element->LinkEndChild(text);

element = document->NewElement("MaximumGeneralizationPerformanceDecreases");
root_element->LinkEndChild(element);

buffer.str(""); buffer << maximum_generalization_performance_decreases;

text = document->NewText(buffer.str().c_str());
element->LinkEndChild(text);

element = document->NewElement("MaximumIterationsNumber");
root_element->LinkEndChild(element);

buffer.str(""); buffer << maximum_iterations_number;

text = document->NewText(buffer.str().c_str());
element->LinkEndChild(text);

element = document->NewElement("MaximumTime");
root_element->LinkEndChild(element);

buffer.str(""); buffer << maximum_time;

text = document->NewText(buffer.str().c_str());
element->LinkEndChild(text);
element = document->NewElement("ReserveParametersHistory");
root_element->LinkEndChild(element);

buffer.str(""); buffer << reserve_parameters_history;

text = document->NewText(buffer.str().c_str());
element->LinkEndChild(text);

element = document->NewElement("ReserveParametersNormHistory");
root_element->LinkEndChild(element);

buffer.str(""); buffer << reserve_parameters_norm_history;

text = document->NewText(buffer.str().c_str());
element->LinkEndChild(text);

element = document->NewElement("ReservePerformanceHistory");
root_element->LinkEndChild(element);

buffer.str(""); buffer << reserve_performance_history;

text = document->NewText(buffer.str().c_str());
element->LinkEndChild(text);

element = document->NewElement("ReserveGeneralizationPerformanceHistory");
root_element->LinkEndChild(element);

buffer.str(""); buffer << reserve_generalization_performance_history;

text = document->NewText(buffer.str().c_str());
element->LinkEndChild(text);

element = document->NewElement("ReserveGradientHistory");
root_element->LinkEndChild(element);

buffer.str(""); buffer << reserve_gradient_history;

text = document->NewText(buffer.str().c_str());
element->LinkEndChild(text);

element = document->NewElement("ReserveGradientNormHistory");
root_element->LinkEndChild(element);

buffer.str(""); buffer << reserve_gradient_norm_history;

text = document->NewText(buffer.str().c_str());
element->LinkEndChild(text);

element = document->NewElement("ReserveTrainingDirectionHistory");
root_element->LinkEndChild(element);

buffer.str(""); buffer << reserve_training_direction_history;

text = document->NewText(buffer.str().c_str());
element->LinkEndChild(text);

element = document->NewElement("ReserveTrainingRateHistory");
root_element->LinkEndChild(element);

buffer.str(""); buffer << reserve_training_rate_history;

text = document->NewText(buffer.str().c_str());
element->LinkEndChild(text);

element = document->NewElement("ReserveElapsedTimeHistory");
root_element->LinkEndChild(element);

buffer.str(""); buffer << reserve_elapsed_time_history;

text = document->NewText(buffer.str().c_str());
element->LinkEndChild(text);
element = document->NewElement("DisplayPeriod");
root_element->LinkEndChild(element);

buffer.str(""); buffer << display_period;

text = document->NewText(buffer.str().c_str());
element->LinkEndChild(text);

element = document->NewElement("SavePeriod");
root_element->LinkEndChild(element);

buffer.str(""); buffer << save_period;

text = document->NewText(buffer.str().c_str());
element->LinkEndChild(text);

element = document->NewElement("NeuralNetworkFileName");
root_element->LinkEndChild(element);

text = document->NewText(neural_network_file_name.c_str());
element->LinkEndChild(text);

element = document->NewElement("Display");
root_element->LinkEndChild(element);

buffer.str(""); buffer << display;

text = document->NewText(buffer.str().c_str());
element->LinkEndChild(text);

return(document);
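// Shape of the document produced above (illustrative values, not defaults from this file):
//
//    <ConjugateGradient>
//       <TrainingDirectionMethod>PR</TrainingDirectionMethod>
//       <TrainingRateAlgorithm> ... </TrainingRateAlgorithm>
//       <WarningParametersNorm>1000000</WarningParametersNorm>
//       ...
//       <Display>1</Display>
//    </ConjugateGradient>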
const tinyxml2::XMLElement* root_element = document.FirstChildElement("ConjugateGradient");

if(!root_element)
{
   std::ostringstream buffer;

   buffer << "OpenNN Exception: ConjugateGradient class.\n"
          << "void from_XML(const tinyxml2::XMLDocument&) method.\n"
          << "Conjugate gradient element is NULL.\n";

   throw std::logic_error(buffer.str());
}
const tinyxml2::XMLElement* training_direction_method_element = root_element->FirstChildElement("TrainingDirectionMethod");

if(training_direction_method_element)
{
   const std::string new_training_direction_method = training_direction_method_element->GetText();

   try { set_training_direction_method(new_training_direction_method); }
   catch(const std::logic_error& e) { std::cout << e.what() << std::endl; }
}

const tinyxml2::XMLElement* training_rate_algorithm_element = root_element->FirstChildElement("TrainingRateAlgorithm");

if(training_rate_algorithm_element)
{
   tinyxml2::XMLDocument training_rate_algorithm_document;

   tinyxml2::XMLElement* element_clone = training_rate_algorithm_document.NewElement("TrainingRateAlgorithm");
   training_rate_algorithm_document.InsertFirstChild(element_clone);

   DeepClone(element_clone, training_rate_algorithm_element, &training_rate_algorithm_document, NULL);

   training_rate_algorithm.from_XML(training_rate_algorithm_document);
}
const tinyxml2::XMLElement* warning_parameters_norm_element = root_element->FirstChildElement("WarningParametersNorm");

if(warning_parameters_norm_element)
{
   const double new_warning_parameters_norm = atof(warning_parameters_norm_element->GetText());

   try { set_warning_parameters_norm(new_warning_parameters_norm); }
   catch(const std::logic_error& e) { std::cout << e.what() << std::endl; }
}

const tinyxml2::XMLElement* warning_gradient_norm_element = root_element->FirstChildElement("WarningGradientNorm");

if(warning_gradient_norm_element)
{
   const double new_warning_gradient_norm = atof(warning_gradient_norm_element->GetText());

   try { set_warning_gradient_norm(new_warning_gradient_norm); }
   catch(const std::logic_error& e) { std::cout << e.what() << std::endl; }
}

const tinyxml2::XMLElement* warning_training_rate_element = root_element->FirstChildElement("WarningTrainingRate");

if(warning_training_rate_element)
{
   const double new_warning_training_rate = atof(warning_training_rate_element->GetText());

   try { set_warning_training_rate(new_warning_training_rate); }
   catch(const std::logic_error& e) { std::cout << e.what() << std::endl; }
}

const tinyxml2::XMLElement* error_parameters_norm_element = root_element->FirstChildElement("ErrorParametersNorm");

if(error_parameters_norm_element)
{
   const double new_error_parameters_norm = atof(error_parameters_norm_element->GetText());

   try { set_error_parameters_norm(new_error_parameters_norm); }
   catch(const std::logic_error& e) { std::cout << e.what() << std::endl; }
}

const tinyxml2::XMLElement* error_gradient_norm_element = root_element->FirstChildElement("ErrorGradientNorm");

if(error_gradient_norm_element)
{
   const double new_error_gradient_norm = atof(error_gradient_norm_element->GetText());

   try { set_error_gradient_norm(new_error_gradient_norm); }
   catch(const std::logic_error& e) { std::cout << e.what() << std::endl; }
}

const tinyxml2::XMLElement* error_training_rate_element = root_element->FirstChildElement("ErrorTrainingRate");

if(error_training_rate_element)
{
   const double new_error_training_rate = atof(error_training_rate_element->GetText());

   try { set_error_training_rate(new_error_training_rate); }
   catch(const std::logic_error& e) { std::cout << e.what() << std::endl; }
}

const tinyxml2::XMLElement* minimum_parameters_increment_norm_element = root_element->FirstChildElement("MinimumParametersIncrementNorm");

if(minimum_parameters_increment_norm_element)
{
   const double new_minimum_parameters_increment_norm = atof(minimum_parameters_increment_norm_element->GetText());

   try { set_minimum_parameters_increment_norm(new_minimum_parameters_increment_norm); }
   catch(const std::logic_error& e) { std::cout << e.what() << std::endl; }
}

const tinyxml2::XMLElement* minimum_performance_increase_element = root_element->FirstChildElement("MinimumPerformanceIncrease");

if(minimum_performance_increase_element)
{
   const double new_minimum_performance_increase = atof(minimum_performance_increase_element->GetText());

   try { set_minimum_performance_increase(new_minimum_performance_increase); }
   catch(const std::logic_error& e) { std::cout << e.what() << std::endl; }
}

const tinyxml2::XMLElement* performance_goal_element = root_element->FirstChildElement("PerformanceGoal");

if(performance_goal_element)
{
   const double new_performance_goal = atof(performance_goal_element->GetText());

   try { set_performance_goal(new_performance_goal); }
   catch(const std::logic_error& e) { std::cout << e.what() << std::endl; }
}

const tinyxml2::XMLElement* gradient_norm_goal_element = root_element->FirstChildElement("GradientNormGoal");

if(gradient_norm_goal_element)
{
   const double new_gradient_norm_goal = atof(gradient_norm_goal_element->GetText());

   try { set_gradient_norm_goal(new_gradient_norm_goal); }
   catch(const std::logic_error& e) { std::cout << e.what() << std::endl; }
}

const tinyxml2::XMLElement* maximum_generalization_performance_decreases_element = root_element->FirstChildElement("MaximumGeneralizationPerformanceDecreases");

if(maximum_generalization_performance_decreases_element)
{
   const size_t new_maximum_generalization_performance_decreases = atoi(maximum_generalization_performance_decreases_element->GetText());

   try { set_maximum_generalization_performance_decreases(new_maximum_generalization_performance_decreases); }
   catch(const std::logic_error& e) { std::cout << e.what() << std::endl; }
}

const tinyxml2::XMLElement* maximum_iterations_number_element = root_element->FirstChildElement("MaximumIterationsNumber");

if(maximum_iterations_number_element)
{
   const size_t new_maximum_iterations_number = atoi(maximum_iterations_number_element->GetText());

   try { set_maximum_iterations_number(new_maximum_iterations_number); }
   catch(const std::logic_error& e) { std::cout << e.what() << std::endl; }
}

const tinyxml2::XMLElement* maximum_time_element = root_element->FirstChildElement("MaximumTime");

if(maximum_time_element)
{
   const double new_maximum_time = atof(maximum_time_element->GetText());

   try { set_maximum_time(new_maximum_time); }
   catch(const std::logic_error& e) { std::cout << e.what() << std::endl; }
}
const tinyxml2::XMLElement* reserve_parameters_history_element = root_element->FirstChildElement("ReserveParametersHistory");

if(reserve_parameters_history_element)
{
   const std::string new_reserve_parameters_history = reserve_parameters_history_element->GetText();

   try { set_reserve_parameters_history(new_reserve_parameters_history != "0"); }
   catch(const std::logic_error& e) { std::cout << e.what() << std::endl; }
}

const tinyxml2::XMLElement* reserve_parameters_norm_history_element = root_element->FirstChildElement("ReserveParametersNormHistory");

if(reserve_parameters_norm_history_element)
{
   const std::string new_reserve_parameters_norm_history = reserve_parameters_norm_history_element->GetText();

   try { set_reserve_parameters_norm_history(new_reserve_parameters_norm_history != "0"); }
   catch(const std::logic_error& e) { std::cout << e.what() << std::endl; }
}

const tinyxml2::XMLElement* reserve_performance_history_element = root_element->FirstChildElement("ReservePerformanceHistory");

if(reserve_performance_history_element)
{
   const std::string new_reserve_performance_history = reserve_performance_history_element->GetText();

   try { set_reserve_performance_history(new_reserve_performance_history != "0"); }
   catch(const std::logic_error& e) { std::cout << e.what() << std::endl; }
}

const tinyxml2::XMLElement* reserve_generalization_performance_history_element = root_element->FirstChildElement("ReserveGeneralizationPerformanceHistory");

if(reserve_generalization_performance_history_element)
{
   const std::string new_reserve_generalization_performance_history = reserve_generalization_performance_history_element->GetText();

   try { set_reserve_generalization_performance_history(new_reserve_generalization_performance_history != "0"); }
   catch(const std::logic_error& e) { std::cout << e.what() << std::endl; }
}

const tinyxml2::XMLElement* reserve_gradient_history_element = root_element->FirstChildElement("ReserveGradientHistory");

if(reserve_gradient_history_element)
{
   const std::string new_reserve_gradient_history = reserve_gradient_history_element->GetText();

   try { set_reserve_gradient_history(new_reserve_gradient_history != "0"); }
   catch(const std::logic_error& e) { std::cout << e.what() << std::endl; }
}

const tinyxml2::XMLElement* reserve_gradient_norm_history_element = root_element->FirstChildElement("ReserveGradientNormHistory");

if(reserve_gradient_norm_history_element)
{
   const std::string new_reserve_gradient_norm_history = reserve_gradient_norm_history_element->GetText();

   try { set_reserve_gradient_norm_history(new_reserve_gradient_norm_history != "0"); }
   catch(const std::logic_error& e) { std::cout << e.what() << std::endl; }
}

const tinyxml2::XMLElement* reserve_training_direction_history_element = root_element->FirstChildElement("ReserveTrainingDirectionHistory");

if(reserve_training_direction_history_element)
{
   const std::string new_reserve_training_direction_history = reserve_training_direction_history_element->GetText();

   try { set_reserve_training_direction_history(new_reserve_training_direction_history != "0"); }
   catch(const std::logic_error& e) { std::cout << e.what() << std::endl; }
}

const tinyxml2::XMLElement* reserve_training_rate_history_element = root_element->FirstChildElement("ReserveTrainingRateHistory");

if(reserve_training_rate_history_element)
{
   const std::string new_reserve_training_rate_history = reserve_training_rate_history_element->GetText();

   try { set_reserve_training_rate_history(new_reserve_training_rate_history != "0"); }
   catch(const std::logic_error& e) { std::cout << e.what() << std::endl; }
}

const tinyxml2::XMLElement* reserve_elapsed_time_history_element = root_element->FirstChildElement("ReserveElapsedTimeHistory");

if(reserve_elapsed_time_history_element)
{
   const std::string new_reserve_elapsed_time_history = reserve_elapsed_time_history_element->GetText();

   try { set_reserve_elapsed_time_history(new_reserve_elapsed_time_history != "0"); }
   catch(const std::logic_error& e) { std::cout << e.what() << std::endl; }
}
const tinyxml2::XMLElement* display_period_element = root_element->FirstChildElement("DisplayPeriod");

if(display_period_element)
{
   const size_t new_display_period = atoi(display_period_element->GetText());

   try { set_display_period(new_display_period); }
   catch(const std::logic_error& e) { std::cout << e.what() << std::endl; }
}

const tinyxml2::XMLElement* save_period_element = root_element->FirstChildElement("SavePeriod");

if(save_period_element)
{
   const size_t new_save_period = atoi(save_period_element->GetText());

   try { set_save_period(new_save_period); }
   catch(const std::logic_error& e) { std::cout << e.what() << std::endl; }
}

const tinyxml2::XMLElement* neural_network_file_name_element = root_element->FirstChildElement("NeuralNetworkFileName");

if(neural_network_file_name_element)
{
   const std::string new_neural_network_file_name = neural_network_file_name_element->GetText();

   try { set_neural_network_file_name(new_neural_network_file_name); }
   catch(const std::logic_error& e) { std::cout << e.what() << std::endl; }
}

const tinyxml2::XMLElement* display_element = root_element->FirstChildElement("Display");

if(display_element)
{
   const std::string new_display = display_element->GetText();

   try { set_display(new_display != "0"); }
   catch(const std::logic_error& e) { std::cout << e.what() << std::endl; }
}
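// --- Illustrative round trip (a sketch; assumes ConjugateGradient is default-constructible,
// --- which this file does not show) ---
//
//    tinyxml2::XMLDocument* document = conjugate_gradient.to_XML();
//
//    ConjugateGradient restored;
//    restored.from_XML(*document);
//
//    delete document;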