#include "newton_method.h"
if(new_warning_parameters_norm < 0.0)
{
   std::ostringstream buffer;

   buffer << "OpenNN Exception: NewtonMethod class.\n"
          << "void set_warning_parameters_norm(const double&) method.\n"
          << "Warning parameters norm must be greater than or equal to 0.\n";

   throw std::logic_error(buffer.str());
}
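Every range check in this file repeats the shape above: validate the argument, compose a diagnostic in an std::ostringstream, and throw std::logic_error. As a minimal sketch, the complete setter would look as follows, assuming the member assignment that follows the guard (the assignment lines are not shown in this extract):

void NewtonMethod::set_warning_parameters_norm(const double& new_warning_parameters_norm)
{
   if(new_warning_parameters_norm < 0.0)
   {
      std::ostringstream buffer;

      buffer << "OpenNN Exception: NewtonMethod class.\n"
             << "void set_warning_parameters_norm(const double&) method.\n"
             << "Warning parameters norm must be greater than or equal to 0.\n";

      throw std::logic_error(buffer.str());
   }

   // Assumed: store the validated value in the corresponding member.
   warning_parameters_norm = new_warning_parameters_norm;
}

The validation blocks below follow this same pattern for the remaining members.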
if(new_warning_gradient_norm < 0.0)
{
   std::ostringstream buffer;

   buffer << "OpenNN Exception: NewtonMethod class.\n"
          << "void set_warning_gradient_norm(const double&) method.\n"
          << "Warning gradient norm must be greater than or equal to 0.\n";

   throw std::logic_error(buffer.str());
}
if(new_warning_training_rate < 0.0)
{
   std::ostringstream buffer;

   buffer << "OpenNN Exception: NewtonMethod class.\n"
          << "void set_warning_training_rate(const double&) method.\n"
          << "Warning training rate must be greater than or equal to 0.\n";

   throw std::logic_error(buffer.str());
}
if(new_error_parameters_norm < 0.0)
{
   std::ostringstream buffer;

   buffer << "OpenNN Exception: NewtonMethod class.\n"
          << "void set_error_parameters_norm(const double&) method.\n"
          << "Error parameters norm must be greater than or equal to 0.\n";

   throw std::logic_error(buffer.str());
}
if(new_error_gradient_norm < 0.0)
{
   std::ostringstream buffer;

   buffer << "OpenNN Exception: NewtonMethod class.\n"
          << "void set_error_gradient_norm(const double&) method.\n"
          << "Error gradient norm must be greater than or equal to 0.\n";

   throw std::logic_error(buffer.str());
}
if(new_error_training_rate < 0.0)
{
   std::ostringstream buffer;

   buffer << "OpenNN Exception: NewtonMethod class.\n"
          << "void set_error_training_rate(const double&) method.\n"
          << "Error training rate must be greater than or equal to 0.\n";

   throw std::logic_error(buffer.str());
}
if(new_minimum_parameters_increment_norm < 0.0)
{
   std::ostringstream buffer;

   buffer << "OpenNN Exception: NewtonMethod class.\n"
          << "void set_minimum_parameters_increment_norm(const double&) method.\n"
          << "Minimum parameters increment norm must be greater than or equal to 0.\n";

   throw std::logic_error(buffer.str());
}
if(new_minimum_performance_increase < 0.0)
{
   std::ostringstream buffer;

   buffer << "OpenNN Exception: NewtonMethod class.\n"
          << "void set_minimum_performance_increase(const double&) method.\n"
          << "Minimum performance increase must be greater than or equal to 0.\n";

   throw std::logic_error(buffer.str());
}
if(new_gradient_norm_goal < 0.0)
{
   std::ostringstream buffer;

   buffer << "OpenNN Exception: NewtonMethod class.\n"
          << "void set_gradient_norm_goal(const double&) method.\n"
          << "Gradient norm goal must be greater than or equal to 0.\n";

   throw std::logic_error(buffer.str());
}
if(new_maximum_time < 0.0)
{
   std::ostringstream buffer;

   buffer << "OpenNN Exception: NewtonMethod class.\n"
          << "void set_maximum_time(const double&) method.\n"
          << "Maximum time must be greater than or equal to 0.\n";

   throw std::logic_error(buffer.str());
}
if(new_display_period <= 0)
{
   std::ostringstream buffer;

   buffer << "OpenNN Exception: NewtonMethod class.\n"
          << "void set_display_period(const size_t&) method.\n"
          << "Display period must be greater than 0.\n";

   throw std::logic_error(buffer.str());
}
std::ostringstream buffer;

if(!performance_functional_pointer)
{
   buffer << "OpenNN Exception: NewtonMethod class.\n"
          << "Vector<double> calculate_gradient_descent_training_direction(const Vector<double>&) const method.\n"
          << "Performance functional pointer is NULL.\n";

   throw std::logic_error(buffer.str());
}

// parameters_number holds the neural network's parameter count, obtained via
// count_parameters_number() (its declaration is elided in this extract).

const size_t gradient_size = gradient.size();

if(gradient_size != parameters_number)
{
   buffer << "OpenNN Exception: NewtonMethod class.\n"
          << "Vector<double> calculate_gradient_descent_training_direction(const Vector<double>&) const method.\n"
          << "Size of gradient (" << gradient_size << ") is not equal to number of parameters ("
          << parameters_number << ").\n";

   throw std::logic_error(buffer.str());
}
return((inverse_Hessian.dot(gradient)*(-1.0)).calculate_normalized());
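The expression above computes the Newton training direction d = -H^-1 g, normalized to unit length by calculate_normalized(). A self-contained sketch of the same computation with plain std::vector, for illustration only (OpenNN's Vector and Matrix types are not used here):

#include <cmath>
#include <cstddef>
#include <vector>

// Illustration: normalized Newton direction d = -H^-1 g / ||H^-1 g||,
// given the inverse Hessian as a dense matrix.
std::vector<double> newton_direction(const std::vector<std::vector<double> >& inverse_Hessian,
                                     const std::vector<double>& gradient)
{
   const std::size_t n = gradient.size();
   std::vector<double> direction(n, 0.0);

   // direction = -(inverse_Hessian * gradient)
   for(std::size_t i = 0; i < n; i++)
      for(std::size_t j = 0; j < n; j++)
         direction[i] -= inverse_Hessian[i][j]*gradient[j];

   // Normalize to unit length, as calculate_normalized() does above.
   double norm = 0.0;
   for(std::size_t i = 0; i < n; i++) norm += direction[i]*direction[i];
   norm = std::sqrt(norm);

   if(norm > 0.0) // guard against a zero direction
      for(std::size_t i = 0; i < n; i++) direction[i] /= norm;

   return direction;
}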
std::ostringstream buffer;

buffer << "OpenNN Exception: NewtonMethodResults structure.\n"
       << "void resize_training_history(const size_t&) method.\n"
       << "NewtonMethod pointer is NULL.\n";

throw std::logic_error(buffer.str());
std::ostringstream buffer;

if(!parameters_history.empty())
{
   if(!parameters_history[0].empty())
   {
      buffer << "% Parameters history:\n"
             << parameters_history << "\n";
   }
}

if(!parameters_norm_history.empty())
{
   buffer << "% Parameters norm history:\n"
          << parameters_norm_history << "\n";
}

if(!performance_history.empty())
{
   buffer << "% Performance history:\n"
          << performance_history << "\n";
}

if(!generalization_performance_history.empty())
{
   buffer << "% Generalization performance history:\n"
          << generalization_performance_history << "\n";
}

if(!gradient_history.empty())
{
   if(!gradient_history[0].empty())
   {
      buffer << "% Gradient history:\n"
             << gradient_history << "\n";
   }
}

if(!gradient_norm_history.empty())
{
   buffer << "% Gradient norm history:\n"
          << gradient_norm_history << "\n";
}

if(!inverse_Hessian_history.empty())
{
   if(!inverse_Hessian_history[0].empty())
   {
      buffer << "% Inverse Hessian history:\n"
             << inverse_Hessian_history << "\n";
   }
}

if(!training_direction_history.empty())
{
   if(!training_direction_history[0].empty())
   {
      buffer << "% Training direction history:\n"
             << training_direction_history << "\n";
   }
}

if(!training_rate_history.empty())
{
   buffer << "% Training rate history:\n"
          << training_rate_history << "\n";
}

if(!elapsed_time_history.empty())
{
   buffer << "% Elapsed time history:\n"
          << elapsed_time_history << "\n";
}

return(buffer.str());
std::ostringstream buffer;

Vector<std::string> names;
Vector<std::string> values;

// Final parameters norm

names.push_back("Final parameters norm");

buffer << std::setprecision(precision) << final_parameters_norm;

values.push_back(buffer.str());

// Final performance

names.push_back("Final performance");

buffer.str("");
buffer << std::setprecision(precision) << final_performance;

values.push_back(buffer.str());

// Final generalization performance

names.push_back("Final generalization performance");

buffer.str("");
buffer << std::setprecision(precision) << final_generalization_performance;

values.push_back(buffer.str());

// Final gradient norm

names.push_back("Final gradient norm");

buffer.str("");
buffer << std::setprecision(precision) << final_gradient_norm;

values.push_back(buffer.str());

// Iterations number

names.push_back("Iterations number");

buffer.str("");
buffer << iterations_number;

values.push_back(buffer.str());

// Elapsed time

names.push_back("Elapsed time");

buffer.str("");
buffer << elapsed_time;

values.push_back(buffer.str());

const size_t rows_number = names.size();
const size_t columns_number = 2;

return(final_results);
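The construction of final_results between the row/column counts and the return statement is elided above. A plausible sketch using set_column, which appears in the member reference below (the two-argument Matrix constructor is an assumption); to_string_matrix further down assembles its string_matrix the same way:

// Sketch, assuming Matrix<std::string> has a (rows, columns) constructor;
// set_column is listed in the member reference below.
Matrix<std::string> final_results(rows_number, columns_number);

final_results.set_column(0, names);   // first column: result labels
final_results.set_column(1, values);  // second column: formatted values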
std::ostringstream buffer;

buffer << "OpenNN Exception: NewtonMethod class.\n"
       << "NewtonMethodResults* perform_training(void) method.\n"
       << "This method is under development.\n";

throw std::logic_error(buffer.str());
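perform_training is explicitly left under development above. For orientation only, the update such a loop would iterate is the Newton step x <- x - f'(x)/f''(x) along the direction returned by calculate_training_direction. A standalone one-dimensional demonstration (not OpenNN code) minimizing f(x) = x^4:

#include <cmath>
#include <cstdio>

// Standalone illustration of the Newton update x <- x - f'(x)/f''(x),
// minimizing f(x) = x^4 (minimum at x = 0). Not OpenNN code.
int main(void)
{
   double x = 1.0;

   for(int iteration = 0; iteration < 10; iteration++)
   {
      const double gradient = 4.0*x*x*x; // f'(x)
      const double hessian = 12.0*x*x;   // f''(x)

      if(std::fabs(hessian) < 1.0e-12)   // guard against a singular Hessian
      {
         break;
      }

      x -= gradient/hessian;             // Newton step; here x <- (2/3)x

      std::printf("iteration %d: x = %.6f\n", iteration, x);
   }

   return 0;
}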
return("NEWTON_METHOD");
std::ostringstream buffer;

Vector<std::string> labels;
Vector<std::string> values;

// Training rate method

labels.push_back("Training rate method");

const std::string training_rate_method = training_rate_algorithm.write_training_rate_method();

values.push_back(training_rate_method);

// Training rate tolerance

labels.push_back("Training rate tolerance");

buffer.str("");
buffer << training_rate_algorithm.get_training_rate_tolerance();

values.push_back(buffer.str());

// Minimum parameters increment norm

labels.push_back("Minimum parameters increment norm");

buffer.str("");
buffer << minimum_parameters_increment_norm;

values.push_back(buffer.str());

// Minimum performance increase

labels.push_back("Minimum performance increase");

buffer.str("");
buffer << minimum_performance_increase;

values.push_back(buffer.str());

// Performance goal

labels.push_back("Performance goal");

buffer.str("");
buffer << performance_goal;

values.push_back(buffer.str());

// Gradient norm goal

labels.push_back("Gradient norm goal");

buffer.str("");
buffer << gradient_norm_goal;

values.push_back(buffer.str());

// Maximum generalization failures

labels.push_back("Maximum generalization failures");

buffer.str("");
buffer << maximum_generalization_performance_decreases;

values.push_back(buffer.str());

// Maximum iterations number

labels.push_back("Maximum iterations number");

buffer.str("");
buffer << maximum_iterations_number;

values.push_back(buffer.str());

// Maximum time

labels.push_back("Maximum time");

buffer.str("");
buffer << maximum_time;

values.push_back(buffer.str());

// Reserve parameters norm history

labels.push_back("Reserve parameters norm history");

buffer.str("");
buffer << reserve_parameters_norm_history;

values.push_back(buffer.str());

// Reserve performance history

labels.push_back("Reserve performance history");

buffer.str("");
buffer << reserve_performance_history;

values.push_back(buffer.str());

// Reserve gradient norm history

labels.push_back("Reserve gradient norm history");

buffer.str("");
buffer << reserve_gradient_norm_history;

values.push_back(buffer.str());

// Reserve generalization performance history

labels.push_back("Reserve generalization performance history");

buffer.str("");
buffer << reserve_generalization_performance_history;

values.push_back(buffer.str());

// Reserve elapsed time history

labels.push_back("Reserve elapsed time history");

buffer.str("");
buffer << reserve_elapsed_time_history;

values.push_back(buffer.str());

const size_t rows_number = labels.size();
const size_t columns_number = 2;

return(string_matrix);
std::ostringstream buffer;

tinyxml2::XMLDocument* document = new tinyxml2::XMLDocument;

tinyxml2::XMLElement* root_element = document->NewElement("NewtonMethod");

document->InsertFirstChild(root_element);

tinyxml2::XMLElement* element = NULL;
tinyxml2::XMLText* text = NULL;

// Training rate algorithm

{
   tinyxml2::XMLElement* element = document->NewElement("TrainingRateAlgorithm");
   root_element->LinkEndChild(element);

   const tinyxml2::XMLDocument* training_rate_algorithm_document = training_rate_algorithm.to_XML();

   const tinyxml2::XMLElement* training_rate_algorithm_element = training_rate_algorithm_document->FirstChildElement("TrainingRateAlgorithm");

   DeepClone(element, training_rate_algorithm_element, document, NULL);

   delete training_rate_algorithm_document;
}

// Warning parameters norm

element = document->NewElement("WarningParametersNorm");
root_element->LinkEndChild(element);

buffer.str("");
buffer << warning_parameters_norm;

text = document->NewText(buffer.str().c_str());
element->LinkEndChild(text);

// Warning gradient norm

element = document->NewElement("WarningGradientNorm");
root_element->LinkEndChild(element);

buffer.str("");
buffer << warning_gradient_norm;

text = document->NewText(buffer.str().c_str());
element->LinkEndChild(text);

// Warning training rate

element = document->NewElement("WarningTrainingRate");
root_element->LinkEndChild(element);

buffer.str("");
buffer << warning_training_rate;

text = document->NewText(buffer.str().c_str());
element->LinkEndChild(text);

// Error parameters norm

element = document->NewElement("ErrorParametersNorm");
root_element->LinkEndChild(element);

buffer.str("");
buffer << error_parameters_norm;

text = document->NewText(buffer.str().c_str());
element->LinkEndChild(text);

// Error gradient norm

element = document->NewElement("ErrorGradientNorm");
root_element->LinkEndChild(element);

buffer.str("");
buffer << error_gradient_norm;

text = document->NewText(buffer.str().c_str());
element->LinkEndChild(text);

// Error training rate

element = document->NewElement("ErrorTrainingRate");
root_element->LinkEndChild(element);

buffer.str("");
buffer << error_training_rate;

text = document->NewText(buffer.str().c_str());
element->LinkEndChild(text);

// Minimum parameters increment norm

element = document->NewElement("MinimumParametersIncrementNorm");
root_element->LinkEndChild(element);

buffer.str("");
buffer << minimum_parameters_increment_norm;

text = document->NewText(buffer.str().c_str());
element->LinkEndChild(text);

// Minimum performance increase

element = document->NewElement("MinimumPerformanceIncrease");
root_element->LinkEndChild(element);

buffer.str("");
buffer << minimum_performance_increase;

text = document->NewText(buffer.str().c_str());
element->LinkEndChild(text);

// Performance goal

element = document->NewElement("PerformanceGoal");
root_element->LinkEndChild(element);

buffer.str("");
buffer << performance_goal;

text = document->NewText(buffer.str().c_str());
element->LinkEndChild(text);

// Gradient norm goal

element = document->NewElement("GradientNormGoal");
root_element->LinkEndChild(element);

buffer.str("");
buffer << gradient_norm_goal;

text = document->NewText(buffer.str().c_str());
element->LinkEndChild(text);

// Maximum generalization performance decreases

element = document->NewElement("MaximumGeneralizationPerformanceDecreases");
root_element->LinkEndChild(element);

buffer.str("");
buffer << maximum_generalization_performance_decreases;

text = document->NewText(buffer.str().c_str());
element->LinkEndChild(text);

// Maximum iterations number

element = document->NewElement("MaximumIterationsNumber");
root_element->LinkEndChild(element);

buffer.str("");
buffer << maximum_iterations_number;

text = document->NewText(buffer.str().c_str());
element->LinkEndChild(text);

// Maximum time

element = document->NewElement("MaximumTime");
root_element->LinkEndChild(element);

buffer.str("");
buffer << maximum_time;

text = document->NewText(buffer.str().c_str());
element->LinkEndChild(text);

// Reserve parameters history

element = document->NewElement("ReserveParametersHistory");
root_element->LinkEndChild(element);

buffer.str("");
buffer << reserve_parameters_history;

text = document->NewText(buffer.str().c_str());
element->LinkEndChild(text);

// Reserve parameters norm history

element = document->NewElement("ReserveParametersNormHistory");
root_element->LinkEndChild(element);

buffer.str("");
buffer << reserve_parameters_norm_history;

text = document->NewText(buffer.str().c_str());
element->LinkEndChild(text);

// Reserve performance history

element = document->NewElement("ReservePerformanceHistory");
root_element->LinkEndChild(element);

buffer.str("");
buffer << reserve_performance_history;

text = document->NewText(buffer.str().c_str());
element->LinkEndChild(text);

// Reserve gradient history

element = document->NewElement("ReserveGradientHistory");
root_element->LinkEndChild(element);

buffer.str("");
buffer << reserve_gradient_history;

text = document->NewText(buffer.str().c_str());
element->LinkEndChild(text);

// Reserve gradient norm history

element = document->NewElement("ReserveGradientNormHistory");
root_element->LinkEndChild(element);

buffer.str("");
buffer << reserve_gradient_norm_history;

text = document->NewText(buffer.str().c_str());
element->LinkEndChild(text);

// Reserve inverse Hessian history

element = document->NewElement("ReserveInverseHessianHistory");
root_element->LinkEndChild(element);

buffer.str("");
buffer << reserve_inverse_Hessian_history;

text = document->NewText(buffer.str().c_str());
element->LinkEndChild(text);

// Reserve training direction history

element = document->NewElement("ReserveTrainingDirectionHistory");
root_element->LinkEndChild(element);

buffer.str("");
buffer << reserve_training_direction_history;

text = document->NewText(buffer.str().c_str());
element->LinkEndChild(text);

// Reserve training rate history

element = document->NewElement("ReserveTrainingRateHistory");
root_element->LinkEndChild(element);

buffer.str("");
buffer << reserve_training_rate_history;

text = document->NewText(buffer.str().c_str());
element->LinkEndChild(text);

// Reserve elapsed time history

element = document->NewElement("ReserveElapsedTimeHistory");
root_element->LinkEndChild(element);

buffer.str("");
buffer << reserve_elapsed_time_history;

text = document->NewText(buffer.str().c_str());
element->LinkEndChild(text);

// Reserve generalization performance history

element = document->NewElement("ReserveGeneralizationPerformanceHistory");
root_element->LinkEndChild(element);

buffer.str("");
buffer << reserve_generalization_performance_history;

text = document->NewText(buffer.str().c_str());
element->LinkEndChild(text);

// Display period

element = document->NewElement("DisplayPeriod");
root_element->LinkEndChild(element);

buffer.str("");
buffer << display_period;

text = document->NewText(buffer.str().c_str());
element->LinkEndChild(text);

// Save period

element = document->NewElement("SavePeriod");
root_element->LinkEndChild(element);

buffer.str("");
buffer << save_period;

text = document->NewText(buffer.str().c_str());
element->LinkEndChild(text);

// Neural network file name

element = document->NewElement("NeuralNetworkFileName");
root_element->LinkEndChild(element);

text = document->NewText(neural_network_file_name.c_str());
element->LinkEndChild(text);

// Display

element = document->NewElement("Display");
root_element->LinkEndChild(element);

buffer.str("");
buffer << display;

text = document->NewText(buffer.str().c_str());
element->LinkEndChild(text);

return(document);
const tinyxml2::XMLElement* root_element = document.FirstChildElement("NewtonMethod");

if(!root_element)
{
   std::ostringstream buffer;

   buffer << "OpenNN Exception: NewtonMethod class.\n"
          << "void from_XML(const tinyxml2::XMLDocument&) method.\n"
          << "Newton method element is NULL.\n";

   throw std::logic_error(buffer.str());
}
// Training rate algorithm

const tinyxml2::XMLElement* training_rate_algorithm_element = root_element->FirstChildElement("TrainingRateAlgorithm");

if(training_rate_algorithm_element)
{
   tinyxml2::XMLDocument training_rate_algorithm_document;

   tinyxml2::XMLElement* element_clone = training_rate_algorithm_document.NewElement("TrainingRateAlgorithm");
   training_rate_algorithm_document.InsertFirstChild(element_clone);

   DeepClone(element_clone, training_rate_algorithm_element, &training_rate_algorithm_document, NULL);

   training_rate_algorithm.from_XML(training_rate_algorithm_document);
}
// Warning parameters norm
{
   const tinyxml2::XMLElement* element = root_element->FirstChildElement("WarningParametersNorm");

   if(element)
   {
      const double new_warning_parameters_norm = atof(element->GetText());
      try { set_warning_parameters_norm(new_warning_parameters_norm); }
      catch(const std::logic_error& e) { std::cout << e.what() << std::endl; }
   }
}

// Warning gradient norm
{
   const tinyxml2::XMLElement* element = root_element->FirstChildElement("WarningGradientNorm");

   if(element)
   {
      const double new_warning_gradient_norm = atof(element->GetText());
      try { set_warning_gradient_norm(new_warning_gradient_norm); }
      catch(const std::logic_error& e) { std::cout << e.what() << std::endl; }
   }
}

// Warning training rate
{
   const tinyxml2::XMLElement* element = root_element->FirstChildElement("WarningTrainingRate");

   if(element)
   {
      const double new_warning_training_rate = atof(element->GetText());
      try { set_warning_training_rate(new_warning_training_rate); }
      catch(const std::logic_error& e) { std::cout << e.what() << std::endl; }
   }
}

// Error parameters norm
{
   const tinyxml2::XMLElement* element = root_element->FirstChildElement("ErrorParametersNorm");

   if(element)
   {
      const double new_error_parameters_norm = atof(element->GetText());
      try { set_error_parameters_norm(new_error_parameters_norm); }
      catch(const std::logic_error& e) { std::cout << e.what() << std::endl; }
   }
}

// Error gradient norm
{
   const tinyxml2::XMLElement* element = root_element->FirstChildElement("ErrorGradientNorm");

   if(element)
   {
      const double new_error_gradient_norm = atof(element->GetText());
      try { set_error_gradient_norm(new_error_gradient_norm); }
      catch(const std::logic_error& e) { std::cout << e.what() << std::endl; }
   }
}

// Error training rate
{
   const tinyxml2::XMLElement* element = root_element->FirstChildElement("ErrorTrainingRate");

   if(element)
   {
      const double new_error_training_rate = atof(element->GetText());
      try { set_error_training_rate(new_error_training_rate); }
      catch(const std::logic_error& e) { std::cout << e.what() << std::endl; }
   }
}

// Minimum parameters increment norm
{
   const tinyxml2::XMLElement* element = root_element->FirstChildElement("MinimumParametersIncrementNorm");

   if(element)
   {
      const double new_minimum_parameters_increment_norm = atof(element->GetText());
      try { set_minimum_parameters_increment_norm(new_minimum_parameters_increment_norm); }
      catch(const std::logic_error& e) { std::cout << e.what() << std::endl; }
   }
}

// Minimum performance increase
{
   const tinyxml2::XMLElement* element = root_element->FirstChildElement("MinimumPerformanceIncrease");

   if(element)
   {
      const double new_minimum_performance_increase = atof(element->GetText());
      try { set_minimum_performance_increase(new_minimum_performance_increase); }
      catch(const std::logic_error& e) { std::cout << e.what() << std::endl; }
   }
}

// Performance goal
{
   const tinyxml2::XMLElement* element = root_element->FirstChildElement("PerformanceGoal");

   if(element)
   {
      const double new_performance_goal = atof(element->GetText());
      try { set_performance_goal(new_performance_goal); }
      catch(const std::logic_error& e) { std::cout << e.what() << std::endl; }
   }
}

// Gradient norm goal
{
   const tinyxml2::XMLElement* element = root_element->FirstChildElement("GradientNormGoal");

   if(element)
   {
      const double new_gradient_norm_goal = atof(element->GetText());
      try { set_gradient_norm_goal(new_gradient_norm_goal); }
      catch(const std::logic_error& e) { std::cout << e.what() << std::endl; }
   }
}

// Maximum generalization performance decreases
{
   const tinyxml2::XMLElement* element = root_element->FirstChildElement("MaximumGeneralizationPerformanceDecreases");

   if(element)
   {
      const size_t new_maximum_generalization_performance_decreases = atoi(element->GetText());
      try { set_maximum_generalization_performance_decreases(new_maximum_generalization_performance_decreases); }
      catch(const std::logic_error& e) { std::cout << e.what() << std::endl; }
   }
}

// Maximum iterations number
{
   const tinyxml2::XMLElement* element = root_element->FirstChildElement("MaximumIterationsNumber");

   if(element)
   {
      const size_t new_maximum_iterations_number = atoi(element->GetText());
      try { set_maximum_iterations_number(new_maximum_iterations_number); }
      catch(const std::logic_error& e) { std::cout << e.what() << std::endl; }
   }
}

// Maximum time
{
   const tinyxml2::XMLElement* element = root_element->FirstChildElement("MaximumTime");

   if(element)
   {
      const double new_maximum_time = atof(element->GetText());
      try { set_maximum_time(new_maximum_time); }
      catch(const std::logic_error& e) { std::cout << e.what() << std::endl; }
   }
}

// Reserve parameters history
{
   const tinyxml2::XMLElement* element = root_element->FirstChildElement("ReserveParametersHistory");

   if(element)
   {
      const std::string new_reserve_parameters_history = element->GetText();
      try { set_reserve_parameters_history(new_reserve_parameters_history != "0"); }
      catch(const std::logic_error& e) { std::cout << e.what() << std::endl; }
   }
}

// Reserve parameters norm history
{
   const tinyxml2::XMLElement* element = root_element->FirstChildElement("ReserveParametersNormHistory");

   if(element)
   {
      const std::string new_reserve_parameters_norm_history = element->GetText();
      try { set_reserve_parameters_norm_history(new_reserve_parameters_norm_history != "0"); }
      catch(const std::logic_error& e) { std::cout << e.what() << std::endl; }
   }
}

// Reserve performance history
{
   const tinyxml2::XMLElement* element = root_element->FirstChildElement("ReservePerformanceHistory");

   if(element)
   {
      const std::string new_reserve_performance_history = element->GetText();
      try { set_reserve_performance_history(new_reserve_performance_history != "0"); }
      catch(const std::logic_error& e) { std::cout << e.what() << std::endl; }
   }
}

// Reserve gradient history
{
   const tinyxml2::XMLElement* element = root_element->FirstChildElement("ReserveGradientHistory");

   if(element)
   {
      const std::string new_reserve_gradient_history = element->GetText();
      try { set_reserve_gradient_history(new_reserve_gradient_history != "0"); }
      catch(const std::logic_error& e) { std::cout << e.what() << std::endl; }
   }
}

// Reserve gradient norm history

const tinyxml2::XMLElement* reserve_gradient_norm_history_element = root_element->FirstChildElement("ReserveGradientNormHistory");

if(reserve_gradient_norm_history_element)
{
   const std::string new_reserve_gradient_norm_history = reserve_gradient_norm_history_element->GetText();
   try { set_reserve_gradient_norm_history(new_reserve_gradient_norm_history != "0"); }
   catch(const std::logic_error& e) { std::cout << e.what() << std::endl; }
}

// Reserve training direction history

const tinyxml2::XMLElement* reserve_training_direction_history_element = root_element->FirstChildElement("ReserveTrainingDirectionHistory");

if(reserve_training_direction_history_element)
{
   const std::string new_reserve_training_direction_history = reserve_training_direction_history_element->GetText();
   try { set_reserve_training_direction_history(new_reserve_training_direction_history != "0"); }
   catch(const std::logic_error& e) { std::cout << e.what() << std::endl; }
}

// Reserve training rate history

const tinyxml2::XMLElement* reserve_training_rate_history_element = root_element->FirstChildElement("ReserveTrainingRateHistory");

if(reserve_training_rate_history_element)
{
   const std::string new_reserve_training_rate_history = reserve_training_rate_history_element->GetText();
   try { set_reserve_training_rate_history(new_reserve_training_rate_history != "0"); }
   catch(const std::logic_error& e) { std::cout << e.what() << std::endl; }
}

// Reserve elapsed time history
{
   const tinyxml2::XMLElement* element = root_element->FirstChildElement("ReserveElapsedTimeHistory");

   if(element)
   {
      const std::string new_reserve_elapsed_time_history = element->GetText();
      try { set_reserve_elapsed_time_history(new_reserve_elapsed_time_history != "0"); }
      catch(const std::logic_error& e) { std::cout << e.what() << std::endl; }
   }
}

// Reserve generalization performance history
{
   const tinyxml2::XMLElement* element = root_element->FirstChildElement("ReserveGeneralizationPerformanceHistory");

   if(element)
   {
      const std::string new_reserve_generalization_performance_history = element->GetText();
      try { set_reserve_generalization_performance_history(new_reserve_generalization_performance_history != "0"); }
      catch(const std::logic_error& e) { std::cout << e.what() << std::endl; }
   }
}

// Display period
{
   const tinyxml2::XMLElement* element = root_element->FirstChildElement("DisplayPeriod");

   if(element)
   {
      const size_t new_display_period = atoi(element->GetText());
      try { set_display_period(new_display_period); }
      catch(const std::logic_error& e) { std::cout << e.what() << std::endl; }
   }
}

// Save period
{
   const tinyxml2::XMLElement* element = root_element->FirstChildElement("SavePeriod");

   if(element)
   {
      const size_t new_save_period = atoi(element->GetText());
      try { set_save_period(new_save_period); }
      catch(const std::logic_error& e) { std::cout << e.what() << std::endl; }
   }
}

// Neural network file name
{
   const tinyxml2::XMLElement* element = root_element->FirstChildElement("NeuralNetworkFileName");

   if(element)
   {
      const std::string new_neural_network_file_name = element->GetText();
      try { set_neural_network_file_name(new_neural_network_file_name); }
      catch(const std::logic_error& e) { std::cout << e.what() << std::endl; }
   }
}

// Display
{
   const tinyxml2::XMLElement* element = root_element->FirstChildElement("Display");

   if(element)
   {
      const std::string new_display = element->GetText();
      try { set_display(new_display != "0"); }
      catch(const std::logic_error& e) { std::cout << e.what() << std::endl; }
   }
}
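Each parsing block above mirrors the serialization: locate the child element, convert its text, and hand the result to the corresponding setter inside a try/catch so a failed range check is reported rather than fatal. A minimal standalone sketch of that read path:

#include <cstdlib>
#include <iostream>
#include "tinyxml2.h"

// Standalone illustration of the per-element read pattern used above.
int main(void)
{
   tinyxml2::XMLDocument document;

   if(document.LoadFile("newton_method.xml") != tinyxml2::XML_SUCCESS)
   {
      return 1;
   }

   const tinyxml2::XMLElement* root_element = document.FirstChildElement("NewtonMethod");

   if(root_element)
   {
      const tinyxml2::XMLElement* element = root_element->FirstChildElement("WarningParametersNorm");

      if(element && element->GetText())
      {
         // std::atof mirrors the conversion used above; the setter call would follow here.
         const double new_warning_parameters_norm = std::atof(element->GetText());

         std::cout << new_warning_parameters_norm << std::endl;
      }
   }

   return 0;
}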
void set_error_gradient_norm(const double &)
bool reserve_parameters_history
True if the parameters history matrix is to be reserved, false otherwise.
void set_reserve_parameters_norm_history(const bool &)
const bool & get_reserve_inverse_Hessian_history(void) const
Returns true if the inverse Hessian history vector of matrices is to be reserved, and false otherwise.
size_t count_parameters_number(void) const
NewtonMethodResults * perform_training(void)
void set_minimum_parameters_increment_norm(const double &)
tinyxml2::XMLDocument * to_XML(void) const
const bool & get_reserve_elapsed_time_history(void) const
Returns true if the elapsed time history vector is to be reserved, and false otherwise.
void resize_training_history(const size_t &)
void set_error_parameters_norm(const double &)
double warning_parameters_norm
Value for the parameters norm at which a warning message is written to the screen.
void set_reserve_generalization_performance_history(const bool &)
void set_error_training_rate(const double &)
std::string to_string(void) const
Returns a string representation of the current Newton method results structure.
const bool & get_reserve_gradient_history(void) const
Returns true if the gradient history vector of vectors is to be reserved, and false otherwise.
size_t maximum_generalization_performance_decreases
const bool & get_reserve_training_rate_history(void) const
Returns true if the training rate history vector is to be reserved, and false otherwise.
const double & get_error_training_rate(void) const
const double & get_error_parameters_norm(void) const
size_t maximum_iterations_number
Maximum number of training iterations to perform. It is used as a stopping criterion.
void set_performance_functional_pointer(PerformanceFunctional *)
void set_display_period(const size_t &)
void set_maximum_iterations_number(const size_t &)
const bool & get_reserve_gradient_norm_history(void) const
Returns true if the gradient norm history vector is to be reserved, and false otherwise.
void set_reserve_training_direction_history(const bool &)
bool display
Display messages to screen.
void set_reserve_inverse_Hessian_history(const bool &)
Matrix< std::string > to_string_matrix(void) const
const double & get_gradient_norm_goal(void) const
void set_warning_gradient_norm(const double &)
void set_reserve_elapsed_time_history(const bool &)
const bool & get_reserve_performance_history(void) const
Returns true if the performance history vector is to be reserved, and false otherwise.
Vector< Vector< double > > gradient_history
History of the performance function gradient over the training iterations.
Vector< double > calculate_gradient_descent_training_direction(const Vector< double > &) const
void set_reserve_performance_history(const bool &)
void from_XML(const tinyxml2::XMLDocument &)
std::string write_training_rate_method(void) const
Returns a string with the name of the training rate method to be used.
const bool & get_reserve_generalization_performance_history(void) const
Returns true if the generalization performance history vector is to be reserved, and false otherwise.
bool reserve_performance_history
True if the performance history vector is to be reserved, false otherwise.
void set_save_period(const size_t &)
const double & get_warning_gradient_norm(void) const
TrainingRateAlgorithm training_rate_algorithm
const double & get_performance_goal(void) const
Vector< Vector< double > > training_direction_history
History of the training direction over the training iterations.
Vector< double > performance_history
History of the performance value over the training iterations.
size_t save_period
Number of iterations between each save of the training progress.
const double & get_minimum_parameters_increment_norm(void) const
Returns the minimum norm of the parameters increment vector, used as a stopping criterion during training.
Vector< double > elapsed_time_history
History of the elapsed time over the training iterations.
void set_reserve_gradient_norm_history(const bool &)
void set_maximum_time(const double &)
const double & get_error_gradient_norm(void) const
const bool & get_reserve_parameters_norm_history(void) const
Returns true if the parameters norm history vector is to be reserved, and false otherwise.
NewtonMethod * Newton_method_pointer
Pointer to the Newton method object for which the training results are to be stored.
void set_display(const bool &)
bool reserve_elapsed_time_history
True if the elapsed time history vector is to be reserved, false otherwise.
std::string neural_network_file_name
Path where the neural network is saved.
std::string write_training_algorithm_type(void) const
Returns a string with the type of training algorithm.
void set_neural_network_file_name(const std::string &)
double performance_goal
Goal value for the performance. It is used as a stopping criterion.
double error_training_rate
Training rate at which the line minimization algorithm is assumed to be unable to bracket a minimum.
void set_default(void)
Sets the members of the training algorithm object to their default values.
Vector< double > gradient_norm_history
History of the gradient norm over the training iterations.
double error_parameters_norm
Value for the parameters norm at which the training process is assumed to fail.
bool reserve_inverse_Hessian_history
True if the inverse Hessian history vector of matrices is to be reserved, false otherwise.
void set_column(const size_t &, const Vector< T > &)
Vector< double > calculate_training_direction(const Vector< double > &, const Matrix< double > &) const
Vector< T > calculate_normalized(void) const
Returns this vector divided by its norm.
void set_reserve_parameters_history(const bool &)
bool reserve_gradient_norm_history
True if the gradient norm history vector is to be reserved, false otherwise.
const double & get_maximum_time(void) const
Returns the maximum training time.
void set_reserve_gradient_history(const bool &)
void set_performance_goal(const double &)
bool reserve_training_direction_history
True if the training direction history matrix is to be reserved, false otherwise.
Vector< double > training_rate_history
History of the training rate over the training iterations.
bool reserve_training_rate_history
True if the training rate history vector is to be reserved, false otherwise.
TrainingRateAlgorithm * get_training_rate_algorithm_pointer(void)
Returns a pointer to the training rate algorithm object inside the Newton method object.
void set_reserve_all_training_history(const bool &)
Sets whether the training history of all variables is to be reserved in memory.
const double & get_training_rate_tolerance(void) const
Returns the tolerance value in line minimization.
const double & get_warning_training_rate(void) const
void set_reserve_training_rate_history(const bool &)
const bool & get_reserve_parameters_history(void) const
Returns true if the parameters history matrix is to be reserved, and false otherwise.
bool reserve_gradient_history
True if the gradient history matrix is to be reserved, false otherwise.
const size_t & get_maximum_generalization_performance_decreases(void) const
Returns the maximum number of generalization failures during the training process.
Vector< double > dot(const Vector< double > &) const
void set_gradient_norm_goal(const double &)
double warning_gradient_norm
Value for the gradient norm at which a warning message is written to the screen.
Vector< Vector< double > > parameters_history
History of the neural network parameters over the training iterations.
const size_t & get_maximum_iterations_number(void) const
Returns the maximum number of iterations for training.
void set_warning_training_rate(const double &)
virtual ~NewtonMethod(void)
Destructor.
void set_minimum_performance_increase(const double &)
void set_maximum_generalization_performance_decreases(const size_t &)
const TrainingRateAlgorithm & get_training_rate_algorithm(void) const
Returns a constant reference to the training rate algorithm object inside the Newton method object.
Matrix< std::string > write_final_results(const size_t &precision=3) const
Returns a default (empty) string matrix with the final results from training.
void set_warning_parameters_norm(const double &)
double error_gradient_norm
Value for the gradient norm at which the training process is assumed to fail.
const bool & get_reserve_training_direction_history(void) const
Returns true if the training direction history matrix is to be reserved, and false otherwise.
Vector< double > parameters_norm_history
History of the parameters norm over the training iterations.
double gradient_norm_goal
Goal value for the norm of the objective function gradient. It is used as a stopping criterion.
bool reserve_generalization_performance_history
True if the generalization performance history vector is to be reserved, false otherwise.
PerformanceFunctional * performance_functional_pointer
Pointer to a performance functional for a multilayer perceptron object.
Vector< Matrix< double > > inverse_Hessian_history
History of the inverse Hessian over the training iterations.
Vector< double > generalization_performance_history
History of the generalization performance over the training iterations.
size_t display_period
Number of iterations between each display of the training progress.
double minimum_parameters_increment_norm
Norm of the parameters increment vector at which training stops.
double maximum_time
Maximum training time. It is used as a stopping criterion.
double warning_training_rate
Training rate value at which a warning message is written to the screen.
const double & get_warning_parameters_norm(void) const
double minimum_performance_increase
Minimum performance improvement between two successive iterations. It is used as a stopping criterion.
const double & get_minimum_performance_increase(void) const
Returns the minimum performance improvement during training.
bool reserve_parameters_norm_history
True if the parameters norm history vector is to be reserved, false otherwise.