// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2023 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
//   this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
//   this list of conditions and the following disclaimer in the documentation
//   and/or other materials provided with the distribution.
// * Neither the name of Google Inc. nor the names of its contributors may be
//   used to endorse or promote products derived from this software without
//   specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
// Author: sameeragarwal@google.com (Sameer Agarwal)
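//
// The comments in this file cite "Nocedal & Wright [1]"; this refers to
// Nocedal, J. & Wright, S., "Numerical Optimization", Springer.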

#include "ceres/line_search.h"

#include <algorithm>
#include <cmath>
#include <iomanip>
#include <map>
#include <memory>
#include <ostream>  // NOLINT
#include <string>
#include <vector>

#include "ceres/evaluator.h"
#include "ceres/function_sample.h"
#include "ceres/internal/eigen.h"
#include "ceres/map_util.h"
#include "ceres/polynomial.h"
#include "ceres/stringprintf.h"
#include "ceres/wall_time.h"
#include "glog/logging.h"
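
// A sketch of how the classes in this file fit together, assuming the caller
// already has an Evaluator* evaluator, Vector position/direction, and the
// corresponding initial_cost/initial_gradient (see line_search.h for the
// authoritative Options and Summary definitions):
//
//   LineSearchFunction line_search_function(evaluator);
//   line_search_function.Init(position, direction);
//
//   LineSearch::Options options;
//   options.function = &line_search_function;
//
//   std::string error;
//   std::unique_ptr<LineSearch> line_search =
//       LineSearch::Create(ceres::WOLFE, options, &error);
//
//   LineSearch::Summary summary;
//   line_search->Search(step_size_estimate, initial_cost, initial_gradient,
//                       &summary);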
namespace ceres::internal {

namespace {
// Precision used for floating point values in error message output.
const int kErrorMessageNumericPrecision = 8;
}  // namespace

std::ostream& operator<<(std::ostream& os, const FunctionSample& sample);

// Convenience stream operator for pushing FunctionSamples into log messages.
std::ostream& operator<<(std::ostream& os, const FunctionSample& sample) {
  os << sample.ToDebugString();
  return os;
}

LineSearch::~LineSearch() = default;

LineSearch::LineSearch(const LineSearch::Options& options)
    : options_(options) {}
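
// Factory which returns the LineSearch implementation corresponding to
// line_search_type (currently ARMIJO or WOLFE). For an unrecognized type,
// *error is populated and nullptr is returned.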
std::unique_ptr<LineSearch> LineSearch::Create(
    const LineSearchType line_search_type,
    const LineSearch::Options& options,
    std::string* error) {
  switch (line_search_type) {
    case ceres::ARMIJO:
      return std::make_unique<ArmijoLineSearch>(options);
    case ceres::WOLFE:
      return std::make_unique<WolfeLineSearch>(options);
    default:
      *error = std::string("Invalid line search algorithm type: ") +
               LineSearchTypeToString(line_search_type) +
               std::string(", unable to create line search.");
  }
  return nullptr;
}

LineSearchFunction::LineSearchFunction(Evaluator* evaluator)
    : evaluator_(evaluator),
      position_(evaluator->NumParameters()),
      direction_(evaluator->NumEffectiveParameters()),
      scaled_direction_(evaluator->NumEffectiveParameters()),
      initial_evaluator_residual_time_in_seconds(0.0),
      initial_evaluator_jacobian_time_in_seconds(0.0) {}

void LineSearchFunction::Init(const Vector& position, const Vector& direction) {
  position_ = position;
  direction_ = direction;
}
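
// Evaluates the line search objective
//
//   phi(x) = f(position + x * direction)
//
// at the given step size x and, if requested, its directional derivative
//
//   phi'(x) = direction' * grad f(position + x * direction),
//
// storing the results in *output. The *_is_valid flags in *output are only
// set for quantities that were successfully computed and are finite.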
void LineSearchFunction::Evaluate(const double x,
                                  const bool evaluate_gradient,
                                  FunctionSample* output) {
  output->x = x;
  output->vector_x_is_valid = false;
  output->value_is_valid = false;
  output->gradient_is_valid = false;
  output->vector_gradient_is_valid = false;

  scaled_direction_ = output->x * direction_;
  output->vector_x.resize(position_.rows(), 1);
  if (!evaluator_->Plus(position_.data(),
                        scaled_direction_.data(),
                        output->vector_x.data())) {
    return;
  }
  output->vector_x_is_valid = true;

  double* gradient = nullptr;
  if (evaluate_gradient) {
    output->vector_gradient.resize(direction_.rows(), 1);
    gradient = output->vector_gradient.data();
  }
  const bool eval_status = evaluator_->Evaluate(
      output->vector_x.data(), &(output->value), nullptr, gradient, nullptr);

  if (!eval_status || !std::isfinite(output->value)) {
    return;
  }

  output->value_is_valid = true;
  if (!evaluate_gradient) {
    return;
  }

  output->gradient = direction_.dot(output->vector_gradient);
  if (!std::isfinite(output->gradient)) {
    return;
  }

  output->gradient_is_valid = true;
  output->vector_gradient_is_valid = true;
}

double LineSearchFunction::DirectionInfinityNorm() const {
  return direction_.lpNorm<Eigen::Infinity>();
}

void LineSearchFunction::ResetTimeStatistics() {
  const std::map<std::string, CallStatistics> evaluator_statistics =
      evaluator_->Statistics();

  initial_evaluator_residual_time_in_seconds =
      FindWithDefault(
          evaluator_statistics, "Evaluator::Residual", CallStatistics())
          .time;
  initial_evaluator_jacobian_time_in_seconds =
      FindWithDefault(
          evaluator_statistics, "Evaluator::Jacobian", CallStatistics())
          .time;
}

void LineSearchFunction::TimeStatistics(
    double* cost_evaluation_time_in_seconds,
    double* gradient_evaluation_time_in_seconds) const {
  const std::map<std::string, CallStatistics> evaluator_time_statistics =
      evaluator_->Statistics();
  *cost_evaluation_time_in_seconds =
      FindWithDefault(
          evaluator_time_statistics, "Evaluator::Residual", CallStatistics())
          .time -
      initial_evaluator_residual_time_in_seconds;
  // Strictly speaking this will slightly underestimate the time spent
  // evaluating the gradient of the line search univariate cost function as it
  // does not count the time spent performing the dot product with the direction
  // vector. However, this will typically be small by comparison, and also
  // allows direct subtraction of the timing information from the totals for
  // the evaluator returned in the solver summary.
  *gradient_evaluation_time_in_seconds =
      FindWithDefault(
          evaluator_time_statistics, "Evaluator::Jacobian", CallStatistics())
          .time -
      initial_evaluator_jacobian_time_in_seconds;
}
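
// Resets the function's evaluation-time statistics, runs the concrete search
// (DoSearch), and then records per-phase timing in *summary. initial_cost and
// initial_gradient are the values of the line search objective and its
// directional derivative at step_size = 0.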
void LineSearch::Search(double step_size_estimate,
                        double initial_cost,
                        double initial_gradient,
                        Summary* summary) const {
  const double start_time = WallTimeInSeconds();
  CHECK(summary != nullptr);
  *summary = LineSearch::Summary();
  summary->cost_evaluation_time_in_seconds = 0.0;
  summary->gradient_evaluation_time_in_seconds = 0.0;
  summary->polynomial_minimization_time_in_seconds = 0.0;
  options().function->ResetTimeStatistics();
  this->DoSearch(step_size_estimate, initial_cost, initial_gradient, summary);
  options().function->TimeStatistics(
      &summary->cost_evaluation_time_in_seconds,
      &summary->gradient_evaluation_time_in_seconds);

  summary->total_time_in_seconds = WallTimeInSeconds() - start_time;
}

// Returns step_size \in [min_step_size, max_step_size] which minimizes the
// polynomial of degree defined by interpolation_type which interpolates all
// of the provided samples with valid values.
double LineSearch::InterpolatingPolynomialMinimizingStepSize(
    const LineSearchInterpolationType& interpolation_type,
    const FunctionSample& lowerbound,
    const FunctionSample& previous,
    const FunctionSample& current,
    const double min_step_size,
    const double max_step_size) const {
  if (!current.value_is_valid ||
      (interpolation_type == BISECTION && max_step_size <= current.x)) {
    // Either: sample is invalid; or we are using BISECTION and contracting
    // the step size.
    return std::min(std::max(current.x * 0.5, min_step_size), max_step_size);
  } else if (interpolation_type == BISECTION) {
    CHECK_GT(max_step_size, current.x);
    // We are expanding the search (during a Wolfe bracketing phase) using
    // BISECTION interpolation. Using BISECTION when trying to expand is
    // strictly speaking an oxymoron, but we define this to mean always taking
    // the maximum step size so that the Armijo & Wolfe implementations are
    // agnostic to the interpolation type.
    return max_step_size;
  }
  // Only check if lower-bound is valid here, where it is required
  // to avoid replicating current.value_is_valid == false
  // behaviour in WolfeLineSearch.
  CHECK(lowerbound.value_is_valid)
      << std::scientific << std::setprecision(kErrorMessageNumericPrecision)
      << "Ceres bug: lower-bound sample for interpolation is invalid, "
      << "please contact the developers!, interpolation_type: "
      << LineSearchInterpolationTypeToString(interpolation_type)
      << ", lowerbound: " << lowerbound << ", previous: " << previous
      << ", current: " << current;

  // Select step size by interpolating the function and gradient values
  // and minimizing the corresponding polynomial.
  std::vector<FunctionSample> samples;
  samples.push_back(lowerbound);

  if (interpolation_type == QUADRATIC) {
    // Two point interpolation using function values and the
    // gradient at the lower bound.
    samples.emplace_back(current.x, current.value);

    if (previous.value_is_valid) {
      // Three point interpolation, using function values and the
      // gradient at the lower bound.
      samples.emplace_back(previous.x, previous.value);
    }
  } else if (interpolation_type == CUBIC) {
    // Two point interpolation using the function values and the gradients.
    samples.push_back(current);

    if (previous.value_is_valid) {
      // Three point interpolation using the function values and
      // the gradients.
      samples.push_back(previous);
    }
  } else {
    LOG(FATAL) << "Ceres bug: No handler for interpolation_type: "
               << LineSearchInterpolationTypeToString(interpolation_type)
               << ", please contact the developers!";
  }

  double step_size = 0.0, unused_min_value = 0.0;
  MinimizeInterpolatingPolynomial(
      samples, min_step_size, max_step_size, &step_size, &unused_min_value);
  return step_size;
}

ArmijoLineSearch::ArmijoLineSearch(const LineSearch::Options& options)
    : LineSearch(options) {}
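
// Armijo (backtracking) line search. A step size x is accepted as soon as it
// satisfies the sufficient decrease condition implemented below:
//
//   phi(x) <= phi(0) + sufficient_decrease * phi'(0) * x,
//
// where phi(x) is the line search objective, phi(0) = initial_cost and
// phi'(0) = initial_gradient. Otherwise the step size is contracted (via
// interpolation of the samples gathered so far) and the condition is
// re-tested.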
void ArmijoLineSearch::DoSearch(const double step_size_estimate,
                                const double initial_cost,
                                const double initial_gradient,
                                Summary* summary) const {
  CHECK_GE(step_size_estimate, 0.0);
  CHECK_GT(options().sufficient_decrease, 0.0);
  CHECK_LT(options().sufficient_decrease, 1.0);
  CHECK_GT(options().max_num_iterations, 0);
  LineSearchFunction* function = options().function;

  // Note initial_cost & initial_gradient are evaluated at step_size = 0,
  // not step_size_estimate, which is our starting guess.
  FunctionSample initial_position(0.0, initial_cost, initial_gradient);
  initial_position.vector_x = function->position();
  initial_position.vector_x_is_valid = true;

  const double descent_direction_max_norm = function->DirectionInfinityNorm();
  FunctionSample previous;
  FunctionSample current;

  // As the Armijo line search algorithm always uses the initial point, for
  // which both the function value and derivative are known, when fitting a
  // minimizing polynomial, we can fit up to a quadratic without requiring the
  // gradient at the current query point.
  const bool kEvaluateGradient = options().interpolation_type == CUBIC;

  ++summary->num_function_evaluations;
  if (kEvaluateGradient) {
    ++summary->num_gradient_evaluations;
  }
  function->Evaluate(step_size_estimate, kEvaluateGradient, &current);

  while (!current.value_is_valid ||
         current.value > (initial_cost + options().sufficient_decrease *
                                             initial_gradient * current.x)) {
    // If current.value_is_valid is false, we treat it as if the cost at that
    // point were too large to satisfy the sufficient decrease condition.
    ++summary->num_iterations;
    if (summary->num_iterations >= options().max_num_iterations) {
      summary->error = StringPrintf(
          "Line search failed: Armijo failed to find a point "
          "satisfying the sufficient decrease condition within "
          "specified max_num_iterations: %d.",
          options().max_num_iterations);
      if (!options().is_silent) {
        LOG(WARNING) << summary->error;
      }
      return;
    }

    const double polynomial_minimization_start_time = WallTimeInSeconds();
    const double step_size = this->InterpolatingPolynomialMinimizingStepSize(
        options().interpolation_type,
        initial_position,
        previous,
        current,
        (options().max_step_contraction * current.x),
        (options().min_step_contraction * current.x));
    summary->polynomial_minimization_time_in_seconds +=
        (WallTimeInSeconds() - polynomial_minimization_start_time);

    if (step_size * descent_direction_max_norm < options().min_step_size) {
      summary->error = StringPrintf(
          "Line search failed: step_size too small: %.5e "
          "with descent_direction_max_norm: %.5e.",
          step_size,
          descent_direction_max_norm);
      if (!options().is_silent) {
        LOG(WARNING) << summary->error;
      }
      return;
    }

    previous = current;

    ++summary->num_function_evaluations;
    if (kEvaluateGradient) {
      ++summary->num_gradient_evaluations;
    }
    function->Evaluate(step_size, kEvaluateGradient, &current);
  }

  summary->optimal_point = current;
  summary->success = true;
}

WolfeLineSearch::WolfeLineSearch(const LineSearch::Options& options)
    : LineSearch(options) {}
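
// Strong Wolfe line search. In addition to the Armijo sufficient decrease
// condition, an accepted step size x must satisfy the curvature condition
// implemented below:
//
//   |phi'(x)| <= -sufficient_curvature_decrease * phi'(0),
//
// with sufficient_decrease < sufficient_curvature_decrease < 1 (checked at
// the top of DoSearch). The search proceeds in two stages: a bracketing
// phase followed, if needed, by a zoom phase.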
void WolfeLineSearch::DoSearch(const double step_size_estimate,
                               const double initial_cost,
                               const double initial_gradient,
                               Summary* summary) const {
  // All parameters should have been validated by the Solver, but as
  // invalid values would produce crazy nonsense, hard check them here.
  CHECK_GE(step_size_estimate, 0.0);
  CHECK_GT(options().sufficient_decrease, 0.0);
  CHECK_GT(options().sufficient_curvature_decrease,
           options().sufficient_decrease);
  CHECK_LT(options().sufficient_curvature_decrease, 1.0);
  CHECK_GT(options().max_step_expansion, 1.0);

  // Note initial_cost & initial_gradient are evaluated at step_size = 0,
  // not step_size_estimate, which is our starting guess.
  FunctionSample initial_position(0.0, initial_cost, initial_gradient);
  initial_position.vector_x = options().function->position();
  initial_position.vector_x_is_valid = true;

  bool do_zoom_search = false;
  // Important: The high/low in bracket_high & bracket_low refer to their
  // _function_ values, not their step sizes i.e. it is _not_ required that
  // bracket_low.x < bracket_high.x.
  FunctionSample solution, bracket_low, bracket_high;

  // Wolfe bracketing phase: Increases step_size until either it finds a point
  // that satisfies the (strong) Wolfe conditions, or an interval that brackets
  // step sizes which satisfy the conditions. From Nocedal & Wright [1] p61 the
  // interval: (step_size_{k-1}, step_size_{k}) contains step lengths satisfying
  // the strong Wolfe conditions if one of the following conditions is met:
  //
  //   1. step_size_{k} violates the sufficient decrease (Armijo) condition.
  //   2. f(step_size_{k}) >= f(step_size_{k-1}).
  //   3. f'(step_size_{k}) >= 0.
  //
  // Caveat: If f(step_size_{k}) is invalid, then step_size is reduced; ignoring
  // this special case, step_size monotonically increases during bracketing.
  if (!this->BracketingPhase(initial_position,
                             step_size_estimate,
                             &bracket_low,
                             &bracket_high,
                             &do_zoom_search,
                             summary)) {
    // Failed to find either a valid point, a valid bracket satisfying the
    // Wolfe conditions, or even a step size > minimum tolerance satisfying
    // the Armijo condition.
    return;
  }

  if (!do_zoom_search) {
    // Either: Bracketing phase already found a point satisfying the strong
    // Wolfe conditions, thus no Zoom required.
    //
    // Or: Bracketing failed to find a valid bracket or a point satisfying the
    // strong Wolfe conditions within max_num_iterations, or whilst searching
    // shrank the bracket width until it was below our minimum tolerance.
    // As these are 'artificial' constraints, and we would otherwise fail to
    // produce a valid point when ArmijoLineSearch would succeed, we return the
    // point with the lowest cost found thus far which satisfies the Armijo
    // condition (but not the Wolfe conditions).
    summary->optimal_point = bracket_low;
    summary->success = true;
    return;
  }

  VLOG(3) << std::scientific << std::setprecision(kErrorMessageNumericPrecision)
          << "Starting line search zoom phase with bracket_low: " << bracket_low
          << ", bracket_high: " << bracket_high
          << ", bracket width: " << fabs(bracket_low.x - bracket_high.x)
          << ", bracket abs delta cost: "
          << fabs(bracket_low.value - bracket_high.value);

  // Wolfe Zoom phase: Called when the Bracketing phase finds an interval of
  // non-zero, finite width that should bracket step sizes which satisfy the
  // (strong) Wolfe conditions (before finding a step size that satisfies the
  // conditions). Zoom successively decreases the size of the interval until a
  // step size which satisfies the Wolfe conditions is found. The interval is
  // defined by bracket_low & bracket_high, which satisfy:
  //
  //   1. The interval bounded by step sizes: bracket_low.x & bracket_high.x
  //      contains step sizes that satisfy the strong Wolfe conditions.
  //   2. Of all the step sizes evaluated *which satisfied the Armijo
  //      sufficient decrease condition*, bracket_low.x is the one which
  //      generated the smallest function value, i.e. bracket_low.value <
  //      f(all other steps satisfying Armijo).
  //      - Note that this does _not_ (necessarily) mean that initially
  //        bracket_low.value < bracket_high.value (although this is typical)
  //        e.g. when bracket_low = initial_position, and bracket_high is the
  //        first sample, which does not satisfy the Armijo condition but
  //        still has bracket_high.value < initial_position.value.
  //   3. bracket_high is chosen after bracket_low, s.t.
  //      bracket_low.gradient * (bracket_high.x - bracket_low.x) < 0.
  if (!this->ZoomPhase(
          initial_position, bracket_low, bracket_high, &solution, summary) &&
      !solution.value_is_valid) {
    // Failed to find a valid point (given the specified decrease parameters)
    // within the specified bracket.
    return;
  }
  // Ensure that if we ran out of iterations whilst zooming the bracket, or
  // shrank the bracket width to < tolerance and failed to find a point which
  // satisfies the strong Wolfe curvature condition, we return the point
  // amongst those found thus far which minimizes f() and satisfies the Armijo
  // condition.
  if (!solution.value_is_valid || solution.value > bracket_low.value) {
    summary->optimal_point = bracket_low;
  } else {
    summary->optimal_point = solution;
  }

  summary->success = true;
}

// Returns true if either:
//
// A termination condition satisfying the (strong) Wolfe bracketing conditions
// is found:
//
// - A valid point, defined as a bracket of zero width [zoom not required].
// - A valid bracket (of width > tolerance), [zoom required].
//
// Or, searching was stopped due to an 'artificial' constraint, i.e. not
// a condition imposed / required by the underlying algorithm, but instead an
// engineering / implementation consideration. But a step which exceeds the
// minimum step size, and satisfies the Armijo condition was still found,
// and should thus be used [zoom not required].
//
// Returns false if no step size > minimum step size was found which
// satisfies at least the Armijo condition.
bool WolfeLineSearch::BracketingPhase(const FunctionSample& initial_position,
                                      const double step_size_estimate,
                                      FunctionSample* bracket_low,
                                      FunctionSample* bracket_high,
                                      bool* do_zoom_search,
                                      Summary* summary) const {
  LineSearchFunction* function = options().function;

  FunctionSample previous = initial_position;
  FunctionSample current;

  const double descent_direction_max_norm = function->DirectionInfinityNorm();

  *do_zoom_search = false;
  *bracket_low = initial_position;

  // As we require the gradient to evaluate the Wolfe condition, we always
  // calculate it together with the value, irrespective of the interpolation
  // type. As opposed to only calculating the gradient after the Armijo
  // condition is satisfied, as the computational saving from this approach
  // would be slight (perhaps even negative due to the extra call). Also,
  // always calculating the value & gradient together protects against us
  // reporting invalid solutions if the cost function returns slightly
  // different function values when evaluated with / without gradients (due
  // to numerical issues).
  ++summary->num_function_evaluations;
  ++summary->num_gradient_evaluations;
  const bool kEvaluateGradient = true;
  function->Evaluate(step_size_estimate, kEvaluateGradient, &current);

  while (true) {
    ++summary->num_iterations;

    if (current.value_is_valid &&
        (current.value > (initial_position.value +
                          options().sufficient_decrease *
                              initial_position.gradient * current.x) ||
         (previous.value_is_valid && current.value > previous.value))) {
      // Bracket found: current step size violates Armijo sufficient decrease
      // condition, or has stepped past an inflection point of f() relative to
      // previous step size.
      *do_zoom_search = true;
      *bracket_low = previous;
      *bracket_high = current;
      VLOG(3) << std::scientific
              << std::setprecision(kErrorMessageNumericPrecision)
              << "Bracket found: current step (" << current.x
              << ") violates Armijo sufficient decrease condition, or has "
              << "passed an inflection point of f() based on value.";
      break;
    }

    if (current.value_is_valid &&
        fabs(current.gradient) <= -options().sufficient_curvature_decrease *
                                      initial_position.gradient) {
      // Current step size satisfies the strong Wolfe conditions, and is thus a
      // valid termination point, therefore a Zoom not required.
      *bracket_low = current;
      *bracket_high = current;
      VLOG(3) << std::scientific
              << std::setprecision(kErrorMessageNumericPrecision)
              << "Bracketing phase found step size: " << current.x
              << ", satisfying strong Wolfe conditions, initial_position: "
              << initial_position << ", current: " << current;
      break;
    } else if (current.value_is_valid && current.gradient >= 0) {
      // Bracket found: current step size has stepped past an inflection point
      // of f(), but Armijo sufficient decrease is still satisfied and
      // f(current) is our best minimum thus far. Remember step size
      // monotonically increases, thus previous_step_size < current_step_size
      // even though f(previous) > f(current).
      *do_zoom_search = true;
      // Note inverse ordering from first bracket case.
      *bracket_low = current;
      *bracket_high = previous;
      VLOG(3) << "Bracket found: current step (" << current.x
              << ") satisfies Armijo, but has gradient >= 0, thus have passed "
              << "an inflection point of f().";
      break;
    } else if (current.value_is_valid &&
               fabs(current.x - previous.x) * descent_direction_max_norm <
                   options().min_step_size) {
      // We have shrunk the search bracket to a width less than our tolerance,
      // and still not found either a point satisfying the strong Wolfe
      // conditions, or a valid bracket containing such a point. Stop searching
      // and set bracket_low to the step size amongst all those tested which
      // minimizes f() and satisfies the Armijo condition.
      if (!options().is_silent) {
        LOG(WARNING) << "Line search failed: Wolfe bracketing phase shrank "
                     << "bracket width: " << fabs(current.x - previous.x)
                     << ", to < tolerance: " << options().min_step_size
                     << ", with descent_direction_max_norm: "
                     << descent_direction_max_norm << ", and failed to find "
                     << "a point satisfying the strong Wolfe conditions or a "
                     << "bracket containing such a point. Accepting "
                     << "point found satisfying Armijo condition only, to "
                     << "allow continuation.";
      }
      *bracket_low = current;
      break;
    } else if (summary->num_iterations >= options().max_num_iterations) {
      // Check num iterations bound here so that we always evaluate the
      // max_num_iterations-th iteration against all conditions, and
      // then perform no additional (unused) evaluations.
      summary->error = StringPrintf(
          "Line search failed: Wolfe bracketing phase failed to "
          "find a point satisfying strong Wolfe conditions, or a "
          "bracket containing such a point within specified "
          "max_num_iterations: %d",
          options().max_num_iterations);
      if (!options().is_silent) {
        LOG(WARNING) << summary->error;
      }
      // Ensure that bracket_low is always set to the step size amongst all
      // those tested which minimizes f() and satisfies the Armijo condition
      // when we terminate due to the 'artificial' max_num_iterations condition.
      *bracket_low =
          current.value_is_valid && current.value < bracket_low->value
              ? current
              : *bracket_low;
      break;
    }
    // Either: f(current) is invalid; or, f(current) is valid, but does not
    // satisfy the strong Wolfe conditions itself, or the conditions for
    // being a boundary of a bracket.
    // If f(current) is valid, (but meets no criteria) expand the search by
    // increasing the step size. If f(current) is invalid, contract the step
    // size.
    //
    // In Nocedal & Wright [1] (p60), the step-size can only increase in the
    // bracketing phase: step_size_{k+1} \in [step_size_k, step_size_k *
    // factor]. However this does not account for the function returning
    // invalid values which we support, in which case we need to contract the
    // step size whilst ensuring that we do not invert the bracket, i.e., we
    // require that: step_size_{k-1} <= step_size_{k+1} < step_size_k.
    const double min_step_size =
        current.value_is_valid ? current.x : previous.x;
    const double max_step_size =
        current.value_is_valid ? (current.x * options().max_step_expansion)
                               : current.x;

    // We are performing 2-point interpolation only here, but the API of
    // InterpolatingPolynomialMinimizingStepSize() allows for up to
    // 3-point interpolation, so pad call with a sample with an invalid
    // value that will therefore be ignored.
    const FunctionSample unused_previous;
    DCHECK(!unused_previous.value_is_valid);

    // Contracts step size if f(current) is not valid.
    const double polynomial_minimization_start_time = WallTimeInSeconds();
    const double step_size = this->InterpolatingPolynomialMinimizingStepSize(
        options().interpolation_type,
        previous,
        unused_previous,
        current,
        min_step_size,
        max_step_size);
    summary->polynomial_minimization_time_in_seconds +=
        (WallTimeInSeconds() - polynomial_minimization_start_time);
    if (step_size * descent_direction_max_norm < options().min_step_size) {
      summary->error = StringPrintf(
          "Line search failed: step_size too small: %.5e "
          "with descent_direction_max_norm: %.5e",
          step_size,
          descent_direction_max_norm);
      if (!options().is_silent) {
        LOG(WARNING) << summary->error;
      }
      return false;
    }

    // Only advance the lower boundary (in x) of the bracket if f(current)
    // is valid such that we can support contracting the step size when
    // f(current) is invalid without risking inverting the bracket in x, i.e.
    // prevent previous.x > current.x.
    previous = current.value_is_valid ? current : previous;

    ++summary->num_function_evaluations;
    ++summary->num_gradient_evaluations;
    function->Evaluate(step_size, kEvaluateGradient, &current);
  }

  // Ensure that even if a valid bracket was found, we will only mark a zoom
  // as required if the bracket's width is greater than our minimum tolerance.
  if (*do_zoom_search &&
      fabs(bracket_high->x - bracket_low->x) * descent_direction_max_norm <
          options().min_step_size) {
    *do_zoom_search = false;
  }

  return true;
}

// Returns true iff solution satisfies the strong Wolfe conditions. Otherwise,
// on return false, if we stopped searching due to the 'artificial' condition
// of reaching max_num_iterations, solution is the step size amongst all those
// tested, which satisfied the Armijo decrease condition and minimized f().
bool WolfeLineSearch::ZoomPhase(const FunctionSample& initial_position,
                                FunctionSample bracket_low,
                                FunctionSample bracket_high,
                                FunctionSample* solution,
                                Summary* summary) const {
  LineSearchFunction* function = options().function;

  CHECK(bracket_low.value_is_valid && bracket_low.gradient_is_valid)
      << std::scientific << std::setprecision(kErrorMessageNumericPrecision)
      << "Ceres bug: f_low input to Wolfe Zoom invalid, please contact "
      << "the developers!, initial_position: " << initial_position
      << ", bracket_low: " << bracket_low
      << ", bracket_high: " << bracket_high;
  // We do not require bracket_high.gradient_is_valid as the gradient condition
  // for a valid bracket is only dependent upon bracket_low.gradient, and
  // in order to minimize jacobian evaluations, bracket_high.gradient may
  // not have been calculated (if bracket_high.value does not satisfy the
  // Armijo sufficient decrease condition and interpolation method does not
  // require it).
  //
  // We also do not require that: bracket_low.value < bracket_high.value,
  // although this is typical. This is to deal with the case when
  // bracket_low = initial_position, bracket_high is the first sample,
  // and bracket_high does not satisfy the Armijo condition, but still has
  // bracket_high.value < initial_position.value.
  CHECK(bracket_high.value_is_valid)
      << std::scientific << std::setprecision(kErrorMessageNumericPrecision)
      << "Ceres bug: f_high input to Wolfe Zoom invalid, please "
      << "contact the developers!, initial_position: " << initial_position
      << ", bracket_low: " << bracket_low
      << ", bracket_high: " << bracket_high;

  if (bracket_low.gradient * (bracket_high.x - bracket_low.x) >= 0) {
    // The third condition for a valid initial bracket:
    //
    //   3. bracket_high is chosen after bracket_low, s.t.
    //      bracket_low.gradient * (bracket_high.x - bracket_low.x) < 0.
    //
    // is not satisfied. As this can happen when the users' cost function
    // returns inconsistent gradient values relative to the function values,
    // we do not CHECK_LT(), but we do stop processing and return an invalid
    // value.
    summary->error = StringPrintf(
        "Line search failed: Wolfe zoom phase passed a bracket "
        "which does not satisfy: bracket_low.gradient * "
        "(bracket_high.x - bracket_low.x) < 0 [%.8e !< 0] "
        "with initial_position: %s, bracket_low: %s, bracket_high:"
        " %s, the most likely cause of which is the cost function "
        "returning inconsistent gradient & function values.",
        bracket_low.gradient * (bracket_high.x - bracket_low.x),
        initial_position.ToDebugString().c_str(),
        bracket_low.ToDebugString().c_str(),
        bracket_high.ToDebugString().c_str());
    if (!options().is_silent) {
      LOG(WARNING) << summary->error;
    }
    solution->value_is_valid = false;
    return false;
  }

  const int num_bracketing_iterations = summary->num_iterations;
  const double descent_direction_max_norm = function->DirectionInfinityNorm();

  while (true) {
    // Set solution to bracket_low, as it is our best step size (smallest f())
    // found thus far and satisfies the Armijo condition, even though it does
    // not satisfy the Wolfe condition.
    *solution = bracket_low;
    if (summary->num_iterations >= options().max_num_iterations) {
      summary->error = StringPrintf(
          "Line search failed: Wolfe zoom phase failed to "
          "find a point satisfying strong Wolfe conditions "
          "within specified max_num_iterations: %d, "
          "(num iterations taken for bracketing: %d).",
          options().max_num_iterations,
          num_bracketing_iterations);
      if (!options().is_silent) {
        LOG(WARNING) << summary->error;
      }
      return false;
    }
    if (fabs(bracket_high.x - bracket_low.x) * descent_direction_max_norm <
        options().min_step_size) {
      // Bracket width has been reduced below tolerance, and no point
      // satisfying the strong Wolfe conditions has been found.
      summary->error = StringPrintf(
          "Line search failed: Wolfe zoom bracket width: %.5e "
          "too small with descent_direction_max_norm: %.5e.",
          fabs(bracket_high.x - bracket_low.x),
          descent_direction_max_norm);
      if (!options().is_silent) {
        LOG(WARNING) << summary->error;
      }
      return false;
    }

    ++summary->num_iterations;

    // Polynomial interpolation requires inputs ordered according to step size,
    // not f(step size).
    const FunctionSample& lower_bound_step =
        bracket_low.x < bracket_high.x ? bracket_low : bracket_high;
    const FunctionSample& upper_bound_step =
        bracket_low.x < bracket_high.x ? bracket_high : bracket_low;

    // We are performing 2-point interpolation only here, but the API of
    // InterpolatingPolynomialMinimizingStepSize() allows for up to
    // 3-point interpolation, so pad call with a sample with an invalid
    // value that will therefore be ignored.
    const FunctionSample unused_previous;
    DCHECK(!unused_previous.value_is_valid);
    const double polynomial_minimization_start_time = WallTimeInSeconds();
    const double step_size = this->InterpolatingPolynomialMinimizingStepSize(
        options().interpolation_type,
        lower_bound_step,
        unused_previous,
        upper_bound_step,
        lower_bound_step.x,
        upper_bound_step.x);
    summary->polynomial_minimization_time_in_seconds +=
        (WallTimeInSeconds() - polynomial_minimization_start_time);
    // No check on magnitude of step size being too small here as it is
    // lower-bounded by the initial bracket start point, which was valid.
    //
    // As we require the gradient to evaluate the Wolfe condition, we always
    // calculate it together with the value, irrespective of the interpolation
    // type. As opposed to only calculating the gradient after the Armijo
    // condition is satisfied, as the computational saving from this approach
    // would be slight (perhaps even negative due to the extra call). Also,
    // always calculating the value & gradient together protects against us
    // reporting invalid solutions if the cost function returns slightly
    // different function values when evaluated with / without gradients (due
    // to numerical issues).
    ++summary->num_function_evaluations;
    ++summary->num_gradient_evaluations;
    const bool kEvaluateGradient = true;
    function->Evaluate(step_size, kEvaluateGradient, solution);
    if (!solution->value_is_valid || !solution->gradient_is_valid) {
      summary->error = StringPrintf(
          "Line search failed: Wolfe Zoom phase found "
          "step_size: %.5e, for which function is invalid, "
          "between low_step: %.5e and high_step: %.5e "
          "at which function is valid.",
          solution->x,
          bracket_low.x,
          bracket_high.x);
      if (!options().is_silent) {
        LOG(WARNING) << summary->error;
      }
      return false;
    }

    VLOG(3) << "Zoom iteration: "
            << summary->num_iterations - num_bracketing_iterations
            << ", bracket_low: " << bracket_low
            << ", bracket_high: " << bracket_high
            << ", minimizing solution: " << *solution;

    if ((solution->value > (initial_position.value +
                            options().sufficient_decrease *
                                initial_position.gradient * solution->x)) ||
        (solution->value >= bracket_low.value)) {
      // Armijo sufficient decrease not satisfied, or not better
      // than current lowest sample, use as new upper bound.
      bracket_high = *solution;
      continue;
    }

    // Armijo sufficient decrease satisfied, check strong Wolfe condition.
    if (fabs(solution->gradient) <=
        -options().sufficient_curvature_decrease * initial_position.gradient) {
      // Found a valid termination point satisfying strong Wolfe conditions.
      VLOG(3) << std::scientific
              << std::setprecision(kErrorMessageNumericPrecision)
              << "Zoom phase found step size: " << solution->x
              << ", satisfying strong Wolfe conditions.";
      break;
    } else if (solution->gradient * (bracket_high.x - bracket_low.x) >= 0) {
      bracket_high = bracket_low;
    }

    bracket_low = *solution;
  }
  // Solution contains a valid point which satisfies the strong Wolfe
  // conditions.
  return true;
}

}  // namespace ceres::internal