// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2016 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
//   this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
//   this list of conditions and the following disclaimer in the documentation
//   and/or other materials provided with the distribution.
// * Neither the name of Google Inc. nor the names of its contributors may be
//   used to endorse or promote products derived from this software without
//   specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
// Author: sameeragarwal@google.com (Sameer Agarwal)
- #include "ceres/trust_region_minimizer.h"
- #include <algorithm>
- #include <cmath>
- #include <cstdlib>
- #include <cstring>
- #include <limits>
- #include <memory>
- #include <string>
- #include <vector>
- #include "Eigen/Core"
- #include "ceres/array_utils.h"
- #include "ceres/coordinate_descent_minimizer.h"
- #include "ceres/eigen_vector_ops.h"
- #include "ceres/evaluator.h"
- #include "ceres/file.h"
- #include "ceres/line_search.h"
- #include "ceres/parallel_for.h"
- #include "ceres/stringprintf.h"
- #include "ceres/types.h"
- #include "ceres/wall_time.h"
- #include "glog/logging.h"
- // Helper macro to simplify some of the control flow.
- #define RETURN_IF_ERROR_AND_LOG(expr) \
- do { \
- if (!(expr)) { \
- LOG(ERROR) << "Terminating: " << solver_summary_->message; \
- return; \
- } \
- } while (0)

namespace ceres::internal {

void TrustRegionMinimizer::Minimize(const Minimizer::Options& options,
                                    double* parameters,
                                    Solver::Summary* solver_summary) {
  start_time_in_secs_ = WallTimeInSeconds();
  iteration_start_time_in_secs_ = start_time_in_secs_;
  Init(options, parameters, solver_summary);
  RETURN_IF_ERROR_AND_LOG(IterationZero());

  // Create the TrustRegionStepEvaluator. The construction needs to be
  // delayed to this point because we need the cost for the starting
  // point to initialize the step evaluator.
  step_evaluator_ = std::make_unique<TrustRegionStepEvaluator>(
      x_cost_,
      options_.use_nonmonotonic_steps
          ? options_.max_consecutive_nonmonotonic_steps
          : 0);
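
  // Note: when nonmonotonic steps are disabled, the history size of zero
  // should reduce the step evaluator to the classical, purely monotonic
  // acceptance test.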

  bool atleast_one_successful_step = false;
  while (FinalizeIterationAndCheckIfMinimizerCanContinue()) {
    iteration_start_time_in_secs_ = WallTimeInSeconds();

    const double previous_gradient_norm = iteration_summary_.gradient_norm;
    const double previous_gradient_max_norm =
        iteration_summary_.gradient_max_norm;

    iteration_summary_ = IterationSummary();
    iteration_summary_.iteration =
        solver_summary->iterations.back().iteration + 1;

    RETURN_IF_ERROR_AND_LOG(ComputeTrustRegionStep());
    if (!iteration_summary_.step_is_valid) {
      RETURN_IF_ERROR_AND_LOG(HandleInvalidStep());
      continue;
    }

    if (options_.is_constrained &&
        options_.max_num_line_search_step_size_iterations > 0) {
      // Use a projected line search to enforce the bounds constraints
      // and improve the quality of the step.
      DoLineSearch(x_, gradient_, x_cost_, &delta_);
    }

    ComputeCandidatePointAndEvaluateCost();
    DoInnerIterationsIfNeeded();

    if (atleast_one_successful_step && ParameterToleranceReached()) {
      return;
    }

    if (FunctionToleranceReached()) {
      return;
    }

    if (IsStepSuccessful()) {
      atleast_one_successful_step = true;
      RETURN_IF_ERROR_AND_LOG(HandleSuccessfulStep());
    } else {
      // Declare the step unsuccessful and inform the trust region strategy.
      iteration_summary_.step_is_successful = false;
      iteration_summary_.cost = candidate_cost_ + solver_summary_->fixed_cost;

      // When the step is unsuccessful, we do not compute the gradient
      // (or update x), so we preserve its value from the last
      // successful iteration.
      iteration_summary_.gradient_norm = previous_gradient_norm;
      iteration_summary_.gradient_max_norm = previous_gradient_max_norm;
      strategy_->StepRejected(iteration_summary_.relative_decrease);
    }
  }
}

// Initialize the minimizer, allocate working space and set some of
// the fields in the solver_summary.
void TrustRegionMinimizer::Init(const Minimizer::Options& options,
                                double* parameters,
                                Solver::Summary* solver_summary) {
  options_ = options;
  std::sort(options_.trust_region_minimizer_iterations_to_dump.begin(),
            options_.trust_region_minimizer_iterations_to_dump.end());

  parameters_ = parameters;

  solver_summary_ = solver_summary;
  solver_summary_->termination_type = NO_CONVERGENCE;
  solver_summary_->num_successful_steps = 0;
  solver_summary_->num_unsuccessful_steps = 0;
  solver_summary_->is_constrained = options.is_constrained;

  CHECK(options_.evaluator != nullptr);
  CHECK(options_.jacobian != nullptr);
  CHECK(options_.trust_region_strategy != nullptr);
  evaluator_ = options_.evaluator.get();
  jacobian_ = options_.jacobian.get();
  strategy_ = options_.trust_region_strategy.get();

  is_not_silent_ = !options.is_silent;
  inner_iterations_are_enabled_ =
      options.inner_iteration_minimizer.get() != nullptr;
  inner_iterations_were_useful_ = false;

  num_parameters_ = evaluator_->NumParameters();
  num_effective_parameters_ = evaluator_->NumEffectiveParameters();
  num_residuals_ = evaluator_->NumResiduals();
  num_consecutive_invalid_steps_ = 0;
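
  // Copy the user supplied parameters into x_ and allocate the working
  // vectors used by the rest of the minimization.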
  x_ = ConstVectorRef(parameters_, num_parameters_);

  residuals_.resize(num_residuals_);
  trust_region_step_.resize(num_effective_parameters_);
  delta_.resize(num_effective_parameters_);
  candidate_x_.resize(num_parameters_);
  gradient_.resize(num_effective_parameters_);
  model_residuals_.resize(num_residuals_);
  negative_gradient_.resize(num_effective_parameters_);
  projected_gradient_step_.resize(num_parameters_);

  // By default the scaling is one; if the user requests Jacobi scaling of
  // the Jacobian, we will compute and overwrite this vector.
  jacobian_scaling_ = Vector::Ones(num_effective_parameters_);

  x_cost_ = std::numeric_limits<double>::max();
  minimum_cost_ = x_cost_;
  model_cost_change_ = 0.0;
}

// 1. Project the initial solution onto the feasible set if needed.
// 2. Compute the initial cost, jacobian & gradient.
//
// Return true if all computations can be performed successfully.
bool TrustRegionMinimizer::IterationZero() {
  iteration_summary_ = IterationSummary();
  iteration_summary_.iteration = 0;
  iteration_summary_.step_is_valid = false;
  iteration_summary_.step_is_successful = false;
  iteration_summary_.cost_change = 0.0;
  iteration_summary_.gradient_max_norm = 0.0;
  iteration_summary_.gradient_norm = 0.0;
  iteration_summary_.step_norm = 0.0;
  iteration_summary_.relative_decrease = 0.0;
  iteration_summary_.eta = options_.eta;
  iteration_summary_.linear_solver_iterations = 0;
  iteration_summary_.step_solver_time_in_seconds = 0;

  if (options_.is_constrained) {
    delta_.setZero();
    if (!evaluator_->Plus(x_.data(), delta_.data(), candidate_x_.data())) {
      solver_summary_->message =
          "Unable to project initial point onto the feasible set.";
      solver_summary_->termination_type = FAILURE;
      return false;
    }
    x_ = candidate_x_;
  }

  if (!EvaluateGradientAndJacobian(/*new_evaluation_point=*/true)) {
    return false;
  }

  solver_summary_->initial_cost = x_cost_ + solver_summary_->fixed_cost;
  iteration_summary_.step_is_valid = true;
  iteration_summary_.step_is_successful = true;
  return true;
}

// For the current x_, compute
//
//  1. Cost
//  2. Jacobian
//  3. Gradient
//  4. Scale the Jacobian if needed (and compute the scaling if we are
//     in iteration zero).
//  5. Compute the 2-norm and max-norm of the gradient.
//
// Returns true if all computations could be performed
// successfully. Any failures are considered fatal and the
// Solver::Summary is updated to indicate this.
bool TrustRegionMinimizer::EvaluateGradientAndJacobian(
    bool new_evaluation_point) {
  Evaluator::EvaluateOptions evaluate_options;
  evaluate_options.new_evaluation_point = new_evaluation_point;
  if (!evaluator_->Evaluate(evaluate_options,
                            x_.data(),
                            &x_cost_,
                            residuals_.data(),
                            gradient_.data(),
                            jacobian_)) {
    solver_summary_->message = "Residual and Jacobian evaluation failed.";
    solver_summary_->termination_type = FAILURE;
    return false;
  }

  iteration_summary_.cost = x_cost_ + solver_summary_->fixed_cost;

  if (options_.jacobi_scaling) {
    if (iteration_summary_.iteration == 0) {
      // Compute a scaling vector that is used to improve the
      // conditioning of the Jacobian.
      //
      // jacobian_scaling_[i] = 1 / (1 + sqrt(diag(J'J)[i])),
      // i.e. approximately diag(J'J)^{-1/2}.
      jacobian_->SquaredColumnNorm(jacobian_scaling_.data());
      for (int i = 0; i < jacobian_->num_cols(); ++i) {
        // Add one to the denominator to prevent division by zero.
        jacobian_scaling_[i] = 1.0 / (1.0 + std::sqrt(jacobian_scaling_[i]));
      }
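      // For example, a column with squared norm 1e6 gets a scale factor of
      // roughly 1e-3 and ends up with roughly unit norm, while columns with
      // small norm are left nearly unscaled because of the 1 in the
      // denominator.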
    }

    // jacobian = jacobian * diag(J'J)^{-1/2} (approximately), using the
    // scaling vector computed in iteration zero.
    jacobian_->ScaleColumns(
        jacobian_scaling_.data(), options_.context, options_.num_threads);
  }

  // The gradient exists in the local tangent space. To account for
  // the bounds constraints correctly, instead of just computing the
  // norm of the gradient vector, we compute
  //
  //   |Plus(x, -gradient) - x|
  //
  // Where the Plus operator lifts the negative gradient to the
  // ambient space, adds it to x and projects it on the hypercube
  // defined by the bounds.
  negative_gradient_ = -gradient_;
  if (!evaluator_->Plus(x_.data(),
                        negative_gradient_.data(),
                        projected_gradient_step_.data())) {
    solver_summary_->message =
        "projected_gradient_step = Plus(x, -gradient) failed.";
    solver_summary_->termination_type = FAILURE;
    return false;
  }

  iteration_summary_.gradient_max_norm =
      (x_ - projected_gradient_step_).lpNorm<Eigen::Infinity>();
  iteration_summary_.gradient_norm = (x_ - projected_gradient_step_).norm();
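  // For an unconstrained problem with a Euclidean manifold, Plus is plain
  // addition and these quantities reduce to the usual 2-norm and max-norm of
  // the gradient.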
  return true;
}

// 1. Add the final timing information to the iteration summary.
// 2. Run the callbacks.
// 3. Check for termination based on
//    a. Run time
//    b. Iteration count
//    c. Max norm of the gradient
//    d. Size of the trust region radius.
//
// Returns true if the user did not terminate the solver and none of these
// termination criteria are met.
bool TrustRegionMinimizer::FinalizeIterationAndCheckIfMinimizerCanContinue() {
  if (iteration_summary_.step_is_successful) {
    ++solver_summary_->num_successful_steps;
    if (x_cost_ < minimum_cost_) {
      minimum_cost_ = x_cost_;
      VectorRef(parameters_, num_parameters_) = x_;
      iteration_summary_.step_is_nonmonotonic = false;
    } else {
      iteration_summary_.step_is_nonmonotonic = true;
    }
  } else {
    ++solver_summary_->num_unsuccessful_steps;
  }

  iteration_summary_.trust_region_radius = strategy_->Radius();
  iteration_summary_.iteration_time_in_seconds =
      WallTimeInSeconds() - iteration_start_time_in_secs_;
  iteration_summary_.cumulative_time_in_seconds =
      WallTimeInSeconds() - start_time_in_secs_ +
      solver_summary_->preprocessor_time_in_seconds;

  solver_summary_->iterations.push_back(iteration_summary_);

  if (!RunCallbacks(options_, iteration_summary_, solver_summary_)) {
    return false;
  }

  if (MaxSolverTimeReached()) {
    return false;
  }

  if (MaxSolverIterationsReached()) {
    return false;
  }

  if (GradientToleranceReached()) {
    return false;
  }

  if (MinTrustRegionRadiusReached()) {
    return false;
  }

  return true;
}

// Compute the trust region step using the TrustRegionStrategy chosen
// by the user.
//
// If the strategy returns with LinearSolverTerminationType::FATAL_ERROR, which
// indicates an unrecoverable error, return false. This is the only
// condition that returns false.
//
// If the strategy returns with LinearSolverTerminationType::FAILURE, which
// indicates a numerical failure that could be recovered from by retrying (e.g.
// by increasing the strength of the regularization), we set
// iteration_summary_.step_is_valid to false and return true.
//
// In all other cases, we compute the decrease in the trust region
// model problem. In exact arithmetic, this should always be
// positive, but due to numerical problems in the TrustRegionStrategy
// or round off error when computing the decrease it may be
// negative, in which case we again set
// iteration_summary_.step_is_valid to false.
bool TrustRegionMinimizer::ComputeTrustRegionStep() {
  const double strategy_start_time = WallTimeInSeconds();
  iteration_summary_.step_is_valid = false;
  TrustRegionStrategy::PerSolveOptions per_solve_options;
  per_solve_options.eta = options_.eta;
  if (std::find(options_.trust_region_minimizer_iterations_to_dump.begin(),
                options_.trust_region_minimizer_iterations_to_dump.end(),
                iteration_summary_.iteration) !=
      options_.trust_region_minimizer_iterations_to_dump.end()) {
    per_solve_options.dump_format_type =
        options_.trust_region_problem_dump_format_type;
    per_solve_options.dump_filename_base =
        JoinPath(options_.trust_region_problem_dump_directory,
                 StringPrintf("ceres_solver_iteration_%03d",
                              iteration_summary_.iteration));
  }

  TrustRegionStrategy::Summary strategy_summary =
      strategy_->ComputeStep(per_solve_options,
                             jacobian_,
                             residuals_.data(),
                             trust_region_step_.data());

  if (strategy_summary.termination_type ==
      LinearSolverTerminationType::FATAL_ERROR) {
    solver_summary_->message =
        "Linear solver failed due to unrecoverable "
        "non-numeric causes. Please see the error log for clues. ";
    solver_summary_->termination_type = FAILURE;
    return false;
  }

  iteration_summary_.step_solver_time_in_seconds =
      WallTimeInSeconds() - strategy_start_time;
  iteration_summary_.linear_solver_iterations = strategy_summary.num_iterations;

  if (strategy_summary.termination_type ==
      LinearSolverTerminationType::FAILURE) {
    return true;
  }

  // new_model_cost
  //   = 1/2 [f + J * step]^2
  //   = 1/2 [ f'f + 2f'J * step + step' * J' * J * step ]
  // model_cost_change
  //   = cost - new_model_cost
  //   = f'f/2  - 1/2 [ f'f + 2f'J * step + step' * J' * J * step]
  //   = -f'J * step - step' * J' * J * step / 2
  //   = -(J * step)'(f + J * step / 2)
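  //
  // The next three statements compute model_residuals_ = J * step and then
  // model_cost_change_ = -(J * step)'(f + J * step / 2) in parallel.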
  ParallelSetZero(options_.context, options_.num_threads, model_residuals_);
  jacobian_->RightMultiplyAndAccumulate(trust_region_step_.data(),
                                        model_residuals_.data(),
                                        options_.context,
                                        options_.num_threads);
  model_cost_change_ = -Dot(model_residuals_,
                            residuals_ + model_residuals_ / 2.0,
                            options_.context,
                            options_.num_threads);

  // TODO(sameeragarwal)
  //
  //  1. What happens if model_cost_change_ = 0
  //  2. What happens if -epsilon <= model_cost_change_ < 0 for some
  //     small epsilon due to round off error.
  iteration_summary_.step_is_valid = (model_cost_change_ > 0.0);
  if (iteration_summary_.step_is_valid) {
    // Undo the Jacobian column scaling.
    ParallelAssign(options_.context,
                   options_.num_threads,
                   delta_,
                   (trust_region_step_.array() * jacobian_scaling_.array()));
    num_consecutive_invalid_steps_ = 0;
  }

  if (is_not_silent_ && !iteration_summary_.step_is_valid) {
    VLOG(1) << "Invalid step: current_cost: " << x_cost_
            << " absolute model cost change: " << model_cost_change_
            << " relative model cost change: "
            << (model_cost_change_ / x_cost_);
  }
  return true;
}

// Invalid steps can happen for a number of reasons. We allow a limited
// number of consecutive failures, and return false if this limit is
// exceeded.
bool TrustRegionMinimizer::HandleInvalidStep() {
  // TODO(sameeragarwal): Should we be returning FAILURE or
  // NO_CONVERGENCE? The solution value is still usable in many cases,
  // so it is not clear if we should declare the solver a failure
  // entirely. For example, the case where model_cost_change ~ 0.0, but
  // just slightly negative.
  if (++num_consecutive_invalid_steps_ >=
      options_.max_num_consecutive_invalid_steps) {
    solver_summary_->message = StringPrintf(
        "Number of consecutive invalid steps more "
        "than Solver::Options::max_num_consecutive_invalid_steps: %d",
        options_.max_num_consecutive_invalid_steps);
    solver_summary_->termination_type = FAILURE;
    return false;
  }

  strategy_->StepIsInvalid();

  // We are going to try and reduce the trust region radius and
  // solve again. To do this, we are going to treat this iteration
  // as an unsuccessful iteration. Since the various callbacks are
  // still executed, we are going to fill the iteration summary
  // with data that assumes a step of length zero and no progress.
  iteration_summary_.cost = x_cost_ + solver_summary_->fixed_cost;
  iteration_summary_.cost_change = 0.0;
  iteration_summary_.gradient_max_norm =
      solver_summary_->iterations.back().gradient_max_norm;
  iteration_summary_.gradient_norm =
      solver_summary_->iterations.back().gradient_norm;
  iteration_summary_.step_norm = 0.0;
  iteration_summary_.relative_decrease = 0.0;
  iteration_summary_.eta = options_.eta;
  return true;
}

// Use the supplied coordinate descent minimizer to perform inner
// iterations and compute the improvement due to them. If the inner
// iterations succeed, candidate_cost_ is updated with the cost after
// the inner iterations.
//
// The optimization is performed with candidate_x_ as the starting
// point, and if the optimization is successful, candidate_x_ will be
// updated with the optimized parameters.
void TrustRegionMinimizer::DoInnerIterationsIfNeeded() {
  inner_iterations_were_useful_ = false;
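  // Skip the inner iterations if they are disabled, or if the candidate
  // point could not be evaluated (candidate_cost_ was set to the sentinel
  // value of max()).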
  if (!inner_iterations_are_enabled_ ||
      candidate_cost_ >= std::numeric_limits<double>::max()) {
    return;
  }

  double inner_iteration_start_time = WallTimeInSeconds();
  ++solver_summary_->num_inner_iteration_steps;
  inner_iteration_x_ = candidate_x_;
  Solver::Summary inner_iteration_summary;
  options_.inner_iteration_minimizer->Minimize(
      options_, inner_iteration_x_.data(), &inner_iteration_summary);
  double inner_iteration_cost;
  if (!evaluator_->Evaluate(inner_iteration_x_.data(),
                            &inner_iteration_cost,
                            nullptr,
                            nullptr,
                            nullptr)) {
    if (is_not_silent_) {
      VLOG(2) << "Inner iteration failed.";
    }
    return;
  }

  if (is_not_silent_) {
    VLOG(2) << "Inner iteration succeeded; Current cost: " << x_cost_
            << " Trust region step cost: " << candidate_cost_
            << " Inner iteration cost: " << inner_iteration_cost;
  }

  candidate_x_ = inner_iteration_x_;

  // Normally, the quality of a trust region step is measured by
  // the ratio
  //
  //           cost_change
  //   r =  -----------------
  //        model_cost_change
  //
  // All the change in the nonlinear objective is due to the trust
  // region step so this ratio is a good measure of the quality of
  // the trust region radius. However, when inner iterations are
  // being used, cost_change includes the contribution of the
  // inner iterations and it is not fair to credit it all to the
  // trust region algorithm. So we change the ratio to be
  //
  //                      cost_change
  //   r =  ------------------------------------------------
  //        (model_cost_change + inner_iteration_cost_change)
  //
  // Practically we do this by increasing model_cost_change by
  // inner_iteration_cost_change.
  const double inner_iteration_cost_change =
      candidate_cost_ - inner_iteration_cost;
  model_cost_change_ += inner_iteration_cost_change;
  inner_iterations_were_useful_ = inner_iteration_cost < x_cost_;
  const double inner_iteration_relative_progress =
      1.0 - inner_iteration_cost / candidate_cost_;

  // Disable inner iterations once the relative improvement
  // drops below tolerance.
  inner_iterations_are_enabled_ =
      (inner_iteration_relative_progress > options_.inner_iteration_tolerance);
  if (is_not_silent_ && !inner_iterations_are_enabled_) {
    VLOG(2) << "Disabling inner iterations. Progress : "
            << inner_iteration_relative_progress;
  }

  candidate_cost_ = inner_iteration_cost;

  solver_summary_->inner_iteration_time_in_seconds +=
      WallTimeInSeconds() - inner_iteration_start_time;
}

// Perform a projected line search to improve the objective function
// value along delta.
//
// TODO(sameeragarwal): The current implementation does not do
// anything illegal but is incorrect and not terribly effective.
//
// https://github.com/ceres-solver/ceres-solver/issues/187
void TrustRegionMinimizer::DoLineSearch(const Vector& x,
                                        const Vector& gradient,
                                        const double cost,
                                        Vector* delta) {
  LineSearchFunction line_search_function(evaluator_);

  LineSearch::Options line_search_options;
  line_search_options.is_silent = true;
  line_search_options.interpolation_type =
      options_.line_search_interpolation_type;
  line_search_options.min_step_size = options_.min_line_search_step_size;
  line_search_options.sufficient_decrease =
      options_.line_search_sufficient_function_decrease;
  line_search_options.max_step_contraction =
      options_.max_line_search_step_contraction;
  line_search_options.min_step_contraction =
      options_.min_line_search_step_contraction;
  line_search_options.max_num_iterations =
      options_.max_num_line_search_step_size_iterations;
  line_search_options.sufficient_curvature_decrease =
      options_.line_search_sufficient_curvature_decrease;
  line_search_options.max_step_expansion =
      options_.max_line_search_step_expansion;
  line_search_options.function = &line_search_function;

  std::string message;
  std::unique_ptr<LineSearch> line_search(
      LineSearch::Create(ceres::ARMIJO, line_search_options, &message));
  LineSearch::Summary line_search_summary;
  line_search_function.Init(x, *delta);
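  // Search along delta starting from the full step (step size 1.0); the
  // initial cost is cost and the initial directional derivative is
  // gradient.dot(delta).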
- line_search->Search(1.0, cost, gradient.dot(*delta), &line_search_summary);
- solver_summary_->num_line_search_steps += line_search_summary.num_iterations;
- solver_summary_->line_search_cost_evaluation_time_in_seconds +=
- line_search_summary.cost_evaluation_time_in_seconds;
- solver_summary_->line_search_gradient_evaluation_time_in_seconds +=
- line_search_summary.gradient_evaluation_time_in_seconds;
- solver_summary_->line_search_polynomial_minimization_time_in_seconds +=
- line_search_summary.polynomial_minimization_time_in_seconds;
- solver_summary_->line_search_total_time_in_seconds +=
- line_search_summary.total_time_in_seconds;
- if (line_search_summary.success) {
- *delta *= line_search_summary.optimal_point.x;
- }
- }

// Check if the maximum amount of time allowed by the user for the
// solver has been exceeded, and if so return true after updating
// Solver::Summary::message.
bool TrustRegionMinimizer::MaxSolverTimeReached() {
  const double total_solver_time =
      WallTimeInSeconds() - start_time_in_secs_ +
      solver_summary_->preprocessor_time_in_seconds;
  if (total_solver_time < options_.max_solver_time_in_seconds) {
    return false;
  }

  solver_summary_->message = StringPrintf(
      "Maximum solver time reached. "
      "Total solver time: %e >= %e.",
      total_solver_time,
      options_.max_solver_time_in_seconds);
  solver_summary_->termination_type = NO_CONVERGENCE;
  if (is_not_silent_) {
    VLOG(1) << "Terminating: " << solver_summary_->message;
  }
  return true;
}

// Check if the maximum number of iterations allowed by the user for
// the solver has been exceeded, and if so return true after updating
// Solver::Summary::message.
bool TrustRegionMinimizer::MaxSolverIterationsReached() {
  if (iteration_summary_.iteration < options_.max_num_iterations) {
    return false;
  }

  solver_summary_->message = StringPrintf(
      "Maximum number of iterations reached. "
      "Number of iterations: %d.",
      iteration_summary_.iteration);
  solver_summary_->termination_type = NO_CONVERGENCE;
  if (is_not_silent_) {
    VLOG(1) << "Terminating: " << solver_summary_->message;
  }
  return true;
}

// Check convergence based on the max norm of the gradient (only for
// iterations where the step was declared successful).
bool TrustRegionMinimizer::GradientToleranceReached() {
  if (!iteration_summary_.step_is_successful ||
      iteration_summary_.gradient_max_norm > options_.gradient_tolerance) {
    return false;
  }

  solver_summary_->message = StringPrintf(
      "Gradient tolerance reached. "
      "Gradient max norm: %e <= %e",
      iteration_summary_.gradient_max_norm,
      options_.gradient_tolerance);
  solver_summary_->termination_type = CONVERGENCE;
  if (is_not_silent_) {
    VLOG(1) << "Terminating: " << solver_summary_->message;
  }
  return true;
}

// Check convergence based on the size of the trust region radius.
bool TrustRegionMinimizer::MinTrustRegionRadiusReached() {
  if (iteration_summary_.trust_region_radius >
      options_.min_trust_region_radius) {
    return false;
  }

  solver_summary_->message = StringPrintf(
      "Minimum trust region radius reached. "
      "Trust region radius: %e <= %e",
      iteration_summary_.trust_region_radius,
      options_.min_trust_region_radius);
  solver_summary_->termination_type = CONVERGENCE;
  if (is_not_silent_) {
    VLOG(1) << "Terminating: " << solver_summary_->message;
  }
  return true;
}

// Solver::Options::parameter_tolerance based convergence check.
bool TrustRegionMinimizer::ParameterToleranceReached() {
  const double x_norm = x_.norm();

  // Compute the norm of the step in the ambient space.
  iteration_summary_.step_norm = (x_ - candidate_x_).norm();
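
  // Declare convergence when
  //
  //   |x - candidate_x| <= parameter_tolerance * (|x| + parameter_tolerance).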
  const double step_size_tolerance =
      options_.parameter_tolerance * (x_norm + options_.parameter_tolerance);

  if (iteration_summary_.step_norm > step_size_tolerance) {
    return false;
  }

  solver_summary_->message = StringPrintf(
      "Parameter tolerance reached. "
      "Relative step_norm: %e <= %e.",
      (iteration_summary_.step_norm / (x_norm + options_.parameter_tolerance)),
      options_.parameter_tolerance);
  solver_summary_->termination_type = CONVERGENCE;
  if (is_not_silent_) {
    VLOG(1) << "Terminating: " << solver_summary_->message;
  }
  return true;
}

// Solver::Options::function_tolerance based convergence check.
bool TrustRegionMinimizer::FunctionToleranceReached() {
  iteration_summary_.cost_change = x_cost_ - candidate_cost_;
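
  // Declare convergence when |cost_change| <= function_tolerance * cost.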
  const double absolute_function_tolerance =
      options_.function_tolerance * x_cost_;
  if (fabs(iteration_summary_.cost_change) > absolute_function_tolerance) {
    return false;
  }

  solver_summary_->message = StringPrintf(
      "Function tolerance reached. "
      "|cost_change|/cost: %e <= %e",
      fabs(iteration_summary_.cost_change) / x_cost_,
      options_.function_tolerance);
  solver_summary_->termination_type = CONVERGENCE;
  if (is_not_silent_) {
    VLOG(1) << "Terminating: " << solver_summary_->message;
  }
  return true;
}

// Compute candidate_x_ = Plus(x_, delta_) and evaluate its cost as
// candidate_cost_.
//
// Failure to compute the step or the cost means that candidate_cost_ is set
// to std::numeric_limits<double>::max(). Unlike EvaluateGradientAndJacobian,
// failure in this function is not fatal, as we are only computing and
// evaluating a candidate point; if for some reason we are unable to evaluate
// it, we consider it to be a point with very high cost. This allows the user
// to deal with edge cases/constraints as part of the Manifold and
// CostFunction objects.
void TrustRegionMinimizer::ComputeCandidatePointAndEvaluateCost() {
  if (!evaluator_->Plus(x_.data(), delta_.data(), candidate_x_.data())) {
    if (is_not_silent_) {
      LOG(WARNING) << "x_plus_delta = Plus(x, delta) failed. "
                   << "Treating it as a step with infinite cost";
    }
    candidate_cost_ = std::numeric_limits<double>::max();
    return;
  }

  if (!evaluator_->Evaluate(
          candidate_x_.data(), &candidate_cost_, nullptr, nullptr, nullptr)) {
    if (is_not_silent_) {
      LOG(WARNING) << "Step failed to evaluate. "
                   << "Treating it as a step with infinite cost";
    }
    candidate_cost_ = std::numeric_limits<double>::max();
  }
}

bool TrustRegionMinimizer::IsStepSuccessful() {
  iteration_summary_.relative_decrease =
      step_evaluator_->StepQuality(candidate_cost_, model_cost_change_);

  // In most cases, boosting the model_cost_change by the
  // improvement caused by the inner iterations is fine, but it can
  // be the case that the original trust region step was so bad that
  // the resulting improvement in the cost was negative, and the
  // change caused by the inner iterations was large enough to
  // improve the step, but also to make relative decrease quite
  // small.
  //
  // This can cause the trust region loop to reject this step. To
  // get around this, we explicitly check if the inner iterations
  // led to a net decrease in the objective function value. If
  // they did, we accept the step even if the trust region ratio
  // is small.
  //
  // Notice that we do not just check that cost_change is positive
  // which is a weaker condition and would render the
  // min_relative_decrease threshold useless. Instead, we keep
  // track of inner_iterations_were_useful, which is true only
  // when inner iterations lead to a net decrease in the cost.
  return (inner_iterations_were_useful_ ||
          iteration_summary_.relative_decrease >
              options_.min_relative_decrease);
}

// Declare the step successful, move to candidate_x, update the
// derivatives and let the trust region strategy and the step
// evaluator know that the step has been accepted.
bool TrustRegionMinimizer::HandleSuccessfulStep() {
  x_ = candidate_x_;

  // Since the step was successful, this point has already had its residuals
  // evaluated (but not the Jacobian), so indicate that to the evaluator.
  if (!EvaluateGradientAndJacobian(/*new_evaluation_point=*/false)) {
    return false;
  }

  iteration_summary_.step_is_successful = true;
  strategy_->StepAccepted(iteration_summary_.relative_decrease);
  step_evaluator_->StepAccepted(candidate_cost_, model_cost_change_);
  return true;
}

}  // namespace ceres::internal