.. highlight:: c++

.. default-domain:: cpp

.. _chapter-gradient_tutorial:

==================================
General Unconstrained Minimization
==================================

Ceres Solver, besides being able to solve non-linear least squares
problems, can also solve general unconstrained optimization problems
using just their objective function values and gradients. In this
chapter we will see how to do this.

Rosenbrock's Function
=====================

Consider minimizing the famous `Rosenbrock's function
<http://en.wikipedia.org/wiki/Rosenbrock_function>`_ [#f1]_.

The simplest way to minimize it is to define a templated functor that
evaluates the objective value of this function and then use Ceres
Solver's automatic differentiation to compute its derivatives.

We begin by defining a templated functor and then using
``AutoDiffFirstOrderFunction`` to construct an instance of the
``FirstOrderFunction`` interface. This is the object responsible for
computing the objective function value and the gradient (if
required), and is the analog of :class:`CostFunction` when defining
non-linear least squares problems in Ceres.

.. code::

   // f(x,y) = (1-x)^2 + 100(y - x^2)^2;
   struct Rosenbrock {
     template <typename T>
     bool operator()(const T* parameters, T* cost) const {
       const T x = parameters[0];
       const T y = parameters[1];
       cost[0] = (1.0 - x) * (1.0 - x) + 100.0 * (y - x * x) * (y - x * x);
       return true;
     }

     static ceres::FirstOrderFunction* Create() {
       constexpr int kNumParameters = 2;
       return new ceres::AutoDiffFirstOrderFunction<Rosenbrock, kNumParameters>(
           new Rosenbrock);
     }
   };

Minimizing it then is a straightforward matter of constructing a
:class:`GradientProblem` object and calling :func:`Solve` on it.

.. code::

   double parameters[2] = {-1.2, 1.0};

   ceres::GradientProblem problem(Rosenbrock::Create());

   ceres::GradientProblemSolver::Options options;
   options.minimizer_progress_to_stdout = true;
   ceres::GradientProblemSolver::Summary summary;
   ceres::Solve(options, problem, parameters, &summary);

   std::cout << summary.FullReport() << "\n";

Executing this code solves the problem using the limited memory
`BFGS
<http://en.wikipedia.org/wiki/Broyden%E2%80%93Fletcher%E2%80%93Goldfarb%E2%80%93Shanno_algorithm>`_
algorithm and produces the following output:

.. code-block:: bash

    0: f: 2.420000e+01 d: 0.00e+00 g: 2.16e+02 h: 0.00e+00 s: 0.00e+00 e:  0 it: 2.29e-05 tt: 2.29e-05
    1: f: 4.280493e+00 d: 1.99e+01 g: 1.52e+01 h: 2.01e-01 s: 8.62e-04 e:  2 it: 8.39e-05 tt: 1.62e-04
    2: f: 3.571154e+00 d: 7.09e-01 g: 1.35e+01 h: 3.78e-01 s: 1.34e-01 e:  3 it: 2.22e-05 tt: 1.91e-04
    3: f: 3.440869e+00 d: 1.30e-01 g: 1.73e+01 h: 1.36e-01 s: 1.00e+00 e:  1 it: 5.01e-06 tt: 2.01e-04
    4: f: 3.213597e+00 d: 2.27e-01 g: 1.55e+01 h: 1.06e-01 s: 4.59e-01 e:  1 it: 3.81e-06 tt: 2.10e-04
    5: f: 2.839723e+00 d: 3.74e-01 g: 1.05e+01 h: 1.34e-01 s: 5.24e-01 e:  1 it: 4.05e-06 tt: 2.19e-04
    6: f: 2.448490e+00 d: 3.91e-01 g: 1.29e+01 h: 3.04e-01 s: 1.00e+00 e:  1 it: 5.01e-06 tt: 2.28e-04
    7: f: 1.943019e+00 d: 5.05e-01 g: 4.00e+00 h: 8.81e-02 s: 7.43e-01 e:  1 it: 4.05e-06 tt: 2.36e-04
    8: f: 1.731469e+00 d: 2.12e-01 g: 7.36e+00 h: 1.71e-01 s: 4.60e-01 e:  2 it: 1.22e-05 tt: 2.52e-04
    9: f: 1.503267e+00 d: 2.28e-01 g: 6.47e+00 h: 8.66e-02 s: 1.00e+00 e:  1 it: 5.96e-06 tt: 2.66e-04
   10: f: 1.228331e+00 d: 2.75e-01 g: 2.00e+00 h: 7.70e-02 s: 7.90e-01 e:  1 it: 4.05e-06 tt: 2.75e-04
   11: f: 1.016523e+00 d: 2.12e-01 g: 5.15e+00 h: 1.39e-01 s: 3.76e-01 e:  2 it: 9.06e-06 tt: 2.88e-04
   12: f: 9.145773e-01 d: 1.02e-01 g: 6.74e+00 h: 7.98e-02 s: 1.00e+00 e:  1 it: 5.01e-06 tt: 2.97e-04
   13: f: 7.508302e-01 d: 1.64e-01 g: 3.88e+00 h: 5.76e-02 s: 4.93e-01 e:  1 it: 5.01e-06 tt: 3.05e-04
   14: f: 5.832378e-01 d: 1.68e-01 g: 5.56e+00 h: 1.42e-01 s: 1.00e+00 e:  1 it: 4.77e-06 tt: 3.13e-04
   15: f: 3.969581e-01 d: 1.86e-01 g: 1.64e+00 h: 1.17e-01 s: 1.00e+00 e:  1 it: 4.05e-06 tt: 3.20e-04
   16: f: 3.171557e-01 d: 7.98e-02 g: 3.84e+00 h: 1.18e-01 s: 3.97e-01 e:  2 it: 8.82e-06 tt: 3.33e-04
   17: f: 2.641257e-01 d: 5.30e-02 g: 3.27e+00 h: 6.14e-02 s: 1.00e+00 e:  1 it: 4.05e-06 tt: 3.42e-04
   18: f: 1.909730e-01 d: 7.32e-02 g: 5.29e-01 h: 8.55e-02 s: 6.82e-01 e:  1 it: 1.00e-05 tt: 4.64e-04
   19: f: 1.472012e-01 d: 4.38e-02 g: 3.11e+00 h: 1.20e-01 s: 3.47e-01 e:  2 it: 1.29e-05 tt: 4.87e-04
   20: f: 1.093558e-01 d: 3.78e-02 g: 2.97e+00 h: 8.43e-02 s: 1.00e+00 e:  1 it: 5.01e-06 tt: 4.97e-04
   21: f: 6.710346e-02 d: 4.23e-02 g: 1.42e+00 h: 9.64e-02 s: 8.85e-01 e:  1 it: 4.05e-06 tt: 5.06e-04
   22: f: 3.993377e-02 d: 2.72e-02 g: 2.30e+00 h: 1.29e-01 s: 4.63e-01 e:  2 it: 1.00e-05 tt: 5.25e-04
   23: f: 2.911794e-02 d: 1.08e-02 g: 2.55e+00 h: 6.55e-02 s: 1.00e+00 e:  1 it: 5.01e-06 tt: 5.34e-04
   24: f: 1.457683e-02 d: 1.45e-02 g: 2.77e-01 h: 6.37e-02 s: 6.14e-01 e:  1 it: 4.05e-06 tt: 5.42e-04
   25: f: 8.577515e-03 d: 6.00e-03 g: 2.86e+00 h: 1.40e-01 s: 1.00e+00 e:  1 it: 3.81e-06 tt: 5.49e-04
   26: f: 3.486574e-03 d: 5.09e-03 g: 1.76e-01 h: 1.23e-02 s: 1.00e+00 e:  1 it: 4.05e-06 tt: 5.57e-04
   27: f: 1.257570e-03 d: 2.23e-03 g: 1.39e-01 h: 5.08e-02 s: 1.00e+00 e:  1 it: 3.81e-06 tt: 5.65e-04
   28: f: 2.783568e-04 d: 9.79e-04 g: 6.20e-01 h: 6.47e-02 s: 1.00e+00 e:  1 it: 4.05e-06 tt: 5.73e-04
   29: f: 2.533399e-05 d: 2.53e-04 g: 1.68e-02 h: 1.98e-03 s: 1.00e+00 e:  1 it: 4.05e-06 tt: 5.81e-04
   30: f: 7.591572e-07 d: 2.46e-05 g: 5.40e-03 h: 9.27e-03 s: 1.00e+00 e:  1 it: 5.96e-06 tt: 6.30e-04
   31: f: 1.902460e-09 d: 7.57e-07 g: 1.62e-03 h: 1.89e-03 s: 1.00e+00 e:  1 it: 4.05e-06 tt: 6.39e-04
   32: f: 1.003030e-12 d: 1.90e-09 g: 3.50e-05 h: 3.52e-05 s: 1.00e+00 e:  1 it: 3.81e-06 tt: 6.47e-04
   33: f: 4.835994e-17 d: 1.00e-12 g: 1.05e-07 h: 1.13e-06 s: 1.00e+00 e:  1 it: 4.05e-06 tt: 6.59e-04
   34: f: 1.885250e-22 d: 4.84e-17 g: 2.69e-10 h: 1.45e-08 s: 1.00e+00 e:  1 it: 4.05e-06 tt: 6.67e-04

   Solver Summary (v 2.1.0-eigen-(3.4.0)-lapack-suitesparse-(5.10.1)-cxsparse-(3.2.0)-acceleratesparse-eigensparse-no_openmp)

   Parameters                                  2
   Line search direction              LBFGS (20)
   Line search type                  CUBIC WOLFE

   Cost:
   Initial                          2.420000e+01
   Final                            1.955192e-27
   Change                           2.420000e+01

   Minimizer iterations                       36

   Time (in seconds):

     Cost evaluation                    0.000000 (0)
     Gradient & cost evaluation         0.000005 (44)
     Polynomial minimization            0.000041
   Total                                0.000368

   Termination:                      CONVERGENCE (Parameter tolerance reached. Relative step_norm: 1.890726e-11 <= 1.000000e-08.)

   Initial x: -1.2 y: 1
   Final   x: 1 y: 1

If you are unable to use automatic differentiation for some reason
(say because you need to call an external library), then you can use
numeric differentiation. In that case the functor is defined as
follows [#f2]_.

.. code::

   // f(x,y) = (1-x)^2 + 100(y - x^2)^2;
   struct Rosenbrock {
     bool operator()(const double* parameters, double* cost) const {
       const double x = parameters[0];
       const double y = parameters[1];
       cost[0] = (1.0 - x) * (1.0 - x) + 100.0 * (y - x * x) * (y - x * x);
       return true;
     }

     static ceres::FirstOrderFunction* Create() {
       constexpr int kNumParameters = 2;
       return new ceres::NumericDiffFirstOrderFunction<Rosenbrock,
                                                       ceres::CENTRAL,
                                                       kNumParameters>(
           new Rosenbrock);
     }
   };

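Here ``ceres::CENTRAL`` selects central differences as the finite
differencing scheme; ``ceres::FORWARD`` and ``ceres::RIDDERS`` are
the other methods Ceres provides, trading accuracy against the number
of function evaluations.
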
And finally, if you would rather compute the derivatives by hand (say
because the size of the parameter vector is too large to be
automatically differentiated), then you should define your own
instance of `FirstOrderFunction`, which is the analog of
:class:`CostFunction` for non-linear least squares problems [#f3]_.

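For reference, differentiating :math:`f(x,y) = (1-x)^2 + 100(y -
x^2)^2` by hand gives the gradient that ``Evaluate`` computes below:

.. math::

   \frac{\partial f}{\partial x} &= -2(1 - x) - 400 \, x \, (y - x^2) \\
   \frac{\partial f}{\partial y} &= 200 \, (y - x^2)
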
.. code::

   // f(x,y) = (1-x)^2 + 100(y - x^2)^2;
   class Rosenbrock final : public ceres::FirstOrderFunction {
    public:
     ~Rosenbrock() override {}

     bool Evaluate(const double* parameters,
                   double* cost,
                   double* gradient) const override {
       const double x = parameters[0];
       const double y = parameters[1];

       cost[0] = (1.0 - x) * (1.0 - x) + 100.0 * (y - x * x) * (y - x * x);
       if (gradient) {
         gradient[0] = -2.0 * (1.0 - x) - 200.0 * (y - x * x) * 2.0 * x;
         gradient[1] = 200.0 * (y - x * x);
       }
       return true;
     }

     int NumParameters() const override { return 2; }
   };

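As before, minimization is then just a matter of handing an instance
of this class to :class:`GradientProblem`, which takes ownership of
it. A minimal sketch, mirroring the earlier example:

.. code::

   double parameters[2] = {-1.2, 1.0};
   ceres::GradientProblem problem(new Rosenbrock);

   ceres::GradientProblemSolver::Options options;
   ceres::GradientProblemSolver::Summary summary;
   ceres::Solve(options, problem, parameters, &summary);
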
.. rubric:: Footnotes

.. [#f1] `examples/rosenbrock.cc
   <https://ceres-solver.googlesource.com/ceres-solver/+/master/examples/rosenbrock.cc>`_

.. [#f2] `examples/rosenbrock_numeric_diff.cc
   <https://ceres-solver.googlesource.com/ceres-solver/+/master/examples/rosenbrock_numeric_diff.cc>`_

.. [#f3] `examples/rosenbrock_analytic_diff.cc
   <https://ceres-solver.googlesource.com/ceres-solver/+/master/examples/rosenbrock_analytic_diff.cc>`_