covariance_impl.cc

// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2022 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
//   this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
//   this list of conditions and the following disclaimer in the documentation
//   and/or other materials provided with the distribution.
// * Neither the name of Google Inc. nor the names of its contributors may be
//   used to endorse or promote products derived from this software without
//   specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
// Author: sameeragarwal@google.com (Sameer Agarwal)

#include "ceres/covariance_impl.h"

#include <algorithm>
#include <cmath>
#include <cstdlib>
#include <map>
#include <memory>
#include <numeric>
#include <sstream>
#include <unordered_set>
#include <utility>
#include <vector>

#include "Eigen/SVD"
#include "Eigen/SparseCore"
#include "Eigen/SparseQR"
#include "ceres/compressed_col_sparse_matrix_utils.h"
#include "ceres/compressed_row_sparse_matrix.h"
#include "ceres/covariance.h"
#include "ceres/crs_matrix.h"
#include "ceres/internal/eigen.h"
#include "ceres/map_util.h"
#include "ceres/parallel_for.h"
#include "ceres/parallel_utils.h"
#include "ceres/parameter_block.h"
#include "ceres/problem_impl.h"
#include "ceres/residual_block.h"
#include "ceres/suitesparse.h"
#include "ceres/wall_time.h"
#include "glog/logging.h"

namespace ceres::internal {

using CovarianceBlocks = std::vector<std::pair<const double*, const double*>>;

CovarianceImpl::CovarianceImpl(const Covariance::Options& options)
    : options_(options), is_computed_(false), is_valid_(false) {
  evaluate_options_.num_threads = options_.num_threads;
  evaluate_options_.apply_loss_function = options_.apply_loss_function;
}

CovarianceImpl::~CovarianceImpl() = default;
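
// Checks a set of blocks for duplicates, and LOG(FATAL)s with the
// indices of the duplicates if any are found. Takes its argument by
// value, since the blocks are sorted in the process.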
template <typename T>
void CheckForDuplicates(std::vector<T> blocks) {
  std::sort(blocks.begin(), blocks.end());
  auto it = std::adjacent_find(blocks.begin(), blocks.end());
  if (it != blocks.end()) {
    // In case there are duplicates, we search for their location.
    std::map<T, std::vector<int>> blocks_map;
    for (int i = 0; i < blocks.size(); ++i) {
      blocks_map[blocks[i]].push_back(i);
    }

    std::ostringstream duplicates;
    while (it != blocks.end()) {
      duplicates << "(";
      for (int i = 0; i < blocks_map[*it].size() - 1; ++i) {
        duplicates << blocks_map[*it][i] << ", ";
      }
      duplicates << blocks_map[*it].back() << ")";
      it = std::adjacent_find(it + 1, blocks.end());
      if (it < blocks.end()) {
        duplicates << " and ";
      }
    }

    LOG(FATAL) << "Covariance::Compute called with duplicate blocks at "
               << "indices " << duplicates.str();
  }
}

bool CovarianceImpl::Compute(const CovarianceBlocks& covariance_blocks,
                             ProblemImpl* problem) {
  CheckForDuplicates<std::pair<const double*, const double*>>(
      covariance_blocks);
  problem_ = problem;
  parameter_block_to_row_index_.clear();
  covariance_matrix_ = nullptr;
  is_valid_ = (ComputeCovarianceSparsity(covariance_blocks, problem) &&
               ComputeCovarianceValues());
  is_computed_ = true;
  return is_valid_;
}

bool CovarianceImpl::Compute(const std::vector<const double*>& parameter_blocks,
                             ProblemImpl* problem) {
  CheckForDuplicates<const double*>(parameter_blocks);
  CovarianceBlocks covariance_blocks;
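
  // Compute a covariance block for every pair drawn from the upper
  // triangle of the parameter block list. For example, given blocks
  // {a, b, c} this requests (a,a), (a,b), (a,c), (b,b), (b,c) and (c,c).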
  for (int i = 0; i < parameter_blocks.size(); ++i) {
    for (int j = i; j < parameter_blocks.size(); ++j) {
      covariance_blocks.push_back(
          std::make_pair(parameter_blocks[i], parameter_blocks[j]));
    }
  }

  return Compute(covariance_blocks, problem);
}

bool CovarianceImpl::GetCovarianceBlockInTangentOrAmbientSpace(
    const double* original_parameter_block1,
    const double* original_parameter_block2,
    bool lift_covariance_to_ambient_space,
    double* covariance_block) const {
  CHECK(is_computed_)
      << "Covariance::GetCovarianceBlock called before Covariance::Compute";
  CHECK(is_valid_)
      << "Covariance::GetCovarianceBlock called when Covariance::Compute "
      << "returned false.";

  // If either of the two parameter blocks is constant, then the
  // covariance block is also zero.
  if (constant_parameter_blocks_.count(original_parameter_block1) > 0 ||
      constant_parameter_blocks_.count(original_parameter_block2) > 0) {
    const ProblemImpl::ParameterMap& parameter_map = problem_->parameter_map();
    ParameterBlock* block1 = FindOrDie(
        parameter_map, const_cast<double*>(original_parameter_block1));
    ParameterBlock* block2 = FindOrDie(
        parameter_map, const_cast<double*>(original_parameter_block2));

    const int block1_size = block1->Size();
    const int block2_size = block2->Size();
    const int block1_tangent_size = block1->TangentSize();
    const int block2_tangent_size = block2->TangentSize();
    if (!lift_covariance_to_ambient_space) {
      MatrixRef(covariance_block, block1_tangent_size, block2_tangent_size)
          .setZero();
    } else {
      MatrixRef(covariance_block, block1_size, block2_size).setZero();
    }
    return true;
  }

  const double* parameter_block1 = original_parameter_block1;
  const double* parameter_block2 = original_parameter_block2;
  const bool transpose = parameter_block1 > parameter_block2;
  if (transpose) {
    std::swap(parameter_block1, parameter_block2);
  }

  // Find where in the covariance matrix the block is located.
  const int row_begin =
      FindOrDie(parameter_block_to_row_index_, parameter_block1);
  const int col_begin =
      FindOrDie(parameter_block_to_row_index_, parameter_block2);
  const int* rows = covariance_matrix_->rows();
  const int* cols = covariance_matrix_->cols();
  const int row_size = rows[row_begin + 1] - rows[row_begin];
  const int* cols_begin = cols + rows[row_begin];

  // The only part that requires work is walking the compressed column
  // vector to determine where the set of columns corresponding to the
  // covariance block begin. The bounds check must come first so that
  // cols_begin is never read past the end of the row.
  int offset = 0;
  while (offset < row_size && cols_begin[offset] != col_begin) {
    ++offset;
  }

  if (offset == row_size) {
    LOG(ERROR) << "Unable to find covariance block for "
               << original_parameter_block1 << " "
               << original_parameter_block2;
    return false;
  }

  const ProblemImpl::ParameterMap& parameter_map = problem_->parameter_map();
  ParameterBlock* block1 =
      FindOrDie(parameter_map, const_cast<double*>(parameter_block1));
  ParameterBlock* block2 =
      FindOrDie(parameter_map, const_cast<double*>(parameter_block2));
  const Manifold* manifold1 = block1->manifold();
  const Manifold* manifold2 = block2->manifold();
  const int block1_size = block1->Size();
  const int block1_tangent_size = block1->TangentSize();
  const int block2_size = block2->Size();
  const int block2_tangent_size = block2->TangentSize();

  ConstMatrixRef cov(covariance_matrix_->values() + rows[row_begin],
                     block1_tangent_size,
                     row_size);

  // Fast path when there are no manifolds or if the user does not want
  // the covariance lifted to the ambient space.
  if ((manifold1 == nullptr && manifold2 == nullptr) ||
      !lift_covariance_to_ambient_space) {
    if (transpose) {
      MatrixRef(covariance_block, block2_tangent_size, block1_tangent_size) =
          cov.block(0, offset, block1_tangent_size, block2_tangent_size)
              .transpose();
    } else {
      MatrixRef(covariance_block, block1_tangent_size, block2_tangent_size) =
          cov.block(0, offset, block1_tangent_size, block2_tangent_size);
    }
    return true;
  }

  // If manifolds are used then the covariance that has been computed is
  // in the tangent space and it needs to be lifted back to the ambient
  // space.
  //
  // This is given by the formula
  //
  //   C'_12 = J_1 C_12 J_2'
  //
  // where C_12 is the tangent space covariance for parameter blocks 1
  // and 2, and J_1 and J_2 are respectively the tangent to ambient space
  // (plus) jacobians for parameter blocks 1 and 2.
  //
  // See Result 5.11 on page 142 of Hartley & Zisserman (2nd Edition)
  // for a proof.
  //
  // TODO(sameeragarwal): Add caching of the manifold plus_jacobian, so
  // that they are computed just once per parameter block.
  Matrix block1_jacobian(block1_size, block1_tangent_size);
  if (manifold1 == nullptr) {
    block1_jacobian.setIdentity();
  } else {
    manifold1->PlusJacobian(parameter_block1, block1_jacobian.data());
  }

  Matrix block2_jacobian(block2_size, block2_tangent_size);
  // Fast path if the user is requesting a diagonal block.
  if (parameter_block1 == parameter_block2) {
    block2_jacobian = block1_jacobian;
  } else {
    if (manifold2 == nullptr) {
      block2_jacobian.setIdentity();
    } else {
      manifold2->PlusJacobian(parameter_block2, block2_jacobian.data());
    }
  }

  if (transpose) {
    MatrixRef(covariance_block, block2_size, block1_size) =
        block2_jacobian *
        cov.block(0, offset, block1_tangent_size, block2_tangent_size)
            .transpose() *
        block1_jacobian.transpose();
  } else {
    MatrixRef(covariance_block, block1_size, block2_size) =
        block1_jacobian *
        cov.block(0, offset, block1_tangent_size, block2_tangent_size) *
        block2_jacobian.transpose();
  }
  return true;
}

bool CovarianceImpl::GetCovarianceMatrixInTangentOrAmbientSpace(
    const std::vector<const double*>& parameters,
    bool lift_covariance_to_ambient_space,
    double* covariance_matrix) const {
  CHECK(is_computed_)
      << "Covariance::GetCovarianceMatrix called before Covariance::Compute";
  CHECK(is_valid_)
      << "Covariance::GetCovarianceMatrix called when Covariance::Compute "
      << "returned false.";

  const ProblemImpl::ParameterMap& parameter_map = problem_->parameter_map();
  // The block sizes are computed in advance so that the parallel loop
  // below can index into them without further bookkeeping.
  const int num_parameters = parameters.size();
  std::vector<int> parameter_sizes;
  std::vector<int> cum_parameter_size;
  parameter_sizes.reserve(num_parameters);
  cum_parameter_size.resize(num_parameters + 1);
  cum_parameter_size[0] = 0;
  for (int i = 0; i < num_parameters; ++i) {
    ParameterBlock* block =
        FindOrDie(parameter_map, const_cast<double*>(parameters[i]));
    if (lift_covariance_to_ambient_space) {
      parameter_sizes.push_back(block->Size());
    } else {
      parameter_sizes.push_back(block->TangentSize());
    }
  }
  std::partial_sum(parameter_sizes.begin(),
                   parameter_sizes.end(),
                   cum_parameter_size.begin() + 1);
  const int max_covariance_block_size =
      *std::max_element(parameter_sizes.begin(), parameter_sizes.end());
  const int covariance_size = cum_parameter_size.back();

  // Assemble the blocks in the covariance matrix.
  MatrixRef covariance(covariance_matrix, covariance_size, covariance_size);
  const int num_threads = options_.num_threads;
  auto workspace = std::make_unique<double[]>(
      num_threads * max_covariance_block_size * max_covariance_block_size);

  bool success = true;

  // Technically the following code is a double nested loop where
  // i = 1:n, j = i:n.
  int iteration_count = (num_parameters * (num_parameters + 1)) / 2;
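  // LinearIndexToUpperTriangularIndex maps each k in [0, iteration_count)
  // back to a pair (i, j) with i <= j. With num_parameters = 3, for
  // example, the six iterations cover the pairs (0,0), (0,1), (0,2),
  // (1,1), (1,2) and (2,2).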
  problem_->context()->EnsureMinimumThreads(num_threads);
  ParallelFor(problem_->context(),
              0,
              iteration_count,
              num_threads,
              [&](int thread_id, int k) {
                int i, j;
                LinearIndexToUpperTriangularIndex(k, num_parameters, &i, &j);
                int covariance_row_idx = cum_parameter_size[i];
                int covariance_col_idx = cum_parameter_size[j];
                int size_i = parameter_sizes[i];
                int size_j = parameter_sizes[j];
                double* covariance_block =
                    workspace.get() + thread_id * max_covariance_block_size *
                                          max_covariance_block_size;
                if (!GetCovarianceBlockInTangentOrAmbientSpace(
                        parameters[i],
                        parameters[j],
                        lift_covariance_to_ambient_space,
                        covariance_block)) {
                  success = false;
                }

                covariance.block(
                    covariance_row_idx, covariance_col_idx, size_i, size_j) =
                    MatrixRef(covariance_block, size_i, size_j);

                if (i != j) {
                  covariance.block(
                      covariance_col_idx, covariance_row_idx, size_j, size_i) =
                      MatrixRef(covariance_block, size_i, size_j).transpose();
                }
              });
  return success;
}

// Determine the sparsity pattern of the covariance matrix based on
// the block pairs requested by the user.
bool CovarianceImpl::ComputeCovarianceSparsity(
    const CovarianceBlocks& original_covariance_blocks, ProblemImpl* problem) {
  EventLogger event_logger("CovarianceImpl::ComputeCovarianceSparsity");

  // Determine an ordering for the parameter blocks, by sorting the
  // parameter blocks by their pointers.
  std::vector<double*> all_parameter_blocks;
  problem->GetParameterBlocks(&all_parameter_blocks);
  const ProblemImpl::ParameterMap& parameter_map = problem->parameter_map();
  std::unordered_set<ParameterBlock*> parameter_blocks_in_use;
  std::vector<ResidualBlock*> residual_blocks;
  problem->GetResidualBlocks(&residual_blocks);

  for (auto* residual_block : residual_blocks) {
    parameter_blocks_in_use.insert(residual_block->parameter_blocks(),
                                   residual_block->parameter_blocks() +
                                       residual_block->NumParameterBlocks());
  }

  constant_parameter_blocks_.clear();
  std::vector<double*>& active_parameter_blocks =
      evaluate_options_.parameter_blocks;
  active_parameter_blocks.clear();
  for (auto* parameter_block : all_parameter_blocks) {
    ParameterBlock* block = FindOrDie(parameter_map, parameter_block);
    if (!block->IsConstant() && (parameter_blocks_in_use.count(block) > 0)) {
      active_parameter_blocks.push_back(parameter_block);
    } else {
      constant_parameter_blocks_.insert(parameter_block);
    }
  }

  std::sort(active_parameter_blocks.begin(), active_parameter_blocks.end());

  // Compute the number of rows. Map each parameter block to the
  // first row corresponding to it in the covariance matrix using the
  // ordering of parameter blocks just constructed.
  int num_rows = 0;
  parameter_block_to_row_index_.clear();
  for (auto* parameter_block : active_parameter_blocks) {
    const int parameter_block_size =
        problem->ParameterBlockTangentSize(parameter_block);
    parameter_block_to_row_index_[parameter_block] = num_rows;
    num_rows += parameter_block_size;
  }

  // Compute the number of non-zeros in the covariance matrix. Along
  // the way flip any covariance blocks which are in the lower
  // triangular part of the matrix.
  int num_nonzeros = 0;
  CovarianceBlocks covariance_blocks;
  for (const auto& block_pair : original_covariance_blocks) {
    if (constant_parameter_blocks_.count(block_pair.first) > 0 ||
        constant_parameter_blocks_.count(block_pair.second) > 0) {
      continue;
    }

    int index1 = FindOrDie(parameter_block_to_row_index_, block_pair.first);
    int index2 = FindOrDie(parameter_block_to_row_index_, block_pair.second);
    const int size1 = problem->ParameterBlockTangentSize(block_pair.first);
    const int size2 = problem->ParameterBlockTangentSize(block_pair.second);
    num_nonzeros += size1 * size2;

    // Make sure we are constructing a block upper triangular matrix.
    if (index1 > index2) {
      covariance_blocks.push_back(
          std::make_pair(block_pair.second, block_pair.first));
    } else {
      covariance_blocks.push_back(block_pair);
    }
  }

  if (covariance_blocks.empty()) {
    VLOG(2) << "No non-zero covariance blocks found";
    covariance_matrix_ = nullptr;
    return true;
  }

  // Sort the block pairs. As a consequence we get the covariance
  // blocks as they will occur in the CompressedRowSparseMatrix that
  // will store the covariance.
  std::sort(covariance_blocks.begin(), covariance_blocks.end());

  // Fill the sparsity pattern of the covariance matrix.
  covariance_matrix_ = std::make_unique<CompressedRowSparseMatrix>(
      num_rows, num_rows, num_nonzeros);
  int* rows = covariance_matrix_->mutable_rows();
  int* cols = covariance_matrix_->mutable_cols();

  // Iterate over parameter blocks and in turn over the rows of the
  // covariance matrix. For each parameter block, look in the upper
  // triangular part of the covariance matrix to see if there are any
  // blocks requested by the user. If this is the case then fill out a
  // set of compressed rows corresponding to this parameter block.
  //
  // The key thing that makes this loop work is the fact that the
  // rows/columns of the covariance matrix are ordered by the pointer
  // values of the parameter blocks. Thus iterating over the keys of
  // parameter_block_to_row_index_ corresponds to iterating over the
  // rows of the covariance matrix in order.
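  //
  // As a concrete illustration (not from the original source): with two
  // active blocks x and y of tangent sizes 2 and 1, x < y, and requested
  // pairs (x, x) and (x, y), each of the two rows of the x block has
  // columns {0, 1, 2}, giving rows = [0, 3, 6, 6] and
  // cols = [0, 1, 2, 0, 1, 2].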
  int i = 0;       // index into covariance_blocks.
  int cursor = 0;  // index into the covariance matrix.
  for (const auto& entry : parameter_block_to_row_index_) {
    const double* row_block = entry.first;
    const int row_block_size = problem->ParameterBlockTangentSize(row_block);
    int row_begin = entry.second;

    // Iterate over the covariance blocks contained in this row block
    // and count the number of columns in this row block.
    int num_col_blocks = 0;
    for (int j = i; j < covariance_blocks.size(); ++j, ++num_col_blocks) {
      const std::pair<const double*, const double*>& block_pair =
          covariance_blocks[j];
      if (block_pair.first != row_block) {
        break;
      }
    }

    // Fill out all the compressed rows for this parameter block.
    for (int r = 0; r < row_block_size; ++r) {
      rows[row_begin + r] = cursor;
      for (int c = 0; c < num_col_blocks; ++c) {
        const double* col_block = covariance_blocks[i + c].second;
        const int col_block_size =
            problem->ParameterBlockTangentSize(col_block);
        int col_begin = FindOrDie(parameter_block_to_row_index_, col_block);
        for (int k = 0; k < col_block_size; ++k) {
          cols[cursor++] = col_begin++;
        }
      }
    }

    i += num_col_blocks;
  }

  rows[num_rows] = cursor;
  return true;
}

bool CovarianceImpl::ComputeCovarianceValues() {
  if (options_.algorithm_type == DENSE_SVD) {
    return ComputeCovarianceValuesUsingDenseSVD();
  }

  if (options_.algorithm_type == SPARSE_QR) {
    if (options_.sparse_linear_algebra_library_type == EIGEN_SPARSE) {
      return ComputeCovarianceValuesUsingEigenSparseQR();
    }

    if (options_.sparse_linear_algebra_library_type == SUITE_SPARSE) {
#if !defined(CERES_NO_SUITESPARSE)
      return ComputeCovarianceValuesUsingSuiteSparseQR();
#else
      LOG(ERROR) << "SuiteSparse is required to use the SPARSE_QR algorithm "
                 << "with "
                 << "Covariance::Options::sparse_linear_algebra_library_type "
                 << "= SUITE_SPARSE.";
      return false;
#endif
    }

    LOG(ERROR) << "Unsupported "
               << "Covariance::Options::sparse_linear_algebra_library_type "
               << "= "
               << SparseLinearAlgebraLibraryTypeToString(
                      options_.sparse_linear_algebra_library_type);
    return false;
  }

  LOG(ERROR) << "Unsupported Covariance::Options::algorithm_type = "
             << CovarianceAlgorithmTypeToString(options_.algorithm_type);
  return false;
}

bool CovarianceImpl::ComputeCovarianceValuesUsingSuiteSparseQR() {
  EventLogger event_logger(
      "CovarianceImpl::ComputeCovarianceValuesUsingSuiteSparseQR");
#ifndef CERES_NO_SUITESPARSE
  if (covariance_matrix_ == nullptr) {
    // Nothing to do, all zeros covariance matrix.
    return true;
  }

  CRSMatrix jacobian;
  problem_->Evaluate(evaluate_options_, nullptr, nullptr, nullptr, &jacobian);
  event_logger.AddEvent("Evaluate");

  // Construct a compressed column form of the Jacobian.
  const int num_rows = jacobian.num_rows;
  const int num_cols = jacobian.num_cols;
  const int num_nonzeros = jacobian.values.size();

  std::vector<SuiteSparse_long> transpose_rows(num_cols + 1, 0);
  std::vector<SuiteSparse_long> transpose_cols(num_nonzeros, 0);
  std::vector<double> transpose_values(num_nonzeros, 0);
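
  // The compressed column structure is built in three passes: count the
  // entries in each column, prefix-sum the counts into column offsets,
  // then scatter each row's entries into place. The scatter pass
  // advances the offsets as it goes, so a final pass shifts them back
  // down and restores transpose_rows[0] = 0.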
  for (int idx = 0; idx < num_nonzeros; ++idx) {
    transpose_rows[jacobian.cols[idx] + 1] += 1;
  }
  for (int i = 1; i < transpose_rows.size(); ++i) {
    transpose_rows[i] += transpose_rows[i - 1];
  }
  for (int r = 0; r < num_rows; ++r) {
    for (int idx = jacobian.rows[r]; idx < jacobian.rows[r + 1]; ++idx) {
      const int c = jacobian.cols[idx];
      const int transpose_idx = transpose_rows[c];
      transpose_cols[transpose_idx] = r;
      transpose_values[transpose_idx] = jacobian.values[idx];
      ++transpose_rows[c];
    }
  }
  for (int i = transpose_rows.size() - 1; i > 0; --i) {
    transpose_rows[i] = transpose_rows[i - 1];
  }
  transpose_rows[0] = 0;

  cholmod_sparse cholmod_jacobian;
  cholmod_jacobian.nrow = num_rows;
  cholmod_jacobian.ncol = num_cols;
  cholmod_jacobian.nzmax = num_nonzeros;
  cholmod_jacobian.nz = nullptr;
  cholmod_jacobian.p = reinterpret_cast<void*>(transpose_rows.data());
  cholmod_jacobian.i = reinterpret_cast<void*>(transpose_cols.data());
  cholmod_jacobian.x = reinterpret_cast<void*>(transpose_values.data());
  cholmod_jacobian.z = nullptr;
  cholmod_jacobian.stype = 0;  // Matrix is not symmetric.
  cholmod_jacobian.itype = CHOLMOD_LONG;
  cholmod_jacobian.xtype = CHOLMOD_REAL;
  cholmod_jacobian.dtype = CHOLMOD_DOUBLE;
  cholmod_jacobian.sorted = 1;
  cholmod_jacobian.packed = 1;

  cholmod_common cc;
  cholmod_l_start(&cc);

  cholmod_sparse* R = nullptr;
  SuiteSparse_long* permutation = nullptr;

  // Compute a Q-less QR factorization of the Jacobian. Since we are
  // only interested in inverting J'J = R'R, we do not need Q. This
  // saves memory and gives us R as a permuted compressed column
  // sparse matrix.
  //
  // TODO(sameeragarwal): Currently the symbolic factorization and the
  // numeric factorization are done at the same time, and this does not
  // explicitly account for the block column and row structure in the
  // matrix. When using AMD, we have observed in the past that
  // computing the ordering with the block matrix is significantly
  // more efficient, both in runtime as well as the quality of
  // ordering computed. So, it may be worth doing that analysis
  // separately.
  const SuiteSparse_long rank = SuiteSparseQR<double>(
      SPQR_ORDERING_BESTAMD,
      options_.column_pivot_threshold < 0 ? SPQR_DEFAULT_TOL
                                          : options_.column_pivot_threshold,
      cholmod_jacobian.ncol,
      &cholmod_jacobian,
      &R,
      &permutation,
      &cc);
  event_logger.AddEvent("Numeric Factorization");

  if (R == nullptr) {
    LOG(ERROR) << "Something is wrong. SuiteSparseQR returned R = nullptr.";
    free(permutation);
    cholmod_l_finish(&cc);
    return false;
  }

  if (rank < cholmod_jacobian.ncol) {
    LOG(ERROR) << "Jacobian matrix is rank deficient. "
               << "Number of columns: " << cholmod_jacobian.ncol
               << " rank: " << rank;
    free(permutation);
    cholmod_l_free_sparse(&R, &cc);
    cholmod_l_finish(&cc);
    return false;
  }

  std::vector<int> inverse_permutation(num_cols);
  if (permutation) {
    for (SuiteSparse_long i = 0; i < num_cols; ++i) {
      inverse_permutation[permutation[i]] = i;
    }
  } else {
    for (SuiteSparse_long i = 0; i < num_cols; ++i) {
      inverse_permutation[i] = i;
    }
  }

  const int* rows = covariance_matrix_->rows();
  const int* cols = covariance_matrix_->cols();
  double* values = covariance_matrix_->mutable_values();

  // The following loop exploits the fact that the i^th column of A^{-1}
  // is given by the solution to the linear system
  //
  //   A x = e_i
  //
  // where e_i is a vector with e(i) = 1 and all other entries zero.
  //
  // Since the covariance matrix is symmetric, the i^th row and column
  // are equal.
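  //
  // Rather than forming A = J'J explicitly, each system R'R x = e_i is
  // solved with two sparse triangular solves against the R factor
  // computed above; SolveRTRWithSparseRHS exploits the single non-zero
  // in the right hand side so the forward substitution can start at
  // that index.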
  const int num_threads = options_.num_threads;
  auto workspace = std::make_unique<double[]>(num_threads * num_cols);
  problem_->context()->EnsureMinimumThreads(num_threads);
  ParallelFor(
      problem_->context(), 0, num_cols, num_threads, [&](int thread_id, int r) {
        const int row_begin = rows[r];
        const int row_end = rows[r + 1];
        if (row_end != row_begin) {
          double* solution = workspace.get() + thread_id * num_cols;
          SolveRTRWithSparseRHS<SuiteSparse_long>(
              num_cols,
              static_cast<SuiteSparse_long*>(R->i),
              static_cast<SuiteSparse_long*>(R->p),
              static_cast<double*>(R->x),
              inverse_permutation[r],
              solution);
          for (int idx = row_begin; idx < row_end; ++idx) {
            const int c = cols[idx];
            values[idx] = solution[inverse_permutation[c]];
          }
        }
      });

  free(permutation);
  cholmod_l_free_sparse(&R, &cc);
  cholmod_l_finish(&cc);
  event_logger.AddEvent("Inversion");
  return true;
#else   // CERES_NO_SUITESPARSE
  return false;
#endif  // CERES_NO_SUITESPARSE
}

bool CovarianceImpl::ComputeCovarianceValuesUsingDenseSVD() {
  EventLogger event_logger(
      "CovarianceImpl::ComputeCovarianceValuesUsingDenseSVD");
  if (covariance_matrix_ == nullptr) {
    // Nothing to do, all zeros covariance matrix.
    return true;
  }

  CRSMatrix jacobian;
  problem_->Evaluate(evaluate_options_, nullptr, nullptr, nullptr, &jacobian);
  event_logger.AddEvent("Evaluate");

  Matrix dense_jacobian(jacobian.num_rows, jacobian.num_cols);
  dense_jacobian.setZero();
  for (int r = 0; r < jacobian.num_rows; ++r) {
    for (int idx = jacobian.rows[r]; idx < jacobian.rows[r + 1]; ++idx) {
      const int c = jacobian.cols[idx];
      dense_jacobian(r, c) = jacobian.values[idx];
    }
  }
  event_logger.AddEvent("ConvertToDenseMatrix");

  Eigen::BDCSVD<Matrix> svd(dense_jacobian,
                            Eigen::ComputeThinU | Eigen::ComputeThinV);
  event_logger.AddEvent("SingularValueDecomposition");

  const Vector singular_values = svd.singularValues();
  const int num_singular_values = singular_values.rows();
  Vector inverse_squared_singular_values(num_singular_values);
  inverse_squared_singular_values.setZero();

  const double max_singular_value = singular_values[0];
  const double min_singular_value_ratio =
      std::sqrt(options_.min_reciprocal_condition_number);
  const bool automatic_truncation = (options_.null_space_rank < 0);
  const int max_rank =
      std::min(num_singular_values,
               num_singular_values - options_.null_space_rank);

  // Compute the squared inverse of the singular values. Truncate the
  // computation based on min_singular_value_ratio and
  // null_space_rank. When either of these two quantities are active,
  // the resulting covariance matrix is a Moore-Penrose inverse
  // instead of a regular inverse.
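  //
  // For example, with five singular values and null_space_rank = 2,
  // max_rank = 3 and only the three largest singular values contribute;
  // with null_space_rank < 0, the truncation point is instead determined
  // by the ratio test below.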
  for (int i = 0; i < max_rank; ++i) {
    const double singular_value_ratio =
        singular_values[i] / max_singular_value;
    if (singular_value_ratio < min_singular_value_ratio) {
      // Since the singular values are in decreasing order, if
      // automatic truncation is enabled, then from this point on
      // all values will fail the ratio test and there is nothing to
      // do in this loop.
      if (automatic_truncation) {
        break;
      } else {
        LOG(ERROR) << "Error: Covariance matrix is near rank deficient "
                   << "and the user did not specify a non-zero "
                   << "Covariance::Options::null_space_rank "
                   << "to enable the computation of a Pseudo-Inverse. "
                   << "Reciprocal condition number: "
                   << singular_value_ratio * singular_value_ratio << " "
                   << "min_reciprocal_condition_number: "
                   << options_.min_reciprocal_condition_number;
        return false;
      }
    }

    inverse_squared_singular_values[i] =
        1.0 / (singular_values[i] * singular_values[i]);
  }
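
  // With the thin SVD J = U * diag(sigma_i) * V', the covariance
  // (J'J)^{-1} equals V * diag(1 / sigma_i^2) * V'. When the loop above
  // truncated any singular values, this is the Moore-Penrose
  // pseudo-inverse rather than the exact inverse.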
  Matrix dense_covariance = svd.matrixV() *
                            inverse_squared_singular_values.asDiagonal() *
                            svd.matrixV().transpose();
  event_logger.AddEvent("PseudoInverse");

  const int num_rows = covariance_matrix_->num_rows();
  const int* rows = covariance_matrix_->rows();
  const int* cols = covariance_matrix_->cols();
  double* values = covariance_matrix_->mutable_values();
  for (int r = 0; r < num_rows; ++r) {
    for (int idx = rows[r]; idx < rows[r + 1]; ++idx) {
      const int c = cols[idx];
      values[idx] = dense_covariance(r, c);
    }
  }
  event_logger.AddEvent("CopyToCovarianceMatrix");
  return true;
}

bool CovarianceImpl::ComputeCovarianceValuesUsingEigenSparseQR() {
  EventLogger event_logger(
      "CovarianceImpl::ComputeCovarianceValuesUsingEigenSparseQR");
  if (covariance_matrix_ == nullptr) {
    // Nothing to do, all zeros covariance matrix.
    return true;
  }

  CRSMatrix jacobian;
  problem_->Evaluate(evaluate_options_, nullptr, nullptr, nullptr, &jacobian);
  event_logger.AddEvent("Evaluate");

  using EigenSparseMatrix = Eigen::SparseMatrix<double, Eigen::ColMajor>;

  // Convert the matrix to column major order as required by SparseQR.
  EigenSparseMatrix sparse_jacobian =
      Eigen::Map<Eigen::SparseMatrix<double, Eigen::RowMajor>>(
          jacobian.num_rows,
          jacobian.num_cols,
          static_cast<int>(jacobian.values.size()),
          jacobian.rows.data(),
          jacobian.cols.data(),
          jacobian.values.data());
  event_logger.AddEvent("ConvertToSparseMatrix");

  Eigen::SparseQR<EigenSparseMatrix, Eigen::COLAMDOrdering<int>> qr;
  if (options_.column_pivot_threshold > 0) {
    qr.setPivotThreshold(options_.column_pivot_threshold);
  }
  qr.compute(sparse_jacobian);
  event_logger.AddEvent("QRDecomposition");

  if (qr.info() != Eigen::Success) {
    LOG(ERROR) << "Eigen::SparseQR decomposition failed.";
    return false;
  }

  if (qr.rank() < jacobian.num_cols) {
    LOG(ERROR) << "Jacobian matrix is rank deficient. "
               << "Number of columns: " << jacobian.num_cols
               << " rank: " << qr.rank();
    return false;
  }

  const int* rows = covariance_matrix_->rows();
  const int* cols = covariance_matrix_->cols();
  double* values = covariance_matrix_->mutable_values();

  // Compute the inverse column permutation used by the QR factorization.
  Eigen::PermutationMatrix<Eigen::Dynamic, Eigen::Dynamic>
      inverse_permutation = qr.colsPermutation().inverse();

  // The following loop exploits the fact that the i^th column of A^{-1}
  // is given by the solution to the linear system
  //
  //   A x = e_i
  //
  // where e_i is a vector with e(i) = 1 and all other entries zero.
  //
  // Since the covariance matrix is symmetric, the i^th row and column
  // are equal.
  const int num_cols = jacobian.num_cols;
  const int num_threads = options_.num_threads;
  auto workspace = std::make_unique<double[]>(num_threads * num_cols);
  problem_->context()->EnsureMinimumThreads(num_threads);
  ParallelFor(
      problem_->context(), 0, num_cols, num_threads, [&](int thread_id, int r) {
        const int row_begin = rows[r];
        const int row_end = rows[r + 1];
        if (row_end != row_begin) {
          double* solution = workspace.get() + thread_id * num_cols;
          SolveRTRWithSparseRHS<int>(num_cols,
                                     qr.matrixR().innerIndexPtr(),
                                     qr.matrixR().outerIndexPtr(),
                                     &qr.matrixR().data().value(0),
                                     inverse_permutation.indices().coeff(r),
                                     solution);

          // Assign the values of the computed covariance using the
          // inverse permutation used in the QR factorization.
          for (int idx = row_begin; idx < row_end; ++idx) {
            const int c = cols[idx];
            values[idx] = solution[inverse_permutation.indices().coeff(c)];
          }
        }
      });

  event_logger.AddEvent("Inverse");
  return true;
}

}  // namespace ceres::internal