/* ----------------------------------------------------------------------------
 * GTSAM Copyright 2010, Georgia Tech Research Corporation,
 * Atlanta, Georgia 30332-0415
 * All Rights Reserved
 * Authors: Frank Dellaert, et al. (see THANKS for the full author list)
 * See LICENSE for the license information
 * -------------------------------------------------------------------------- */

/**
 * @file elaboratePoint2KalmanFilter.cpp
 *
 * simple linear Kalman filter on a moving 2D point, but done using factor graphs
 * This example manually creates all of the needed data structures
 *
 * @date Aug 19, 2011
 * @author Frank Dellaert
 * @author Stephen Williams
 */
#include <cassert>

#include <gtsam/geometry/Point2.h>
#include <gtsam/inference/Symbol.h>
#include <gtsam/linear/GaussianBayesNet.h>
#include <gtsam/linear/GaussianFactorGraph.h>
#include <gtsam/linear/NoiseModel.h>
#include <gtsam/nonlinear/PriorFactor.h>
#include <gtsam/slam/BetweenFactor.h>
//#include <gtsam/nonlinear/Ordering.h>

using namespace std;
using namespace gtsam;
  28. int main() {
  29. // [code below basically does SRIF with Cholesky]
  30. // Create a factor graph to perform the inference
  31. GaussianFactorGraph::shared_ptr linearFactorGraph(new GaussianFactorGraph);
  32. // Create the desired ordering
  33. Ordering::shared_ptr ordering(new Ordering);
  34. // Create a structure to hold the linearization points
  35. Values linearizationPoints;
  36. // Ground truth example
  37. // Start at origin, move to the right (x-axis): 0,0 0,1 0,2
  38. // Motion model is just moving to the right (x'-x)^2
  39. // Measurements are GPS like, (x-z)^2, where z is a 2D measurement
  40. // i.e., we should get 0,0 0,1 0,2 if there is no noise
  41. // Create new state variable
  42. Symbol x0('x',0);
  43. ordering->insert(x0, 0);
  44. // Initialize state x0 (2D point) at origin by adding a prior factor, i.e., Bayes net P(x0)
  45. // This is equivalent to x_0 and P_0
  46. Point2 x_initial(0,0);
  47. SharedDiagonal P_initial = noiseModel::Diagonal::Sigmas((Vec(2) << 0.1, 0.1));
  48. PriorFactor<Point2> factor1(x0, x_initial, P_initial);
  49. // Linearize the factor and add it to the linear factor graph
  50. linearizationPoints.insert(x0, x_initial);
  51. linearFactorGraph->push_back(factor1.linearize(linearizationPoints, *ordering));
  52. // Now predict the state at t=1, i.e. argmax_{x1} P(x1) = P(x1|x0) P(x0)
  53. // In Kalman Filter notation, this is x_{t+1|t} and P_{t+1|t}
  54. // For the Kalman Filter, this requires a motion model, f(x_{t}) = x_{t+1|t)
  55. // Assuming the system is linear, this will be of the form f(x_{t}) = F*x_{t} + B*u_{t} + w
  56. // where F is the state transition model/matrix, B is the control input model,
  57. // and w is zero-mean, Gaussian white noise with covariance Q
  58. // Note, in some models, Q is actually derived as G*w*G^T where w models uncertainty of some
  59. // physical property, such as velocity or acceleration, and G is derived from physics
  60. //
  61. // For the purposes of this example, let us assume we are using a constant-position model and
  62. // the controls are driving the point to the right at 1 m/s. Then, F = [1 0 ; 0 1], B = [1 0 ; 0 1]
  63. // and u = [1 ; 0]. Let us also assume that the process noise Q = [0.1 0 ; 0 0.1];
  64. //
  65. // In the case of factor graphs, the factor related to the motion model would be defined as
  66. // f2 = (f(x_{t}) - x_{t+1}) * Q^-1 * (f(x_{t}) - x_{t+1})^T
  67. // Conveniently, there is a factor type, called a BetweenFactor, that can generate this factor
  68. // given the expected difference, f(x_{t}) - x_{t+1}, and Q.
  69. // so, difference = x_{t+1} - x_{t} = F*x_{t} + B*u_{t} - I*x_{t}
  70. // = (F - I)*x_{t} + B*u_{t}
  71. // = B*u_{t} (for our example)
  72. Symbol x1('x',1);
  73. ordering->insert(x1, 1);
  74. Point2 difference(1,0);
  75. SharedDiagonal Q = noiseModel::Diagonal::Sigmas((Vec(2) << 0.1, 0.1));
  76. BetweenFactor<Point2> factor2(x0, x1, difference, Q);
  77. // Linearize the factor and add it to the linear factor graph
  78. linearizationPoints.insert(x1, x_initial);
  79. linearFactorGraph->push_back(factor2.linearize(linearizationPoints, *ordering));
  80. // We have now made the small factor graph f1-(x0)-f2-(x1)
  81. // where factor f1 is just the prior from time t0, P(x0)
  82. // and factor f2 is from the motion model
  83. // Eliminate this in order x0, x1, to get Bayes net P(x0|x1)P(x1)
  84. // As this is a filter, all we need is the posterior P(x1), so we just keep the root of the Bayes net
  85. //
  86. // Because of the way GTSAM works internally, we have used nonlinear class even though this example
  87. // system is linear. We first convert the nonlinear factor graph into a linear one, using the specified
  88. // ordering. Linear factors are simply numbered, and are not accessible via named key like the nonlinear
  89. // variables. Also, the nonlinear factors are linearized around an initial estimate. For a true linear
  90. // system, the initial estimate is not important.
  91. // Solve the linear factor graph, converting it into a linear Bayes Network ( P(x0,x1) = P(x0|x1)*P(x1) )
  92. GaussianSequentialSolver solver0(*linearFactorGraph);
  93. GaussianBayesNet::shared_ptr linearBayesNet = solver0.eliminate();
  94. // Extract the current estimate of x1,P1 from the Bayes Network
  95. VectorValues result = optimize(*linearBayesNet);
  96. Point2 x1_predict = linearizationPoints.at<Point2>(x1).retract(result[ordering->at(x1)]);
  97. x1_predict.print("X1 Predict");
  98. // Update the new linearization point to the new estimate
  99. linearizationPoints.update(x1, x1_predict);
  100. // Create a new, empty graph and add the prior from the previous step
  101. linearFactorGraph = GaussianFactorGraph::shared_ptr(new GaussianFactorGraph);
  102. // Convert the root conditional, P(x1) in this case, into a Prior for the next step
  103. // Some care must be done here, as the linearization point in future steps will be different
  104. // than what was used when the factor was created.
  105. // f = || F*dx1' - (F*x0 - x1) ||^2, originally linearized at x1 = x0
  106. // After this step, the factor needs to be linearized around x1 = x1_predict
  107. // This changes the factor to f = || F*dx1'' - b'' ||^2
  108. // = || F*(dx1' - (dx1' - dx1'')) - b'' ||^2
  109. // = || F*dx1' - F*(dx1' - dx1'') - b'' ||^2
  110. // = || F*dx1' - (b'' + F(dx1' - dx1'')) ||^2
  111. // -> b' = b'' + F(dx1' - dx1'')
  112. // -> b'' = b' - F(dx1' - dx1'')
  113. // = || F*dx1'' - (b' - F(dx1' - dx1'')) ||^2
  114. // = || F*dx1'' - (b' - F(x_predict - x_inital)) ||^2
  115. const GaussianConditional::shared_ptr& cg0 = linearBayesNet->back();
  116. assert(cg0->nrFrontals() == 1);
  117. assert(cg0->nrParents() == 0);
  118. linearFactorGraph->add(0, cg0->R(), cg0->d() - cg0->R()*result[ordering->at(x1)], noiseModel::Diagonal::Sigmas(cg0->get_sigmas(), true));
  119. // Create the desired ordering
  120. ordering = Ordering::shared_ptr(new Ordering);
  121. ordering->insert(x1, 0);
  122. // Now, a measurement, z1, has been received, and the Kalman Filter should be "Updated"/"Corrected"
  123. // This is equivalent to saying P(x1|z1) ~ P(z1|x1)*P(x1) ~ f3(x1)*f4(x1;z1)
  124. // where f3 is the prior from the previous step, and
  125. // where f4 is a measurement factor
  126. //
  127. // So, now we need to create the measurement factor, f4
  128. // For the Kalman Filter, this is the measurement function, h(x_{t}) = z_{t}
  129. // Assuming the system is linear, this will be of the form h(x_{t}) = H*x_{t} + v
  130. // where H is the observation model/matrix, and v is zero-mean, Gaussian white noise with covariance R
  131. //
  132. // For the purposes of this example, let us assume we have something like a GPS that returns
  133. // the current position of the robot. For this simple example, we can use a PriorFactor to model the
  134. // observation as it depends on only a single state variable, x1. To model real sensor observations
  135. // generally requires the creation of a new factor type. For example, factors for range sensors, bearing
  136. // sensors, and camera projections have already been added to GTSAM.
  137. //
  138. // In the case of factor graphs, the factor related to the measurements would be defined as
  139. // f4 = (h(x_{t}) - z_{t}) * R^-1 * (h(x_{t}) - z_{t})^T
  140. // = (x_{t} - z_{t}) * R^-1 * (x_{t} - z_{t})^T
  141. // This can be modeled using the PriorFactor, where the mean is z_{t} and the covariance is R.
  142. Point2 z1(1.0, 0.0);
  143. SharedDiagonal R1 = noiseModel::Diagonal::Sigmas((Vec(2) << 0.25, 0.25));
  144. PriorFactor<Point2> factor4(x1, z1, R1);
  145. // Linearize the factor and add it to the linear factor graph
  146. linearFactorGraph->push_back(factor4.linearize(linearizationPoints, *ordering));
  147. // We have now made the small factor graph f3-(x1)-f4
  148. // where factor f3 is the prior from previous time ( P(x1) )
  149. // and factor f4 is from the measurement, z1 ( P(x1|z1) )
  150. // Eliminate this in order x1, to get Bayes net P(x1)
  151. // As this is a filter, all we need is the posterior P(x1), so we just keep the root of the Bayes net
  152. // We solve as before...
  153. // Solve the linear factor graph, converting it into a linear Bayes Network ( P(x0,x1) = P(x0|x1)*P(x1) )
  154. GaussianSequentialSolver solver1(*linearFactorGraph);
  155. linearBayesNet = solver1.eliminate();
  156. // Extract the current estimate of x1 from the Bayes Network
  157. result = optimize(*linearBayesNet);
  158. Point2 x1_update = linearizationPoints.at<Point2>(x1).retract(result[ordering->at(x1)]);
  159. x1_update.print("X1 Update");
  160. // Update the linearization point to the new estimate
  161. linearizationPoints.update(x1, x1_update);
  162. // Wash, rinse, repeat for another time step
  163. // Create a new, empty graph and add the prior from the previous step
  164. linearFactorGraph = GaussianFactorGraph::shared_ptr(new GaussianFactorGraph);
  165. // Convert the root conditional, P(x1) in this case, into a Prior for the next step
  166. // The linearization point of this prior must be moved to the new estimate of x, and the key/index needs to be reset to 0,
  167. // the first key in the next iteration
  168. const GaussianConditional::shared_ptr& cg1 = linearBayesNet->back();
  169. assert(cg1->nrFrontals() == 1);
  170. assert(cg1->nrParents() == 0);
  171. JacobianFactor tmpPrior1 = JacobianFactor(*cg1);
  172. linearFactorGraph->add(0, tmpPrior1.getA(tmpPrior1.begin()), tmpPrior1.getb() - tmpPrior1.getA(tmpPrior1.begin()) * result[ordering->at(x1)], tmpPrior1.get_model());
  173. // Create a key for the new state
  174. Symbol x2('x',2);
  175. // Create the desired ordering
  176. ordering = Ordering::shared_ptr(new Ordering);
  177. ordering->insert(x1, 0);
  178. ordering->insert(x2, 1);
  179. // Create a nonlinear factor describing the motion model
  180. difference = Point2(1,0);
  181. Q = noiseModel::Diagonal::Sigmas((Vec(2) <, 0.1, 0.1));
  182. BetweenFactor<Point2> factor6(x1, x2, difference, Q);
  183. // Linearize the factor and add it to the linear factor graph
  184. linearizationPoints.insert(x2, x1_update);
  185. linearFactorGraph->push_back(factor6.linearize(linearizationPoints, *ordering));
  186. // Solve the linear factor graph, converting it into a linear Bayes Network ( P(x1,x2) = P(x1|x2)*P(x2) )
  187. GaussianSequentialSolver solver2(*linearFactorGraph);
  188. linearBayesNet = solver2.eliminate();
  189. // Extract the current estimate of x2 from the Bayes Network
  190. result = optimize(*linearBayesNet);
  191. Point2 x2_predict = linearizationPoints.at<Point2>(x2).retract(result[ordering->at(x2)]);
  192. x2_predict.print("X2 Predict");
  193. // Update the linearization point to the new estimate
  194. linearizationPoints.update(x2, x2_predict);
  195. // Now add the next measurement
  196. // Create a new, empty graph and add the prior from the previous step
  197. linearFactorGraph = GaussianFactorGraph::shared_ptr(new GaussianFactorGraph);
  198. // Convert the root conditional, P(x1) in this case, into a Prior for the next step
  199. const GaussianConditional::shared_ptr& cg2 = linearBayesNet->back();
  200. assert(cg2->nrFrontals() == 1);
  201. assert(cg2->nrParents() == 0);
  202. JacobianFactor tmpPrior2 = JacobianFactor(*cg2);
  203. linearFactorGraph->add(0, tmpPrior2.getA(tmpPrior2.begin()), tmpPrior2.getb() - tmpPrior2.getA(tmpPrior2.begin()) * result[ordering->at(x2)], tmpPrior2.get_model());
  204. // Create the desired ordering
  205. ordering = Ordering::shared_ptr(new Ordering);
  206. ordering->insert(x2, 0);
  207. // And update using z2 ...
  208. Point2 z2(2.0, 0.0);
  209. SharedDiagonal R2 = noiseModel::Diagonal::Sigmas((Vec(2) << 0.25, 0.25));
  210. PriorFactor<Point2> factor8(x2, z2, R2);
  211. // Linearize the factor and add it to the linear factor graph
  212. linearFactorGraph->push_back(factor8.linearize(linearizationPoints, *ordering));
  213. // We have now made the small factor graph f7-(x2)-f8
  214. // where factor f7 is the prior from previous time ( P(x2) )
  215. // and factor f8 is from the measurement, z2 ( P(x2|z2) )
  216. // Eliminate this in order x2, to get Bayes net P(x2)
  217. // As this is a filter, all we need is the posterior P(x2), so we just keep the root of the Bayes net
  218. // We solve as before...
  219. // Solve the linear factor graph, converting it into a linear Bayes Network ( P(x0,x1) = P(x0|x1)*P(x1) )
  220. GaussianSequentialSolver solver3(*linearFactorGraph);
  221. linearBayesNet = solver3.eliminate();
  222. // Extract the current estimate of x2 from the Bayes Network
  223. result = optimize(*linearBayesNet);
  224. Point2 x2_update = linearizationPoints.at<Point2>(x2).retract(result[ordering->at(x2)]);
  225. x2_update.print("X2 Update");
  226. // Update the linearization point to the new estimate
  227. linearizationPoints.update(x2, x2_update);
  228. // Wash, rinse, repeat for a third time step
  229. // Create a new, empty graph and add the prior from the previous step
  230. linearFactorGraph = GaussianFactorGraph::shared_ptr(new GaussianFactorGraph);
  231. // Convert the root conditional, P(x1) in this case, into a Prior for the next step
  232. const GaussianConditional::shared_ptr& cg3 = linearBayesNet->back();
  233. assert(cg3->nrFrontals() == 1);
  234. assert(cg3->nrParents() == 0);
  235. JacobianFactor tmpPrior3 = JacobianFactor(*cg3);
  236. linearFactorGraph->add(0, tmpPrior3.getA(tmpPrior3.begin()), tmpPrior3.getb() - tmpPrior3.getA(tmpPrior3.begin()) * result[ordering->at(x2)], tmpPrior3.get_model());
  237. // Create a key for the new state
  238. Symbol x3('x',3);
  239. // Create the desired ordering
  240. ordering = Ordering::shared_ptr(new Ordering);
  241. ordering->insert(x2, 0);
  242. ordering->insert(x3, 1);
  243. // Create a nonlinear factor describing the motion model
  244. difference = Point2(1,0);
  245. Q = noiseModel::Diagonal::Sigmas((Vec(2) << 0.1, 0.1));
  246. BetweenFactor<Point2> factor10(x2, x3, difference, Q);
  247. // Linearize the factor and add it to the linear factor graph
  248. linearizationPoints.insert(x3, x2_update);
  249. linearFactorGraph->push_back(factor10.linearize(linearizationPoints, *ordering));
  250. // Solve the linear factor graph, converting it into a linear Bayes Network ( P(x1,x2) = P(x1|x2)*P(x2) )
  251. GaussianSequentialSolver solver4(*linearFactorGraph);
  252. linearBayesNet = solver4.eliminate();
  253. // Extract the current estimate of x3 from the Bayes Network
  254. result = optimize(*linearBayesNet);
  255. Point2 x3_predict = linearizationPoints.at<Point2>(x3).retract(result[ordering->at(x3)]);
  256. x3_predict.print("X3 Predict");
  257. // Update the linearization point to the new estimate
  258. linearizationPoints.update(x3, x3_predict);
  259. // Now add the next measurement
  260. // Create a new, empty graph and add the prior from the previous step
  261. linearFactorGraph = GaussianFactorGraph::shared_ptr(new GaussianFactorGraph);
  262. // Convert the root conditional, P(x1) in this case, into a Prior for the next step
  263. const GaussianConditional::shared_ptr& cg4 = linearBayesNet->back();
  264. assert(cg4->nrFrontals() == 1);
  265. assert(cg4->nrParents() == 0);
  266. JacobianFactor tmpPrior4 = JacobianFactor(*cg4);
  267. linearFactorGraph->add(0, tmpPrior4.getA(tmpPrior4.begin()), tmpPrior4.getb() - tmpPrior4.getA(tmpPrior4.begin()) * result[ordering->at(x3)], tmpPrior4.get_model());
  268. // Create the desired ordering
  269. ordering = Ordering::shared_ptr(new Ordering);
  270. ordering->insert(x3, 0);
  271. // And update using z3 ...
  272. Point2 z3(3.0, 0.0);
  273. SharedDiagonal R3 = noiseModel::Diagonal::Sigmas((Vec(2) << 0.25, 0.25));
  274. PriorFactor<Point2> factor12(x3, z3, R3);
  275. // Linearize the factor and add it to the linear factor graph
  276. linearFactorGraph->push_back(factor12.linearize(linearizationPoints, *ordering));
  277. // We have now made the small factor graph f11-(x3)-f12
  278. // where factor f11 is the prior from previous time ( P(x3) )
  279. // and factor f12 is from the measurement, z3 ( P(x3|z3) )
  280. // Eliminate this in order x3, to get Bayes net P(x3)
  281. // As this is a filter, all we need is the posterior P(x3), so we just keep the root of the Bayes net
  282. // We solve as before...
  283. // Solve the linear factor graph, converting it into a linear Bayes Network ( P(x0,x1) = P(x0|x1)*P(x1) )
  284. GaussianSequentialSolver solver5(*linearFactorGraph);
  285. linearBayesNet = solver5.eliminate();
  286. // Extract the current estimate of x2 from the Bayes Network
  287. result = optimize(*linearBayesNet);
  288. Point2 x3_update = linearizationPoints.at<Point2>(x3).retract(result[ordering->at(x3)]);
  289. x3_update.print("X3 Update");
  290. // Update the linearization point to the new estimate
  291. linearizationPoints.update(x3, x3_update);
  292. return 0;
  293. }