File: testDoglegOptimizer.cpp

/* ----------------------------------------------------------------------------

 * GTSAM Copyright 2010, Georgia Tech Research Corporation,
 * Atlanta, Georgia 30332-0415
 * All Rights Reserved
 * Authors: Frank Dellaert, et al. (see THANKS for the full author list)

 * See LICENSE for the license information

 * -------------------------------------------------------------------------- */

/**
 * @file    testDoglegOptimizer.cpp
 * @brief   Unit tests for DoglegOptimizer
 * @author  Richard Roberts
 * @author  Frank Dellaert
 */

#include <CppUnitLite/TestHarness.h>

#include <tests/smallExample.h>
#include <gtsam/geometry/Pose2.h>
#include <gtsam/nonlinear/DoglegOptimizer.h>
#include <gtsam/nonlinear/DoglegOptimizerImpl.h>
#include <gtsam/nonlinear/NonlinearEquality.h>
#include <gtsam/slam/BetweenFactor.h>
#include <gtsam/inference/Symbol.h>
#include <gtsam/linear/JacobianFactor.h>
#include <gtsam/linear/GaussianBayesTree.h>
#include <gtsam/base/numericalDerivative.h>

#include <functional>
#include <boost/iterator/counting_iterator.hpp>

using namespace std;
using namespace gtsam;

// Convenience shorthand for named keys, e.g. X(1) == Symbol('x', 1)
using symbol_shorthand::X;
using symbol_shorthand::L;

/* ************************************************************************* */
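// ComputeBlend finds the point on the dogleg "second leg" -- the segment
// between the steepest-descent point x_u and the Newton point x_n -- whose
// norm equals the trust-region radius Delta. A sketch of the underlying math
// (not necessarily the exact implementation): solve
//   || x_u + tau*(x_n - x_u) || = Delta   for tau in [0,1],
// a scalar quadratic in tau, and return the blended point. The test below
// builds an arbitrary Gaussian Bayes net and verifies that the blended point
// lies exactly on the trust-region boundary.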
TEST(DoglegOptimizer, ComputeBlend) {
  // Create an arbitrary Bayes Net
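  // Each conditional is given as (frontal key, rhs vector d, upper-triangular
  // block R, then (parent key, block S) pairs), i.e. it encodes
  // R*x_frontal + sum_i S_i*x_parent_i = d, up to unit Gaussian noise.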
  GaussianBayesNet gbn;
  gbn += GaussianConditional::shared_ptr(new GaussianConditional(
      0, Vector2(1.0,2.0), (Matrix(2, 2) << 3.0,4.0,0.0,6.0).finished(),
      3, (Matrix(2, 2) << 7.0,8.0,9.0,10.0).finished(),
      4, (Matrix(2, 2) << 11.0,12.0,13.0,14.0).finished()));
  gbn += GaussianConditional::shared_ptr(new GaussianConditional(
      1, Vector2(15.0,16.0), (Matrix(2, 2) << 17.0,18.0,0.0,20.0).finished(),
      2, (Matrix(2, 2) << 21.0,22.0,23.0,24.0).finished(),
      4, (Matrix(2, 2) << 25.0,26.0,27.0,28.0).finished()));
  gbn += GaussianConditional::shared_ptr(new GaussianConditional(
      2, Vector2(29.0,30.0), (Matrix(2, 2) << 31.0,32.0,0.0,34.0).finished(),
      3, (Matrix(2, 2) << 35.0,36.0,37.0,38.0).finished()));
  gbn += GaussianConditional::shared_ptr(new GaussianConditional(
      3, Vector2(39.0,40.0), (Matrix(2, 2) << 41.0,42.0,0.0,44.0).finished(),
      4, (Matrix(2, 2) << 45.0,46.0,47.0,48.0).finished()));
  gbn += GaussianConditional::shared_ptr(new GaussianConditional(
      4, Vector2(49.0,50.0), (Matrix(2, 2) << 51.0,52.0,0.0,54.0).finished()));

  // Compute steepest descent point
  VectorValues xu = gbn.optimizeGradientSearch();

  // Compute Newton's method point
  VectorValues xn = gbn.optimize();

  // The Newton's method point should be more "adventurous", i.e. have a larger norm, than the steepest descent point
  EXPECT(xu.vector().norm() < xn.vector().norm());

  // Compute blend
  double Delta = 1.5;
  VectorValues xb = DoglegOptimizerImpl::ComputeBlend(Delta, xu, xn);
  DOUBLES_EQUAL(Delta, xb.vector().norm(), 1e-10);
}

/* ************************************************************************* */
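// ComputeDoglegPoint selects the step according to the classic dogleg rule:
//   - Delta <= ||x_u||:           scale the steepest-descent step to length Delta,
//   - ||x_u|| < Delta < ||x_n||:  blend between x_u and x_n (ComputeBlend above),
//   - Delta >= ||x_n||:           take the full Newton step x_n.
// The three Delta values below exercise each regime in turn.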
TEST(DoglegOptimizer, ComputeDoglegPoint) {
  // Create an arbitrary Bayes Net
  GaussianBayesNet gbn;
  gbn += GaussianConditional::shared_ptr(new GaussianConditional(
      0, Vector2(1.0,2.0), (Matrix(2, 2) << 3.0,4.0,0.0,6.0).finished(),
      3, (Matrix(2, 2) << 7.0,8.0,9.0,10.0).finished(),
      4, (Matrix(2, 2) << 11.0,12.0,13.0,14.0).finished()));
  gbn += GaussianConditional::shared_ptr(new GaussianConditional(
      1, Vector2(15.0,16.0), (Matrix(2, 2) << 17.0,18.0,0.0,20.0).finished(),
      2, (Matrix(2, 2) << 21.0,22.0,23.0,24.0).finished(),
      4, (Matrix(2, 2) << 25.0,26.0,27.0,28.0).finished()));
  gbn += GaussianConditional::shared_ptr(new GaussianConditional(
      2, Vector2(29.0,30.0), (Matrix(2, 2) << 31.0,32.0,0.0,34.0).finished(),
      3, (Matrix(2, 2) << 35.0,36.0,37.0,38.0).finished()));
  gbn += GaussianConditional::shared_ptr(new GaussianConditional(
      3, Vector2(39.0,40.0), (Matrix(2, 2) << 41.0,42.0,0.0,44.0).finished(),
      4, (Matrix(2, 2) << 45.0,46.0,47.0,48.0).finished()));
  gbn += GaussianConditional::shared_ptr(new GaussianConditional(
      4, Vector2(49.0,50.0), (Matrix(2, 2) << 51.0,52.0,0.0,54.0).finished()));

  // Compute dogleg point for different deltas

  double Delta1 = 0.5;  // Less than steepest descent
  VectorValues actual1 = DoglegOptimizerImpl::ComputeDoglegPoint(Delta1, gbn.optimizeGradientSearch(), gbn.optimize());
  DOUBLES_EQUAL(Delta1, actual1.vector().norm(), 1e-5);

  double Delta2 = 1.5;  // Between steepest descent and Newton's method
  VectorValues expected2 = DoglegOptimizerImpl::ComputeBlend(Delta2, gbn.optimizeGradientSearch(), gbn.optimize());
  VectorValues actual2 = DoglegOptimizerImpl::ComputeDoglegPoint(Delta2, gbn.optimizeGradientSearch(), gbn.optimize());
  DOUBLES_EQUAL(Delta2, actual2.vector().norm(), 1e-5);
  EXPECT(assert_equal(expected2, actual2));

  double Delta3 = 5.0;  // Larger than Newton's method point
  VectorValues expected3 = gbn.optimize();
  VectorValues actual3 = DoglegOptimizerImpl::ComputeDoglegPoint(Delta3, gbn.optimizeGradientSearch(), gbn.optimize());
  EXPECT(assert_equal(expected3, actual3));
}

/* ************************************************************************* */
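// Iterate performs one trust-region update: it evaluates the candidate dogleg
// step, compares the actual nonlinear error decrease against the decrease
// predicted by the linear model (the gain ratio), and adapts Delta
// accordingly. The loop below checks that each step reduces the error and
// that the returned f_error matches the error at the updated values.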
TEST(DoglegOptimizer, Iterate) {
  // really non-linear factor graph
  NonlinearFactorGraph fg = example::createReallyNonlinearFactorGraph();

  // config far from minimum
  Point2 x0(3,0);
  Values config;
  config.insert(X(1), x0);

  double Delta = 1.0;
  for(size_t it=0; it<10; ++it) {
    auto linearized = fg.linearize(config);
    
    // Iterate assumes that linear error = nonlinear error at the linearization point, and this should be true
    double nonlinearError = fg.error(config);
    double linearError = linearized->error(config.zeroVectors());
    DOUBLES_EQUAL(nonlinearError, linearError, 1e-5);
    
    // Eliminate the linearized graph, then compute the two canonical dogleg
    // ingredients: the steepest-descent point dx_u and the Newton point dx_n
    auto gbn = linearized->eliminateSequential();
    VectorValues dx_u = gbn->optimizeGradientSearch();
    VectorValues dx_n = gbn->optimize();
    DoglegOptimizerImpl::IterationResult result = DoglegOptimizerImpl::Iterate(
        Delta, DoglegOptimizerImpl::SEARCH_EACH_ITERATION, dx_u, dx_n, *gbn, fg,
        config, fg.error(config));
    Delta = result.delta;
    EXPECT(result.f_error < fg.error(config)); // Check that error decreases
    
    Values newConfig(config.retract(result.dx_d));
    config = newConfig;
    DOUBLES_EQUAL(fg.error(config), result.f_error, 1e-5); // Check that error is correctly filled in
  }
}

/* ************************************************************************* */
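// NonlinearEquality pins a variable to a fixed value as a hard constraint.
// With a feasible initial estimate the optimizer satisfies the constraint and
// recovers the second pose; with an infeasible one, evaluating the constraint
// throws, which the second half of this test checks.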
TEST(DoglegOptimizer, Constraint) {
  // Create a pose graph with a hard constraint on the first pose
  NonlinearFactorGraph graph;
  const Pose2 origin(0, 0, 0), pose2(2, 0, 0);
  graph.emplace_shared<NonlinearEquality<Pose2> >(1, origin);
  auto model = noiseModel::Diagonal::Sigmas(Vector3(0.2, 0.2, 0.1));
  graph.emplace_shared<BetweenFactor<Pose2> >(1, 2, pose2, model);

  // Create feasible initial estimate
  Values initial;
  initial.insert(1, origin); // feasible !
  initial.insert(2, Pose2(2.3, 0.1, -0.2));

  // Optimize the initial values using DoglegOptimizer
  DoglegParams params;
  // params.setVerbosityDL("VERBOSE");  // uncomment for verbose dogleg output
  DoglegOptimizer optimizer(graph, initial, params);
  Values result = optimizer.optimize();

  // Check result
  EXPECT(assert_equal(pose2, result.at<Pose2>(2)));

  // Create infeasible initial estimate
  Values infeasible;
  infeasible.insert(1, Pose2(0.1, 0, 0)); // infeasible !
  infeasible.insert(2, Pose2(2.3, 0.1, -0.2));

  // Optimizing from the infeasible estimate should throw; under TBB the
  // exception may be rethrown across threads, so only std::exception is
  // checked there
  DoglegOptimizer optimizer2(graph, infeasible, params);

#ifdef GTSAM_USE_TBB
  CHECK_EXCEPTION(optimizer2.optimize(), std::exception);
#else
  CHECK_EXCEPTION(optimizer2.optimize(), std::invalid_argument);
#endif
}

/* ************************************************************************* */
int main() { TestResult tr; return TestRegistry::runAllTests(tr); }
/* ************************************************************************* */