// Copyright (C) 2004, 2006 International Business Machines and others.
// All Rights Reserved.
// This code is published under the Eclipse Public License.
//
// Authors: Carl Laird, Andreas Waechter IBM 2004-11-05
#include "MyNLP.hpp"
#include <cassert>
#ifdef __GNUC__
#pragma GCC diagnostic ignored "-Wunused-parameter"
#endif
using namespace Ipopt;
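// For reference, the problem implemented by the callbacks below
// (reconstructed from this file; see MyNLP.hpp for the declaration) is:
//
//    min   -(x2 - 2)^2
//    s.t.   0 = x1^2 + x2 - 1
//          -1 <= x1 <= 1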
/* Constructor. */
MyNLP::MyNLP()
{ }
MyNLP::~MyNLP()
{ }
bool MyNLP::get_nlp_info(
Index& n,
Index& m,
Index& nnz_jac_g,
Index& nnz_h_lag,
IndexStyleEnum& index_style
)
{
// The problem described in MyNLP.hpp has 2 variables, x1 and x2,
n = 2;
// one equality constraint,
m = 1;
// 2 nonzeros in the jacobian (one for x1, and one for x2),
nnz_jac_g = 2;
// and 2 nonzeros in the hessian of the lagrangian
// (one in the hessian of the objective for x2,
// and one in the hessian of the constraints for x1)
nnz_h_lag = 2;
// We use the standard fortran index style for row/col entries
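// (FORTRAN_STYLE means 1-based row/column indices, matching the iRow/jCol
// entries filled in eval_jac_g and eval_h below; C_STYLE would be 0-based.)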
index_style = FORTRAN_STYLE;
return true;
}
bool MyNLP::get_bounds_info(
Index n,
Number* x_l,
Number* x_u,
Index m,
Number* g_l,
Number* g_u
)
{
// here, the n and m we gave IPOPT in get_nlp_info are passed back to us.
// If desired, we could assert to make sure they are what we think they are.
assert(n == 2);
assert(m == 1);
// x1 has a lower bound of -1 and an upper bound of 1
x_l[0] = -1.0;
x_u[0] = 1.0;
// x2 has no upper or lower bound, so we set them to
// a large negative and a large positive number.
// The value that is interpreted as -/+infinity can be
// set in the options, but it defaults to -/+1e19
x_l[1] = -1.0e19;
x_u[1] = +1.0e19;
// we have one equality constraint, so we set the bounds on this constraint
// to be equal (and zero).
g_l[0] = g_u[0] = 0.0;
return true;
}
bool MyNLP::get_starting_point(
Index n,
bool init_x,
Number* x,
bool init_z,
Number* z_L,
Number* z_U,
Index m,
bool init_lambda,
Number* lambda
)
{
// Here, we assume we only have starting values for x. If you code
// your own NLP, you can provide starting values for the others if
// you wish.
assert(init_x == true);
assert(init_z == false);
assert(init_lambda == false);
// we initialize x in bounds, in the upper right quadrant
x[0] = 0.5;
x[1] = 1.5;
return true;
}
bool MyNLP::eval_f(
Index n,
const Number* x,
bool new_x,
Number& obj_value
)
{
// return the value of the objective function
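// f(x) = -(x2 - 2)^2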
Number x2 = x[1];
obj_value = -(x2 - 2.0) * (x2 - 2.0);
return true;
}
bool MyNLP::eval_grad_f(
Index n,
const Number* x,
bool new_x,
Number* grad_f
)
{
// return the gradient of the objective function grad_{x} f(x)
// grad_{x1} f(x): x1 is not in the objective
grad_f[0] = 0.0;
// grad_{x2} f(x):
Number x2 = x[1];
grad_f[1] = -2.0 * (x2 - 2.0);
return true;
}
bool MyNLP::eval_g(
Index n,
const Number* x,
bool new_x,
Index m,
Number* g
)
{
// return the value of the constraints: g(x)
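// g(x) = -(x1^2 + x2 - 1); together with g_l = g_u = 0 from
// get_bounds_info, this imposes the equality x1^2 + x2 = 1.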
Number x1 = x[0];
Number x2 = x[1];
g[0] = -(x1 * x1 + x2 - 1.0);
return true;
}
bool MyNLP::eval_jac_g(
Index n,
const Number* x,
bool new_x,
Index m,
Index nele_jac,
Index* iRow,
Index* jCol,
Number* values
)
{
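   // Ipopt calls this method in two modes: first with values == NULL to
   // query the sparsity structure (iRow/jCol), and afterwards with
   // iRow/jCol == NULL to query the nonzero values at the current x.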
if( values == NULL )
{
// return the structure of the jacobian of the constraints
// element at 1,1: grad_{x1} g_{1}(x)
iRow[0] = 1;
jCol[0] = 1;
// element at 1,2: grad_{x2} g_{1}(x)
iRow[1] = 1;
jCol[1] = 2;
}
else
{
// return the values of the jacobian of the constraints
Number x1 = x[0];
// element at 1,1: grad_{x1} g_{1}(x)
values[0] = -2.0 * x1;
// element at 1,2: grad_{x2} g_{1}(x)
values[1] = -1.0;
}
return true;
}
bool MyNLP::eval_h(
Index n,
const Number* x,
bool new_x,
Number obj_factor,
Index m,
const Number* lambda,
bool new_lambda,
Index nele_hess,
Index* iRow,
Index* jCol,
Number* values
)
{
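   // The matrix requested here is the Hessian of the Lagrangian,
   //    L(x, lambda) = obj_factor * f(x) + lambda[0] * g(x).
   // For this problem grad^2 f = diag(0, -2) and grad^2 g = diag(-2, 0),
   // so the Hessian is diagonal. As in eval_jac_g, the structure is
   // queried with values == NULL and the values with iRow/jCol == NULL.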
if( values == NULL )
{
// return the structure. This is a symmetric matrix, fill the lower left
// triangle only.
// element at 1,1: grad^2_{x1,x1} L(x,lambda)
iRow[0] = 1;
jCol[0] = 1;
// element at 2,2: grad^2_{x2,x2} L(x,lambda)
iRow[1] = 2;
jCol[1] = 2;
// Note: off-diagonal elements are zero for this problem
}
else
{
// return the values
// element at 1,1: grad^2_{x1,x1} L(x,lambda)
values[0] = -2.0 * lambda[0];
// element at 2,2: grad^2_{x2,x2} L(x,lambda)
values[1] = -2.0 * obj_factor;
// Note: off-diagonal elements are zero for this problem
}
return true;
}
void MyNLP::finalize_solution(
SolverReturn status,
Index n,
const Number* x,
const Number* z_L,
const Number* z_U,
Index m,
const Number* g,
const Number* lambda,
Number obj_value,
const IpoptData* ip_data,
IpoptCalculatedQuantities* ip_cq
)
{
// Here is where we would store the solution to variables, or write it
// to a file, etc., so we could use the solution later. Since the
// solution is displayed on the console, we currently do nothing here.
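// As a minimal sketch of what could be done here (assuming a
// hypothetical member std::vector<Number> final_x_ were added to MyNLP):
//
//    final_x_.assign(x, x + n);
//    final_obj_ = obj_value;   // final_obj_ would also be hypothetical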
}
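// A minimal driver sketch showing how this TNLP is typically handed to
// Ipopt. In the actual example the main() lives in a separate driver
// file, so the code below is only a sketch and is guarded by a
// hypothetical macro (MYNLP_STANDALONE_DRIVER) to keep it out of the
// default build.
#ifdef MYNLP_STANDALONE_DRIVER
#include "IpIpoptApplication.hpp"

int main(
   int    /*argc*/,
   char** /*argv*/
)
{
   // Create an instance of the NLP and of the Ipopt solver.
   SmartPtr<TNLP>             mynlp = new MyNLP();
   SmartPtr<IpoptApplication> app   = IpoptApplicationFactory();

   // Initialize the application; this also processes an ipopt.opt
   // options file, if one is present in the working directory.
   ApplicationReturnStatus status = app->Initialize();
   if( status != Solve_Succeeded )
   {
      return (int) status;
   }

   // Solve the problem; finalize_solution above is called at the end.
   status = app->OptimizeTNLP(mynlp);
   return (int) status;
}
#endif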