// Numerical tolerance: residuals smaller than TOL are treated as zero.
// Per the original comment this corresponds to roughly 1/10 mm in the
// length units this solver works in.
5 const Real TOL
=1e-2; // roughly 1/10 mm
// Active_constraints::status() — build a human-readable String listing the
// indices of the active constraints followed by the inactive ones, in the
// form "Active|Inactive [...".
// NOTE(review): extraction fragment — the return type, braces, the
// separator between the two lists and the final return statement are not
// visible here; verify against the full file.
8 Active_constraints::status() const
10 String
s("Active|Inactive [");
// Append each active constraint index, space-separated.
11 for (int i
=0; i
< active
.sz(); i
++) {
12 s
+= String(active
[i
]) + " ";
// Append each inactive constraint index, space-separated.
16 for (int i
=0; i
< inactive
.sz(); i
++) {
17 s
+= String(inactive
[i
]) + " ";
// Active_constraints::OK() — assert-based invariant check:
//   * active + inactive together account for every constraint of opt;
//   * the projected matrix H matches the problem dimension;
//   * A has exactly one row per active constraint;
//   * (via allcons) every constraint index appears exactly once in the
//     union of the active and inactive lists.
// NOTE(review): extraction fragment — the bodies of the three tally loops
// (original lines 34, 36-38, 40-42) are missing; presumably they increment
// allcons[...] for each listed constraint — confirm against the full file.
25 Active_constraints::OK() {
// Partition check: no constraint may be lost or duplicated between lists.
28 assert(active
.sz() +inactive
.sz() == opt
->cons
.sz());
29 assert(H
.dim() == opt
->dim());
30 assert(active
.sz() == A
.rows());
33 for (int i
=0; i
< opt
->cons
.sz(); i
++)
35 for (int i
=0; i
< active
.sz(); i
++) {
39 for (int i
=0; i
< inactive
.sz(); i
++) {
// Every constraint must have been counted exactly once.
43 for (int i
=0; i
< allcons
.sz(); i
++)
44 assert(allcons
[i
] == 1);
// Active_constraints::get_lagrange — compute the Lagrange multipliers of
// the active constraints from the supplied gradient (used by solve() to
// decide whether the current point is optimal or which constraint to drop).
// NOTE(review): only the signature survives in this extraction fragment;
// the body is not visible here.
48 Active_constraints::get_lagrange(Vector gradient
)
// Active_constraints::add(k) — activate inactive constraint k.  The entry
// is swapped to the end of the inactive list, the constraint vector a is
// fetched, and then H and A are updated by rank-one corrections so that
// they reflect the enlarged active set.  When the update is numerically
// invalid (H*a == 0, i.e. dependent constraints) only a warning is issued.
// NOTE(review): extraction fragment — the definitions of cidx, Ha and aHa,
// the active-list bookkeeping and the if/else structure around the warning
// are missing here; verify against the full file.
56 Active_constraints::add(int k
)
// Move the chosen constraint to the tail of the inactive list.
62 inactive
.swap(k
,inactive
.sz()-1);
65 Vector
a( opt
->cons
[cidx
] );
71 a != 0, so if Ha = O(EPS), then
72 Ha * aH / aHa = O(EPS^2/EPS)
74 if H*a == 0, the constraints are dependent.
// Rank-one downdate of H: project out the direction of constraint a.
76 H
-= Matrix(Ha
, Ha
)/(aHa
);
80 sorry, don't know how to justify this. ..
// Append the corresponding (normalised) row to A.
82 Vector
addrow(Ha
/(aHa
));
83 A
-= Matrix(A
*a
, addrow
);
84 A
.insert_row(addrow
,A
.rows());
86 WARN
<< "degenerate constraints";
// Active_constraints::drop(k) — deactivate active constraint k: its index
// is returned to the inactive list and H and A get the inverse rank-one
// corrections of add() (restoring the dropped direction, scaled by the
// quadratic form a*Q*a).  Degenerate constraints (a.norm() <= EPS) only
// trigger a warning, and the removed row of A is asserted to be ~zero.
// NOTE(review): extraction fragment — the definitions of a and q, the
// active-list removal and the row deletion itself are missing here;
// verify against the full file.
90 Active_constraints::drop(int k
)
// Return the dropped constraint index to the inactive list.
95 inactive
.add(active
[k
]);
// Only perform the downdate when the constraint vector is non-degenerate.
101 if (a
.norm() > EPS
) {
// Inverse of the add() update: restore the a-direction in H,
// weighted by the quadratic form a * Q * a.
105 H
+= Matrix(a
,a
)/(a
*opt
->quad
*a
);
106 A
-= A
*opt
->quad
*Matrix(a
,a
)/(a
*opt
->quad
*a
);
108 WARN
<< "degenerate constraints";
// The row being removed from A should now be numerically zero.
109 Vector
rem_row(A
.row(q
));
110 assert(rem_row
.norm() < EPS
);
// Active_constraints constructor — set up the bookkeeping for the
// constraint set of *op.  Iterates over op->cons (per-constraint
// initialisation not visible in this fragment) and computes a Choleski
// decomposition of the quadratic form op->quad — presumably to initialise
// H as Q^-1, as the closing note of this file states; TODO confirm against
// the full file.
115 Active_constraints::Active_constraints(Ineq_constrained_qp
const *op
)
120 for (int i
=0; i
< op
->cons
.sz(); i
++)
122 Choleski_decomposition
chol(op
->quad
);
// Active_constraints::find_active_optimum(g) — find the optimum lying in
// the planes generated by the active constraints, starting from gradient g
// (used by solve() to compute the search direction).
// NOTE(review): only the truncated header comment and the signature
// survive in this extraction fragment; the body is not visible here.
126 /* Find the optimum which is in the planes generated by the active
130 Active_constraints::find_active_optimum(Vector g
)
// min_elt_index(v) — return the index of the smallest element of v;
// idx starts at -1, so an empty vector apparently yields -1.
// NOTE(review): extraction fragment — the comparison/update statements of
// the loop (original lines 142-145) and the return are missing; only the
// INFTY sanity assert survives here.
135 /****************************************************************/
138 min_elt_index(Vector v
)
140 Real m
=INFTY
; int idx
=-1;
141 for (int i
= 0; i
< v
.dim(); i
++){
// Every element must be finite (bounded by INFTY).
146 assert(v(i
) <= INFTY
);
// Ineq_constrained_qp::solve(start) — iterative active-set ("projected
// gradient") QP solver.  Per iteration (capped at MAXITER):
//   1. direction = -find_active_optimum(gradient): the step towards the
//      optimum within the currently active planes;
//   2. if the direction is nonzero, walk along it as far as possible —
//      at most alfa = 1.0, stopping earlier at the first inactive
//      constraint hit — and activate that constraint (act.add);
//   3. otherwise the plane optimum is reached: compute the Lagrange
//      multipliers; if the minimal one is positive we are optimal (break),
//      else drop that constraint and continue.
// NOTE(review): extraction fragment — the declarations of x, iterations
// and minalf, the minalf/minidx bookkeeping in the line search, the
// act.drop(m) call and the final return are missing here; verify against
// the full file.
151 ///the numerical solving
153 Ineq_constrained_qp::solve(Vector start
) const
155 Active_constraints
act(this);
// Gradient of 1/2 x'Qx + l'x at the current point: Q*x + lin.
162 Vector gradient
=quad
*x
+lin
;
165 Vector
last_gradient(gradient
);
168 while (iterations
++ < MAXITER
) {
169 Vector direction
= - act
.find_active_optimum(gradient
);
171 mtor
<< "gradient "<< gradient
<< "\ndirection " << direction
<<"\n";
// Nonzero direction: we can still move within the active planes.
173 if (direction
.norm() > EPS
) {
174 mtor
<< act
.status() << '\n';
178 Inactive_iter
minidx(act
);
182 we know the optimum on this "hyperplane". Check if we
183 bump into the edges of the simplex
// Line search: find the nearest inactive constraint along direction.
186 for (Inactive_iter
ia(act
); ia
.ok(); ia
++) {
// Constraints we are moving away from can never be hit.
188 if (ia
.vec() * direction
>= 0)
190 Real alfa
= - (ia
.vec()*x
- ia
.rhs())/
191 (ia
.vec()*direction
);
// Never step beyond the unconstrained optimum (alfa = 1).
198 Real unbounded_alfa
= 1.0;
199 Real optimal_step
= MIN(minalf
, unbounded_alfa
);
201 Vector deltax
=direction
* optimal_step
;
203 gradient
+= optimal_step
* (quad
* deltax
);
205 mtor
<< "step = " << optimal_step
<< " (|dx| = " <<
206 deltax
.norm() << ")\n";
208 if (minalf
< unbounded_alfa
) {
209 /* bumped into an edge. try again, in smaller space. */
210 act
.add(minidx
.idx());
211 mtor
<< "adding cons "<< minidx
.idx()<<'\n';
214 /*ASSERT: we are at optimal solution for this "plane"*/
219 Vector lagrange_mult
=act
.get_lagrange(gradient
);
220 int m
= min_elt_index(lagrange_mult
);
// All multipliers positive => KKT conditions hold => optimum found.
222 if (m
>=0 && lagrange_mult(m
) > 0) {
223 break; // optimal sol.
225 assert(gradient
.norm() < EPS
) ;
230 mtor
<< "dropping cons " << m
<<'\n';
233 if (iterations
>= MAXITER
)
234 WARN
<<"didn't converge!\n";
236 mtor
<< ": found " << x
<<" in " << iterations
<<" iterations\n";
241 /** Mordecai Avriel, Nonlinear Programming: analysis and methods (1976)
246 This is a "projected gradient" algorithm. Starting from a point x
247 the next point is found in a direction determined by projecting
248 the gradient onto the active constraints. (Well, not really the
249 gradient: the optimal solution obeying the active constraints is
250 tried. This is why H = Q^-1 in the initialisation.)