const Real TOL = 1e-2;		// roughly 1/10 mm
String
Active_constraints::status() const
{
    String s("Active|Inactive [");
    for (int i = 0; i < active.sz(); i++) {
        s += String(active[i]) + " ";
    }
    s += "| ";
    for (int i = 0; i < inactive.sz(); i++) {
        s += String(inactive[i]) + " ";
    }
    s += "]";
    return s;
}
void
Active_constraints::OK() {
    assert(active.sz() + inactive.sz() == opt->cons.sz());
    assert(H.dim() == opt->dim());
    assert(active.sz() == A.rows());

    // every constraint must be either active or inactive, exactly once
    svec<int> allcons;
    for (int i = 0; i < opt->cons.sz(); i++)
        allcons.add(0);
    for (int i = 0; i < active.sz(); i++) {
        allcons[active[i]]++;
    }
    for (int i = 0; i < inactive.sz(); i++) {
        allcons[inactive[i]]++;
    }
    for (int i = 0; i < allcons.sz(); i++)
        assert(allcons[i] == 1);
}
Vector
Active_constraints::get_lagrange(Vector gradient)
{
    Vector l(A*gradient);
    return l;
}
void
Active_constraints::add(int k)
{
    // move constraint k from the inactive to the active set
    int cidx = inactive[k];
    active.add(cidx);

    inactive.swap(k, inactive.sz()-1);
    inactive.pop();

    Vector a( opt->cons[cidx] );

    // update the matrices
    Vector Ha = H*a;
    Real aHa = a*Ha;
    if (abs(aHa) > EPS) {
        /*
          a != 0, so if Ha = O(EPS), then
          Ha * aH / aHa = O(EPS^2/EPS)

          if H*a == 0, the constraints are dependent.
          */
        H -= Matrix(Ha, Ha)/aHa;
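        /*
          this is a rank-one update: since H is symmetric,
          (H - (Ha)(Ha)^T / (a^T H a)) * a = Ha - Ha = 0,
          so from now on H maps every gradient into the plane a.x = const,
          and later search directions H*g respect this constraint.
          */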
        /*
          sorry, don't know how to justify this ...
          */
        Vector addrow(Ha/aHa);
        A -= Matrix(A*a, addrow);
        A.insert_row(addrow, A.rows());
    } else
        WARN << "degenerate constraints";
}
void
Active_constraints::drop(int k)
{
    int q = active.sz()-1;

    // move constraint k from the active to the inactive set
    inactive.add(active[k]);
    active.swap(k, q);
    A.swap_rows(k, q);
    active.pop();
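    /*
      re-expand H along the row stored for the dropped constraint, undoing the
      rank-one reduction that add() applied when the constraint became active,
      and adjust A accordingly.
      */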
    Vector a(A.row(q));
    if (a.norm() > EPS) {
        H += Matrix(a, a)/(a*opt->quad*a);
        A -= A*opt->quad*Matrix(a, a)/(a*opt->quad*a);
    } else
        WARN << "degenerate constraints";

    Vector rem_row(A.row(q));
    assert(rem_row.norm() < EPS);
    A.delete_row(q);
}
Active_constraints::Active_constraints(Ineq_constrained_qp const *op)
    : A(0, op->dim()),
      H(op->dim()),
      opt(op)
{
    // every constraint starts out inactive
    for (int i = 0; i < op->cons.sz(); i++)
        inactive.add(i);
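    /*
      quad must be positive definite for the Choleski decomposition; H starts
      out as Q^-1, so -H*gradient is the full Newton step while no constraints
      are active (see the note at the end of this file).
      */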
    Choleski_decomposition chol(op->quad);
    H = chol.inverse();
}
/** Find the optimum which is in the planes generated by the active
    constraints.
    */
Vector
Active_constraints::find_active_optimum(Vector g)
{
    return H*g;
}
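/*
   H acts as the inverse of quad restricted to the directions allowed by the
   active constraints, so x - H*g is the minimum of the quadratic within the
   active constraint planes.
*/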
/****************************************************************/

int
min_elt_index(Vector v)
{
    Real m = INFTY;
    int idx = -1;
    for (int i = 0; i < v.dim(); i++) {
        if (v(i) < m) {
            idx = i;
            m = v(i);
        }
        assert(v(i) <= INFTY);
    }
    return idx;
}
///the numerical solving
Vector
Ineq_constrained_qp::solve(Vector start) const
{
    Active_constraints act(this);
    act.OK();

    Vector x(start);
    Vector gradient = quad*x + lin;

    Vector last_gradient(gradient);
    int iterations = 0;
    while (iterations++ < MAXITER) {
        Vector direction = - act.find_active_optimum(gradient);

        mtor << "gradient " << gradient << "\ndirection " << direction << "\n";

        if (direction.norm() > EPS) {
            mtor << act.status() << '\n';

            Real minalf = INFTY;
            Inactive_iter minidx(act);
            /*
              we know the optimum on this "hyperplane".  Check whether we
              bump into the edges of the simplex.
            */
            for (Inactive_iter ia(act); ia.ok(); ia++) {
                if (ia.vec() * direction >= 0)
                    continue;
                Real alfa = - (ia.vec()*x - ia.rhs()) /
                    (ia.vec()*direction);

                if (minalf > alfa) {
                    minidx = ia;
                    minalf = alfa;
                }
            }
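            /*
              each inactive constraint still has slack c.x - rhs >= 0; when
              c.direction < 0 that slack shrinks along the step and reaches
              zero at alfa = -(c.x - rhs)/(c.direction).  minalf is the
              nearest such boundary hit.
            */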
            Real unbounded_alfa = 1.0;
            Real optimal_step = MIN(minalf, unbounded_alfa);

            Vector deltax = direction * optimal_step;
            x += deltax;
            gradient += quad * deltax;	// deltax already contains the step length

            mtor << "step = " << optimal_step << " (|dx| = " <<
                deltax.norm() << ")\n";

            if (minalf < unbounded_alfa) {
                /* bumped into an edge. try again, in smaller space. */
                act.add(minidx.idx());
                mtor << "adding cons " << minidx.idx() << '\n';
                continue;
            }
            /* ASSERT: we are at the optimal solution for this "plane" */
        }

        Vector lagrange_mult = act.get_lagrange(gradient);
        int m = min_elt_index(lagrange_mult);
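        /*
          at this restricted optimum the gradient is a combination of the
          active constraint normals; lagrange_mult holds the coefficients.
          If even the smallest multiplier is positive, relaxing any active
          constraint could only increase the objective, so we are done.
          Otherwise constraint m blocks further descent and is dropped.
        */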
        if (m >= 0 && lagrange_mult(m) > 0) {
            break;		// optimal solution
        } else if (m < 0) {
            assert(gradient.norm() < EPS);
            break;
        }

        mtor << "dropping cons " << m << '\n';
        act.drop(m);
    }
    if (iterations >= MAXITER)
        WARN << "didn't converge!\n";

    mtor << ": found " << x << " in " << iterations << " iterations\n";
    return x;
}
/** Mordecai Avriel, Nonlinear Programming: analysis and methods (1976),
    Prentice-Hall.

    This is a "projected gradient" algorithm.  Starting from a point x,
    the next point is found in a direction determined by projecting
    the gradient onto the active constraints (well, not really the
    gradient: the optimal solution obeying the active constraints is
    tried.  This is why H = Q^-1 in the initialisation).
    */
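/*
   In symbols, one iteration of solve() does (with g = quad*x + lin):

       direction  d    = -H g
       step       alfa = min(1, nearest inactive-constraint boundary along d)
       update     x   += alfa d,   g += quad * (alfa d)

   When d vanishes, the multipliers A*g decide whether the constrained
   optimum has been reached or which active constraint to drop.
*/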