lilypond-0.0.3
[lilypond.git] / qlpsolve.cc
blobc3e9176dbd912b3d6c616d66090e4403109c30e6
1 #include "qlpsolve.hh"
2 #include "debug.hh"
3 #include "choleski.hh"
5 const Real TOL=1e-2; // roughly 1/10 mm
7 String
8 Active_constraints::status() const
10 String s("Active|Inactive [");
11 for (int i=0; i< active.sz(); i++) {
12 s += String(active[i]) + " ";
15 s+="| ";
16 for (int i=0; i< inactive.sz(); i++) {
17 s += String(inactive[i]) + " ";
19 s+="]";
21 return s;
24 void
25 Active_constraints::OK() {
26 H.OK();
27 A.OK();
28 assert(active.sz() +inactive.sz() == opt->cons.sz());
29 assert(H.dim() == opt->dim());
30 assert(active.sz() == A.rows());
31 svec<int> allcons;
33 for (int i=0; i < opt->cons.sz(); i++)
34 allcons.add(0);
35 for (int i=0; i < active.sz(); i++) {
36 int j = active[i];
37 allcons[j]++;
39 for (int i=0; i < inactive.sz(); i++) {
40 int j = inactive[i];
41 allcons[j]++;
43 for (int i=0; i < allcons.sz(); i++)
44 assert(allcons[i] == 1);
47 Vector
48 Active_constraints::get_lagrange(Vector gradient)
50 Vector l(A*gradient);
52 return l;
55 void
56 Active_constraints::add(int k)
58 // add indices
59 int cidx=inactive[k];
60 active.add(cidx);
62 inactive.swap(k,inactive.sz()-1);
63 inactive.pop();
65 Vector a( opt->cons[cidx] );
66 // update of matrices
67 Vector Ha = H*a;
68 Real aHa = a*Ha;
69 if (ABS(aHa) > EPS) {
71 a != 0, so if Ha = O(EPS), then
72 Ha * aH / aHa = O(EPS^2/EPS)
74 if H*a == 0, the constraints are dependent.
76 H -= Matrix(Ha , Ha)/(aHa);
80 sorry, don't know how to justify this. ..
82 Vector addrow(Ha/(aHa));
83 A -= Matrix(A*a, addrow);
84 A.insert_row(addrow,A.rows());
85 }else
86 WARN << "degenerate constraints";
89 void
90 Active_constraints::drop(int k)
92 int q=active.sz()-1;
94 // drop indices
95 inactive.add(active[k]);
96 active.swap(k,q);
97 A.swap_rows(k,q);
98 active.pop();
100 Vector a(A.row(q));
101 if (a.norm() > EPS) {
105 H += Matrix(a,a)/(a*opt->quad*a);
106 A -= A*opt->quad*Matrix(a,a)/(a*opt->quad*a);
107 }else
108 WARN << "degenerate constraints";
109 Vector rem_row(A.row(q));
110 assert(rem_row.norm() < EPS);
111 A.delete_row(q);
115 Active_constraints::Active_constraints(Ineq_constrained_qp const *op)
116 : A(0,op->dim()),
117 H(op->dim()),
118 opt(op)
120 for (int i=0; i < op->cons.sz(); i++)
121 inactive.add(i);
122 Choleski_decomposition chol(op->quad);
123 H=chol.inverse();
126 /* Find the optimum which is in the planes generated by the active
127 constraints.
129 Vector
130 Active_constraints::find_active_optimum(Vector g)
132 return H*g;
135 /****************************************************************/
138 min_elt_index(Vector v)
140 Real m=INFTY; int idx=-1;
141 for (int i = 0; i < v.dim(); i++){
142 if (v(i) < m) {
143 idx = i;
144 m = v(i);
146 assert(v(i) <= INFTY);
148 return idx;
151 ///the numerical solving
152 Vector
153 Ineq_constrained_qp::solve(Vector start) const
155 Active_constraints act(this);
158 act.OK();
161 Vector x(start);
162 Vector gradient=quad*x+lin;
165 Vector last_gradient(gradient);
166 int iterations=0;
168 while (iterations++ < MAXITER) {
169 Vector direction= - act.find_active_optimum(gradient);
171 mtor << "gradient "<< gradient<< "\ndirection " << direction<<"\n";
173 if (direction.norm() > EPS) {
174 mtor << act.status() << '\n';
176 Real minalf = INFTY;
178 Inactive_iter minidx(act);
182 we know the optimum on this "hyperplane". Check if we
183 bump into the edges of the simplex
186 for (Inactive_iter ia(act); ia.ok(); ia++) {
188 if (ia.vec() * direction >= 0)
189 continue;
190 Real alfa= - (ia.vec()*x - ia.rhs())/
191 (ia.vec()*direction);
193 if (minalf > alfa) {
194 minidx = ia;
195 minalf = alfa;
198 Real unbounded_alfa = 1.0;
199 Real optimal_step = MIN(minalf, unbounded_alfa);
201 Vector deltax=direction * optimal_step;
202 x += deltax;
203 gradient += optimal_step * (quad * deltax);
205 mtor << "step = " << optimal_step<< " (|dx| = " <<
206 deltax.norm() << ")\n";
208 if (minalf < unbounded_alfa) {
209 /* bumped into an edge. try again, in smaller space. */
210 act.add(minidx.idx());
211 mtor << "adding cons "<< minidx.idx()<<'\n';
212 continue;
214 /*ASSERT: we are at optimal solution for this "plane"*/
219 Vector lagrange_mult=act.get_lagrange(gradient);
220 int m= min_elt_index(lagrange_mult);
222 if (m>=0 && lagrange_mult(m) > 0) {
223 break; // optimal sol.
224 } else if (m<0) {
225 assert(gradient.norm() < EPS) ;
227 break;
230 mtor << "dropping cons " << m<<'\n';
231 act.drop(m);
233 if (iterations >= MAXITER)
234 WARN<<"didn't converge!\n";
236 mtor << ": found " << x<<" in " << iterations <<" iterations\n";
237 assert_solution(x);
238 return x;
/** Mordecai Avriel, Nonlinear Programming: analysis and methods (1976)
    Prentice Hall.

    Section 13.3

    This is a "projected gradient" algorithm. Starting from a point x
    the next point is found in a direction determined by projecting
    the gradient onto the active constraints. (Well, not really the
    gradient: the optimal solution obeying the active constraints is
    tried. This is why H = Q^-1 in initialisation.)