lilypond-0.0.4
[lilypond.git] / qlpsolve.cc
blob13bd30e89b0c7021c0555bde61c7258bbc8d5166
1 #include "qlpsolve.hh"
2 #include "const.hh"
3 #include "debug.hh"
4 #include "choleski.hh"
// Feasibility tolerance, roughly 1/10 mm in layout units.
// NOTE(review): not referenced in the visible part of this file —
// presumably used by assert_solution(); confirm at its definition.
const Real TOL=1e-2; // roughly 1/10 mm
8 String
9 Active_constraints::status() const
11 String s("Active|Inactive [");
12 for (int i=0; i< active.sz(); i++) {
13 s += String(active[i]) + " ";
16 s+="| ";
17 for (int i=0; i< inactive.sz(); i++) {
18 s += String(inactive[i]) + " ";
20 s+="]";
22 return s;
25 void
26 Active_constraints::OK() {
27 H.OK();
28 A.OK();
29 assert(active.sz() +inactive.sz() == opt->cons.sz());
30 assert(H.dim() == opt->dim());
31 assert(active.sz() == A.rows());
32 svec<int> allcons;
34 for (int i=0; i < opt->cons.sz(); i++)
35 allcons.add(0);
36 for (int i=0; i < active.sz(); i++) {
37 int j = active[i];
38 allcons[j]++;
40 for (int i=0; i < inactive.sz(); i++) {
41 int j = inactive[i];
42 allcons[j]++;
44 for (int i=0; i < allcons.sz(); i++)
45 assert(allcons[i] == 1);
48 Vector
49 Active_constraints::get_lagrange(Vector gradient)
51 Vector l(A*gradient);
53 return l;
56 void
57 Active_constraints::add(int k)
59 // add indices
60 int cidx=inactive[k];
61 active.add(cidx);
63 inactive.swap(k,inactive.sz()-1);
64 inactive.pop();
66 Vector a( opt->cons[cidx] );
67 // update of matrices
68 Vector Ha = H*a;
69 Real aHa = a*Ha;
70 if (ABS(aHa) > EPS) {
72 a != 0, so if Ha = O(EPS), then
73 Ha * aH / aHa = O(EPS^2/EPS)
75 if H*a == 0, the constraints are dependent.
77 H -= Matrix(Ha , Ha)/(aHa);
81 sorry, don't know how to justify this. ..
83 Vector addrow(Ha/(aHa));
84 A -= Matrix(A*a, addrow);
85 A.insert_row(addrow,A.rows());
86 }else
87 WARN << "degenerate constraints";
90 void
91 Active_constraints::drop(int k)
93 int q=active.sz()-1;
95 // drop indices
96 inactive.add(active[k]);
97 active.swap(k,q);
98 A.swap_rows(k,q);
99 active.pop();
101 Vector a(A.row(q));
102 if (a.norm() > EPS) {
106 H += Matrix(a,a)/(a*opt->quad*a);
107 A -= A*opt->quad*Matrix(a,a)/(a*opt->quad*a);
108 }else
109 WARN << "degenerate constraints";
110 Vector rem_row(A.row(q));
111 assert(rem_row.norm() < EPS);
112 A.delete_row(q);
116 Active_constraints::Active_constraints(Ineq_constrained_qp const *op)
117 : A(0,op->dim()),
118 H(op->dim()),
119 opt(op)
121 for (int i=0; i < op->cons.sz(); i++)
122 inactive.add(i);
123 Choleski_decomposition chol(op->quad);
124 H=chol.inverse();
127 /* Find the optimum which is in the planes generated by the active
128 constraints.
130 Vector
131 Active_constraints::find_active_optimum(Vector g)
133 return H*g;
136 /****************************************************************/
139 min_elt_index(Vector v)
141 Real m=INFTY; int idx=-1;
142 for (int i = 0; i < v.dim(); i++){
143 if (v(i) < m) {
144 idx = i;
145 m = v(i);
147 assert(v(i) <= INFTY);
149 return idx;
152 ///the numerical solving
153 Vector
154 Ineq_constrained_qp::solve(Vector start) const
156 Active_constraints act(this);
159 act.OK();
162 Vector x(start);
163 Vector gradient=quad*x+lin;
166 Vector last_gradient(gradient);
167 int iterations=0;
169 while (iterations++ < MAXITER) {
170 Vector direction= - act.find_active_optimum(gradient);
172 mtor << "gradient "<< gradient<< "\ndirection " << direction<<"\n";
174 if (direction.norm() > EPS) {
175 mtor << act.status() << '\n';
177 Real minalf = INFTY;
179 Inactive_iter minidx(act);
183 we know the optimum on this "hyperplane". Check if we
184 bump into the edges of the simplex
187 for (Inactive_iter ia(act); ia.ok(); ia++) {
189 if (ia.vec() * direction >= 0)
190 continue;
191 Real alfa= - (ia.vec()*x - ia.rhs())/
192 (ia.vec()*direction);
194 if (minalf > alfa) {
195 minidx = ia;
196 minalf = alfa;
199 Real unbounded_alfa = 1.0;
200 Real optimal_step = MIN(minalf, unbounded_alfa);
202 Vector deltax=direction * optimal_step;
203 x += deltax;
204 gradient += optimal_step * (quad * deltax);
206 mtor << "step = " << optimal_step<< " (|dx| = " <<
207 deltax.norm() << ")\n";
209 if (minalf < unbounded_alfa) {
210 /* bumped into an edge. try again, in smaller space. */
211 act.add(minidx.idx());
212 mtor << "adding cons "<< minidx.idx()<<'\n';
213 continue;
215 /*ASSERT: we are at optimal solution for this "plane"*/
220 Vector lagrange_mult=act.get_lagrange(gradient);
221 int m= min_elt_index(lagrange_mult);
223 if (m>=0 && lagrange_mult(m) > 0) {
224 break; // optimal sol.
225 } else if (m<0) {
226 assert(gradient.norm() < EPS) ;
228 break;
231 mtor << "dropping cons " << m<<'\n';
232 act.drop(m);
234 if (iterations >= MAXITER)
235 WARN<<"didn't converge!\n";
237 mtor << ": found " << x<<" in " << iterations <<" iterations\n";
238 assert_solution(x);
239 return x;
242 /** Mordecai Avriel, Nonlinear Programming: analysis and methods (1976)
243 Prentice Hall.
245 Section 13.3
247 This is a "projected gradient" algorithm. Starting from a point x
248 the next point is found in a direction determined by projecting
249 the gradient onto the active constraints. (well, not really the
250 gradient. The optimal solution obeying the active constraints is
251 tried. This is why H = Q^-1 in initialisation) )