lilypond-0.0.32
[lilypond.git] / src / qlpsolve.cc
blobed49c2a02f8a5d2c6e3e0962a8a1f69dd78756ce
1 #include "qlpsolve.hh"
2 #include "const.hh"
3 #include "debug.hh"
4 #include "choleski.hh"
// Solution tolerance — roughly 1/10 mm in the layout units used here.
// (NOTE(review): presumably consumed by assert_solution(), whose body is
// not visible in this chunk — confirm.)
const Real TOL=1e-2; // roughly 1/10 mm
8 String
9 Active_constraints::status() const
11 String s("Active|Inactive [");
12 for (int i=0; i< active.size(); i++) {
13 s += String(active[i]) + " ";
16 s+="| ";
17 for (int i=0; i< inactive.size(); i++) {
18 s += String(inactive[i]) + " ";
20 s+="]";
22 return s;
25 void
26 Active_constraints::OK()
28 #ifndef NDEBUG
29 H.OK();
30 A.OK();
31 assert(active.size() +inactive.size() == opt->cons.size());
32 assert(H.dim() == opt->dim());
33 assert(active.size() == A.rows());
34 Array<int> allcons;
36 for (int i=0; i < opt->cons.size(); i++)
37 allcons.push(0);
38 for (int i=0; i < active.size(); i++) {
39 int j = active[i];
40 allcons[j]++;
42 for (int i=0; i < inactive.size(); i++) {
43 int j = inactive[i];
44 allcons[j]++;
46 for (int i=0; i < allcons.size(); i++)
47 assert(allcons[i] == 1);
48 #endif
51 Vector
52 Active_constraints::get_lagrange(Vector gradient)
54 Vector l(A*gradient);
56 return l;
59 void
60 Active_constraints::add(int k)
62 // add indices
63 int cidx=inactive[k];
64 active.push(cidx);
66 inactive.swap(k,inactive.size()-1);
67 inactive.pop();
69 Vector a( opt->cons[cidx] );
70 // update of matrices
71 Vector Ha = H*a;
72 Real aHa = a*Ha;
73 Vector addrow(Ha.dim());
74 if (abs(aHa) > EPS) {
76 a != 0, so if Ha = O(EPS), then
77 Ha * aH / aHa = O(EPS^2/EPS)
79 if H*a == 0, the constraints are dependent.
81 H -= Matrix(Ha/aHa , Ha);
85 sorry, don't know how to justify this. ..
87 addrow=Ha;
88 addrow/= aHa;
89 A -= Matrix(A*a, addrow);
90 A.insert_row(addrow,A.rows());
91 }else
92 WARN << "degenerate constraints";
95 void
96 Active_constraints::drop(int k)
98 int q=active.size()-1;
100 // drop indices
101 inactive.push(active[k]);
102 active.swap(k,q);
103 A.swap_rows(k,q);
104 active.pop();
106 Vector a(A.row(q));
107 if (a.norm() > EPS) {
111 Real q = a*opt->quad*a;
112 H += Matrix(a,a/q);
113 A -= A*opt->quad*Matrix(a,a/q);
114 }else
115 WARN << "degenerate constraints";
116 #ifndef NDEBUG
117 Vector rem_row(A.row(q));
118 assert(rem_row.norm() < EPS);
119 #endif
121 A.delete_row(q);
125 Active_constraints::Active_constraints(Ineq_constrained_qp const *op)
126 : A(0,op->dim()),
127 H(op->dim()),
128 opt(op)
130 for (int i=0; i < op->cons.size(); i++)
131 inactive.push(i);
132 Choleski_decomposition chol(op->quad);
133 H=chol.inverse();
136 /* Find the optimum which is in the planes generated by the active
137 constraints.
139 Vector
140 Active_constraints::find_active_optimum(Vector g)
142 return H*g;
145 /* *************************************************************** */
148 min_elt_index(Vector v)
150 Real m=INFTY; int idx=-1;
151 for (int i = 0; i < v.dim(); i++){
152 if (v(i) < m) {
153 idx = i;
154 m = v(i);
156 assert(v(i) <= INFTY);
158 return idx;
161 ///the numerical solving
162 /** Mordecai Avriel, Nonlinear Programming: analysis and methods (1976)
163 Prentice Hall.
165 Section 13.3
167 This is a "projected gradient" algorithm. Starting from a point x
168 the next point is found in a direction determined by projecting
169 the gradient onto the active constraints. (well, not really the
170 gradient. The optimal solution obeying the active constraints is
171 tried. This is why H = Q^-1 in initialisation) )
175 Vector
176 Ineq_constrained_qp::solve(Vector start) const
178 Active_constraints act(this);
181 act.OK();
184 Vector x(start);
185 Vector gradient=quad*x+lin;
188 Vector last_gradient(gradient);
189 int iterations=0;
191 while (iterations++ < MAXITER) {
192 Vector direction= - act.find_active_optimum(gradient);
194 mtor << "gradient "<< gradient<< "\ndirection " << direction<<"\n";
196 if (direction.norm() > EPS) {
197 mtor << act.status() << '\n';
199 Real minalf = INFTY;
201 Inactive_iter minidx(act);
205 we know the optimum on this "hyperplane". Check if we
206 bump into the edges of the simplex
209 for (Inactive_iter ia(act); ia.ok(); ia++) {
211 if (ia.vec() * direction >= 0)
212 continue;
213 Real alfa= - (ia.vec()*x - ia.rhs())/
214 (ia.vec()*direction);
216 if (minalf > alfa) {
217 minidx = ia;
218 minalf = alfa;
221 Real unbounded_alfa = 1.0;
222 Real optimal_step = min(minalf, unbounded_alfa);
224 Vector deltax=direction * optimal_step;
225 x += deltax;
226 gradient += optimal_step * (quad * deltax);
228 mtor << "step = " << optimal_step<< " (|dx| = " <<
229 deltax.norm() << ")\n";
231 if (minalf < unbounded_alfa) {
232 /* bumped into an edge. try again, in smaller space. */
233 act.add(minidx.idx());
234 mtor << "adding cons "<< minidx.idx()<<'\n';
235 continue;
237 /*ASSERT: we are at optimal solution for this "plane"*/
242 Vector lagrange_mult=act.get_lagrange(gradient);
243 int m= min_elt_index(lagrange_mult);
245 if (m>=0 && lagrange_mult(m) > 0) {
246 break; // optimal sol.
247 } else if (m<0) {
248 assert(gradient.norm() < EPS) ;
250 break;
253 mtor << "dropping cons " << m<<'\n';
254 act.drop(m);
256 if (iterations >= MAXITER)
257 WARN<<"didn't converge!\n";
259 mtor << ": found " << x<<" in " << iterations <<" iterations\n";
260 assert_solution(x);
261 return x;