lilypond-0.0.62
[lilypond.git] / lily / qlpsolve.cc
blobdc8fbcab7e6ef27168fc9df534858764a60d2de8
/*
  qlpsolve.cc -- implement Active_constraints, Inactive_iter

  source file of the LilyPond music typesetter

  (c) 1996, 1997 Han-Wen Nienhuys <hanwen@stack.nl>

  TODO:
  try fixed point arithmetic, to speed up lily.
*/
12 #include "qlpsolve.hh"
13 #include "const.hh"
14 #include "debug.hh"
15 #include "choleski.hh"
17 const Real TOL=1e-2; // roughly 1/10 mm
19 String
20 Active_constraints::status() const
22 String s("Active|Inactive [");
23 for (int i=0; i< active.size(); i++) {
24 s += String(active[i]) + " ";
27 s+="| ";
28 for (int i=0; i< inactive.size(); i++) {
29 s += String(inactive[i]) + " ";
31 s+="]";
33 return s;
36 void
37 Active_constraints::OK()
39 #ifndef NDEBUG
40 H.OK();
41 A.OK();
42 assert(active.size() +inactive.size() == opt->cons.size());
43 assert(H.dim() == opt->dim());
44 assert(active.size() == A.rows());
45 Array<int> allcons;
47 for (int i=0; i < opt->cons.size(); i++)
48 allcons.push(0);
49 for (int i=0; i < active.size(); i++) {
50 int j = active[i];
51 allcons[j]++;
53 for (int i=0; i < inactive.size(); i++) {
54 int j = inactive[i];
55 allcons[j]++;
57 for (int i=0; i < allcons.size(); i++)
58 assert(allcons[i] == 1);
59 #endif
62 Vector
63 Active_constraints::get_lagrange(Vector gradient)
65 Vector l(A*gradient);
67 return l;
70 void
71 Active_constraints::add(int k)
73 // add indices
74 int cidx=inactive[k];
75 active.push(cidx);
77 inactive.swap(k,inactive.size()-1);
78 inactive.pop();
80 Vector a( opt->cons[cidx] );
81 // update of matrices
82 Vector Ha = H*a;
83 Real aHa = a*Ha;
84 Vector addrow(Ha.dim());
85 if (abs(aHa) > EPS) {
87 a != 0, so if Ha = O(EPS), then
88 Ha * aH / aHa = O(EPS^2/EPS)
90 if H*a == 0, the constraints are dependent.
92 H -= Matrix(Ha/aHa , Ha);
96 sorry, don't know how to justify this. ..
98 addrow=Ha;
99 addrow/= aHa;
100 A -= Matrix(A*a, addrow);
101 A.insert_row(addrow,A.rows());
102 }else
103 WARN << "degenerate constraints";
106 void
107 Active_constraints::drop(int k)
109 int q=active.size()-1;
111 // drop indices
112 inactive.push(active[k]);
113 active.swap(k,q);
114 A.swap_rows(k,q);
115 active.pop();
117 Vector a(A.row(q));
118 if (a.norm() > EPS) {
122 Real q = a*opt->quad*a;
123 H += Matrix(a,a/q);
124 A -= A*opt->quad*Matrix(a,a/q);
125 }else
126 WARN << "degenerate constraints";
127 #ifndef NDEBUG
128 Vector rem_row(A.row(q));
129 assert(rem_row.norm() < EPS);
130 #endif
132 A.delete_row(q);
136 Active_constraints::Active_constraints(Ineq_constrained_qp const *op)
137 : A(0,op->dim()),
138 H(op->dim()),
139 opt(op)
141 for (int i=0; i < op->cons.size(); i++)
142 inactive.push(i);
143 Choleski_decomposition chol(op->quad);
144 H=chol.inverse();
147 /** Find the optimum which is in the planes generated by the active
148 constraints.
150 Vector
151 Active_constraints::find_active_optimum(Vector g)
153 return H*g;
156 /* *************************************************************** */
159 min_elt_index(Vector v)
161 Real m=INFTY; int idx=-1;
162 for (int i = 0; i < v.dim(); i++){
163 if (v(i) < m) {
164 idx = i;
165 m = v(i);
167 assert(v(i) <= INFTY);
169 return idx;
173 /**the numerical solving. Mordecai Avriel, Nonlinear Programming: analysis and methods (1976)
174 Prentice Hall.
176 Section 13.3
178 This is a "projected gradient" algorithm. Starting from a point x
179 the next point is found in a direction determined by projecting
180 the gradient onto the active constraints. (well, not really the
181 gradient. The optimal solution obeying the active constraints is
182 tried. This is why H = Q^-1 in initialisation) )
186 Vector
187 Ineq_constrained_qp::solve(Vector start) const
189 if (!dim())
190 return Vector(0);
192 Active_constraints act(this);
195 act.OK();
198 Vector x(start);
199 Vector gradient=quad*x+lin;
200 // Real fvalue = x*quad*x/2 + lin*x + const_term;
201 // it's no use.
203 Vector last_gradient(gradient);
204 int iterations=0;
206 while (iterations++ < MAXITER) {
207 Vector direction= - act.find_active_optimum(gradient);
209 mtor << "gradient "<< gradient<< "\ndirection " << direction<<"\n";
211 if (direction.norm() > EPS) {
212 mtor << act.status() << '\n';
214 Real minalf = INFTY;
216 Inactive_iter minidx(act);
220 we know the optimum on this "hyperplane". Check if we
221 bump into the edges of the simplex
224 for (Inactive_iter ia(act); ia.ok(); ia++) {
226 if (ia.vec() * direction >= 0)
227 continue;
228 Real alfa= - (ia.vec()*x - ia.rhs())/
229 (ia.vec()*direction);
231 if (minalf > alfa) {
232 minidx = ia;
233 minalf = alfa;
236 Real unbounded_alfa = 1.0;
237 Real optimal_step = min(minalf, unbounded_alfa);
239 Vector deltax=direction * optimal_step;
240 x += deltax;
241 gradient += optimal_step * (quad * deltax);
243 mtor << "step = " << optimal_step<< " (|dx| = " <<
244 deltax.norm() << ")\n";
246 if (minalf < unbounded_alfa) {
247 /* bumped into an edge. try again, in smaller space. */
248 act.add(minidx.idx());
249 mtor << "adding cons "<< minidx.idx()<<'\n';
250 continue;
252 /*ASSERT: we are at optimal solution for this "plane"*/
257 Vector lagrange_mult=act.get_lagrange(gradient);
258 int m= min_elt_index(lagrange_mult);
260 if (m>=0 && lagrange_mult(m) > 0) {
261 break; // optimal sol.
262 } else if (m<0) {
263 assert(gradient.norm() < EPS) ;
265 break;
268 mtor << "dropping cons " << m<<'\n';
269 act.drop(m);
271 if (iterations >= MAXITER)
272 WARN<<"didn't converge!\n";
274 mtor << ": found " << x<<" in " << iterations <<" iterations\n";
275 assert_solution(x);
276 return x;