rewrote loc_h_harmonic's STOP initialization to reflect report.pdf; simpler now
[dmvccm.git] / src / loc_h_dmv.py
blobb6f9ac02f40290df011110b40154b4eb4a34dfeb
1 # loc_h_dmv.py
2 #
3 # dmv reestimation and inside-outside probabilities using loc_h, and
4 # no CNF-style rules
6 # Table of Contents:
7 # 1. Grammar-class and related functions
8 # 2. P_INSIDE / inner() and inner_sent()
9 # 3. P_OUTSIDE / outer()
10 # 4. Reestimation v.1: sentences as outer loop
11 # 5. Reestimation v.2: head-types as outer loop
12 # 6. Most Probable Parse
13 # 7. Testing functions
15 import io
16 from common_dmv import *
18 ### todo: debug with @accepts once in a while, but it's SLOW
19 # from typecheck import accepts, Any
# When run as a script (rather than imported), announce that the module
# self-tests below are being exercised.
if __name__ == "__main__":
    print "loc_h_dmv module tests:"
def adj(middle, loc_h):
    """True iff the split point `middle` is adjacent to the head at loc_h.

    middle is eg. k when rewriting for i<k<j (inside probabilities)."""
    return middle in (loc_h, loc_h + 1)  # ADJ == True
def make_GO_AT(p_STOP, p_ATTACH):
    """Precompute p_GO_AT[a,h,dir,adj] = p_ATTACH[a,h,dir] * (1 - p_STOP[h,dir,adj]).

    Folds the "don't stop, then attach a" step into one lookup table,
    for both adjacency values (NON and ADJ)."""
    p_GO_AT = {}
    # dict.items() instead of the Python-2-only iteritems(): works (and
    # iterates the same pairs) under both Python 2 and Python 3.
    for (a, h, dir), p_ah in p_ATTACH.items():
        p_GO_AT[a, h, dir, NON] = p_ah * (1 - p_STOP[h, dir, NON])
        p_GO_AT[a, h, dir, ADJ] = p_ah * (1 - p_STOP[h, dir, ADJ])
    return p_GO_AT
class DMV_Grammar(io.Grammar):
    # DMV grammar: holds the probability tables p_ROOT, p_STOP, p_ATTACH
    # and p_ORDER, precomputes p_GO_AT from them, and caches per-sentence
    # inside/outside charts (used by reestimate2()).
    def __str__(self):
        # Render every non-zero probability, mostly two columns per line.
        LJUST = 47  # width of the left column
        def t(n):
            # show a POS number together with its tag string
            return "%d=%s" % (n, self.numtag(n))
        def p(dict,key):
            # 0.0-defaulting probability lookup, with a sanity check
            if key in dict:
                if dict[key] > 1.0: raise Exception, "probability > 1.0:%s"%key
                return dict[key]
            else: return 0.0
        def no_zeroL(str,tagstr,prob):
            # left column: formatted entry, or blank padding when zero
            if prob > 0.0: return (str%(tagstr,prob)).ljust(LJUST)
            else: return "".ljust(LJUST)
        def no_zeroR(str,tagstr,prob):
            # right column: formatted entry, or nothing when zero
            if prob > 0.0: return str%(tagstr,prob)
            else: return ""
        def p_a(a,h):
            # one line for the attachment pair (a|h), left and right
            p_L = p(self.p_ATTACH,(a,h,LEFT))
            p_R = p(self.p_ATTACH,(a,h,RIGHT))
            if p_L == 0.0 and p_R == 0.0:
                return ''
            else:
                if p_L > 0.0:
                    str = "p_ATTACH[%s|%s,L] = %s" % (t(a), t(h), p_L)
                    str = str.ljust(LJUST)
                else:
                    str = ''
                if p_R > 0.0:
                    str = str.ljust(LJUST)
                    str += "p_ATTACH[%s|%s,R] = %s" % (t(a), t(h), p_R)
                return '\n'+str
        root, stop, att, ord = "","","",""
        for h in self.headnums():
            root += no_zeroL("\np_ROOT[%s] = %s", t(h), p(self.p_ROOT, (h)))
            stop += '\n'
            stop += no_zeroL("p_STOP[stop|%s,L,adj] = %s", t(h), p(self.p_STOP, (h,LEFT,ADJ)))
            stop += no_zeroR("p_STOP[stop|%s,R,adj] = %s", t(h), p(self.p_STOP, (h,RIGHT,ADJ)))
            stop += '\n'
            stop += no_zeroL("p_STOP[stop|%s,L,non] = %s", t(h), p(self.p_STOP, (h,LEFT,NON)))
            stop += no_zeroR("p_STOP[stop|%s,R,non] = %s", t(h), p(self.p_STOP, (h,RIGHT,NON)))
            att += ''.join([p_a(a,h) for a in self.headnums()])
            ord += '\n'
            ord += no_zeroL("p_ORDER[ left-first|%s ] = %s", t(h), p(self.p_ORDER, (GOL,h)))
            ord += no_zeroR("p_ORDER[right-first|%s ] = %s", t(h), p(self.p_ORDER, (GOR,h)))
        return root + stop + att + ord

    def __init__(self, numtag, tagnum, p_ROOT, p_STOP, p_ATTACH, p_ORDER):
        io.Grammar.__init__(self, numtag, tagnum)
        self.p_ROOT = p_ROOT # p_ROOT[w] = p
        self.p_ORDER = p_ORDER # p_ORDER[seals, w] = p
        self.p_STOP = p_STOP # p_STOP[w, LEFT, NON] = p (etc. for LA,RN,RA)
        self.p_ATTACH = p_ATTACH # p_ATTACH[a, h, LEFT] = p (etc. for R)
        # p_GO_AT[a, h, LEFT, NON] = p (etc. for LA,RN,RA)
        self.p_GO_AT = make_GO_AT(self.p_STOP, self.p_ATTACH)
        # these are used in reestimate2():
        self.reset_iocharts()

    def get_iochart(self, sent_nums):
        # Fetch the cached (ichart, ochart) pair for this sentence, or
        # fresh empty charts if none are cached yet.
        ch_key = tuple(sent_nums)
        try:
            ichart = self._icharts[ch_key]
        except KeyError:
            ichart = {}
        try:
            ochart = self._ocharts[ch_key]
        except KeyError:
            ochart = {}
        return (ichart, ochart)

    def set_iochart(self, sent_nums, ichart, ochart):
        # Store the (possibly grown) charts back into the cache.
        self._icharts[tuple(sent_nums)] = ichart
        self._ocharts[tuple(sent_nums)] = ochart

    def reset_iocharts(self):
        # Drop all cached charts (needed whenever probabilities change).
        self._icharts = {}
        self._ocharts = {}

    def p_GO_AT_or0(self, a, h, dir, adj):
        # p_GO_AT lookup that defaults to 0.0 for unseen keys.
        try:
            return self.p_GO_AT[a, h, dir, adj]
        except KeyError:
            return 0.0
def locs(sent_nums, start, stop):
    '''Yield the between-word locations of all words in some fragment of
    sent, as (loc_w, w) pairs. The locations are offset correctly so
    that for any yielded pair, sent[loc_w] == w.

    start is inclusive, stop is exclusive, as in klein-thesis and
    Python's list-slicing.'''
    # enumerate's start argument does the offsetting for us
    for loc_w, w in enumerate(sent_nums[start:stop], start):
        yield (loc_w, w)
131 ###################################################
132 # P_INSIDE (dmv-specific) #
133 ###################################################
135 #@accepts(int, int, (int, int), int, Any(), [str], {tuple:float}, IsOneOf(None,{}))
def inner(i, j, node, loc_h, g, sent, ichart, mpptree=None):
    ''' Inside probability (P_INSIDE) of `node` spanning between-word
    positions i..j, with the head token at location loc_h.

    The ichart is of this form:
    ichart[i,j,LHS, loc_h]
    where i and j are between-word positions.

    loc_h gives adjacency (along with k for attachment rules), and is
    needed in P_STOP reestimation.

    If mpptree (a dict) is given, the best-scoring split for each chart
    key is also recorded there (used by make_mpptree).
    '''
    sent_nums = g.sent_nums(sent)

    def terminal(i,j,node, loc_h, tabs):
        # Base case: an unsealed node over a single word rewrites to that
        # word with probability p_ORDER (0.0 on any mismatch).
        if not i <= loc_h < j:
            if 'INNER' in DEBUG:
                print "%s*= 0.0 (wrong loc_h)" % tabs
            return 0.0
        elif POS(node) == sent_nums[i] and node in g.p_ORDER:
            # todo: add to ichart perhaps? Although, it _is_ simple lookup..
            prob = g.p_ORDER[node]
        else:
            if 'INNER' in DEBUG:
                print "%sLACKING TERMINAL:" % tabs
            prob = 0.0
        if 'INNER' in DEBUG:
            print "%s*= %.4f (terminal: %s -> %s_%d)" % (tabs,prob, node_str(node), sent[i], loc_h)
        return prob

    def e(i,j, (s_h,h), loc_h, n_t):
        # Recursive inside probability, memoized in ichart. n_t is only
        # the recursion depth, used to indent debug output.
        def to_mpp(p, L, R):
            # record (p, L, R) in mpptree if it beats the stored split
            if mpptree:
                key = (i,j, (s_h,h), loc_h)
                if key not in mpptree:
                    mpptree[key] = (p, L, R)
                elif mpptree[key][0] < p:
                    mpptree[key] = (p, L, R)

        def tab():
            "Tabs for debug output"
            return "\t"*n_t

        if (i, j, (s_h,h), loc_h) in ichart:
            if 'INNER' in DEBUG:
                print "%s*= %.4f in ichart: i:%d j:%d node:%s loc:%s" % (tab(),ichart[i, j, (s_h,h), loc_h], i, j,
                                                                         node_str((s_h,h)), loc_h)
            return ichart[i, j, (s_h,h), loc_h]
        else:
            # Either terminal rewrites, using p_ORDER:
            if i+1 == j and (s_h == GOR or s_h == GOL):
                return terminal(i, j, (s_h,h), loc_h, tab())
            else: # Or not at terminal level yet:
                if 'INNER' in DEBUG:
                    print "%s%s (%.1f) from %d to %d" % (tab(),node_str((s_h,h)),loc_h,i,j)
                if s_h == SEAL:
                    if h == POS(ROOT): # only used in testing, o/w we use inner_sent
                        h = sent_nums[loc_h]
                        if i != 0 or j != len(sent): raise ValueError
                        else: return g.p_ROOT[h] * e(i,j,(SEAL,h),loc_h,n_t+1)
                    # sealing = stopping in both directions; sum both orders
                    p_RGOL = g.p_STOP[h, LEFT, adj(i,loc_h)] * e(i,j,(RGOL,h),loc_h,n_t+1)
                    p_LGOR = g.p_STOP[h, RIGHT, adj(j,loc_h)] * e(i,j,(LGOR,h),loc_h,n_t+1)
                    p = p_RGOL + p_LGOR
                    to_mpp(p_RGOL, STOPKEY, (i,j, (RGOL,h),loc_h))
                    to_mpp(p_LGOR, (i,j, (RGOL,h),loc_h), STOPKEY )
                    if 'INNER' in DEBUG:
                        print "%sp= %.4f (STOP)" % (tab(), p)
                elif s_h == RGOL or s_h == GOL:
                    # right stop (RGOL only) plus all left attachments
                    p = 0.0
                    if s_h == RGOL:
                        p = g.p_STOP[h, RIGHT, adj(j,loc_h)] * e(i,j, (GOR,h),loc_h,n_t+1)
                        to_mpp(p, (i,j, (GOR,h),loc_h), STOPKEY)
                    for k in xgo_left(i, loc_h): # i < k <= loc_l(h)
                        p_R = e(k, j, ( s_h,h), loc_h, n_t+1)
                        if p_R > 0.0:
                            for loc_a,a in locs(sent_nums, i, k):
                                p_ah = g.p_GO_AT_or0(a, h, LEFT, adj(k,loc_h))
                                if p_ah > 0.0:
                                    p_L = e(i, k, (SEAL,a), loc_a, n_t+1)
                                    p_add = p_L * p_ah * p_R
                                    p += p_add
                                    to_mpp(p_add,
                                           (i, k, (SEAL,a), loc_a),
                                           (k, j, ( s_h,h), loc_h))
                    if 'INNER' in DEBUG:
                        print "%sp= %.4f (ATTACH)" % (tab(), p)
                elif s_h == GOR or s_h == LGOR:
                    # left stop (LGOR only) plus all right attachments
                    p = 0.0
                    if s_h == LGOR:
                        p = g.p_STOP[h, LEFT, adj(i,loc_h)] * e(i,j, (GOL,h),loc_h,n_t+1)
                        to_mpp(p, (i,j, (GOL,h),loc_h), STOPKEY)
                    for k in xgo_right(loc_h, j): # loc_l(h) < k < j
                        p_L = e(i, k, ( s_h,h), loc_h, n_t+1)
                        if p_L > 0.0:
                            for loc_a,a in locs(sent_nums,k,j):
                                p_ah = g.p_GO_AT_or0(a, h, RIGHT, adj(k,loc_h))
                                p_R = e(k, j, (SEAL,a), loc_a, n_t+1)
                                p_add = p_L * p_ah * p_R
                                p += p_add
                                to_mpp(p_add,
                                       (i, k, ( s_h,h), loc_h),
                                       (k, j, (SEAL,a), loc_a))
                    if 'INNER' in DEBUG:
                        print "%sp= %.4f (ATTACH)" % (tab(), p)
                # elif s_h == GOL: # todo
                ichart[i, j, (s_h,h), loc_h] = p
                return p
    # end of e-function

    inner_prob = e(i,j,node,loc_h, 0)
    if 'INNER' in DEBUG:
        print debug_ichart(g,sent,ichart)
    return inner_prob
# end of dmv.inner(i, j, node, loc_h, g, sent, ichart,mpptree)
def debug_ichart(g,sent,ichart):
    # Render the inside chart for debugging, one line per chart entry.
    str = "---ICHART:---\n"
    for (s,t,LHS,loc_h),v in ichart.iteritems():
        # NOTE(review): sent[s] is printed for both span edges; the
        # second one presumably ought to be the word at the right edge
        # (t is a between-word position, so sent[t] could overflow) --
        # confirm before relying on this output.
        str += "%s -> %s_%d ... %s_%d (loc_h:%s):\t%s\n" % (node_str(LHS,g.numtag),
                                                            sent[s], s, sent[s], t, loc_h, v)
    str += "---ICHART:end---\n"
    return str
def inner_sent(g, sent, ichart):
    """P(sent): for every word w taken as root, add p_ROOT[w] times the
    inside probability of the sealed w spanning the whole sentence."""
    total = 0
    for loc_w, w in locs(g.sent_nums(sent), 0, len(sent)):
        total += g.p_ROOT[w] * inner(0, len(sent), (SEAL, w), loc_w, g, sent, ichart)
    return total
267 ###################################################
268 # P_OUTSIDE (dmv-specific) #
269 ###################################################
271 #@accepts(int, int, (int, int), int, Any(), [str], {tuple:float}, {tuple:float})
def outer(i,j,w_node,loc_w, g, sent, ichart, ochart):
    ''' Outside probability (P_OUTSIDE) of w_node over i..j.

    http://www.student.uib.no/~kun041/dmvccm/DMVCCM.html#outer

    w_node is a pair (seals,POS); the w in klein-thesis is made up of
    POS(w) and loc_w
    '''
    sent_nums = g.sent_nums(sent)
    if POS(w_node) not in sent_nums[i:j]:
        # sanity check, w must be able to dominate sent[i:j]
        return 0.0

    # local functions:
    def e(i,j,LHS,loc_h): # P_{INSIDE}
        # inside probability, memoized through ichart
        try:
            return ichart[i,j,LHS,loc_h]
        except KeyError:
            return inner(i,j,LHS,loc_h,g,sent,ichart)

    def f(i,j,w_node,loc_w):
        # recursive outside probability, memoized through ochart
        if not (i <= loc_w < j):
            return 0.0
        if (i,j,w_node,loc_w) in ochart:
            return ochart[i,j, w_node,loc_w]
        if w_node == ROOT:
            if i == 0 and j == len(sent):
                return 1.0
            else: # ROOT may only be used on full sentence
                return 0.0
        # but we may have non-ROOTs (stops) over full sentence too:
        w = POS(w_node)
        s_w = seals(w_node)

        # todo: try either if p_M > 0.0: or sum(), and speed-test them

        if s_w == SEAL: # w == a
            # a sealed w is either the sentence root, or an argument
            # attached to some head h whose span extends past i or j
            # todo: do the i<sent<j check here to save on calls?
            p = g.p_ROOT[w] * f(i,j,ROOT,loc_w)
            # left attach
            for k in xgt(j, sent): # j<k<len(sent)+1
                for loc_h,h in locs(sent_nums,j,k):
                    p_wh = g.p_GO_AT_or0(w, h, LEFT, adj(j, loc_h))
                    for s_h in [RGOL, GOL]:
                        p += f(i,k,(s_h,h),loc_h) * p_wh * e(j,k,(s_h,h),loc_h)
            # right attach
            for k in xlt(i): # k<i
                for loc_h,h in locs(sent_nums,k,i):
                    p_wh = g.p_GO_AT_or0(w, h, RIGHT, adj(i, loc_h))
                    for s_h in [LGOR, GOR]:
                        p += e(k,i,(s_h,h), loc_h) * p_wh * f(k,j,(s_h,h), loc_h)

        elif s_w == RGOL or s_w == GOL: # w == h, left stop + left attach
            if s_w == RGOL:
                s_h = SEAL
            else: # s_w == GOL
                s_h = LGOR
            p = g.p_STOP[w, LEFT, adj(i,loc_w)] * f(i,j,( s_h,w),loc_w)
            for k in xlt(i): # k<i
                for loc_a,a in locs(sent_nums,k,i):
                    p_aw = g.p_GO_AT_or0(a, w, LEFT, adj(i, loc_w))
                    p += e(k,i, (SEAL,a),loc_a) * p_aw * f(k,j,w_node,loc_w)

        elif s_w == GOR or s_w == LGOR: # w == h, right stop + right attach
            if s_w == GOR:
                s_h = RGOL
            else: # s_w == LGOR
                s_h = SEAL
            p = g.p_STOP[w, RIGHT, adj(j,loc_w)] * f(i,j,( s_h,w),loc_w)
            for k in xgt(j, sent): # j<k<len(sent)+1
                for loc_a,a in locs(sent_nums,j,k):
                    p_ah = g.p_GO_AT_or0(a, w, RIGHT, adj(j, loc_w))
                    p += f(i,k,w_node,loc_w) * p_ah * e(j,k,(SEAL,a),loc_a)

        ochart[i,j,w_node,loc_w] = p
        return p
    # end outer.f()

    return f(i,j,w_node,loc_w)
# end outer(i,j,w_node,loc_w, g,sent, ichart,ochart)
354 ###################################################
355 # Reestimation v.1: #
356 # Sentences as outer loop #
357 ###################################################
def reest_zeros(h_nums):
    '''Build the zero-initialized numerator/denominator dict for our 6+
    reestimation formulas. The ROOT denominator counts sentences (it
    holds a sum over f_sent, not p_sent).'''
    # todo: p_ORDER?
    counts = {('ROOT', 'den'): 0.0}
    for head in h_nums:
        counts['ROOT', 'num', head] = 0.0
        for seal in (GOR, GOL, RGOL, LGOR):
            key = (seal, head)
            counts['hat_a', 'den', key] = 0.0 # = c()
            # not all arguments are attached to, so the
            # ('hat_a','num',a,(s_h,h)) entries are only created as they
            # show up, in reest_freq
            for adjacency in (NON, ADJ):
                counts['STOP', 'num', key, adjacency] = 0.0
                counts['STOP', 'den', key, adjacency] = 0.0
    return counts
def reest_freq(g, corpus):
    # One EM pass over the corpus: accumulate expected rule frequencies
    # (numerators and denominators) into the dict built by reest_zeros().
    fr = reest_zeros(g.headnums())
    ichart = {}
    ochart = {}
    p_sent = None # 50 % speed increase on storing this locally

    # local functions altogether 2x faster than global
    def c(i,j,LHS,loc_h,sent):
        # expected count of LHS over (i,j): P_INSIDE * P_OUTSIDE / P(sent)
        if not p_sent > 0.0:
            return p_sent

        p_in = e(i,j, LHS,loc_h,sent)
        if not p_in > 0.0:
            return p_in

        p_out = f(i,j, LHS,loc_h,sent)
        return p_in * p_out / p_sent
    # end reest_freq.c()

    def f(i,j,LHS,loc_h,sent): # P_{OUTSIDE}
        try:
            return ochart[i,j,LHS,loc_h]
        except KeyError:
            return outer(i,j,LHS,loc_h,g,sent,ichart,ochart)
    # end reest_freq.f()

    def e(i,j,LHS,loc_h,sent): # P_{INSIDE}
        try:
            return ichart[i,j,LHS,loc_h]
        except KeyError:
            return inner(i,j,LHS,loc_h,g,sent,ichart)
    # end reest_freq.e()

    def w_left(i,j, x,loc_h,sent,sent_nums):
        # accumulate the attachment numerators ('hat_a','num',a,x) for
        # every possible left argument a of x over the span (i,j)
        if not p_sent > 0.0: return

        h = POS(x)
        a_k = {}
        for k in xtween(i, j):
            p_out = f(i,j, x,loc_h, sent)
            if not p_out > 0.0:
                continue
            p_R = e(k,j, x,loc_h, sent)
            if not p_R > 0.0:
                continue

            for loc_a,a in locs(sent_nums, i,k): # i<=loc_l(a)<k
                p_rule = g.p_GO_AT_or0(a, h, LEFT, adj(k, loc_h))
                p_L = e(i,k, (SEAL,a), loc_a, sent)
                p = p_L * p_out * p_R * p_rule
                try: a_k[a] += p
                except KeyError: a_k[a] = p

        for a,p in a_k.iteritems():
            try: fr['hat_a','num',a,x] += p / p_sent
            except KeyError: fr['hat_a','num',a,x] = p / p_sent
    # end reest_freq.w_left()

    def w_right(i,j, x,loc_h,sent,sent_nums):
        # mirror image of w_left: right arguments a of x over (i,j)
        if not p_sent > 0.0: return

        h = POS(x)
        a_k = {}
        for k in xtween(i, j):
            p_out = f(i,j, x,loc_h, sent)
            if not p_out > 0.0:
                continue
            p_L = e(i,k, x,loc_h, sent)
            if not p_L > 0.0:
                continue

            for loc_a,a in locs(sent_nums, k,j): # k<=loc_l(a)<j
                p_rule = g.p_GO_AT_or0(a, h, RIGHT, adj(k, loc_h))
                p_R = e(k,j, (SEAL,a),loc_a, sent)
                p = p_L * p_out * p_R * p_rule
                try: a_k[a] += p
                except KeyError: a_k[a] = p

        for a,p in a_k.iteritems():
            try: fr['hat_a','num',a,x] += p / p_sent
            except KeyError: fr['hat_a','num',a,x] = p / p_sent
    # end reest_freq.w_right()

    # in reest_freq:
    for sent in corpus:
        if 'REEST' in DEBUG:
            print sent
        ichart = {}
        ochart = {}
        p_sent = inner_sent(g, sent, ichart)
        fr['ROOT','den'] += 1 # divide by p_sent per h!

        sent_nums = g.sent_nums(sent)

        for loc_h,h in locs(sent_nums,0,len(sent)+1): # locs-stop is exclusive, thus +1
            # root:
            fr['ROOT','num',h] += g.p_ROOT[h] * e(0,len(sent), (SEAL,h),loc_h, sent) \
                                  / p_sent

            loc_l_h = loc_h
            loc_r_h = loc_l_h+1
            # left non-adjacent stop:
            for i in xlt(loc_l_h):
                # NOTE(review): the next two lines read `j` before the
                # inner `for j` loop below binds it (NameError on first
                # use, or a stale value afterwards) -- looks buggy;
                # confirm against the reestimation formulas.
                fr['STOP','num',(GOL,h),NON] += c(loc_l_h, j, (LGOR, h),loc_h, sent)
                fr['STOP','den',(GOL,h),NON] += c(loc_l_h, j, (GOL, h),loc_h, sent)
                for j in xgteq(loc_r_h, sent):
                    fr['STOP','num',(RGOL,h),NON] += c(i, j, (SEAL, h),loc_h, sent)
                    fr['STOP','den',(RGOL,h),NON] += c(i, j, (RGOL, h),loc_h, sent)
            # left adjacent stop, i = loc_l_h
            fr['STOP','num',(GOL,h),ADJ] += c(loc_l_h, loc_r_h, (LGOR, h),loc_h, sent)
            fr['STOP','den',(GOL,h),ADJ] += c(loc_l_h, loc_r_h, (GOL, h),loc_h, sent)
            for j in xgteq(loc_r_h, sent):
                fr['STOP','num',(RGOL,h),ADJ] += c(loc_l_h, j, (SEAL, h),loc_h, sent)
                fr['STOP','den',(RGOL,h),ADJ] += c(loc_l_h, j, (RGOL, h),loc_h, sent)
            # right non-adjacent stop:
            for j in xgt(loc_r_h, sent):
                fr['STOP','num',(GOR,h),NON] += c(loc_l_h, j, (RGOL, h),loc_h, sent)
                fr['STOP','den',(GOR,h),NON] += c(loc_l_h, j, (GOR, h),loc_h, sent)
                # NOTE(review): the loop variable `i` below is never used
                # in its body (spans start at loc_l_h regardless) --
                # presumably one of the span edges should be i; confirm.
                for i in xlteq(loc_l_h):
                    fr['STOP','num',(LGOR,h),NON] += c(loc_l_h, j, (SEAL, h),loc_h, sent)
                    fr['STOP','den',(LGOR,h),NON] += c(loc_l_h, j, (LGOR, h),loc_h, sent)
            # right adjacent stop, j = loc_r_h
            fr['STOP','num',(GOR,h),ADJ] += c(loc_l_h, loc_r_h, (RGOL, h),loc_h, sent)
            fr['STOP','den',(GOR,h),ADJ] += c(loc_l_h, loc_r_h, (GOR, h),loc_h, sent)
            # NOTE(review): despite the comment above, the two lines below
            # use `j` left over from the previous loop rather than
            # loc_r_h, and ignore their own loop variable `i` -- confirm.
            for i in xlteq(loc_l_h):
                fr['STOP','num',(LGOR,h),ADJ] += c(loc_l_h, j, (SEAL, h),loc_h, sent)
                fr['STOP','den',(LGOR,h),ADJ] += c(loc_l_h, j, (LGOR, h),loc_h, sent)

            # left attachment:
            if 'REEST_ATTACH' in DEBUG:
                print "Lattach %s: for i < %s"%(g.numtag(h),sent[0:loc_h+1])
            for s_h in [RGOL, GOL]:
                x = (s_h, h)
                for i in xlt(loc_l_h): # i < loc_l(h)
                    if 'REEST_ATTACH' in DEBUG:
                        print "\tfor j >= %s"%sent[loc_h:len(sent)]
                    for j in xgteq(loc_r_h, sent): # j >= loc_r(h)
                        fr['hat_a','den',x] += c(i,j, x,loc_h, sent) # v_q in L&Y
                        if 'REEST_ATTACH' in DEBUG:
                            print "\t\tc( %d , %d, %s, %s, sent)=%.4f"%(i,j,node_str(x),loc_h,fr['hat_a','den',x])
                        w_left(i, j, x,loc_h, sent,sent_nums) # compute w for all a in sent

            # right attachment:
            if 'REEST_ATTACH' in DEBUG:
                print "Rattach %s: for i <= %s"%(g.numtag(h),sent[0:loc_h+1])
            for s_h in [GOR, LGOR]:
                x = (s_h, h)
                for i in xlteq(loc_l_h): # i <= loc_l(h)
                    if 'REEST_ATTACH' in DEBUG:
                        print "\tfor j > %s"%sent[loc_h:len(sent)]
                    for j in xgt(loc_r_h, sent): # j > loc_r(h)
                        fr['hat_a','den',x] += c(i,j, x,loc_h, sent) # v_q in L&Y
                        if 'REEST_ATTACH' in DEBUG:
                            print "\t\tc( %d , %d, %s, %s, sent)=%.4f"%(loc_h,j,node_str(x),loc_h,fr['hat_a','den',x])
                        w_right(i,j, x,loc_h, sent,sent_nums) # compute w for all a in sent
        # end for loc_h,h
    # end for sent

    return fr
def reestimate(old_g, corpus):
    """One EM iteration (v.1, sentences as the outer loop): collect
    expected frequencies over the corpus, then rebuild each head's
    probabilities into a brand-new grammar, which is returned."""
    freqs = reest_freq(old_g, corpus)
    p_ROOT = {}
    p_STOP = {}
    p_ATTACH = {}

    for head in old_g.headnums():
        # reest_head fills in p_ROOT, p_STOP and p_ATTACH for this head
        reest_head(head, freqs, old_g, p_ROOT, p_STOP, p_ATTACH)

    numtag, tagnum = old_g.get_nums_tags()
    return DMV_Grammar(numtag, tagnum, p_ROOT, p_STOP, p_ATTACH, old_g.p_ORDER)
def reest_head(h, fr, g, p_ROOT, p_STOP, p_ATTACH):
    "Given a single head, update g with the reestimated probability."
    # (p_ROOT/p_STOP/p_ATTACH are filled in-place; g is only read, for
    # its head numbers.)
    # remove 0-prob stuff? todo
    try:
        p_ROOT[h] = fr['ROOT','num',h] / fr['ROOT','den']
    except KeyError:
        p_ROOT[h] = 0.0

    for dir in [LEFT,RIGHT]:
        for adj in [ADJ, NON]: # p_STOP
            # p_STOP[h,dir,adj] sums the ratio estimates over both seal
            # types that stop in direction dir
            p_STOP[h, dir, adj] = 0.0
            for s_h in dirseal(dir):
                x = (s_h,h)
                p = fr['STOP','den', x, adj]
                if p > 0.0:
                    p = fr['STOP', 'num', x, adj] / p
                p_STOP[h, dir, adj] += p

        for s_h in dirseal(dir): # make hat_a for p_ATTACH
            x = (s_h,h)
            p_c = fr['hat_a','den',x]

            for a in g.headnums():
                if (a,h,dir) not in p_ATTACH:
                    p_ATTACH[a,h,dir] = 0.0
                try: # (a,x) might not be in hat_a
                    p_ATTACH[a,h,dir] += fr['hat_a','num',a,x] / p_c
                except KeyError: pass
                except ZeroDivisionError: pass
586 ###################################################
587 # Reestimation v.2: #
588 # Heads as outer loop #
589 ###################################################
def locs_h(h, sent_nums):
    '''Return the between-word locations of all tokens of h in sent.'''
    found = []
    for loc_w, w in locs(sent_nums, 0, len(sent_nums)):
        if w == h:
            found.append(loc_w)
    return found
def locs_a(a, sent_nums, start, stop):
    '''Return the between-word locations of all tokens of a in some
    fragment of sent. The locations are offset correctly so that for
    any returned loc_w, sent[loc_w] == a.

    start is inclusive, stop is exclusive, as in klein-thesis and
    Python's list-slicing (eg. return left-loc).'''
    found = []
    for loc_w, w in locs(sent_nums, start, stop):
        if w == a:
            found.append(loc_w)
    return found
def inner2(i, j, node, loc_h, g, sent):
    """Like inner(), but memoizes the charts on the grammar (keyed by
    the sentence) instead of taking them as arguments.

    Bug fix: the original referenced the undefined names `s_n` and `x`;
    they are the sentence's number sequence and the node, respectively.
    """
    s_n = g.sent_nums(sent)  # was undefined (NameError) in the original
    ichart, ochart = g.get_iochart(s_n)
    try:
        p = ichart[i, j, node, loc_h]  # `x` was undefined; the key is `node`
    except KeyError:
        p = inner(i, j, node, loc_h, g, sent, ichart)
    g.set_iochart(s_n, ichart, ochart)
    return p
def inner_sent2(g, sent):
    """Sentence probability, with the chart cached on the grammar.

    Bug fix: `s_n` was referenced but never defined in the original; it
    is the sentence's number sequence.
    """
    s_n = g.sent_nums(sent)  # was undefined (NameError) in the original
    ichart, ochart = g.get_iochart(s_n)
    p = inner_sent(g, sent, ichart)
    g.set_iochart(s_n, ichart, ochart)
    return p
def outer2(i, j, w_node, loc_w, g, sent):
    """Like outer(), but memoizes the charts on the grammar.

    Bug fixes: `s_n` was never defined, and on a chart miss the original
    called inner() (passing outer()'s argument list, including ochart)
    instead of outer().
    """
    s_n = g.sent_nums(sent)  # was undefined (NameError) in the original
    ichart, ochart = g.get_iochart(s_n)
    try:
        p = ochart[i, j, w_node, loc_w]
    except KeyError:
        # was inner(...) in the original -- an outside probability is wanted
        p = outer(i, j, w_node, loc_w, g, sent, ichart, ochart)
    g.set_iochart(s_n, ichart, ochart)
    return p
def reestimate2(old_g, corpus):
    """One EM iteration (v.2, heads as the outer loop): reestimate each
    head's probabilities in turn and return the resulting new grammar."""
    p_ROOT = {}
    p_STOP = {}
    p_ATTACH = {}

    for head in old_g.headnums():
        # reest_head2 fills in p_ROOT, p_STOP and p_ATTACH for this head
        reest_head2(head, old_g, corpus, p_ROOT, p_STOP, p_ATTACH)

    numtag, tagnum = old_g.get_nums_tags()
    return DMV_Grammar(numtag, tagnum, p_ROOT, p_STOP, p_ATTACH, old_g.p_ORDER)
def hat_d2(xbar, x, xi, xj, g, corpus): # stop helper
    # hat-d estimate for STOP reestimation: the ratio of expected counts
    # of xbar to x, summed over the spans generated by xi/xj around each
    # occurrence of the head in the corpus.
    def c(x,loc_x,i,j): return c2(x,loc_x,i,j,g,s_n,sent)
    # (c closes over s_n and sent, which the corpus loop below binds
    # before c is ever called)

    h = POS(x)
    if h != POS(xbar): raise ValueError

    num, den = 0.0, 0.0
    for s_n,sent in [(g.sent_nums(sent),sent) for sent in corpus]:
        for loc_h in locs_h(h,s_n):
            loc_l_h, loc_r_h = loc_h, loc_h + 1
            for i in xi(loc_l_h):
                for j in xj(loc_r_h, s_n):
                    # print "s:%s %d,%d"%(sent,i,j)
                    num += c(xbar,loc_h,i,j)
                    den += c(x,loc_h,i,j)
    if den == 0.0:
        return den
    return num/den # eg. SEAL/RGOL, xbar/x
def c2(x,loc_h,i,j,g,s_n,sent):
    # Expected count of x over the span (i,j): inside * outside / P(sent),
    # using (and updating) the charts cached on the grammar.
    ichart,ochart = g.get_iochart(s_n)

    def f(i,j,x,loc_h): # P_{OUTSIDE}
        try: return ochart[i,j,x,loc_h]
        except KeyError: return outer(i,j,x,loc_h,g,sent,ichart,ochart)
    def e(i,j,x,loc_h): # P_{INSIDE}
        try: return ichart[i,j,x,loc_h]
        except KeyError: return inner(i,j,x,loc_h,g,sent,ichart)

    p_sent = inner_sent(g, sent, ichart)
    if not p_sent > 0.0:
        # NOTE(review): the early returns below skip set_iochart, so
        # chart entries computed on these paths are not saved back --
        # presumably only a caching loss, but confirm it is intended.
        return p_sent

    p_in = e(i,j, x,loc_h)
    if not p_in > 0.0:
        return p_in

    p_out = f(i,j, x,loc_h)

    g.set_iochart(s_n,ichart,ochart)
    return p_in * p_out / p_sent
def w2(a, x,loc_h, dir, i, j, g, s_n,sent):
    # Attachment numerator helper: expected count of x taking argument a
    # in direction dir over (i,j), summed over all split points k and all
    # locations of a, normalized by the sentence probability.
    ichart,ochart = g.get_iochart(s_n)

    def f(i,j,x,loc_h): # P_{OUTSIDE}
        try: return ochart[i,j,x,loc_h]
        except KeyError: return outer(i,j,x,loc_h,g,sent,ichart,ochart)
    def e(i,j,x,loc_h): # P_{INSIDE}
        try: return ichart[i,j,x,loc_h]
        except KeyError: return inner(i,j,x,loc_h,g,sent,ichart)

    h = POS(x)
    p_sent = inner_sent(g, sent, ichart)
    # NOTE(review): unlike c2(), a zero p_sent is not guarded against
    # here, so the final division could raise ZeroDivisionError --
    # confirm p_sent > 0 always holds for corpus sentences.

    if dir == LEFT:
        L, R = (SEAL,a),x
    else:
        L, R = x,(SEAL,a)
    w_sum = 0.0

    for k in xtween(i,j):
        if dir == LEFT:
            start, stop = i, k
        else:
            start, stop = k, j
        for loc_a in locs_a(a, s_n, start, stop):
            if dir == LEFT:
                loc_L, loc_R = loc_a, loc_h
            else:
                loc_L, loc_R = loc_h, loc_a
            p = g.p_GO_AT_or0(a,h,dir,adj(k,loc_h))
            in_L = e(i,k,L,loc_L)
            in_R = e(k,j,R,loc_R)
            out = f(i,j,x,loc_h)
            w_sum += p * in_L * in_R * out

    g.set_iochart(s_n,ichart,ochart)
    return w_sum/p_sent
def hat_a2(a, x, dir, g, corpus): # attachment helper
    """hat-a estimate for ATTACH reestimation: expected attachments of a
    to x in direction dir, divided by the expected count of x."""
    def w(a, x, loc_x, dir, i, j):
        return w2(a, x, loc_x, dir, i, j, g, s_n, sent)
    def c(x, loc_x, i, j):
        return c2(x, loc_x, i, j, g, s_n, sent)

    h = POS(x)
    # span-edge generators depend on the attachment direction
    if dir == LEFT:
        xi, xj = xlt, xgteq
    else:
        xi, xj = xlteq, xgt
    num = 0.0
    den = 0.0

    for sent in corpus:
        s_n = g.sent_nums(sent)
        for loc_h in locs_h(h, s_n):
            loc_l_h = loc_h
            loc_r_h = loc_h + 1
            for i in xi(loc_l_h):
                for j in xj(loc_r_h, sent):
                    num += w(a, x, loc_h, dir, i, j)
                    den += c(x, loc_h, i, j)
    if den == 0.0:
        return den
    return num / den
def reest_root2(h,g,corpus):
    """Reestimate p_ROOT[h]: the average, over all corpus sentences, of
    the expected relative frequency of h being the sentence root."""
    total = 0.0
    corpus_size = 0.0
    for sent in corpus:
        s_n = g.sent_nums(sent)
        corpus_size += 1.0
        ichart, ochart = g.get_iochart(s_n)
        den = inner_sent(g, sent, ichart)
        num = 0.0
        for loc_h in locs_h(h, s_n):
            num += g.p_ROOT[h] * inner(0, len(s_n), (SEAL, h), loc_h, g, sent, ichart)
        g.set_iochart(s_n, ichart, ochart)
        total += num / den
    return total / corpus_size
def reest_head2(h, g, corpus, p_ROOT, p_STOP, p_ATTACH):
    # Reestimate all probabilities for the single head h (v.2), filling
    # the given p_ROOT/p_STOP/p_ATTACH dicts in-place. Progress is
    # printed along the way since this is slow.
    print "h: %d=%s ..."%(h,g.numtag(h)),
    def hat_d(xbar,x,xi,xj): return hat_d2(xbar,x,xi,xj, g, corpus)
    def hat_a(a, x, dir ): return hat_a2(a, x, dir, g, corpus)

    # each p_STOP sums the hat-d estimates of the two seal types that
    # stop in that direction (cf. reest_head in v.1)
    p_STOP[h, LEFT,NON] = \
        hat_d((SEAL,h),(RGOL,h),xlt, xgteq) + \
        hat_d((LGOR,h),( GOL,h),xlt, xeq)
    p_STOP[h, LEFT,ADJ] = \
        hat_d((SEAL,h),(RGOL,h),xeq, xgteq) + \
        hat_d((LGOR,h),( GOL,h),xeq, xeq)
    p_STOP[h,RIGHT,NON] = \
        hat_d((RGOL,h),( GOR,h),xeq, xgt) + \
        hat_d((SEAL,h),(LGOR,h),xlteq,xgt)
    p_STOP[h,RIGHT,ADJ] = \
        hat_d((RGOL,h),( GOR,h),xeq, xeq) + \
        hat_d((SEAL,h),(LGOR,h),xlteq,xeq)
    print "stops done...",

    p_ROOT[h] = reest_root2(h,g,corpus)
    print "root done...",

    # attachment: sum the hat-a estimates over both seal types per side
    for a in g.headnums():
        p_ATTACH[a,h,LEFT] = \
            hat_a(a, (GOL,h),LEFT) + \
            hat_a(a,(RGOL,h),LEFT)
        p_ATTACH[a,h,RIGHT] = \
            hat_a(a, (GOR,h),RIGHT) + \
            hat_a(a,(LGOR,h),RIGHT)

    print "attachment done"
791 ###################################################
792 # Most Probable Parse: #
793 ###################################################
# Sentinel chart keys for the STOP and ROOT pseudo-nodes in mpp-trees
# (the -1 span/location values cannot occur for real chart entries).
STOPKEY = (-1,-1,STOP,-1)
ROOTKEY = (-1,-1,ROOT,-1)
def make_mpptree(g, sent):
    '''Tell inner() to make an mpptree, connect ROOT to this. (Logically,
    this should be part of inner_sent though...)'''
    ichart = {}
    mpptree = {ROOTKEY: (0.0, ROOTKEY, None)}
    for loc_w, w in locs(g.sent_nums(sent), 0, len(sent)):
        # probability of w being the root, spanning the whole sentence
        prob = g.p_ROOT[w] * inner(0, len(sent), (SEAL, w), loc_w, g, sent, ichart, mpptree)
        if mpptree[ROOTKEY][0] < prob:
            # best root so far; ROOT's right child is the sealed w
            mpptree[ROOTKEY] = (prob, ROOTKEY, (0, len(sent), (SEAL, w), loc_w))
    return mpptree
def parse_mpptree(mpptree, sent):
    '''mpptree is a dict of the form {k:(p,L,R),...}; where k, L and R
    are `keys' of the form (i,j,node,loc).

    returns an mpp of the form [((head, loc_h),(arg, loc_a)), ...],
    where head and arg are tags.'''
    # helpers for picking apart chart keys and tree entries:
    def node_of(key):
        return key[2]
    def locnode_of(key):
        return (node_of(key), key[3])
    def locpos_of(key):
        return (POS(node_of(key)), key[3])
    def is_terminal(key):
        seal = seals(node_of(key))  # i+1 == j
        return key[0] + 1 == key[1] and (seal == GOR or seal == GOL)
    def left_of(entry):
        return entry[1]
    def right_of(entry):
        return entry[2]

    # arbitrarily, "ROOT attaches to right". We add it here to
    # avoid further complications:
    firstkey = right_of(mpptree[ROOTKEY])
    deps = set([(locpos_of(ROOTKEY), locpos_of(firstkey), RIGHT)])
    queue = [firstkey]

    while queue:
        key = queue.pop()
        if is_terminal(key):
            continue
        L = left_of(mpptree[key])
        R = right_of(mpptree[key])
        if locnode_of(key) == locnode_of(L): # Rattach
            deps.add((locpos_of(key), locpos_of(R), LEFT))
            queue.extend([L, R])
        elif locnode_of(key) == locnode_of(R): # Lattach
            deps.add((locpos_of(key), locpos_of(L), RIGHT))
            queue.extend([L, R])
        elif R == STOPKEY:
            queue.append(L)
        elif L == STOPKEY:
            queue.append(R)
    return deps
def mpp(g, sent):
    """The most probable parse of sent, as a set of
    ((head-tag, loc_h), (arg-tag, loc_a)) dependency pairs."""
    deps = set()
    for (h, loc_h), (a, loc_a), dir in parse_mpptree(make_mpptree(g, sent), sent):
        deps.add(((g.numtag(h), loc_h), (g.numtag(a), loc_a)))
    return deps
868 ########################################################################
869 # testing functions: #
870 ########################################################################
# Tiny hand-made POS-tag corpus used by the testing functions below.
testcorpus = [s.split() for s in ['det nn vbd c vbd','vbd nn c vbd',
                                  'det nn vbd', 'det nn vbd c pp',
                                  'det nn vbd', 'det vbd vbd c pp',
                                  'det nn vbd', 'det nn vbd c vbd',
                                  'det nn vbd', 'det nn vbd c vbd',
                                  'det nn vbd', 'det nn vbd c vbd',
                                  'det nn vbd', 'det nn vbd c pp',
                                  'det nn vbd pp', 'det nn vbd', ]]
def testgrammar():
    # Build the harmonic-initialized grammar over testcorpus, pinning the
    # initialization constants to the values the regression tests were
    # created with.
    import loc_h_harmonic
    reload(loc_h_harmonic)

    # make sure these are the way they were when setting up the tests:
    loc_h_harmonic.HARMONIC_C = 0.0
    loc_h_harmonic.FNONSTOP_MIN = 25
    loc_h_harmonic.FSTOP_MIN = 5
    loc_h_harmonic.RIGHT_FIRST = 1.0
    loc_h_harmonic.OLD_STOP_CALC = True

    return loc_h_harmonic.initialize(testcorpus)
def testreestimation2():
    """Run one v.2 reestimation pass on the test grammar and return the
    reestimated grammar.

    Bug fix: the original discarded reestimate2()'s return value (the
    new grammar) and returned the *initial* grammar instead -- compare
    testreestimation(), which keeps the result of reestimate().
    """
    g2 = testgrammar()
    g2 = reestimate2(g2, testcorpus)
    return g2
def testreestimation():
    """Run one v.1 reestimation pass on the test grammar and return the
    reestimated grammar."""
    return reestimate(testgrammar(), testcorpus)
def testmpp_regression(mpptree,k_n):
    # Compare mpptree against a stored known-good most-probable-parse
    # chart. k_n is how many elements of each key to compare (4 also
    # checks loc_h; 3 would not). Mismatches are printed, not raised.
    mpp = {ROOTKEY: (2.877072116829971e-05, STOPKEY, (0, 3, (2, 3), 1)),
           (0, 1, (1, 1), 0): (0.1111111111111111, (0, 1, (0, 1), 0), STOPKEY),
           (0, 1, (2, 1), 0): (0.049382716049382713, STOPKEY, (0, 1, (1, 1), 0)),
           (0, 3, (1, 3), 1): (0.00027619892321567721,
                               (0, 1, (2, 1), 0),
                               (1, 3, (1, 3), 1)),
           (0, 3, (2, 3), 1): (0.00012275507698474543, STOPKEY, (0, 3, (1, 3), 1)),
           (1, 3, (0, 3), 1): (0.025280986819448362,
                               (1, 2, (0, 3), 1),
                               (2, 3, (2, 4), 2)),
           (1, 3, (1, 3), 1): (0.0067415964851862296, (1, 3, (0, 3), 1), STOPKEY),
           (2, 3, (1, 4), 2): (0.32692307692307693, (2, 3, (0, 4), 2), STOPKEY),
           (2, 3, (2, 4), 2): (0.037721893491124266, STOPKEY, (2, 3, (1, 4), 2))}
    for k,(v,L,R) in mpp.iteritems():
        k2 = k[0:k_n] # 3 if the new does not check loc_h
        if type(k)==str:
            k2 = k
        if k2 not in mpptree:
            print "mpp regression, %s missing"%(k2,)
        else:
            vnew = mpptree[k2][0]
            # compare at 10 decimals to allow for float noise
            if not "%.10f"%vnew == "%.10f"%v:
                print "mpp regression, wanted %s=%.5f, got %.5f"%(k2,v,vnew)
def testgrammar_a():
    # Hand-made two-head grammar (heads 'h' and 'a') with known
    # probabilities, for the outer() regression tests below.
    h, a = 0, 1
    p_ROOT, p_STOP, p_ATTACH, p_ORDER = {},{},{},{}
    p_ROOT[h] = 0.9
    p_ROOT[a] = 0.1
    p_STOP[h,LEFT,NON] = 1.0
    p_STOP[h,LEFT,ADJ] = 1.0
    p_STOP[h,RIGHT,NON] = 0.4 # RSTOP
    p_STOP[h,RIGHT,ADJ] = 0.3 # RSTOP
    p_STOP[a,LEFT,NON] = 1.0
    p_STOP[a,LEFT,ADJ] = 1.0
    p_STOP[a,RIGHT,NON] = 0.4 # RSTOP
    p_STOP[a,RIGHT,ADJ] = 0.3 # RSTOP
    p_ATTACH[a,h,LEFT] = 1.0 # not used
    p_ATTACH[a,h,RIGHT] = 1.0 # not used
    p_ATTACH[h,a,LEFT] = 1.0 # not used
    p_ATTACH[h,a,RIGHT] = 1.0 # not used
    p_ATTACH[h,h,LEFT] = 1.0 # not used
    p_ATTACH[h,h,RIGHT] = 1.0 # not used
    p_ORDER[(GOR, h)] = 1.0
    p_ORDER[(GOL, h)] = 0.0
    p_ORDER[(GOR, a)] = 1.0
    p_ORDER[(GOL, a)] = 0.0
    g = DMV_Grammar({h:'h',a:'a'}, {'h':h,'a':a}, p_ROOT, p_STOP, p_ATTACH, p_ORDER)
    # these probabilities are impossible so add them manually:
    # (they bypass make_GO_AT, which would have derived them from
    # p_ATTACH and p_STOP)
    g.p_GO_AT[a,a,LEFT,NON] = 0.4 # Lattach
    g.p_GO_AT[a,a,LEFT,ADJ] = 0.6 # Lattach
    g.p_GO_AT[h,a,LEFT,NON] = 0.2 # Lattach to h
    g.p_GO_AT[h,a,LEFT,ADJ] = 0.1 # Lattach to h
    g.p_GO_AT[a,a,RIGHT,NON] = 1.0 # Rattach
    g.p_GO_AT[a,a,RIGHT,ADJ] = 1.0 # Rattach
    g.p_GO_AT[h,a,RIGHT,NON] = 1.0 # Rattach to h
    g.p_GO_AT[h,a,RIGHT,ADJ] = 1.0 # Rattach to h
    g.p_GO_AT[h,h,LEFT,NON] = 0.2 # Lattach
    g.p_GO_AT[h,h,LEFT,ADJ] = 0.1 # Lattach
    g.p_GO_AT[a,h,LEFT,NON] = 0.4 # Lattach to a
    g.p_GO_AT[a,h,LEFT,ADJ] = 0.6 # Lattach to a
    g.p_GO_AT[h,h,RIGHT,NON] = 1.0 # Rattach
    g.p_GO_AT[h,h,RIGHT,ADJ] = 1.0 # Rattach
    g.p_GO_AT[a,h,RIGHT,NON] = 1.0 # Rattach to a
    g.p_GO_AT[a,h,RIGHT,ADJ] = 1.0 # Rattach to a
    return g
def testgrammar_h():
    # Hand-made single-head grammar with known probabilities, for the
    # inner()/inner_sent() regression tests below.
    h = 0
    p_ROOT, p_STOP, p_ATTACH, p_ORDER = {},{},{},{}
    p_ROOT[h] = 1.0
    p_STOP[h,LEFT,NON] = 1.0
    p_STOP[h,LEFT,ADJ] = 1.0
    p_STOP[h,RIGHT,NON] = 0.4
    p_STOP[h,RIGHT,ADJ] = 0.3
    p_ATTACH[h,h,LEFT] = 1.0 # not used
    p_ATTACH[h,h,RIGHT] = 1.0 # not used
    p_ORDER[(GOR, h)] = 1.0
    p_ORDER[(GOL, h)] = 0.0
    g = DMV_Grammar({h:'h'}, {'h':h}, p_ROOT, p_STOP, p_ATTACH, p_ORDER)
    g.p_GO_AT[h,h,LEFT,NON] = 0.6 # these probabilities are impossible
    g.p_GO_AT[h,h,LEFT,ADJ] = 0.7 # so add them manually...
    g.p_GO_AT[h,h,RIGHT,NON] = 1.0
    g.p_GO_AT[h,h,RIGHT,ADJ] = 1.0
    return g
def testreestimation_h():
    # Run a debug-traced v.1 reestimation on the single-head grammar.
    # NOTE(review): the reestimated grammar is discarded (no return) --
    # presumably only the 'REEST' debug output is of interest here.
    DEBUG.add('REEST')
    g = testgrammar_h()
    reestimate(g,['h h h'.split()])
1002 def test(wanted, got):
1003 if not wanted == got:
1004 raise Warning, "Regression! Should be %s: %s" % (wanted, got)
def regression_tests():
    "Pin inner()/outer() probabilities against hand-computed expected values."
    testmpp_regression(make_mpptree(testgrammar(), testcorpus[2]), 4)
    h = 0
    hh = 'h h'.split()
    hhh = 'h h h'.split()
    ha = 'h a'.split()

    # inside probabilities over the one-head test grammar:
    test("0.120", "%.3f" % inner(0, 2, (SEAL, h), 0, testgrammar_h(), hh, {}))
    test("0.063", "%.3f" % inner(0, 2, (SEAL, h), 1, testgrammar_h(), hh, {}))
    test("0.1842", "%.4f" % inner_sent(testgrammar_h(), hhh, {}))

    test("0.1092", "%.4f" % inner(0, 3, (SEAL, 0), 0, testgrammar_h(), hhh, {}))
    test("0.0252", "%.4f" % inner(0, 3, (SEAL, 0), 1, testgrammar_h(), hhh, {}))
    test("0.0498", "%.4f" % inner(0, 3, (SEAL, h), 2, testgrammar_h(), hhh, {}))

    # outside probabilities:
    test("0.58", "%.2f" % outer(1, 3, (RGOL, h), 2, testgrammar_h(), hhh, {}, {}))
    # NOTE(review): looks suspicious -- an 0.4 appears shared between this
    # value and the previous one; confirm against the hand calculation.
    test("0.61", "%.2f" % outer(1, 3, (RGOL, h), 1, testgrammar_h(), hhh, {}, {}))

    test("0.00", "%.2f" % outer(1, 3, (RGOL, h), 0, testgrammar_h(), hhh, {}, {}))
    test("0.00", "%.2f" % outer(1, 3, (RGOL, h), 3, testgrammar_h(), hhh, {}, {}))

    # outside probabilities over the two-head (h, a) test grammar:
    test("0.1089", "%.4f" % outer(0, 1, (GOR, h), 0, testgrammar_a(), ha, {}, {}))
    test("0.3600", "%.4f" % outer(0, 2, (GOR, h), 0, testgrammar_a(), ha, {}, {}))
    test("0.0000", "%.4f" % outer(0, 3, (GOR, h), 0, testgrammar_a(), ha, {}, {}))

    # todo: add more of these tests...
def compare_grammars(g1, g2):
    """Return a report of probability entries that differ between two grammars.

    Compares p_ATTACH, p_STOP, p_ORDER and p_ROOT of `g1` ("reestimate1")
    against `g2` ("reestimate2"). Values are compared by their string
    representation. An empty string means the grammars agree.
    """
    report = ""
    dict_pairs = [(g1.p_ATTACH, g2.p_ATTACH), (g1.p_STOP, g2.p_STOP),
                  (g1.p_ORDER, g2.p_ORDER), (g1.p_ROOT, g2.p_ROOT)]
    for d1, d2 in dict_pairs:
        for k in d1:
            v = d1[k]
            if k not in d2:
                report += "\nreestimate1[%s]=%s missing from reestimate2" % (k, v)
            elif "%s" % d2[k] != "%s" % v:
                report += "\nreestimate1[%s]=%s while \nreestimate2[%s]=%s." % (k, v, k, d2[k])
        for k in d2:
            if k not in d1:
                report += "\nreestimate2[%s]=%s missing from reestimate1" % (k, d2[k])
    return report
def testNVNgrammar():
    "Build the harmonic-initialized grammar for the 'n v n' test sentence."
    import loc_h_harmonic

    # Pin the initializer's module-level knobs to the values the tests
    # were calibrated against (they may have been changed elsewhere):
    loc_h_harmonic.HARMONIC_C = 0.0
    loc_h_harmonic.FNONSTOP_MIN = 25
    loc_h_harmonic.FSTOP_MIN = 5
    loc_h_harmonic.RIGHT_FIRST = 1.0
    loc_h_harmonic.OLD_STOP_CALC = True

    return loc_h_harmonic.initialize(['n v n'.split()])  # todo
def testIO():
    "Pair every test-corpus sentence with its inside probability."
    g = testgrammar()
    return [(s, inner_sent(g, s, {})) for s in testcorpus]
# Script entry point: run the regression suite when executed directly.
if __name__ == "__main__":
    DEBUG.clear()
    regression_tests()

# --- commented-out profiling / timing / mpp experiments, kept for reference ---

#     import profile
#     profile.run('testreestimation()')

#     import timeit
#     print timeit.Timer("loc_h_dmv.testreestimation()",'''import loc_h_dmv
# reload(loc_h_dmv)''').timeit(1)

#     print "mpp-test:"
#     import pprint
#     for s in testcorpus:
#         print "sent:%s\nparse:set(\n%s)"%(s,pprint.pformat(list(mpp(testgrammar(), s)),
#                                                            width=40))

#     g1 = testreestimation()
#     g2 = testreestimation2()
#     print compare_grammars(g1,g2)

# Scratch-pad: hand-computed parse-tree probabilities for the 'n v n'
# sentence, used to sanity-check reestimation by hand. Dead code
# (guarded by `if False`), kept for reference.
if False:
    g = testNVNgrammar()
    q_sent = inner_sent(g,'n v n'.split(),{})
    q_tree = {}
    q_tree[1] = 2.7213e-06 # n_0 -> v, n_0 -> n_2
    q_tree[2] = 9.738e-06 # n -> v -> n
    q_tree[3] = 2.268e-06 # n_0 -> n_2 -> v
    q_tree[4] = 2.7213e-06 # same as 1-3
    q_tree[5] = 9.738e-06
    q_tree[6] = 2.268e-06
    q_tree[7] = 1.086e-05 # n <- v -> n (e-05!!!)
    # relative frequency of each tree given the sentence:
    f_T_q = {}
    for i,q_t in q_tree.iteritems():
        f_T_q[i] = q_t / q_sent
    import pprint
    pprint.pprint(q_tree)
    pprint.pprint(f_T_q)
    print sum([f for f in f_T_q.values()])

    def treediv(num,den):
        # Ratio of summed tree frequencies: trees in `num` over trees in `den`.
        return \
            sum([f_T_q[i] for i in num ]) / \
            sum([f_T_q[i] for i in den ])
    g2 = {}
#     g2['root --> _n_'] = treediv( (1,2,3,4,5,6), (1,2,3,4,5,6,7) )
#     g2['root --> _v_'] = treediv( (7,), (1,2,3,4,5,6,7) )
#     g2['_n_ --> STOP n><'] = treediv( (1,2,3,4,5,6,7,1,2,3,4,5,6,7),
#                                       (1,2,3,4,5,6,7,1,2,3,4,5,6,7))
#     g2['_n_ --> STOP n>< NON'] = treediv( (3,4,5,6),
#                                           (3,4,5,6,4) )
#     g2['_v_ --> STOP v><'] = treediv( (1,2,3,4,5,6,7),
#                                       (1,2,3,4,5,6,7) )
#     nlrtrees = (1,2,3,4,5,6,7,1,2,3,4,5,6,7,
#                 3,4,4,5,6)
#     g2['n>< --> _n_ n><'] = treediv( ( 4, 6), nlrtrees )
#     g2['n>< --> _v_ n><'] = treediv( (3,4,5), nlrtrees )
#     g2['n>< --> n> STOP'] = treediv( (1,2,3,4,5,6,7,1,2,3,4,5,6,7),
#                                      nlrtrees )
#     g2['n>< --> n> STOP ADJ'] = treediv( ( 4,5, 7,1,2,3,4,5,6,7),
#                                          nlrtrees )
#     g2['n>< --> n> STOP NON'] = treediv( (1,2,3, 6),
#                                          nlrtrees )
#     vlrtrees = (1,2,3,4,5,6,7,
#                 7,5)
#     g2['v>< --> _n_ v><'] = treediv( (5,7), vlrtrees )
#     g2['v>< --> v> STOP'] = treediv( (1,2,3,4,5,6,7), vlrtrees )
#     nrtrees = (1,2,3,4,5,6,7,1,2,3,4,5,6,7,
#                1,1,2,3,6)
#     g2['n> --> n> _n_'] = treediv( (1,3), nrtrees )
#     g2['n> --> n> _v_'] = treediv( (1,2,6), nrtrees )
#     g2['n> --> n> _n_ NON'] = treediv( (1,), nrtrees )
#     g2['n> --> n> _n_ ADJ'] = treediv( ( 3,), nrtrees )
#     g2['n> --> n> _v_ ADJ'] = treediv( ( 1,2, 6), nrtrees )
#     vrtrees = (1,2,3,4,5,6,7,
#                7,2)
#     g2['v> --> v> _n_'] = treediv( (2,7), vrtrees )
#     g2[' v|n,R '] = treediv( (1, 2, 6),
#                              (1,1,2,3,6) )
#     g2[' n|n,R '] = treediv( (1, 3),
#                              (1,1,2,3,6) )
    g2[' stop|n,R,non '] = treediv( ( 1,2,3,6),
                                    (1,1,2,3,6) )
    g2[' v|n,left '] = treediv( ( 3,4,5),
                                (6,4,3,4,5) )
    g2[' n|n,left '] = treediv( (6,4),
                                (6,4,3,4,5) )

    pprint.pprint(g2)
    g3 = reestimate2(g, ['n v n'.split()])
    print g3
    g4 = reestimate2(g, ['n v n'.split()])
    print g4