99b687939737a83f1d4a39cdbefe81d3e664f674
[dmvccm.git] / src / loc_h_dmv.py
blob99b687939737a83f1d4a39cdbefe81d3e664f674
1 # loc_h_dmv.py
2 #
3 # dmv reestimation and inside-outside probabilities using loc_h, and
4 # no CNF-style rules
6 # Table of Contents:
7 # 1. Grammar-class and related functions
8 # 2. P_INSIDE / inner() and inner_sent()
9 # 3. P_OUTSIDE / outer()
10 # 4. Reestimation v.1: sentences as outer loop
11 # 5. Reestimation v.2: head-types as outer loop
12 # 6. Most Probable Parse
13 # 7. Testing functions
15 import io
16 from common_dmv import *
18 ### todo: debug with @accepts once in a while, but it's SLOW
19 # from typecheck import accepts, Any
if __name__ == "__main__":
    # Script entry point: just prints a header.
    print "loc_h_dmv module tests:"
def adj(middle, loc_h):
    """True iff `middle` is adjacent to the head location `loc_h`.

    middle is eg. k when rewriting for i<k<j (inside probabilities);
    adjacency holds exactly when middle is loc_h or loc_h+1 (ADJ == True).
    """
    return middle in (loc_h, loc_h + 1)
def make_GO_AT(p_STOP,p_ATTACH):
    """Precompute combined attach-and-not-stop probabilities.

    p_GO_AT[a, h, dir, adj] = p_ATTACH[a, h, dir] * (1 - p_STOP[h, dir, adj]),
    for both adjacency values NON and ADJ.  Keys present in p_ATTACH but
    whose head lacks a p_STOP entry will raise KeyError here, as before.
    """
    p_GO_AT = {}
    # items() instead of Py2-only iteritems(): identical results, and the
    # function then runs unchanged on both Python 2 and Python 3.
    for (a,h,dir), p_ah in p_ATTACH.items():
        p_GO_AT[a,h,dir, NON] = p_ah * (1-p_STOP[h, dir, NON])
        p_GO_AT[a,h,dir, ADJ] = p_ah * (1-p_STOP[h, dir, ADJ])
    return p_GO_AT
class DMV_Grammar(io.Grammar):
    # DMV grammar: inherits the tag<->number mappings from io.Grammar and
    # adds the DMV probability tables (p_ROOT, p_STOP, p_ATTACH, p_ORDER),
    # the derived p_GO_AT table, and per-sentence inside/outside chart caches.
    def __str__(self):
        # Pretty-print all non-zero probabilities, two columns where possible.
        LJUST = 47  # width of the left column
        def t(n):
            # "number=tag" rendering of a head number
            return "%d=%s" % (n, self.numtag(n))
        def p(dict,key):
            # dict lookup defaulting to 0.0 (note: parameter shadows builtin dict)
            if key in dict: return dict[key]
            else: return 0.0
        def no_zeroL(str,tagstr,prob):
            # left-column field: formatted if prob>0, else blank padding
            if prob > 0.0: return (str%(tagstr,prob)).ljust(LJUST)
            else: return "".ljust(LJUST)
        def no_zeroR(str,tagstr,prob):
            # right-column field: formatted if prob>0, else empty string
            if prob > 0.0: return str%(tagstr,prob)
            else: return ""
        def p_a(a,h):
            # one output line per (argument, head) pair with non-zero attachment
            p_L = p(self.p_ATTACH,(a,h,LEFT))
            p_R = p(self.p_ATTACH,(a,h,RIGHT))
            if p_L == 0.0 and p_R == 0.0:
                return ''
            else:
                if p_L > 0.0:
                    str = "p_ATTACH[%s|%s,L] = %s" % (t(a), t(h), p_L)
                    str = str.ljust(LJUST)
                else:
                    str = ''
                if p_R > 0.0:
                    str = str.ljust(LJUST)
                    str += "p_ATTACH[%s|%s,R] = %s" % (t(a), t(h), p_R)
                return '\n'+str
        root, stop, att, ord = "","","",""
        for h in self.headnums():
            root += no_zeroL("\np_ROOT[%s] = %s", t(h), p(self.p_ROOT, (h)))
            stop += '\n'
            stop += no_zeroL("p_STOP[stop|%s,L,adj] = %s", t(h), p(self.p_STOP, (h,LEFT,ADJ)))
            stop += no_zeroR("p_STOP[stop|%s,R,adj] = %s", t(h), p(self.p_STOP, (h,RIGHT,ADJ)))
            stop += '\n'
            stop += no_zeroL("p_STOP[stop|%s,L,non] = %s", t(h), p(self.p_STOP, (h,LEFT,NON)))
            stop += no_zeroR("p_STOP[stop|%s,R,non] = %s", t(h), p(self.p_STOP, (h,RIGHT,NON)))
            att += ''.join([p_a(a,h) for a in self.headnums()])
            ord += '\n'
            ord += no_zeroL("p_ORDER[ left-first|%s ] = %s", t(h), p(self.p_ORDER, (GOL,h)))
            ord += no_zeroR("p_ORDER[right-first|%s ] = %s", t(h), p(self.p_ORDER, (GOR,h)))
        return root + stop + att + ord

    def __init__(self, numtag, tagnum, p_ROOT, p_STOP, p_ATTACH, p_ORDER):
        io.Grammar.__init__(self, numtag, tagnum)
        self.p_ROOT = p_ROOT # p_ROOT[w] = p
        self.p_ORDER = p_ORDER # p_ORDER[seals, w] = p
        self.p_STOP = p_STOP # p_STOP[w, LEFT, NON] = p (etc. for LA,RN,RA)
        self.p_ATTACH = p_ATTACH # p_ATTACH[a, h, LEFT] = p (etc. for R)
        # p_GO_AT[a, h, LEFT, NON] = p (etc. for LA,RN,RA)
        self.p_GO_AT = make_GO_AT(self.p_STOP, self.p_ATTACH)
        # these are used in reestimate2():
        self.reset_iocharts()

    def get_iochart(self, sent_nums):
        # Return the cached (ichart, ochart) pair for this numbered
        # sentence; fresh empty dicts if the sentence was not seen before.
        ch_key = tuple(sent_nums)
        try:
            ichart = self._icharts[ch_key]
        except KeyError:
            ichart = {}
        try:
            ochart = self._ocharts[ch_key]
        except KeyError:
            ochart = {}
        return (ichart, ochart)

    def set_iochart(self, sent_nums, ichart, ochart):
        # Store the (possibly grown) charts back into the per-sentence cache.
        self._icharts[tuple(sent_nums)] = ichart
        self._ocharts[tuple(sent_nums)] = ochart

    def reset_iocharts(self):
        # Drop all cached inside/outside charts.
        self._icharts = {}
        self._ocharts = {}

    def p_GO_AT_or0(self, a, h, dir, adj):
        # p_GO_AT lookup defaulting to 0.0 for unseen (a, h, dir, adj).
        try:
            return self.p_GO_AT[a, h, dir, adj]
        except KeyError:
            return 0.0
def locs(sent_nums, start, stop):
    '''Yield (loc_w, w) pairs for the fragment sent_nums[start:stop].

    The locations are offset so that sent_nums[loc_w] == w for every
    pair yielded.  start is inclusive, stop is exclusive, as in
    klein-thesis and Python's list-slicing.'''
    loc_w = start
    for w in sent_nums[start:stop]:
        yield (loc_w, w)
        loc_w += 1
129 ###################################################
130 # P_INSIDE (dmv-specific) #
131 ###################################################
#@accepts(int, int, (int, int), int, Any(), [str], {tuple:float}, IsOneOf(None,{}))
def inner(i, j, node, loc_h, g, sent, ichart, mpptree=None):
    ''' Inside probability of `node` spanning sent[i:j] with head at loc_h.

    The ichart is of this form:
    ichart[i,j,LHS, loc_h]
    where i and j are between-word positions.

    loc_h gives adjacency (along with k for attachment rules), and is
    needed in P_STOP reestimation.

    If mpptree is given (a dict), the best split per chart key is
    recorded into it as key -> (p, L, R) for Most Probable Parse use.
    '''
    sent_nums = g.sent_nums(sent)

    def terminal(i,j,node, loc_h, tabs):
        # Base case: a one-word span rewrites through p_ORDER, but only
        # if loc_h lies inside [i,j) and the POS matches the word there.
        if not i <= loc_h < j:
            if 'INNER' in DEBUG:
                print "%s*= 0.0 (wrong loc_h)" % tabs
            return 0.0
        elif POS(node) == sent_nums[i] and node in g.p_ORDER:
            # todo: add to ichart perhaps? Although, it _is_ simple lookup..
            prob = g.p_ORDER[node]
        else:
            if 'INNER' in DEBUG:
                print "%sLACKING TERMINAL:" % tabs
            prob = 0.0
        if 'INNER' in DEBUG:
            print "%s*= %.4f (terminal: %s -> %s_%d)" % (tabs,prob, node_str(node), sent[i], loc_h)
        return prob

    def e(i,j, (s_h,h), loc_h, n_t):
        # Recursive inside computation for node (s_h,h) over span [i,j);
        # n_t is only the recursion depth, used for debug indentation.
        def to_mpp(p, L, R):
            # Remember the highest-probability (p, L, R) split per chart key.
            if mpptree:
                key = (i,j, (s_h,h), loc_h)
                if key not in mpptree:
                    mpptree[key] = (p, L, R)
                elif mpptree[key][0] < p:
                    mpptree[key] = (p, L, R)

        def tab():
            "Tabs for debug output"
            return "\t"*n_t

        if (i, j, (s_h,h), loc_h) in ichart:
            # memoised in the chart already
            if 'INNER' in DEBUG:
                print "%s*= %.4f in ichart: i:%d j:%d node:%s loc:%s" % (tab(),ichart[i, j, (s_h,h), loc_h], i, j,
                                                                         node_str((s_h,h)), loc_h)
            return ichart[i, j, (s_h,h), loc_h]
        else:
            # Either terminal rewrites, using p_ORDER:
            if i+1 == j and (s_h == GOR or s_h == GOL):
                return terminal(i, j, (s_h,h), loc_h, tab())
            else: # Or not at terminal level yet:
                if 'INNER' in DEBUG:
                    print "%s%s (%.1f) from %d to %d" % (tab(),node_str((s_h,h)),loc_h,i,j)
                if s_h == SEAL:
                    if h == POS(ROOT): # only used in testing, o/w we use inner_sent
                        h = sent_nums[loc_h]
                        if i != 0 or j != len(sent): raise ValueError
                        else: return g.p_ROOT[h] * e(i,j,(SEAL,h),loc_h,n_t+1)
                    # A sealed head is made by stopping on either side:
                    p_RGOL = g.p_STOP[h, LEFT, adj(i,loc_h)] * e(i,j,(RGOL,h),loc_h,n_t+1)
                    p_LGOR = g.p_STOP[h, RIGHT, adj(j,loc_h)] * e(i,j,(LGOR,h),loc_h,n_t+1)
                    p = p_RGOL + p_LGOR
                    to_mpp(p_RGOL, STOPKEY, (i,j, (RGOL,h),loc_h))
                    # NOTE(review): the child key of the p_LGOR entry below
                    # says RGOL where LGOR might be expected -- confirm.
                    to_mpp(p_LGOR, (i,j, (RGOL,h),loc_h), STOPKEY )
                    if 'INNER' in DEBUG:
                        print "%sp= %.4f (STOP)" % (tab(), p)
                elif s_h == RGOL or s_h == GOL:
                    # Going left: possibly stop on the right first (RGOL),
                    # then sum over left attachments at each split point k.
                    p = 0.0
                    if s_h == RGOL:
                        p = g.p_STOP[h, RIGHT, adj(j,loc_h)] * e(i,j, (GOR,h),loc_h,n_t+1)
                        to_mpp(p, (i,j, (GOR,h),loc_h), STOPKEY)
                    for k in xgo_left(i, loc_h): # i < k <= loc_l(h)
                        p_R = e(k, j, ( s_h,h), loc_h, n_t+1)
                        if p_R > 0.0:
                            for loc_a,a in locs(sent_nums, i, k):
                                p_ah = g.p_GO_AT_or0(a, h, LEFT, adj(k,loc_h))
                                if p_ah > 0.0:
                                    p_L = e(i, k, (SEAL,a), loc_a, n_t+1)
                                    p_add = p_L * p_ah * p_R
                                    p += p_add
                                    to_mpp(p_add,
                                           (i, k, (SEAL,a), loc_a),
                                           (k, j, ( s_h,h), loc_h))
                    if 'INNER' in DEBUG:
                        print "%sp= %.4f (ATTACH)" % (tab(), p)
                elif s_h == GOR or s_h == LGOR:
                    # Going right: possibly stop on the left first (LGOR),
                    # then sum over right attachments at each split point k.
                    p = 0.0
                    if s_h == LGOR:
                        p = g.p_STOP[h, LEFT, adj(i,loc_h)] * e(i,j, (GOL,h),loc_h,n_t+1)
                        to_mpp(p, (i,j, (GOL,h),loc_h), STOPKEY)
                    for k in xgo_right(loc_h, j): # loc_l(h) < k < j
                        p_L = e(i, k, ( s_h,h), loc_h, n_t+1)
                        if p_L > 0.0:
                            for loc_a,a in locs(sent_nums,k,j):
                                p_ah = g.p_GO_AT_or0(a, h, RIGHT, adj(k,loc_h))
                                p_R = e(k, j, (SEAL,a), loc_a, n_t+1)
                                p_add = p_L * p_ah * p_R
                                p += p_add
                                to_mpp(p_add,
                                       (i, k, ( s_h,h), loc_h),
                                       (k, j, (SEAL,a), loc_a))
                    if 'INNER' in DEBUG:
                        print "%sp= %.4f (ATTACH)" % (tab(), p)
                # elif s_h == GOL: # todo
                ichart[i, j, (s_h,h), loc_h] = p
                return p
    # end of e-function

    inner_prob = e(i,j,node,loc_h, 0)
    if 'INNER' in DEBUG:
        print debug_ichart(g,sent,ichart)
    return inner_prob
# end of dmv.inner(i, j, node, loc_h, g, sent, ichart,mpptree)
def debug_ichart(g,sent,ichart):
    # Render the whole inside chart, one entry per line, for debugging.
    # (note: the local `str` shadows the builtin)
    str = "---ICHART:---\n"
    for (s,t,LHS,loc_h),v in ichart.iteritems():
        # NOTE(review): sent[s] is printed for both span ends; the second
        # word field was presumably meant to show the word at t -- confirm.
        str += "%s -> %s_%d ... %s_%d (loc_h:%s):\t%s\n" % (node_str(LHS,g.numtag),
                                                            sent[s], s, sent[s], t, loc_h, v)
    str += "---ICHART:end---\n"
    return str
def inner_sent(g, sent, ichart):
    """Probability of the whole sentence: for each word w, p_ROOT[w]
    times the inside probability of a sealed w spanning all of sent,
    summed over the words."""
    total = 0.0
    for loc_w, w in locs(g.sent_nums(sent), 0, len(sent)):
        total += g.p_ROOT[w] * inner(0, len(sent), (SEAL, w), loc_w, g, sent, ichart)
    return total
265 ###################################################
266 # P_OUTSIDE (dmv-specific) #
267 ###################################################
#@accepts(int, int, (int, int), int, Any(), [str], {tuple:float}, {tuple:float})
def outer(i,j,w_node,loc_w, g, sent, ichart, ochart):
    ''' Outside probability of w_node over sent[i:j] with head at loc_w.
    http://www.student.uib.no/~kun041/dmvccm/DMVCCM.html#outer

    w_node is a pair (seals,POS); the w in klein-thesis is made up of
    POS(w) and loc_w
    '''
    sent_nums = g.sent_nums(sent)
    if POS(w_node) not in sent_nums[i:j]:
        # sanity check, w must be able to dominate sent[i:j]
        return 0.0

    # local functions:
    def e(i,j,LHS,loc_h): # P_{INSIDE}
        # inside lookup, computing (and thereby filling ichart) on miss
        try:
            return ichart[i,j,LHS,loc_h]
        except KeyError:
            return inner(i,j,LHS,loc_h,g,sent,ichart)

    def f(i,j,w_node,loc_w):
        # recursive outside computation, memoised in ochart
        if not (i <= loc_w < j):
            return 0.0
        if (i,j,w_node,loc_w) in ochart:
            return ochart[i,j, w_node,loc_w]
        if w_node == ROOT:
            if i == 0 and j == len(sent):
                return 1.0
            else: # ROOT may only be used on full sentence
                return 0.0
        # but we may have non-ROOTs (stops) over full sentence too:
        w = POS(w_node)
        s_w = seals(w_node)

        # todo: try either if p_M > 0.0: or sum(), and speed-test them

        if s_w == SEAL: # w == a
            # A sealed w is either the root, or an argument attached to
            # some head h outside the span, on either side.
            # todo: do the i<sent<j check here to save on calls?
            p = g.p_ROOT[w] * f(i,j,ROOT,loc_w)
            # left attach
            for k in xgt(j, sent): # j<k<len(sent)+1
                for loc_h,h in locs(sent_nums,j,k):
                    p_wh = g.p_GO_AT_or0(w, h, LEFT, adj(j, loc_h))
                    for s_h in [RGOL, GOL]:
                        p += f(i,k,(s_h,h),loc_h) * p_wh * e(j,k,(s_h,h),loc_h)
            # right attach
            for k in xlt(i): # k<i
                for loc_h,h in locs(sent_nums,k,i):
                    p_wh = g.p_GO_AT_or0(w, h, RIGHT, adj(i, loc_h))
                    for s_h in [LGOR, GOR]:
                        p += e(k,i,(s_h,h), loc_h) * p_wh * f(k,j,(s_h,h), loc_h)

        elif s_w == RGOL or s_w == GOL: # w == h, left stop + left attach
            if s_w == RGOL:
                s_h = SEAL
            else: # s_w == GOL
                s_h = LGOR
            p = g.p_STOP[w, LEFT, adj(i,loc_w)] * f(i,j,( s_h,w),loc_w)
            for k in xlt(i): # k<i
                for loc_a,a in locs(sent_nums,k,i):
                    p_aw = g.p_GO_AT_or0(a, w, LEFT, adj(i, loc_w))
                    p += e(k,i, (SEAL,a),loc_a) * p_aw * f(k,j,w_node,loc_w)

        elif s_w == GOR or s_w == LGOR: # w == h, right stop + right attach
            if s_w == GOR:
                s_h = RGOL
            else: # s_w == LGOR
                s_h = SEAL
            p = g.p_STOP[w, RIGHT, adj(j,loc_w)] * f(i,j,( s_h,w),loc_w)
            for k in xgt(j, sent): # j<k<len(sent)+1
                for loc_a,a in locs(sent_nums,j,k):
                    p_ah = g.p_GO_AT_or0(a, w, RIGHT, adj(j, loc_w))
                    p += f(i,k,w_node,loc_w) * p_ah * e(j,k,(SEAL,a),loc_a)

        ochart[i,j,w_node,loc_w] = p
        return p
    # end outer.f()

    return f(i,j,w_node,loc_w)
# end outer(i,j,w_node,loc_w, g,sent, ichart,ochart)
352 ###################################################
353 # Reestimation v.1: #
354 # Sentences as outer loop #
355 ###################################################
def reest_zeros(h_nums):
    '''Build the counter dict for our 6+ reestimation formulas, with every
    numerator and denominator initialised to 0.0.

    todo: p_ORDER?'''
    fr = { ('ROOT','den'):0.0 } # holds sum over f_sent!! not p_sent...
    for h in h_nums:
        fr['ROOT','num',h] = 0.0
        for s_h in [GOR,GOL,RGOL,LGOR]:
            x = (s_h,h)
            fr['hat_a','den',x] = 0.0 # = c()
            # not all arguments are attached to, so we just initialize
            # fr['hat_a','num',a,(s_h,h)] as they show up, in reest_freq
            for adjacency in [NON, ADJ]:
                fr['STOP','num',x,adjacency] = 0.0
                fr['STOP','den',x,adjacency] = 0.0
    return fr
def reest_freq(g, corpus):
    # Collect expected rule frequencies over the whole corpus (E-step of
    # EM, v.1: sentences as the outer loop).  Returns the counter dict
    # built by reest_zeros(), filled in.
    fr = reest_zeros(g.headnums())
    ichart = {}
    ochart = {}
    p_sent = None # 50 % speed increase on storing this locally

    # local functions altogether 2x faster than global
    def c(i,j,LHS,loc_h,sent):
        # expected count: P_inside * P_outside / P(sent); 0.0 short-circuits
        if not p_sent > 0.0:
            return p_sent

        p_in = e(i,j, LHS,loc_h,sent)
        if not p_in > 0.0:
            return p_in

        p_out = f(i,j, LHS,loc_h,sent)
        return p_in * p_out / p_sent
    # end reest_freq.c()

    def f(i,j,LHS,loc_h,sent): # P_{OUTSIDE}
        try:
            return ochart[i,j,LHS,loc_h]
        except KeyError:
            return outer(i,j,LHS,loc_h,g,sent,ichart,ochart)
    # end reest_freq.f()

    def e(i,j,LHS,loc_h,sent): # P_{INSIDE}
        try:
            return ichart[i,j,LHS,loc_h]
        except KeyError:
            return inner(i,j,LHS,loc_h,g,sent,ichart)
    # end reest_freq.e()

    def w_left(i,j, x,loc_h,sent,sent_nums):
        # accumulate attachment numerators fr['hat_a','num',a,x] for all
        # arguments a attachable to the left of head x within [i,j)
        if not p_sent > 0.0: return

        h = POS(x)
        a_k = {}
        for k in xtween(i, j):
            p_out = f(i,j, x,loc_h, sent)
            if not p_out > 0.0:
                continue
            p_R = e(k,j, x,loc_h, sent)
            if not p_R > 0.0:
                continue

            for loc_a,a in locs(sent_nums, i,k): # i<=loc_l(a)<k
                p_rule = g.p_GO_AT_or0(a, h, LEFT, adj(k, loc_h))
                p_L = e(i,k, (SEAL,a), loc_a, sent)
                p = p_L * p_out * p_R * p_rule
                try: a_k[a] += p
                except KeyError: a_k[a] = p

        for a,p in a_k.iteritems():
            try: fr['hat_a','num',a,x] += p / p_sent
            except KeyError: fr['hat_a','num',a,x] = p / p_sent
    # end reest_freq.w_left()

    def w_right(i,j, x,loc_h,sent,sent_nums):
        # mirror image of w_left: arguments to the right of head x
        if not p_sent > 0.0: return

        h = POS(x)
        a_k = {}
        for k in xtween(i, j):
            p_out = f(i,j, x,loc_h, sent)
            if not p_out > 0.0:
                continue
            p_L = e(i,k, x,loc_h, sent)
            if not p_L > 0.0:
                continue

            for loc_a,a in locs(sent_nums, k,j): # k<=loc_l(a)<j
                p_rule = g.p_GO_AT_or0(a, h, RIGHT, adj(k, loc_h))
                p_R = e(k,j, (SEAL,a),loc_a, sent)
                p = p_L * p_out * p_R * p_rule
                try: a_k[a] += p
                except KeyError: a_k[a] = p

        for a,p in a_k.iteritems():
            try: fr['hat_a','num',a,x] += p / p_sent
            except KeyError: fr['hat_a','num',a,x] = p / p_sent
    # end reest_freq.w_right()

    # in reest_freq:
    for sent in corpus:
        if 'REEST' in DEBUG:
            print sent
        ichart = {}
        ochart = {}
        p_sent = inner_sent(g, sent, ichart)
        fr['ROOT','den'] += 1 # divide by p_sent per h!

        sent_nums = g.sent_nums(sent)

        for loc_h,h in locs(sent_nums,0,len(sent)+1): # locs-stop is exclusive, thus +1
            # root:
            fr['ROOT','num',h] += g.p_ROOT[h] * e(0,len(sent), (SEAL,h),loc_h, sent) \
                                  / p_sent

            loc_l_h = loc_h
            loc_r_h = loc_l_h+1

            # left non-adjacent stop:
            for i in xlt(loc_l_h):
                # NOTE(review): `j` here is whatever value an earlier loop
                # left behind (unbound on the very first pass) -- looks
                # like a bug; possibly c(i, loc_r_h, ...) was meant. Confirm.
                fr['STOP','num',(GOL,h),NON] += c(loc_l_h, j, (LGOR, h),loc_h, sent)
                fr['STOP','den',(GOL,h),NON] += c(loc_l_h, j, (GOL, h),loc_h, sent)
                for j in xgteq(loc_r_h, sent):
                    fr['STOP','num',(RGOL,h),NON] += c(i, j, (SEAL, h),loc_h, sent)
                    fr['STOP','den',(RGOL,h),NON] += c(i, j, (RGOL, h),loc_h, sent)
            # left adjacent stop, i = loc_l_h
            fr['STOP','num',(GOL,h),ADJ] += c(loc_l_h, loc_r_h, (LGOR, h),loc_h, sent)
            fr['STOP','den',(GOL,h),ADJ] += c(loc_l_h, loc_r_h, (GOL, h),loc_h, sent)
            for j in xgteq(loc_r_h, sent):
                fr['STOP','num',(RGOL,h),ADJ] += c(loc_l_h, j, (SEAL, h),loc_h, sent)
                fr['STOP','den',(RGOL,h),ADJ] += c(loc_l_h, j, (RGOL, h),loc_h, sent)
            # right non-adjacent stop:
            for j in xgt(loc_r_h, sent):
                fr['STOP','num',(GOR,h),NON] += c(loc_l_h, j, (RGOL, h),loc_h, sent)
                fr['STOP','den',(GOR,h),NON] += c(loc_l_h, j, (GOR, h),loc_h, sent)
                for i in xlteq(loc_l_h):
                    # NOTE(review): loop variable i is unused below;
                    # possibly c(i, j, ...) was meant. Confirm.
                    fr['STOP','num',(LGOR,h),NON] += c(loc_l_h, j, (SEAL, h),loc_h, sent)
                    fr['STOP','den',(LGOR,h),NON] += c(loc_l_h, j, (LGOR, h),loc_h, sent)
            # right adjacent stop, j = loc_r_h
            fr['STOP','num',(GOR,h),ADJ] += c(loc_l_h, loc_r_h, (RGOL, h),loc_h, sent)
            fr['STOP','den',(GOR,h),ADJ] += c(loc_l_h, loc_r_h, (GOR, h),loc_h, sent)
            for i in xlteq(loc_l_h):
                # NOTE(review): stale `j` and unused `i` again -- confirm.
                fr['STOP','num',(LGOR,h),ADJ] += c(loc_l_h, j, (SEAL, h),loc_h, sent)
                fr['STOP','den',(LGOR,h),ADJ] += c(loc_l_h, j, (LGOR, h),loc_h, sent)

            # left attachment:
            if 'REEST_ATTACH' in DEBUG:
                print "Lattach %s: for i < %s"%(g.numtag(h),sent[0:loc_h+1])
            for s_h in [RGOL, GOL]:
                x = (s_h, h)
                for i in xlt(loc_l_h): # i < loc_l(h)
                    if 'REEST_ATTACH' in DEBUG:
                        print "\tfor j >= %s"%sent[loc_h:len(sent)]
                    for j in xgteq(loc_r_h, sent): # j >= loc_r(h)
                        fr['hat_a','den',x] += c(i,j, x,loc_h, sent) # v_q in L&Y
                        if 'REEST_ATTACH' in DEBUG:
                            print "\t\tc( %d , %d, %s, %s, sent)=%.4f"%(i,j,node_str(x),loc_h,fr['hat_a','den',x])
                        w_left(i, j, x,loc_h, sent,sent_nums) # compute w for all a in sent

            # right attachment:
            if 'REEST_ATTACH' in DEBUG:
                print "Rattach %s: for i <= %s"%(g.numtag(h),sent[0:loc_h+1])
            for s_h in [GOR, LGOR]:
                x = (s_h, h)
                for i in xlteq(loc_l_h): # i <= loc_l(h)
                    if 'REEST_ATTACH' in DEBUG:
                        print "\tfor j > %s"%sent[loc_h:len(sent)]
                    for j in xgt(loc_r_h, sent): # j > loc_r(h)
                        fr['hat_a','den',x] += c(i,j, x,loc_h, sent) # v_q in L&Y
                        if 'REEST_ATTACH' in DEBUG:
                            print "\t\tc( %d , %d, %s, %s, sent)=%.4f"%(loc_h,j,node_str(x),loc_h,fr['hat_a','den',x])
                        w_right(i,j, x,loc_h, sent,sent_nums) # compute w for all a in sent
        # end for loc_h,h
    # end for sent

    return fr
def reestimate(old_g, corpus):
    """One EM iteration (v.1): collect expected frequencies over corpus,
    then build and return a new DMV_Grammar from them.  p_ORDER is
    carried over unchanged."""
    fr = reest_freq(old_g, corpus)
    p_ROOT = {}
    p_STOP = {}
    p_ATTACH = {}

    for head in old_g.headnums():
        # reest_head fills in p_ROOT, p_STOP and p_ATTACH for this head
        reest_head(head, fr, old_g, p_ROOT, p_STOP, p_ATTACH)

    numtag, tagnum = old_g.get_nums_tags()
    return DMV_Grammar(numtag, tagnum, p_ROOT, p_STOP, p_ATTACH, old_g.p_ORDER)
def reest_head(h, fr, g, p_ROOT, p_STOP, p_ATTACH):
    """Given a single head h, write its reestimated probabilities into
    the supplied p_ROOT, p_STOP and p_ATTACH dicts, from the frequency
    counters collected by reest_freq().

    Missing counters give probability 0.0; zero denominators contribute
    nothing (previously this was achieved by silently swallowing
    ZeroDivisionError, which could also mask unrelated errors -- now
    guarded explicitly)."""
    # remove 0-prob stuff? todo
    try:
        p_ROOT[h] = fr['ROOT','num',h] / fr['ROOT','den']
    except KeyError:
        p_ROOT[h] = 0.0

    for dir in [LEFT,RIGHT]:
        for adj in [ADJ, NON]: # p_STOP (adj shadows the adj() helper here)
            p_STOP[h, dir, adj] = 0.0
            for s_h in dirseal(dir):
                x = (s_h,h)
                p = fr['STOP','den', x, adj]
                if p > 0.0:
                    p = fr['STOP', 'num', x, adj] / p
                p_STOP[h, dir, adj] += p

        for s_h in dirseal(dir): # make hat_a for p_ATTACH
            x = (s_h,h)
            p_c = fr['hat_a','den',x]

            for a in g.headnums():
                if (a,h,dir) not in p_ATTACH:
                    p_ATTACH[a,h,dir] = 0.0
                if p_c > 0.0: # explicit guard instead of except ZeroDivisionError
                    try: # (a,x) might not be in hat_a
                        p_ATTACH[a,h,dir] += fr['hat_a','num',a,x] / p_c
                    except KeyError: pass
584 ###################################################
585 # Reestimation v.2: #
586 # Heads as outer loop #
587 ###################################################
def locs_h(h, sent_nums):
    '''Return the between-word locations of all tokens of h in sent.'''
    # (inlines the locs() generator: over the full sentence, the location
    # of each word is simply its index)
    return [loc_w for loc_w, w in enumerate(sent_nums) if w == h]
def locs_a(a, sent_nums, start, stop):
    '''Return the between-word locations of all tokens of a in some
    fragment of sent.  The locations are offset so that for any returned
    loc_w, sent_nums[loc_w] == a.

    start is inclusive, stop is exclusive, as in klein-thesis and
    Python's list-slicing (eg. return left-loc).'''
    # (inlines the locs() generator)
    found = []
    for offset, w in enumerate(sent_nums[start:stop]):
        if w == a:
            found.append(start + offset)
    return found
def inner2(i, j, node, loc_h, g, sent):
    """inner() wrapper using the grammar-level persistent io-charts
    (reestimation v.2).

    Fixes two NameErrors in the original: `s_n` (the numbered sentence,
    used as chart key) was never computed, and the chart was indexed
    with an undefined `x` instead of `node`."""
    s_n = g.sent_nums(sent)
    ichart,ochart = g.get_iochart(s_n)
    try: p = ichart[i,j,node,loc_h]
    except KeyError: p = inner(i,j,node,loc_h,g,sent,ichart)
    g.set_iochart(s_n,ichart,ochart)
    return p
def inner_sent2(g, sent):
    """inner_sent() wrapper using the grammar-level persistent io-charts
    (reestimation v.2).

    Fixes a NameError in the original: `s_n` was never computed."""
    s_n = g.sent_nums(sent)
    ichart,ochart = g.get_iochart(s_n)
    p = inner_sent(g,sent,ichart)
    g.set_iochart(s_n,ichart,ochart)
    return p
def outer2(i, j,w_node,loc_w, g, sent):
    """outer() wrapper using the grammar-level persistent io-charts
    (reestimation v.2).

    Fixes two bugs in the original: `s_n` was never computed (NameError),
    and on a chart miss it called inner() -- with an extra ochart
    argument inner() does not take -- where outer() was clearly meant."""
    s_n = g.sent_nums(sent)
    ichart,ochart = g.get_iochart(s_n)
    try: p = ochart[i,j,w_node,loc_w]
    except KeyError: p = outer(i,j,w_node,loc_w,g,sent,ichart,ochart)
    g.set_iochart(s_n,ichart,ochart)
    return p
def reestimate2(old_g, corpus):
    """One EM iteration (v.2, heads as the outer loop): reestimate every
    head via reest_head2 and return a new DMV_Grammar.  p_ORDER is
    carried over unchanged."""
    p_ROOT = {}
    p_STOP = {}
    p_ATTACH = {}

    for head in old_g.headnums():
        # reest_head2 fills in p_ROOT, p_STOP and p_ATTACH for this head
        reest_head2(head, old_g, corpus, p_ROOT, p_STOP, p_ATTACH)

    numtag, tagnum = old_g.get_nums_tags()
    return DMV_Grammar(numtag, tagnum, p_ROOT, p_STOP, p_ATTACH, old_g.p_ORDER)
def hat_d2(xbar, x, xi, xj, g, corpus): # stop helper
    # Estimate the stop probability that rewrites x into xbar (eg.
    # SEAL/RGOL): sum of expected counts of xbar over those of x, over
    # the whole corpus.  xi and xj are range-generators (xlt/xeq/...)
    # encoding the adjacency condition on the span ends.
    def c(x,loc_x,i,j): return c2(x,loc_x,i,j,g,s_n,sent)

    h = POS(x)
    if h != POS(xbar): raise ValueError

    num, den = 0.0, 0.0
    for s_n,sent in [(g.sent_nums(sent),sent) for sent in corpus]:
        for loc_h in locs_h(h,s_n):
            loc_l_h, loc_r_h = loc_h, loc_h + 1
            for i in xi(loc_l_h):
                for j in xj(loc_r_h, s_n):
                    # print "s:%s %d,%d"%(sent,i,j)
                    num += c(xbar,loc_h,i,j)
                    den += c(x,loc_h,i,j)
    if den == 0.0:
        return den
    return num/den # eg. SEAL/RGOL, xbar/x
def c2(x,loc_h,i,j,g,s_n,sent):
    # Expected count of node x with head at loc_h over span [i,j):
    # P_inside * P_outside / P(sent), using (and updating) the
    # grammar-level persistent io-charts.  Returns 0.0 whenever P(sent)
    # or the inside probability is 0.
    ichart,ochart = g.get_iochart(s_n)

    def f(i,j,x,loc_h): # P_{OUTSIDE}
        try: return ochart[i,j,x,loc_h]
        except KeyError: return outer(i,j,x,loc_h,g,sent,ichart,ochart)
    def e(i,j,x,loc_h): # P_{INSIDE}
        try: return ichart[i,j,x,loc_h]
        except KeyError: return inner(i,j,x,loc_h,g,sent,ichart)

    p_sent = inner_sent(g, sent, ichart)
    if not p_sent > 0.0:
        return p_sent
    # note: early returns above skip set_iochart, so chart growth from a
    # zero-probability query is not saved back

    p_in = e(i,j, x,loc_h)
    if not p_in > 0.0:
        return p_in

    p_out = f(i,j, x,loc_h)

    g.set_iochart(s_n,ichart,ochart)
    return p_in * p_out / p_sent
def w2(a, x,loc_h, dir, i, j, g, s_n,sent):
    # Attachment numerator helper (v.2): expected count of head x
    # attaching argument a in direction dir within span [i,j), summed
    # over every split point k and every location of a, normalised by
    # P(sent).  Uses the grammar-level persistent io-charts.
    ichart,ochart = g.get_iochart(s_n)

    def f(i,j,x,loc_h): # P_{OUTSIDE}
        try: return ochart[i,j,x,loc_h]
        except KeyError: return outer(i,j,x,loc_h,g,sent,ichart,ochart)
    def e(i,j,x,loc_h): # P_{INSIDE}
        try: return ichart[i,j,x,loc_h]
        except KeyError: return inner(i,j,x,loc_h,g,sent,ichart)

    h = POS(x)
    p_sent = inner_sent(g, sent, ichart)
    # NOTE(review): unlike c2(), there is no p_sent > 0 guard here, so a
    # zero-probability sentence would raise ZeroDivisionError below -- confirm.

    if dir == LEFT:
        L, R = (SEAL,a),x   # argument subtree left of the head subtree
    else:
        L, R = x,(SEAL,a)
    w_sum = 0.0

    for k in xtween(i,j):
        if dir == LEFT:
            start, stop = i, k
        else:
            start, stop = k, j
        for loc_a in locs_a(a, s_n, start, stop):
            if dir == LEFT:
                loc_L, loc_R = loc_a, loc_h
            else:
                loc_L, loc_R = loc_h, loc_a
            p = g.p_GO_AT_or0(a,h,dir,adj(k,loc_h))
            in_L = e(i,k,L,loc_L)
            in_R = e(k,j,R,loc_R)
            out = f(i,j,x,loc_h)
            w_sum += p * in_L * in_R * out

    g.set_iochart(s_n,ichart,ochart)
    return w_sum/p_sent
def hat_a2(a, x, dir, g, corpus): # attachment helper
    # Estimate p(attach a | head-state x, dir) over the corpus:
    # sum of w2 numerators over sum of c2 expected counts of x.
    def w(a,x,loc_x,dir,i,j): return w2(a,x,loc_x,dir,i,j,g,s_n,sent)
    def c(x,loc_x,i,j): return c2(x,loc_x,i,j,g,s_n,sent)

    h = POS(x)
    # span-end generators encode which side is being attached on:
    if dir == LEFT:
        xi, xj = xlt, xgteq
    else:
        xi, xj = xlteq, xgt
    den, num = 0.0, 0.0

    for s_n,sent in [(g.sent_nums(sent),sent) for sent in corpus]:
        for loc_h in locs_h(h,s_n):
            loc_l_h, loc_r_h = loc_h, loc_h + 1
            for i in xi(loc_l_h):
                for j in xj(loc_r_h,sent):
                    num += w(a, x,loc_h, dir, i,j)
                    den += c(x,loc_h, i,j)
    if den == 0.0:
        return den
    return num/den
def reest_root2(h,g,corpus):
    # Reestimate p_ROOT[h] (v.2): per sentence, the expected fraction of
    # probability mass where h heads the whole sentence, averaged over
    # the corpus.  (note: local `sum` shadows the builtin)
    sum = 0.0
    corpus_size = 0.0
    for s_n,sent in [(g.sent_nums(sent),sent) for sent in corpus]:
        num, den = 0.0, 0.0
        corpus_size += 1.0
        ichart, ochart = g.get_iochart(s_n)
        den += inner_sent(g, sent, ichart)
        for loc_h in locs_h(h,s_n):
            num += \
                g.p_ROOT[h] * \
                inner(0, len(s_n), (SEAL,h), loc_h, g, sent, ichart)
        g.set_iochart(s_n, ichart, ochart)
        # NOTE(review): a zero-probability sentence (den == 0.0) would
        # raise ZeroDivisionError here -- confirm this cannot happen.
        sum += num / den
    return sum / corpus_size
def reest_head2(h, g, corpus, p_ROOT, p_STOP, p_ATTACH):
    # Reestimate all probabilities for a single head h (v.2), writing
    # into the supplied p_ROOT / p_STOP / p_ATTACH dicts.
    print "h: %d=%s ..."%(h,g.numtag(h)),
    def hat_d(xbar,x,xi,xj): return hat_d2(xbar,x,xi,xj, g, corpus)
    def hat_a(a, x, dir ): return hat_a2(a, x, dir, g, corpus)

    # Each stop probability sums the estimates for the two seal-states of
    # that direction; the xlt/xeq/... generators encode the adjacency case:
    p_STOP[h, LEFT,NON] = \
        hat_d((SEAL,h),(RGOL,h),xlt, xgteq) + \
        hat_d((LGOR,h),( GOL,h),xlt, xeq)
    p_STOP[h, LEFT,ADJ] = \
        hat_d((SEAL,h),(RGOL,h),xeq, xgteq) + \
        hat_d((LGOR,h),( GOL,h),xeq, xeq)
    p_STOP[h,RIGHT,NON] = \
        hat_d((RGOL,h),( GOR,h),xeq, xgt) + \
        hat_d((SEAL,h),(LGOR,h),xlteq,xgt)
    p_STOP[h,RIGHT,ADJ] = \
        hat_d((RGOL,h),( GOR,h),xeq, xeq) + \
        hat_d((SEAL,h),(LGOR,h),xlteq,xeq)
    print "stops done...",

    p_ROOT[h] = reest_root2(h,g,corpus)
    print "root done...",

    # Attachment likewise sums over the two seal-states per direction:
    for a in g.headnums():
        p_ATTACH[a,h,LEFT] = \
            hat_a(a, (GOL,h),LEFT) + \
            hat_a(a,(RGOL,h),LEFT)
        p_ATTACH[a,h,RIGHT] = \
            hat_a(a, (GOR,h),RIGHT) + \
            hat_a(a,(LGOR,h),RIGHT)

    print "attachment done"
789 ###################################################
790 # Most Probable Parse: #
791 ###################################################
# Sentinel mpptree keys of the usual (i, j, node, loc) shape, with -1
# for the positions since STOP/ROOT have no real span:
STOPKEY = (-1,-1,STOP,-1)
ROOTKEY = (-1,-1,ROOT,-1)
def make_mpptree(g, sent):
    '''Tell inner() to make an mpptree, connect ROOT to this. (Logically,
    this should be part of inner_sent though...)

    Returns the mpptree dict: key -> (p, L, R) where key, L and R are
    (i, j, node, loc) chart keys and p is the best probability found.'''
    ichart = {}
    mpptree = { ROOTKEY:(0.0, ROOTKEY, None) }
    for loc_w,w in locs(g.sent_nums(sent),0,len(sent)):
        p = g.p_ROOT[w] * inner(0, len(sent), (SEAL,w), loc_w, g, sent, ichart, mpptree)
        L = ROOTKEY
        R = (0,len(sent), (SEAL,w), loc_w)
        # keep only the best-scoring root attachment:
        if mpptree[ROOTKEY][0] < p:
            mpptree[ROOTKEY] = (p, L, R)
    return mpptree
def parse_mpptree(mpptree, sent):
    '''mpptree is a dict of the form {k:(p,L,R),...}; where k, L and R
    are `keys' of the form (i,j,node,loc).

    returns an mpp of the form [((head, loc_h),(arg, loc_a)), ...],
    where head and arg are tags.'''
    # local functions for clear access to mpptree:
    def k_node(key):
        return key[2]
    def k_POS(key):
        return POS(k_node(key))
    def k_seals(key):
        return seals(k_node(key))
    def k_locnode(key):
        return (k_node(key),key[3])
    def k_locPOS(key):
        return (k_POS(key),key[3])
    def k_terminal(key):
        s_k = k_seals(key) # i+1 == j
        return key[0] + 1 == key[1] and (s_k == GOR or s_k == GOL)
    def t_L(tree_entry):
        return tree_entry[1]
    def t_R(tree_entry):
        return tree_entry[2]

    # arbitrarily, "ROOT attaches to right". We add it here to
    # avoid further complications:
    firstkey = t_R(mpptree[ROOTKEY])
    deps = set([ (k_locPOS(ROOTKEY), k_locPOS(firstkey), RIGHT) ])
    q = [firstkey]

    # walk the best-split tree, collecting (head, argument, dir)
    # dependencies; STOP children just pass the other child through:
    while len(q) > 0:
        k = q.pop()
        if k_terminal(k):
            continue
        else:
            L = t_L( mpptree[k] )
            R = t_R( mpptree[k] )
            if k_locnode( k ) == k_locnode( L ): # Rattach
                deps.add((k_locPOS( k ), k_locPOS( R ), LEFT))
                q.extend( [L, R] )
            elif k_locnode( k ) == k_locnode( R ): # Lattach
                deps.add((k_locPOS( k ), k_locPOS( L ), RIGHT))
                q.extend( [L, R] )
            elif R == STOPKEY:
                q.append( L )
            elif L == STOPKEY:
                q.append( R )
    return deps
def mpp(g, sent):
    """Most probable parse of sent, as a set of
    ((head_tag, loc_h), (arg_tag, loc_a)) dependency pairs."""
    tagf = g.numtag # localized function, todo: speed-test
    tree = make_mpptree(g, sent)
    deps = set()
    for (h, loc_h), (a, loc_a), dir in parse_mpptree(tree, sent):
        deps.add(((tagf(h), loc_h), (tagf(a), loc_a)))
    return deps
866 ########################################################################
867 # testing functions: #
868 ########################################################################
# Small hand-made POS-tag corpus used by the test functions below:
testcorpus = [s.split() for s in ['det nn vbd c vbd','vbd nn c vbd',
                                  'det nn vbd', 'det nn vbd c pp',
                                  'det nn vbd', 'det vbd vbd c pp',
                                  'det nn vbd', 'det nn vbd c vbd',
                                  'det nn vbd', 'det nn vbd c vbd',
                                  'det nn vbd', 'det nn vbd c vbd',
                                  'det nn vbd', 'det nn vbd c pp',
                                  'det nn vbd pp', 'det nn vbd', ]]
def testgrammar():
    # Build the harmonic-initialised grammar over testcorpus, pinning the
    # initialisation constants to the values the regression tests expect.
    import loc_h_harmonic
    reload(loc_h_harmonic)

    # make sure these are the way they were when setting up the tests:
    loc_h_harmonic.HARMONIC_C = 0.0
    loc_h_harmonic.FNONSTOP_MIN = 25
    loc_h_harmonic.FSTOP_MIN = 5
    loc_h_harmonic.RIGHT_FIRST = 1.0
    loc_h_harmonic.OTHER_STOP_CALC = False

    return loc_h_harmonic.initialize(testcorpus)
def testreestimation2():
    """Run one reestimation pass (v.2) over the test corpus and return
    the reestimated grammar.

    Fixes a bug: the original discarded reestimate2's return value (the
    new grammar) and returned the untouched input grammar instead --
    inconsistent with testreestimation(), which returns the result of
    reestimate()."""
    g2 = testgrammar()
    return reestimate2(g2, testcorpus)
def testreestimation():
    """Run one reestimation pass (v.1) over the test corpus and return
    the reestimated grammar."""
    grammar = testgrammar()
    return reestimate(grammar, testcorpus)
def testmpp_regression(mpptree,k_n):
    # Compare mpptree against a frozen snapshot of the expected mpp
    # entries for testcorpus[2]; print a line per missing key or per
    # probability that differs (at 10 decimals).  k_n is how many fields
    # of each key to compare (see comment below).
    mpp = {ROOTKEY: (2.877072116829971e-05, STOPKEY, (0, 3, (2, 3), 1)),
           (0, 1, (1, 1), 0): (0.1111111111111111, (0, 1, (0, 1), 0), STOPKEY),
           (0, 1, (2, 1), 0): (0.049382716049382713, STOPKEY, (0, 1, (1, 1), 0)),
           (0, 3, (1, 3), 1): (0.00027619892321567721,
                               (0, 1, (2, 1), 0),
                               (1, 3, (1, 3), 1)),
           (0, 3, (2, 3), 1): (0.00012275507698474543, STOPKEY, (0, 3, (1, 3), 1)),
           (1, 3, (0, 3), 1): (0.025280986819448362,
                               (1, 2, (0, 3), 1),
                               (2, 3, (2, 4), 2)),
           (1, 3, (1, 3), 1): (0.0067415964851862296, (1, 3, (0, 3), 1), STOPKEY),
           (2, 3, (1, 4), 2): (0.32692307692307693, (2, 3, (0, 4), 2), STOPKEY),
           (2, 3, (2, 4), 2): (0.037721893491124266, STOPKEY, (2, 3, (1, 4), 2))}
    for k,(v,L,R) in mpp.iteritems():
        k2 = k[0:k_n] # 3 if the new does not check loc_h
        if type(k)==str:
            k2 = k
        if k2 not in mpptree:
            print "mpp regression, %s missing"%(k2,)
        else:
            vnew = mpptree[k2][0]
            if not "%.10f"%vnew == "%.10f"%v:
                print "mpp regression, wanted %s=%.5f, got %.5f"%(k2,v,vnew)
def testgrammar_a():
    # Hand-built two-tag grammar (tags 'h' and 'a') with fixed
    # probabilities, for the outer() regression tests below.
    h, a = 0, 1
    p_ROOT, p_STOP, p_ATTACH, p_ORDER = {},{},{},{}
    p_ROOT[h] = 0.9
    p_ROOT[a] = 0.1
    p_STOP[h,LEFT,NON] = 1.0
    p_STOP[h,LEFT,ADJ] = 1.0
    p_STOP[h,RIGHT,NON] = 0.4 # RSTOP
    p_STOP[h,RIGHT,ADJ] = 0.3 # RSTOP
    p_STOP[a,LEFT,NON] = 1.0
    p_STOP[a,LEFT,ADJ] = 1.0
    p_STOP[a,RIGHT,NON] = 0.4 # RSTOP
    p_STOP[a,RIGHT,ADJ] = 0.3 # RSTOP
    p_ATTACH[a,h,LEFT] = 1.0 # not used
    p_ATTACH[a,h,RIGHT] = 1.0 # not used
    p_ATTACH[h,a,LEFT] = 1.0 # not used
    p_ATTACH[h,a,RIGHT] = 1.0 # not used
    p_ATTACH[h,h,LEFT] = 1.0 # not used
    p_ATTACH[h,h,RIGHT] = 1.0 # not used
    p_ORDER[(GOR, h)] = 1.0
    p_ORDER[(GOL, h)] = 0.0
    p_ORDER[(GOR, a)] = 1.0
    p_ORDER[(GOL, a)] = 0.0
    g = DMV_Grammar({h:'h',a:'a'}, {'h':h,'a':a}, p_ROOT, p_STOP, p_ATTACH, p_ORDER)
    # these probabilities are impossible so add them manually:
    # (they overwrite the p_GO_AT table the constructor derived)
    g.p_GO_AT[a,a,LEFT,NON] = 0.4 # Lattach
    g.p_GO_AT[a,a,LEFT,ADJ] = 0.6 # Lattach
    g.p_GO_AT[h,a,LEFT,NON] = 0.2 # Lattach to h
    g.p_GO_AT[h,a,LEFT,ADJ] = 0.1 # Lattach to h
    g.p_GO_AT[a,a,RIGHT,NON] = 1.0 # Rattach
    g.p_GO_AT[a,a,RIGHT,ADJ] = 1.0 # Rattach
    g.p_GO_AT[h,a,RIGHT,NON] = 1.0 # Rattach to h
    g.p_GO_AT[h,a,RIGHT,ADJ] = 1.0 # Rattach to h
    g.p_GO_AT[h,h,LEFT,NON] = 0.2 # Lattach
    g.p_GO_AT[h,h,LEFT,ADJ] = 0.1 # Lattach
    g.p_GO_AT[a,h,LEFT,NON] = 0.4 # Lattach to a
    g.p_GO_AT[a,h,LEFT,ADJ] = 0.6 # Lattach to a
    g.p_GO_AT[h,h,RIGHT,NON] = 1.0 # Rattach
    g.p_GO_AT[h,h,RIGHT,ADJ] = 1.0 # Rattach
    g.p_GO_AT[a,h,RIGHT,NON] = 1.0 # Rattach to a
    g.p_GO_AT[a,h,RIGHT,ADJ] = 1.0 # Rattach to a
    return g
def testgrammar_h():
    # Hand-built one-tag grammar (single tag 'h') with fixed
    # probabilities, for the inner()/outer() regression tests below.
    h = 0
    p_ROOT, p_STOP, p_ATTACH, p_ORDER = {},{},{},{}
    p_ROOT[h] = 1.0
    p_STOP[h,LEFT,NON] = 1.0
    p_STOP[h,LEFT,ADJ] = 1.0
    p_STOP[h,RIGHT,NON] = 0.4
    p_STOP[h,RIGHT,ADJ] = 0.3
    p_ATTACH[h,h,LEFT] = 1.0 # not used
    p_ATTACH[h,h,RIGHT] = 1.0 # not used
    p_ORDER[(GOR, h)] = 1.0
    p_ORDER[(GOL, h)] = 0.0
    g = DMV_Grammar({h:'h'}, {'h':h}, p_ROOT, p_STOP, p_ATTACH, p_ORDER)
    g.p_GO_AT[h,h,LEFT,NON] = 0.6 # these probabilities are impossible
    g.p_GO_AT[h,h,LEFT,ADJ] = 0.7 # so add them manually...
    g.p_GO_AT[h,h,RIGHT,NON] = 1.0
    g.p_GO_AT[h,h,RIGHT,ADJ] = 1.0
    return g
def testreestimation_h():
    # Run reestimation (v.1) on the one-tag grammar with REEST debug
    # output enabled.
    # NOTE(review): the reestimated grammar is discarded (no return) --
    # presumably only the debug output is of interest here; confirm.
    DEBUG.add('REEST')
    g = testgrammar_h()
    reestimate(g,['h h h'.split()])
1000 def test(wanted, got):
1001 if not wanted == got:
1002 raise Warning, "Regression! Should be %s: %s" % (wanted, got)
def regression_tests():
    """Check inner()/outer() probabilities against hand-computed values,
    raising a Warning (via test()) on the first mismatch."""
    testmpp_regression(make_mpptree(testgrammar(), testcorpus[2]),4)
    h = 0

    def chk(wanted, fmt, got):
        # format first, then compare -- exactly what the bare test() calls did
        test(wanted, fmt % got)

    chk("0.120", "%.3f",
        inner(0, 2, (SEAL,h), 0, testgrammar_h(), 'h h'.split(),{}))
    chk("0.063", "%.3f",
        inner(0, 2, (SEAL,h), 1, testgrammar_h(), 'h h'.split(),{}))
    chk("0.1842", "%.4f",
        inner_sent(testgrammar_h(), 'h h h'.split(),{}))

    chk("0.1092", "%.4f",
        inner(0, 3, (SEAL,0), 0, testgrammar_h(), 'h h h'.split(),{}))
    chk("0.0252", "%.4f",
        inner(0, 3, (SEAL,0), 1, testgrammar_h(), 'h h h'.split(),{}))
    chk("0.0498", "%.4f",
        inner(0, 3, (SEAL,h), 2, testgrammar_h(), 'h h h'.split(),{}))

    chk("0.58", "%.2f",
        outer(1, 3, (RGOL,h), 2, testgrammar_h(),'h h h'.split(),{},{}))
    # ftw? can't be right... there's an 0.4 shared between these two...
    chk("0.61", "%.2f",
        outer(1, 3, (RGOL,h), 1, testgrammar_h(),'h h h'.split(),{},{}))

    chk("0.00", "%.2f",
        outer(1, 3, (RGOL,h), 0, testgrammar_h(),'h h h'.split(),{},{}))
    chk("0.00", "%.2f",
        outer(1, 3, (RGOL,h), 3, testgrammar_h(),'h h h'.split(),{},{}))

    chk("0.1089", "%.4f",
        outer(0, 1, (GOR,h), 0,testgrammar_a(),'h a'.split(),{},{}))
    chk("0.3600", "%.4f",
        outer(0, 2, (GOR,h), 0,testgrammar_a(),'h a'.split(),{},{}))
    chk("0.0000", "%.4f",
        outer(0, 3, (GOR,h), 0,testgrammar_a(),'h a'.split(),{},{}))

    # todo: add more of these tests...
def compare_grammars(g1,g2):
    """Return a human-readable diff of the probability tables (p_ATTACH,
    p_STOP, p_ORDER, p_ROOT) of two grammars; the empty string means they
    are equal.

    Values are compared through their str() representation so that any
    difference in float formatting/precision shows up as a mismatch.

    Fix: use .items() instead of the Python-2-only .iteritems() -- the two
    behave identically here, and .items() also works on Python 3."""
    result = ""
    for d1,d2 in [(g1.p_ATTACH,g2.p_ATTACH),(g1.p_STOP,g2.p_STOP),
                  (g1.p_ORDER, g2.p_ORDER), (g1.p_ROOT,g2.p_ROOT) ]:
        # keys of d1: either missing from d2, or possibly different values
        for k,v in d1.items():
            if k not in d2:
                result += "\nreestimate1[%s]=%s missing from reestimate2"%(k,v)
            elif "%s"%d2[k] != "%s"%v:
                result += "\nreestimate1[%s]=%s while \nreestimate2[%s]=%s."%(k,v,k,d2[k])
        # keys only in d2:
        for k,v in d2.items():
            if k not in d1:
                result += "\nreestimate2[%s]=%s missing from reestimate1"%(k,v)
    return result
def testNVNgrammar():
    """Return a harmonic-initialized grammar for the one-sentence
    corpus 'n v n'."""
    from loc_h_harmonic import initialize
    grammar = initialize(['n v n'.split()])
    return grammar # todo
def testIO():
    """Pair every sentence of the test corpus with its inside (sentence)
    probability under the default test grammar."""
    g = testgrammar()
    inners = []
    for sent in testcorpus:
        inners.append((sent, inner_sent(g, sent, {})))
    return inners
if __name__ == "__main__":
    DEBUG.clear()
    regression_tests()

#     import profile
#     profile.run('testreestimation()')

#     import timeit
#     print timeit.Timer("loc_h_dmv.testreestimation()",'''import loc_h_dmv
# reload(loc_h_dmv)''').timeit(1)


#     print "mpp-test:"
#     import pprint
#     for s in testcorpus:
#         print "sent:%s\nparse:set(\n%s)"%(s,pprint.pformat(list(mpp(testgrammar(), s)),
#                                                            width=40))

#     g1 = testreestimation()
#     g2 = testreestimation2()
#     print compare_grammars(g1,g2)

    # Hand-verification of reestimation on the one-sentence corpus 'n v n':
    # q_tree[t] holds the (hand-computed) probability of each of the seven
    # possible dependency trees of the sentence; q_sent is the total inside
    # probability of the sentence, so the tree posteriors f_T_q below
    # should sum to 1.0.
    g = testNVNgrammar()
    q_sent = inner_sent(g,'n v n'.split(),{})
    q_tree = {}
    q_tree[1] = 2.7213e-06 # n_0 -> v, n_0 -> n_2
    q_tree[2] = 9.738e-06 # n -> v -> n
    q_tree[3] = 2.268e-06 # n_0 -> n_2 -> v
    q_tree[4] = 2.7213e-06 # same as 1-3
    q_tree[5] = 9.738e-06
    q_tree[6] = 2.268e-06
    q_tree[7] = 1.086e-05 # n <- v -> n (e-05!!!)
    # f_T_q[t] = P(tree t | sentence)
    f_T_q = {}
    for i,q_t in q_tree.iteritems():
        f_T_q[i] = q_t / q_sent
    import pprint
    pprint.pprint(q_tree)
    pprint.pprint(f_T_q)
    print sum([f for f in f_T_q.values()])

    def treediv(num,den):
        # posterior-weighted relative frequency: the trees in `num`
        # relative to the trees in `den` (duplicates count double)
        return \
            sum([f_T_q[i] for i in num ]) / \
            sum([f_T_q[i] for i in den ])
    g2 = {}

    # The hand-computed rule frequencies below were used to cross-check the
    # reestimated rule probabilities; kept (commented out) for reference:
#     g2['root --> _n_'] = treediv( (1,2,3,4,5,6), (1,2,3,4,5,6,7) )
#     g2['root --> _v_'] = treediv( (7,), (1,2,3,4,5,6,7) )
#     g2['_n_ --> STOP n><'] = treediv( (1,2,3,4,5,6,7,1,2,3,4,5,6,7),
#                                       (1,2,3,4,5,6,7,1,2,3,4,5,6,7))

#     g2['_n_ --> STOP n>< NON'] = treediv( (3,4,5,6),
#                                           (3,4,5,6,4) )

#     g2['_v_ --> STOP v><'] = treediv( (1,2,3,4,5,6,7),
#                                       (1,2,3,4,5,6,7) )
#     nlrtrees = (1,2,3,4,5,6,7,1,2,3,4,5,6,7,
#                 3,4,4,5,6)
#     g2['n>< --> _n_ n><'] = treediv( ( 4, 6), nlrtrees )
#     g2['n>< --> _v_ n><'] = treediv( (3,4,5), nlrtrees )
#     g2['n>< --> n> STOP'] = treediv( (1,2,3,4,5,6,7,1,2,3,4,5,6,7),
#                                      nlrtrees )

#     g2['n>< --> n> STOP ADJ'] = treediv( ( 4,5, 7,1,2,3,4,5,6,7),
#                                          nlrtrees )
#     g2['n>< --> n> STOP NON'] = treediv( (1,2,3, 6),
#                                          nlrtrees )

#     vlrtrees = (1,2,3,4,5,6,7,
#                 7,5)
#     g2['v>< --> _n_ v><'] = treediv( (5,7), vlrtrees )
#     g2['v>< --> v> STOP'] = treediv( (1,2,3,4,5,6,7), vlrtrees )
#     nrtrees = (1,2,3,4,5,6,7,1,2,3,4,5,6,7,
#                1,1,2,3,6)
#     g2['n> --> n> _n_'] = treediv( (1,3), nrtrees )
#     g2['n> --> n> _v_'] = treediv( (1,2,6), nrtrees )

#     g2['n> --> n> _n_ NON'] = treediv( (1,), nrtrees )
#     g2['n> --> n> _n_ ADJ'] = treediv( ( 3,), nrtrees )
#     g2['n> --> n> _v_ ADJ'] = treediv( ( 1,2, 6), nrtrees )

#     vrtrees = (1,2,3,4,5,6,7,
#                7,2)
#     g2['v> --> v> _n_'] = treediv( (2,7), vrtrees )

#     g2[' v|n,R '] = treediv( (1, 2, 6),
#                              (1,1,2,3,6) )
#     g2[' n|n,R '] = treediv( (1, 3),
#                              (1,1,2,3,6) )

    # the three frequencies actually checked against reestimate2():
    g2[' stop|n,R,non '] = treediv( ( 1,2,3,6),
                                    (1,1,2,3,6) )
    g2[' v|n,left '] = treediv( ( 3,4,5),
                                (6,4,3,4,5) )
    g2[' n|n,left '] = treediv( (6,4),
                                (6,4,3,4,5) )

    pprint.pprint(g2)
    # reestimate2 is run twice to eyeball that repeated reestimation
    # produces the expected grammars:
    g3 = reestimate2(g, ['n v n'.split()])
    print g3
    g4 = reestimate2(g, ['n v n'.split()])
    print g4