99b687939737a83f1d4a39cdbefe81d3e664f674
3 # dmv reestimation and inside-outside probabilities using loc_h, and
7 # 1. Grammar-class and related functions
8 # 2. P_INSIDE / inner() and inner_sent()
9 # 3. P_OUTSIDE / outer()
10 # 4. Reestimation v.1: sentences as outer loop
11 # 5. Reestimation v.2: head-types as outer loop
12 # 6. Most Probable Parse
13 # 7. Testing functions
16 from common_dmv
import *
18 ### todo: debug with @accepts once in a while, but it's SLOW
19 # from typecheck import accepts, Any
21 if __name__
== "__main__":
22 print "loc_h_dmv module tests:"
def adj(middle, loc_h):
    "middle is eg. k when rewriting for i<k<j (inside probabilities)."
    # The rewrite position is adjacent iff it touches either side of the
    # head's between-word location.
    return middle in (loc_h, loc_h + 1)  # ADJ == True
28 def make_GO_AT(p_STOP
,p_ATTACH
):
30 for (a
,h
,dir), p_ah
in p_ATTACH
.iteritems():
31 p_GO_AT
[a
,h
,dir, NON
] = p_ah
* (1-p_STOP
[h
, dir, NON
])
32 p_GO_AT
[a
,h
,dir, ADJ
] = p_ah
* (1-p_STOP
[h
, dir, ADJ
])
35 class DMV_Grammar(io
.Grammar
):
39 return "%d=%s" % (n
, self
.numtag(n
))
41 if key
in dict: return dict[key
]
43 def no_zeroL(str,tagstr
,prob
):
44 if prob
> 0.0: return (str%(tagstr
,prob
)).ljust(LJUST
)
45 else: return "".ljust(LJUST
)
46 def no_zeroR(str,tagstr
,prob
):
47 if prob
> 0.0: return str%(tagstr
,prob
)
50 p_L
= p(self
.p_ATTACH
,(a
,h
,LEFT
))
51 p_R
= p(self
.p_ATTACH
,(a
,h
,RIGHT
))
52 if p_L
== 0.0 and p_R
== 0.0:
56 str = "p_ATTACH[%s|%s,L] = %s" % (t(a
), t(h
), p_L
)
57 str = str.ljust(LJUST
)
61 str = str.ljust(LJUST
)
62 str += "p_ATTACH[%s|%s,R] = %s" % (t(a
), t(h
), p_R
)
65 root
, stop
, att
, ord = "","","",""
66 for h
in self
.headnums():
67 root
+= no_zeroL("\np_ROOT[%s] = %s", t(h
), p(self
.p_ROOT
, (h
)))
69 stop
+= no_zeroL("p_STOP[stop|%s,L,adj] = %s", t(h
), p(self
.p_STOP
, (h
,LEFT
,ADJ
)))
70 stop
+= no_zeroR("p_STOP[stop|%s,R,adj] = %s", t(h
), p(self
.p_STOP
, (h
,RIGHT
,ADJ
)))
72 stop
+= no_zeroL("p_STOP[stop|%s,L,non] = %s", t(h
), p(self
.p_STOP
, (h
,LEFT
,NON
)))
73 stop
+= no_zeroR("p_STOP[stop|%s,R,non] = %s", t(h
), p(self
.p_STOP
, (h
,RIGHT
,NON
)))
74 att
+= ''.join([p_a(a
,h
) for a
in self
.headnums()])
76 ord += no_zeroL("p_ORDER[ left-first|%s ] = %s", t(h
), p(self
.p_ORDER
, (GOL
,h
)))
77 ord += no_zeroR("p_ORDER[right-first|%s ] = %s", t(h
), p(self
.p_ORDER
, (GOR
,h
)))
78 return root
+ stop
+ att
+ ord
80 def __init__(self
, numtag
, tagnum
, p_ROOT
, p_STOP
, p_ATTACH
, p_ORDER
):
81 io
.Grammar
.__init
__(self
, numtag
, tagnum
)
82 self
.p_ROOT
= p_ROOT
# p_ROOT[w] = p
83 self
.p_ORDER
= p_ORDER
# p_ORDER[seals, w] = p
84 self
.p_STOP
= p_STOP
# p_STOP[w, LEFT, NON] = p (etc. for LA,RN,RA)
85 self
.p_ATTACH
= p_ATTACH
# p_ATTACH[a, h, LEFT] = p (etc. for R)
86 # p_GO_AT[a, h, LEFT, NON] = p (etc. for LA,RN,RA)
87 self
.p_GO_AT
= make_GO_AT(self
.p_STOP
, self
.p_ATTACH
)
88 # these are used in reestimate2():
91 def get_iochart(self
, sent_nums
):
92 ch_key
= tuple(sent_nums
)
94 ichart
= self
._icharts
[ch_key
]
98 ochart
= self
._ocharts
[ch_key
]
101 return (ichart
, ochart
)
103 def set_iochart(self
, sent_nums
, ichart
, ochart
):
104 self
._icharts
[tuple(sent_nums
)] = ichart
105 self
._ocharts
[tuple(sent_nums
)] = ochart
107 def reset_iocharts(self
):
111 def p_GO_AT_or0(self
, a
, h
, dir, adj
):
113 return self
.p_GO_AT
[a
, h
, dir, adj
]
118 def locs(sent_nums
, start
, stop
):
119 '''Return the between-word locations of all words in some fragment of
120 sent. We make sure to offset the locations correctly so that for
121 any w in the returned list, sent[w]==loc_w.
123 start is inclusive, stop is exclusive, as in klein-thesis and
124 Python's list-slicing.'''
125 for i0
,w
in enumerate(sent_nums
[start
:stop
]):
129 ###################################################
130 # P_INSIDE (dmv-specific) #
131 ###################################################
133 #@accepts(int, int, (int, int), int, Any(), [str], {tuple:float}, IsOneOf(None,{}))
134 def inner(i
, j
, node
, loc_h
, g
, sent
, ichart
, mpptree
=None):
135 ''' The ichart is of this form:
136 ichart[i,j,LHS, loc_h]
137 where i and j are between-word positions.
139 loc_h gives adjacency (along with k for attachment rules), and is
140 needed in P_STOP reestimation.
142 sent_nums
= g
.sent_nums(sent
)
144 def terminal(i
,j
,node
, loc_h
, tabs
):
145 if not i
<= loc_h
< j
:
147 print "%s*= 0.0 (wrong loc_h)" % tabs
149 elif POS(node
) == sent_nums
[i
] and node
in g
.p_ORDER
:
150 # todo: add to ichart perhaps? Although, it _is_ simple lookup..
151 prob
= g
.p_ORDER
[node
]
154 print "%sLACKING TERMINAL:" % tabs
157 print "%s*= %.4f (terminal: %s -> %s_%d)" % (tabs
,prob
, node_str(node
), sent
[i
], loc_h
)
160 def e(i
,j
, (s_h
,h
), loc_h
, n_t
):
163 key
= (i
,j
, (s_h
,h
), loc_h
)
164 if key
not in mpptree
:
165 mpptree
[key
] = (p
, L
, R
)
166 elif mpptree
[key
][0] < p
:
167 mpptree
[key
] = (p
, L
, R
)
170 "Tabs for debug output"
173 if (i
, j
, (s_h
,h
), loc_h
) in ichart
:
175 print "%s*= %.4f in ichart: i:%d j:%d node:%s loc:%s" % (tab(),ichart
[i
, j
, (s_h
,h
), loc_h
], i
, j
,
176 node_str((s_h
,h
)), loc_h
)
177 return ichart
[i
, j
, (s_h
,h
), loc_h
]
179 # Either terminal rewrites, using p_ORDER:
180 if i
+1 == j
and (s_h
== GOR
or s_h
== GOL
):
181 return terminal(i
, j
, (s_h
,h
), loc_h
, tab())
182 else: # Or not at terminal level yet:
184 print "%s%s (%.1f) from %d to %d" % (tab(),node_str((s_h
,h
)),loc_h
,i
,j
)
186 if h
== POS(ROOT
): # only used in testing, o/w we use inner_sent
188 if i
!= 0 or j
!= len(sent
): raise ValueError
189 else: return g
.p_ROOT
[h
] * e(i
,j
,(SEAL
,h
),loc_h
,n_t
+1)
190 p_RGOL
= g
.p_STOP
[h
, LEFT
, adj(i
,loc_h
)] * e(i
,j
,(RGOL
,h
),loc_h
,n_t
+1)
191 p_LGOR
= g
.p_STOP
[h
, RIGHT
, adj(j
,loc_h
)] * e(i
,j
,(LGOR
,h
),loc_h
,n_t
+1)
193 to_mpp(p_RGOL
, STOPKEY
, (i
,j
, (RGOL
,h
),loc_h
))
194 to_mpp(p_LGOR
, (i
,j
, (RGOL
,h
),loc_h
), STOPKEY
)
196 print "%sp= %.4f (STOP)" % (tab(), p
)
197 elif s_h
== RGOL
or s_h
== GOL
:
200 p
= g
.p_STOP
[h
, RIGHT
, adj(j
,loc_h
)] * e(i
,j
, (GOR
,h
),loc_h
,n_t
+1)
201 to_mpp(p
, (i
,j
, (GOR
,h
),loc_h
), STOPKEY
)
202 for k
in xgo_left(i
, loc_h
): # i < k <= loc_l(h)
203 p_R
= e(k
, j
, ( s_h
,h
), loc_h
, n_t
+1)
205 for loc_a
,a
in locs(sent_nums
, i
, k
):
206 p_ah
= g
.p_GO_AT_or0(a
, h
, LEFT
, adj(k
,loc_h
))
208 p_L
= e(i
, k
, (SEAL
,a
), loc_a
, n_t
+1)
209 p_add
= p_L
* p_ah
* p_R
212 (i
, k
, (SEAL
,a
), loc_a
),
213 (k
, j
, ( s_h
,h
), loc_h
))
215 print "%sp= %.4f (ATTACH)" % (tab(), p
)
216 elif s_h
== GOR
or s_h
== LGOR
:
219 p
= g
.p_STOP
[h
, LEFT
, adj(i
,loc_h
)] * e(i
,j
, (GOL
,h
),loc_h
,n_t
+1)
220 to_mpp(p
, (i
,j
, (GOL
,h
),loc_h
), STOPKEY
)
221 for k
in xgo_right(loc_h
, j
): # loc_l(h) < k < j
222 p_L
= e(i
, k
, ( s_h
,h
), loc_h
, n_t
+1)
224 for loc_a
,a
in locs(sent_nums
,k
,j
):
225 p_ah
= g
.p_GO_AT_or0(a
, h
, RIGHT
, adj(k
,loc_h
))
226 p_R
= e(k
, j
, (SEAL
,a
), loc_a
, n_t
+1)
227 p_add
= p_L
* p_ah
* p_R
230 (i
, k
, ( s_h
,h
), loc_h
),
231 (k
, j
, (SEAL
,a
), loc_a
))
234 print "%sp= %.4f (ATTACH)" % (tab(), p
)
235 # elif s_h == GOL: # todo
237 ichart
[i
, j
, (s_h
,h
), loc_h
] = p
241 inner_prob
= e(i
,j
,node
,loc_h
, 0)
243 print debug_ichart(g
,sent
,ichart
)
245 # end of dmv.inner(i, j, node, loc_h, g, sent, ichart,mpptree)
248 def debug_ichart(g
,sent
,ichart
):
249 str = "---ICHART:---\n"
250 for (s
,t
,LHS
,loc_h
),v
in ichart
.iteritems():
251 str += "%s -> %s_%d ... %s_%d (loc_h:%s):\t%s\n" % (node_str(LHS
,g
.numtag
),
252 sent
[s
], s
, sent
[s
], t
, loc_h
, v
)
253 str += "---ICHART:end---\n"
257 def inner_sent(g
, sent
, ichart
):
258 return sum([g
.p_ROOT
[w
] * inner(0, len(sent
), (SEAL
,w
), loc_w
, g
, sent
, ichart
)
259 for loc_w
,w
in locs(g
.sent_nums(sent
),0,len(sent
))])
265 ###################################################
266 # P_OUTSIDE (dmv-specific) #
267 ###################################################
269 #@accepts(int, int, (int, int), int, Any(), [str], {tuple:float}, {tuple:float})
270 def outer(i
,j
,w_node
,loc_w
, g
, sent
, ichart
, ochart
):
271 ''' http://www.student.uib.no/~kun041/dmvccm/DMVCCM.html#outer
273 w_node is a pair (seals,POS); the w in klein-thesis is made up of
276 sent_nums
= g
.sent_nums(sent
)
277 if POS(w_node
) not in sent_nums
[i
:j
]:
278 # sanity check, w must be able to dominate sent[i:j]
282 def e(i
,j
,LHS
,loc_h
): # P_{INSIDE}
284 return ichart
[i
,j
,LHS
,loc_h
]
286 return inner(i
,j
,LHS
,loc_h
,g
,sent
,ichart
)
288 def f(i
,j
,w_node
,loc_w
):
289 if not (i
<= loc_w
< j
):
291 if (i
,j
,w_node
,loc_w
) in ochart
:
292 return ochart
[i
,j
, w_node
,loc_w
]
294 if i
== 0 and j
== len(sent
):
296 else: # ROOT may only be used on full sentence
298 # but we may have non-ROOTs (stops) over full sentence too:
302 # todo: try either if p_M > 0.0: or sum(), and speed-test them
304 if s_w
== SEAL
: # w == a
305 # todo: do the i<sent<j check here to save on calls?
306 p
= g
.p_ROOT
[w
] * f(i
,j
,ROOT
,loc_w
)
308 for k
in xgt(j
, sent
): # j<k<len(sent)+1
309 for loc_h
,h
in locs(sent_nums
,j
,k
):
310 p_wh
= g
.p_GO_AT_or0(w
, h
, LEFT
, adj(j
, loc_h
))
311 for s_h
in [RGOL
, GOL
]:
312 p
+= f(i
,k
,(s_h
,h
),loc_h
) * p_wh
* e(j
,k
,(s_h
,h
),loc_h
)
314 for k
in xlt(i
): # k<i
315 for loc_h
,h
in locs(sent_nums
,k
,i
):
316 p_wh
= g
.p_GO_AT_or0(w
, h
, RIGHT
, adj(i
, loc_h
))
317 for s_h
in [LGOR
, GOR
]:
318 p
+= e(k
,i
,(s_h
,h
), loc_h
) * p_wh
* f(k
,j
,(s_h
,h
), loc_h
)
320 elif s_w
== RGOL
or s_w
== GOL
: # w == h, left stop + left attach
325 p
= g
.p_STOP
[w
, LEFT
, adj(i
,loc_w
)] * f(i
,j
,( s_h
,w
),loc_w
)
326 for k
in xlt(i
): # k<i
327 for loc_a
,a
in locs(sent_nums
,k
,i
):
328 p_aw
= g
.p_GO_AT_or0(a
, w
, LEFT
, adj(i
, loc_w
))
329 p
+= e(k
,i
, (SEAL
,a
),loc_a
) * p_aw
* f(k
,j
,w_node
,loc_w
)
331 elif s_w
== GOR
or s_w
== LGOR
: # w == h, right stop + right attach
336 p
= g
.p_STOP
[w
, RIGHT
, adj(j
,loc_w
)] * f(i
,j
,( s_h
,w
),loc_w
)
337 for k
in xgt(j
, sent
): # j<k<len(sent)+1
338 for loc_a
,a
in locs(sent_nums
,j
,k
):
339 p_ah
= g
.p_GO_AT_or0(a
, w
, RIGHT
, adj(j
, loc_w
))
340 p
+= f(i
,k
,w_node
,loc_w
) * p_ah
* e(j
,k
,(SEAL
,a
),loc_a
)
342 ochart
[i
,j
,w_node
,loc_w
] = p
346 return f(i
,j
,w_node
,loc_w
)
347 # end outer(i,j,w_node,loc_w, g,sent, ichart,ochart)
352 ###################################################
353 # Reestimation v.1: #
354 # Sentences as outer loop #
355 ###################################################
357 def reest_zeros(h_nums
):
358 '''A dict to hold numerators and denominators for our 6+ reestimation
361 fr
= { ('ROOT','den'):0.0 } # holds sum over f_sent!! not p_sent...
363 fr
['ROOT','num',h
] = 0.0
364 for s_h
in [GOR
,GOL
,RGOL
,LGOR
]:
366 fr
['hat_a','den',x
] = 0.0 # = c()
367 # not all arguments are attached to, so we just initialize
368 # fr['hat_a','num',a,(s_h,h)] as they show up, in reest_freq
369 for adj
in [NON
, ADJ
]:
370 for nd
in ['num','den']:
371 fr
['STOP',nd
,x
,adj
] = 0.0
375 def reest_freq(g
, corpus
):
376 fr
= reest_zeros(g
.headnums())
379 p_sent
= None # 50 % speed increase on storing this locally
381 # local functions altogether 2x faster than global
382 def c(i
,j
,LHS
,loc_h
,sent
):
386 p_in
= e(i
,j
, LHS
,loc_h
,sent
)
390 p_out
= f(i
,j
, LHS
,loc_h
,sent
)
391 return p_in
* p_out
/ p_sent
394 def f(i
,j
,LHS
,loc_h
,sent
): # P_{OUTSIDE}
396 return ochart
[i
,j
,LHS
,loc_h
]
398 return outer(i
,j
,LHS
,loc_h
,g
,sent
,ichart
,ochart
)
401 def e(i
,j
,LHS
,loc_h
,sent
): # P_{INSIDE}
403 return ichart
[i
,j
,LHS
,loc_h
]
405 return inner(i
,j
,LHS
,loc_h
,g
,sent
,ichart
)
408 def w_left(i
,j
, x
,loc_h
,sent
,sent_nums
):
409 if not p_sent
> 0.0: return
413 for k
in xtween(i
, j
):
414 p_out
= f(i
,j
, x
,loc_h
, sent
)
417 p_R
= e(k
,j
, x
,loc_h
, sent
)
421 for loc_a
,a
in locs(sent_nums
, i
,k
): # i<=loc_l(a)<k
422 p_rule
= g
.p_GO_AT_or0(a
, h
, LEFT
, adj(k
, loc_h
))
423 p_L
= e(i
,k
, (SEAL
,a
), loc_a
, sent
)
424 p
= p_L
* p_out
* p_R
* p_rule
426 except KeyError: a_k
[a
] = p
428 for a
,p
in a_k
.iteritems():
429 try: fr
['hat_a','num',a
,x
] += p
/ p_sent
430 except KeyError: fr
['hat_a','num',a
,x
] = p
/ p_sent
431 # end reest_freq.w_left()
433 def w_right(i
,j
, x
,loc_h
,sent
,sent_nums
):
434 if not p_sent
> 0.0: return
438 for k
in xtween(i
, j
):
439 p_out
= f(i
,j
, x
,loc_h
, sent
)
442 p_L
= e(i
,k
, x
,loc_h
, sent
)
446 for loc_a
,a
in locs(sent_nums
, k
,j
): # k<=loc_l(a)<j
447 p_rule
= g
.p_GO_AT_or0(a
, h
, RIGHT
, adj(k
, loc_h
))
448 p_R
= e(k
,j
, (SEAL
,a
),loc_a
, sent
)
449 p
= p_L
* p_out
* p_R
* p_rule
451 except KeyError: a_k
[a
] = p
453 for a
,p
in a_k
.iteritems():
454 try: fr
['hat_a','num',a
,x
] += p
/ p_sent
455 except KeyError: fr
['hat_a','num',a
,x
] = p
/ p_sent
456 # end reest_freq.w_right()
464 p_sent
= inner_sent(g
, sent
, ichart
)
465 fr
['ROOT','den'] += 1 # divide by p_sent per h!
467 sent_nums
= g
.sent_nums(sent
)
469 for loc_h
,h
in locs(sent_nums
,0,len(sent
)+1): # locs-stop is exclusive, thus +1
471 fr
['ROOT','num',h
] += g
.p_ROOT
[h
] * e(0,len(sent
), (SEAL
,h
),loc_h
, sent
) \
477 # left non-adjacent stop:
478 for i
in xlt(loc_l_h
):
479 fr
['STOP','num',(GOL
,h
),NON
] += c(loc_l_h
, j
, (LGOR
, h
),loc_h
, sent
)
480 fr
['STOP','den',(GOL
,h
),NON
] += c(loc_l_h
, j
, (GOL
, h
),loc_h
, sent
)
481 for j
in xgteq(loc_r_h
, sent
):
482 fr
['STOP','num',(RGOL
,h
),NON
] += c(i
, j
, (SEAL
, h
),loc_h
, sent
)
483 fr
['STOP','den',(RGOL
,h
),NON
] += c(i
, j
, (RGOL
, h
),loc_h
, sent
)
484 # left adjacent stop, i = loc_l_h
485 fr
['STOP','num',(GOL
,h
),ADJ
] += c(loc_l_h
, loc_r_h
, (LGOR
, h
),loc_h
, sent
)
486 fr
['STOP','den',(GOL
,h
),ADJ
] += c(loc_l_h
, loc_r_h
, (GOL
, h
),loc_h
, sent
)
487 for j
in xgteq(loc_r_h
, sent
):
488 fr
['STOP','num',(RGOL
,h
),ADJ
] += c(loc_l_h
, j
, (SEAL
, h
),loc_h
, sent
)
489 fr
['STOP','den',(RGOL
,h
),ADJ
] += c(loc_l_h
, j
, (RGOL
, h
),loc_h
, sent
)
490 # right non-adjacent stop:
491 for j
in xgt(loc_r_h
, sent
):
492 fr
['STOP','num',(GOR
,h
),NON
] += c(loc_l_h
, j
, (RGOL
, h
),loc_h
, sent
)
493 fr
['STOP','den',(GOR
,h
),NON
] += c(loc_l_h
, j
, (GOR
, h
),loc_h
, sent
)
494 for i
in xlteq(loc_l_h
):
495 fr
['STOP','num',(LGOR
,h
),NON
] += c(loc_l_h
, j
, (SEAL
, h
),loc_h
, sent
)
496 fr
['STOP','den',(LGOR
,h
),NON
] += c(loc_l_h
, j
, (LGOR
, h
),loc_h
, sent
)
497 # right adjacent stop, j = loc_r_h
498 fr
['STOP','num',(GOR
,h
),ADJ
] += c(loc_l_h
, loc_r_h
, (RGOL
, h
),loc_h
, sent
)
499 fr
['STOP','den',(GOR
,h
),ADJ
] += c(loc_l_h
, loc_r_h
, (GOR
, h
),loc_h
, sent
)
500 for i
in xlteq(loc_l_h
):
501 fr
['STOP','num',(LGOR
,h
),ADJ
] += c(loc_l_h
, j
, (SEAL
, h
),loc_h
, sent
)
502 fr
['STOP','den',(LGOR
,h
),ADJ
] += c(loc_l_h
, j
, (LGOR
, h
),loc_h
, sent
)
505 if 'REEST_ATTACH' in DEBUG
:
506 print "Lattach %s: for i < %s"%(g
.numtag(h
),sent
[0:loc_h
+1])
507 for s_h
in [RGOL
, GOL
]:
509 for i
in xlt(loc_l_h
): # i < loc_l(h)
510 if 'REEST_ATTACH' in DEBUG
:
511 print "\tfor j >= %s"%sent
[loc_h
:len(sent
)]
512 for j
in xgteq(loc_r_h
, sent
): # j >= loc_r(h)
513 fr
['hat_a','den',x
] += c(i
,j
, x
,loc_h
, sent
) # v_q in L&Y
514 if 'REEST_ATTACH' in DEBUG
:
515 print "\t\tc( %d , %d, %s, %s, sent)=%.4f"%(i
,j
,node_str(x
),loc_h
,fr
['hat_a','den',x
])
516 w_left(i
, j
, x
,loc_h
, sent
,sent_nums
) # compute w for all a in sent
519 if 'REEST_ATTACH' in DEBUG
:
520 print "Rattach %s: for i <= %s"%(g
.numtag(h
),sent
[0:loc_h
+1])
521 for s_h
in [GOR
, LGOR
]:
523 for i
in xlteq(loc_l_h
): # i <= loc_l(h)
524 if 'REEST_ATTACH' in DEBUG
:
525 print "\tfor j > %s"%sent
[loc_h
:len(sent
)]
526 for j
in xgt(loc_r_h
, sent
): # j > loc_r(h)
527 fr
['hat_a','den',x
] += c(i
,j
, x
,loc_h
, sent
) # v_q in L&Y
528 if 'REEST_ATTACH' in DEBUG
:
529 print "\t\tc( %d , %d, %s, %s, sent)=%.4f"%(loc_h
,j
,node_str(x
),loc_h
,fr
['hat_a','den',x
])
530 w_right(i
,j
, x
,loc_h
, sent
,sent_nums
) # compute w for all a in sent
536 def reestimate(old_g
, corpus
):
537 fr
= reest_freq(old_g
, corpus
)
538 p_ROOT
, p_STOP
, p_ATTACH
= {},{},{}
540 for h
in old_g
.headnums():
541 # reest_head changes p_ROOT, p_STOP, p_ATTACH
542 reest_head(h
, fr
, old_g
, p_ROOT
, p_STOP
, p_ATTACH
)
543 p_ORDER
= old_g
.p_ORDER
544 numtag
, tagnum
= old_g
.get_nums_tags()
546 new_g
= DMV_Grammar(numtag
, tagnum
, p_ROOT
, p_STOP
, p_ATTACH
, p_ORDER
)
550 def reest_head(h
, fr
, g
, p_ROOT
, p_STOP
, p_ATTACH
):
551 "Given a single head, update g with the reestimated probability."
552 # remove 0-prob stuff? todo
554 p_ROOT
[h
] = fr
['ROOT','num',h
] / fr
['ROOT','den']
558 for dir in [LEFT
,RIGHT
]:
559 for adj
in [ADJ
, NON
]: # p_STOP
560 p_STOP
[h
, dir, adj
] = 0.0
561 for s_h
in dirseal(dir):
563 p
= fr
['STOP','den', x
, adj
]
565 p
= fr
['STOP', 'num', x
, adj
] / p
566 p_STOP
[h
, dir, adj
] += p
568 for s_h
in dirseal(dir): # make hat_a for p_ATTACH
570 p_c
= fr
['hat_a','den',x
]
572 for a
in g
.headnums():
573 if (a
,h
,dir) not in p_ATTACH
:
574 p_ATTACH
[a
,h
,dir] = 0.0
575 try: # (a,x) might not be in hat_a
576 p_ATTACH
[a
,h
,dir] += fr
['hat_a','num',a
,x
] / p_c
577 except KeyError: pass
578 except ZeroDivisionError: pass
584 ###################################################
585 # Reestimation v.2: #
586 # Heads as outer loop #
587 ###################################################
589 def locs_h(h
, sent_nums
):
590 '''Return the between-word locations of all tokens of h in sent.'''
591 return [loc_w
for loc_w
,w
in locs(sent_nums
, 0, len(sent_nums
))
594 def locs_a(a
, sent_nums
, start
, stop
):
595 '''Return the between-word locations of all tokens of h in some
596 fragment of sent. We make sure to offset the locations correctly
597 so that for any w in the returned list, sent[w]==loc_w.
599 start is inclusive, stop is exclusive, as in klein-thesis and
600 Python's list-slicing (eg. return left-loc).'''
601 return [loc_w
for loc_w
,w
in locs(sent_nums
, start
, stop
)
604 def inner2(i
, j
, node
, loc_h
, g
, sent
):
605 ichart
,ochart
= g
.get_iochart(s_n
)
606 try: p
= ichart
[i
,j
,x
,loc_h
]
607 except KeyError: p
= inner(i
,j
,x
,loc_h
,g
,sent
,ichart
)
608 g
.set_iochart(s_n
,ichart
,ochart
)
611 def inner_sent2(g
, sent
):
612 ichart
,ochart
= g
.get_iochart(s_n
)
613 p
= inner_sent(g
,sent
,ichart
)
614 g
.set_iochart(s_n
,ichart
,ochart
)
def outer2(i, j, w_node, loc_w, g, sent):
    """P_OUTSIDE with chart caching on the grammar (reestimate2 helper).

    Like outer(), but fetches and stores the inside/outside charts through
    g.get_iochart()/g.set_iochart() instead of taking them as arguments.
    """
    # bugfix: s_n was referenced without ever being bound (NameError);
    # compute it from the sentence as the sibling helpers do.
    s_n = g.sent_nums(sent)
    ichart, ochart = g.get_iochart(s_n)
    try:
        p = ochart[i, j, w_node, loc_w]
    except KeyError:
        # bugfix: the fallback must compute the *outside* probability;
        # the original called inner() here, which also mis-bound ochart
        # to inner()'s mpptree parameter.
        p = outer(i, j, w_node, loc_w, g, sent, ichart, ochart)
    g.set_iochart(s_n, ichart, ochart)
    return p
624 def reestimate2(old_g
, corpus
):
625 p_ROOT
, p_STOP
, p_ATTACH
= {},{},{}
627 for h
in old_g
.headnums():
628 # reest_head changes p_ROOT, p_STOP, p_ATTACH
629 reest_head2(h
, old_g
, corpus
, p_ROOT
, p_STOP
, p_ATTACH
)
630 p_ORDER
= old_g
.p_ORDER
631 numtag
, tagnum
= old_g
.get_nums_tags()
633 new_g
= DMV_Grammar(numtag
, tagnum
, p_ROOT
, p_STOP
, p_ATTACH
, p_ORDER
)
636 def hat_d2(xbar
, x
, xi
, xj
, g
, corpus
): # stop helper
637 def c(x
,loc_x
,i
,j
): return c2(x
,loc_x
,i
,j
,g
,s_n
,sent
)
640 if h
!= POS(xbar
): raise ValueError
643 for s_n
,sent
in [(g
.sent_nums(sent
),sent
) for sent
in corpus
]:
644 for loc_h
in locs_h(h
,s_n
):
645 loc_l_h
, loc_r_h
= loc_h
, loc_h
+ 1
646 for i
in xi(loc_l_h
):
647 for j
in xj(loc_r_h
, s_n
):
648 # print "s:%s %d,%d"%(sent,i,j)
649 num
+= c(xbar
,loc_h
,i
,j
)
650 den
+= c(x
,loc_h
,i
,j
)
653 return num
/den
# eg. SEAL/RGOL, xbar/x
656 def c2(x
,loc_h
,i
,j
,g
,s_n
,sent
):
657 ichart
,ochart
= g
.get_iochart(s_n
)
659 def f(i
,j
,x
,loc_h
): # P_{OUTSIDE}
660 try: return ochart
[i
,j
,x
,loc_h
]
661 except KeyError: return outer(i
,j
,x
,loc_h
,g
,sent
,ichart
,ochart
)
662 def e(i
,j
,x
,loc_h
): # P_{INSIDE}
663 try: return ichart
[i
,j
,x
,loc_h
]
664 except KeyError: return inner(i
,j
,x
,loc_h
,g
,sent
,ichart
)
666 p_sent
= inner_sent(g
, sent
, ichart
)
670 p_in
= e(i
,j
, x
,loc_h
)
674 p_out
= f(i
,j
, x
,loc_h
)
676 g
.set_iochart(s_n
,ichart
,ochart
)
677 return p_in
* p_out
/ p_sent
679 def w2(a
, x
,loc_h
, dir, i
, j
, g
, s_n
,sent
):
680 ichart
,ochart
= g
.get_iochart(s_n
)
682 def f(i
,j
,x
,loc_h
): # P_{OUTSIDE}
683 try: return ochart
[i
,j
,x
,loc_h
]
684 except KeyError: return outer(i
,j
,x
,loc_h
,g
,sent
,ichart
,ochart
)
685 def e(i
,j
,x
,loc_h
): # P_{INSIDE}
686 try: return ichart
[i
,j
,x
,loc_h
]
687 except KeyError: return inner(i
,j
,x
,loc_h
,g
,sent
,ichart
)
690 p_sent
= inner_sent(g
, sent
, ichart
)
698 for k
in xtween(i
,j
):
703 for loc_a
in locs_a(a
, s_n
, start
, stop
):
705 loc_L
, loc_R
= loc_a
, loc_h
707 loc_L
, loc_R
= loc_h
, loc_a
708 p
= g
.p_GO_AT_or0(a
,h
,dir,adj(k
,loc_h
))
709 in_L
= e(i
,k
,L
,loc_L
)
710 in_R
= e(k
,j
,R
,loc_R
)
712 w_sum
+= p
* in_L
* in_R
* out
714 g
.set_iochart(s_n
,ichart
,ochart
)
717 def hat_a2(a
, x
, dir, g
, corpus
): # attachment helper
718 def w(a
,x
,loc_x
,dir,i
,j
): return w2(a
,x
,loc_x
,dir,i
,j
,g
,s_n
,sent
)
719 def c(x
,loc_x
,i
,j
): return c2(x
,loc_x
,i
,j
,g
,s_n
,sent
)
728 for s_n
,sent
in [(g
.sent_nums(sent
),sent
) for sent
in corpus
]:
729 for loc_h
in locs_h(h
,s_n
):
730 loc_l_h
, loc_r_h
= loc_h
, loc_h
+ 1
731 for i
in xi(loc_l_h
):
732 for j
in xj(loc_r_h
,sent
):
733 num
+= w(a
, x
,loc_h
, dir, i
,j
)
734 den
+= c(x
,loc_h
, i
,j
)
739 def reest_root2(h
,g
,corpus
):
742 for s_n
,sent
in [(g
.sent_nums(sent
),sent
) for sent
in corpus
]:
745 ichart
, ochart
= g
.get_iochart(s_n
)
746 den
+= inner_sent(g
, sent
, ichart
)
747 for loc_h
in locs_h(h
,s_n
):
750 inner(0, len(s_n
), (SEAL
,h
), loc_h
, g
, sent
, ichart
)
751 g
.set_iochart(s_n
, ichart
, ochart
)
753 return sum / corpus_size
755 def reest_head2(h
, g
, corpus
, p_ROOT
, p_STOP
, p_ATTACH
):
756 print "h: %d=%s ..."%(h
,g
.numtag(h
)),
757 def hat_d(xbar
,x
,xi
,xj
): return hat_d2(xbar
,x
,xi
,xj
, g
, corpus
)
758 def hat_a(a
, x
, dir ): return hat_a2(a
, x
, dir, g
, corpus
)
760 p_STOP
[h
, LEFT
,NON
] = \
761 hat_d((SEAL
,h
),(RGOL
,h
),xlt
, xgteq
) + \
762 hat_d((LGOR
,h
),( GOL
,h
),xlt
, xeq
)
763 p_STOP
[h
, LEFT
,ADJ
] = \
764 hat_d((SEAL
,h
),(RGOL
,h
),xeq
, xgteq
) + \
765 hat_d((LGOR
,h
),( GOL
,h
),xeq
, xeq
)
766 p_STOP
[h
,RIGHT
,NON
] = \
767 hat_d((RGOL
,h
),( GOR
,h
),xeq
, xgt
) + \
768 hat_d((SEAL
,h
),(LGOR
,h
),xlteq
,xgt
)
769 p_STOP
[h
,RIGHT
,ADJ
] = \
770 hat_d((RGOL
,h
),( GOR
,h
),xeq
, xeq
) + \
771 hat_d((SEAL
,h
),(LGOR
,h
),xlteq
,xeq
)
772 print "stops done...",
774 p_ROOT
[h
] = reest_root2(h
,g
,corpus
)
775 print "root done...",
777 for a
in g
.headnums():
778 p_ATTACH
[a
,h
,LEFT
] = \
779 hat_a(a
, (GOL
,h
),LEFT
) + \
780 hat_a(a
,(RGOL
,h
),LEFT
)
781 p_ATTACH
[a
,h
,RIGHT
] = \
782 hat_a(a
, (GOR
,h
),RIGHT
) + \
783 hat_a(a
,(LGOR
,h
),RIGHT
)
785 print "attachment done"
789 ###################################################
790 # Most Probable Parse: #
791 ###################################################
793 STOPKEY
= (-1,-1,STOP
,-1)
794 ROOTKEY
= (-1,-1,ROOT
,-1)
796 def make_mpptree(g
, sent
):
797 '''Tell inner() to make an mpptree, connect ROOT to this. (Logically,
798 this should be part of inner_sent though...)'''
800 mpptree
= { ROOTKEY
:(0.0, ROOTKEY
, None) }
801 for loc_w
,w
in locs(g
.sent_nums(sent
),0,len(sent
)):
802 p
= g
.p_ROOT
[w
] * inner(0, len(sent
), (SEAL
,w
), loc_w
, g
, sent
, ichart
, mpptree
)
804 R
= (0,len(sent
), (SEAL
,w
), loc_w
)
805 if mpptree
[ROOTKEY
][0] < p
:
806 mpptree
[ROOTKEY
] = (p
, L
, R
)
809 def parse_mpptree(mpptree
, sent
):
810 '''mpptree is a dict of the form {k:(p,L,R),...}; where k, L and R
811 are `keys' of the form (i,j,node,loc).
813 returns an mpp of the form [((head, loc_h),(arg, loc_a)), ...],
814 where head and arg are tags.'''
815 # local functions for clear access to mpptree:
819 return POS(k_node(key
))
821 return seals(k_node(key
))
823 return (k_node(key
),key
[3])
825 return (k_POS(key
),key
[3])
827 s_k
= k_seals(key
) # i+1 == j
828 return key
[0] + 1 == key
[1] and (s_k
== GOR
or s_k
== GOL
)
834 # arbitrarily, "ROOT attaches to right". We add it here to
835 # avoid further complications:
836 firstkey
= t_R(mpptree
[ROOTKEY
])
837 deps
= set([ (k_locPOS(ROOTKEY
), k_locPOS(firstkey
), RIGHT
) ])
845 L
= t_L( mpptree
[k
] )
846 R
= t_R( mpptree
[k
] )
847 if k_locnode( k
) == k_locnode( L
): # Rattach
848 deps
.add((k_locPOS( k
), k_locPOS( R
), LEFT
))
850 elif k_locnode( k
) == k_locnode( R
): # Lattach
851 deps
.add((k_locPOS( k
), k_locPOS( L
), RIGHT
))
860 tagf
= g
.numtag
# localized function, todo: speed-test
861 mpptree
= make_mpptree(g
, sent
)
862 return set([((tagf(h
), loc_h
), (tagf(a
), loc_a
))
863 for (h
, loc_h
),(a
,loc_a
),dir in parse_mpptree(mpptree
,sent
)])
866 ########################################################################
867 # testing functions: #
868 ########################################################################
870 testcorpus
= [s
.split() for s
in ['det nn vbd c vbd','vbd nn c vbd',
871 'det nn vbd', 'det nn vbd c pp',
872 'det nn vbd', 'det vbd vbd c pp',
873 'det nn vbd', 'det nn vbd c vbd',
874 'det nn vbd', 'det nn vbd c vbd',
875 'det nn vbd', 'det nn vbd c vbd',
876 'det nn vbd', 'det nn vbd c pp',
877 'det nn vbd pp', 'det nn vbd', ]]
880 import loc_h_harmonic
881 reload(loc_h_harmonic
)
883 # make sure these are the way they were when setting up the tests:
884 loc_h_harmonic
.HARMONIC_C
= 0.0
885 loc_h_harmonic
.FNONSTOP_MIN
= 25
886 loc_h_harmonic
.FSTOP_MIN
= 5
887 loc_h_harmonic
.RIGHT_FIRST
= 1.0
888 loc_h_harmonic
.OTHER_STOP_CALC
= False
890 return loc_h_harmonic
.initialize(testcorpus
)
892 def testreestimation2():
894 reestimate2(g2
, testcorpus
)
897 def testreestimation():
899 g
= reestimate(g
, testcorpus
)
903 def testmpp_regression(mpptree
,k_n
):
904 mpp
= {ROOTKEY
: (2.877072116829971e-05, STOPKEY
, (0, 3, (2, 3), 1)),
905 (0, 1, (1, 1), 0): (0.1111111111111111, (0, 1, (0, 1), 0), STOPKEY
),
906 (0, 1, (2, 1), 0): (0.049382716049382713, STOPKEY
, (0, 1, (1, 1), 0)),
907 (0, 3, (1, 3), 1): (0.00027619892321567721,
910 (0, 3, (2, 3), 1): (0.00012275507698474543, STOPKEY
, (0, 3, (1, 3), 1)),
911 (1, 3, (0, 3), 1): (0.025280986819448362,
914 (1, 3, (1, 3), 1): (0.0067415964851862296, (1, 3, (0, 3), 1), STOPKEY
),
915 (2, 3, (1, 4), 2): (0.32692307692307693, (2, 3, (0, 4), 2), STOPKEY
),
916 (2, 3, (2, 4), 2): (0.037721893491124266, STOPKEY
, (2, 3, (1, 4), 2))}
917 for k
,(v
,L
,R
) in mpp
.iteritems():
918 k2
= k
[0:k_n
] # 3 if the new does not check loc_h
921 if k2
not in mpptree
:
922 print "mpp regression, %s missing"%(k2
,)
924 vnew
= mpptree
[k2
][0]
925 if not "%.10f"%vnew
== "%.10f"%v
:
926 print "mpp regression, wanted %s=%.5f, got %.5f"%(k2
,v
,vnew
)
931 p_ROOT
, p_STOP
, p_ATTACH
, p_ORDER
= {},{},{},{}
934 p_STOP
[h
,LEFT
,NON
] = 1.0
935 p_STOP
[h
,LEFT
,ADJ
] = 1.0
936 p_STOP
[h
,RIGHT
,NON
] = 0.4 # RSTOP
937 p_STOP
[h
,RIGHT
,ADJ
] = 0.3 # RSTOP
938 p_STOP
[a
,LEFT
,NON
] = 1.0
939 p_STOP
[a
,LEFT
,ADJ
] = 1.0
940 p_STOP
[a
,RIGHT
,NON
] = 0.4 # RSTOP
941 p_STOP
[a
,RIGHT
,ADJ
] = 0.3 # RSTOP
942 p_ATTACH
[a
,h
,LEFT
] = 1.0 # not used
943 p_ATTACH
[a
,h
,RIGHT
] = 1.0 # not used
944 p_ATTACH
[h
,a
,LEFT
] = 1.0 # not used
945 p_ATTACH
[h
,a
,RIGHT
] = 1.0 # not used
946 p_ATTACH
[h
,h
,LEFT
] = 1.0 # not used
947 p_ATTACH
[h
,h
,RIGHT
] = 1.0 # not used
948 p_ORDER
[(GOR
, h
)] = 1.0
949 p_ORDER
[(GOL
, h
)] = 0.0
950 p_ORDER
[(GOR
, a
)] = 1.0
951 p_ORDER
[(GOL
, a
)] = 0.0
952 g
= DMV_Grammar({h
:'h',a
:'a'}, {'h':h
,'a':a
}, p_ROOT
, p_STOP
, p_ATTACH
, p_ORDER
)
953 # these probabilities are impossible so add them manually:
954 g
.p_GO_AT
[a
,a
,LEFT
,NON
] = 0.4 # Lattach
955 g
.p_GO_AT
[a
,a
,LEFT
,ADJ
] = 0.6 # Lattach
956 g
.p_GO_AT
[h
,a
,LEFT
,NON
] = 0.2 # Lattach to h
957 g
.p_GO_AT
[h
,a
,LEFT
,ADJ
] = 0.1 # Lattach to h
958 g
.p_GO_AT
[a
,a
,RIGHT
,NON
] = 1.0 # Rattach
959 g
.p_GO_AT
[a
,a
,RIGHT
,ADJ
] = 1.0 # Rattach
960 g
.p_GO_AT
[h
,a
,RIGHT
,NON
] = 1.0 # Rattach to h
961 g
.p_GO_AT
[h
,a
,RIGHT
,ADJ
] = 1.0 # Rattach to h
962 g
.p_GO_AT
[h
,h
,LEFT
,NON
] = 0.2 # Lattach
963 g
.p_GO_AT
[h
,h
,LEFT
,ADJ
] = 0.1 # Lattach
964 g
.p_GO_AT
[a
,h
,LEFT
,NON
] = 0.4 # Lattach to a
965 g
.p_GO_AT
[a
,h
,LEFT
,ADJ
] = 0.6 # Lattach to a
966 g
.p_GO_AT
[h
,h
,RIGHT
,NON
] = 1.0 # Rattach
967 g
.p_GO_AT
[h
,h
,RIGHT
,ADJ
] = 1.0 # Rattach
968 g
.p_GO_AT
[a
,h
,RIGHT
,NON
] = 1.0 # Rattach to a
969 g
.p_GO_AT
[a
,h
,RIGHT
,ADJ
] = 1.0 # Rattach to a
975 p_ROOT
, p_STOP
, p_ATTACH
, p_ORDER
= {},{},{},{}
977 p_STOP
[h
,LEFT
,NON
] = 1.0
978 p_STOP
[h
,LEFT
,ADJ
] = 1.0
979 p_STOP
[h
,RIGHT
,NON
] = 0.4
980 p_STOP
[h
,RIGHT
,ADJ
] = 0.3
981 p_ATTACH
[h
,h
,LEFT
] = 1.0 # not used
982 p_ATTACH
[h
,h
,RIGHT
] = 1.0 # not used
983 p_ORDER
[(GOR
, h
)] = 1.0
984 p_ORDER
[(GOL
, h
)] = 0.0
985 g
= DMV_Grammar({h
:'h'}, {'h':h
}, p_ROOT
, p_STOP
, p_ATTACH
, p_ORDER
)
986 g
.p_GO_AT
[h
,h
,LEFT
,NON
] = 0.6 # these probabilities are impossible
987 g
.p_GO_AT
[h
,h
,LEFT
,ADJ
] = 0.7 # so add them manually...
988 g
.p_GO_AT
[h
,h
,RIGHT
,NON
] = 1.0
989 g
.p_GO_AT
[h
,h
,RIGHT
,ADJ
] = 1.0
994 def testreestimation_h():
997 reestimate(g
,['h h h'.split()])
1000 def test(wanted
, got
):
1001 if not wanted
== got
:
1002 raise Warning, "Regression! Should be %s: %s" % (wanted
, got
)
1004 def regression_tests():
1005 testmpp_regression(make_mpptree(testgrammar(), testcorpus
[2]),4)
1009 "%.3f" % inner(0, 2, (SEAL
,h
), 0, testgrammar_h(), 'h h'.split(),{}))
1011 "%.3f" % inner(0, 2, (SEAL
,h
), 1, testgrammar_h(), 'h h'.split(),{}))
1013 "%.4f" % inner_sent(testgrammar_h(), 'h h h'.split(),{}))
1016 "%.4f" % inner(0, 3, (SEAL
,0), 0, testgrammar_h(), 'h h h'.split(),{}))
1018 "%.4f" % inner(0, 3, (SEAL
,0), 1, testgrammar_h(), 'h h h'.split(),{}))
1020 "%.4f" % inner(0, 3, (SEAL
,h
), 2, testgrammar_h(), 'h h h'.split(),{}))
1023 "%.2f" % outer(1, 3, (RGOL
,h
), 2, testgrammar_h(),'h h h'.split(),{},{}))
1024 test("0.61" , # ftw? can't be right... there's an 0.4 shared between these two...
1025 "%.2f" % outer(1, 3, (RGOL
,h
), 1, testgrammar_h(),'h h h'.split(),{},{}))
1028 "%.2f" % outer(1, 3, (RGOL
,h
), 0, testgrammar_h(),'h h h'.split(),{},{}))
1030 "%.2f" % outer(1, 3, (RGOL
,h
), 3, testgrammar_h(),'h h h'.split(),{},{}))
1033 "%.4f" % outer(0, 1, (GOR
,h
), 0,testgrammar_a(),'h a'.split(),{},{}))
1035 "%.4f" % outer(0, 2, (GOR
,h
), 0,testgrammar_a(),'h a'.split(),{},{}))
1037 "%.4f" % outer(0, 3, (GOR
,h
), 0,testgrammar_a(),'h a'.split(),{},{}))
1039 # todo: add more of these tests...
1043 def compare_grammars(g1
,g2
):
1045 for d1
,d2
in [(g1
.p_ATTACH
,g2
.p_ATTACH
),(g1
.p_STOP
,g2
.p_STOP
),
1046 (g1
.p_ORDER
, g2
.p_ORDER
), (g1
.p_ROOT
,g2
.p_ROOT
) ]:
1047 for k
,v
in d1
.iteritems():
1049 result
+= "\nreestimate1[%s]=%s missing from reestimate2"%(k
,v
)
1050 elif "%s"%d2[k
] != "%s"%v
:
1051 result
+= "\nreestimate1[%s]=%s while \nreestimate2[%s]=%s."%(k
,v
,k
,d2
[k
])
1052 for k
,v
in d2
.iteritems():
1054 result
+= "\nreestimate2[%s]=%s missing from reestimate1"%(k
,v
)
1058 def testNVNgrammar():
1059 from loc_h_harmonic
import initialize
1060 g
= initialize(['n v n'.split()])
1065 inners
= [(sent
, inner_sent(g
, sent
, {})) for sent
in testcorpus
]
1068 if __name__
== "__main__":
1073 # profile.run('testreestimation()')
1076 # print timeit.Timer("loc_h_dmv.testreestimation()",'''import loc_h_dmv
1077 # reload(loc_h_dmv)''').timeit(1)
1082 # for s in testcorpus:
1083 # print "sent:%s\nparse:set(\n%s)"%(s,pprint.pformat(list(mpp(testgrammar(), s)),
1086 # g1 = testreestimation()
1087 # g2 = testreestimation2()
1088 # print compare_grammars(g1,g2)
1089 g
= testNVNgrammar()
1090 q_sent
= inner_sent(g
,'n v n'.split(),{})
1092 q_tree
[1] = 2.7213e-06 # n_0 -> v, n_0 -> n_2
1093 q_tree
[2] = 9.738e-06 # n -> v -> n
1094 q_tree
[3] = 2.268e-06 # n_0 -> n_2 -> v
1095 q_tree
[4] = 2.7213e-06 # same as 1-3
1096 q_tree
[5] = 9.738e-06
1097 q_tree
[6] = 2.268e-06
1098 q_tree
[7] = 1.086e-05 # n <- v -> n (e-05!!!)
1100 for i
,q_t
in q_tree
.iteritems():
1101 f_T_q
[i
] = q_t
/ q_sent
1103 pprint
.pprint(q_tree
)
1104 pprint
.pprint(f_T_q
)
1105 print sum([f
for f
in f_T_q
.values()])
1107 def treediv(num
,den
):
1109 sum([f_T_q
[i
] for i
in num
]) / \
1110 sum([f_T_q
[i
] for i
in den
])
1112 # g2['root --> _n_'] = treediv( (1,2,3,4,5,6), (1,2,3,4,5,6,7) )
1113 # g2['root --> _v_'] = treediv( (7,), (1,2,3,4,5,6,7) )
1114 # g2['_n_ --> STOP n><'] = treediv( (1,2,3,4,5,6,7,1,2,3,4,5,6,7),
1115 # (1,2,3,4,5,6,7,1,2,3,4,5,6,7))
1117 # g2['_n_ --> STOP n>< NON'] = treediv( (3,4,5,6),
1120 # g2['_v_ --> STOP v><'] = treediv( (1,2,3,4,5,6,7),
1122 # nlrtrees = (1,2,3,4,5,6,7,1,2,3,4,5,6,7,
1124 # g2['n>< --> _n_ n><'] = treediv( ( 4, 6), nlrtrees )
1125 # g2['n>< --> _v_ n><'] = treediv( (3,4,5), nlrtrees )
1126 # g2['n>< --> n> STOP'] = treediv( (1,2,3,4,5,6,7,1,2,3,4,5,6,7),
1129 # g2['n>< --> n> STOP ADJ'] = treediv( ( 4,5, 7,1,2,3,4,5,6,7),
1131 # g2['n>< --> n> STOP NON'] = treediv( (1,2,3, 6),
1134 # vlrtrees = (1,2,3,4,5,6,7,
1136 # g2['v>< --> _n_ v><'] = treediv( (5,7), vlrtrees )
1137 # g2['v>< --> v> STOP'] = treediv( (1,2,3,4,5,6,7), vlrtrees )
1138 # nrtrees = (1,2,3,4,5,6,7,1,2,3,4,5,6,7,
1140 # g2['n> --> n> _n_'] = treediv( (1,3), nrtrees )
1141 # g2['n> --> n> _v_'] = treediv( (1,2,6), nrtrees )
1143 # g2['n> --> n> _n_ NON'] = treediv( (1,), nrtrees )
1144 # g2['n> --> n> _n_ ADJ'] = treediv( ( 3,), nrtrees )
1145 # g2['n> --> n> _v_ ADJ'] = treediv( ( 1,2, 6), nrtrees )
1147 # vrtrees = (1,2,3,4,5,6,7,
1149 # g2['v> --> v> _n_'] = treediv( (2,7), vrtrees )
1151 # g2[' v|n,R '] = treediv( (1, 2, 6),
1153 # g2[' n|n,R '] = treediv( (1, 3),
1156 g2
[' stop|n,R,non '] = treediv( ( 1,2,3,6),
1158 g2
[' v|n,left '] = treediv( ( 3,4,5),
1160 g2
[' n|n,left '] = treediv( (6,4),
1164 g3
= reestimate2(g
, ['n v n'.split()])
1166 g4
= reestimate2(g
, ['n v n'.split()])