3 # dmv reestimation and inside-outside probabilities using loc_h, and
7 # 1. Grammar-class and related functions
8 # 2. P_INSIDE / inner() and inner_sent()
9 # 3. P_OUTSIDE / outer()
10 # 4. Reestimation v.1: sentences as outer loop
11 # 5. Reestimation v.2: head-types as outer loop
12 # 6. Most Probable Parse
13 # 7. Testing functions
16 from common_dmv
import *
18 ### todo: debug with @accepts once in a while, but it's SLOW
19 # from typecheck import accepts, Any
21 if __name__
== "__main__":
22 print "loc_h_dmv module tests:"
def adj(middle, loc_h):
    "middle is eg. k when rewriting for i<k<j (inside probabilities)."
    # A split point is adjacent to the head iff it touches either side
    # of the head's between-word location (ADJ == True).
    return middle in (loc_h, loc_h + 1)
28 def make_GO_AT(p_STOP
,p_ATTACH
):
30 for (a
,h
,dir), p_ah
in p_ATTACH
.iteritems():
31 p_GO_AT
[a
,h
,dir, NON
] = p_ah
* (1-p_STOP
[h
, dir, NON
])
32 p_GO_AT
[a
,h
,dir, ADJ
] = p_ah
* (1-p_STOP
[h
, dir, ADJ
])
35 class DMV_Grammar(io
.Grammar
):
39 return "%d=%s" % (n
, self
.numtag(n
))
42 if dict[key
] > 1.0: raise Exception, "probability > 1.0:%s"%key
45 def no_zeroL(str,tagstr
,prob
):
46 if prob
> 0.0: return (str%(tagstr
,prob
)).ljust(LJUST
)
47 else: return "".ljust(LJUST
)
48 def no_zeroR(str,tagstr
,prob
):
49 if prob
> 0.0: return str%(tagstr
,prob
)
52 p_L
= p(self
.p_ATTACH
,(a
,h
,LEFT
))
53 p_R
= p(self
.p_ATTACH
,(a
,h
,RIGHT
))
54 if p_L
== 0.0 and p_R
== 0.0:
58 str = "p_ATTACH[%s|%s,L] = %s" % (t(a
), t(h
), p_L
)
59 str = str.ljust(LJUST
)
63 str = str.ljust(LJUST
)
64 str += "p_ATTACH[%s|%s,R] = %s" % (t(a
), t(h
), p_R
)
67 root
, stop
, att
, ord = "","","",""
68 for h
in self
.headnums():
69 root
+= no_zeroL("\np_ROOT[%s] = %s", t(h
), p(self
.p_ROOT
, (h
)))
71 stop
+= no_zeroL("p_STOP[stop|%s,L,adj] = %s", t(h
), p(self
.p_STOP
, (h
,LEFT
,ADJ
)))
72 stop
+= no_zeroR("p_STOP[stop|%s,R,adj] = %s", t(h
), p(self
.p_STOP
, (h
,RIGHT
,ADJ
)))
74 stop
+= no_zeroL("p_STOP[stop|%s,L,non] = %s", t(h
), p(self
.p_STOP
, (h
,LEFT
,NON
)))
75 stop
+= no_zeroR("p_STOP[stop|%s,R,non] = %s", t(h
), p(self
.p_STOP
, (h
,RIGHT
,NON
)))
76 att
+= ''.join([p_a(a
,h
) for a
in self
.headnums()])
78 ord += no_zeroL("p_ORDER[ left-first|%s ] = %s", t(h
), p(self
.p_ORDER
, (GOL
,h
)))
79 ord += no_zeroR("p_ORDER[right-first|%s ] = %s", t(h
), p(self
.p_ORDER
, (GOR
,h
)))
80 return root
+ stop
+ att
+ ord
82 def __init__(self
, numtag
, tagnum
, p_ROOT
, p_STOP
, p_ATTACH
, p_ORDER
):
83 io
.Grammar
.__init
__(self
, numtag
, tagnum
)
84 self
.p_ROOT
= p_ROOT
# p_ROOT[w] = p
85 self
.p_ORDER
= p_ORDER
# p_ORDER[seals, w] = p
86 self
.p_STOP
= p_STOP
# p_STOP[w, LEFT, NON] = p (etc. for LA,RN,RA)
87 self
.p_ATTACH
= p_ATTACH
# p_ATTACH[a, h, LEFT] = p (etc. for R)
88 # p_GO_AT[a, h, LEFT, NON] = p (etc. for LA,RN,RA)
89 self
.p_GO_AT
= make_GO_AT(self
.p_STOP
, self
.p_ATTACH
)
90 # these are used in reestimate2():
93 def get_iochart(self
, sent_nums
):
94 ch_key
= tuple(sent_nums
)
96 ichart
= self
._icharts
[ch_key
]
100 ochart
= self
._ocharts
[ch_key
]
103 return (ichart
, ochart
)
105 def set_iochart(self
, sent_nums
, ichart
, ochart
):
106 self
._icharts
[tuple(sent_nums
)] = ichart
107 self
._ocharts
[tuple(sent_nums
)] = ochart
109 def reset_iocharts(self
):
113 def p_GO_AT_or0(self
, a
, h
, dir, adj
):
115 return self
.p_GO_AT
[a
, h
, dir, adj
]
120 def locs(sent_nums
, start
, stop
):
121 '''Return the between-word locations of all words in some fragment of
122 sent. We make sure to offset the locations correctly so that for
123 any w in the returned list, sent[w]==loc_w.
125 start is inclusive, stop is exclusive, as in klein-thesis and
126 Python's list-slicing.'''
127 for i0
,w
in enumerate(sent_nums
[start
:stop
]):
131 ###################################################
132 # P_INSIDE (dmv-specific) #
133 ###################################################
135 #@accepts(int, int, (int, int), int, Any(), [str], {tuple:float}, IsOneOf(None,{}))
136 def inner(i
, j
, node
, loc_h
, g
, sent
, ichart
, mpptree
=None):
137 ''' The ichart is of this form:
138 ichart[i,j,LHS, loc_h]
139 where i and j are between-word positions.
141 loc_h gives adjacency (along with k for attachment rules), and is
142 needed in P_STOP reestimation.
144 sent_nums
= g
.sent_nums(sent
)
146 def terminal(i
,j
,node
, loc_h
, tabs
):
147 if not i
<= loc_h
< j
:
149 print "%s*= 0.0 (wrong loc_h)" % tabs
151 elif POS(node
) == sent_nums
[i
] and node
in g
.p_ORDER
:
152 # todo: add to ichart perhaps? Although, it _is_ simple lookup..
153 prob
= g
.p_ORDER
[node
]
156 print "%sLACKING TERMINAL:" % tabs
159 print "%s*= %.4f (terminal: %s -> %s_%d)" % (tabs
,prob
, node_str(node
), sent
[i
], loc_h
)
162 def e(i
,j
, (s_h
,h
), loc_h
, n_t
):
165 key
= (i
,j
, (s_h
,h
), loc_h
)
166 if key
not in mpptree
:
167 mpptree
[key
] = (p
, L
, R
)
168 elif mpptree
[key
][0] < p
:
169 mpptree
[key
] = (p
, L
, R
)
172 "Tabs for debug output"
175 if (i
, j
, (s_h
,h
), loc_h
) in ichart
:
177 print "%s*= %.4f in ichart: i:%d j:%d node:%s loc:%s" % (tab(),ichart
[i
, j
, (s_h
,h
), loc_h
], i
, j
,
178 node_str((s_h
,h
)), loc_h
)
179 return ichart
[i
, j
, (s_h
,h
), loc_h
]
181 # Either terminal rewrites, using p_ORDER:
182 if i
+1 == j
and (s_h
== GOR
or s_h
== GOL
):
183 return terminal(i
, j
, (s_h
,h
), loc_h
, tab())
184 else: # Or not at terminal level yet:
186 print "%s%s (%.1f) from %d to %d" % (tab(),node_str((s_h
,h
)),loc_h
,i
,j
)
188 if h
== POS(ROOT
): # only used in testing, o/w we use inner_sent
190 if i
!= 0 or j
!= len(sent
): raise ValueError
191 else: return g
.p_ROOT
[h
] * e(i
,j
,(SEAL
,h
),loc_h
,n_t
+1)
192 p_RGOL
= g
.p_STOP
[h
, LEFT
, adj(i
,loc_h
)] * e(i
,j
,(RGOL
,h
),loc_h
,n_t
+1)
193 p_LGOR
= g
.p_STOP
[h
, RIGHT
, adj(j
,loc_h
)] * e(i
,j
,(LGOR
,h
),loc_h
,n_t
+1)
195 to_mpp(p_RGOL
, STOPKEY
, (i
,j
, (RGOL
,h
),loc_h
))
196 to_mpp(p_LGOR
, (i
,j
, (RGOL
,h
),loc_h
), STOPKEY
)
198 print "%sp= %.4f (STOP)" % (tab(), p
)
199 elif s_h
== RGOL
or s_h
== GOL
:
202 p
= g
.p_STOP
[h
, RIGHT
, adj(j
,loc_h
)] * e(i
,j
, (GOR
,h
),loc_h
,n_t
+1)
203 to_mpp(p
, (i
,j
, (GOR
,h
),loc_h
), STOPKEY
)
204 for k
in xgo_left(i
, loc_h
): # i < k <= loc_l(h)
205 p_R
= e(k
, j
, ( s_h
,h
), loc_h
, n_t
+1)
207 for loc_a
,a
in locs(sent_nums
, i
, k
):
208 p_ah
= g
.p_GO_AT_or0(a
, h
, LEFT
, adj(k
,loc_h
))
210 p_L
= e(i
, k
, (SEAL
,a
), loc_a
, n_t
+1)
211 p_add
= p_L
* p_ah
* p_R
214 (i
, k
, (SEAL
,a
), loc_a
),
215 (k
, j
, ( s_h
,h
), loc_h
))
217 print "%sp= %.4f (ATTACH)" % (tab(), p
)
218 elif s_h
== GOR
or s_h
== LGOR
:
221 p
= g
.p_STOP
[h
, LEFT
, adj(i
,loc_h
)] * e(i
,j
, (GOL
,h
),loc_h
,n_t
+1)
222 to_mpp(p
, (i
,j
, (GOL
,h
),loc_h
), STOPKEY
)
223 for k
in xgo_right(loc_h
, j
): # loc_l(h) < k < j
224 p_L
= e(i
, k
, ( s_h
,h
), loc_h
, n_t
+1)
226 for loc_a
,a
in locs(sent_nums
,k
,j
):
227 p_ah
= g
.p_GO_AT_or0(a
, h
, RIGHT
, adj(k
,loc_h
))
228 p_R
= e(k
, j
, (SEAL
,a
), loc_a
, n_t
+1)
229 p_add
= p_L
* p_ah
* p_R
232 (i
, k
, ( s_h
,h
), loc_h
),
233 (k
, j
, (SEAL
,a
), loc_a
))
236 print "%sp= %.4f (ATTACH)" % (tab(), p
)
237 # elif s_h == GOL: # todo
239 ichart
[i
, j
, (s_h
,h
), loc_h
] = p
243 inner_prob
= e(i
,j
,node
,loc_h
, 0)
245 print debug_ichart(g
,sent
,ichart
)
247 # end of dmv.inner(i, j, node, loc_h, g, sent, ichart,mpptree)
250 def debug_ichart(g
,sent
,ichart
):
251 str = "---ICHART:---\n"
252 for (s
,t
,LHS
,loc_h
),v
in ichart
.iteritems():
253 str += "%s -> %s_%d ... %s_%d (loc_h:%s):\t%s\n" % (node_str(LHS
,g
.numtag
),
254 sent
[s
], s
, sent
[s
], t
, loc_h
, v
)
255 str += "---ICHART:end---\n"
def inner_sent(g, sent, ichart):
    """Total inside probability of the full sentence: for every possible
    root head w (at location loc_w), weight the inside probability of the
    sealed w spanning the whole sentence by p_ROOT[w], and sum."""
    total = 0.0
    sent_nums = g.sent_nums(sent)
    for loc_w, w in locs(sent_nums, 0, len(sent)):
        total += g.p_ROOT[w] * inner(0, len(sent), (SEAL, w), loc_w, g, sent, ichart)
    return total
267 ###################################################
268 # P_OUTSIDE (dmv-specific) #
269 ###################################################
271 #@accepts(int, int, (int, int), int, Any(), [str], {tuple:float}, {tuple:float})
272 def outer(i
,j
,w_node
,loc_w
, g
, sent
, ichart
, ochart
):
273 ''' http://www.student.uib.no/~kun041/dmvccm/DMVCCM.html#outer
275 w_node is a pair (seals,POS); the w in klein-thesis is made up of
278 sent_nums
= g
.sent_nums(sent
)
279 if POS(w_node
) not in sent_nums
[i
:j
]:
280 # sanity check, w must be able to dominate sent[i:j]
284 def e(i
,j
,LHS
,loc_h
): # P_{INSIDE}
286 return ichart
[i
,j
,LHS
,loc_h
]
288 return inner(i
,j
,LHS
,loc_h
,g
,sent
,ichart
)
290 def f(i
,j
,w_node
,loc_w
):
291 if not (i
<= loc_w
< j
):
293 if (i
,j
,w_node
,loc_w
) in ochart
:
294 return ochart
[i
,j
, w_node
,loc_w
]
296 if i
== 0 and j
== len(sent
):
298 else: # ROOT may only be used on full sentence
300 # but we may have non-ROOTs (stops) over full sentence too:
304 # todo: try either if p_M > 0.0: or sum(), and speed-test them
306 if s_w
== SEAL
: # w == a
307 # todo: do the i<sent<j check here to save on calls?
308 p
= g
.p_ROOT
[w
] * f(i
,j
,ROOT
,loc_w
)
310 for k
in xgt(j
, sent
): # j<k<len(sent)+1
311 for loc_h
,h
in locs(sent_nums
,j
,k
):
312 p_wh
= g
.p_GO_AT_or0(w
, h
, LEFT
, adj(j
, loc_h
))
313 for s_h
in [RGOL
, GOL
]:
314 p
+= f(i
,k
,(s_h
,h
),loc_h
) * p_wh
* e(j
,k
,(s_h
,h
),loc_h
)
316 for k
in xlt(i
): # k<i
317 for loc_h
,h
in locs(sent_nums
,k
,i
):
318 p_wh
= g
.p_GO_AT_or0(w
, h
, RIGHT
, adj(i
, loc_h
))
319 for s_h
in [LGOR
, GOR
]:
320 p
+= e(k
,i
,(s_h
,h
), loc_h
) * p_wh
* f(k
,j
,(s_h
,h
), loc_h
)
322 elif s_w
== RGOL
or s_w
== GOL
: # w == h, left stop + left attach
327 p
= g
.p_STOP
[w
, LEFT
, adj(i
,loc_w
)] * f(i
,j
,( s_h
,w
),loc_w
)
328 for k
in xlt(i
): # k<i
329 for loc_a
,a
in locs(sent_nums
,k
,i
):
330 p_aw
= g
.p_GO_AT_or0(a
, w
, LEFT
, adj(i
, loc_w
))
331 p
+= e(k
,i
, (SEAL
,a
),loc_a
) * p_aw
* f(k
,j
,w_node
,loc_w
)
333 elif s_w
== GOR
or s_w
== LGOR
: # w == h, right stop + right attach
338 p
= g
.p_STOP
[w
, RIGHT
, adj(j
,loc_w
)] * f(i
,j
,( s_h
,w
),loc_w
)
339 for k
in xgt(j
, sent
): # j<k<len(sent)+1
340 for loc_a
,a
in locs(sent_nums
,j
,k
):
341 p_ah
= g
.p_GO_AT_or0(a
, w
, RIGHT
, adj(j
, loc_w
))
342 p
+= f(i
,k
,w_node
,loc_w
) * p_ah
* e(j
,k
,(SEAL
,a
),loc_a
)
344 ochart
[i
,j
,w_node
,loc_w
] = p
348 return f(i
,j
,w_node
,loc_w
)
349 # end outer(i,j,w_node,loc_w, g,sent, ichart,ochart)
354 ###################################################
355 # Reestimation v.1: #
356 # Sentences as outer loop #
357 ###################################################
359 def reest_zeros(h_nums
):
360 '''A dict to hold numerators and denominators for our 6+ reestimation
363 fr
= { ('ROOT','den'):0.0 } # holds sum over f_sent!! not p_sent...
365 fr
['ROOT','num',h
] = 0.0
366 for s_h
in [GOR
,GOL
,RGOL
,LGOR
]:
368 fr
['hat_a','den',x
] = 0.0 # = c()
369 # not all arguments are attached to, so we just initialize
370 # fr['hat_a','num',a,(s_h,h)] as they show up, in reest_freq
371 for adj
in [NON
, ADJ
]:
372 for nd
in ['num','den']:
373 fr
['STOP',nd
,x
,adj
] = 0.0
377 def reest_freq(g
, corpus
):
378 fr
= reest_zeros(g
.headnums())
381 p_sent
= None # 50 % speed increase on storing this locally
383 # local functions altogether 2x faster than global
384 def c(i
,j
,LHS
,loc_h
,sent
):
388 p_in
= e(i
,j
, LHS
,loc_h
,sent
)
392 p_out
= f(i
,j
, LHS
,loc_h
,sent
)
393 return p_in
* p_out
/ p_sent
396 def f(i
,j
,LHS
,loc_h
,sent
): # P_{OUTSIDE}
398 return ochart
[i
,j
,LHS
,loc_h
]
400 return outer(i
,j
,LHS
,loc_h
,g
,sent
,ichart
,ochart
)
403 def e(i
,j
,LHS
,loc_h
,sent
): # P_{INSIDE}
405 return ichart
[i
,j
,LHS
,loc_h
]
407 return inner(i
,j
,LHS
,loc_h
,g
,sent
,ichart
)
410 def w_left(i
,j
, x
,loc_h
,sent
,sent_nums
):
411 if not p_sent
> 0.0: return
415 for k
in xtween(i
, j
):
416 p_out
= f(i
,j
, x
,loc_h
, sent
)
419 p_R
= e(k
,j
, x
,loc_h
, sent
)
423 for loc_a
,a
in locs(sent_nums
, i
,k
): # i<=loc_l(a)<k
424 p_rule
= g
.p_GO_AT_or0(a
, h
, LEFT
, adj(k
, loc_h
))
425 p_L
= e(i
,k
, (SEAL
,a
), loc_a
, sent
)
426 p
= p_L
* p_out
* p_R
* p_rule
428 except KeyError: a_k
[a
] = p
430 for a
,p
in a_k
.iteritems():
431 try: fr
['hat_a','num',a
,x
] += p
/ p_sent
432 except KeyError: fr
['hat_a','num',a
,x
] = p
/ p_sent
433 # end reest_freq.w_left()
435 def w_right(i
,j
, x
,loc_h
,sent
,sent_nums
):
436 if not p_sent
> 0.0: return
440 for k
in xtween(i
, j
):
441 p_out
= f(i
,j
, x
,loc_h
, sent
)
444 p_L
= e(i
,k
, x
,loc_h
, sent
)
448 for loc_a
,a
in locs(sent_nums
, k
,j
): # k<=loc_l(a)<j
449 p_rule
= g
.p_GO_AT_or0(a
, h
, RIGHT
, adj(k
, loc_h
))
450 p_R
= e(k
,j
, (SEAL
,a
),loc_a
, sent
)
451 p
= p_L
* p_out
* p_R
* p_rule
453 except KeyError: a_k
[a
] = p
455 for a
,p
in a_k
.iteritems():
456 try: fr
['hat_a','num',a
,x
] += p
/ p_sent
457 except KeyError: fr
['hat_a','num',a
,x
] = p
/ p_sent
458 # end reest_freq.w_right()
466 p_sent
= inner_sent(g
, sent
, ichart
)
467 fr
['ROOT','den'] += 1 # divide by p_sent per h!
469 sent_nums
= g
.sent_nums(sent
)
471 for loc_h
,h
in locs(sent_nums
,0,len(sent
)+1): # locs-stop is exclusive, thus +1
473 fr
['ROOT','num',h
] += g
.p_ROOT
[h
] * e(0,len(sent
), (SEAL
,h
),loc_h
, sent
) \
479 # left non-adjacent stop:
480 for i
in xlt(loc_l_h
):
481 fr
['STOP','num',(GOL
,h
),NON
] += c(loc_l_h
, j
, (LGOR
, h
),loc_h
, sent
)
482 fr
['STOP','den',(GOL
,h
),NON
] += c(loc_l_h
, j
, (GOL
, h
),loc_h
, sent
)
483 for j
in xgteq(loc_r_h
, sent
):
484 fr
['STOP','num',(RGOL
,h
),NON
] += c(i
, j
, (SEAL
, h
),loc_h
, sent
)
485 fr
['STOP','den',(RGOL
,h
),NON
] += c(i
, j
, (RGOL
, h
),loc_h
, sent
)
486 # left adjacent stop, i = loc_l_h
487 fr
['STOP','num',(GOL
,h
),ADJ
] += c(loc_l_h
, loc_r_h
, (LGOR
, h
),loc_h
, sent
)
488 fr
['STOP','den',(GOL
,h
),ADJ
] += c(loc_l_h
, loc_r_h
, (GOL
, h
),loc_h
, sent
)
489 for j
in xgteq(loc_r_h
, sent
):
490 fr
['STOP','num',(RGOL
,h
),ADJ
] += c(loc_l_h
, j
, (SEAL
, h
),loc_h
, sent
)
491 fr
['STOP','den',(RGOL
,h
),ADJ
] += c(loc_l_h
, j
, (RGOL
, h
),loc_h
, sent
)
492 # right non-adjacent stop:
493 for j
in xgt(loc_r_h
, sent
):
494 fr
['STOP','num',(GOR
,h
),NON
] += c(loc_l_h
, j
, (RGOL
, h
),loc_h
, sent
)
495 fr
['STOP','den',(GOR
,h
),NON
] += c(loc_l_h
, j
, (GOR
, h
),loc_h
, sent
)
496 for i
in xlteq(loc_l_h
):
497 fr
['STOP','num',(LGOR
,h
),NON
] += c(loc_l_h
, j
, (SEAL
, h
),loc_h
, sent
)
498 fr
['STOP','den',(LGOR
,h
),NON
] += c(loc_l_h
, j
, (LGOR
, h
),loc_h
, sent
)
499 # right adjacent stop, j = loc_r_h
500 fr
['STOP','num',(GOR
,h
),ADJ
] += c(loc_l_h
, loc_r_h
, (RGOL
, h
),loc_h
, sent
)
501 fr
['STOP','den',(GOR
,h
),ADJ
] += c(loc_l_h
, loc_r_h
, (GOR
, h
),loc_h
, sent
)
502 for i
in xlteq(loc_l_h
):
503 fr
['STOP','num',(LGOR
,h
),ADJ
] += c(loc_l_h
, j
, (SEAL
, h
),loc_h
, sent
)
504 fr
['STOP','den',(LGOR
,h
),ADJ
] += c(loc_l_h
, j
, (LGOR
, h
),loc_h
, sent
)
507 if 'REEST_ATTACH' in DEBUG
:
508 print "Lattach %s: for i < %s"%(g
.numtag(h
),sent
[0:loc_h
+1])
509 for s_h
in [RGOL
, GOL
]:
511 for i
in xlt(loc_l_h
): # i < loc_l(h)
512 if 'REEST_ATTACH' in DEBUG
:
513 print "\tfor j >= %s"%sent
[loc_h
:len(sent
)]
514 for j
in xgteq(loc_r_h
, sent
): # j >= loc_r(h)
515 fr
['hat_a','den',x
] += c(i
,j
, x
,loc_h
, sent
) # v_q in L&Y
516 if 'REEST_ATTACH' in DEBUG
:
517 print "\t\tc( %d , %d, %s, %s, sent)=%.4f"%(i
,j
,node_str(x
),loc_h
,fr
['hat_a','den',x
])
518 w_left(i
, j
, x
,loc_h
, sent
,sent_nums
) # compute w for all a in sent
521 if 'REEST_ATTACH' in DEBUG
:
522 print "Rattach %s: for i <= %s"%(g
.numtag(h
),sent
[0:loc_h
+1])
523 for s_h
in [GOR
, LGOR
]:
525 for i
in xlteq(loc_l_h
): # i <= loc_l(h)
526 if 'REEST_ATTACH' in DEBUG
:
527 print "\tfor j > %s"%sent
[loc_h
:len(sent
)]
528 for j
in xgt(loc_r_h
, sent
): # j > loc_r(h)
529 fr
['hat_a','den',x
] += c(i
,j
, x
,loc_h
, sent
) # v_q in L&Y
530 if 'REEST_ATTACH' in DEBUG
:
531 print "\t\tc( %d , %d, %s, %s, sent)=%.4f"%(loc_h
,j
,node_str(x
),loc_h
,fr
['hat_a','den',x
])
532 w_right(i
,j
, x
,loc_h
, sent
,sent_nums
) # compute w for all a in sent
538 def reestimate(old_g
, corpus
):
539 fr
= reest_freq(old_g
, corpus
)
540 p_ROOT
, p_STOP
, p_ATTACH
= {},{},{}
542 for h
in old_g
.headnums():
543 # reest_head changes p_ROOT, p_STOP, p_ATTACH
544 reest_head(h
, fr
, old_g
, p_ROOT
, p_STOP
, p_ATTACH
)
545 p_ORDER
= old_g
.p_ORDER
546 numtag
, tagnum
= old_g
.get_nums_tags()
548 new_g
= DMV_Grammar(numtag
, tagnum
, p_ROOT
, p_STOP
, p_ATTACH
, p_ORDER
)
552 def reest_head(h
, fr
, g
, p_ROOT
, p_STOP
, p_ATTACH
):
553 "Given a single head, update g with the reestimated probability."
554 # remove 0-prob stuff? todo
556 p_ROOT
[h
] = fr
['ROOT','num',h
] / fr
['ROOT','den']
560 for dir in [LEFT
,RIGHT
]:
561 for adj
in [ADJ
, NON
]: # p_STOP
562 p_STOP
[h
, dir, adj
] = 0.0
563 for s_h
in dirseal(dir):
565 p
= fr
['STOP','den', x
, adj
]
567 p
= fr
['STOP', 'num', x
, adj
] / p
568 p_STOP
[h
, dir, adj
] += p
570 for s_h
in dirseal(dir): # make hat_a for p_ATTACH
572 p_c
= fr
['hat_a','den',x
]
574 for a
in g
.headnums():
575 if (a
,h
,dir) not in p_ATTACH
:
576 p_ATTACH
[a
,h
,dir] = 0.0
577 try: # (a,x) might not be in hat_a
578 p_ATTACH
[a
,h
,dir] += fr
['hat_a','num',a
,x
] / p_c
579 except KeyError: pass
580 except ZeroDivisionError: pass
586 ###################################################
587 # Reestimation v.2: #
588 # Heads as outer loop #
589 ###################################################
591 def locs_h(h
, sent_nums
):
592 '''Return the between-word locations of all tokens of h in sent.'''
593 return [loc_w
for loc_w
,w
in locs(sent_nums
, 0, len(sent_nums
))
596 def locs_a(a
, sent_nums
, start
, stop
):
597 '''Return the between-word locations of all tokens of h in some
598 fragment of sent. We make sure to offset the locations correctly
599 so that for any w in the returned list, sent[w]==loc_w.
601 start is inclusive, stop is exclusive, as in klein-thesis and
602 Python's list-slicing (eg. return left-loc).'''
603 return [loc_w
for loc_w
,w
in locs(sent_nums
, start
, stop
)
606 def inner2(i
, j
, node
, loc_h
, g
, sent
):
607 ichart
,ochart
= g
.get_iochart(s_n
)
608 try: p
= ichart
[i
,j
,x
,loc_h
]
609 except KeyError: p
= inner(i
,j
,x
,loc_h
,g
,sent
,ichart
)
610 g
.set_iochart(s_n
,ichart
,ochart
)
613 def inner_sent2(g
, sent
):
614 ichart
,ochart
= g
.get_iochart(s_n
)
615 p
= inner_sent(g
,sent
,ichart
)
616 g
.set_iochart(s_n
,ichart
,ochart
)
619 def outer2(i
, j
,w_node
,loc_w
, g
, sent
):
620 ichart
,ochart
= g
.get_iochart(s_n
)
621 try: p
= ochart
[i
,j
,w_node
,loc_w
]
622 except KeyError: p
= inner(i
,j
,w_node
,loc_w
,g
,sent
,ichart
,ochart
)
623 g
.set_iochart(s_n
,ichart
,ochart
)
626 def reestimate2(old_g
, corpus
):
627 p_ROOT
, p_STOP
, p_ATTACH
= {},{},{}
629 for h
in old_g
.headnums():
630 # reest_head changes p_ROOT, p_STOP, p_ATTACH
631 reest_head2(h
, old_g
, corpus
, p_ROOT
, p_STOP
, p_ATTACH
)
632 p_ORDER
= old_g
.p_ORDER
633 numtag
, tagnum
= old_g
.get_nums_tags()
635 new_g
= DMV_Grammar(numtag
, tagnum
, p_ROOT
, p_STOP
, p_ATTACH
, p_ORDER
)
638 def hat_d2(xbar
, x
, xi
, xj
, g
, corpus
): # stop helper
639 def c(x
,loc_x
,i
,j
): return c2(x
,loc_x
,i
,j
,g
,s_n
,sent
)
642 if h
!= POS(xbar
): raise ValueError
645 for s_n
,sent
in [(g
.sent_nums(sent
),sent
) for sent
in corpus
]:
646 for loc_h
in locs_h(h
,s_n
):
647 loc_l_h
, loc_r_h
= loc_h
, loc_h
+ 1
648 for i
in xi(loc_l_h
):
649 for j
in xj(loc_r_h
, s_n
):
650 # print "s:%s %d,%d"%(sent,i,j)
651 num
+= c(xbar
,loc_h
,i
,j
)
652 den
+= c(x
,loc_h
,i
,j
)
655 return num
/den
# eg. SEAL/RGOL, xbar/x
658 def c2(x
,loc_h
,i
,j
,g
,s_n
,sent
):
659 ichart
,ochart
= g
.get_iochart(s_n
)
661 def f(i
,j
,x
,loc_h
): # P_{OUTSIDE}
662 try: return ochart
[i
,j
,x
,loc_h
]
663 except KeyError: return outer(i
,j
,x
,loc_h
,g
,sent
,ichart
,ochart
)
664 def e(i
,j
,x
,loc_h
): # P_{INSIDE}
665 try: return ichart
[i
,j
,x
,loc_h
]
666 except KeyError: return inner(i
,j
,x
,loc_h
,g
,sent
,ichart
)
668 p_sent
= inner_sent(g
, sent
, ichart
)
672 p_in
= e(i
,j
, x
,loc_h
)
676 p_out
= f(i
,j
, x
,loc_h
)
678 g
.set_iochart(s_n
,ichart
,ochart
)
679 return p_in
* p_out
/ p_sent
681 def w2(a
, x
,loc_h
, dir, i
, j
, g
, s_n
,sent
):
682 ichart
,ochart
= g
.get_iochart(s_n
)
684 def f(i
,j
,x
,loc_h
): # P_{OUTSIDE}
685 try: return ochart
[i
,j
,x
,loc_h
]
686 except KeyError: return outer(i
,j
,x
,loc_h
,g
,sent
,ichart
,ochart
)
687 def e(i
,j
,x
,loc_h
): # P_{INSIDE}
688 try: return ichart
[i
,j
,x
,loc_h
]
689 except KeyError: return inner(i
,j
,x
,loc_h
,g
,sent
,ichart
)
692 p_sent
= inner_sent(g
, sent
, ichart
)
700 for k
in xtween(i
,j
):
705 for loc_a
in locs_a(a
, s_n
, start
, stop
):
707 loc_L
, loc_R
= loc_a
, loc_h
709 loc_L
, loc_R
= loc_h
, loc_a
710 p
= g
.p_GO_AT_or0(a
,h
,dir,adj(k
,loc_h
))
711 in_L
= e(i
,k
,L
,loc_L
)
712 in_R
= e(k
,j
,R
,loc_R
)
714 w_sum
+= p
* in_L
* in_R
* out
716 g
.set_iochart(s_n
,ichart
,ochart
)
719 def hat_a2(a
, x
, dir, g
, corpus
): # attachment helper
720 def w(a
,x
,loc_x
,dir,i
,j
): return w2(a
,x
,loc_x
,dir,i
,j
,g
,s_n
,sent
)
721 def c(x
,loc_x
,i
,j
): return c2(x
,loc_x
,i
,j
,g
,s_n
,sent
)
730 for s_n
,sent
in [(g
.sent_nums(sent
),sent
) for sent
in corpus
]:
731 for loc_h
in locs_h(h
,s_n
):
732 loc_l_h
, loc_r_h
= loc_h
, loc_h
+ 1
733 for i
in xi(loc_l_h
):
734 for j
in xj(loc_r_h
,sent
):
735 num
+= w(a
, x
,loc_h
, dir, i
,j
)
736 den
+= c(x
,loc_h
, i
,j
)
741 def reest_root2(h
,g
,corpus
):
744 for s_n
,sent
in [(g
.sent_nums(sent
),sent
) for sent
in corpus
]:
747 ichart
, ochart
= g
.get_iochart(s_n
)
748 den
+= inner_sent(g
, sent
, ichart
)
749 for loc_h
in locs_h(h
,s_n
):
752 inner(0, len(s_n
), (SEAL
,h
), loc_h
, g
, sent
, ichart
)
753 g
.set_iochart(s_n
, ichart
, ochart
)
755 return sum / corpus_size
757 def reest_head2(h
, g
, corpus
, p_ROOT
, p_STOP
, p_ATTACH
):
758 print "h: %d=%s ..."%(h
,g
.numtag(h
)),
759 def hat_d(xbar
,x
,xi
,xj
): return hat_d2(xbar
,x
,xi
,xj
, g
, corpus
)
760 def hat_a(a
, x
, dir ): return hat_a2(a
, x
, dir, g
, corpus
)
762 p_STOP
[h
, LEFT
,NON
] = \
763 hat_d((SEAL
,h
),(RGOL
,h
),xlt
, xgteq
) + \
764 hat_d((LGOR
,h
),( GOL
,h
),xlt
, xeq
)
765 p_STOP
[h
, LEFT
,ADJ
] = \
766 hat_d((SEAL
,h
),(RGOL
,h
),xeq
, xgteq
) + \
767 hat_d((LGOR
,h
),( GOL
,h
),xeq
, xeq
)
768 p_STOP
[h
,RIGHT
,NON
] = \
769 hat_d((RGOL
,h
),( GOR
,h
),xeq
, xgt
) + \
770 hat_d((SEAL
,h
),(LGOR
,h
),xlteq
,xgt
)
771 p_STOP
[h
,RIGHT
,ADJ
] = \
772 hat_d((RGOL
,h
),( GOR
,h
),xeq
, xeq
) + \
773 hat_d((SEAL
,h
),(LGOR
,h
),xlteq
,xeq
)
774 print "stops done...",
776 p_ROOT
[h
] = reest_root2(h
,g
,corpus
)
777 print "root done...",
779 for a
in g
.headnums():
780 p_ATTACH
[a
,h
,LEFT
] = \
781 hat_a(a
, (GOL
,h
),LEFT
) + \
782 hat_a(a
,(RGOL
,h
),LEFT
)
783 p_ATTACH
[a
,h
,RIGHT
] = \
784 hat_a(a
, (GOR
,h
),RIGHT
) + \
785 hat_a(a
,(LGOR
,h
),RIGHT
)
787 print "attachment done"
791 ###################################################
792 # Most Probable Parse: #
793 ###################################################
# Sentinel keys used in the most-probable-parse tree (mpptree): STOP and
# ROOT pseudo-nodes carry impossible span positions (-1) so they can never
# collide with real (i, j, node, loc_h) chart keys.
STOPKEY = (-1,-1,STOP,-1)
ROOTKEY = (-1,-1,ROOT,-1)
798 def make_mpptree(g
, sent
):
799 '''Tell inner() to make an mpptree, connect ROOT to this. (Logically,
800 this should be part of inner_sent though...)'''
802 mpptree
= { ROOTKEY
:(0.0, ROOTKEY
, None) }
803 for loc_w
,w
in locs(g
.sent_nums(sent
),0,len(sent
)):
804 p
= g
.p_ROOT
[w
] * inner(0, len(sent
), (SEAL
,w
), loc_w
, g
, sent
, ichart
, mpptree
)
806 R
= (0,len(sent
), (SEAL
,w
), loc_w
)
807 if mpptree
[ROOTKEY
][0] < p
:
808 mpptree
[ROOTKEY
] = (p
, L
, R
)
811 def parse_mpptree(mpptree
, sent
):
812 '''mpptree is a dict of the form {k:(p,L,R),...}; where k, L and R
813 are `keys' of the form (i,j,node,loc).
815 returns an mpp of the form [((head, loc_h),(arg, loc_a)), ...],
816 where head and arg are tags.'''
817 # local functions for clear access to mpptree:
821 return POS(k_node(key
))
823 return seals(k_node(key
))
825 return (k_node(key
),key
[3])
827 return (k_POS(key
),key
[3])
829 s_k
= k_seals(key
) # i+1 == j
830 return key
[0] + 1 == key
[1] and (s_k
== GOR
or s_k
== GOL
)
836 # arbitrarily, "ROOT attaches to right". We add it here to
837 # avoid further complications:
838 firstkey
= t_R(mpptree
[ROOTKEY
])
839 deps
= set([ (k_locPOS(ROOTKEY
), k_locPOS(firstkey
), RIGHT
) ])
847 L
= t_L( mpptree
[k
] )
848 R
= t_R( mpptree
[k
] )
849 if k_locnode( k
) == k_locnode( L
): # Rattach
850 deps
.add((k_locPOS( k
), k_locPOS( R
), LEFT
))
852 elif k_locnode( k
) == k_locnode( R
): # Lattach
853 deps
.add((k_locPOS( k
), k_locPOS( L
), RIGHT
))
862 tagf
= g
.numtag
# localized function, todo: speed-test
863 mpptree
= make_mpptree(g
, sent
)
864 return set([((tagf(h
), loc_h
), (tagf(a
), loc_a
))
865 for (h
, loc_h
),(a
,loc_a
),dir in parse_mpptree(mpptree
,sent
)])
868 ########################################################################
869 # testing functions: #
870 ########################################################################
# Small hand-built corpus of POS-tag sentences used by the testing
# functions below; each sentence is a list of tag strings.
testcorpus = [s.split() for s in ['det nn vbd c vbd','vbd nn c vbd',
                                  'det nn vbd', 'det nn vbd c pp',
                                  'det nn vbd', 'det vbd vbd c pp',
                                  'det nn vbd', 'det nn vbd c vbd',
                                  'det nn vbd', 'det nn vbd c vbd',
                                  'det nn vbd', 'det nn vbd c vbd',
                                  'det nn vbd', 'det nn vbd c pp',
                                  'det nn vbd pp', 'det nn vbd', ]]
882 import loc_h_harmonic
883 reload(loc_h_harmonic
)
885 # make sure these are the way they were when setting up the tests:
886 loc_h_harmonic
.HARMONIC_C
= 0.0
887 loc_h_harmonic
.FNONSTOP_MIN
= 25
888 loc_h_harmonic
.FSTOP_MIN
= 5
889 loc_h_harmonic
.RIGHT_FIRST
= 1.0
890 loc_h_harmonic
.OLD_STOP_CALC
= True
892 return loc_h_harmonic
.initialize(testcorpus
)
894 def testreestimation2():
896 reestimate2(g2
, testcorpus
)
899 def testreestimation():
901 g
= reestimate(g
, testcorpus
)
905 def testmpp_regression(mpptree
,k_n
):
906 mpp
= {ROOTKEY
: (2.877072116829971e-05, STOPKEY
, (0, 3, (2, 3), 1)),
907 (0, 1, (1, 1), 0): (0.1111111111111111, (0, 1, (0, 1), 0), STOPKEY
),
908 (0, 1, (2, 1), 0): (0.049382716049382713, STOPKEY
, (0, 1, (1, 1), 0)),
909 (0, 3, (1, 3), 1): (0.00027619892321567721,
912 (0, 3, (2, 3), 1): (0.00012275507698474543, STOPKEY
, (0, 3, (1, 3), 1)),
913 (1, 3, (0, 3), 1): (0.025280986819448362,
916 (1, 3, (1, 3), 1): (0.0067415964851862296, (1, 3, (0, 3), 1), STOPKEY
),
917 (2, 3, (1, 4), 2): (0.32692307692307693, (2, 3, (0, 4), 2), STOPKEY
),
918 (2, 3, (2, 4), 2): (0.037721893491124266, STOPKEY
, (2, 3, (1, 4), 2))}
919 for k
,(v
,L
,R
) in mpp
.iteritems():
920 k2
= k
[0:k_n
] # 3 if the new does not check loc_h
923 if k2
not in mpptree
:
924 print "mpp regression, %s missing"%(k2
,)
926 vnew
= mpptree
[k2
][0]
927 if not "%.10f"%vnew
== "%.10f"%v
:
928 print "mpp regression, wanted %s=%.5f, got %.5f"%(k2
,v
,vnew
)
933 p_ROOT
, p_STOP
, p_ATTACH
, p_ORDER
= {},{},{},{}
936 p_STOP
[h
,LEFT
,NON
] = 1.0
937 p_STOP
[h
,LEFT
,ADJ
] = 1.0
938 p_STOP
[h
,RIGHT
,NON
] = 0.4 # RSTOP
939 p_STOP
[h
,RIGHT
,ADJ
] = 0.3 # RSTOP
940 p_STOP
[a
,LEFT
,NON
] = 1.0
941 p_STOP
[a
,LEFT
,ADJ
] = 1.0
942 p_STOP
[a
,RIGHT
,NON
] = 0.4 # RSTOP
943 p_STOP
[a
,RIGHT
,ADJ
] = 0.3 # RSTOP
944 p_ATTACH
[a
,h
,LEFT
] = 1.0 # not used
945 p_ATTACH
[a
,h
,RIGHT
] = 1.0 # not used
946 p_ATTACH
[h
,a
,LEFT
] = 1.0 # not used
947 p_ATTACH
[h
,a
,RIGHT
] = 1.0 # not used
948 p_ATTACH
[h
,h
,LEFT
] = 1.0 # not used
949 p_ATTACH
[h
,h
,RIGHT
] = 1.0 # not used
950 p_ORDER
[(GOR
, h
)] = 1.0
951 p_ORDER
[(GOL
, h
)] = 0.0
952 p_ORDER
[(GOR
, a
)] = 1.0
953 p_ORDER
[(GOL
, a
)] = 0.0
954 g
= DMV_Grammar({h
:'h',a
:'a'}, {'h':h
,'a':a
}, p_ROOT
, p_STOP
, p_ATTACH
, p_ORDER
)
955 # these probabilities are impossible so add them manually:
956 g
.p_GO_AT
[a
,a
,LEFT
,NON
] = 0.4 # Lattach
957 g
.p_GO_AT
[a
,a
,LEFT
,ADJ
] = 0.6 # Lattach
958 g
.p_GO_AT
[h
,a
,LEFT
,NON
] = 0.2 # Lattach to h
959 g
.p_GO_AT
[h
,a
,LEFT
,ADJ
] = 0.1 # Lattach to h
960 g
.p_GO_AT
[a
,a
,RIGHT
,NON
] = 1.0 # Rattach
961 g
.p_GO_AT
[a
,a
,RIGHT
,ADJ
] = 1.0 # Rattach
962 g
.p_GO_AT
[h
,a
,RIGHT
,NON
] = 1.0 # Rattach to h
963 g
.p_GO_AT
[h
,a
,RIGHT
,ADJ
] = 1.0 # Rattach to h
964 g
.p_GO_AT
[h
,h
,LEFT
,NON
] = 0.2 # Lattach
965 g
.p_GO_AT
[h
,h
,LEFT
,ADJ
] = 0.1 # Lattach
966 g
.p_GO_AT
[a
,h
,LEFT
,NON
] = 0.4 # Lattach to a
967 g
.p_GO_AT
[a
,h
,LEFT
,ADJ
] = 0.6 # Lattach to a
968 g
.p_GO_AT
[h
,h
,RIGHT
,NON
] = 1.0 # Rattach
969 g
.p_GO_AT
[h
,h
,RIGHT
,ADJ
] = 1.0 # Rattach
970 g
.p_GO_AT
[a
,h
,RIGHT
,NON
] = 1.0 # Rattach to a
971 g
.p_GO_AT
[a
,h
,RIGHT
,ADJ
] = 1.0 # Rattach to a
977 p_ROOT
, p_STOP
, p_ATTACH
, p_ORDER
= {},{},{},{}
979 p_STOP
[h
,LEFT
,NON
] = 1.0
980 p_STOP
[h
,LEFT
,ADJ
] = 1.0
981 p_STOP
[h
,RIGHT
,NON
] = 0.4
982 p_STOP
[h
,RIGHT
,ADJ
] = 0.3
983 p_ATTACH
[h
,h
,LEFT
] = 1.0 # not used
984 p_ATTACH
[h
,h
,RIGHT
] = 1.0 # not used
985 p_ORDER
[(GOR
, h
)] = 1.0
986 p_ORDER
[(GOL
, h
)] = 0.0
987 g
= DMV_Grammar({h
:'h'}, {'h':h
}, p_ROOT
, p_STOP
, p_ATTACH
, p_ORDER
)
988 g
.p_GO_AT
[h
,h
,LEFT
,NON
] = 0.6 # these probabilities are impossible
989 g
.p_GO_AT
[h
,h
,LEFT
,ADJ
] = 0.7 # so add them manually...
990 g
.p_GO_AT
[h
,h
,RIGHT
,NON
] = 1.0
991 g
.p_GO_AT
[h
,h
,RIGHT
,ADJ
] = 1.0
996 def testreestimation_h():
999 reestimate(g
,['h h h'.split()])
1002 def test(wanted
, got
):
1003 if not wanted
== got
:
1004 raise Warning, "Regression! Should be %s: %s" % (wanted
, got
)
1006 def regression_tests():
1007 testmpp_regression(make_mpptree(testgrammar(), testcorpus
[2]),4)
1011 "%.3f" % inner(0, 2, (SEAL
,h
), 0, testgrammar_h(), 'h h'.split(),{}))
1013 "%.3f" % inner(0, 2, (SEAL
,h
), 1, testgrammar_h(), 'h h'.split(),{}))
1015 "%.4f" % inner_sent(testgrammar_h(), 'h h h'.split(),{}))
1018 "%.4f" % inner(0, 3, (SEAL
,0), 0, testgrammar_h(), 'h h h'.split(),{}))
1020 "%.4f" % inner(0, 3, (SEAL
,0), 1, testgrammar_h(), 'h h h'.split(),{}))
1022 "%.4f" % inner(0, 3, (SEAL
,h
), 2, testgrammar_h(), 'h h h'.split(),{}))
1025 "%.2f" % outer(1, 3, (RGOL
,h
), 2, testgrammar_h(),'h h h'.split(),{},{}))
1026 test("0.61" , # ftw? can't be right... there's an 0.4 shared between these two...
1027 "%.2f" % outer(1, 3, (RGOL
,h
), 1, testgrammar_h(),'h h h'.split(),{},{}))
1030 "%.2f" % outer(1, 3, (RGOL
,h
), 0, testgrammar_h(),'h h h'.split(),{},{}))
1032 "%.2f" % outer(1, 3, (RGOL
,h
), 3, testgrammar_h(),'h h h'.split(),{},{}))
1035 "%.4f" % outer(0, 1, (GOR
,h
), 0,testgrammar_a(),'h a'.split(),{},{}))
1037 "%.4f" % outer(0, 2, (GOR
,h
), 0,testgrammar_a(),'h a'.split(),{},{}))
1039 "%.4f" % outer(0, 3, (GOR
,h
), 0,testgrammar_a(),'h a'.split(),{},{}))
1041 # todo: add more of these tests...
1045 def compare_grammars(g1
,g2
):
1047 for d1
,d2
in [(g1
.p_ATTACH
,g2
.p_ATTACH
),(g1
.p_STOP
,g2
.p_STOP
),
1048 (g1
.p_ORDER
, g2
.p_ORDER
), (g1
.p_ROOT
,g2
.p_ROOT
) ]:
1049 for k
,v
in d1
.iteritems():
1051 result
+= "\nreestimate1[%s]=%s missing from reestimate2"%(k
,v
)
1052 elif "%s"%d2[k
] != "%s"%v
:
1053 result
+= "\nreestimate1[%s]=%s while \nreestimate2[%s]=%s."%(k
,v
,k
,d2
[k
])
1054 for k
,v
in d2
.iteritems():
1056 result
+= "\nreestimate2[%s]=%s missing from reestimate1"%(k
,v
)
1060 def testNVNgrammar():
1061 import loc_h_harmonic
1063 # make sure these are the way they were when setting up the tests:
1064 loc_h_harmonic
.HARMONIC_C
= 0.0
1065 loc_h_harmonic
.FNONSTOP_MIN
= 25
1066 loc_h_harmonic
.FSTOP_MIN
= 5
1067 loc_h_harmonic
.RIGHT_FIRST
= 1.0
1068 loc_h_harmonic
.OLD_STOP_CALC
= True
1070 g
= loc_h_harmonic
.initialize(['n v n'.split()])
1075 inners
= [(sent
, inner_sent(g
, sent
, {})) for sent
in testcorpus
]
1078 if __name__
== "__main__":
1083 # profile.run('testreestimation()')
1086 # print timeit.Timer("loc_h_dmv.testreestimation()",'''import loc_h_dmv
1087 # reload(loc_h_dmv)''').timeit(1)
1092 # for s in testcorpus:
1093 # print "sent:%s\nparse:set(\n%s)"%(s,pprint.pformat(list(mpp(testgrammar(), s)),
1096 # g1 = testreestimation()
1097 # g2 = testreestimation2()
1098 # print compare_grammars(g1,g2)
1107 g
= testNVNgrammar()
1108 q_sent
= inner_sent(g
,'n v n'.split(),{})
1110 q_tree
[1] = 2.7213e-06 # n_0 -> v, n_0 -> n_2
1111 q_tree
[2] = 9.738e-06 # n -> v -> n
1112 q_tree
[3] = 2.268e-06 # n_0 -> n_2 -> v
1113 q_tree
[4] = 2.7213e-06 # same as 1-3
1114 q_tree
[5] = 9.738e-06
1115 q_tree
[6] = 2.268e-06
1116 q_tree
[7] = 1.086e-05 # n <- v -> n (e-05!!!)
1118 for i
,q_t
in q_tree
.iteritems():
1119 f_T_q
[i
] = q_t
/ q_sent
1121 pprint
.pprint(q_tree
)
1122 pprint
.pprint(f_T_q
)
1123 print sum([f
for f
in f_T_q
.values()])
1125 def treediv(num
,den
):
1127 sum([f_T_q
[i
] for i
in num
]) / \
1128 sum([f_T_q
[i
] for i
in den
])
1130 # g2['root --> _n_'] = treediv( (1,2,3,4,5,6), (1,2,3,4,5,6,7) )
1131 # g2['root --> _v_'] = treediv( (7,), (1,2,3,4,5,6,7) )
1132 # g2['_n_ --> STOP n><'] = treediv( (1,2,3,4,5,6,7,1,2,3,4,5,6,7),
1133 # (1,2,3,4,5,6,7,1,2,3,4,5,6,7))
1135 # g2['_n_ --> STOP n>< NON'] = treediv( (3,4,5,6),
1138 # g2['_v_ --> STOP v><'] = treediv( (1,2,3,4,5,6,7),
1140 # nlrtrees = (1,2,3,4,5,6,7,1,2,3,4,5,6,7,
1142 # g2['n>< --> _n_ n><'] = treediv( ( 4, 6), nlrtrees )
1143 # g2['n>< --> _v_ n><'] = treediv( (3,4,5), nlrtrees )
1144 # g2['n>< --> n> STOP'] = treediv( (1,2,3,4,5,6,7,1,2,3,4,5,6,7),
1147 # g2['n>< --> n> STOP ADJ'] = treediv( ( 4,5, 7,1,2,3,4,5,6,7),
1149 # g2['n>< --> n> STOP NON'] = treediv( (1,2,3, 6),
1152 # vlrtrees = (1,2,3,4,5,6,7,
1154 # g2['v>< --> _n_ v><'] = treediv( (5,7), vlrtrees )
1155 # g2['v>< --> v> STOP'] = treediv( (1,2,3,4,5,6,7), vlrtrees )
1156 # nrtrees = (1,2,3,4,5,6,7,1,2,3,4,5,6,7,
1158 # g2['n> --> n> _n_'] = treediv( (1,3), nrtrees )
1159 # g2['n> --> n> _v_'] = treediv( (1,2,6), nrtrees )
1161 # g2['n> --> n> _n_ NON'] = treediv( (1,), nrtrees )
1162 # g2['n> --> n> _n_ ADJ'] = treediv( ( 3,), nrtrees )
1163 # g2['n> --> n> _v_ ADJ'] = treediv( ( 1,2, 6), nrtrees )
1165 # vrtrees = (1,2,3,4,5,6,7,
1167 # g2['v> --> v> _n_'] = treediv( (2,7), vrtrees )
1169 # g2[' v|n,R '] = treediv( (1, 2, 6),
1171 # g2[' n|n,R '] = treediv( (1, 3),
1174 g2
[' stop|n,R,non '] = treediv( ( 1,2,3,6),
1176 g2
[' v|n,left '] = treediv( ( 3,4,5),
1178 g2
[' n|n,left '] = treediv( (6,4),
1182 g3
= reestimate2(g
, ['n v n'.split()])
1184 g4
= reestimate2(g
, ['n v n'.split()])