2 # todo: some more testing on the Brown corpus:
3 # # first five sentences of the Brown corpus:
4 # g_brown = harmonic.initialize([['AT', 'NP-TL', 'NN-TL', 'JJ-TL', 'NN-TL', 'VBD', 'NR', 'AT', 'NN', 'IN', 'NP$', 'JJ', 'NN', 'NN', 'VBD', '``', 'AT', 'NN', "''", 'CS', 'DTI', 'NNS', 'VBD', 'NN', '.'], ['AT', 'NN', 'RBR', 'VBD', 'IN', 'NN', 'NNS', 'CS', 'AT', 'NN-TL', 'JJ-TL', 'NN-TL', ',', 'WDT', 'HVD', 'JJ', 'NN', 'IN', 'AT', 'NN', ',', '``', 'VBZ', 'AT', 'NN', 'CC', 'NNS', 'IN', 'AT', 'NN-TL', 'IN-TL', 'NP-TL', "''", 'IN', 'AT', 'NN', 'IN', 'WDT', 'AT', 'NN', 'BEDZ', 'VBN', '.'], ['AT', 'NP', 'NN', 'NN', 'HVD', 'BEN', 'VBN', 'IN', 'NP-TL', 'JJ-TL', 'NN-TL', 'NN-TL', 'NP', 'NP', 'TO', 'VB', 'NNS', 'IN', 'JJ', '``', 'NNS', "''", 'IN', 'AT', 'JJ', 'NN', 'WDT', 'BEDZ', 'VBN', 'IN', 'NN-TL', 'NP', 'NP', 'NP', '.'], ['``', 'RB', 'AT', 'JJ', 'NN', 'IN', 'JJ', 'NNS', 'BEDZ', 'VBN', "''", ',', 'AT', 'NN', 'VBD', ',', '``', 'IN', 'AT', 'JJ', 'NN', 'IN', 'AT', 'NN', ',', 'AT', 'NN', 'IN', 'NNS', 'CC', 'AT', 'NN', 'IN', 'DT', 'NN', "''", '.'], ['AT', 'NN', 'VBD', 'PPS', 'DOD', 'VB', 'CS', 'AP', 'IN', 'NP$', 'NN', 'CC', 'NN', 'NNS', '``', 'BER', 'JJ', 'CC', 'JJ', 'CC', 'RB', 'JJ', "''", '.'], ['PPS', 'VBD', 'CS', 'NP', 'NNS', 'VB', '``', 'TO', 'HV', 'DTS', 'NNS', 'VBN', 'CC', 'VBN', 'IN', 'AT', 'NN', 'IN', 'VBG', 'CC', 'VBG', 'PPO', "''", '.'], ['AT', 'JJ', 'NN', 'VBD', 'IN', 'AT', 'NN', 'IN', 'AP', 'NNS', ',', 'IN', 'PPO', 'AT', 'NP', 'CC', 'NP-TL', 'NN-TL', 'VBG', 'NNS', 'WDT', 'PPS', 'VBD', '``', 'BER', 'QL', 'VBN', 'CC', 'VB', 'RB', 'VBN', 'NNS', 'WDT', 'VB', 'IN', 'AT', 'JJT', 'NN', 'IN', 'ABX', 'NNS', "''", '.'], ['NN-HL', 'VBN-HL'], ['WRB', ',', 'AT', 'NN', 'VBD', 'PPS', 'VBZ', '``', 'DTS', 'CD', 'NNS', 'MD', 'BE', 'VBN', 'TO', 'VB', 'JJR', 'NN', 'CC', 'VB', 'AT', 'NN', 'IN', 'NN', "''", '.'], ['AT', 'NN-TL', 'VBG-TL', 'NN-TL', ',', 'AT', 'NN', 'VBD', ',', '``', 'BEZ', 'VBG', 'IN', 'VBN', 'JJ', 'NNS', 'CS', 'AT', 'NN', 'IN', 'NN', 'NNS', 'NNS', "''", '.']])
5 # # 36:'AT' in g_brown.numtag, 40:'NP-TL'
8 # test_brown = inner(0,2, (LRBAR,36), g_brown, ['AT', 'NP-TL' ,'NN-TL','JJ-TL'], {})
10 # for r in g_brown.rules((2,36)) + g_brown.rules((1,36)) + g_brown.rules((0,36)):
13 # if head(L) in [36,40,-2] and head(R) in [36,40,-2]:
15 # print "Brown-test gives: %.8f" % test_brown
19 # this will give the tag sequences of all the 6218 Brown corpus
20 # sentences of length < 7:
21 # [[tag for (w, tag) in sent]
22 # for sent in nltk.corpus.brown.tagged_sents() if len(sent) < 7]
26 ##############################
28 ##############################
# Ad-hoc smoke test: parse a toy 3-token sentence, fill the inside chart,
# prune it, then re-run inner() and prune2() on the surviving chart.
# NOTE(review): `g` and `ichart` are not defined in this excerpt --
# presumably set up earlier in the file; confirm before running.
sent = 'h h h'.split()
# inner(s, t, LHS, loc_h, g, sent, ichart) fills `ichart` as a side effect
inner(0,2,ROOT,2, g,sent,ichart)
print debug_ichart(g,sent,ichart)
# NOTE(review): this call passes 7 arguments (incl. loc_h=2), but the
# visible `def prune(s,t,LHS, g, sent_nums, ichart)` takes only 6 --
# looks stale; confirm against the current prune() signature.
prune( 0,2,ROOT,2, g,[0,0,0],ichart)
print debug_ichart(g,sent,ichart)
# inner() returns a (probability, chart/tree) pair; keep only index [1]
tree = inner(0,2,ROOT,2, g,sent,ichart)[1]
print prune2( 0,2,ROOT,2, ichart, tree)
def prune2(s,t,LHS,loc_h, ichart,tree):
    # Copy into newichart only the chart entries reachable from the
    # (s,t,LHS,loc_h) root by following the daughter records in `tree`.
    # NOTE(review): `newichart` is not initialized in the visible lines --
    # presumably `newichart = {}` precedes the helper in the elided lines;
    # confirm, and check whether the function is meant to return it.
    def prune2_helper(s,t,LHS,loc_h):
        # keep this entry, then recurse into each recorded daughter,
        # where d = (s, t, LHS, loc_h) of the daughter constituent
        newichart[(s,t,LHS,loc_h)] = ichart[(s,t,LHS,loc_h)]
        for d in tree[s,t,LHS,loc_h]:
            prune2_helper(d[0],d[1],d[2],d[3])
    prune2_helper(s,t,LHS,loc_h)
def prune(s,t,LHS, g, sent_nums, ichart):
    '''Removes unused subtrees with positive probability from the
    inside chart `ichart`, in place.

    Unused := any and all mothers (or grandmothers etc.) have
    zero probability, so the subtree can never contribute to a parse
    rooted in (s, t, LHS).

    NOTE(review): this excerpt elides several original lines (the L/R
    unpacking after sent_rules(), the loop header binding the split
    point r, the keepichart initialization, and the guard on v before
    the pop); the comments below describe only what is visible.
    '''
    def prune_helper(keep,s,t,LHS,loc_h):
        # a subtree survives only if some ancestor chain keeps it AND
        # its own inside probability is positive
        keep = keep and ichart[(s,t,LHS,loc_h)] > 0.0
        # propagate the keep/discard verdict down through every rule
        # that could expand this constituent
        for rule in g.sent_rules(LHS, sent_nums):
            # same-span daughters: head position stays at loc_h
            if (s,t,L,loc_h) in ichart:
                prune_helper(keep, s,t, L,loc_h)
            if (s,t,R,loc_h) in ichart:
                prune_helper(keep, s,t, R,loc_h)
            # binary split: left daughter over (s,r), right over (r+1,t)
            # NOTE(review): L, R and the split point r are bound in elided
            # lines -- confirm their definitions before relying on this.
            for loc_L in locs(head(L), sent_nums, s, r):
                if (s,r,rule.L(),loc_L) in ichart:
                    prune_helper(keep, s ,r,rule.L(),loc_L)
            for loc_R in locs(head(R), sent_nums, r+1, t):
                if (r+1,t,rule.R(),loc_R) in ichart:
                    prune_helper(keep,r+1,t,rule.R(),loc_R)
        # record the verdict; later rules may overrule an earlier "discard"
        if not (s,t,LHS,loc_h) in keepichart:
            keepichart[(s,t,LHS,loc_h)] = keep
        else: # eg. if previously some parent rule had 0.0, but then a
              # later rule said "No, I've got a use for this subtree"
            keepichart[(s,t,LHS,loc_h)] += keep
    # seed the verdicts from every possible head position of the root span
    for loc_h,h in enumerate(sent_nums):
        keep = ichart[(s,t,LHS,loc_h)] > 0.0
        keepichart[(s,t,LHS,loc_h)] = keep
        prune_helper(keep,s,t,LHS,loc_h)
    # finally drop every entry nobody voted to keep
    # NOTE(review): the condition on v (presumably `if not v:`) is elided.
    for (s,t,LHS,loc_h),v in keepichart.iteritems():
        if 'PRUNE' in io.DEBUG:
            print "popping s:%d t:%d LHS:%s loc_h:%d" % (s,t,LHS,loc_h)
        ichart.pop((s,t,LHS,loc_h))
# end prune(s,t,LHS, g, sent_nums, ichart)
def prune_sent(g, sent_nums, ichart):
    '''Prune the whole-sentence span: run prune() over (0, n-1) with ROOT.'''
    last = len(sent_nums) - 1
    return prune(0, last, ROOT, g, sent_nums, ichart)
def P_STOP(STOP, h, dir, adj, g, corpus):
    # Reestimate a STOP probability for head `h` as a ratio of inside
    # probabilities accumulated over every occurrence of h in `corpus`.
    # NOTE(review): this excerpt elides several lines (the docstring,
    # the P_STOP_num/P_STOP_den initialization, and the loop headers
    # binding sent, h_tag, loc_h and chart); the comments below describe
    # only what is visible.
        locs_h = locs(h_tag, sent)  # all positions of h's tag in this sentence
        io.debug( "locs_h:%s, sent:%s"%(locs_h,sent) , 'PSTOP')
            # fill the inside chart for the whole sentence, head at loc_h
            inner(0, len(sent)-1, ROOT, loc_h, g, sent, chart)
            for s in range(loc_h): # s<loc(h), range gives strictly less
                for t in range(loc_h, len(sent)):
                    io.debug( "s:%s t:%s loc:%d"%(s,t,loc_h) , 'PSTOP')
                    # numerator: chart entries keyed (LRBAR, h) over (s,t)
                    if (s, t, (LRBAR,h), loc_h) in chart:
                        io.debug( "num+=%s"%chart[(s, t, (LRBAR,h), loc_h)] , 'PSTOP')
                        P_STOP_num += chart[(s, t, (LRBAR,h), loc_h)]
                    # denominator: chart entries keyed (RBAR, h) over (s,t)
                    if (s, t, (RBAR,h), loc_h) in chart:
                        io.debug( "den+=%s"%chart[(s, t, (RBAR,h), loc_h)] , 'PSTOP')
                        P_STOP_den += chart[(s, t, (RBAR,h), loc_h)]
    io.debug( "num/den: %s / %s"%(P_STOP_num, P_STOP_den) , 'PSTOP')
    io.debug( "num/den: %s / %s = %s"%(P_STOP_num, P_STOP_den,P_STOP_num / P_STOP_den) , 'PSTOP')
    # NOTE(review): divides by P_STOP_den -- ZeroDivisionError if the head
    # never occurs; the elided line between the two debug calls may guard
    # this. Confirm against the full file.
    return P_STOP_num / P_STOP_den # upside down in article
135 '''Here it seems like they store rule information on a per-head (per
136 direction) basis, in deps_D(h, dir) which gives us a list. '''
139 for dir in ['l', 'r']:
140 for a in deps(h, dir):
143 P_STOP (0, h, dir, adj) * \
144 P_CHOOSE (a, h, dir) * \
146 P_STOP (STOP | h, dir, adj)
148 return P_h(root(sent))
if __name__ == "__main__": # from dmv.py
    # these are not Real rules, just testing the classes. todo: make
    # a rule-set to test inner() on.
    # NOTE(review): this excerpt elides a few lines (presumably `b = {}`
    # before the b[...] terminal-probability assignments, and an
    # `if "%.2f" % test1[0] != "0.70":` guard before the first print);
    # confirm against the full file.
    s = DMV_Rule((LRBAR,0), (NOBAR,1),(NOBAR,2), 1.0, 0.0) # s->np vp
    np = DMV_Rule((NOBAR,1), (NOBAR,3),(NOBAR,4), 0.3, 0.0) # np->n p
    b[(NOBAR,1), 'n'] = 0.7 # np->'n'
    b[(NOBAR,3), 'n'] = 1.0 # n->'n'
    b[(NOBAR,4), 'p'] = 1.0 # p->'p'
    vp = DMV_Rule((NOBAR,2), (NOBAR,5),(NOBAR,1), 0.1, 0.0) # vp->v np (two parses use this rule)
    vp2 = DMV_Rule((NOBAR,2), (NOBAR,2),(NOBAR,4), 0.9, 0.0) # vp->vp p
    b[(NOBAR,5), 'v'] = 1.0 # v->'v'
    g = DMV_Grammar([s,np,vp,vp2], b, "todo","todo", "todo")

    # io.inner() returns (probability, chart); P over 'n' should be
    # 0.3*1.0... no: np->'n' directly with 0.7
    test1 = io.inner(0,0, (NOBAR,1), g, ['n'], {})
    print "should be 0.70 : %.2f" % test1[0]

    # full vp over 'v n p': two parses, expected inside prob ~0.09
    test2 = io.inner(0,2, (NOBAR,2), g, ['v','n','p'], {})
    if "%.2f" % test2[0] != "0.09": # 0.092999 etc, don't care about that
        print "should be 0.09 if the io.py-test is right : %.2f" % test2[0]
    # the following should manage to look stuff up in the chart:
    test2 = io.inner(0,2, (NOBAR,2), g, ['v','n','p'], test2[1])
    if "%.2f" % test2[0] != "0.09":
        print "should be 0.09 if the io.py-test is right : %.2f" % test2[0]