fixed a little bug in root reestimation (forgot to divide by len(corpus)=f_T_q(ROOT))
[dmvccm.git] / src / main.py
# Todo: since we evaluate _after_ we reestimate, we lose the icharts
# made while reestimating. If we had these available, evaluate and
# corpus_likelihood would be a lot faster, but since they need to run
# _after_ reestimate, we'll have to store an ichart per sentence. So
# try storing those icharts in some loc_h_dmv global, and see if it's
# faster using space rather than time. (A sketch of the idea follows.)
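
# A minimal sketch of the space-for-time idea above (hypothetical helpers,
# not wired in anywhere yet): keep one ichart per sentence in a module-level
# dict, so evaluate and corpus_likelihood could reuse the charts built while
# reestimating. Since inside probabilities depend on the current grammar,
# the cache has to be emptied after every reestimation.
_icharts = {}

def cached_ichart(sent):
    "Return the ichart dict for sent, creating an empty one on demand."
    return _icharts.setdefault(tuple(sent), {})

def clear_icharts():
    "Call right after reestimate(), since the old charts go stale."
    _icharts.clear()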

from common_dmv import MPPROOT, test, node_str
from wsjdep import WSJDepCorpusReader

#HARMONIC_C: 509.637290698, FNONSTOP_MIN: 30.1124584139, FSTOP_MIN: 13.0830178845
def initialize_loc_h(tagonlys):
    import loc_h_harmonic # since we need to change constants (is there a better way?)
    reload(loc_h_harmonic)
    import random
    # loc_h_harmonic.HARMONIC_C = 380.111684914
    # loc_h_harmonic.FSTOP_MIN = 13.5744632704
    # loc_h_harmonic.FNONSTOP_MIN = 34.8939452454
    loc_h_harmonic.HARMONIC_C = 0 # 509.63 #1000.0 * random.random()
    loc_h_harmonic.FSTOP_MIN = 13.08 #20.0 * random.random()
    loc_h_harmonic.FNONSTOP_MIN = 30.11 #50.0 * random.random() + loc_h_harmonic.FSTOP_MIN

    loc_h_harmonic.RIGHT_FIRST = 1.0
    loc_h_harmonic.OTHER_STOP_CALC = False
    print '''
HARMONIC_C: %s, FNONSTOP_MIN: %s, FSTOP_MIN: %s
RIGHT_FIRST: %s, OTHER_STOP_CALC: %s'''%(loc_h_harmonic.HARMONIC_C,
                                         loc_h_harmonic.FNONSTOP_MIN,
                                         loc_h_harmonic.FSTOP_MIN,
                                         loc_h_harmonic.RIGHT_FIRST,
                                         loc_h_harmonic.OTHER_STOP_CALC)
    g = loc_h_harmonic.initialize(tagonlys)
    return g

def initialize_cnf(tagonlys):
    import cnf_harmonic # since we need to change constants (is there a better way?)
    reload(cnf_harmonic)
    cnf_harmonic.HARMONIC_C = 0.0
    cnf_harmonic.FNONSTOP_MIN = 25
    cnf_harmonic.FSTOP_MIN = 5
    return cnf_harmonic.initialize(tagonlys)
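
# A possible answer to the "is there a better way?" question above (just a
# sketch, with a hypothetical name): pass the constants in as keyword
# arguments and set all of them explicitly on each call, so no reload() is
# needed to get back to the module defaults.
def initialize_cnf_kw(tagonlys, harmonic_c=0.0, fnonstop_min=25, fstop_min=5):
    import cnf_harmonic
    cnf_harmonic.HARMONIC_C = harmonic_c
    cnf_harmonic.FNONSTOP_MIN = fnonstop_min
    cnf_harmonic.FSTOP_MIN = fstop_min
    return cnf_harmonic.initialize(tagonlys)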

def test_likelihood(reestimate, initialize, inner_sent,
                    corpus_size=20, corpus_offset=1000, iterations=4, eval=False):
    def run_IO(g, iterations, tagonlys, tags_and_parses):
        sumlog, msg = corpus_likelihood(g, tagonlys)
        print msg
        if eval: print evaluate(g, tags_and_parses)
        for i in range(iterations):
            g = reestimate(g, tagonlys)
            print "reestimation number %d done"%i
            if eval: print evaluate(g, tags_and_parses)

            prev_sumlog = sumlog
            sumlog, msg = corpus_likelihood(g, tagonlys)
            # inside-outside is an EM algorithm, so the corpus likelihood
            # should never decrease; if it does, something is buggy
            if sumlog < prev_sumlog:
                raise Exception, msg+"but previous was %s"%prev_sumlog
            print msg
        return g

    def corpus_likelihood(g, tagsonly):
        from math import log
        sumlog = 0.0
        for sent in tagsonly:
            p_sent = inner_sent(g, sent, {})
            if p_sent == 0.0:
                print "%s had zero probability!"%sent
            else:
                sumlog += log(p_sent)
        avg = sumlog / len(tagsonly)
        return (sumlog, "Sum of log P_{sentence}: %.4f (should move towards 0), avg: %s\n"%(sumlog,avg))
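
    # Worked example of those numbers (with made-up probabilities): five
    # sentences of P_{sentence} = 0.001 each give sumlog = 5*log(0.001)
    # ~= -34.54 and avg ~= -6.91; as reestimation improves the grammar,
    # both should climb towards 0, since log 1 = 0.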

    reader = WSJDepCorpusReader(None)
    tagonlys = reader.tagonly_sents()[corpus_offset:corpus_offset+corpus_size]
    tags_and_parses = reader.tagged_and_parsed_sents()[corpus_offset:corpus_offset+corpus_size]

#     from loc_h_dmv import testcorpus
#     tagonlys = testcorpus

    print "initializing %d sentences..." % corpus_size,
    g = initialize(tagonlys)
    print "initialized"

    g = run_IO(g, iterations, tagonlys, tags_and_parses)
    return g

def evaluate(g, tagged_and_parsed_sents):
    '''
    tagged_and_parsed_sents is a list of pairs:
    (tagonly_sent, parsed_sent)

    R_num += 1 if pair from parsed is in mpp
    R_den += 1 per pair from parsed

    P_num += 1 if pair from mpp is in parsed
    P_den += 1 per pair from mpp

    F1 = (2 * P * R)/(P + R), the harmonic mean of P and R
    '''
    from loc_h_dmv import mpp
    from wsjdep import add_root

    R, R_r, P, P_r = {}, {}, {}, {}
    for nd in ['num', 'den']:
        R[nd], R_r[nd], P[nd], P_r[nd] = 0, 0, 0, 0
    unrooted = 0 # parses where we couldn't add_root

    for sent, gold_parse in tagged_and_parsed_sents:
        mpp_sent = mpp(g, sent)
        try: gold_parse = add_root(gold_parse)
        except ValueError: unrooted += 1 # fall back to the unrooted gold parse

        for pair in gold_parse:
            d = R
            if pair[0] == MPPROOT: d = R_r
            d['den'] += 1
            if pair in mpp_sent: d['num'] += 1

        for pair in mpp_sent:
            d = P
            if pair[0] == MPPROOT: d = P_r
            d['den'] += 1
            if pair in gold_parse: d['num'] += 1

    recall = float(R['num']) / float(R['den'])
    precision = float(P['num']) / float(P['den'])
    recall_r = float(R['num']+R_r['num']) / float(R['den']+R_r['den'])
    precision_r = float(P['num']+P_r['num']) / float(P['den']+P_r['den'])
    F1, F1_r = 0.0, 0.0
    if (precision + recall) > 0.0:
        F1 = (2 * recall * precision) / (precision + recall)
    if (precision_r + recall_r) > 0.0:
        F1_r = (2 * recall_r * precision_r) / (precision_r + recall_r)

    str_vals = (R['num'],R['den'],recall, R['num']+R_r['num'], R['den']+R_r['den'], recall_r,
                P['num'],P['den'],precision, P['num']+P_r['num'], P['den']+P_r['den'], precision_r,
                F1, F1_r, unrooted)
    return '''Recall: %d/%d = %.4f\tRecall_r: %d/%d = %.4f
Precision: %d/%d = %.4f\tPrecision_r: %d/%d = %.4f
F1: \t\t%.4f\t\tF1_r: \t\t%.4f (unrooted gold parses: %d)'''%str_vals
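
# Worked example of evaluate()'s scores (with made-up counts): recall
# R = 7/10 = 0.70 and precision P = 7/8 = 0.875 give
# F1 = (2 * 0.70 * 0.875) / (0.70 + 0.875) ~= 0.7778. The _r variants
# simply fold the ROOT attachments back into the same counts.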

def compare_loc_h_cnf():
    reader = WSJDepCorpusReader(None)
    corpus_size = 200
    corpus_offset = 1000
    tagonlys = reader.tagonly_sents()[corpus_offset:corpus_offset+corpus_size]

    import loc_h_harmonic, cnf_harmonic
    g_l = loc_h_harmonic.initialize(tagonlys)
    g_c = cnf_harmonic.initialize(tagonlys)

    initials = [
        (g_l.p_ROOT.iteritems(), g_c.p_ROOT),
        (g_c.p_ROOT.iteritems(), g_l.p_ROOT),
        (g_l.p_STOP.iteritems(), g_c.p_STOP),
        (g_c.p_STOP.iteritems(), g_l.p_STOP),
        (g_l.p_ATTACH.iteritems(), g_c.p_ATTACH),
        (g_c.p_ATTACH.iteritems(), g_l.p_ATTACH)]
    for a_items, b in initials:
        for k,v in a_items:
            if k not in b: raise Warning, "a[%s]=%s, but %s not in b"%(k,v,k)
            if b[k] != v: raise Warning, "a[%s]=%s, but b[%s]=%s"%(k,v,k,b[k])

    import loc_h_dmv, cnf_dmv
    from common_dmv import GOR
    for sent in tagonlys:
        ochart_l, ochart_c, ichart_l, ichart_c = {},{},{},{}
        i_l = loc_h_dmv.inner_sent(g_l, sent, ichart_l)
        i_c = cnf_dmv.inner_sent(g_c, sent, ichart_c)
        test("%s"%i_l, "%s"%i_c, "i_l","i_c")

        for loc_w,w in enumerate(sent):
            w_node = (GOR, g_l.tagnum(w))
            o_l = loc_h_dmv.outer(loc_w,loc_w+1,w_node,loc_w, g_l, sent, ichart_l,ochart_l)
            o_c = cnf_dmv.outer(loc_w,loc_w+1,w_node, g_c, sent, ichart_c,ochart_c)
            print "%s, %s, %s"%(sent,node_str(w_node),loc_w)
            test("%s"%o_l, "%s"%o_c, "o_l(0,1,(GOR,%s),%d,...)"%(w,loc_w), "o_c")
# end compare_loc_h_cnf()

def init_nothing(g, H, N, S):
    "Make an 'initializer' that ignores its corpus and just returns g."
    print '''
HARMONIC_C: %s, FNONSTOP_MIN: %s, FSTOP_MIN: %s'''%(H,N,S)
    return lambda corpus: g

def rnd_grammars_test():
    import loc_h_dmv
    reload(loc_h_dmv)

    rnd_grammars0 = []
    for i in xrange(20):
        g = test_likelihood(loc_h_dmv.reestimate,
                            initialize_loc_h,
                            loc_h_dmv.inner_sent,
                            corpus_size=6268,
                            iterations=0,
                            corpus_offset=0,
                            eval=True)
        rnd_grammars0 += [(g, g.HARMONIC_C, g.FNONSTOP_MIN, g.FSTOP_MIN)]

    rnd_grammars1 = [(test_likelihood(loc_h_dmv.reestimate,
                                      init_nothing(g,H,N,S),
                                      loc_h_dmv.inner_sent,
                                      corpus_size=6268,
                                      iterations=1,
                                      corpus_offset=0,
                                      eval=True),
                      H,N,S)
                     for g,H,N,S in rnd_grammars0]
    rnd_grammars2 = [(test_likelihood(loc_h_dmv.reestimate,
                                      init_nothing(g,H,N,S),
                                      loc_h_dmv.inner_sent,
                                      corpus_size=6268,
                                      iterations=1,
                                      corpus_offset=0,
                                      eval=True),
                      H,N,S)
                     for g,H,N,S in rnd_grammars1]

if __name__ == "__main__":
    print "main.py:"

#     compare_loc_h_cnf()

#     import cnf_dmv
#     reload(cnf_dmv)
#     print "\ntrying cnf-reestimate ##############################"
#     g = test_likelihood(cnf_dmv.reestimate,
#                         initialize_cnf,
#                         cnf_dmv.inner_sent,
#                         corpus_size=5,
#                         iterations=4)

    import loc_h_dmv
#     reload(loc_h_dmv)
#     rnd_grammars_test()
    print "\ntrying reestimate v.1 ##############################"
    g = test_likelihood(loc_h_dmv.reestimate,
                        initialize_loc_h,
                        loc_h_dmv.inner_sent,
                        corpus_size=5,
                        iterations=4,
                        corpus_offset=0,
                        eval=True)
    print g

    print "\ntrying reestimate v.2 ##############################"
    g = test_likelihood(loc_h_dmv.reestimate2,
                        initialize_loc_h,
                        loc_h_dmv.inner_sent,
                        corpus_size=5,
                        iterations=4,
                        corpus_offset=0)

    print "main.py: done"