Randomize penalties for aborts and add blacklisting.
src/lj_trace.c
/*
** Trace management.
** Copyright (C) 2005-2010 Mike Pall. See Copyright Notice in luajit.h
*/

#define lj_trace_c
#define LUA_CORE

#include "lj_obj.h"

#if LJ_HASJIT

#include "lj_gc.h"
#include "lj_err.h"
#include "lj_str.h"
#include "lj_frame.h"
#include "lj_state.h"
#include "lj_bc.h"
#include "lj_ir.h"
#include "lj_jit.h"
#include "lj_iropt.h"
#include "lj_mcode.h"
#include "lj_trace.h"
#include "lj_snap.h"
#include "lj_gdbjit.h"
#include "lj_record.h"
#include "lj_asm.h"
#include "lj_dispatch.h"
#include "lj_vm.h"
#include "lj_vmevent.h"
#include "lj_target.h"

/* -- Error handling ------------------------------------------------------ */

/* Synchronous abort with error message. */
void lj_trace_err(jit_State *J, TraceError e)
{
  setnilV(&J->errinfo);  /* No error info. */
  setintV(J->L->top++, (int32_t)e);
  lj_err_throw(J->L, LUA_ERRRUN);
}

/* Synchronous abort with error message and error info. */
void lj_trace_err_info(jit_State *J, TraceError e)
{
  setintV(J->L->top++, (int32_t)e);
  lj_err_throw(J->L, LUA_ERRRUN);
}

/* -- Trace management ---------------------------------------------------- */

/* The current trace is first assembled in J->cur. The variable length
** arrays point to shared, growable buffers (J->irbuf etc.). The trace is
** kept in this state until a new trace needs to be created. Then the current
** trace and its data structures are copied to a new (compact) Trace object.
*/

/* Find a free trace number. */
static TraceNo trace_findfree(jit_State *J)
{
  MSize osz, lim;
  if (J->freetrace == 0)
    J->freetrace = 1;
  for (; J->freetrace < J->sizetrace; J->freetrace++)
    if (J->trace[J->freetrace] == NULL)
      return J->freetrace++;
  /* Need to grow trace array. */
  lim = (MSize)J->param[JIT_P_maxtrace] + 1;
  if (lim < 2) lim = 2; else if (lim > 65535) lim = 65535;
  osz = J->sizetrace;
  if (osz >= lim)
    return 0;  /* Too many traces. */
  lj_mem_growvec(J->L, J->trace, J->sizetrace, lim, Trace *);
  while (osz < J->sizetrace)
    J->trace[osz++] = NULL;
  return J->freetrace;
}

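/* Note: a return value of 0 means "no free slot below JIT_P_maxtrace";
** trace_start() responds to that by flushing all traces. J->freetrace is
** only a lower-bound hint, not an exact free list: trace_free() rewinds
** it when a lower-numbered slot is released, so the scan above may
** revisit slots freed out of order.
*/
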
#define TRACE_COPYELEM(field, szfield, tp) \
  T2->field = (tp *)p; \
  memcpy(p, T->field, T->szfield*sizeof(tp)); \
  p += T->szfield*sizeof(tp);

/* Save a trace by copying and compacting it. */
static Trace *trace_save(jit_State *J, Trace *T)
{
  size_t sztr = ((sizeof(Trace)+7)&~7);
  size_t szins = (T->nins-T->nk)*sizeof(IRIns);
  size_t sz = sztr + szins +
	      T->nsnap*sizeof(SnapShot) +
	      T->nsnapmap*sizeof(SnapEntry);
  Trace *T2 = lj_mem_newt(J->L, (MSize)sz, Trace);
  char *p = (char *)T2 + sztr;
  memcpy(T2, T, sizeof(Trace));
  T2->ir = (IRIns *)p - T->nk;
  memcpy(p, T->ir+T->nk, szins);
  p += szins;
  TRACE_COPYELEM(snap, nsnap, SnapShot)
  TRACE_COPYELEM(snapmap, nsnapmap, SnapEntry)
  lj_gc_barriertrace(J2G(J), T);
  return T2;
}

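/* Resulting single-allocation layout (header rounded up to 8 bytes):
**
**   [ Trace | IRIns[nins-nk] | SnapShot[nsnap] | SnapEntry[nsnapmap] ]
**
** T2->ir is biased by -nk, so IR references index the compacted copy
** exactly the way they indexed the growable J->irbuf while recording.
*/
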
/* Free a trace. */
static void trace_free(jit_State *J, TraceNo traceno)
{
  lua_assert(traceno != 0);
  if (traceno < J->freetrace)
    J->freetrace = traceno;
  lj_gdbjit_deltrace(J, J->trace[traceno]);
  if (traceno == J->curtrace) {
    lua_assert(J->trace[traceno] == &J->cur);
    J->trace[traceno] = NULL;
    J->curtrace = 0;
  } else {
    Trace *T = J->trace[traceno];
    lua_assert(T != NULL && T != &J->cur);
    J->trace[traceno] = NULL;
    lj_mem_free(J2G(J), T,
		((sizeof(Trace)+7)&~7) + (T->nins-T->nk)*sizeof(IRIns) +
		T->nsnap*sizeof(SnapShot) + T->nsnapmap*sizeof(SnapEntry));
  }
}

/* Free all traces associated with a prototype. No unpatching needed. */
void lj_trace_freeproto(global_State *g, GCproto *pt)
{
  jit_State *J = G2J(g);
  TraceNo traceno;
  /* Free all root traces. */
  for (traceno = pt->trace; traceno != 0; ) {
    TraceNo side, nextroot = J->trace[traceno]->nextroot;
    /* Free all side traces. */
    for (side = J->trace[traceno]->nextside; side != 0; ) {
      TraceNo next = J->trace[side]->nextside;
      trace_free(J, side);
      side = next;
    }
    /* Now free the trace itself. */
    trace_free(J, traceno);
    traceno = nextroot;
  }
}

/* Re-enable compiling a prototype by unpatching any modified bytecode. */
void lj_trace_reenableproto(GCproto *pt)
{
  if ((pt->flags & PROTO_HAS_ILOOP)) {
    BCIns *bc = proto_bc(pt);
    BCPos i, sizebc = pt->sizebc;
    pt->flags &= ~PROTO_HAS_ILOOP;
    if (bc_op(bc[0]) == BC_IFUNCF)
      setbc_op(&bc[0], BC_FUNCF);
    for (i = 1; i < sizebc; i++) {
      BCOp op = bc_op(bc[i]);
      if (op == BC_IFORL || op == BC_IITERL || op == BC_ILOOP)
	setbc_op(&bc[i], (int)op+(int)BC_LOOP-(int)BC_ILOOP);
    }
  }
}

/* Unpatch the bytecode modified by a root trace. */
static void trace_unpatch(jit_State *J, Trace *T)
{
  BCOp op = bc_op(T->startins);
  MSize pcofs = T->snap[0].mapofs + T->snap[0].nent;
  BCIns *pc = ((BCIns *)snap_pc(T->snapmap[pcofs])) - 1;
  UNUSED(J);
  switch (op) {
  case BC_FORL:
    lua_assert(bc_op(*pc) == BC_JFORI);
    setbc_op(pc, BC_FORI);  /* Unpatch JFORI, too. */
    pc += bc_j(*pc);
    lua_assert(bc_op(*pc) == BC_JFORL && J->trace[bc_d(*pc)] == T);
    *pc = T->startins;
    break;
  case BC_LOOP:
    lua_assert(bc_op(*pc) == BC_JLOOP && J->trace[bc_d(*pc)] == T);
    *pc = T->startins;
    break;
  case BC_ITERL:
    lua_assert(bc_op(*pc) == BC_JMP);
    pc += bc_j(*pc)+2;
    lua_assert(bc_op(*pc) == BC_JITERL && J->trace[bc_d(*pc)] == T);
    *pc = T->startins;
    break;
  case BC_FUNCF:
    lua_assert(bc_op(*pc) == BC_JFUNCF && J->trace[bc_d(*pc)] == T);
    *pc = T->startins;
    break;
  case BC_JMP:  /* No need to unpatch branches in parent traces (yet). */
  default:
    lua_assert(0);
    break;
  }
}

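/* Patched <-> original opcode pairs handled above (cf. trace_stop()):
**
**   FORL  <-> JFORL   (plus FORI <-> JFORI at the loop entry)
**   LOOP  <-> JLOOP
**   ITERL <-> JITERL
**   FUNCF <-> JFUNCF
**
** The J-variants carry the trace number in their D operand, which is
** what the asserts cross-check before restoring T->startins.
*/
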
/* Free a root trace and any attached side traces. */
static void trace_freeroot(jit_State *J, Trace *T, TraceNo traceno)
{
  GCproto *pt = &gcref(T->startpt)->pt;
  TraceNo side;
  lua_assert(T->root == 0 && pt != NULL);
  /* First unpatch any modified bytecode. */
  trace_unpatch(J, T);
  /* Unlink root trace from chain anchored in prototype. */
  if (pt->trace == traceno) {  /* Trace is first in chain. Easy. */
    pt->trace = T->nextroot;
  } else {  /* Otherwise search in chain of root traces. */
    Trace *T2 = J->trace[pt->trace];
    while (T2->nextroot != traceno) {
      lua_assert(T2->nextroot != 0);
      T2 = J->trace[T2->nextroot];
    }
    T2->nextroot = T->nextroot;  /* Unlink from chain. */
  }
  /* Free all side traces. */
  for (side = T->nextside; side != 0; ) {
    TraceNo next = J->trace[side]->nextside;
    trace_free(J, side);
    side = next;
  }
  /* Now free the trace itself. */
  trace_free(J, traceno);
}

/* Flush a root trace + side traces, if there are no links to it. */
int lj_trace_flush(jit_State *J, TraceNo traceno)
{
  if (traceno > 0 && traceno < J->sizetrace) {
    Trace *T = J->trace[traceno];
    if (T && T->root == 0) {
      ptrdiff_t i;
      for (i = (ptrdiff_t)J->sizetrace-1; i > 0; i--)
	if (i != (ptrdiff_t)traceno && J->trace[i] &&
	    J->trace[i]->root != traceno && J->trace[i]->link == traceno)
	  return 0;  /* Failed: existing link to trace. */
      trace_freeroot(J, T, traceno);
      return 1;  /* Ok. */
    }
  }
  return 0;  /* Failed. */
}

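/* The scan above refuses to flush a root trace while any foreign trace
** still links to it: the linking trace's machine code jumps directly into
** the flushed trace's mcode, so freeing it would leave a dangling branch.
** Side traces of the root being flushed (->root == traceno) are exempt
** from the check, since trace_freeroot() frees them together with it.
*/
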
/* Flush all traces associated with a prototype. */
void lj_trace_flushproto(global_State *g, GCproto *pt)
{
  while (pt->trace != 0)
    trace_freeroot(G2J(g), G2J(g)->trace[pt->trace], pt->trace);
}

/* Flush all traces. */
int lj_trace_flushall(lua_State *L)
{
  jit_State *J = L2J(L);
  ptrdiff_t i;
  if ((J2G(J)->hookmask & HOOK_GC))
    return 1;
  for (i = (ptrdiff_t)J->sizetrace-1; i > 0; i--) {
    Trace *T = J->trace[i];
    if (T && T->root == 0)
      trace_freeroot(J, T, (TraceNo)i);
  }
#ifdef LUA_USE_ASSERT
  for (i = 0; i < (ptrdiff_t)J->sizetrace; i++)
    lua_assert(J->trace[i] == NULL);
#endif
  J->freetrace = 0;
  /* Free the whole machine code area and invalidate all exit stub groups. */
  lj_mcode_free(J);
  memset(J->exitstubgroup, 0, sizeof(J->exitstubgroup));
  lj_vmevent_send(L, TRACE,
    setstrV(L, L->top++, lj_str_newlit(L, "flush"));
  );
  return 0;
}

/* Free everything associated with the JIT compiler state. */
void lj_trace_freestate(global_State *g)
{
  jit_State *J = G2J(g);
#ifdef LUA_USE_ASSERT
  {  /* This assumes all traces have already been freed. */
    ptrdiff_t i;
    for (i = 0; i < (ptrdiff_t)J->sizetrace; i++)
      lua_assert(J->trace[i] == NULL);
  }
#endif
  lj_mcode_free(J);
  lj_ir_knum_freeall(J);
  lj_mem_freevec(g, J->snapmapbuf, J->sizesnapmap, SnapEntry);
  lj_mem_freevec(g, J->snapbuf, J->sizesnap, SnapShot);
  lj_mem_freevec(g, J->irbuf + J->irbotlim, J->irtoplim - J->irbotlim, IRIns);
  lj_mem_freevec(g, J->trace, J->sizetrace, Trace *);
}

/* -- Penalties and blacklisting ------------------------------------------ */

/* Trivial PRNG for randomization of penalties. */
static uint32_t penalty_prng(jit_State *J, int bits)
{
  /* Yes, this LCG is very weak, but that doesn't matter for our use case. */
  J->prngstate = J->prngstate * 1103515245 + 12345;
  return J->prngstate >> (32-bits);
}

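/* The multiplier/increment pair is the classic ANSI C rand() LCG. Only
** the top `bits` bits of the state are returned, since the high bits of
** an LCG have much longer periods than the low ones. The randomness just
** needs to be good enough to spread out the retry points of bytecodes
** that keep aborting together.
*/
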
/* Blacklist a bytecode instruction. */
static void blacklist_pc(GCproto *pt, BCIns *pc)
{
  setbc_op(pc, (int)bc_op(*pc)+(int)BC_ILOOP-(int)BC_LOOP);
  pt->flags |= PROTO_HAS_ILOOP;
}

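/* The opcode arithmetic relies on the I-variants sitting at a fixed
** offset from their counterparts in the opcode enum, i.e. it maps
**
**   BC_LOOP -> BC_ILOOP,  BC_FORL -> BC_IFORL,  BC_ITERL -> BC_IITERL
**
** (cf. lj_trace_reenableproto() above, which applies the inverse mapping).
** The I-variants behave identically in the interpreter but skip the
** hotcount check, so a blacklisted instruction never triggers the trace
** compiler again until the prototype is explicitly re-enabled.
*/
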
/* Penalize a bytecode instruction. */
static void penalty_pc(jit_State *J, GCproto *pt, BCIns *pc, TraceError e)
{
  uint32_t i, val = PENALTY_MIN;
  for (i = 0; i < PENALTY_SLOTS; i++)
    if (mref(J->penalty[i].pc, const BCIns) == pc) {  /* Cache slot found? */
      /* First try to bump its hotcount several times. */
      val = ((uint32_t)J->penalty[i].val << 1) +
	    penalty_prng(J, PENALTY_RNDBITS);
      if (val > PENALTY_MAX) {
	blacklist_pc(pt, pc);  /* Blacklist it, if that didn't help. */
	return;
      }
      goto setpenalty;
    }
  /* Assign a new penalty cache slot. */
  i = J->penaltyslot;
  J->penaltyslot = (J->penaltyslot + 1) & (PENALTY_SLOTS-1);
  setmref(J->penalty[i].pc, pc);
setpenalty:
  J->penalty[i].val = (uint16_t)val;
  J->penalty[i].reason = e;
  hotcount_set(J2GG(J), pc+1, val);
}

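/* A rough sketch of the escalation, assuming the PENALTY_* defaults from
** lj_jit.h (PENALTY_MIN 36, PENALTY_MAX 60000, PENALTY_RNDBITS 4):
**
**   abort #1:  val = 36                       (new cache slot)
**   abort #2:  val = 2*36  + rnd(0..15) ~  75
**   abort #3:  val = 2*75  + rnd(0..15) ~ 153
**   ...
**   abort #12: val > 60000  ->  blacklist_pc()
**
** So the hotcount threshold roughly doubles per abort, and a persistently
** failing start PC is blacklisted after about log2(60000/36) ~ 11
** doublings. The round-robin slot reuse means a penalty can be forgotten
** early if more than PENALTY_SLOTS distinct bytecodes abort in between.
*/
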
/* -- Trace compiler state machine ---------------------------------------- */

/* Start tracing. */
static void trace_start(jit_State *J)
{
  lua_State *L;

  if (J->curtrace != 0 && J->trace[J->curtrace] == &J->cur) {
    J->trace[J->curtrace] = trace_save(J, &J->cur);  /* Save current trace. */
    J->curtrace = 0;
  }

  if ((J->pt->flags & PROTO_NO_JIT)) {  /* JIT disabled for this proto? */
    if (J->parent == 0) {
      /* Lazy bytecode patching to disable hotcount events. */
      setbc_op(J->pc, (int)bc_op(*J->pc)+(int)BC_ILOOP-(int)BC_LOOP);
      J->pt->flags |= PROTO_HAS_ILOOP;
    }
    J->state = LJ_TRACE_IDLE;  /* Silently ignored. */
    return;
  }

  /* Get a new trace number. */
  J->curtrace = trace_findfree(J);
  if (LJ_UNLIKELY(J->curtrace == 0)) {  /* No free trace? */
    lua_assert((J2G(J)->hookmask & HOOK_GC) == 0);
    lj_trace_flushall(J->L);
    J->state = LJ_TRACE_IDLE;  /* Silently ignored. */
    return;
  }
  J->trace[J->curtrace] = &J->cur;

  /* Setup enough of the current trace to be able to send the vmevent. */
  memset(&J->cur, 0, sizeof(Trace));
  J->cur.nins = J->cur.nk = REF_BASE;
  J->cur.ir = J->irbuf;
  J->cur.snap = J->snapbuf;
  J->cur.snapmap = J->snapmapbuf;
  J->mergesnap = 0;
  J->needsnap = 0;
  J->guardemit.irt = 0;

  L = J->L;
  lj_vmevent_send(L, TRACE,
    setstrV(L, L->top++, lj_str_newlit(L, "start"));
    setintV(L->top++, J->curtrace);
    setfuncV(L, L->top++, J->fn);
    setintV(L->top++, proto_bcpos(J->pt, J->pc));
    if (J->parent) {
      setintV(L->top++, J->parent);
      setintV(L->top++, J->exitno);
    }
  );

  lj_record_setup(J);
}

/* Stop tracing. */
static void trace_stop(jit_State *J)
{
  BCIns *pc = (BCIns *)J->startpc;  /* Not const here. */
  BCOp op = bc_op(J->cur.startins);
  GCproto *pt = &gcref(J->cur.startpt)->pt;
  lua_State *L;

  switch (op) {
  case BC_FORL:
    setbc_op(pc+bc_j(J->cur.startins), BC_JFORI);  /* Patch FORI, too. */
    /* fallthrough */
  case BC_LOOP:
  case BC_ITERL:
  case BC_FUNCF:
    /* Patch bytecode of starting instruction in root trace. */
    setbc_op(pc, (int)op+(int)BC_JLOOP-(int)BC_LOOP);
    setbc_d(pc, J->curtrace);
    /* Add to root trace chain in prototype. */
    J->cur.nextroot = pt->trace;
    pt->trace = (TraceNo1)J->curtrace;
    break;
  case BC_JMP:
    /* Patch exit branch in parent to side trace entry. */
    lua_assert(J->parent != 0 && J->cur.root != 0);
    lj_asm_patchexit(J, J->trace[J->parent], J->exitno, J->cur.mcode);
    /* Avoid compiling a side trace twice (stack resizing uses parent exit). */
    J->trace[J->parent]->snap[J->exitno].count = SNAPCOUNT_DONE;
    /* Add to side trace chain in root trace. */
    {
      Trace *root = J->trace[J->cur.root];
      root->nchild++;
      J->cur.nextside = root->nextside;
      root->nextside = (TraceNo1)J->curtrace;
    }
    break;
  default:
    lua_assert(0);
    break;
  }

  /* Commit new mcode only after all patching is done. */
  lj_mcode_commit(J, J->cur.mcode);
  lj_gdbjit_addtrace(J, &J->cur, J->curtrace);

  L = J->L;
  lj_vmevent_send(L, TRACE,
    setstrV(L, L->top++, lj_str_newlit(L, "stop"));
    setintV(L->top++, J->curtrace);
  );
}

/* Abort tracing. */
static int trace_abort(jit_State *J)
{
  lua_State *L = J->L;
  TraceError e = LJ_TRERR_RECERR;
  lj_mcode_abort(J);
  if (tvisnum(L->top-1))
    e = (TraceError)lj_num2int(numV(L->top-1));
  if (e == LJ_TRERR_MCODELM) {
    J->state = LJ_TRACE_ASM;
    return 1;  /* Retry ASM with new MCode area. */
  }
  /* Penalize or blacklist starting bytecode instruction. */
  if (J->parent == 0)
    penalty_pc(J, &gcref(J->cur.startpt)->pt, (BCIns *)J->startpc, e);
  if (J->curtrace) {  /* Is there anything to abort? */
    ptrdiff_t errobj = savestack(L, L->top-1);  /* Stack may be resized. */
    lj_vmevent_send(L, TRACE,
      TValue *frame;
      const BCIns *pc;
      GCfunc *fn;
      setstrV(L, L->top++, lj_str_newlit(L, "abort"));
      setintV(L->top++, J->curtrace);
      /* Find original Lua function call to generate a better error message. */
      frame = J->L->base-1;
      pc = J->pc;
      while (!isluafunc(frame_func(frame))) {
	pc = frame_pc(frame) - 1;
	frame = frame_prev(frame);
      }
      fn = frame_func(frame);
      setfuncV(L, L->top++, fn);
      setintV(L->top++, proto_bcpos(funcproto(fn), pc));
      copyTV(L, L->top++, restorestack(L, errobj));
      copyTV(L, L->top++, &J->errinfo);
    );
    /* Drop aborted trace after the vmevent (which may still access it). */
    J->trace[J->curtrace] = NULL;
    if (J->curtrace < J->freetrace)
      J->freetrace = J->curtrace;
    J->curtrace = 0;
  }
  L->top--;  /* Remove error object. */
  if (e == LJ_TRERR_MCODEAL)
    lj_trace_flushall(L);
  return 0;
}

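/* Note the policy split: only aborted *root* traces (J->parent == 0) feed
** the penalty/blacklist machinery above; a failed side trace is simply
** dropped and may be retried on a later hot exit. The two machine code
** errors get special treatment: LJ_TRERR_MCODELM retries assembly in a
** fresh MCode area, while LJ_TRERR_MCODEAL flushes all traces.
*/
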
/* State machine for the trace compiler. Protected callback. */
static TValue *trace_state(lua_State *L, lua_CFunction dummy, void *ud)
{
  jit_State *J = (jit_State *)ud;
  UNUSED(dummy);
  do {
    switch (J->state) {
    case LJ_TRACE_START:
      J->state = LJ_TRACE_RECORD;  /* trace_start() may change state. */
      trace_start(J);
      lj_dispatch_update(J2G(J));
      break;

    case LJ_TRACE_RECORD:
      setvmstate(J2G(J), RECORD);
      lj_vmevent_send(L, RECORD,
	setintV(L->top++, J->curtrace);
	setfuncV(L, L->top++, J->fn);
	setintV(L->top++, J->pt ? (int32_t)proto_bcpos(J->pt, J->pc) : -1);
	setintV(L->top++, J->framedepth);
      );
      lj_record_ins(J);
      break;

    case LJ_TRACE_END:
      J->loopref = 0;
      if ((J->flags & JIT_F_OPT_LOOP) &&
	  J->cur.link == J->curtrace && J->framedepth + J->retdepth == 0) {
	setvmstate(J2G(J), OPT);
	lj_opt_dce(J);
	if (lj_opt_loop(J)) {  /* Loop optimization failed? */
	  J->loopref = J->cur.nins;
	  J->state = LJ_TRACE_RECORD;  /* Try to continue recording. */
	  break;
	}
	J->loopref = J->chain[IR_LOOP];  /* Needed by assembler. */
      }
      J->state = LJ_TRACE_ASM;
      break;

    case LJ_TRACE_ASM:
      setvmstate(J2G(J), ASM);
      lj_asm_trace(J, &J->cur);
      trace_stop(J);
      setvmstate(J2G(J), INTERP);
      J->state = LJ_TRACE_IDLE;
      lj_dispatch_update(J2G(J));
      return NULL;

    default:  /* Trace aborted asynchronously. */
      setintV(L->top++, (int32_t)LJ_TRERR_RECERR);
      /* fallthrough */
    case LJ_TRACE_ERR:
      if (trace_abort(J))
	break;  /* Retry. */
      setvmstate(J2G(J), INTERP);
      J->state = LJ_TRACE_IDLE;
      lj_dispatch_update(J2G(J));
      return NULL;
    }
  } while (J->state > LJ_TRACE_RECORD);
  return NULL;
}

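/* Normal flow through the states, as driven by the loop above:
**
**   IDLE -> START -> RECORD -> ... -> RECORD -> END -> ASM -> IDLE
**
** Any failure moves to ERR, where trace_abort() either retries assembly
** (LJ_TRERR_MCODELM) or penalizes the start PC and returns to IDLE. The
** `J->state > LJ_TRACE_RECORD` condition exits the loop once the state
** drops back to RECORD (or IDLE), so the interpreter regains control
** between individual recorded instructions.
*/
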
/* -- Event handling ------------------------------------------------------ */

/* A bytecode instruction is about to be executed. Record it. */
void lj_trace_ins(jit_State *J, const BCIns *pc)
{
  /* Note: J->L must already be set. pc is the true bytecode PC here. */
  J->pc = pc;
  J->fn = curr_func(J->L);
  J->pt = isluafunc(J->fn) ? funcproto(J->fn) : NULL;
  while (lj_vm_cpcall(J->L, NULL, (void *)J, trace_state) != 0)
    J->state = LJ_TRACE_ERR;
}

/* A hotcount triggered. Start recording a root trace. */
void LJ_FASTCALL lj_trace_hot(jit_State *J, const BCIns *pc)
{
  /* Note: pc is the interpreter bytecode PC here. It's offset by 1. */
  hotcount_set(J2GG(J), pc, J->param[JIT_P_hotloop]+1);  /* Reset hotcount. */
  /* Only start a new trace if not recording or inside __gc call or vmevent. */
  if (J->state == LJ_TRACE_IDLE &&
      !(J2G(J)->hookmask & (HOOK_GC|HOOK_VMEVENT))) {
    J->parent = 0;  /* Root trace. */
    J->exitno = 0;
    J->state = LJ_TRACE_START;
    lj_trace_ins(J, pc-1);
  }
}

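/* The counter is reset before recording starts, so if the attempt aborts
** without reaching penalty_pc() (e.g. the "silently ignored" paths in
** trace_start()), the loop simply has to become hot again; otherwise
** penalty_pc() overrides this reset with its much larger randomized
** value. Note the pc-1 adjustment: hotcounts are keyed on the interpreter
** PC, which is one instruction past the bytecode that triggered the event
** (penalty_pc() compensates in the other direction with pc+1).
*/
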
/* Check for a hot side exit. If yes, start recording a side trace. */
static void trace_hotside(jit_State *J, const BCIns *pc)
{
  SnapShot *snap = &J->trace[J->parent]->snap[J->exitno];
  if (!(J2G(J)->hookmask & (HOOK_GC|HOOK_VMEVENT)) &&
      snap->count != SNAPCOUNT_DONE &&
      ++snap->count >= J->param[JIT_P_hotexit]) {
    lua_assert(J->state == LJ_TRACE_IDLE);
    /* J->parent is non-zero for a side trace. */
    J->state = LJ_TRACE_START;
    lj_trace_ins(J, pc);
  }
}

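/* Unlike root traces, side exits count upwards per snapshot: every taken
** exit bumps snap->count until it crosses JIT_P_hotexit. A successfully
** compiled side trace pins the counter at SNAPCOUNT_DONE in trace_stop(),
** which disables both further counting and recompilation of that exit.
*/
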
/* Tiny struct to pass data to protected call. */
typedef struct ExitDataCP {
  jit_State *J;
  void *exptr;		/* Pointer to exit state. */
  const BCIns *pc;	/* Restart interpreter at this PC. */
} ExitDataCP;

/* Need to protect lj_snap_restore because it may throw. */
static TValue *trace_exit_cp(lua_State *L, lua_CFunction dummy, void *ud)
{
  ExitDataCP *exd = (ExitDataCP *)ud;
  cframe_errfunc(L->cframe) = -1;  /* Inherit error function. */
  exd->pc = lj_snap_restore(exd->J, exd->exptr);
  UNUSED(dummy);
  return NULL;
}

/* A trace exited. Restore interpreter state. */
int LJ_FASTCALL lj_trace_exit(jit_State *J, void *exptr)
{
  lua_State *L = J->L;
  ExitDataCP exd;
  int errcode;
  exd.J = J;
  exd.exptr = exptr;
  errcode = lj_vm_cpcall(L, NULL, &exd, trace_exit_cp);
  if (errcode)
    return errcode;

  lj_vmevent_send(L, TEXIT,
    ExitState *ex = (ExitState *)exptr;
    uint32_t i;
    lj_state_checkstack(L, 4+RID_NUM_GPR+RID_NUM_FPR+LUA_MINSTACK);
    setintV(L->top++, J->parent);
    setintV(L->top++, J->exitno);
    setintV(L->top++, RID_NUM_GPR);
    setintV(L->top++, RID_NUM_FPR);
    for (i = 0; i < RID_NUM_GPR; i++)
      setintV(L->top++, ex->gpr[i]);
    for (i = 0; i < RID_NUM_FPR; i++) {
      setnumV(L->top, ex->fpr[i]);
      if (LJ_UNLIKELY(tvisnan(L->top)))
	setnanV(L->top);
      L->top++;
    }
  );

  trace_hotside(J, exd.pc);
  setcframe_pc(cframe_raw(L->cframe), exd.pc);
  return 0;
}

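/* Exit sequence in full: the exit stub has dumped the machine registers
** into an ExitState, lj_snap_restore() rebuilds the Lua stack from the
** exit's snapshot (under a protected call, since stack growth may throw),
** the TEXIT vmevent exposes the register contents to listeners,
** trace_hotside() decides whether this exit is hot enough to record a
** side trace, and the interpreter finally restarts at the restored PC.
*/
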
#endif