ARM64: Fix IR_HREF code generation for constant FP keys.
[luajit-2.0.git] / src / lj_jit.h
blob 6902fba33326d85c947ffbdfd04b4af5f8d65f07
/*
** Common definitions for the JIT compiler.
** Copyright (C) 2005-2023 Mike Pall. See Copyright Notice in luajit.h
*/

#ifndef _LJ_JIT_H
#define _LJ_JIT_H

#include "lj_obj.h"
#if LJ_HASJIT
#include "lj_ir.h"

/* -- JIT engine flags ---------------------------------------------------- */

/* General JIT engine flags. 4 bits. */
#define JIT_F_ON 0x00000001

/* CPU-specific JIT engine flags. 12 bits. Flags and strings must match. */
#define JIT_F_CPU 0x00000010

#if LJ_TARGET_X86ORX64

#define JIT_F_SSE3 (JIT_F_CPU << 0)
#define JIT_F_SSE4_1 (JIT_F_CPU << 1)
#define JIT_F_BMI2 (JIT_F_CPU << 2)

#define JIT_F_CPUSTRING "\4SSE3\6SSE4.1\4BMI2"

#elif LJ_TARGET_ARM

#define JIT_F_ARMV6_ (JIT_F_CPU << 0)
#define JIT_F_ARMV6T2_ (JIT_F_CPU << 1)
#define JIT_F_ARMV7 (JIT_F_CPU << 2)
#define JIT_F_ARMV8 (JIT_F_CPU << 3)
#define JIT_F_VFPV2 (JIT_F_CPU << 4)
#define JIT_F_VFPV3 (JIT_F_CPU << 5)

#define JIT_F_ARMV6 (JIT_F_ARMV6_|JIT_F_ARMV6T2_|JIT_F_ARMV7|JIT_F_ARMV8)
#define JIT_F_ARMV6T2 (JIT_F_ARMV6T2_|JIT_F_ARMV7|JIT_F_ARMV8)
#define JIT_F_VFP (JIT_F_VFPV2|JIT_F_VFPV3)

#define JIT_F_CPUSTRING "\5ARMv6\7ARMv6T2\5ARMv7\5ARMv8\5VFPv2\5VFPv3"

#elif LJ_TARGET_PPC

#define JIT_F_SQRT (JIT_F_CPU << 0)
#define JIT_F_ROUND (JIT_F_CPU << 1)

#define JIT_F_CPUSTRING "\4SQRT\5ROUND"

#elif LJ_TARGET_MIPS

#define JIT_F_MIPSXXR2 (JIT_F_CPU << 0)

#if LJ_TARGET_MIPS32
#if LJ_TARGET_MIPSR6
#define JIT_F_CPUSTRING "\010MIPS32R6"
#else
#define JIT_F_CPUSTRING "\010MIPS32R2"
#endif
#else
#if LJ_TARGET_MIPSR6
#define JIT_F_CPUSTRING "\010MIPS64R6"
#else
#define JIT_F_CPUSTRING "\010MIPS64R2"
#endif
#endif

#else

#define JIT_F_CPUSTRING ""

#endif
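
/*
** Illustrative sketch (not in the original header): JIT_F_CPUSTRING is a
** length-prefixed name list; the leading octal escape of each chunk gives
** the length of the name that follows, in the same order as the flag bits.
** Assuming a jit_State *J, the enabled features could be walked like this:
*/
#if 0
  const char *p = JIT_F_CPUSTRING;
  uint32_t flag = JIT_F_CPU;  /* First CPU-specific flag bit. */
  while (*p) {
    int len = *(const uint8_t *)p++;  /* Length byte. */
    if (J->flags & flag)
      printf("%.*s\n", len, p);  /* Feature name, e.g. "SSE4.1". */
    p += len;
    flag <<= 1;  /* Next flag bit, matching the string order. */
  }
#endif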

/* Optimization flags. 12 bits. */
#define JIT_F_OPT 0x00010000
#define JIT_F_OPT_MASK 0x0fff0000

#define JIT_F_OPT_FOLD (JIT_F_OPT << 0)
#define JIT_F_OPT_CSE (JIT_F_OPT << 1)
#define JIT_F_OPT_DCE (JIT_F_OPT << 2)
#define JIT_F_OPT_FWD (JIT_F_OPT << 3)
#define JIT_F_OPT_DSE (JIT_F_OPT << 4)
#define JIT_F_OPT_NARROW (JIT_F_OPT << 5)
#define JIT_F_OPT_LOOP (JIT_F_OPT << 6)
#define JIT_F_OPT_ABC (JIT_F_OPT << 7)
#define JIT_F_OPT_SINK (JIT_F_OPT << 8)
#define JIT_F_OPT_FUSE (JIT_F_OPT << 9)
#define JIT_F_OPT_FMA (JIT_F_OPT << 10)

/* Optimization names for -O. Must match the order above. */
#define JIT_F_OPTSTRING \
  "\4fold\3cse\3dce\3fwd\3dse\6narrow\4loop\3abc\4sink\4fuse\3fma"

/* Optimization levels set a fixed combination of flags. */
#define JIT_F_OPT_0 0
#define JIT_F_OPT_1 (JIT_F_OPT_FOLD|JIT_F_OPT_CSE|JIT_F_OPT_DCE)
#define JIT_F_OPT_2 (JIT_F_OPT_1|JIT_F_OPT_NARROW|JIT_F_OPT_LOOP)
#define JIT_F_OPT_3 (JIT_F_OPT_2|\
  JIT_F_OPT_FWD|JIT_F_OPT_DSE|JIT_F_OPT_ABC|JIT_F_OPT_SINK|JIT_F_OPT_FUSE)
#define JIT_F_OPT_DEFAULT JIT_F_OPT_3
/* Note: FMA is not set by default. */
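
/*
** Sketch (illustrative, not in the original header): optimization passes
** test these bits before applying a transformation. use_fma() below is a
** hypothetical stand-in for a backend decision:
*/
#if 0
  uint32_t flags = JIT_F_OPT_DEFAULT;  /* fold|cse|dce|narrow|loop|fwd|dse|abc|sink|fuse */
  if (flags & JIT_F_OPT_FMA)  /* Off by default; must be enabled explicitly. */
    use_fma();
#endif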

/* -- JIT engine parameters ----------------------------------------------- */

#if LJ_TARGET_WINDOWS || LJ_64
/* See: https://devblogs.microsoft.com/oldnewthing/20031008-00/?p=42223 */
#define JIT_P_sizemcode_DEFAULT 64
#else
/* Could go as low as 4K, but the mmap() overhead would be rather high. */
#define JIT_P_sizemcode_DEFAULT 32
#endif

/* Optimization parameters and their defaults. Length is a char in octal! */
#define JIT_PARAMDEF(_) \
  _(\010, maxtrace, 1000) /* Max. # of traces in cache. */ \
  _(\011, maxrecord, 4000) /* Max. # of recorded IR instructions. */ \
  _(\012, maxirconst, 500) /* Max. # of IR constants of a trace. */ \
  _(\007, maxside, 100) /* Max. # of side traces of a root trace. */ \
  _(\007, maxsnap, 500) /* Max. # of snapshots for a trace. */ \
  _(\011, minstitch, 0) /* Min. # of IR ins for a stitched trace. */ \
  \
  _(\007, hotloop, 56) /* # of iter. to detect a hot loop/call. */ \
  _(\007, hotexit, 10) /* # of taken exits to start a side trace. */ \
  _(\007, tryside, 4) /* # of attempts to compile a side trace. */ \
  \
  _(\012, instunroll, 4) /* Max. unroll for unstable loops. */ \
  _(\012, loopunroll, 15) /* Max. unroll for loop ops in side traces. */ \
  _(\012, callunroll, 3) /* Max. unroll for recursive calls. */ \
  _(\011, recunroll, 2) /* Min. unroll for true recursion. */ \
  \
  /* Size of each machine code area (in KBytes). */ \
  _(\011, sizemcode, JIT_P_sizemcode_DEFAULT) \
  /* Max. total size of all machine code areas (in KBytes). */ \
  _(\010, maxmcode, 512) \
  /* End of list. */

enum {
#define JIT_PARAMENUM(len, name, value) JIT_P_##name,
JIT_PARAMDEF(JIT_PARAMENUM)
#undef JIT_PARAMENUM
  JIT_P__MAX
};

#define JIT_PARAMSTR(len, name, value) #len #name
#define JIT_P_STRING JIT_PARAMDEF(JIT_PARAMSTR)
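
/*
** Sketch (illustrative): expanding the X-macro above yields an enum of
** parameter indices plus one concatenated string of length-prefixed names,
** in the same format as JIT_F_CPUSTRING. Roughly:
*/
#if 0
enum { JIT_P_maxtrace, JIT_P_maxrecord, JIT_P_maxirconst, /* ..., */ JIT_P__MAX };
/* JIT_P_STRING == "\010" "maxtrace" "\011" "maxrecord" ... (one string). */
#endif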

/* -- JIT engine data structures ------------------------------------------ */

/* Trace compiler state. */
typedef enum {
  LJ_TRACE_IDLE, /* Trace compiler idle. */
  LJ_TRACE_ACTIVE = 0x10,
  LJ_TRACE_RECORD, /* Bytecode recording active. */
  LJ_TRACE_RECORD_1ST, /* Record 1st instruction, too. */
  LJ_TRACE_START, /* New trace started. */
  LJ_TRACE_END, /* End of trace. */
  LJ_TRACE_ASM, /* Assemble trace. */
  LJ_TRACE_ERR /* Trace aborted with error. */
} TraceState;

/* Post-processing action. */
typedef enum {
  LJ_POST_NONE, /* No action. */
  LJ_POST_FIXCOMP, /* Fixup comparison and emit pending guard. */
  LJ_POST_FIXGUARD, /* Fixup and emit pending guard. */
  LJ_POST_FIXGUARDSNAP, /* Fixup and emit pending guard and snapshot. */
  LJ_POST_FIXBOOL, /* Fixup boolean result. */
  LJ_POST_FIXCONST, /* Fixup constant results. */
  LJ_POST_FFRETRY /* Suppress recording of retried fast functions. */
} PostProc;

/* Machine code type. */
#if LJ_TARGET_X86ORX64
typedef uint8_t MCode;
#else
typedef uint32_t MCode;
#endif

/* Linked list of MCode areas. */
typedef struct MCLink {
  MCode *next; /* Next area. */
  size_t size; /* Size of current area. */
} MCLink;

/* Stack snapshot header. */
typedef struct SnapShot {
  uint32_t mapofs; /* Offset into snapshot map. */
  IRRef1 ref; /* First IR ref for this snapshot. */
  uint16_t mcofs; /* Offset into machine code in MCode units. */
  uint8_t nslots; /* Number of valid slots. */
  uint8_t topslot; /* Maximum frame extent. */
  uint8_t nent; /* Number of compressed entries. */
  uint8_t count; /* Count of taken exits for this snapshot. */
} SnapShot;

#define SNAPCOUNT_DONE 255 /* Already compiled and linked a side trace. */

/* Compressed snapshot entry. */
typedef uint32_t SnapEntry;

#define SNAP_FRAME 0x010000 /* Frame slot. */
#define SNAP_CONT 0x020000 /* Continuation slot. */
#define SNAP_NORESTORE 0x040000 /* No need to restore slot. */
#define SNAP_SOFTFPNUM 0x080000 /* Soft-float number. */
#define SNAP_KEYINDEX 0x100000 /* Traversal key index. */
LJ_STATIC_ASSERT(SNAP_FRAME == TREF_FRAME);
LJ_STATIC_ASSERT(SNAP_CONT == TREF_CONT);
LJ_STATIC_ASSERT(SNAP_KEYINDEX == TREF_KEYINDEX);

#define SNAP(slot, flags, ref) (((SnapEntry)(slot) << 24) + (flags) + (ref))
#define SNAP_TR(slot, tr) \
  (((SnapEntry)(slot) << 24) + \
   ((tr) & (TREF_KEYINDEX|TREF_CONT|TREF_FRAME|TREF_REFMASK)))
#if !LJ_FR2
#define SNAP_MKPC(pc) ((SnapEntry)u32ptr(pc))
#endif
#define SNAP_MKFTSZ(ftsz) ((SnapEntry)(ftsz))
#define snap_ref(sn) ((sn) & 0xffff)
#define snap_slot(sn) ((BCReg)((sn) >> 24))
#define snap_isframe(sn) ((sn) & SNAP_FRAME)
#define snap_setref(sn, ref) (((sn) & (0xffff0000&~SNAP_NORESTORE)) | (ref))
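
/*
** Worked example (not in the original header): a SnapEntry packs the stack
** slot into the top byte, the flags into bits 16-23 and the IR reference
** into the low 16 bits:
*/
#if 0
  SnapEntry sn = SNAP(5, SNAP_FRAME, 0x8003); /* == 0x05018003 */
  BCReg slot = snap_slot(sn); /* 5 */
  IRRef ref = snap_ref(sn); /* 0x8003 */
  int isframe = snap_isframe(sn); /* Non-zero: a frame slot. */
#endif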

static LJ_AINLINE const BCIns *snap_pc(SnapEntry *sn)
{
#if LJ_FR2
  uint64_t pcbase;
  memcpy(&pcbase, sn, sizeof(uint64_t));
  return (const BCIns *)(pcbase >> 8);
#else
  return (const BCIns *)(uintptr_t)*sn;
#endif
}

/* Snapshot and exit numbers. */
typedef uint32_t SnapNo;
typedef uint32_t ExitNo;

/* Trace number. */
typedef uint32_t TraceNo; /* Used to pass around trace numbers. */
typedef uint16_t TraceNo1; /* Stored trace number. */

/* Type of link. ORDER LJ_TRLINK */
typedef enum {
  LJ_TRLINK_NONE, /* Incomplete trace. No link, yet. */
  LJ_TRLINK_ROOT, /* Link to other root trace. */
  LJ_TRLINK_LOOP, /* Loop to same trace. */
  LJ_TRLINK_TAILREC, /* Tail-recursion. */
  LJ_TRLINK_UPREC, /* Up-recursion. */
  LJ_TRLINK_DOWNREC, /* Down-recursion. */
  LJ_TRLINK_INTERP, /* Fallback to interpreter. */
  LJ_TRLINK_RETURN, /* Return to interpreter. */
  LJ_TRLINK_STITCH /* Trace stitching. */
} TraceLink;

/* Trace object. */
typedef struct GCtrace {
  GCHeader;
  uint16_t nsnap; /* Number of snapshots. */
  IRRef nins; /* Next IR instruction. Biased with REF_BIAS. */
#if LJ_GC64
  uint32_t unused_gc64;
#endif
  GCRef gclist;
  IRIns *ir; /* IR instructions/constants. Biased with REF_BIAS. */
  IRRef nk; /* Lowest IR constant. Biased with REF_BIAS. */
  uint32_t nsnapmap; /* Number of snapshot map elements. */
  SnapShot *snap; /* Snapshot array. */
  SnapEntry *snapmap; /* Snapshot map. */
  GCRef startpt; /* Starting prototype. */
  MRef startpc; /* Bytecode PC of starting instruction. */
  BCIns startins; /* Original bytecode of starting instruction. */
  MSize szmcode; /* Size of machine code. */
  MCode *mcode; /* Start of machine code. */
#if LJ_ABI_PAUTH
  ASMFunction mcauth; /* Start of machine code, with ptr auth applied. */
#endif
  MSize mcloop; /* Offset of loop start in machine code. */
  uint16_t nchild; /* Number of child traces (root trace only). */
  uint16_t spadjust; /* Stack pointer adjustment (offset in bytes). */
  TraceNo1 traceno; /* Trace number. */
  TraceNo1 link; /* Linked trace (or self for loops). */
  TraceNo1 root; /* Root trace of side trace (or 0 for root traces). */
  TraceNo1 nextroot; /* Next root trace for same prototype. */
  TraceNo1 nextside; /* Next side trace of same root trace. */
  uint8_t sinktags; /* Trace has SINK tags. */
  uint8_t topslot; /* Top stack slot already checked to be allocated. */
  uint8_t linktype; /* Type of link. */
  uint8_t unused1;
#ifdef LUAJIT_USE_GDBJIT
  void *gdbjit_entry; /* GDB JIT entry. */
#endif
} GCtrace;

#define gco2trace(o) check_exp((o)->gch.gct == ~LJ_TTRACE, (GCtrace *)(o))
#define traceref(J, n) \
  check_exp((n)>0 && (MSize)(n)<J->sizetrace, (GCtrace *)gcref(J->trace[(n)]))

LJ_STATIC_ASSERT(offsetof(GChead, gclist) == offsetof(GCtrace, gclist));

static LJ_AINLINE MSize snap_nextofs(GCtrace *T, SnapShot *snap)
{
  if (snap+1 == &T->snap[T->nsnap])
    return T->nsnapmap;
  else
    return (snap+1)->mapofs;
}
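
/*
** Usage sketch (not in the original header): mapofs and nent delimit the
** compressed entries of one snapshot within the trace's snapshot map,
** mirroring how the snapshot restore code walks it:
*/
#if 0
  SnapShot *snap = &T->snap[snapno];
  SnapEntry *map = &T->snapmap[snap->mapofs];
  MSize n;
  for (n = 0; n < snap->nent; n++) {
    SnapEntry sn = map[n];
    BCReg s = snap_slot(sn); /* Stack slot to restore. */
    IRRef ref = snap_ref(sn); /* IR instruction providing its value. */
  }
#endif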

/* Round-robin penalty cache for bytecodes leading to aborted traces. */
typedef struct HotPenalty {
  MRef pc; /* Starting bytecode PC. */
  uint16_t val; /* Penalty value, i.e. hotcount start. */
  uint16_t reason; /* Abort reason (really TraceErr). */
} HotPenalty;

#define PENALTY_SLOTS 64 /* Penalty cache slots. Must be a power of 2. */
#define PENALTY_MIN (36*2) /* Minimum penalty value. */
#define PENALTY_MAX 60000 /* Maximum penalty value. */
#define PENALTY_RNDBITS 4 /* # of random bits to add to penalty value. */

/* Round-robin backpropagation cache for narrowing conversions. */
typedef struct BPropEntry {
  IRRef1 key; /* Key: original reference. */
  IRRef1 val; /* Value: reference after conversion. */
  IRRef mode; /* Mode for this entry (currently IRCONV_*). */
} BPropEntry;

/* Number of slots for the backpropagation cache. Must be a power of 2. */
#define BPROP_SLOTS 16
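
/*
** Sketch (illustrative) of the usual probe/insert pattern for a small
** round-robin cache like this: scan all slots linearly; on a miss,
** overwrite the slot at the round-robin index. The actual logic lives
** in the narrowing pass (lj_opt_narrow.c).
*/
#if 0
  ptrdiff_t i;
  for (i = 0; i < BPROP_SLOTS; i++) /* Linear probe; the cache is tiny. */
    if (J->bpropcache[i].key == key)
      return J->bpropcache[i].val;
  i = J->bpropslot; /* Miss: evict in round-robin order. */
  J->bpropslot = (i + 1) & (BPROP_SLOTS-1);
  J->bpropcache[i].key = key;
  J->bpropcache[i].val = val;
#endif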

/* Scalar evolution analysis cache. */
typedef struct ScEvEntry {
  MRef pc; /* Bytecode PC of FORI. */
  IRRef1 idx; /* Index reference. */
  IRRef1 start; /* Constant start reference. */
  IRRef1 stop; /* Constant stop reference. */
  IRRef1 step; /* Constant step reference. */
  IRType1 t; /* Scalar type. */
  uint8_t dir; /* Direction. 1: +, 0: -. */
} ScEvEntry;

/* Reverse bytecode map (IRRef -> PC). Only for selected instructions. */
typedef struct RBCHashEntry {
  MRef pc; /* Bytecode PC. */
  GCRef pt; /* Prototype. */
  IRRef ref; /* IR reference. */
} RBCHashEntry;

/* Number of slots in the reverse bytecode hash table. Must be a power of 2. */
#define RBCHASH_SLOTS 8

/* 128 bit SIMD constants. */
enum {
  LJ_KSIMD_ABS,
  LJ_KSIMD_NEG,
  LJ_KSIMD__MAX
};

enum {
#if LJ_TARGET_X86ORX64
  LJ_K64_TOBIT, /* 2^52 + 2^51 */
  LJ_K64_2P64, /* 2^64 */
  LJ_K64_M2P64, /* -2^64 */
#if LJ_32
  LJ_K64_M2P64_31, /* -2^64 or -2^31 */
#else
  LJ_K64_M2P64_31 = LJ_K64_M2P64,
#endif
#endif
#if LJ_TARGET_MIPS
  LJ_K64_2P31, /* 2^31 */
#if LJ_64
  LJ_K64_2P63, /* 2^63 */
  LJ_K64_M2P64, /* -2^64 */
#endif
#endif
  LJ_K64__MAX,
};
#define LJ_K64__USED (LJ_TARGET_X86ORX64 || LJ_TARGET_MIPS)

enum {
#if LJ_TARGET_X86ORX64
  LJ_K32_M2P64_31, /* -2^64 or -2^31 */
#endif
#if LJ_TARGET_PPC
  LJ_K32_2P52_2P31, /* 2^52 + 2^31 */
  LJ_K32_2P52, /* 2^52 */
#endif
#if LJ_TARGET_PPC || LJ_TARGET_MIPS
  LJ_K32_2P31, /* 2^31 */
#endif
#if LJ_TARGET_MIPS64
  LJ_K32_2P63, /* 2^63 */
  LJ_K32_M2P64, /* -2^64 */
#endif
  LJ_K32__MAX
};
#define LJ_K32__USED (LJ_TARGET_X86ORX64 || LJ_TARGET_PPC || LJ_TARGET_MIPS)

/* Get 16 byte aligned pointer to SIMD constant. */
#define LJ_KSIMD(J, n) \
  ((TValue *)(((intptr_t)&J->ksimd[2*(n)] + 15) & ~(intptr_t)15))
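
/*
** Worked example (not in the original header): ksimd[] itself is only
** guaranteed 8 byte alignment, so each 16 byte constant gets a pair of
** TValue slots and the pointer is rounded up to the next 16 byte boundary.
** E.g. if &J->ksimd[2*n] == 0x1008, then (0x1008 + 15) & ~15 == 0x1010.
** The extra +1 element in the ksimd[] array provides the rounding slack.
*/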

/* Set/reset flag to activate the SPLIT pass for the current trace. */
#if LJ_SOFTFP32 || (LJ_32 && LJ_HASFFI)
#define lj_needsplit(J) (J->needsplit = 1)
#define lj_resetsplit(J) (J->needsplit = 0)
#else
#define lj_needsplit(J) UNUSED(J)
#define lj_resetsplit(J) UNUSED(J)
#endif

/* Fold state is used to fold instructions on-the-fly. */
typedef struct FoldState {
  IRIns ins; /* Currently emitted instruction. */
  IRIns left[2]; /* Instruction referenced by left operand. */
  IRIns right[2]; /* Instruction referenced by right operand. */
} FoldState;

/* JIT compiler state. */
typedef struct jit_State {
  GCtrace cur; /* Current trace. */
  GCtrace *curfinal; /* Final address of current trace (set during asm). */

  lua_State *L; /* Current Lua state. */
  const BCIns *pc; /* Current PC. */
  GCfunc *fn; /* Current function. */
  GCproto *pt; /* Current prototype. */
  TRef *base; /* Current frame base, points into J->slots. */

  uint32_t flags; /* JIT engine flags. */
  BCReg maxslot; /* Relative to baseslot. */
  BCReg baseslot; /* Current frame base, offset into J->slots. */

  uint8_t mergesnap; /* Allowed to merge with next snapshot. */
  uint8_t needsnap; /* Need snapshot before recording next bytecode. */
  IRType1 guardemit; /* Accumulated IRT_GUARD for emitted instructions. */
  uint8_t bcskip; /* Number of bytecode instructions to skip. */

  FoldState fold; /* Fold state. */

  const BCIns *bc_min; /* Start of allowed bytecode range for root trace. */
  MSize bc_extent; /* Extent of the range. */

  TraceState state; /* Trace compiler state. */

  int32_t instunroll; /* Unroll counter for unstable loops. */
  int32_t loopunroll; /* Unroll counter for loop ops in side traces. */
  int32_t tailcalled; /* Number of successive tailcalls. */
  int32_t framedepth; /* Current frame depth. */
  int32_t retdepth; /* Return frame depth (count of RETF). */

#if LJ_K32__USED
  uint32_t k32[LJ_K32__MAX]; /* Common 4 byte constants used by backends. */
#endif
  TValue ksimd[LJ_KSIMD__MAX*2+1]; /* 16 byte aligned SIMD constants. */
#if LJ_K64__USED
  TValue k64[LJ_K64__MAX]; /* Common 8 byte constants. */
#endif

  IRIns *irbuf; /* Temp. IR instruction buffer. Biased with REF_BIAS. */
  IRRef irtoplim; /* Upper limit of instruction buffer (biased). */
  IRRef irbotlim; /* Lower limit of instruction buffer (biased). */
  IRRef loopref; /* Last loop reference or ref of final LOOP (or 0). */

  MSize sizesnap; /* Size of temp. snapshot buffer. */
  SnapShot *snapbuf; /* Temp. snapshot buffer. */
  SnapEntry *snapmapbuf; /* Temp. snapshot map buffer. */
  MSize sizesnapmap; /* Size of temp. snapshot map buffer. */

  PostProc postproc; /* Required post-processing after execution. */
#if LJ_SOFTFP32 || (LJ_32 && LJ_HASFFI)
  uint8_t needsplit; /* Need SPLIT pass. */
#endif
  uint8_t retryrec; /* Retry recording. */

  GCRef *trace; /* Array of traces. */
  TraceNo freetrace; /* Start of scan for next free trace. */
  MSize sizetrace; /* Size of trace array. */
  IRRef1 ktrace; /* Reference to KGC with GCtrace. */

  IRRef1 chain[IR__MAX]; /* IR instruction skip-list chain anchors. */
  TRef slot[LJ_MAX_JSLOTS+LJ_STACK_EXTRA]; /* Stack slot map. */

  int32_t param[JIT_P__MAX]; /* JIT engine parameters. */

  MCode *exitstubgroup[LJ_MAX_EXITSTUBGR]; /* Exit stub group addresses. */

  HotPenalty penalty[PENALTY_SLOTS]; /* Penalty slots. */
  uint32_t penaltyslot; /* Round-robin index into penalty slots. */

#ifdef LUAJIT_ENABLE_TABLE_BUMP
  RBCHashEntry rbchash[RBCHASH_SLOTS]; /* Reverse bytecode map. */
#endif

  BPropEntry bpropcache[BPROP_SLOTS]; /* Backpropagation cache slots. */
  uint32_t bpropslot; /* Round-robin index into bpropcache slots. */

  ScEvEntry scev; /* Scalar evolution analysis cache. */

  const BCIns *startpc; /* Bytecode PC of starting instruction. */
  TraceNo parent; /* Parent of current side trace (0 for root traces). */
  ExitNo exitno; /* Exit number in parent of current side trace. */
  int exitcode; /* Exit code from unwound trace. */

  BCIns *patchpc; /* PC for pending re-patch. */
  BCIns patchins; /* Instruction for pending re-patch. */

  int mcprot; /* Protection of current mcode area. */
  MCode *mcarea; /* Base of current mcode area. */
  MCode *mctop; /* Top of current mcode area. */
  MCode *mcbot; /* Bottom of current mcode area. */
  size_t szmcarea; /* Size of current mcode area. */
  size_t szallmcarea; /* Total size of all allocated mcode areas. */

  TValue errinfo; /* Additional info element for trace errors. */

#if LJ_HASPROFILE
  GCproto *prev_pt; /* Previous prototype. */
  BCLine prev_line; /* Previous line. */
  int prof_mode; /* Profiling mode: 0, 'f', 'l'. */
#endif
} jit_State;

#ifdef LUA_USE_ASSERT
#define lj_assertJ(c, ...) lj_assertG_(J2G(J), (c), __VA_ARGS__)
#else
#define lj_assertJ(c, ...) ((void)J)
#endif

#endif

#endif