/*
** Machine code management.
** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h
*/
#define lj_mcode_c
#define LUA_CORE

#include "lj_obj.h"
#if LJ_HASJIT
#include "lj_gc.h"
#include "lj_err.h"
#include "lj_jit.h"
#include "lj_mcode.h"
#include "lj_trace.h"
#include "lj_dispatch.h"
#endif
#if LJ_HASJIT || LJ_HASFFI
#include "lj_vm.h"
#endif
/* -- OS-specific functions ----------------------------------------------- */

#if LJ_HASJIT || LJ_HASFFI

/* Define this if you want to run LuaJIT with Valgrind. */
#ifdef LUAJIT_USE_VALGRIND
#include <valgrind/valgrind.h>
#endif

#if LJ_TARGET_IOS
void sys_icache_invalidate(void *start, size_t len);
#endif
/* Synchronize data/instruction cache. */
void lj_mcode_sync(void *start, void *end)
{
#ifdef LUAJIT_USE_VALGRIND
  VALGRIND_DISCARD_TRANSLATIONS(start, (char *)end-(char *)start);
#endif
#if LJ_TARGET_X86ORX64
  UNUSED(start); UNUSED(end);
#elif LJ_TARGET_IOS
  sys_icache_invalidate(start, (char *)end-(char *)start);
#elif LJ_TARGET_PPC
  lj_vm_cachesync(start, end);
#elif defined(__GNUC__)
  __clear_cache(start, end);
#else
#error "Missing builtin to flush instruction cache"
#endif
}

#endif
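#if 0
/* Illustrative sketch only, not part of the original file: the calling
** discipline lj_mcode_sync() expects. emit_code_into() is a hypothetical
** emitter. Targets with split instruction/data caches require the sync
** before the new code runs; x86/x64 keep both caches coherent in hardware,
** so the call compiles to nothing there.
*/
extern void emit_code_into(void *area, size_t len);  /* Hypothetical. */

static void sync_example(void *area, size_t len)
{
  emit_code_into(area, len);  /* New code is only in the data cache. */
  lj_mcode_sync(area, (char *)area + len);  /* Flush D$, invalidate I$. */
  ((void (*)(void))area)();   /* Only now is it safe to execute it. */
}
#endif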
#if LJ_HASJIT

#if LJ_TARGET_WINDOWS

#define WIN32_LEAN_AND_MEAN
#include <windows.h>

#define MCPROT_RW	PAGE_READWRITE
#define MCPROT_RX	PAGE_EXECUTE_READ
#define MCPROT_RWX	PAGE_EXECUTE_READWRITE
static void *mcode_alloc_at(jit_State *J, uintptr_t hint, size_t sz, DWORD prot)
{
  void *p = VirtualAlloc((void *)hint, sz,
			 MEM_RESERVE|MEM_COMMIT|MEM_TOP_DOWN, prot);
  if (!p && !hint)
    lj_trace_err(J, LJ_TRERR_MCODEAL);
  return p;
}
static void mcode_free(jit_State *J, void *p, size_t sz)
{
  UNUSED(J); UNUSED(sz);
  VirtualFree(p, 0, MEM_RELEASE);
}
static int mcode_setprot(void *p, size_t sz, DWORD prot)
{
  DWORD oprot;
  return !VirtualProtect(p, sz, prot, &oprot);
}
#elif LJ_TARGET_POSIX

#include <sys/mman.h>

#ifndef MAP_ANONYMOUS
#define MAP_ANONYMOUS	MAP_ANON
#endif

#define MCPROT_RW	(PROT_READ|PROT_WRITE)
#define MCPROT_RX	(PROT_READ|PROT_EXEC)
#define MCPROT_RWX	(PROT_READ|PROT_WRITE|PROT_EXEC)
static void *mcode_alloc_at(jit_State *J, uintptr_t hint, size_t sz, int prot)
{
  void *p = mmap((void *)hint, sz, prot, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
  if (p == MAP_FAILED) {
    if (!hint) lj_trace_err(J, LJ_TRERR_MCODEAL);
    p = NULL;
  }
  return p;
}
static void mcode_free(jit_State *J, void *p, size_t sz)
{
  UNUSED(J);
  munmap(p, sz);
}
static int mcode_setprot(void *p, size_t sz, int prot)
{
  return mprotect(p, sz, prot);
}
#elif LJ_64

#error "Missing OS support for explicit placement of executable memory"

#else

/* Fallback allocator. This will fail if memory is not executable by default. */
#define LUAJIT_UNPROTECT_MCODE
static void *mcode_alloc_at(jit_State *J, uintptr_t hint, size_t sz, int prot)
{
  UNUSED(hint); UNUSED(prot);
  return lj_mem_new(J->L, sz);
}
static void mcode_free(jit_State *J, void *p, size_t sz)
{
  lj_mem_free(J2G(J), p, sz);
}

#endif
/* -- MCode area protection ----------------------------------------------- */

/* Define this ONLY if page protection twiddling becomes a bottleneck. */
#ifdef LUAJIT_UNPROTECT_MCODE

/* It's generally considered to be a potential security risk to have
** pages with simultaneous write *and* execute access in a process.
**
** Do not even think about using this mode for server processes or
** apps handling untrusted external data (such as a browser).
**
** The security risk is not in LuaJIT itself -- but if an adversary finds
** any *other* flaw in your C application logic, then any RWX memory page
** simplifies writing an exploit considerably.
*/
#define MCPROT_GEN	MCPROT_RWX
#define MCPROT_RUN	MCPROT_RWX
static void mcode_protect(jit_State *J, int prot)
{
  UNUSED(J); UNUSED(prot);
}
#else

/* This is the default behaviour and much safer:
**
** Most of the time the memory pages holding machine code are executable,
** but NONE of them is writable.
**
** The current memory area is marked read-write (but NOT executable) only
** during the short time window while the assembler generates machine code.
*/
#define MCPROT_GEN	MCPROT_RW
#define MCPROT_RUN	MCPROT_RX
/* Protection twiddling failed. Probably due to kernel security. */
static LJ_NOINLINE void mcode_protfail(jit_State *J)
{
  lua_CFunction panic = J2G(J)->panic;
  if (panic) {
    lua_State *L = J->L;
    setstrV(L, L->top++, lj_err_str(L, LJ_ERR_JITPROT));
    panic(L);
  }
}
/* Change protection of MCode area. */
static void mcode_protect(jit_State *J, int prot)
{
  if (J->mcprot != prot) {
    if (LJ_UNLIKELY(mcode_setprot(J->mcarea, J->szmcarea, prot)))
      mcode_protfail(J);
    J->mcprot = prot;
  }
}

#endif
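#if 0
/* Illustrative sketch only: the W^X discipline the macros above encode,
** spelled out with plain POSIX mprotect(). A page is writable or
** executable at any given time, never both.
*/
#include <sys/mman.h>

static void wx_example(void *area, size_t sz)
{
  mprotect(area, sz, PROT_READ|PROT_WRITE);  /* MCPROT_GEN: assemble code. */
  /* ... the assembler writes machine code into area ... */
  mprotect(area, sz, PROT_READ|PROT_EXEC);   /* MCPROT_RUN: execute code. */
}
#endif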
/* -- MCode area allocation ----------------------------------------------- */

#if LJ_TARGET_X64
#define mcode_validptr(p)	((p) && (uintptr_t)(p) < (uintptr_t)1<<47)
#else
#define mcode_validptr(p)	((p) && (uintptr_t)(p) < 0xffff0000)
#endif
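/* Note (added, a hedged interpretation): the 1<<47 bound matches the lower,
** user-space half of the x64 canonical address range, and the 0xffff0000
** bound on 32-bit targets rejects the topmost 64K, which also keeps the
** 64K-aligned probing hints below from wrapping.
*/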
#ifdef LJ_TARGET_JUMPRANGE

/* Get memory within relative jump distance of our code in 64 bit mode. */
static void *mcode_alloc(jit_State *J, size_t sz)
{
  /* Target an address in the static assembler code (64K aligned).
  ** Try addresses within a distance of target-range/2+1MB..target+range/2-1MB.
  ** Use half the jump range so every address in the range can reach any other.
  */
#if LJ_TARGET_MIPS
  /* Use the middle of the 256MB-aligned region. */
  uintptr_t target = ((uintptr_t)(void *)lj_vm_exit_handler & 0xf0000000u) +
		     0x08000000u;
#else
  uintptr_t target = (uintptr_t)(void *)lj_vm_exit_handler & ~(uintptr_t)0xffff;
#endif
  const uintptr_t range = (1u << (LJ_TARGET_JUMPRANGE-1)) - (1u << 21);
  /* First try a contiguous area below the last one. */
  uintptr_t hint = J->mcarea ? (uintptr_t)J->mcarea - sz : 0;
  int i;
  for (i = 0; i < 32; i++) {  /* 32 attempts ought to be enough ... */
    if (mcode_validptr(hint)) {
      void *p = mcode_alloc_at(J, hint, sz, MCPROT_GEN);
      if (mcode_validptr(p) &&
	  ((uintptr_t)p + sz - target < range || target - (uintptr_t)p < range))
	return p;
      if (p) mcode_free(J, p, sz);  /* Free badly placed area. */
    }
    /* Next try probing pseudo-random addresses. */
    do {
      hint = (0x78fb ^ LJ_PRNG_BITS(J, 15)) << 16;  /* 64K aligned. */
    } while (!(hint + sz < range));
    hint = target + hint - (range>>1);
  }
  lj_trace_err(J, LJ_TRERR_MCODEAL);  /* Give up. OS probably ignores hints? */
  return NULL;
}
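#if 0
/* Illustrative note only: the triangle-inequality argument behind using
** half the jump range. If every area lies within +-range of the common
** target, then for any two addresses a and b:
**
**   |a - b| <= |a - target| + |target - b| < 2*range < 1 << LJ_TARGET_JUMPRANGE
**
** so any instruction in one area can reach any instruction in another with
** a single relative branch (e.g. +-2GB rel32 branches on x64). The
** (1u << 21) subtraction keeps a safety margin so the probed window stays
** strictly inside the reachable range.
*/
#endif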
#else

/* All memory addresses are reachable by relative jumps. */
static void *mcode_alloc(jit_State *J, size_t sz)
{
#ifdef __OpenBSD__
  /* Allow better executable memory allocation for OpenBSD W^X mode. */
  void *p = mcode_alloc_at(J, 0, sz, MCPROT_RUN);
  if (p && mcode_setprot(p, sz, MCPROT_GEN)) {
    mcode_free(J, p, sz);
    return NULL;
  }
  return p;
#else
  return mcode_alloc_at(J, 0, sz, MCPROT_GEN);
#endif
}

#endif
/* -- MCode area management ----------------------------------------------- */

/* Linked list of MCode areas. */
typedef struct MCLink {
  MCode *next;		/* Next area. */
  size_t size;		/* Size of current area. */
} MCLink;
/* Allocate a new MCode area. */
static void mcode_allocarea(jit_State *J)
{
  MCode *oldarea = J->mcarea;
  size_t sz = (size_t)J->param[JIT_P_sizemcode] << 10;
  sz = (sz + LJ_PAGESIZE-1) & ~(size_t)(LJ_PAGESIZE - 1);
  J->mcarea = (MCode *)mcode_alloc(J, sz);
  J->szmcarea = sz;
  J->mcprot = MCPROT_GEN;
  J->mctop = (MCode *)((char *)J->mcarea + J->szmcarea);
  J->mcbot = (MCode *)((char *)J->mcarea + sizeof(MCLink));
  ((MCLink *)J->mcarea)->next = oldarea;
  ((MCLink *)J->mcarea)->size = sz;
  J->szallmcarea += sz;
}
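#if 0
/* Illustrative layout sketch only: each area starts with its MCLink header,
** mcbot skips past it, and traces are assembled downward from mctop.
**
**   mcarea -> +---------------------+  \
**             | MCLink {next, size} |  |
**   mcbot  -> |                     |  |  size (page-aligned,
**             |     ...free...      |  |  JIT_P_sizemcode KB)
**             |   assembled traces  |  |
**   mctop  -> +---------------------+  /
**
** The next pointers chain all areas together, so lj_mcode_free() below can
** release every area in a single walk.
*/
#endif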
/* Free all MCode areas. */
void lj_mcode_free(jit_State *J)
{
  MCode *mc = J->mcarea;
  J->mcarea = NULL;
  J->szallmcarea = 0;
  while (mc) {
    MCode *next = ((MCLink *)mc)->next;
    mcode_free(J, mc, ((MCLink *)mc)->size);
    mc = next;
  }
}
/* -- MCode transactions -------------------------------------------------- */

/* Reserve the remainder of the current MCode area. */
MCode *lj_mcode_reserve(jit_State *J, MCode **lim)
{
  if (!J->mcarea)
    mcode_allocarea(J);
  else
    mcode_protect(J, MCPROT_GEN);
  *lim = J->mcbot;
  return J->mctop;
}
/* Commit the top part of the current MCode area. */
void lj_mcode_commit(jit_State *J, MCode *top)
{
  J->mctop = top;
  mcode_protect(J, MCPROT_RUN);
}
/* Abort the reservation. */
void lj_mcode_abort(jit_State *J)
{
  if (J->mcarea)
    mcode_protect(J, MCPROT_RUN);
}
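#if 0
/* Illustrative usage sketch only (assemble_trace is hypothetical): the
** transaction protocol the assembler is expected to follow. Reserve makes
** the current area writable and returns its top and lower limit; code is
** emitted downward from the top; commit stores the new top and flips the
** area back to executable, while abort just restores execute protection.
*/
extern MCode *assemble_trace(jit_State *J, MCode *top, MCode *lim);  /* Hypothetical. */

static void txn_example(jit_State *J)
{
  MCode *lim;
  MCode *top = lj_mcode_reserve(J, &lim);      /* Area is now MCPROT_GEN (RW). */
  MCode *start = assemble_trace(J, top, lim);  /* Emit from top downward. */
  if (start)
    lj_mcode_commit(J, start);  /* Keep the code, back to MCPROT_RUN (RX). */
  else
    lj_mcode_abort(J);          /* Discard it, back to MCPROT_RUN (RX). */
}
#endif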
/* Set/reset protection to allow patching of MCode areas. */
MCode *lj_mcode_patch(jit_State *J, MCode *ptr, int finish)
{
#ifdef LUAJIT_UNPROTECT_MCODE
  UNUSED(J); UNUSED(ptr); UNUSED(finish);
  return NULL;
#else
  if (finish) {
    if (J->mcarea == ptr)
      mcode_protect(J, MCPROT_RUN);
    else if (LJ_UNLIKELY(mcode_setprot(ptr, ((MCLink *)ptr)->size, MCPROT_RUN)))
      mcode_protfail(J);
    return NULL;
  } else {
    MCode *mc = J->mcarea;
    /* Try current area first to use the protection cache. */
    if (ptr >= mc && ptr < (MCode *)((char *)mc + J->szmcarea)) {
      mcode_protect(J, MCPROT_GEN);
      return mc;
    }
    /* Otherwise search through the list of MCode areas. */
    for (;;) {
      mc = ((MCLink *)mc)->next;
      lua_assert(mc != NULL);
      if (ptr >= mc && ptr < (MCode *)((char *)mc + ((MCLink *)mc)->size)) {
	if (LJ_UNLIKELY(mcode_setprot(mc, ((MCLink *)mc)->size, MCPROT_GEN)))
	  mcode_protfail(J);
	return mc;
      }
    }
  }
#endif
}
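#if 0
/* Illustrative usage sketch only: how already committed code is patched,
** mirroring the unprotect/reprotect pairing its callers in the JIT use.
** The first call makes the area containing ptr writable and returns its
** base; the second call, with finish set, restores execute protection.
*/
static void patch_example(jit_State *J, MCode *instr)
{
  MCode *area = lj_mcode_patch(J, instr, 0);  /* Unprotect containing area. */
  /* ... overwrite the branch or exit stub at instr ... */
  lj_mcode_patch(J, area, 1);                 /* Reprotect the area. */
}
#endif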
/* Limit of MCode reservation reached. */
void lj_mcode_limiterr(jit_State *J, size_t need)
{
  size_t sizemcode, maxmcode;
  lj_mcode_abort(J);
  sizemcode = (size_t)J->param[JIT_P_sizemcode] << 10;
  sizemcode = (sizemcode + LJ_PAGESIZE-1) & ~(size_t)(LJ_PAGESIZE - 1);
  maxmcode = (size_t)J->param[JIT_P_maxmcode] << 10;
  if ((size_t)need > sizemcode)
    lj_trace_err(J, LJ_TRERR_MCODEOV);  /* Too long for any area. */
  if (J->szallmcarea + sizemcode > maxmcode)
    lj_trace_err(J, LJ_TRERR_MCODEAL);
  mcode_allocarea(J);
  lj_trace_err(J, LJ_TRERR_MCODELM);  /* Retry with new area. */
}

#endif