/*
** Machine code management.
** Copyright (C) 2005-2012 Mike Pall. See Copyright Notice in luajit.h
*/
15 #include "lj_dispatch.h"
19 /* -- OS-specific functions ----------------------------------------------- */
21 #if LJ_HASJIT || LJ_HASFFI
23 /* Define this if you want to run LuaJIT with Valgrind. */
24 #ifdef LUAJIT_USE_VALGRIND
25 #include <valgrind/valgrind.h>
#if !LJ_TARGET_X86ORX64 && LJ_TARGET_OSX
void sys_icache_invalidate(void *start, size_t len);
#endif

/* Synchronize data/instruction cache.
** Must be called after writing machine code to [start, end) before it is
** executed, on targets whose I-cache is not coherent with the D-cache.
*/
void lj_mcode_sync(void *start, void *end)
{
#ifdef LUAJIT_USE_VALGRIND
  /* Tell Valgrind its cached translations for this range are stale. */
  VALGRIND_DISCARD_TRANSLATIONS(start, (char *)end-(char *)start);
#endif
#if LJ_TARGET_X86ORX64
  UNUSED(start); UNUSED(end);  /* x86/x64 I-cache is coherent: nothing to do. */
#elif LJ_TARGET_OSX
  sys_icache_invalidate(start, (char *)end-(char *)start);
#elif LJ_TARGET_PPC || LJ_TARGET_MIPS
  /* Hand-written sync in the VM assembler part. */
  lj_vm_cachesync(start, end);
#elif defined(__GNUC__)
  __clear_cache(start, end);
#else
#error "Missing builtin to flush instruction cache"
#endif
}
57 #define WIN32_LEAN_AND_MEAN
60 #define MCPROT_RW PAGE_READWRITE
61 #define MCPROT_RX PAGE_EXECUTE_READ
62 #define MCPROT_RWX PAGE_EXECUTE_READWRITE
64 static void *mcode_alloc_at(jit_State
*J
, uintptr_t hint
, size_t sz
, DWORD prot
)
66 void *p
= VirtualAlloc((void *)hint
, sz
,
67 MEM_RESERVE
|MEM_COMMIT
|MEM_TOP_DOWN
, prot
);
69 lj_trace_err(J
, LJ_TRERR_MCODEAL
);
73 static void mcode_free(jit_State
*J
, void *p
, size_t sz
)
75 UNUSED(J
); UNUSED(sz
);
76 VirtualFree(p
, 0, MEM_RELEASE
);
79 static void mcode_setprot(void *p
, size_t sz
, DWORD prot
)
82 VirtualProtect(p
, sz
, prot
, &oprot
);
#include <sys/mman.h>

#ifndef MAP_ANONYMOUS  /* Some BSDs only provide the older MAP_ANON name. */
#define MAP_ANONYMOUS	MAP_ANON
#endif

/* Map the generic protection modes onto the POSIX mmap/mprotect flags. */
#define MCPROT_RW	(PROT_READ|PROT_WRITE)
#define MCPROT_RX	(PROT_READ|PROT_EXEC)
#define MCPROT_RWX	(PROT_READ|PROT_WRITE|PROT_EXEC)
97 static void *mcode_alloc_at(jit_State
*J
, uintptr_t hint
, size_t sz
, int prot
)
99 void *p
= mmap((void *)hint
, sz
, prot
, MAP_PRIVATE
|MAP_ANONYMOUS
, -1, 0);
100 if (p
== MAP_FAILED
&& !hint
)
101 lj_trace_err(J
, LJ_TRERR_MCODEAL
);
105 static void mcode_free(jit_State
*J
, void *p
, size_t sz
)
/* Change the page protection of an MCode area. */
static void mcode_setprot(void *p, size_t sz, int prot)
{
  mprotect(p, sz, prot);
}
118 #error "Missing OS support for explicit placement of executable memory"
122 /* Fallback allocator. This will fail if memory is not executable by default. */
123 #define LUAJIT_UNPROTECT_MCODE
128 static void *mcode_alloc_at(jit_State
*J
, uintptr_t hint
, size_t sz
, int prot
)
130 UNUSED(hint
); UNUSED(prot
);
131 return lj_mem_new(J
->L
, sz
);
134 static void mcode_free(jit_State
*J
, void *p
, size_t sz
)
136 lj_mem_free(J2G(J
), p
, sz
);
139 #define mcode_setprot(p, sz, prot) UNUSED(p)
143 /* -- MCode area protection ----------------------------------------------- */
145 /* Define this ONLY if the page protection twiddling becomes a bottleneck. */
146 #ifdef LUAJIT_UNPROTECT_MCODE
148 /* It's generally considered to be a potential security risk to have
149 ** pages with simultaneous write *and* execute access in a process.
151 ** Do not even think about using this mode for server processes or
152 ** apps handling untrusted external data (such as a browser).
154 ** The security risk is not in LuaJIT itself -- but if an adversary finds
155 ** any *other* flaw in your C application logic, then any RWX memory page
156 ** simplifies writing an exploit considerably.
158 #define MCPROT_GEN MCPROT_RWX
159 #define MCPROT_RUN MCPROT_RWX
161 static void mcode_protect(jit_State
*J
, int prot
)
163 UNUSED(J
); UNUSED(prot
);
168 /* This is the default behaviour and much safer:
170 ** Most of the time the memory pages holding machine code are executable,
171 ** but NONE of them is writable.
173 ** The current memory area is marked read-write (but NOT executable) only
174 ** during the short time window while the assembler generates machine code.
176 #define MCPROT_GEN MCPROT_RW
177 #define MCPROT_RUN MCPROT_RX
179 /* Change protection of MCode area. */
180 static void mcode_protect(jit_State
*J
, int prot
)
182 if (J
->mcprot
!= prot
) {
183 mcode_setprot(J
->mcarea
, J
->szmcarea
, prot
);
190 /* -- MCode area allocation ----------------------------------------------- */
/* Valid pointer check: non-NULL and within the reachable address space.
** NOTE(review): guard symbol reconstructed -- 64 bit targets use the 47-bit
** user-space limit, 32 bit targets the 0xffff0000 limit; confirm the exact
** #if condition against the original file.
*/
#if LJ_64
#define mcode_validptr(p)	((p) && (uintptr_t)(p) < (uintptr_t)1<<47)
#else
#define mcode_validptr(p)	((p) && (uintptr_t)(p) < 0xffff0000)
#endif
#ifdef LJ_TARGET_JUMPRANGE

/* Get memory within relative jump distance of our code in 64 bit mode. */
static void *mcode_alloc(jit_State *J, size_t sz)
{
  /* Target an address in the static assembler code (64K aligned).
  ** Try addresses within a distance of target-range/2+1MB..target+range/2-1MB.
  */
#if LJ_TARGET_MIPS
  /* Use the middle of the 256MB-aligned region. */
  uintptr_t target = ((uintptr_t)(void *)lj_vm_exit_handler & 0xf0000000u) +
		     0x08000000u;
#else
  uintptr_t target = (uintptr_t)(void *)lj_vm_exit_handler & ~(uintptr_t)0xffff;
#endif
  /* Keep a 2MB margin on either side of the branch range. */
  const uintptr_t range = (1u << LJ_TARGET_JUMPRANGE) - (1u << 21);
  /* First try a contiguous area below the last one. */
  uintptr_t hint = J->mcarea ? (uintptr_t)J->mcarea - sz : 0;
  int i;
  for (i = 0; i < 32; i++) {  /* 32 attempts ought to be enough ... */
    if (mcode_validptr(hint)) {
      void *p = mcode_alloc_at(J, hint, sz, MCPROT_GEN);

      if (mcode_validptr(p)) {
	if ((uintptr_t)p + sz - target < range || target - (uintptr_t)p < range)
	  return p;  /* We got a suitable address. */
	mcode_free(J, p, sz);  /* Free badly placed area. */
      }
    }
    /* Next try probing pseudo-random addresses. */
    do {
      hint = (0x78fb ^ LJ_PRNG_BITS(J, 15)) << 16;  /* 64K aligned. */
    } while (!(hint + sz < range));
    hint = target + hint - (range>>1);
  }
  lj_trace_err(J, LJ_TRERR_MCODEAL);  /* Give up. OS probably ignores hints? */
  return NULL;
}

#else

/* All memory addresses are reachable by relative jumps. */
#define mcode_alloc(J, sz)	mcode_alloc_at((J), 0, (sz), MCPROT_GEN)

#endif
244 /* -- MCode area management ----------------------------------------------- */
246 /* Linked list of MCode areas. */
247 typedef struct MCLink
{
248 MCode
*next
; /* Next area. */
249 size_t size
; /* Size of current area. */
252 /* Allocate a new MCode area. */
253 static void mcode_allocarea(jit_State
*J
)
255 MCode
*oldarea
= J
->mcarea
;
256 size_t sz
= (size_t)J
->param
[JIT_P_sizemcode
] << 10;
257 sz
= (sz
+ LJ_PAGESIZE
-1) & ~(size_t)(LJ_PAGESIZE
- 1);
258 J
->mcarea
= (MCode
*)mcode_alloc(J
, sz
);
260 J
->mcprot
= MCPROT_GEN
;
261 J
->mctop
= (MCode
*)((char *)J
->mcarea
+ J
->szmcarea
);
262 J
->mcbot
= (MCode
*)((char *)J
->mcarea
+ sizeof(MCLink
));
263 ((MCLink
*)J
->mcarea
)->next
= oldarea
;
264 ((MCLink
*)J
->mcarea
)->size
= sz
;
265 J
->szallmcarea
+= sz
;
268 /* Free all MCode areas. */
269 void lj_mcode_free(jit_State
*J
)
271 MCode
*mc
= J
->mcarea
;
275 MCode
*next
= ((MCLink
*)mc
)->next
;
276 mcode_free(J
, mc
, ((MCLink
*)mc
)->size
);
281 /* -- MCode transactions -------------------------------------------------- */
283 /* Reserve the remainder of the current MCode area. */
284 MCode
*lj_mcode_reserve(jit_State
*J
, MCode
**lim
)
289 mcode_protect(J
, MCPROT_GEN
);
294 /* Commit the top part of the current MCode area. */
295 void lj_mcode_commit(jit_State
*J
, MCode
*top
)
298 mcode_protect(J
, MCPROT_RUN
);
301 /* Abort the reservation. */
302 void lj_mcode_abort(jit_State
*J
)
304 mcode_protect(J
, MCPROT_RUN
);
307 /* Set/reset protection to allow patching of MCode areas. */
308 MCode
*lj_mcode_patch(jit_State
*J
, MCode
*ptr
, int finish
)
310 #ifdef LUAJIT_UNPROTECT_MCODE
311 UNUSED(J
); UNUSED(ptr
); UNUSED(finish
);
315 if (J
->mcarea
== ptr
)
316 mcode_protect(J
, MCPROT_RUN
);
318 mcode_setprot(ptr
, ((MCLink
*)ptr
)->size
, MCPROT_RUN
);
321 MCode
*mc
= J
->mcarea
;
322 /* Try current area first to use the protection cache. */
323 if (ptr
>= mc
&& ptr
< (MCode
*)((char *)mc
+ J
->szmcarea
)) {
324 mcode_protect(J
, MCPROT_GEN
);
327 /* Otherwise search through the list of MCode areas. */
329 mc
= ((MCLink
*)mc
)->next
;
330 lua_assert(mc
!= NULL
);
331 if (ptr
>= mc
&& ptr
< (MCode
*)((char *)mc
+ ((MCLink
*)mc
)->size
)) {
332 mcode_setprot(mc
, ((MCLink
*)mc
)->size
, MCPROT_GEN
);
340 /* Limit of MCode reservation reached. */
341 void lj_mcode_limiterr(jit_State
*J
, size_t need
)
343 size_t sizemcode
, maxmcode
;
345 sizemcode
= (size_t)J
->param
[JIT_P_sizemcode
] << 10;
346 sizemcode
= (sizemcode
+ LJ_PAGESIZE
-1) & ~(size_t)(LJ_PAGESIZE
- 1);
347 maxmcode
= (size_t)J
->param
[JIT_P_maxmcode
] << 10;
348 if ((size_t)need
> sizemcode
)
349 lj_trace_err(J
, LJ_TRERR_MCODEOV
); /* Too long for any area. */
350 if (J
->szallmcarea
+ sizemcode
> maxmcode
)
351 lj_trace_err(J
, LJ_TRERR_MCODEAL
);
353 lj_trace_err(J
, LJ_TRERR_MCODELM
); /* Retry with new area. */