2 ** Machine code management.
3 ** Copyright (C) 2005-2023 Mike Pall. See Copyright Notice in luajit.h
16 #include "lj_dispatch.h"
19 #if LJ_HASJIT || LJ_HASFFI
23 /* -- OS-specific functions ----------------------------------------------- */
25 #if LJ_HASJIT || LJ_HASFFI
27 /* Define this if you want to run LuaJIT with Valgrind. */
28 #ifdef LUAJIT_USE_VALGRIND
29 #include <valgrind/valgrind.h>
33 #define WIN32_LEAN_AND_MEAN
#if LJ_TARGET_IOS
/* NOTE(review): guard reconstructed from the matching branch below --
** sys_icache_invalidate is the Darwin libkern cache-flush primitive;
** confirm the exact target condition against the build config.
*/
void sys_icache_invalidate(void *start, size_t len);
#endif

/* Synchronize data/instruction cache. */
void lj_mcode_sync(void *start, void *end)
{
#ifdef LUAJIT_USE_VALGRIND
  /* Tell Valgrind to drop any cached translations of the patched range. */
  VALGRIND_DISCARD_TRANSLATIONS(start, (char *)end-(char *)start);
#endif
#if LJ_TARGET_X86ORX64
  /* x86/x64 keeps I-cache coherent with D-cache -- nothing to do. */
  UNUSED(start); UNUSED(end);
#elif LJ_TARGET_WINDOWS
  FlushInstructionCache(GetCurrentProcess(), start, (char *)end-(char *)start);
#elif LJ_TARGET_IOS
  sys_icache_invalidate(start, (char *)end-(char *)start);
#elif LJ_TARGET_PPC
  /* NOTE(review): guard reconstructed -- confirm which target uses the
  ** VM-provided lj_vm_cachesync flush.
  */
  lj_vm_cachesync(start, end);
#elif defined(__GNUC__) || defined(__clang__)
  __clear_cache(start, end);
#else
#error "Missing builtin to flush instruction cache"
#endif
}
68 #define MCPROT_RW PAGE_READWRITE
69 #define MCPROT_RX PAGE_EXECUTE_READ
70 #define MCPROT_RWX PAGE_EXECUTE_READWRITE
72 static void *mcode_alloc_at(jit_State
*J
, uintptr_t hint
, size_t sz
, DWORD prot
)
74 void *p
= LJ_WIN_VALLOC((void *)hint
, sz
,
75 MEM_RESERVE
|MEM_COMMIT
|MEM_TOP_DOWN
, prot
);
77 lj_trace_err(J
, LJ_TRERR_MCODEAL
);
81 static void mcode_free(jit_State
*J
, void *p
, size_t sz
)
83 UNUSED(J
); UNUSED(sz
);
84 VirtualFree(p
, 0, MEM_RELEASE
);
87 static int mcode_setprot(void *p
, size_t sz
, DWORD prot
)
90 return !LJ_WIN_VPROTECT(p
, sz
, prot
, &oprot
);
98 #define MAP_ANONYMOUS MAP_ANON
101 #define MCPROT_RW (PROT_READ|PROT_WRITE)
102 #define MCPROT_RX (PROT_READ|PROT_EXEC)
103 #define MCPROT_RWX (PROT_READ|PROT_WRITE|PROT_EXEC)
105 #define MCPROT_CREATE (PROT_MPROTECT(MCPROT_RWX))
107 #define MCPROT_CREATE 0
110 static void *mcode_alloc_at(jit_State
*J
, uintptr_t hint
, size_t sz
, int prot
)
112 void *p
= mmap((void *)hint
, sz
, prot
|MCPROT_CREATE
, MAP_PRIVATE
|MAP_ANONYMOUS
, -1, 0);
113 if (p
== MAP_FAILED
) {
114 if (!hint
) lj_trace_err(J
, LJ_TRERR_MCODEAL
);
120 static void mcode_free(jit_State
*J
, void *p
, size_t sz
)
126 static int mcode_setprot(void *p
, size_t sz
, int prot
)
128 return mprotect(p
, sz
, prot
);
133 #error "Missing OS support for explicit placement of executable memory"
137 /* -- MCode area protection ----------------------------------------------- */
139 #if LUAJIT_SECURITY_MCODE == 0
141 /* Define this ONLY if page protection twiddling becomes a bottleneck.
143 ** It's generally considered to be a potential security risk to have
144 ** pages with simultaneous write *and* execute access in a process.
146 ** Do not even think about using this mode for server processes or
147 ** apps handling untrusted external data.
149 ** The security risk is not in LuaJIT itself -- but if an adversary finds
150 ** any *other* flaw in your C application logic, then any RWX memory pages
151 ** simplify writing an exploit considerably.
153 #define MCPROT_GEN MCPROT_RWX
154 #define MCPROT_RUN MCPROT_RWX
156 static void mcode_protect(jit_State
*J
, int prot
)
158 UNUSED(J
); UNUSED(prot
); UNUSED(mcode_setprot
);
163 /* This is the default behaviour and much safer:
165 ** Most of the time the memory pages holding machine code are executable,
166 ** but NONE of them is writable.
168 ** The current memory area is marked read-write (but NOT executable) only
169 ** during the short time window while the assembler generates machine code.
171 #define MCPROT_GEN MCPROT_RW
172 #define MCPROT_RUN MCPROT_RX
174 /* Protection twiddling failed. Probably due to kernel security. */
175 static LJ_NORET LJ_NOINLINE
void mcode_protfail(jit_State
*J
)
177 lua_CFunction panic
= J2G(J
)->panic
;
180 setstrV(L
, L
->top
++, lj_err_str(L
, LJ_ERR_JITPROT
));
186 /* Change protection of MCode area. */
187 static void mcode_protect(jit_State
*J
, int prot
)
189 if (J
->mcprot
!= prot
) {
190 if (LJ_UNLIKELY(mcode_setprot(J
->mcarea
, J
->szmcarea
, prot
)))
198 /* -- MCode area allocation ----------------------------------------------- */
201 #define mcode_validptr(p) (p)
203 #define mcode_validptr(p) ((p) && (uintptr_t)(p) < 0xffff0000)
206 #ifdef LJ_TARGET_JUMPRANGE
208 /* Get memory within relative jump distance of our code in 64 bit mode. */
209 static void *mcode_alloc(jit_State
*J
, size_t sz
)
211 /* Target an address in the static assembler code (64K aligned).
212 ** Try addresses within a distance of target-range/2+1MB..target+range/2-1MB.
213 ** Use half the jump range so every address in the range can reach any other.
216 /* Use the middle of the 256MB-aligned region. */
217 uintptr_t target
= ((uintptr_t)(void *)lj_vm_exit_handler
&
218 ~(uintptr_t)0x0fffffffu
) + 0x08000000u
;
220 uintptr_t target
= (uintptr_t)(void *)lj_vm_exit_handler
& ~(uintptr_t)0xffff;
222 const uintptr_t range
= (1u << (LJ_TARGET_JUMPRANGE
-1)) - (1u << 21);
223 /* First try a contiguous area below the last one. */
224 uintptr_t hint
= J
->mcarea
? (uintptr_t)J
->mcarea
- sz
: 0;
226 /* Limit probing iterations, depending on the available pool size. */
227 for (i
= 0; i
< LJ_TARGET_JUMPRANGE
; i
++) {
228 if (mcode_validptr(hint
)) {
229 void *p
= mcode_alloc_at(J
, hint
, sz
, MCPROT_GEN
);
231 if (mcode_validptr(p
) &&
232 ((uintptr_t)p
+ sz
- target
< range
|| target
- (uintptr_t)p
< range
))
234 if (p
) mcode_free(J
, p
, sz
); /* Free badly placed area. */
236 /* Next try probing 64K-aligned pseudo-random addresses. */
238 hint
= lj_prng_u64(&J2G(J
)->prng
) & ((1u<<LJ_TARGET_JUMPRANGE
)-0x10000);
239 } while (!(hint
+ sz
< range
+range
));
240 hint
= target
+ hint
- range
;
242 lj_trace_err(J
, LJ_TRERR_MCODEAL
); /* Give up. OS probably ignores hints? */
248 /* All memory addresses are reachable by relative jumps. */
249 static void *mcode_alloc(jit_State
*J
, size_t sz
)
251 #if defined(__OpenBSD__) || defined(__NetBSD__) || LJ_TARGET_UWP
252 /* Allow better executable memory allocation for OpenBSD W^X mode. */
253 void *p
= mcode_alloc_at(J
, 0, sz
, MCPROT_RUN
);
254 if (p
&& mcode_setprot(p
, sz
, MCPROT_GEN
)) {
255 mcode_free(J
, p
, sz
);
260 return mcode_alloc_at(J
, 0, sz
, MCPROT_GEN
);
266 /* -- MCode area management ----------------------------------------------- */
268 /* Allocate a new MCode area. */
269 static void mcode_allocarea(jit_State
*J
)
271 MCode
*oldarea
= J
->mcarea
;
272 size_t sz
= (size_t)J
->param
[JIT_P_sizemcode
] << 10;
273 sz
= (sz
+ LJ_PAGESIZE
-1) & ~(size_t)(LJ_PAGESIZE
- 1);
274 J
->mcarea
= (MCode
*)mcode_alloc(J
, sz
);
276 J
->mcprot
= MCPROT_GEN
;
277 J
->mctop
= (MCode
*)((char *)J
->mcarea
+ J
->szmcarea
);
278 J
->mcbot
= (MCode
*)((char *)J
->mcarea
+ sizeof(MCLink
));
279 ((MCLink
*)J
->mcarea
)->next
= oldarea
;
280 ((MCLink
*)J
->mcarea
)->size
= sz
;
281 J
->szallmcarea
+= sz
;
282 J
->mcbot
= (MCode
*)lj_err_register_mcode(J
->mcarea
, sz
, (uint8_t *)J
->mcbot
);
285 /* Free all MCode areas. */
286 void lj_mcode_free(jit_State
*J
)
288 MCode
*mc
= J
->mcarea
;
292 MCode
*next
= ((MCLink
*)mc
)->next
;
293 size_t sz
= ((MCLink
*)mc
)->size
;
294 lj_err_deregister_mcode(mc
, sz
, (uint8_t *)mc
+ sizeof(MCLink
));
295 mcode_free(J
, mc
, sz
);
300 /* -- MCode transactions -------------------------------------------------- */
302 /* Reserve the remainder of the current MCode area. */
303 MCode
*lj_mcode_reserve(jit_State
*J
, MCode
**lim
)
308 mcode_protect(J
, MCPROT_GEN
);
313 /* Commit the top part of the current MCode area. */
314 void lj_mcode_commit(jit_State
*J
, MCode
*top
)
317 mcode_protect(J
, MCPROT_RUN
);
320 /* Abort the reservation. */
321 void lj_mcode_abort(jit_State
*J
)
324 mcode_protect(J
, MCPROT_RUN
);
327 /* Set/reset protection to allow patching of MCode areas. */
328 MCode
*lj_mcode_patch(jit_State
*J
, MCode
*ptr
, int finish
)
331 #if LUAJIT_SECURITY_MCODE
332 if (J
->mcarea
== ptr
)
333 mcode_protect(J
, MCPROT_RUN
);
334 else if (LJ_UNLIKELY(mcode_setprot(ptr
, ((MCLink
*)ptr
)->size
, MCPROT_RUN
)))
339 MCode
*mc
= J
->mcarea
;
340 /* Try current area first to use the protection cache. */
341 if (ptr
>= mc
&& ptr
< (MCode
*)((char *)mc
+ J
->szmcarea
)) {
342 #if LUAJIT_SECURITY_MCODE
343 mcode_protect(J
, MCPROT_GEN
);
347 /* Otherwise search through the list of MCode areas. */
349 mc
= ((MCLink
*)mc
)->next
;
350 lj_assertJ(mc
!= NULL
, "broken MCode area chain");
351 if (ptr
>= mc
&& ptr
< (MCode
*)((char *)mc
+ ((MCLink
*)mc
)->size
)) {
352 #if LUAJIT_SECURITY_MCODE
353 if (LJ_UNLIKELY(mcode_setprot(mc
, ((MCLink
*)mc
)->size
, MCPROT_GEN
)))
362 /* Limit of MCode reservation reached. */
363 void lj_mcode_limiterr(jit_State
*J
, size_t need
)
365 size_t sizemcode
, maxmcode
;
367 sizemcode
= (size_t)J
->param
[JIT_P_sizemcode
] << 10;
368 sizemcode
= (sizemcode
+ LJ_PAGESIZE
-1) & ~(size_t)(LJ_PAGESIZE
- 1);
369 maxmcode
= (size_t)J
->param
[JIT_P_maxmcode
] << 10;
370 if (need
* sizeof(MCode
) > sizemcode
)
371 lj_trace_err(J
, LJ_TRERR_MCODEOV
); /* Too long for any area. */
372 if (J
->szallmcarea
+ sizemcode
> maxmcode
)
373 lj_trace_err(J
, LJ_TRERR_MCODEAL
);
375 lj_trace_err(J
, LJ_TRERR_MCODELM
); /* Retry with new area. */