RELEASE LuaJIT-2.0.0-beta10
[luajit-2.0.git] / src / lj_mcode.c

/*
** Machine code management.
** Copyright (C) 2005-2012 Mike Pall. See Copyright Notice in luajit.h
*/

#define lj_mcode_c
#define LUA_CORE

#include "lj_obj.h"
#if LJ_HASJIT
#include "lj_gc.h"
#include "lj_jit.h"
#include "lj_mcode.h"
#include "lj_trace.h"
#include "lj_dispatch.h"
#include "lj_vm.h"
#endif

/* -- OS-specific functions ----------------------------------------------- */

#if LJ_HASJIT || LJ_HASFFI

/* Define this if you want to run LuaJIT with Valgrind. */
#ifdef LUAJIT_USE_VALGRIND
#include <valgrind/valgrind.h>
#endif

#if !LJ_TARGET_X86ORX64 && LJ_TARGET_OSX
void sys_icache_invalidate(void *start, size_t len);
#endif
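
/* Targets with split instruction/data caches (ARM, PPC, MIPS) may still
** hold freshly written machine code in the D-cache while stale bytes sit
** in the I-cache, so the latter must be invalidated before jumping to the
** new code. x86/x64 keeps both coherent in hardware, hence the no-op below.
*/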

/* Synchronize data/instruction cache. */
void lj_mcode_sync(void *start, void *end)
{
#ifdef LUAJIT_USE_VALGRIND
  VALGRIND_DISCARD_TRANSLATIONS(start, (char *)end-(char *)start);
#endif
#if LJ_TARGET_X86ORX64
  UNUSED(start); UNUSED(end);
#elif LJ_TARGET_OSX
  sys_icache_invalidate(start, (char *)end-(char *)start);
#elif LJ_TARGET_PPC
  lj_vm_cachesync(start, end);
#elif defined(__GNUC__)
  __clear_cache(start, end);
#else
#error "Missing builtin to flush instruction cache"
#endif
}

#endif

#if LJ_HASJIT

#if LJ_TARGET_WINDOWS

#define WIN32_LEAN_AND_MEAN
#include <windows.h>

#define MCPROT_RW	PAGE_READWRITE
#define MCPROT_RX	PAGE_EXECUTE_READ
#define MCPROT_RWX	PAGE_EXECUTE_READWRITE
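
/* Allocate executable memory, optionally at a specific address. A hint of
** 0 means "anywhere". Note that a failed allocation *with* a hint is not
** an error: the caller probes several candidate addresses and only bails
** out once an unconstrained allocation fails as well.
*/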
static void *mcode_alloc_at(jit_State *J, uintptr_t hint, size_t sz, DWORD prot)
{
  void *p = VirtualAlloc((void *)hint, sz,
			 MEM_RESERVE|MEM_COMMIT|MEM_TOP_DOWN, prot);
  if (!p && !hint)
    lj_trace_err(J, LJ_TRERR_MCODEAL);
  return p;
}

static void mcode_free(jit_State *J, void *p, size_t sz)
{
  UNUSED(J); UNUSED(sz);
  VirtualFree(p, 0, MEM_RELEASE);
}

static void mcode_setprot(void *p, size_t sz, DWORD prot)
{
  DWORD oprot;
  VirtualProtect(p, sz, prot, &oprot);
}

#elif LJ_TARGET_POSIX

#include <sys/mman.h>

#ifndef MAP_ANONYMOUS
#define MAP_ANONYMOUS	MAP_ANON
#endif

#define MCPROT_RW	(PROT_READ|PROT_WRITE)
#define MCPROT_RX	(PROT_READ|PROT_EXEC)
#define MCPROT_RWX	(PROT_READ|PROT_WRITE|PROT_EXEC)

static void *mcode_alloc_at(jit_State *J, uintptr_t hint, size_t sz, int prot)
{
  void *p = mmap((void *)hint, sz, prot, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
  if (p == MAP_FAILED && !hint)
    lj_trace_err(J, LJ_TRERR_MCODEAL);
  return p;
}
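
/* Note that a POSIX mmap() address hint is only advisory: the kernel may
** place the mapping elsewhere without failing. The caller therefore
** validates the placement of whatever comes back (see mcode_alloc() below)
** instead of trusting the hint.
*/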

static void mcode_free(jit_State *J, void *p, size_t sz)
{
  UNUSED(J);
  munmap(p, sz);
}

static void mcode_setprot(void *p, size_t sz, int prot)
{
  mprotect(p, sz, prot);
}

#elif LJ_64

#error "Missing OS support for explicit placement of executable memory"

#else

/* Fallback allocator. This will fail if memory is not executable by default. */
#define LUAJIT_UNPROTECT_MCODE
#define MCPROT_RW	0
#define MCPROT_RX	0
#define MCPROT_RWX	0

static void *mcode_alloc_at(jit_State *J, uintptr_t hint, size_t sz, int prot)
{
  UNUSED(hint); UNUSED(prot);
  return lj_mem_new(J->L, sz);
}

static void mcode_free(jit_State *J, void *p, size_t sz)
{
  lj_mem_free(J2G(J), p, sz);
}

#define mcode_setprot(p, sz, prot)	UNUSED(p)

#endif

/* -- MCode area protection ----------------------------------------------- */

/* Define this ONLY if the page protection twiddling becomes a bottleneck. */
#ifdef LUAJIT_UNPROTECT_MCODE

/* It's generally considered to be a potential security risk to have
** pages with simultaneous write *and* execute access in a process.
**
** Do not even think about using this mode for server processes or
** apps handling untrusted external data (such as a browser).
**
** The security risk is not in LuaJIT itself -- but if an adversary finds
** any *other* flaw in your C application logic, then any RWX memory page
** simplifies writing an exploit considerably.
*/
#define MCPROT_GEN	MCPROT_RWX
#define MCPROT_RUN	MCPROT_RWX

static void mcode_protect(jit_State *J, int prot)
{
  UNUSED(J); UNUSED(prot);
}

#else

/* This is the default behaviour and much safer:
**
** Most of the time the memory pages holding machine code are executable,
** but NONE of them is writable.
**
** The current memory area is marked read-write (but NOT executable) only
** during the short time window while the assembler generates machine code.
*/
#define MCPROT_GEN	MCPROT_RW
#define MCPROT_RUN	MCPROT_RX

/* Change protection of MCode area. */
static void mcode_protect(jit_State *J, int prot)
{
  if (J->mcprot != prot) {
    mcode_setprot(J->mcarea, J->szmcarea, prot);
    J->mcprot = prot;
  }
}
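
/* J->mcprot caches the protection state of the current area, so a flip to
** the state it is already in skips the mprotect()/VirtualProtect() call
** entirely. This is the "protection cache" that lj_mcode_patch() below
** tries to exploit.
*/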

#endif

/* -- MCode area allocation ----------------------------------------------- */

#if LJ_TARGET_X64
#define mcode_validptr(p)	((p) && (uintptr_t)(p) < (uintptr_t)1<<47)
#else
#define mcode_validptr(p)	((p) && (uintptr_t)(p) < 0xffff0000)
#endif
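
/* The 1<<47 bound matches the canonical user-space address range of
** current x64 kernels; on 32 bit targets the top 64KB is avoided,
** presumably to steer clear of error-code-like pointer values.
*/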

#ifdef LJ_TARGET_JUMPRANGE

/* Get memory within relative jump distance of our code in 64 bit mode. */
static void *mcode_alloc(jit_State *J, size_t sz)
{
  /* Target an address in the static assembler code (64K aligned).
  ** Try addresses within a distance of target-range/2+1MB..target+range/2-1MB.
  */
#if LJ_TARGET_MIPS
  /* Use the middle of the 256MB-aligned region. */
  uintptr_t target = ((uintptr_t)(void *)lj_vm_exit_handler & 0xf0000000u) +
		     0x08000000u;
#else
  uintptr_t target = (uintptr_t)(void *)lj_vm_exit_handler & ~(uintptr_t)0xffff;
#endif
  const uintptr_t range = (1u << LJ_TARGET_JUMPRANGE) - (1u << 21);
  /* First try a contiguous area below the last one. */
  uintptr_t hint = J->mcarea ? (uintptr_t)J->mcarea - sz : 0;
  int i;
  for (i = 0; i < 32; i++) {  /* 32 attempts ought to be enough ... */
    if (mcode_validptr(hint)) {
      void *p = mcode_alloc_at(J, hint, sz, MCPROT_GEN);

      if (mcode_validptr(p)) {
	if ((uintptr_t)p + sz - target < range || target - (uintptr_t)p < range)
	  return p;
	mcode_free(J, p, sz);  /* Free badly placed area. */
      }
    }
    /* Next try probing pseudo-random addresses. */
    do {
      hint = (0x78fb ^ LJ_PRNG_BITS(J, 15)) << 16;  /* 64K aligned. */
    } while (!(hint + sz < range));
    hint = target + hint - (range>>1);
  }
  lj_trace_err(J, LJ_TRERR_MCODEAL);  /* Give up. OS probably ignores hints? */
  return NULL;
}
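
/* A note on the placement check above: `range` is the nominal jump range
** minus 2MB of slack, and the two unsigned comparisons test, with
** intentional wrap-around, that the area either ends no more than `range`
** bytes above target or starts no more than `range` bytes below it --
** which keeps the whole area within branch reach of the static code.
*/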

#else

/* All memory addresses are reachable by relative jumps. */
#define mcode_alloc(J, sz)	mcode_alloc_at((J), 0, (sz), MCPROT_GEN)

#endif

/* -- MCode area management ----------------------------------------------- */

/* Linked list of MCode areas. */
typedef struct MCLink {
  MCode *next;		/* Next area. */
  size_t size;		/* Size of current area. */
} MCLink;
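
/* Each area stores this link header at its own bottom end: mcode_allocarea()
** points J->mcbot just past the MCLink and J->mctop at the area's end, so
** machine code is assembled top-down into the space in between.
*/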

/* Allocate a new MCode area. */
static void mcode_allocarea(jit_State *J)
{
  MCode *oldarea = J->mcarea;
  size_t sz = (size_t)J->param[JIT_P_sizemcode] << 10;
  sz = (sz + LJ_PAGESIZE-1) & ~(size_t)(LJ_PAGESIZE - 1);
  J->mcarea = (MCode *)mcode_alloc(J, sz);
  J->szmcarea = sz;
  J->mcprot = MCPROT_GEN;
  J->mctop = (MCode *)((char *)J->mcarea + J->szmcarea);
  J->mcbot = (MCode *)((char *)J->mcarea + sizeof(MCLink));
  ((MCLink *)J->mcarea)->next = oldarea;
  ((MCLink *)J->mcarea)->size = sz;
  J->szallmcarea += sz;
}

/* Free all MCode areas. */
void lj_mcode_free(jit_State *J)
{
  MCode *mc = J->mcarea;
  J->mcarea = NULL;
  J->szallmcarea = 0;
  while (mc) {
    MCode *next = ((MCLink *)mc)->next;
    mcode_free(J, mc, ((MCLink *)mc)->size);
    mc = next;
  }
}

/* -- MCode transactions -------------------------------------------------- */
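
/* A sketch of the intended usage, as implied by the interfaces below:
**
**   mc = lj_mcode_reserve(J, &lim);  -- make the current area writable
**   ... assemble machine code downwards from mc, but never below lim ...
**   lj_mcode_commit(J, top);  -- keep [top, mc), back to executable
**
** lj_mcode_abort() discards the reservation instead, and an assembler that
** runs into lim calls lj_mcode_limiterr() to get a fresh area and retry.
*/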

/* Reserve the remainder of the current MCode area. */
MCode *lj_mcode_reserve(jit_State *J, MCode **lim)
{
  if (!J->mcarea)
    mcode_allocarea(J);
  else
    mcode_protect(J, MCPROT_GEN);
  *lim = J->mcbot;
  return J->mctop;
}

/* Commit the top part of the current MCode area. */
void lj_mcode_commit(jit_State *J, MCode *top)
{
  J->mctop = top;
  mcode_protect(J, MCPROT_RUN);
}

/* Abort the reservation. */
void lj_mcode_abort(jit_State *J)
{
  mcode_protect(J, MCPROT_RUN);
}

/* Set/reset protection to allow patching of MCode areas. */
MCode *lj_mcode_patch(jit_State *J, MCode *ptr, int finish)
{
#ifdef LUAJIT_UNPROTECT_MCODE
  UNUSED(J); UNUSED(ptr); UNUSED(finish);
  return NULL;
#else
  if (finish) {
    if (J->mcarea == ptr)
      mcode_protect(J, MCPROT_RUN);
    else
      mcode_setprot(ptr, ((MCLink *)ptr)->size, MCPROT_RUN);
    return NULL;
  } else {
    MCode *mc = J->mcarea;
    /* Try current area first to use the protection cache. */
    if (ptr >= mc && ptr < (MCode *)((char *)mc + J->szmcarea)) {
      mcode_protect(J, MCPROT_GEN);
      return mc;
    }
    /* Otherwise search through the list of MCode areas. */
    for (;;) {
      mc = ((MCLink *)mc)->next;
      lua_assert(mc != NULL);
      if (ptr >= mc && ptr < (MCode *)((char *)mc + ((MCLink *)mc)->size)) {
	mcode_setprot(mc, ((MCLink *)mc)->size, MCPROT_GEN);
	return mc;
      }
    }
  }
#endif
}
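
/* The returned area start doubles as a handle: callers are expected to pass
** it back with finish set, so the right area -- current or linked -- gets
** re-protected via the cheap path or an explicit mcode_setprot().
*/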

/* Limit of MCode reservation reached. */
void lj_mcode_limiterr(jit_State *J, size_t need)
{
  size_t sizemcode, maxmcode;
  lj_mcode_abort(J);
  sizemcode = (size_t)J->param[JIT_P_sizemcode] << 10;
  sizemcode = (sizemcode + LJ_PAGESIZE-1) & ~(size_t)(LJ_PAGESIZE - 1);
  maxmcode = (size_t)J->param[JIT_P_maxmcode] << 10;
  if ((size_t)need > sizemcode)
    lj_trace_err(J, LJ_TRERR_MCODEOV);  /* Too long for any area. */
  if (J->szallmcarea + sizemcode > maxmcode)
    lj_trace_err(J, LJ_TRERR_MCODEAL);
  mcode_allocarea(J);
  lj_trace_err(J, LJ_TRERR_MCODELM);  /* Retry with new area. */
}
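
/* Growth policy implied above: each new area is JIT_P_sizemcode KB, rounded
** up to the page size, and the sum of all areas is capped at JIT_P_maxmcode
** KB. MCODELM is thrown even after a successful allocation: per the comment
** above, it makes the compiler retry the trace in the fresh area.
*/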

#endif