/*
** Machine code management.
** Copyright (C) 2005-2010 Mike Pall. See Copyright Notice in luajit.h
*/
17 #include "lj_dispatch.h"
/* -- OS-specific functions ----------------------------------------------- */
21 #if defined(LUA_USE_WIN)
23 #define WIN32_LEAN_AND_MEAN
26 #define MCPROT_RW PAGE_READWRITE
27 #define MCPROT_RX PAGE_EXECUTE_READ
28 #define MCPROT_RWX PAGE_EXECUTE_READWRITE
30 static LJ_AINLINE
void *mcode_alloc(jit_State
*J
, size_t sz
, DWORD prot
)
32 void *p
= VirtualAlloc(NULL
, sz
, MEM_RESERVE
|MEM_COMMIT
|MEM_TOP_DOWN
, prot
);
34 lj_trace_err(J
, LJ_TRERR_MCODEAL
);
38 static LJ_AINLINE
void mcode_free(jit_State
*J
, void *p
, size_t sz
)
40 UNUSED(J
); UNUSED(sz
);
41 VirtualFree(p
, 0, MEM_RELEASE
);
44 static LJ_AINLINE
void mcode_setprot(void *p
, size_t sz
, DWORD prot
)
47 VirtualProtect(p
, sz
, prot
, &oprot
);
50 #elif defined(LUA_USE_POSIX)
55 #define MAP_ANONYMOUS MAP_ANON
58 #define MCPROT_RW (PROT_READ|PROT_WRITE)
59 #define MCPROT_RX (PROT_READ|PROT_EXEC)
60 #define MCPROT_RWX (PROT_READ|PROT_WRITE|PROT_EXEC)
62 static LJ_AINLINE
void *mcode_alloc(jit_State
*J
, size_t sz
, int prot
)
64 void *p
= mmap(NULL
, sz
, prot
, MAP_PRIVATE
|MAP_ANONYMOUS
, -1, 0);
66 lj_trace_err(J
, LJ_TRERR_MCODEAL
);
70 static LJ_AINLINE
void mcode_free(jit_State
*J
, void *p
, size_t sz
)
76 static LJ_AINLINE
void mcode_setprot(void *p
, size_t sz
, int prot
)
78 mprotect(p
, sz
, prot
);
83 /* Fallback allocator. This will fail if memory is not executable by default. */
84 #define LUAJIT_UNPROTECT_MCODE
89 static LJ_AINLINE
void *mcode_alloc(jit_State
*J
, size_t sz
, int prot
)
92 return lj_mem_new(J
->L
, sz
);
95 static LJ_AINLINE
void mcode_free(jit_State
*J
, void *p
, size_t sz
)
97 lj_mem_free(J2G(J
), p
, sz
);
100 #define mcode_setprot(p, sz, prot) UNUSED(p)
/* -- MCode area management ----------------------------------------------- */

/* Define this ONLY if the page protection twiddling becomes a bottleneck. */
107 #ifdef LUAJIT_UNPROTECT_MCODE
109 /* It's generally considered to be a potential security risk to have
110 ** pages with simultaneous write *and* execute access in a process.
112 ** Do not even think about using this mode for server processes or
113 ** apps handling untrusted external data (such as a browser).
115 ** The security risk is not in LuaJIT itself -- but if an adversary finds
116 ** any *other* flaw in your C application logic, then any RWX memory page
117 ** simplifies writing an exploit considerably.
119 #define MCPROT_GEN MCPROT_RWX
120 #define MCPROT_RUN MCPROT_RWX
124 /* This is the default behaviour and much safer:
126 ** Most of the time the memory pages holding machine code are executable,
127 ** but NONE of them is writable.
129 ** The current memory area is marked read-write (but NOT executable) only
130 ** during the short time window while the assembler generates machine code.
132 #define MCPROT_GEN MCPROT_RW
133 #define MCPROT_RUN MCPROT_RX
137 /* Change protection of MCode area. */
138 static void mcode_protect(jit_State
*J
, int prot
)
140 #ifdef LUAJIT_UNPROTECT_MCODE
141 UNUSED(J
); UNUSED(prot
);
143 if (J
->mcprot
!= prot
) {
144 mcode_setprot(J
->mcarea
, J
->szmcarea
, prot
);
150 /* Linked list of MCode areas. */
151 typedef struct MCLink
{
152 MCode
*next
; /* Next area. */
153 size_t size
; /* Size of current area. */
156 /* Allocate a new MCode area. */
157 static void mcode_allocarea(jit_State
*J
)
159 MCode
*oldarea
= J
->mcarea
;
160 size_t sz
= (size_t)J
->param
[JIT_P_sizemcode
] << 10;
161 sz
= (sz
+ LJ_PAGESIZE
-1) & ~(size_t)(LJ_PAGESIZE
- 1);
162 J
->mcarea
= (MCode
*)mcode_alloc(J
, sz
, MCPROT_GEN
);
164 J
->mcprot
= MCPROT_GEN
;
165 J
->mctop
= (MCode
*)((char *)J
->mcarea
+ J
->szmcarea
);
166 J
->mcbot
= (MCode
*)((char *)J
->mcarea
+ sizeof(MCLink
));
167 ((MCLink
*)J
->mcarea
)->next
= oldarea
;
168 ((MCLink
*)J
->mcarea
)->size
= sz
;
169 J
->szallmcarea
+= sz
;
172 /* Free all MCode areas. */
173 void lj_mcode_free(jit_State
*J
)
175 MCode
*mc
= J
->mcarea
;
179 MCode
*next
= ((MCLink
*)mc
)->next
;
180 mcode_free(J
, mc
, ((MCLink
*)mc
)->size
);
/* -- MCode transactions -------------------------------------------------- */
187 /* Reserve the remainder of the current MCode area. */
188 MCode
*lj_mcode_reserve(jit_State
*J
, MCode
**lim
)
193 mcode_protect(J
, MCPROT_GEN
);
198 /* Commit the top part of the current MCode area. */
199 void lj_mcode_commit(jit_State
*J
, MCode
*top
)
202 mcode_protect(J
, MCPROT_RUN
);
205 /* Abort the reservation. */
206 void lj_mcode_abort(jit_State
*J
)
208 mcode_protect(J
, MCPROT_RUN
);
211 /* Set/reset protection to allow patching of MCode areas. */
212 MCode
*lj_mcode_patch(jit_State
*J
, MCode
*ptr
, int finish
)
214 #ifdef LUAJIT_UNPROTECT_MCODE
215 UNUSED(J
); UNUSED(ptr
); UNUSED(finish
);
219 if (J
->mcarea
== ptr
)
220 mcode_protect(J
, MCPROT_RUN
);
222 mcode_setprot(ptr
, ((MCLink
*)ptr
)->size
, MCPROT_RUN
);
225 MCode
*mc
= J
->mcarea
;
226 /* Try current area first to use the protection cache. */
227 if (ptr
>= mc
&& ptr
< mc
+ J
->szmcarea
) {
228 mcode_protect(J
, MCPROT_GEN
);
231 /* Otherwise search through the list of MCode areas. */
233 mc
= ((MCLink
*)mc
)->next
;
234 lua_assert(mc
!= NULL
);
235 if (ptr
>= mc
&& ptr
< mc
+ ((MCLink
*)mc
)->size
) {
236 mcode_setprot(mc
, ((MCLink
*)mc
)->size
, MCPROT_GEN
);
244 /* Limit of MCode reservation reached. */
245 void lj_mcode_limiterr(jit_State
*J
, size_t need
)
247 size_t sizemcode
, maxmcode
;
249 sizemcode
= (size_t)J
->param
[JIT_P_sizemcode
] << 10;
250 sizemcode
= (sizemcode
+ LJ_PAGESIZE
-1) & ~(size_t)(LJ_PAGESIZE
- 1);
251 maxmcode
= (size_t)J
->param
[JIT_P_maxmcode
] << 10;
252 if ((size_t)need
> sizemcode
)
253 lj_trace_err(J
, LJ_TRERR_MCODEOV
); /* Too long for any area. */
254 if (J
->szallmcarea
+ sizemcode
> maxmcode
)
255 lj_trace_err(J
, LJ_TRERR_MCODEAL
);
257 lj_trace_err(J
, LJ_TRERR_MCODELM
); /* Retry with new area. */