/* -*- Mode: C++; c-basic-offset: 4; indent-tabs-mode: nil; tab-width: 4 -*- */
/* vi: set ts=4 sw=4 expandtab: (add to ~/.vimrc: set modeline modelines=5) */
/* ***** BEGIN LICENSE BLOCK *****
 * Version: MPL 1.1/GPL 2.0/LGPL 2.1
 *
 * The contents of this file are subject to the Mozilla Public License Version
 * 1.1 (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 * http://www.mozilla.org/MPL/
 *
 * Software distributed under the License is distributed on an "AS IS" basis,
 * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
 * for the specific language governing rights and limitations under the
 * License.
 *
 * The Original Code is [Open Source Virtual Machine].
 *
 * The Initial Developer of the Original Code is
 * Adobe System Incorporated.
 * Portions created by the Initial Developer are Copyright (C) 2004-2007
 * the Initial Developer. All Rights Reserved.
 *
 * Contributor(s):
 *   Adobe AS3 Team
 *
 * Alternatively, the contents of this file may be used under the terms of
 * either the GNU General Public License Version 2 or later (the "GPL"), or
 * the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
 * in which case the provisions of the GPL or the LGPL are applicable instead
 * of those above. If you wish to allow use of your version of this file only
 * under the terms of either the GPL or the LGPL, and not to allow others to
 * use your version of this file under the terms of the MPL, indicate your
 * decision by deleting the provisions above and replace them with the notice
 * and other provisions required by the GPL or the LGPL. If you do not delete
 * the provisions above, a recipient may use your version of this file under
 * the terms of any one of the MPL, the GPL or the LGPL.
 *
 * ***** END LICENSE BLOCK ***** */
#include "nanojit.h"

//#define DOPROF
#include "../vprof/vprof.h"

#ifdef FEATURE_NANOJIT
namespace nanojit
{
    static const bool verbose = false;
#ifdef VMCFG_VTUNE
    // The VTune JIT profiling API can't handle non-contiguous methods,
    // so make the allocation size huge to avoid them.
    static const int pagesPerAlloc = 128; // 1MB
#elif defined(NANOJIT_ARM)
    // ARM requires single-page allocations, due to the constant pool on each
    // page, which must be reachable by a 4KB pc-relative load.
    static const int pagesPerAlloc = 1;
#else
    static const int pagesPerAlloc = 16;
#endif
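    // Each chunk obtained from the OS is pagesPerAlloc pages (bytesPerAlloc
    // bytes). Chunks are carved into CodeList blocks; reusable free blocks
    // are kept on the availblocks list.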
    CodeAlloc::CodeAlloc()
        : heapblocks(0)
        , availblocks(0)
        , totalAllocated(0)
        , bytesPerPage(VMPI_getVMPageSize())
        , bytesPerAlloc(pagesPerAlloc * bytesPerPage)
    {
    }

    CodeAlloc::~CodeAlloc() {
        reset();
    }

    void CodeAlloc::reset() {
        // Give all memory back to the gc heap. The assumption is that all
        // code is done being used by now.
        for (CodeList* hb = heapblocks; hb != 0; ) {
            _nvprof("free page",1);
            CodeList* next = hb->next;
            CodeList* fb = firstBlock(hb);
            markBlockWrite(fb);
            freeCodeChunk(fb, bytesPerAlloc);
            totalAllocated -= bytesPerAlloc;
            hb = next;
        }
        NanoAssert(!totalAllocated);
        heapblocks = availblocks = 0;
    }
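    // Map a chunk's terminator block back to the chunk's first block: align
    // up to the page boundary just past the terminator, then step back one
    // whole chunk (bytesPerAlloc).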
    CodeList* CodeAlloc::firstBlock(CodeList* term) {
        // use uintptr_t, rather than char*, to avoid "increases required alignment" warning
        uintptr_t end = (uintptr_t)alignUp(term, bytesPerPage);
        return (CodeList*) (end - (uintptr_t)bytesPerAlloc);
    }
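    // Convert a byte count to kilobytes, rounded to the nearest KB (used by logStats).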
    static int round(size_t x) {
        return (int)((x + 512) >> 10);
    }
    void CodeAlloc::logStats() {
        size_t total = 0;
        size_t frag_size = 0;
        size_t free_size = 0;
        int free_count = 0;
        for (CodeList* hb = heapblocks; hb != 0; hb = hb->next) {
            total += bytesPerAlloc;
            for (CodeList* b = hb->lower; b != 0; b = b->lower) {
                if (b->isFree) {
                    free_count++;
                    free_size += b->blockSize();
                    if (b->size() < minAllocSize)
                        frag_size += b->blockSize();
                }
            }
        }
        avmplus::AvmLog("code-heap: %dk free %dk fragmented %d\n",
            round(total), round(free_size), int(frag_size));
    }
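    // Ensure the chunk containing b is writable. Write/exec state is tracked
    // per chunk via the terminator block's isExec flag, so this is a no-op
    // when the chunk is already writable.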
    inline void CodeAlloc::markBlockWrite(CodeList* b) {
        NanoAssert(b->terminator != NULL);
        CodeList* term = b->terminator;
        if (term->isExec) {
            markCodeChunkWrite(firstBlock(term), bytesPerAlloc);
            term->isExec = false;
        }
    }
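    // Hand out one free block as a writable [start, end) range, refilling the
    // available list from the OS (addMem) when it is empty.
    //
    // Usage sketch (caller-side names are illustrative, not part of this API):
    //     NIns *start, *end;
    //     codeAlloc->alloc(start, end);      // obtain a writable block
    //     // ... emit instructions into [start, end) ...
    //     codeAlloc->markExec(myBlocks);     // make its chunk executable before running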
    void CodeAlloc::alloc(NIns* &start, NIns* &end) {
        if (!availblocks) {
            // no free mem, get more
            addMem();
        }

        // grab a block
        markBlockWrite(availblocks);
        CodeList* b = removeBlock(availblocks);
        b->isFree = false;
        start = b->start();
        end = b->end;
        if (verbose)
            avmplus::AvmLog("CodeAlloc(%p).alloc %p-%p %d\n", this, start, end, int(end-start));
        debug_only(sanity_check();)
    }
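    // Return the block [start, end) to the allocator: coalesce it with any
    // free neighbors, and put the result on the available list if it is
    // large enough (>= minAllocSize) to be reused.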
    void CodeAlloc::free(NIns* start, NIns *end) {
        NanoAssert(heapblocks);
        CodeList *blk = getBlock(start, end);
        if (verbose)
            avmplus::AvmLog("free %p-%p %d\n", start, end, (int)blk->size());

        NanoAssert(!blk->isFree);

        // coalesce adjacent blocks.
        bool already_on_avail_list;

        if (blk->lower && blk->lower->isFree) {
            // combine blk into blk->lower (destroy blk)
            CodeList* lower = blk->lower;
            CodeList* higher = blk->higher;
            already_on_avail_list = lower->size() >= minAllocSize;
            lower->higher = higher;
            higher->lower = lower;
            blk = lower;
        }
        else
            already_on_avail_list = false;

        // the last block in each heapblock is a terminator block,
        // which is never free, therefore blk->higher != null
        if (blk->higher->isFree) {
            CodeList *higher = blk->higher->higher;
            CodeList *coalescedBlock = blk->higher;

            if ( coalescedBlock->size() >= minAllocSize ) {
                // Unlink coalescedBlock from the available block chain.
                if ( availblocks == coalescedBlock ) {
                    removeBlock(availblocks);
                }
                else {
                    CodeList* free_block = availblocks;
                    while ( free_block && free_block->next != coalescedBlock) {
                        NanoAssert(free_block->size() >= minAllocSize);
                        NanoAssert(free_block->isFree);
                        NanoAssert(free_block->next);
                        free_block = free_block->next;
                    }
                    NanoAssert(free_block && free_block->next == coalescedBlock);
                    free_block->next = coalescedBlock->next;
                }
            }

            // combine blk->higher into blk (destroy coalescedBlock)
            blk->higher = higher;
            higher->lower = blk;
        }
        blk->isFree = true;
        NanoAssert(!blk->lower || !blk->lower->isFree);
        NanoAssert(blk->higher && !blk->higher->isFree);
        //memset(blk->start(), 0xCC, blk->size()); // INT 3 instruction
        if ( !already_on_avail_list && blk->size() >= minAllocSize )
            addBlock(availblocks, blk);

        NanoAssert(heapblocks);
        debug_only(sanity_check();)
    }
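    // Free every block on the given list, returning each one to this allocator.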
    void CodeAlloc::freeAll(CodeList* &code) {
        while (code) {
            CodeList *b = removeBlock(code);
            free(b->start(), b->end);
        }
    }
#if defined NANOJIT_ARM && defined UNDER_CE
    // Use a single flush for the whole CodeList, when we have no
    // finer-granularity flush support, as on WinCE.
    void CodeAlloc::flushICache(CodeList* &/*blocks*/) {
        FlushInstructionCache(GetCurrentProcess(), NULL, NULL);
    }
#else
    void CodeAlloc::flushICache(CodeList* &blocks) {
        for (CodeList *b = blocks; b != 0; b = b->next)
            flushICache(b->start(), b->size());
    }
#endif
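    // Platform-specific implementations of flushICache(start, len) follow.
    // Each one invalidates the instruction cache (or notifies the relevant
    // tool or runtime) for the range [start, start+len).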
#if defined(AVMPLUS_UNIX) && defined(NANOJIT_ARM)
#include <asm/unistd.h>
extern "C" void __clear_cache(char *BEG, char *END);
#endif

#if defined(AVMPLUS_UNIX) && defined(NANOJIT_MIPS)
#include <asm/cachectl.h>
extern "C" int cacheflush(char *addr, int nbytes, int cache);
#endif

#ifdef AVMPLUS_SPARC
// Note: the 'linux' #define is provided by the compiler.
#ifdef linux  // bugzilla 502369
void sync_instruction_memory(caddr_t v, u_int len)
{
    caddr_t end = v + len;
    caddr_t p = v;
    while (p < end) {
        asm("flush %0" : : "r" (p));
        p += 32;
    }
}
#else
extern "C" void sync_instruction_memory(caddr_t v, u_int len);
#endif
#endif
#if defined NANOJIT_IA32 || defined NANOJIT_X64
    // intel chips have dcache/icache interlock
    void CodeAlloc::flushICache(void *start, size_t len) {
        // Tell Valgrind that new code has been generated, and it must flush
        // any translations it has for the memory range generated into.
        (void)start;
        (void)len;
        VALGRIND_DISCARD_TRANSLATIONS(start, len);
    }

#elif defined NANOJIT_ARM && defined UNDER_CE
    // On arm/winmo, just flush the whole icache. The
    // WinCE docs indicate that this function actually ignores its
    // 2nd and 3rd arguments, and wants them to be NULL.
    void CodeAlloc::flushICache(void *, size_t) {
        FlushInstructionCache(GetCurrentProcess(), NULL, NULL);
    }

#elif defined NANOJIT_ARM && defined DARWIN
    void CodeAlloc::flushICache(void *, size_t) {
        VMPI_debugBreak();
    }

#elif defined AVMPLUS_MAC && defined NANOJIT_PPC

# ifdef NANOJIT_64BIT
    extern "C" void sys_icache_invalidate(const void*, size_t len);
    extern "C" void sys_dcache_flush(const void*, size_t len);

    // 64-bit Mac requires 10.5 or later, so use that API.
    void CodeAlloc::flushICache(void *start, size_t len) {
        sys_dcache_flush(start, len);
        sys_icache_invalidate(start, len);
    }

# else
    // 32-bit Mac PPC could be 10.0 or later;
    // use MakeDataExecutable() from the Carbon API, OSUtils.h. See
    // http://developer.apple.com/documentation/Carbon/Reference/Memory_Manag_nt_Utilities/Reference/reference.html#//apple_ref/c/func/MakeDataExecutable
    void CodeAlloc::flushICache(void *start, size_t len) {
        MakeDataExecutable(start, len);
    }
# endif

#elif defined NANOJIT_ARM && defined VMCFG_SYMBIAN
    void CodeAlloc::flushICache(void *ptr, size_t len) {
        uint32_t start = (uint32_t)ptr;
        uint32_t rangeEnd = start + len;
        User::IMB_Range((TAny*)start, (TAny*)rangeEnd);
    }

#elif defined AVMPLUS_SPARC
    // fixme: sync_instruction_memory is a solaris api, test for solaris not sparc
    void CodeAlloc::flushICache(void *start, size_t len) {
        sync_instruction_memory((char*)start, len);
    }

#elif defined NANOJIT_SH4
#include <asm/cachectl.h> /* CACHEFLUSH_*, */
#include <sys/syscall.h> /* __NR_cacheflush, */
    void CodeAlloc::flushICache(void *start, size_t len) {
        syscall(__NR_cacheflush, start, len, CACHEFLUSH_D_WB | CACHEFLUSH_I);
    }

#elif defined(AVMPLUS_UNIX) && defined(NANOJIT_MIPS)
    void CodeAlloc::flushICache(void *start, size_t len) {
        // FIXME Use synci on MIPS32R2
        cacheflush((char *)start, len, BCACHE);
    }

#elif defined AVMPLUS_UNIX
  #ifdef ANDROID
    void CodeAlloc::flushICache(void *start, size_t len) {
        cacheflush((int)start, (int)start + len, 0);
    }
  #else
    // fixme: __clear_cache is a libgcc feature, test for libgcc or gcc
    void CodeAlloc::flushICache(void *start, size_t len) {
        __clear_cache((char*)start, (char*)start + len);
    }
  #endif
#endif // platform-specific flushICache implementations
    void CodeAlloc::addBlock(CodeList* &blocks, CodeList* b) {
        NanoAssert(b->terminator != NULL); // should not be mucking with terminator blocks
        b->next = blocks;
        blocks = b;
    }
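    // Get a new chunk from the OS and format it: one large free block
    // spanning (nearly) the whole chunk, plus a tiny terminator block at the
    // top that anchors this chunk on the heapblocks list and carries its
    // isExec state.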
    void CodeAlloc::addMem() {
        void *mem = allocCodeChunk(bytesPerAlloc); // allocations never fail
        totalAllocated += bytesPerAlloc;
        NanoAssert(mem != NULL); // see allocCodeChunk contract in CodeAlloc.h
        _nvprof("alloc page", uintptr_t(mem)>>12);

        CodeList* b = (CodeList*)mem;
        b->lower = 0;
        b->next = 0;
        b->end = (NIns*) (uintptr_t(mem) + bytesPerAlloc - sizeofMinBlock);
        b->isFree = true;

        // create a tiny terminator block, add to fragmented list, this way
        // all other blocks have a valid block at b->higher
        CodeList* terminator = b->higher;
        b->terminator = terminator;
        terminator->lower = b;
        terminator->end = 0; // this is how we identify the terminator
        terminator->isFree = false;
        terminator->isExec = false;
        terminator->terminator = 0;
        debug_only(sanity_check();)

        // add terminator to heapblocks list so we can track whole blocks
        terminator->next = heapblocks;
        heapblocks = terminator;

        addBlock(availblocks, b); // add to free list
    }
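    // Recover a block's CodeList header from its code range; the header sits
    // immediately below the code bytes, offsetof(CodeList, code) bytes before
    // start.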
    CodeList* CodeAlloc::getBlock(NIns* start, NIns* end) {
        CodeList* b = (CodeList*) (uintptr_t(start) - offsetof(CodeList, code));
        NanoAssert(b->end == end && b->next == 0); (void) end;
        return b;
    }
    CodeList* CodeAlloc::removeBlock(CodeList* &blocks) {
        CodeList* b = blocks;
        NanoAssert(b != NULL);
        NanoAssert(b->terminator != NULL); // should not be mucking with terminator blocks
        blocks = b->next;
        b->next = 0;
        return b;
    }
    void CodeAlloc::add(CodeList* &blocks, NIns* start, NIns* end) {
        addBlock(blocks, getBlock(start, end));
    }
    /**
     * split a block by freeing the hole in the middle defined by [holeStart,holeEnd),
     * and adding the used prefix and suffix parts to the blocks CodeList.
     */
    void CodeAlloc::addRemainder(CodeList* &blocks, NIns* start, NIns* end, NIns* holeStart, NIns* holeEnd) {
        NanoAssert(start < end && start <= holeStart && holeStart <= holeEnd && holeEnd <= end);
        // shrink the hole by aligning holeStart forward and holeEnd backward
        holeStart = (NIns*) ((uintptr_t(holeStart) + sizeof(NIns*)-1) & ~(sizeof(NIns*)-1));
        holeEnd = (NIns*) (uintptr_t(holeEnd) & ~(sizeof(NIns*)-1));
        size_t minHole = minAllocSize;
        if (minHole < 2*sizeofMinBlock)
            minHole = 2*sizeofMinBlock;
        if (uintptr_t(holeEnd) - uintptr_t(holeStart) < minHole) {
            // the hole is too small to make a new free block and a new used block. just keep
            // the whole original block and don't free anything.
            add(blocks, start, end);
        } else if (holeStart == start && holeEnd == end) {
            // totally empty block. free whole start-end range
            this->free(start, end);
        } else if (holeStart == start) {
            // hole is lower-aligned with start, so just need one new block
            // b1 b2
            CodeList* b1 = getBlock(start, end);
            CodeList* b2 = (CodeList*) (uintptr_t(holeEnd) - offsetof(CodeList, code));
            b2->terminator = b1->terminator;
            b2->isFree = false;
            b2->next = 0;
            b2->higher = b1->higher;
            b2->lower = b1;
            b2->higher->lower = b2;
            b1->higher = b2;
            debug_only(sanity_check();)
            this->free(b1->start(), b1->end);
            addBlock(blocks, b2);
        } else if (holeEnd == end) {
            // hole is right-aligned with end, just need one new block
            // todo
            NanoAssert(false);
        } else {
            // there's enough space left to split into three blocks (two new ones)
            CodeList* b1 = getBlock(start, end);
            CodeList* b2 = (CodeList*) (void*) holeStart;
            CodeList* b3 = (CodeList*) (uintptr_t(holeEnd) - offsetof(CodeList, code));
            b1->higher = b2;
            b2->lower = b1;
            b2->higher = b3;
            b2->isFree = false; // redundant, since we're about to free, but good hygiene
            b2->terminator = b1->terminator;
            b3->lower = b2;
            b3->end = end;
            b3->isFree = false;
            b3->higher->lower = b3;
            b3->terminator = b1->terminator;
            b2->next = 0;
            b3->next = 0;
            debug_only(sanity_check();)
            this->free(b2->start(), b2->end);
            addBlock(blocks, b3);
            addBlock(blocks, b1);
        }
    }
#ifdef PERFM
    // This method is used only for profiling purposes.
    // See CodegenLIR::emitMD() in Tamarin for an example.

    size_t CodeAlloc::size(const CodeList* blocks) {
        size_t size = 0;
        for (const CodeList* b = blocks; b != 0; b = b->next)
            size += int((uintptr_t)b->end - (uintptr_t)b);
        return size;
    }
#endif
    size_t CodeAlloc::size() {
        return totalAllocated;
    }
    // check that all block neighbors are correct
#ifdef _DEBUG
    void CodeAlloc::sanity_check() {
        for (CodeList* hb = heapblocks; hb != 0; hb = hb->next) {
            NanoAssert(hb->higher == 0);
            for (CodeList* b = hb->lower; b != 0; b = b->lower) {
                NanoAssert(b->higher->lower == b);
            }
        }
        for (CodeList* avail = this->availblocks; avail; avail = avail->next) {
            NanoAssert(avail->isFree && avail->size() >= minAllocSize);
        }

#if CROSS_CHECK_FREE_LIST
        // every sufficiently large free block must appear on the avail list...
        for(CodeList* term = heapblocks; term; term = term->next) {
            for(CodeList* hb = term->lower; hb; hb = hb->lower) {
                if (hb->isFree && hb->size() >= minAllocSize) {
                    bool found_on_avail = false;
                    for (CodeList* avail = this->availblocks; !found_on_avail && avail; avail = avail->next) {
                        found_on_avail = avail == hb;
                    }
                    NanoAssert(found_on_avail);
                }
            }
        }
        // ...and every avail-list entry must be a block in some heapblock.
        for (CodeList* avail = this->availblocks; avail; avail = avail->next) {
            bool found_in_heapblocks = false;
            for(CodeList* term = heapblocks; !found_in_heapblocks && term; term = term->next) {
                for(CodeList* hb = term->lower; !found_in_heapblocks && hb; hb = hb->lower) {
                    found_in_heapblocks = hb == avail;
                }
            }
            NanoAssert(found_in_heapblocks);
        }
#endif /* CROSS_CHECK_FREE_LIST */
    }
#endif
    // Loop through a list of blocks marking the chunks executable. If we
    // encounter multiple blocks in the same chunk, only the first will cause
    // the chunk to become executable; the later calls are no-ops (the isExec
    // flag is checked).
    void CodeAlloc::markExec(CodeList* &blocks) {
        for (CodeList *b = blocks; b != 0; b = b->next) {
            markChunkExec(b->terminator);
        }
    }
    // Variant of markExec(CodeList*) that walks all heapblocks (i.e. chunks),
    // marking each one executable. On systems where bytesPerAlloc is low
    // (i.e. the list has many elements) this can be expensive.
    void CodeAlloc::markAllExec() {
        for (CodeList* hb = heapblocks; hb != NULL; hb = hb->next) {
            markChunkExec(hb);
        }
    }
    // make an entire chunk executable
    void CodeAlloc::markChunkExec(CodeList* term) {
        NanoAssert(term->terminator == NULL);
        if (!term->isExec) {
            term->isExec = true;
            markCodeChunkExec(firstBlock(term), bytesPerAlloc);
        }
    }
}
#endif // FEATURE_NANOJIT