Fixed jstracer's operator delete leaking out into other shared libraries (bug 452721...
[mozilla-central.git] / js / src / nanojit / avmplus.h
/* ***** BEGIN LICENSE BLOCK *****
 * Version: MPL 1.1/GPL 2.0/LGPL 2.1
 *
 * The contents of this file are subject to the Mozilla Public License Version 1.1 (the
 * "License"); you may not use this file except in compliance with the License. You may obtain
 * a copy of the License at http://www.mozilla.org/MPL/
 *
 * Software distributed under the License is distributed on an "AS IS" basis, WITHOUT
 * WARRANTY OF ANY KIND, either express or implied. See the License for the specific
 * language governing rights and limitations under the License.
 *
 * The Original Code is [Open Source Virtual Machine.]
 *
 * The Initial Developer of the Original Code is Adobe System Incorporated. Portions created
 * by the Initial Developer are Copyright (C)[ 2004-2006 ] Adobe Systems Incorporated. All Rights
 * Reserved.
 *
 * Contributor(s): Adobe AS3 Team
 *                 Andreas Gal <gal@mozilla.com>
 *                 Asko Tontti <atontti@cc.hut.fi>
 *
 * Alternatively, the contents of this file may be used under the terms of either the GNU
 * General Public License Version 2 or later (the "GPL"), or the GNU Lesser General Public
 * License Version 2.1 or later (the "LGPL"), in which case the provisions of the GPL or the
 * LGPL are applicable instead of those above. If you wish to allow use of your version of this
 * file only under the terms of either the GPL or the LGPL, and not to allow others to use your
 * version of this file under the terms of the MPL, indicate your decision by deleting provisions
 * above and replace them with the notice and other provisions required by the GPL or the
 * LGPL. If you do not delete the provisions above, a recipient may use your version of this file
 * under the terms of any one of the MPL, the GPL or the LGPL.
 *
 ***** END LICENSE BLOCK ***** */

#ifndef avm_h___
#define avm_h___

#include <assert.h>
#include <string.h>
#include <stdio.h>
#include <stdlib.h>

#if defined(AVMPLUS_UNIX)
#include <unistd.h>
#include <sys/mman.h>
#endif

#include "jstypes.h"

#define FASTCALL JS_FASTCALL

#if defined(JS_NO_FASTCALL)
#define NJ_NO_FASTCALL
#if defined(AVMPLUS_IA32)
#define SIMULATE_FASTCALL(lr, state_ptr, frag_ptr, func_addr)   \
    asm volatile(                                                \
        "call *%%esi"                                            \
        : "=a" (lr)                                              \
        : "c" (state_ptr), "d" (frag_ptr), "S" (func_addr)       \
        : "memory", "cc"                                         \
    );
#endif /* defined(AVMPLUS_IA32) */
#endif /* defined(JS_NO_FASTCALL) */

#ifdef WIN32
#include <windows.h>
#endif

#ifdef DEBUG
#if !defined _DEBUG
#define _DEBUG
#endif
#define NJ_VERBOSE
#define NJ_PROFILE
#include <stdarg.h>
#endif

#ifdef _DEBUG
void NanoAssertFail();
#endif

#define AvmAssert(x) assert(x)
#define AvmAssertMsg(x, y)
#define AvmDebugLog(x) printf x

#ifdef _MSC_VER
/*
 * Can we just take a moment to think about what it means that MSVC doesn't have stdint.h in 2008?
 * Thanks for your time.
 */
typedef JSUint8  uint8_t;
typedef JSInt8   int8_t;
typedef JSUint16 uint16_t;
typedef JSInt16  int16_t;
typedef JSUint32 uint32_t;
typedef JSInt32  int32_t;
typedef JSUint64 uint64_t;
typedef JSInt64  int64_t;
#else
#include <stdint.h>
#endif

#if defined(AVMPLUS_IA32)
#if defined(_MSC_VER)
__declspec(naked) static inline __int64 rdtsc()
{
    __asm
    {
        rdtsc;
        ret;
    }
}
#elif defined(SOLARIS)
static inline unsigned long long rdtsc(void)
{
    unsigned long long int x;
    asm volatile (".byte 0x0f, 0x31" : "=A" (x));
    return x;
}
#elif defined(__i386__)
static __inline__ unsigned long long rdtsc(void)
{
    unsigned long long int x;
    __asm__ volatile (".byte 0x0f, 0x31" : "=A" (x));
    return x;
}
#endif /* compilers */

#elif defined(__x86_64__)

static __inline__ uint64_t rdtsc(void)
{
    unsigned hi, lo;
    __asm__ __volatile__ ("rdtsc" : "=a"(lo), "=d"(hi));
    return ( (uint64_t)lo)|( ((uint64_t)hi)<<32 );
}

#elif defined(__powerpc__)

static __inline__ unsigned long long rdtsc(void)
{
    /* Read the PowerPC time base, retrying if the upper half ticked over mid-read. */
    unsigned long long int result = 0;
    unsigned long int upper, lower, tmp;
    __asm__ volatile(
        "0:                \n"
        "\tmftbu   %0      \n"
        "\tmftb    %1      \n"
        "\tmftbu   %2      \n"
        "\tcmpw    %2,%0   \n"
        "\tbne     0b      \n"
        : "=r"(upper), "=r"(lower), "=r"(tmp)
        );
    result = upper;
    result = result << 32;
    result = result | lower;
    return result;
}

#endif /* architecture */

struct JSContext;

namespace nanojit
{
    class Fragment;

    enum ExitType {
        BRANCH_EXIT,
        LOOP_EXIT,
        NESTED_EXIT,
        MISMATCH_EXIT,
        OOM_EXIT,
        OVERFLOW_EXIT
    };

    struct SideExit
    {
        intptr_t ip_adj;
        intptr_t sp_adj;
        intptr_t rp_adj;
        Fragment *target;
        Fragment *from;
        int32_t calldepth;
        uint32 numGlobalSlots;
        uint32 numStackSlots;
        uint32 numStackSlotsBelowCurrentFrame;
        uint8 *typeMap;
        ExitType exitType;
#if defined NJ_VERBOSE
        uint32_t sid;
#endif
    };

    class LIns;

    struct GuardRecord
    {
        Fragment *target;
        Fragment *from;
        void *jmp;
        void *origTarget;
        SideExit *exit;
        GuardRecord *outgoing;
        GuardRecord *next;
        LIns *guard;
        int32_t calldepth;
#if defined NJ_VERBOSE
        uint32_t compileNbr;
        uint32_t sid;
#endif
    };

    #define GuardRecordSize(g) sizeof(GuardRecord)
    #define SideExitSize(e) sizeof(SideExit)
}

class GC;

class GCObject
{
public:
    inline void*
    operator new(size_t size, GC* gc)
    {
        return calloc(1, size);
    }

    static void operator delete (void *gcObject)
    {
        free(gcObject);
    }
};

#define MMGC_SUBCLASS_DECL : public GCObject

class GCFinalizedObject : public GCObject
{
public:
    static void operator delete (void *gcObject)
    {
        free(gcObject);
    }
};

class GCHeap
{
public:
    int32_t kNativePageSize;

    GCHeap()
    {
#if defined _SC_PAGE_SIZE
        kNativePageSize = sysconf(_SC_PAGE_SIZE);
#else
        kNativePageSize = 4096; // @todo: what is this?
#endif
    }

    inline void*
    Alloc(uint32_t pages)
    {
#ifdef XP_WIN
        return VirtualAlloc(NULL,
                            pages * kNativePageSize,
                            MEM_COMMIT | MEM_RESERVE,
                            PAGE_EXECUTE_READWRITE);
#elif defined AVMPLUS_UNIX
        /*
         * Don't use the normal heap with mprotect+PROT_EXEC for executable code.
         * SELinux and friends don't allow this.
         */
        return mmap(NULL,
                    pages * kNativePageSize,
                    PROT_READ | PROT_WRITE | PROT_EXEC,
                    MAP_PRIVATE | MAP_ANON,
                    -1, 0);
#else
        return valloc(pages * kNativePageSize);
#endif
    }

    inline void
    Free(void* p, uint32_t pages)
    {
#ifdef XP_WIN
        VirtualFree(p, 0, MEM_RELEASE);
#elif defined AVMPLUS_UNIX
#if defined SOLARIS
        munmap((char*)p, pages * kNativePageSize);
#else
        munmap(p, pages * kNativePageSize);
#endif
#else
        free(p);
#endif
    }
};

class GC
{
    static GCHeap heap;

public:
    static inline void*
    Alloc(uint32_t bytes)
    {
        return calloc(1, bytes);
    }

    static inline void
    Free(void* p)
    {
        free(p);
    }

    static inline GCHeap*
    GetGCHeap()
    {
        return &heap;
    }
};

#define DWB(x) x
#define DRCWB(x) x

#define MMGC_MEM_TYPE(x)

typedef int FunctionID;

namespace avmplus
{
    struct InterpState
    {
        void* sp; /* native stack pointer, stack[0] is spbase[0] */
        void* rp; /* call stack pointer */
        void* gp; /* global frame pointer */
        JSContext *cx; /* current VM context handle */
        void* eos; /* first unusable word after the native stack */
        void* eor; /* first unusable word after the call stack */
        nanojit::GuardRecord* lastTreeExitGuard; /* guard we exited on during a tree call */
        nanojit::GuardRecord* lastTreeCallGuard; /* guard we want to grow from if the tree
                                                    call exit guard mismatched */
    };

    class String
    {
    };

    typedef class String AvmString;

    class StringNullTerminatedUTF8
    {
        const char* cstr;

    public:
        StringNullTerminatedUTF8(GC* gc, String* s)
        {
            cstr = strdup((const char*)s);
        }

        ~StringNullTerminatedUTF8()
        {
            free((void*)cstr);
        }

        inline
        const char* c_str()
        {
            return cstr;
        }
    };

    typedef String* Stringp;

    class AvmConfiguration
    {
    public:
        AvmConfiguration() {
            memset(this, 0, sizeof(AvmConfiguration));
#ifdef DEBUG
            verbose = getenv("TRACEMONKEY") && strstr(getenv("TRACEMONKEY"), "verbose");
            verbose_addrs = 1;
            verbose_exits = 1;
            verbose_live = 1;
            show_stats = 1;
#endif
            tree_opt = 0;
        }

        uint32_t tree_opt:1;
        uint32_t quiet_opt:1;
        uint32_t verbose:1;
        uint32_t verbose_addrs:1;
        uint32_t verbose_live:1;
        uint32_t verbose_exits:1;
        uint32_t show_stats:1;
    };

    static const int kstrconst_emptyString = 0;

    class AvmInterpreter
    {
        class Labels {
        public:
            const char* format(const void* ip)
            {
                static char buf[33];
                sprintf(buf, "%p", ip);
                return buf;
            }
        };

        Labels _labels;
    public:
        Labels* labels;

        AvmInterpreter()
        {
            labels = &_labels;
        }
    };

    class AvmConsole
    {
    public:
        AvmConsole& operator<<(const char* s)
        {
            fprintf(stdout, "%s", s);
            return *this;
        }
    };

    class AvmCore
    {
    public:
        AvmInterpreter interp;
        AvmConsole console;

        static AvmConfiguration config;
        static GC* gc;
        static String* k_str[];
        static bool sse2_available;

        static inline bool
        use_sse2()
        {
            return sse2_available;
        }

        static inline bool
        quiet_opt()
        {
            return config.quiet_opt;
        }

        static inline bool
        verbose()
        {
            return config.verbose;
        }

        static inline GC*
        GetGC()
        {
            return gc;
        }

        static inline String* newString(const char* cstr) {
            return (String*)strdup(cstr);
        }

        static inline void freeString(String* str) {
            return free((char*)str);
        }
    };

    class OSDep
    {
    public:
        static inline void
        getDate()
        {
        }
    };

    /**
     * The List<T> template implements a simple List, which can
     * be templated to support different types.
     *
     * Elements can be added to the end and modified in the middle,
     * but no holes are allowed; that is, for set(n, v) to work,
     * size() > n must already hold.
     *
     * Note that [] operators are provided and you can violate the
     * set properties using these operators; if you want a real
     * list don't use the [] operators, if you want a general purpose
     * array use the [] operators.
     */
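
    /*
     * Illustrative usage sketch (not part of the original header): how the
     * List API described above is typically driven. Assumes a GC* named gc is
     * in scope, e.g. obtained from AvmCore::GetGC().
     *
     *     List<int, LIST_NonGCObjects> slots(gc);   // reserves kInitialCapacity slots up front
     *     slots.add(5);                             // append; returns index 0
     *     slots.add(7);                             // append; returns index 1
     *     slots.set(1, 9);                          // overwrite slot 1 (size() > 1 holds)
     *     int top = slots.removeLast();             // yields 9; size() drops back to 1
     */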

    enum ListElementType { LIST_NonGCObjects, LIST_GCObjects };

    template <typename T, ListElementType kElementType>
    class List
    {
    public:
        enum { kInitialCapacity = 128 };

        List(GC *_gc, uint32_t _capacity=kInitialCapacity) : data(NULL), len(0), capacity(0)
        {
            ensureCapacity(_capacity);
        }

        ~List()
        {
            //clear();
            destroy();
            // zero out in case we are part of an RCObject
            len = 0;
        }

        inline void destroy()
        {
            if (data)
                free(data);
        }

        // 'this' steals the guts of 'that' and 'that' gets reset.
        void FASTCALL become(List& that)
        {
            this->destroy();

            this->data = that.data;
            this->len = that.len;
            this->capacity = that.capacity;

            that.data = 0;
            that.len = 0;
            that.capacity = 0;
        }

        uint32_t FASTCALL add(T value)
        {
            if (len >= capacity) {
                grow();
            }
            wb(len++, value);
            return len-1;
        }

        inline bool isEmpty() const
        {
            return len == 0;
        }

        inline uint32_t size() const
        {
            return len;
        }

        inline T get(uint32_t index) const
        {
            AvmAssert(index < len);
            return *(T*)(data + index);
        }

        void FASTCALL set(uint32_t index, T value)
        {
            AvmAssert(index < capacity);
            if (index >= len)
            {
                len = index+1;
            }
            AvmAssert(len <= capacity);
            wb(index, value);
        }

        inline void clear()
        {
            zero_range(0, len);
            len = 0;
        }

        int FASTCALL indexOf(T value) const
        {
            for (uint32_t i=0; i<len; i++)
                if (get(i) == value)
                    return i;
            return -1;
        }

        int FASTCALL lastIndexOf(T value) const
        {
            for (int32_t i=len-1; i>=0; i--)
                if (get(i) == value)
                    return i;
            return -1;
        }

        inline T last()
        {
            return get(len-1);
        }

        T FASTCALL removeLast()
        {
            if (isEmpty())
                return undef_list_val();
            T t = get(len-1);
            set(len-1, undef_list_val());
            len--;
            return t;
        }

        inline T operator[](uint32_t index) const
        {
            AvmAssert(index < capacity);
            return get(index);
        }

        void FASTCALL ensureCapacity(uint32_t cap)
        {
            if (cap > capacity) {
                if (data == NULL) {
                    data = (T*)calloc(1, factor(cap));
                } else {
                    data = (T*)realloc(data, factor(cap));
                    zero_range(capacity, cap - capacity);
                }
                capacity = cap;
            }
        }

        void FASTCALL insert(uint32_t index, T value, uint32_t count = 1)
        {
            AvmAssert(index <= len);
            AvmAssert(count > 0);
            ensureCapacity(len+count);
            memmove(data + index + count, data + index, factor(len - index));
            wbzm(index, index+count, value);
            len += count;
        }

        T FASTCALL removeAt(uint32_t index)
        {
            T old = get(index);
            // dec the refcount on the one we're removing
            wb(index, undef_list_val());
            memmove(data + index, data + index + 1, factor(len - index - 1));
            len--;
            return old;
        }

    private:
        void FASTCALL grow()
        {
            // growth is fast at first, then slows at larger list sizes.
            uint32_t newMax = 0;
            const uint32_t curMax = capacity;
            if (curMax == 0)
                newMax = kInitialCapacity;
            else if (curMax > 15)
                newMax = curMax * 3/2;
            else
                newMax = curMax * 2;

            ensureCapacity(newMax);
        }

        inline void do_wb_nongc(T* slot, T value)
        {
            *slot = value;
        }

        inline void do_wb_gc(GCObject** slot, const GCObject** value)
        {
            *slot = (GCObject*)*value;
        }

        void FASTCALL wb(uint32_t index, T value)
        {
            AvmAssert(index < capacity);
            AvmAssert(data != NULL);
            T* slot = &data[index];
            do_wb_nongc(slot, value);
        }

        // Multiple wb calls with the same value, under the assumption that the
        // existing values are all zero bits; equivalent to
        //     for (uint32_t u = index; u < index_end; ++u)
        //         wb(u, value);
        void FASTCALL wbzm(uint32_t index, uint32_t index_end, T value)
        {
            AvmAssert(index < capacity);
            AvmAssert(index_end <= capacity);
            AvmAssert(index < index_end);
            AvmAssert(data != NULL);
            T* slot = data + index;
            for ( ; index < index_end; ++index, ++slot)
                do_wb_nongc(slot, value);
        }

        inline uint32_t factor(uint32_t index) const
        {
            return index * sizeof(T);
        }

        void FASTCALL zero_range(uint32_t _first, uint32_t _count)
        {
            memset(data + _first, 0, factor(_count));
        }

        // stuff that needs specialization based on the type
        static inline T undef_list_val();

    private:
        List(const List& toCopy); // unimplemented
        void operator=(const List& that); // unimplemented

        // ------------------------ DATA SECTION BEGIN
    private:
        T* data;
        uint32_t len;
        uint32_t capacity;
        // ------------------------ DATA SECTION END
    };

    // stuff that needs specialization based on the type
    template<typename T, ListElementType kElementType>
    /* static */ inline T List<T, kElementType>::undef_list_val() { return T(0); }

    /**
     * The SortedMap<K,T> template implements an object that
     * maps keys to values. The keys are sorted
     * from smallest to largest in the map. Time of operations
     * is as follows:
     *   put() is O(1) if the key is higher than any existing
     *         key; O(logN) if the key already exists,
     *         and O(N) otherwise.
     *   get() is an O(logN) binary search.
     *
     * No duplicates are allowed.
     */
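
    /*
     * Illustrative usage sketch (not part of the original header), assuming a
     * GC* named gc is in scope:
     *
     *     SortedMap<int, int, LIST_NonGCObjects> m(gc);
     *     m.put(30, 3);                 // key above all existing keys: O(1) append
     *     m.put(10, 1);                 // smaller key: O(N) insert keeps keys sorted
     *     int old = m.put(30, 4);       // existing key: O(logN); returns the old value 3
     *     int v = m.get(10);            // binary search -> 1
     *     bool has = m.containsKey(20); // false
     */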

    template <class K, class T, ListElementType valType>
    class SortedMap : public GCObject
    {
    public:
        enum { kInitialCapacity = 64 };

        SortedMap(GC* gc, int _capacity=kInitialCapacity)
            : keys(gc, _capacity), values(gc, _capacity)
        {
        }

        bool isEmpty() const
        {
            return keys.size() == 0;
        }

        int size() const
        {
            return keys.size();
        }

        void clear()
        {
            keys.clear();
            values.clear();
        }

        void destroy()
        {
            keys.destroy();
            values.destroy();
        }

        T put(K k, T v)
        {
            if (keys.size() == 0 || k > keys.last())
            {
                keys.add(k);
                values.add(v);
                return (T)v;
            }
            else
            {
                int i = find(k);
                if (i >= 0)
                {
                    T old = values[i];
                    keys.set(i, k);
                    values.set(i, v);
                    return old;
                }
                else
                {
                    i = -i - 1; // recover the insertion point
                    AvmAssert(keys.size() != (uint32_t)i);
                    keys.insert(i, k);
                    values.insert(i, v);
                    return v;
                }
            }
        }

        T get(K k) const
        {
            int i = find(k);
            return i >= 0 ? values[i] : 0;
        }

        bool get(K k, T& v) const
        {
            int i = find(k);
            if (i >= 0)
            {
                v = values[i];
                return true;
            }
            return false;
        }

        bool containsKey(K k) const
        {
            int i = find(k);
            return (i >= 0) ? true : false;
        }

        T remove(K k)
        {
            int i = find(k);
            return removeAt(i);
        }

        T removeAt(int i)
        {
            T old = values.removeAt(i);
            keys.removeAt(i);
            return old;
        }

        T removeFirst() { return isEmpty() ? (T)0 : removeAt(0); }
        T removeLast()  { return isEmpty() ? (T)0 : removeAt(keys.size()-1); }
        T first() const { return isEmpty() ? (T)0 : values[0]; }
        T last()  const { return isEmpty() ? (T)0 : values[keys.size()-1]; }

        K firstKey() const { return isEmpty() ? 0 : keys[0]; }
        K lastKey() const  { return isEmpty() ? 0 : keys[keys.size()-1]; }

        // iterator
        T at(int i) const { return values[i]; }
        K keyAt(int i) const { return keys[i]; }

        int findNear(K k) const {
            int i = find(k);
            return i >= 0 ? i : -i-2;
        }

    protected:
        List<K, LIST_NonGCObjects> keys;
        List<T, valType> values;

        int find(K k) const
        {
            int lo = 0;
            int hi = keys.size()-1;

            while (lo <= hi)
            {
                int i = (lo + hi)/2;
                K m = keys[i];
                if (k > m)
                    lo = i + 1;
                else if (k < m)
                    hi = i - 1;
                else
                    return i; // key found
            }
            return -(lo + 1); // key not found, lo is the insertion point
        }
    };

    #define GCSortedMap SortedMap

    /**
     * Bit vectors are an efficient method of keeping True/False information
     * on a set of items or conditions. Class BitSet provides functions
     * to manipulate individual bits in the vector.
     *
     * Since most vectors are rather small, an array of longs is used by
     * default to house the value of the bits. If more bits are needed
     * then an array is allocated dynamically outside of this object.
     *
     * This object is not optimized for a fixed sized bit vector;
     * it instead allows for dynamically growing the bit vector.
     */
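
    /*
     * Illustrative usage sketch (not part of the original header), assuming a
     * GC* named gc is in scope (set() only forwards it to grow()):
     *
     *     BitSet visited;
     *     visited.set(gc, 3);          // mark bit 3
     *     bool on = visited.get(3);    // true
     *     visited.clear(3);            // bit 3 back to false
     *     visited.set(gc, 1000);       // beyond the inline kDefaultCapacity longs,
     *                                  // so storage is grown on the heap
     */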

    class BitSet
    {
    public:
        enum { kUnit = 8*sizeof(long),
               kDefaultCapacity = 4 };

        BitSet()
        {
            capacity = kDefaultCapacity;
            reset();
        }

        ~BitSet()
        {
            if (capacity > kDefaultCapacity)
                free(bits.ptr);
        }

        void reset()
        {
            if (capacity > kDefaultCapacity)
                for (int i=0; i<capacity; i++)
                    bits.ptr[i] = 0;
            else
                for (int i=0; i<capacity; i++)
                    bits.ar[i] = 0;
        }

        void set(GC *gc, int bitNbr)
        {
            int index = bitNbr / kUnit;
            int bit = bitNbr % kUnit;
            if (index >= capacity)
                grow(gc, index+1);

            if (capacity > kDefaultCapacity)
                bits.ptr[index] |= (1<<bit);
            else
                bits.ar[index] |= (1<<bit);
        }

        void clear(int bitNbr)
        {
            int index = bitNbr / kUnit;
            int bit = bitNbr % kUnit;
            if (index < capacity)
            {
                if (capacity > kDefaultCapacity)
                    bits.ptr[index] &= ~(1<<bit);
                else
                    bits.ar[index] &= ~(1<<bit);
            }
        }

        bool get(int bitNbr) const
        {
            int index = bitNbr / kUnit;
            int bit = bitNbr % kUnit;
            bool value = false;
            if (index < capacity)
            {
                if (capacity > kDefaultCapacity)
                    value = ( bits.ptr[index] & (1<<bit) ) ? true : false;
                else
                    value = ( bits.ar[index] & (1<<bit) ) ? true : false;
            }
            return value;
        }

    private:
        // Grow the array until it is at least newCapacity big.
        void grow(GC *gc, int newCapacity)
        {
            // create vector that is 2x bigger than requested
            newCapacity *= 2;
            //MEMTAG("BitVector::Grow - long[]");
            long* newBits = (long*)calloc(1, newCapacity * sizeof(long));
            //memset(newBits, 0, newCapacity * sizeof(long));

            // copy the old one
            if (capacity > kDefaultCapacity)
                for (int i=0; i<capacity; i++)
                    newBits[i] = bits.ptr[i];
            else
                for (int i=0; i<capacity; i++)
                    newBits[i] = bits.ar[i];

            // in with the new, out with the old
            if (capacity > kDefaultCapacity)
                free(bits.ptr);

            bits.ptr = newBits;
            capacity = newCapacity;
        }

        // by default we use the array, but if the vector
        // size grows beyond kDefaultCapacity we allocate
        // space dynamically.
        int capacity;
        union
        {
            long ar[kDefaultCapacity];
            long* ptr;
        }
        bits;
    };
}

#endif