/* -*- Mode: C++; c-basic-offset: 4; indent-tabs-mode: nil; tab-width: 4 -*- */
/* vi: set ts=4 sw=4 expandtab: (add to ~/.vimrc: set modeline modelines=5) */
/* ***** BEGIN LICENSE BLOCK *****
 * Version: MPL 1.1/GPL 2.0/LGPL 2.1
 *
 * The contents of this file are subject to the Mozilla Public License Version
 * 1.1 (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 * http://www.mozilla.org/MPL/
 *
 * Software distributed under the License is distributed on an "AS IS" basis,
 * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
 * for the specific language governing rights and limitations under the
 * License.
 *
 * The Original Code is [Open Source Virtual Machine.].
 *
 * The Initial Developer of the Original Code is
 * Adobe System Incorporated.
 * Portions created by the Initial Developer are Copyright (C) 2004-2006
 * the Initial Developer. All Rights Reserved.
 *
 * Contributor(s):
 *
 * Alternatively, the contents of this file may be used under the terms of
 * either the GNU General Public License Version 2 or later (the "GPL"), or
 * the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
 * in which case the provisions of the GPL or the LGPL are applicable instead
 * of those above. If you wish to allow use of your version of this file only
 * under the terms of either the GPL or the LGPL, and not to allow others to
 * use your version of this file under the terms of the MPL, indicate your
 * decision by deleting the provisions above and replace them with the notice
 * and other provisions required by the GPL or the LGPL. If you do not delete
 * the provisions above, a recipient may use your version of this file under
 * the terms of any one of the MPL, the GPL or the LGPL.
 *
 * ***** END LICENSE BLOCK ***** */
#include "MMgc.h"
#include "StaticAssert.h"

namespace MMgc
{
    // For now, too much code assumes these are the same.
    MMGC_STATIC_ASSERT(PageMap::kPageSize == GCHeap::kBlockSize);
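
    // Overview: the page map records one 2-bit PageType per heap page
    // (kNonGC == 0, so zero-filled map memory reads back as "not GC
    // memory"), with four entries packed per byte. Thus, assuming
    // kPageSize == 4096, each pageMap byte describes 16KB of address
    // space; e.g. page index 5 is stored in pageMap[5 >> 2] == pageMap[1].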
    template <class PM>
    void SimpleExpandSetAll(PM *pm, GCHeap *heap, void *item,
                            uint32_t numPages, PageType val)
    {
        pm->EnsureCapacity(heap, item, numPages);
        uintptr_t addr = (uintptr_t)item;
        while (numPages-- > 0) {
            GCAssert(pm->AddrToVal(addr) == 0);
            pm->AddrSet(addr, val);
            GCAssert(pm->AddrToVal(addr) == val);
            addr += GCHeap::kBlockSize;
        }
    }
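
    // Note the pair of GCAsserts in the loop above: ExpandSetAll is only
    // used on fresh address ranges, so every page is expected to read as 0
    // (kNonGC) before the store and as val afterwards; debug builds catch
    // accidental double-marking this way.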
    template <class PM>
    void SimpleClearAddrs(PM *pm, void *item, uint32_t numpages)
    {
        uintptr_t addr = (uintptr_t) item;
        while (numpages-- > 0) {
            pm->AddrClear(addr);
            addr += GCHeap::kBlockSize;
        }
    }
    static const uintptr_t MAX_UINTPTR = ~uintptr_t(0);
    PageMapBase::PageMapBase()
        : memStart(MAX_UINTPTR)
        , memEnd(0)
    {}
#ifdef MMGC_USE_UNIFORM_PAGEMAP
    REALLY_INLINE
    uint8_t& Uniform::IndexToByteRef(uintptr_t index)
    {
        GCAssert(pageMap != NULL);
        return pageMap[index >> 2];
    }
    REALLY_INLINE
    void Uniform::IndexSet(uintptr_t index, PageType val)
    {
        IndexToByteRef(index) |= (val << IndexToByteShiftAmt(index));
    }
    REALLY_INLINE
    void Uniform::IndexClear(uintptr_t index)
    {
        IndexToByteRef(index) &= ~(3 << IndexToByteShiftAmt(index));
    }
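
    // Worked example of the byte packing (assuming IndexToByteShiftAmt(i)
    // is (i & 3) * 2, which the 2-bit mask above implies): for index 5,
    // IndexToByteRef yields pageMap[1] and the shift amount is 2, so
    // IndexSet ORs val into bits 2-3 and IndexClear clears exactly those
    // two bits, leaving the other three pages in that byte untouched.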
    REALLY_INLINE
    void Uniform::AddrSet(uintptr_t addr, PageType val)
    {
        uintptr_t index = AddrToIndex(addr);
        GCAssert(IndexSaneForAddressRange(index));
        IndexSet(index, val);
    }
    REALLY_INLINE
    void Uniform::AddrClear(uintptr_t addr)
    {
        IndexClear(AddrToIndex(addr));
    }
    void Uniform::EnsureCapacity(GCHeap *heap, void *item, uint32_t numPages)
    {
        // Shift logic below used to depend on each pageMap byte
        // corresponding to a 16k chunk (16k == kPageSize * 8/2);
        // it was dependent on kPageSize == 4096 (and on the implicit
        // 2-bit payload size). Probably no longer the case,
        // but this code is about to be removed anyway, so just
        // assert (more as documentation) equivalence for now.
        // See https://bugzilla.mozilla.org/show_bug.cgi?id=581070
        MMGC_STATIC_ASSERT(PageMap::kPageSize == 4096);

        const static int kByteOffsetShift = PageMap::kPageShift + 2;
        const static int kByteOffsetMask = (1 << kByteOffsetShift) - 1;

        // (For the benefit of a reader comparing this code to the
        // pre-refactored version; derived from above kPageSize.)
        MMGC_STATIC_ASSERT(kByteOffsetShift == 14);
        MMGC_STATIC_ASSERT(kByteOffsetMask == 0x3fff);

        uintptr_t addr = (uintptr_t)item;
        size_t shiftAmount = 0;
        uint8_t *dst = pageMap;

        // save the current live range in case we need to move/copy
        size_t numBytesToCopy = (memEnd - memStart) >> kByteOffsetShift;

        if (addr < memStart) {
            // round down to the nearest 16k boundary; this makes the shift
            // logic work because it works in bytes, i.e. 4-page chunks
            addr &= ~kByteOffsetMask;
            // marking earlier pages
            if (memStart != MAX_UINTPTR) {
                shiftAmount = (memStart - addr) >> kByteOffsetShift;
            }
            memStart = addr;
        }

        // FIXME: https://bugzilla.mozilla.org/show_bug.cgi?id=588079
        // use pre-shift addr (keep a copy, or just move this code up)
        // FIXME: https://bugzilla.mozilla.org/show_bug.cgi?id=581070
        // double-check +1 here + below; unnecessary extra page allocated?
        if (addr + (numPages+1)*PageMap::kPageSize > memEnd) {
            // marking later pages
            memEnd = addr + (numPages+1)*PageMap::kPageSize;
            // round up to a 16k boundary
            memEnd = (memEnd + kByteOffsetMask) & ~kByteOffsetMask;
        }

        uint32_t numBlocksNeeded =
            uint32_t(((memEnd - memStart) >> kByteOffsetShift)
                     / GCHeap::kBlockSize + 1);
        if (numBlocksNeeded > heap->Size(pageMap)) {
            dst = (uint8_t*)heap->AllocNoOOM(numBlocksNeeded);
        }

        if (shiftAmount || dst != pageMap) {
            if (pageMap != NULL)
                VMPI_memmove(dst + shiftAmount, pageMap, numBytesToCopy);
            if (shiftAmount)
                VMPI_memset(dst, 0, shiftAmount);
            if (dst != pageMap) {
                if (pageMap != NULL)
                    heap->FreeNoOOM(pageMap);
                pageMap = dst;
            }
        }
    }
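
    // Growth example: suppose the map currently covers [memStart, memEnd)
    // and an item below memStart arrives. addr is rounded down to a 16k
    // (one-map-byte) boundary, shiftAmount becomes the number of whole map
    // bytes the old data must slide right, and the memmove/memset pair
    // relocates the old entries and zeroes the newly exposed prefix,
    // whether or not a larger backing block had to be allocated.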
    void Uniform::ExpandSetAll(GCHeap *heap, void *item,
                               uint32_t numPages, PageType val)
    {
        SimpleExpandSetAll(this, heap, item, numPages, val);
    }
    void Uniform::ClearAddrs(void *item, uint32_t numpages)
    {
        SimpleClearAddrs(this, item, numpages);
    }
#endif // MMGC_USE_UNIFORM_PAGEMAP
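
    // On 32-bit targets without the uniform map, Tiered2 uses a two-level
    // scheme instead: a fixed top-level array of tier1_entries pointers,
    // each lazily pointing at a leaf of packed 2-bit entries, so address
    // ranges the GC never touches cost only a NULL pointer.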
#if ! defined(MMGC_USE_UNIFORM_PAGEMAP) && ! defined(MMGC_64BIT)

    Tiered2::Tiered2()
    {
        for (size_t i = 0; i < tier1_entries; i++) {
            pageMap[i] = NULL;
        }
    }
    void Tiered2::DestroyPageMapVia(GCHeap *heap) {
        // Was allocated with heap->Alloc and heap->AllocNoOOM,
        // so can't use heapFree here
        for (size_t i = 0; i < tier1_entries; i++) {
            uint8_t *subMap = pageMap[i];
            if (subMap != NULL)
                heap->FreeNoOOM(subMap);
        }
    }
    void Tiered2::EnsureCapacity(GCHeap *heap, void *item, uint32_t numPages)
    {
        uintptr_t addr = uintptr_t(item);
        // uintptr_t start_1 = uintptr_t(item) >> tier1_shift;
        // uintptr_t finis_1 = uintptr_t(item) >> tier1_shift;
        if (addr < memStart) {
            // existing state is left in place; thus there is no copy logic
            // to accommodate (compare w/ Uniform::EnsureCapacity).
            memStart = addr;
        }
        uintptr_t addr_lim = addr + (numPages+1)*GCHeap::kBlockSize;
        if (addr_lim > memEnd) {
            // again, no copy logic to accommodate.
            memEnd = addr_lim;
        }

        // AddrToIndex drops low order bits, but we want to round *up* for limit calc.
        uint32_t i_lim = AddrToIndex1(addr_lim-1)+1;
        GCAssert(i_lim <= tier1_entries);
        for (uint32_t i = AddrToIndex1(addr); i < i_lim; i++) {
            GCAssert(tier2_entries*sizeof(uint8_t)
                     <= tier2_pages*GCHeap::kBlockSize);
            uint8_t *subMap = pageMap[i];
            if (subMap == NULL) {
                subMap = (uint8_t*)heap->AllocNoOOM(tier2_pages);
                pageMap[i] = subMap;
            }
        }
    }
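
    // Unlike Uniform::EnsureCapacity, nothing is ever copied or shifted
    // here: growing the range only means allocating leaves for the tier-1
    // slots in [AddrToIndex1(addr), i_lim) that are still NULL, so the
    // cost is proportional to the span touched, not to the whole map.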
    void Tiered2::ExpandSetAll(GCHeap *heap, void *item,
                               uint32_t numPages, PageType val)
    {
        SimpleExpandSetAll(this, heap, item, numPages, val);
    }
    void Tiered2::ClearAddrs(void *item, uint32_t numpages)
    {
        SimpleClearAddrs(this, item, numpages);
    }
#endif // ! defined(MMGC_USE_UNIFORM_PAGEMAP) && ! defined(MMGC_64BIT)
#if ! defined(MMGC_USE_UNIFORM_PAGEMAP) && defined(MMGC_64BIT)

    Tiered4::Tiered4()
    {
        for (size_t i = 0; i < tier0_entries; i++) {
            pageMap[i] = NULL;
        }
    }
    PageType
    Tiered4::AddrToVal(uintptr_t addr) const
    {
        uint8_t* subsubMap = Tiered4::AddrToLeafBytes(addr);
        MMGC_STATIC_ASSERT(kNonGC == 0);
        if (subsubMap == NULL)
            return kNonGC;
        return LeafAddrToVal(subsubMap, addr);
    }
    void Tiered4::AddrClear(uintptr_t addr)
    {
        uint8_t* subsubMap = Tiered4::AddrToLeafBytes(addr);
        if (subsubMap == NULL)
            return;
        LeafAddrClear(subsubMap, addr);
    }
    void Tiered4::AddrSet(uintptr_t addr, PageType val)
    {
        uint8_t* subsubMap = Tiered4::AddrToLeafBytes(addr);
        GCAssert(subsubMap != NULL);
        LeafAddrSet(subsubMap, addr, val);
    }
    uint8_t *Tiered4::AddrToLeafBytes(uintptr_t addr) const
    {
        GCAssert(pageMap != NULL);

        uint32_t i = AddrToIndex0(addr);
        uint8_t ***subMap1 = pageMap[i];
        if (subMap1 == NULL)
            return NULL;
        uint32_t j = AddrToIndex1(addr);
        uint8_t **subMap2 = subMap1[j];
        if (subMap2 == NULL)
            return NULL;
        uint32_t k = AddrToIndex2(addr);
        uint8_t *subMap3 = subMap2[k];
        return subMap3; // (may be NULL)
    }
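
    // The lookup is a 4-level radix walk: AddrToIndex0/1/2 slice successive
    // bit fields out of the 64-bit address to pick a slot at each tier, and
    // the returned leaf holds the packed 2-bit entries for the remaining low
    // bits. A NULL at any tier means that region was never mapped, which
    // AddrToVal above reports as kNonGC and AddrClear treats as a no-op.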
    // Note: code below assumes NULL is represented by all-bits 0.
    void Tiered4::DestroyPageMapVia(GCHeap *heap) {
        // Was allocated with heap->Alloc and heap->AllocNoOOM,
        // so can't use heapFree here
        for (size_t i = 0; i < tier0_entries; i++) {
            uint8_t ***subMap1 = pageMap[i];
            if (subMap1 == NULL)
                continue;
            for (size_t j = 0; j < tier1_entries; j++) {
                uint8_t **subMap2 = subMap1[j];
                if (subMap2 == NULL)
                    continue;
                for (size_t k = 0; k < tier2_entries; k++) {
                    uint8_t *subMap3 = subMap2[k];
                    if (subMap3 != NULL)
                        heap->FreeNoOOM(subMap3);
                }
                heap->FreeNoOOM(subMap2);
            }
            heap->FreeNoOOM(subMap1);
        }
    }
    void Tiered4::InitPageMap(GCHeap *heap, uintptr_t start, uintptr_t limit)
    {
        // AddrToIndex drops low order bits, but we want to round *up* for limit calc.
        uint32_t i_lim = AddrToIndex0(limit-1)+1;
        GCAssert(i_lim <= tier0_entries);
        uint32_t i = AddrToIndex0(start), j = AddrToIndex1(start), k = AddrToIndex2(start);

        // common case: [x,x_lim) is small with respect to number
        // of entries (for x in {i,j,k}). Easier to modify zero-
        // initialized array than to allocate non-initialized and
        // then use 3 loops on 0--x, x--x_lim, x_lim--x_entries.
        for (; i < i_lim; i++, j=0, k=0) {
            uint8_t ***subMap1 = pageMap[i];
            uint32_t j_lim = (i+1 < i_lim) ? tier1_entries : AddrToIndex1(limit-1)+1;
            if (subMap1 == NULL) {
                subMap1 = (uint8_t***)heap->AllocNoOOM(tier1_pages);
                pageMap[i] = subMap1;
            }
            for (; j < j_lim; j++, k=0) {
                uint8_t **subMap2 = subMap1[j];
                uint32_t k_lim = (j+1 < j_lim) ? tier2_entries : AddrToIndex2(limit-1)+1;
                if (subMap2 == NULL) {
                    subMap2 = (uint8_t**)heap->AllocNoOOM(tier2_pages);
                    subMap1[j] = subMap2;
                }
                for (; k < k_lim; k++) {
                    uint8_t *subMap3 = subMap2[k];
                    if (subMap3 == NULL) {
                        subMap3 = (uint8_t*)heap->AllocNoOOM(tier3_pages);
                        subMap2[k] = subMap3;
                    }
                }
            }
        }
    }
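
    // The "i++, j=0, k=0" loop headers implement the comment above: j and k
    // start at the offsets derived from `start` only on the first pass, and
    // reset to 0 for every later tier-0/tier-1 slot, while j_lim/k_lim stay
    // at the full entry count except on the final slot. The net effect is
    // that exactly the leaves overlapping [start, limit) get allocated.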
    void Tiered4::EnsureCapacity(GCHeap *heap, void *item, uint32_t numPages)
    {
        uintptr_t addr = uintptr_t(item);
        if (addr < memStart) {
            // existing state is left in place, no need to align addr
            // in any particular fashion. (compare w/
            // Uniform::EnsureCapacity).
            memStart = addr;
        }
        // FIXME: https://bugzilla.mozilla.org/show_bug.cgi?id=593351
        // double-check +1 here + below; unnecessary extra page allocated?
        uintptr_t addr_lim = addr + (numPages+1)*PageMap::kPageSize;
        if (addr_lim > memEnd) {
            // again, no copy logic to accommodate.
            memEnd = addr_lim;
        }

        // check that each node in a tier has capacity for the tier's entry count.
        MMGC_STATIC_ASSERT(Tiered4::tier2_entries*sizeof(uint8_t*)
                           <= Tiered4::tier2_pages*GCHeap::kBlockSize);
        MMGC_STATIC_ASSERT(Tiered4::tier3_entries*sizeof(uint8_t)
                           <= Tiered4::tier3_pages*GCHeap::kBlockSize);

        InitPageMap(heap, addr, addr_lim);
    }
    CacheT4::CacheT4()
        : Tiered4()
        , cached_addr_prefix(CacheT4::uncached)
        , cached_leaf_bytes(NULL)
    {}

    DelayT4::DelayT4()
        : CacheT4()
        , stillInitialDelay(true)
    {}
    void DelayT4::DestroyPageMapVia(GCHeap *heap)
    {
        if (stillInitialDelay) {
            if (cached_addr_prefix == CacheT4::uncached) {
                // nothing allocated, do nothing
                GCAssert(cached_leaf_bytes == NULL);
            } else {
                // allocation of the tree structure was delayed
                // indefinitely, but for a single leaf
                GCAssert(cached_leaf_bytes != NULL);
                heap->Free(cached_leaf_bytes);
                cached_leaf_bytes = NULL;
                cached_addr_prefix = CacheT4::uncached;
            }
        } else {
            // resort to the standard free.
            this->CacheT4::DestroyPageMapVia(heap);
        }
    }
    void DelayT4::SetupDelayedPagemap(GCHeap *heap,
                                      uintptr_t start,
                                      uintptr_t limit)
    {
        // track that the cached leaf is installed at most once.
        bool used_cached_leaf = false;
        bool use_cached_leaf;
        uint32_t i_cache, j_cache, k_cache;
        if (cached_addr_prefix != CacheT4::uncached) {
            use_cached_leaf = true;
            i_cache = AddrToIndex0(cached_addr_prefix);
            j_cache = AddrToIndex1(cached_addr_prefix);
            k_cache = AddrToIndex2(cached_addr_prefix);
        } else {
            use_cached_leaf = false;
            // !use_cached_leaf implies these should not be read.
            i_cache = uint32_t(-1);
            j_cache = uint32_t(-1);
            k_cache = uint32_t(-1);
        }
        MMGC_STATIC_ASSERT(tier1_entries*sizeof(uint8_t**)
                           <= tier1_pages*GCHeap::kBlockSize);
        MMGC_STATIC_ASSERT(tier2_entries*sizeof(uint8_t*)
                           <= tier2_pages*GCHeap::kBlockSize);
        MMGC_STATIC_ASSERT(tier3_entries*sizeof(uint8_t)
                           <= tier3_pages*GCHeap::kBlockSize);
        GCAssert(limit != 0); // a limit of 0 would underflow below.
        // AddrToIndex drops low order bits, but we want to round *up* for limit calc.
        uint32_t i_lim = AddrToIndex0(limit-1)+1;
        GCAssert(i_lim <= tier0_entries);
        uint32_t i = AddrToIndex0(start), j = AddrToIndex1(start), k = AddrToIndex2(start);

        // common case: [x,x_lim) is small with respect to number
        // of entries (for x in {i,j,k}). Easier to modify zero-
        // initialized array than to allocate non-initialized and
        // then use 3 loops on 0--x, x--x_lim, x_lim--x_entries.
        for (; i < i_lim; i++, j=0, k=0) {
            uint8_t ***subMap1 = pageMap[i];
            uint32_t j_lim = (i+1 < i_lim) ? tier1_entries : AddrToIndex1(limit-1)+1;
            if (subMap1 == NULL) {
                subMap1 = (uint8_t***)heap->AllocNoOOM(tier1_pages);
                pageMap[i] = subMap1;
            }
            for (; j < j_lim; j++, k=0) {
                uint8_t **subMap2 = subMap1[j];
                uint32_t k_lim = (j+1 < j_lim) ? tier2_entries : AddrToIndex2(limit-1)+1;
                if (subMap2 == NULL) {
                    subMap2 = (uint8_t**)heap->AllocNoOOM(tier2_pages);
                    subMap1[j] = subMap2;
                }
                for (; k < k_lim; k++) {
                    uint8_t *subMap3 = subMap2[k];
                    if (use_cached_leaf && i == i_cache && j == j_cache && k == k_cache) {
                        GCAssert(! used_cached_leaf);
                        GCAssert(subMap3 == NULL);
                        subMap2[k] = cached_leaf_bytes;
                        used_cached_leaf = true;
                        // future invocations should not re-attempt to
                        // insert the cached leaf.
                        cached_addr_prefix = CacheT4::uncached;
                    } else if (subMap3 == NULL) {
                        subMap3 = (uint8_t*)heap->AllocNoOOM(tier3_pages);
                        subMap2[k] = subMap3;
                    }
                }
            }
        }

        GCAssert(use_cached_leaf == used_cached_leaf);
    }
    void DelayT4::EnsureCapacity(GCHeap *heap, void *item, uint32_t numPages)
    {
        const uintptr_t memStart_orig = memStart, memEnd_orig = memEnd;

        if (! stillInitialDelay) {
            // we've already gone beyond 1 leaf and we should
            // resort to the non-delayed expansion logic.
            this->CacheT4::EnsureCapacity(heap, item, numPages);
            return;
        }

        uintptr_t addr = uintptr_t(item);
        if (addr < memStart)
            memStart = addr;
        // the +1 might be unnecessary (see XXX w/ Uniform::EnsureCapacity)
        uintptr_t addr_lim = addr + (numPages+1)*GCHeap::kBlockSize;
        if (addr_lim > memEnd)
            memEnd = addr_lim;

        bool fits_in_one_leaf;
        uintptr_t addr_prefix = AddrPrefix(addr);
        if (AddrPrefix(addr_lim) != addr_prefix) {
            // can't work, don't bother trying to use one leaf
            fits_in_one_leaf = false;
        } else if (cached_addr_prefix == CacheT4::uncached) {
            // if both pageMap and cache are unset, then this
            // is the first EnsureCapacity call; use it to set
            // up the single leaf.
            fits_in_one_leaf = true;
        } else if (cached_addr_prefix == addr_prefix) {
            // [addr,addr_lim) fits in the one leaf we already have
            fits_in_one_leaf = true;
        } else {
            fits_in_one_leaf = false;
        }

        // If it fits, then use one leaf; o/w perform the
        // delayed allocation of the page map tree structure.
        if (fits_in_one_leaf) {
            if (cached_addr_prefix == CacheT4::uncached) {
                cached_addr_prefix = AddrPrefix(addr);
                cached_leaf_bytes =
                    (uint8_t*)heap->AllocNoOOM(tier3_pages);
            }
            // the one cached leaf suffices.
            return;
        }

        // Invariant: start is the uninit'd value iff end is the uninit'd value
        GCAssert((memStart_orig == MAX_UINTPTR) == (memEnd_orig == 0));
        // Do not compute w/ uninit'd values (a limit of 0 is forbidden).
        if (memEnd_orig != 0)
            SetupDelayedPagemap(heap, memStart_orig, memEnd_orig);
        SetupDelayedPagemap(heap, addr, addr_lim);
        stillInitialDelay = false;
    }
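
    // Rationale for the delay: a small embedding may only ever allocate GC
    // memory within one AddrPrefix-sized window, in which case DelayT4 gets
    // by with a single leaf and never pays for the three upper tiers. Only
    // when a request falls outside that window does it build the full tree,
    // replaying both the previously promised range [memStart_orig,
    // memEnd_orig) and the new range through SetupDelayedPagemap.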
    void Tiered4::ExpandSetAll(GCHeap *heap, void *item,
                               uint32_t numPages, PageType val)
    {
        SimpleExpandSetAll(this, heap, item, numPages, val);
    }
    void Tiered4::ClearAddrs(void *item, uint32_t numpages)
    {
        SimpleClearAddrs(this, item, numpages);
    }
    void CacheT4::ExpandSetAll(GCHeap *heap, void *item,
                               uint32_t numPages, PageType val)
    {
        SimpleExpandSetAll(this, heap, item, numPages, val);
    }
    void CacheT4::ClearAddrs(void *item, uint32_t numpages)
    {
        SimpleClearAddrs(this, item, numpages);
    }
    void DelayT4::ExpandSetAll(GCHeap *heap, void *item,
                               uint32_t numPages, PageType val)
    {
        SimpleExpandSetAll(this, heap, item, numPages, val);
    }
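
    // These wrappers look identical but are not redundant: SimpleExpandSetAll
    // is a template over the static type of its first argument, so
    // DelayT4::ExpandSetAll routes page-map growth through
    // DelayT4::EnsureCapacity (the delayed single-leaf path), while the
    // Tiered4 and CacheT4 wrappers bind to the eager expansion logic.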
#endif // ! defined(MMGC_USE_UNIFORM_PAGEMAP) && defined(MMGC_64BIT)
}