Bug 1867190 - Add prefs for PHC probabilities r=glandium
[gecko.git] / js / src / wasm / WasmGC.h
bloba9552722a00246e70eb29de3528227082265ede2
1 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
2 * vim: set ts=8 sts=2 et sw=2 tw=80:
4 * Copyright 2019 Mozilla Foundation
6 * Licensed under the Apache License, Version 2.0 (the "License");
7 * you may not use this file except in compliance with the License.
8 * You may obtain a copy of the License at
10 * http://www.apache.org/licenses/LICENSE-2.0
12 * Unless required by applicable law or agreed to in writing, software
13 * distributed under the License is distributed on an "AS IS" BASIS,
14 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15 * See the License for the specific language governing permissions and
16 * limitations under the License.
19 #ifndef wasm_gc_h
20 #define wasm_gc_h
22 #include "mozilla/BinarySearch.h"
24 #include "jit/ABIArgGenerator.h" // For ABIArgIter
25 #include "js/AllocPolicy.h"
26 #include "js/Vector.h"
27 #include "util/Memory.h"
28 #include "wasm/WasmBuiltins.h"
29 #include "wasm/WasmFrame.h"
30 #include "wasm/WasmSerialize.h"
32 namespace js {
34 namespace jit {
35 class Label;
36 class MacroAssembler;
37 } // namespace jit
39 namespace wasm {
41 class ArgTypeVector;
42 class BytecodeOffset;
44 using jit::Label;
45 using jit::MIRType;
46 using jit::Register;
48 // Definitions for stackmaps.
50 using ExitStubMapVector = Vector<bool, 32, SystemAllocPolicy>;
// StackMapHeader holds the fixed-size fields of a StackMap: counts and
// offsets describing the stack area the map covers. The fields are packed
// into bitfields; the static_assert below pins the whole header at 8 bytes.
struct StackMapHeader {
  explicit StackMapHeader(uint32_t numMappedWords = 0)
      : numMappedWords(numMappedWords),
        numExitStubWords(0),
        frameOffsetFromTop(0),
        hasDebugFrameWithLiveRefs(0) {}

  // The total number of stack words covered by the map ..
  static constexpr size_t MappedWordsBits = 30;
  uint32_t numMappedWords : MappedWordsBits;

  // .. of which this many are "exit stub" extras
  static constexpr size_t ExitStubWordsBits = 6;
  uint32_t numExitStubWords : ExitStubWordsBits;

  // Where is Frame* relative to the top? This is an offset in words. On every
  // platform, FrameOffsetBits needs to be at least
  // ceil(log2(MaxParams*sizeof-biggest-param-type-in-words)). The most
  // constraining platforms are 32-bit with SIMD support, currently x86-32.
  static constexpr size_t FrameOffsetBits = 12;
  uint32_t frameOffsetFromTop : FrameOffsetBits;

  // Notes the presence of a DebugFrame with possibly-live references. A
  // DebugFrame may or may not contain GC-managed data; in situations when it
  // is possible that any pointers in the DebugFrame are non-null, the
  // DebugFrame gets a stackmap.
  uint32_t hasDebugFrameWithLiveRefs : 1;

  WASM_CHECK_CACHEABLE_POD(numMappedWords, numExitStubWords, frameOffsetFromTop,
                           hasDebugFrameWithLiveRefs);

  // Largest value representable in each of the bitfields above.
  static constexpr uint32_t maxMappedWords = (1 << MappedWordsBits) - 1;
  static constexpr uint32_t maxExitStubWords = (1 << ExitStubWordsBits) - 1;
  static constexpr uint32_t maxFrameOffsetFromTop = (1 << FrameOffsetBits) - 1;

  // The size of the largest single parameter: the bigger of a float-register
  // and a general-register spill slot.
  static constexpr size_t MaxParamSize =
      std::max(sizeof(jit::FloatRegisters::RegisterContent),
               sizeof(jit::Registers::RegisterContent));

  // Add 16 words to account for the size of FrameWithInstances including any
  // shadow stack (at worst 8 words total), and then a little headroom in case
  // the argument area had to be aligned.
  static_assert(FrameWithInstances::sizeOf() / sizeof(void*) <= 8);
  static_assert(maxFrameOffsetFromTop >=
                    (MaxParams * MaxParamSize / sizeof(void*)) + 16,
                "limited size of the offset field");
};

WASM_DECLARE_CACHEABLE_POD(StackMapHeader);

// This is the expected size for the header
static_assert(sizeof(StackMapHeader) == 8,
              "wasm::StackMapHeader has unexpected size");
// A StackMap is a bit-array containing numMappedWords bits, one bit per
// word of stack. Bit index zero is for the lowest addressed word in the
// range.
//
// This is a variable-length structure whose size must be known at creation
// time.
//
// Users of the map will know the address of the wasm::Frame that is covered
// by this map. In order that they can calculate the exact address range
// covered by the map, the map also stores the offset, from the highest
// addressed word of the map, of the embedded wasm::Frame. This is an offset
// down from the highest address, rather than up from the lowest, so as to
// limit its range to FrameOffsetBits bits.
//
// The stackmap may also cover a DebugFrame (all DebugFrames which may
// potentially contain live pointers into the JS heap get a map). If so that
// can be noted, since users of the map need to trace pointers in a
// DebugFrame.
//
// Finally, for sanity checking only, for stackmaps associated with a wasm
// trap exit stub, the number of words used by the trap exit stub save area
// is also noted. This is used in Instance::traceFrame to check that the
// TrapExitDummyValue is in the expected place in the frame.
struct StackMap final {
  // The header contains the constant-sized fields before the variable-sized
  // bitmap that follows.
  StackMapHeader header;

 private:
  // The variable-sized bitmap. Declared with one element, but the storage
  // allocated by create() actually holds calcNBitmap(numMappedWords)
  // elements (see allocationSizeInBytes).
  uint32_t bitmap[1];

  // Both constructors zero the whole bitmap; callers then setBit() as needed.
  explicit StackMap(uint32_t numMappedWords) : header(numMappedWords) {
    const uint32_t nBitmap = calcNBitmap(header.numMappedWords);
    memset(bitmap, 0, nBitmap * sizeof(bitmap[0]));
  }
  explicit StackMap(const StackMapHeader& header) : header(header) {
    const uint32_t nBitmap = calcNBitmap(header.numMappedWords);
    memset(bitmap, 0, nBitmap * sizeof(bitmap[0]));
  }

 public:
  // Allocate (fallibly, via js_malloc) and construct a map covering
  // |numMappedWords| words, with all bits clear. Returns nullptr on OOM.
  static StackMap* create(uint32_t numMappedWords) {
    size_t size = allocationSizeInBytes(numMappedWords);
    char* buf = (char*)js_malloc(size);
    if (!buf) {
      return nullptr;
    }
    return ::new (buf) StackMap(numMappedWords);
  }
  // As above, but copying an existing header. Returns nullptr on OOM.
  static StackMap* create(const StackMapHeader& header) {
    size_t size = allocationSizeInBytes(header.numMappedWords);
    char* buf = (char*)js_malloc(size);
    if (!buf) {
      return nullptr;
    }
    return ::new (buf) StackMap(header);
  }

  // Frees a map produced by create(). No destructor needs to run, so freeing
  // the placement-new'd storage directly is sufficient.
  void destroy() { js_free((char*)this); }

  // Returns the size of a `StackMap` allocated with `numMappedWords`.
  static size_t allocationSizeInBytes(uint32_t numMappedWords) {
    uint32_t nBitmap = calcNBitmap(numMappedWords);
    // sizeof(StackMap) already includes one bitmap element, hence nBitmap - 1.
    return sizeof(StackMap) + (nBitmap - 1) * sizeof(bitmap[0]);
  }

  // Returns the allocated size of this `StackMap`.
  size_t allocationSizeInBytes() const {
    return allocationSizeInBytes(header.numMappedWords);
  }

  // Record the number of words in the map used as a wasm trap exit stub
  // save area. See comment above.
  void setExitStubWords(uint32_t nWords) {
    MOZ_ASSERT(header.numExitStubWords == 0);
    MOZ_RELEASE_ASSERT(nWords <= header.maxExitStubWords);
    MOZ_ASSERT(nWords <= header.numMappedWords);
    header.numExitStubWords = nWords;
  }

  // Record the offset from the highest-addressed word of the map, that the
  // wasm::Frame lives at. See comment above.
  void setFrameOffsetFromTop(uint32_t nWords) {
    MOZ_ASSERT(header.frameOffsetFromTop == 0);
    MOZ_RELEASE_ASSERT(nWords <= StackMapHeader::maxFrameOffsetFromTop);
    MOZ_ASSERT(header.frameOffsetFromTop < header.numMappedWords);
    header.frameOffsetFromTop = nWords;
  }

  // If the frame described by this StackMap includes a DebugFrame, call here
  // to record that fact.
  void setHasDebugFrameWithLiveRefs() {
    MOZ_ASSERT(header.hasDebugFrameWithLiveRefs == 0);
    header.hasDebugFrameWithLiveRefs = 1;
  }

  // Mark stack word |bitIndex| (relative to the lowest-addressed mapped word)
  // as holding a GC pointer.
  inline void setBit(uint32_t bitIndex) {
    MOZ_ASSERT(bitIndex < header.numMappedWords);
    uint32_t wordIndex = bitIndex / wordsPerBitmapElem;
    uint32_t wordOffset = bitIndex % wordsPerBitmapElem;
    bitmap[wordIndex] |= (1 << wordOffset);
  }

  // Read the bit for stack word |bitIndex|; returns 0 or 1.
  inline uint32_t getBit(uint32_t bitIndex) const {
    MOZ_ASSERT(bitIndex < header.numMappedWords);
    uint32_t wordIndex = bitIndex / wordsPerBitmapElem;
    uint32_t wordOffset = bitIndex % wordsPerBitmapElem;
    return (bitmap[wordIndex] >> wordOffset) & 1;
  }

  // Raw access to the bitmap storage, e.g. for serialization.
  inline uint8_t* rawBitmap() { return (uint8_t*)&bitmap; }
  inline const uint8_t* rawBitmap() const { return (const uint8_t*)&bitmap; }
  inline size_t rawBitmapLengthInBytes() const {
    return calcNBitmap(header.numMappedWords) * sizeof(uint32_t);
  }

 private:
  static constexpr uint32_t wordsPerBitmapElem = sizeof(bitmap[0]) * 8;

  // Number of uint32_t elements needed to hold |numMappedWords| bits,
  // rounded up, and never less than one (the struct always embeds one).
  static uint32_t calcNBitmap(uint32_t numMappedWords) {
    MOZ_RELEASE_ASSERT(numMappedWords <= StackMapHeader::maxMappedWords);
    uint32_t nBitmap =
        (numMappedWords + wordsPerBitmapElem - 1) / wordsPerBitmapElem;
    return nBitmap == 0 ? 1 : nBitmap;
  }
};

// This is the expected size for a map that covers 32 or fewer words.
static_assert(sizeof(StackMap) == 12, "wasm::StackMap has unexpected size");
237 class StackMaps {
238 public:
239 // A Maplet holds a single code-address-to-map binding. Note that the
240 // code address is the lowest address of the instruction immediately
241 // following the instruction of interest, not of the instruction of
242 // interest itself. In practice (at least for the Wasm Baseline compiler)
243 // this means that |nextInsnAddr| points either immediately after a call
244 // instruction, after a trap instruction or after a no-op.
245 struct Maplet {
246 const uint8_t* nextInsnAddr;
247 StackMap* map;
248 Maplet(const uint8_t* nextInsnAddr, StackMap* map)
249 : nextInsnAddr(nextInsnAddr), map(map) {}
250 void offsetBy(uintptr_t delta) { nextInsnAddr += delta; }
251 bool operator<(const Maplet& other) const {
252 return uintptr_t(nextInsnAddr) < uintptr_t(other.nextInsnAddr);
256 private:
257 bool sorted_;
258 Vector<Maplet, 0, SystemAllocPolicy> mapping_;
260 public:
261 StackMaps() : sorted_(false) {}
262 ~StackMaps() {
263 for (auto& maplet : mapping_) {
264 maplet.map->destroy();
265 maplet.map = nullptr;
268 [[nodiscard]] bool add(const uint8_t* nextInsnAddr, StackMap* map) {
269 MOZ_ASSERT(!sorted_);
270 return mapping_.append(Maplet(nextInsnAddr, map));
272 [[nodiscard]] bool add(const Maplet& maplet) {
273 return add(maplet.nextInsnAddr, maplet.map);
275 void clear() {
276 for (auto& maplet : mapping_) {
277 maplet.nextInsnAddr = nullptr;
278 maplet.map = nullptr;
280 mapping_.clear();
282 bool empty() const { return mapping_.empty(); }
283 size_t length() const { return mapping_.length(); }
284 Maplet* getRef(size_t i) { return &mapping_[i]; }
285 Maplet get(size_t i) const { return mapping_[i]; }
286 Maplet move(size_t i) {
287 Maplet m = mapping_[i];
288 mapping_[i].map = nullptr;
289 return m;
291 void offsetBy(uintptr_t delta) {
292 for (auto& maplet : mapping_) maplet.offsetBy(delta);
294 void finishAndSort() {
295 MOZ_ASSERT(!sorted_);
296 std::sort(mapping_.begin(), mapping_.end());
297 sorted_ = true;
299 void finishAlreadySorted() {
300 MOZ_ASSERT(!sorted_);
301 MOZ_ASSERT(std::is_sorted(mapping_.begin(), mapping_.end()));
302 sorted_ = true;
304 const StackMap* findMap(const uint8_t* nextInsnAddr) const {
305 struct Comparator {
306 int operator()(Maplet aVal) const {
307 if (uintptr_t(mTarget) < uintptr_t(aVal.nextInsnAddr)) {
308 return -1;
310 if (uintptr_t(mTarget) > uintptr_t(aVal.nextInsnAddr)) {
311 return 1;
313 return 0;
315 explicit Comparator(const uint8_t* aTarget) : mTarget(aTarget) {}
316 const uint8_t* mTarget;
319 size_t result;
320 if (mozilla::BinarySearchIf(mapping_, 0, mapping_.length(),
321 Comparator(nextInsnAddr), &result)) {
322 return mapping_[result].map;
325 return nullptr;
328 size_t sizeOfExcludingThis(MallocSizeOf mallocSizeOf) const {
329 return mapping_.sizeOfExcludingThis(mallocSizeOf);
333 // Supporting code for creation of stackmaps.
335 // StackArgAreaSizeUnaligned returns the size, in bytes, of the stack arg area
336 // size needed to pass |argTypes|, excluding any alignment padding beyond the
337 // size of the area as a whole. The size is as determined by the platforms
338 // native ABI.
340 // StackArgAreaSizeAligned returns the same, but rounded up to the nearest 16
341 // byte boundary.
343 // Note, StackArgAreaSize{Unaligned,Aligned}() must process all the arguments
344 // in order to take into account all necessary alignment constraints. The
345 // signature must include any receiver argument -- in other words, it must be
346 // the complete native-ABI-level call signature.
347 template <class T>
348 static inline size_t StackArgAreaSizeUnaligned(const T& argTypes) {
349 jit::WasmABIArgIter<const T> i(argTypes);
350 while (!i.done()) {
351 i++;
353 return i.stackBytesConsumedSoFar();
356 static inline size_t StackArgAreaSizeUnaligned(
357 const SymbolicAddressSignature& saSig) {
358 // WasmABIArgIter::ABIArgIter wants the items to be iterated over to be
359 // presented in some type that has methods length() and operator[]. So we
360 // have to wrap up |saSig|'s array of types in this API-matching class.
361 class MOZ_STACK_CLASS ItemsAndLength {
362 const MIRType* items_;
363 size_t length_;
365 public:
366 ItemsAndLength(const MIRType* items, size_t length)
367 : items_(items), length_(length) {}
368 size_t length() const { return length_; }
369 MIRType operator[](size_t i) const { return items_[i]; }
372 // Assert, at least crudely, that we're not accidentally going to run off
373 // the end of the array of types, nor into undefined parts of it, while
374 // iterating.
375 MOZ_ASSERT(saSig.numArgs <
376 sizeof(saSig.argTypes) / sizeof(saSig.argTypes[0]));
377 MOZ_ASSERT(saSig.argTypes[saSig.numArgs] == MIRType::None /*the end marker*/);
379 ItemsAndLength itemsAndLength(saSig.argTypes, saSig.numArgs);
380 return StackArgAreaSizeUnaligned(itemsAndLength);
// Round |unalignedSize| up to the platform's wasm stack alignment
// (jit::WasmStackAlignment).
static inline size_t AlignStackArgAreaSize(size_t unalignedSize) {
  return AlignBytes(unalignedSize, jit::WasmStackAlignment);
}
// A stackmap creation helper. Create a stackmap from a vector of booleans.
// The caller owns the resulting stackmap.

using StackMapBoolVector = Vector<bool, 128, SystemAllocPolicy>;

wasm::StackMap* ConvertStackMapBoolVectorToStackMap(
    const StackMapBoolVector& vec, bool hasRefs);

// Generate a stackmap for a function's stack-overflow-at-entry trap, with
// the structure:
//
//    <reg dump area>
//    |       ++ <space reserved before trap, if any>
//    |               ++ <space for Frame>
//    |                       ++ <inbound arg area>
//    |                                           |
//    Lowest Addr                                 Highest Addr
//
// The caller owns the resulting stackmap. This assumes a grow-down stack.
//
// For non-debug builds, if the stackmap would contain no pointers, no
// stackmap is created, and nullptr is returned. For a debug build, a
// stackmap is always created and returned.
//
// The "space reserved before trap" is the space reserved by
// MacroAssembler::wasmReserveStackChecked, in the case where the frame is
// "small", as determined by that function.
[[nodiscard]] bool CreateStackMapForFunctionEntryTrap(
    const ArgTypeVector& argTypes, const jit::RegisterOffsets& trapExitLayout,
    size_t trapExitLayoutWords, size_t nBytesReservedBeforeTrap,
    size_t nInboundStackArgBytes, wasm::StackMap** result);

// At a resumable wasm trap, the machine's registers are saved on the stack by
// (code generated by) GenerateTrapExit(). This function writes into |args| a
// vector of booleans describing the ref-ness of the saved integer registers.
// |args[0]| corresponds to the low addressed end of the described section of
// the save area.
[[nodiscard]] bool GenerateStackmapEntriesForTrapExit(
    const ArgTypeVector& args, const jit::RegisterOffsets& trapExitLayout,
    const size_t trapExitLayoutNumWords, ExitStubMapVector* extras);

// Shared write barrier code.
//
// A barriered store looks like this:
//
//   Label skipPreBarrier;
//   EmitWasmPreBarrierGuard(..., &skipPreBarrier);
//   <COMPILER-SPECIFIC ACTIONS HERE>
//   EmitWasmPreBarrierCall(...);
//   bind(&skipPreBarrier);
//
//   <STORE THE VALUE IN MEMORY HERE>
//
//   Label skipPostBarrier;
//   <COMPILER-SPECIFIC ACTIONS HERE>
//   EmitWasmPostBarrierGuard(..., &skipPostBarrier);
//   <CALL POST-BARRIER HERE IN A COMPILER-SPECIFIC WAY>
//   bind(&skipPostBarrier);
//
// The actions are divided up to allow other actions to be placed between them,
// such as saving and restoring live registers. The postbarrier call invokes
// C++ and will kill all live registers.

// Before storing a GC pointer value in memory, skip to `skipBarrier` if the
// prebarrier is not needed. Will clobber `scratch`.
//
// It is OK for `instance` and `scratch` to be the same register.
//
// If `trapOffset` is non-null, then metadata to catch a null access and emit
// a null pointer exception will be emitted. This will only catch a null access
// due to an incremental GC being in progress; the write that follows this
// pre-barrier guard must also be guarded against null.
void EmitWasmPreBarrierGuard(jit::MacroAssembler& masm, Register instance,
                             Register scratch, Register valueAddr,
                             size_t valueOffset, Label* skipBarrier,
                             BytecodeOffset* trapOffset);

// Before storing a GC pointer value in memory, call out-of-line prebarrier
// code. This assumes `PreBarrierReg` contains the address that will be
// updated. On ARM64 it also assumes that x28 (the PseudoStackPointer) has the
// same value as SP. `PreBarrierReg` is preserved by the barrier function.
// Will clobber `scratch`.
//
// It is OK for `instance` and `scratch` to be the same register.
void EmitWasmPreBarrierCall(jit::MacroAssembler& masm, Register instance,
                            Register scratch, Register valueAddr,
                            size_t valueOffset);

// After storing a GC pointer value in memory, skip to `skipBarrier` if a
// postbarrier is not needed. If the location being set is in a heap-allocated
// object then `object` must reference that object; otherwise it should be
// None. The value that was stored is `setValue`. Will clobber `otherScratch`
// and will use other available scratch registers.
//
// `otherScratch` cannot be a designated scratch register.
void EmitWasmPostBarrierGuard(jit::MacroAssembler& masm,
                              const mozilla::Maybe<Register>& object,
                              Register otherScratch, Register setValue,
                              Label* skipBarrier);

#ifdef DEBUG
// Check (approximately) whether `nextPC` is a valid code address for a
// stackmap created by this compiler. This is done by examining the
// instruction at `nextPC`. The matching is inexact, so it may err on the
// side of returning `true` if it doesn't know. Doing so reduces the
// effectiveness of the MOZ_ASSERTs that use this function, so at least for
// the four primary platforms we should keep it as exact as possible.
bool IsPlausibleStackMapKey(const uint8_t* nextPC);
#endif
501 } // namespace wasm
502 } // namespace js
504 #endif // wasm_gc_h