/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
 * vim: set ts=8 sts=2 et sw=2 tw=80:
 *
 * Copyright 2016 Mozilla Foundation
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef wasm_code_h
#define wasm_code_h

#include "mozilla/Assertions.h"
#include "mozilla/Atomics.h"
#include "mozilla/Attributes.h"
#include "mozilla/EnumeratedArray.h"
#include "mozilla/Maybe.h"
#include "mozilla/MemoryReporting.h"
#include "mozilla/PodOperations.h"
#include "mozilla/RefPtr.h"
#include "mozilla/UniquePtr.h"

#include "gc/Memory.h"
#include "jit/ProcessExecutableMemory.h"
#include "js/AllocPolicy.h"
#include "js/UniquePtr.h"
#include "js/Utility.h"
#include "js/Vector.h"
#include "threading/ExclusiveData.h"
#include "util/Memory.h"
#include "vm/MutexIDs.h"
#include "wasm/WasmBuiltinModule.h"
#include "wasm/WasmBuiltins.h"
#include "wasm/WasmCodegenConstants.h"
#include "wasm/WasmCodegenTypes.h"
#include "wasm/WasmCompileArgs.h"
#include "wasm/WasmConstants.h"
#include "wasm/WasmExprType.h"
#include "wasm/WasmGC.h"
#include "wasm/WasmLog.h"
#include "wasm/WasmModuleTypes.h"
#include "wasm/WasmSerialize.h"
#include "wasm/WasmShareable.h"
#include "wasm/WasmTypeDecls.h"
#include "wasm/WasmTypeDef.h"
#include "wasm/WasmValType.h"
struct JS_PUBLIC_API JSContext;

namespace js {

struct AsmJSMetadata;

namespace wasm {
// LinkData contains all the metadata necessary to patch all the locations
// that depend on the absolute address of a ModuleSegment. This happens in a
// "linking" step after compilation and after the module's code is serialized.
// The LinkData is serialized along with the Module but does not (normally, see
// Module::debugLinkData_ comment) persist after (de)serialization, which
// distinguishes it from Metadata, which is stored in the Code object.
struct LinkDataCacheablePod {
  uint32_t trapOffset = 0;

  WASM_CHECK_CACHEABLE_POD(trapOffset);

  LinkDataCacheablePod() = default;
};

WASM_DECLARE_CACHEABLE_POD(LinkDataCacheablePod);

WASM_CHECK_CACHEABLE_POD_PADDING(LinkDataCacheablePod)
struct LinkData : LinkDataCacheablePod {
  explicit LinkData(Tier tier) : tier(tier) {}

  LinkDataCacheablePod& pod() { return *this; }
  const LinkDataCacheablePod& pod() const { return *this; }

  struct InternalLink {
    uint32_t patchAtOffset;
    uint32_t targetOffset;
#ifdef JS_CODELABEL_LINKMODE
    uint32_t mode;
#endif

    WASM_CHECK_CACHEABLE_POD(patchAtOffset, targetOffset);
#ifdef JS_CODELABEL_LINKMODE
    WASM_CHECK_CACHEABLE_POD(mode)
#endif
  };
  using InternalLinkVector = Vector<InternalLink, 0, SystemAllocPolicy>;

  struct SymbolicLinkArray
      : EnumeratedArray<SymbolicAddress, SymbolicAddress::Limit, Uint32Vector> {
    size_t sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) const;
  };

  const Tier tier;

  InternalLinkVector internalLinks;
  SymbolicLinkArray symbolicLinks;

  size_t sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) const;
};

WASM_DECLARE_CACHEABLE_POD(LinkData::InternalLink);

using UniqueLinkData = UniquePtr<LinkData>;
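
// Illustrative sketch (not part of the original header): static linking walks
// the LinkData and rewrites code locations that depend on the segment's
// absolute address. A simplified, hypothetical version of what a function like
// StaticallyLink() might do for internal links is shown below; the real logic
// lives in WasmCode.cpp and is architecture-dependent.
#if 0  // illustrative only; not compiled
static void ExampleApplyInternalLinks(uint8_t* codeBase,
                                      const LinkData& linkData) {
  for (const LinkData::InternalLink& link : linkData.internalLinks) {
    // Patch the word at patchAtOffset to point at targetOffset; both offsets
    // are relative to the (now known) base of the code segment.
    void* target = codeBase + link.targetOffset;
    memcpy(codeBase + link.patchAtOffset, &target, sizeof(void*));
  }
}
#endif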
// Executable code must be deallocated specially.

struct FreeCode {
  uint32_t codeLength;
  FreeCode() : codeLength(0) {}
  explicit FreeCode(uint32_t codeLength) : codeLength(codeLength) {}
  void operator()(uint8_t* codeBytes);
};

using UniqueCodeBytes = UniquePtr<uint8_t, FreeCode>;
class Code;
class CodeTier;
class ModuleSegment;
class LazyStubSegment;
// CodeSegment contains common helpers for determining the base and length of a
// code segment and whether a pc belongs to this segment. It is inherited by:
// - ModuleSegment, i.e. the code segment of a Module, generated eagerly when a
//   Module is instantiated.
// - LazyStubSegment, i.e. the code segment of entry stubs that are lazily
//   generated.

class CodeSegment {
 protected:
  enum class Kind { LazyStubs, Module };

  CodeSegment(UniqueCodeBytes bytes, uint32_t length, Kind kind)
      : bytes_(std::move(bytes)),
        length_(length),
        kind_(kind),
        codeTier_(nullptr),
        unregisterOnDestroy_(false) {}

  bool initialize(const CodeTier& codeTier);

 private:
  const UniqueCodeBytes bytes_;
  const uint32_t length_;
  const Kind kind_;
  const CodeTier* codeTier_;
  bool unregisterOnDestroy_;

 public:
  ~CodeSegment();

  bool initialized() const { return !!codeTier_; }

  bool isLazyStubs() const { return kind_ == Kind::LazyStubs; }
  bool isModule() const { return kind_ == Kind::Module; }
  const ModuleSegment* asModule() const {
    MOZ_ASSERT(isModule());
    return (ModuleSegment*)this;
  }
  const LazyStubSegment* asLazyStub() const {
    MOZ_ASSERT(isLazyStubs());
    return (LazyStubSegment*)this;
  }

  uint8_t* base() const { return bytes_.get(); }
  uint32_t length() const {
    MOZ_ASSERT(length_ != UINT32_MAX);
    return length_;
  }

  bool containsCodePC(const void* pc) const {
    return pc >= base() && pc < (base() + length_);
  }

  const CodeTier& codeTier() const {
    MOZ_ASSERT(initialized());
    return *codeTier_;
  }
  const Code& code() const;

  void addSizeOfMisc(MallocSizeOf mallocSizeOf, size_t* code) const;
};
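
// Illustrative sketch (not part of the original header): containsCodePC() is
// the primitive that lets a faulting pc (e.g. from the wasm signal handler) be
// attributed to a segment. A hypothetical lookup over a set of segments:
#if 0  // illustrative only; not compiled
static const CodeSegment* ExampleFindSegment(
    const Vector<const CodeSegment*, 0, SystemAllocPolicy>& segments,
    const void* pc) {
  for (const CodeSegment* seg : segments) {
    if (seg->containsCodePC(pc)) {
      return seg;
    }
  }
  return nullptr;  // pc is not in any known wasm code segment
}
#endif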
// A wasm ModuleSegment owns the allocated executable code for a wasm module.

using UniqueModuleSegment = UniquePtr<ModuleSegment>;
class ModuleSegment : public CodeSegment {
  const Tier tier_;
  uint8_t* const trapCode_;

 public:
  ModuleSegment(Tier tier, UniqueCodeBytes codeBytes, uint32_t codeLength,
                const LinkData& linkData);

  static UniqueModuleSegment create(Tier tier, jit::MacroAssembler& masm,
                                    const LinkData& linkData);
  static UniqueModuleSegment create(Tier tier, const Bytes& unlinkedBytes,
                                    const LinkData& linkData);

  bool initialize(const CodeTier& codeTier, const LinkData& linkData,
                  const Metadata& metadata, const MetadataTier& metadataTier);

  Tier tier() const { return tier_; }

  // Pointers to stubs to which PC is redirected from the signal-handler.
  uint8_t* trapCode() const { return trapCode_; }

  const CodeRange* lookupRange(const void* pc) const;

  void addSizeOfMisc(mozilla::MallocSizeOf mallocSizeOf, size_t* code,
                     size_t* data) const;

  WASM_DECLARE_FRIEND_SERIALIZE(ModuleSegment);
};
extern UniqueCodeBytes AllocateCodeBytes(
    mozilla::Maybe<jit::AutoMarkJitCodeWritableForThread>& writable,
    uint32_t codeLength);
extern bool StaticallyLink(const ModuleSegment& ms, const LinkData& linkData);
extern void StaticallyUnlink(uint8_t* base, const LinkData& linkData);
// A FuncExport represents a single function definition inside a wasm Module
// that has been exported one or more times. A FuncExport represents an
// internal entry point that can be called via function definition index by
// Instance::callExport(). To allow O(log(n)) lookup of a FuncExport by
// function definition index, the FuncExportVector is stored sorted by
// function definition index.

class FuncExport {
  uint32_t typeIndex_;
  uint32_t funcIndex_;
  uint32_t eagerInterpEntryOffset_;  // Machine code offset
  bool hasEagerStubs_;

  WASM_CHECK_CACHEABLE_POD(typeIndex_, funcIndex_, eagerInterpEntryOffset_,
                           hasEagerStubs_);

 public:
  FuncExport() = default;
  explicit FuncExport(uint32_t typeIndex, uint32_t funcIndex,
                      bool hasEagerStubs) {
    typeIndex_ = typeIndex;
    funcIndex_ = funcIndex;
    eagerInterpEntryOffset_ = UINT32_MAX;
    hasEagerStubs_ = hasEagerStubs;
  }
  void initEagerInterpEntryOffset(uint32_t entryOffset) {
    MOZ_ASSERT(eagerInterpEntryOffset_ == UINT32_MAX);
    MOZ_ASSERT(hasEagerStubs());
    eagerInterpEntryOffset_ = entryOffset;
  }

  bool hasEagerStubs() const { return hasEagerStubs_; }
  uint32_t typeIndex() const { return typeIndex_; }
  uint32_t funcIndex() const { return funcIndex_; }
  uint32_t eagerInterpEntryOffset() const {
    MOZ_ASSERT(eagerInterpEntryOffset_ != UINT32_MAX);
    MOZ_ASSERT(hasEagerStubs());
    return eagerInterpEntryOffset_;
  }
};

WASM_DECLARE_CACHEABLE_POD(FuncExport);

using FuncExportVector = Vector<FuncExport, 0, SystemAllocPolicy>;
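
// Illustrative sketch (not part of the original header): because
// FuncExportVector is sorted by function definition index, the O(log(n))
// lookup promised above can be a plain binary search. A simplified,
// hypothetical version:
#if 0  // illustrative only; not compiled
static const FuncExport* ExampleLookupFuncExport(
    const FuncExportVector& funcExports, uint32_t funcIndex) {
  size_t lo = 0;
  size_t hi = funcExports.length();
  while (lo < hi) {
    size_t mid = lo + (hi - lo) / 2;
    if (funcExports[mid].funcIndex() == funcIndex) {
      return &funcExports[mid];
    }
    if (funcExports[mid].funcIndex() < funcIndex) {
      lo = mid + 1;
    } else {
      hi = mid;
    }
  }
  return nullptr;  // funcIndex is not exported
}
#endif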
// A FuncImport contains the runtime metadata needed to implement a call to an
// imported function. Each function import has two call stubs: an optimized
// path into JIT code and a slow path into the generic C++ js::Invoke; the
// offsets of these stubs are stored so that function-import callsites can be
// dynamically patched at runtime.

class FuncImport {
  uint32_t typeIndex_;
  uint32_t instanceOffset_;
  uint32_t interpExitCodeOffset_;  // Machine code offset
  uint32_t jitExitCodeOffset_;     // Machine code offset

  WASM_CHECK_CACHEABLE_POD(typeIndex_, instanceOffset_, interpExitCodeOffset_,
                           jitExitCodeOffset_);

 public:
  FuncImport()
      : typeIndex_(0),
        instanceOffset_(0),
        interpExitCodeOffset_(0),
        jitExitCodeOffset_(0) {}

  FuncImport(uint32_t typeIndex, uint32_t instanceOffset) {
    typeIndex_ = typeIndex;
    instanceOffset_ = instanceOffset;
    interpExitCodeOffset_ = 0;
    jitExitCodeOffset_ = 0;
  }

  void initInterpExitOffset(uint32_t off) {
    MOZ_ASSERT(!interpExitCodeOffset_);
    interpExitCodeOffset_ = off;
  }
  void initJitExitOffset(uint32_t off) {
    MOZ_ASSERT(!jitExitCodeOffset_);
    jitExitCodeOffset_ = off;
  }

  uint32_t typeIndex() const { return typeIndex_; }
  uint32_t instanceOffset() const { return instanceOffset_; }
  uint32_t interpExitCodeOffset() const { return interpExitCodeOffset_; }
  uint32_t jitExitCodeOffset() const { return jitExitCodeOffset_; }
};

WASM_DECLARE_CACHEABLE_POD(FuncImport)

using FuncImportVector = Vector<FuncImport, 0, SystemAllocPolicy>;
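
// Illustrative sketch (not part of the original header): during stub
// generation each FuncImport records where its two exit stubs landed, so
// function-import callsites can later be patched to either the fast JIT exit
// or the slow interpreter exit. A hypothetical recording step:
#if 0  // illustrative only; not compiled
static void ExampleRecordExitStubs(FuncImport& fi, uint32_t interpExitOffset,
                                   uint32_t jitExitOffset) {
  // Each offset may be initialized exactly once; the setters assert this.
  fi.initInterpExitOffset(interpExitOffset);
  fi.initJitExitOffset(jitExitOffset);
}
#endif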
// Metadata holds all the data that is needed to describe compiled wasm code
// at runtime (as opposed to data that is only used to statically link or
// instantiate a module).
//
// Metadata is built incrementally by ModuleGenerator and then shared
// immutably.
//
// The Metadata structure is split into tier-invariant and tier-variant parts;
// the former points to instances of the latter. Additionally, the asm.js
// subsystem subclasses the Metadata, adding more tier-invariant data, some of
// which is serialized. See AsmJS.cpp.
struct MetadataCacheablePod {
  ModuleKind kind;
  uint32_t instanceDataLength;
  Maybe<uint32_t> startFuncIndex;
  Maybe<uint32_t> nameCustomSectionIndex;
  BuiltinModuleIds builtinModules;
  FeatureUsage featureUsage;
  bool filenameIsURL;
  uint32_t typeDefsOffsetStart;
  uint32_t memoriesOffsetStart;
  uint32_t tablesOffsetStart;
  uint32_t tagsOffsetStart;

  WASM_CHECK_CACHEABLE_POD(kind, instanceDataLength, startFuncIndex,
                           nameCustomSectionIndex, builtinModules, featureUsage,
                           filenameIsURL, typeDefsOffsetStart,
                           memoriesOffsetStart, tablesOffsetStart,
                           tagsOffsetStart);

  explicit MetadataCacheablePod(ModuleKind kind)
      : kind(kind),
        instanceDataLength(0),
        featureUsage(FeatureUsage::None),
        filenameIsURL(false),
        typeDefsOffsetStart(UINT32_MAX),
        memoriesOffsetStart(UINT32_MAX),
        tablesOffsetStart(UINT32_MAX),
        tagsOffsetStart(UINT32_MAX) {}
};

WASM_DECLARE_CACHEABLE_POD(MetadataCacheablePod)

WASM_CHECK_CACHEABLE_POD_PADDING(MetadataCacheablePod)
using ModuleHash = uint8_t[8];
struct Metadata : public ShareableBase<Metadata>, public MetadataCacheablePod {
  SharedTypeContext types;
  MemoryDescVector memories;
  GlobalDescVector globals;
  TableDescVector tables;
  TagDescVector tags;
  CacheableChars filename;
  CacheableChars sourceMapURL;

  // namePayload points at the name section's CustomSection::payload so that
  // the Names (which use payload-relative offsets) can be used independently
  // of the Module without duplicating the name section.
  SharedBytes namePayload;
  Maybe<Name> moduleName;
  NameVector funcNames;

  // Debug-enabled code is not serialized.
  bool debugEnabled;
  Uint32Vector debugFuncTypeIndices;
  ModuleHash debugHash;

  explicit Metadata(ModuleKind kind = ModuleKind::Wasm)
      : MetadataCacheablePod(kind), debugEnabled(false), debugHash() {}
  virtual ~Metadata() = default;

  MetadataCacheablePod& pod() { return *this; }
  const MetadataCacheablePod& pod() const { return *this; }

  const TypeDef& getFuncImportTypeDef(const FuncImport& funcImport) const {
    return types->type(funcImport.typeIndex());
  }
  const FuncType& getFuncImportType(const FuncImport& funcImport) const {
    return types->type(funcImport.typeIndex()).funcType();
  }
  const TypeDef& getFuncExportTypeDef(const FuncExport& funcExport) const {
    return types->type(funcExport.typeIndex());
  }
  const FuncType& getFuncExportType(const FuncExport& funcExport) const {
    return types->type(funcExport.typeIndex()).funcType();
  }

  size_t debugNumFuncs() const { return debugFuncTypeIndices.length(); }
  const FuncType& debugFuncType(uint32_t funcIndex) const {
    MOZ_ASSERT(debugEnabled);
    return types->type(debugFuncTypeIndices[funcIndex]).funcType();
  }

  // AsmJSMetadata derives from Metadata iff isAsmJS(). Mostly this distinction
  // is encapsulated within AsmJS.cpp, but the additional virtual functions
  // allow asm.js to override wasm behavior in the handful of cases that can't
  // be easily encapsulated by AsmJS.cpp.

  bool isAsmJS() const { return kind == ModuleKind::AsmJS; }
  const AsmJSMetadata& asAsmJS() const {
    MOZ_ASSERT(isAsmJS());
    return *(const AsmJSMetadata*)this;
  }
  virtual bool mutedErrors() const { return false; }
  virtual const char16_t* displayURL() const { return nullptr; }
  virtual ScriptSource* maybeScriptSource() const { return nullptr; }

  // The Developer-Facing Display Conventions section of the WebAssembly Web
  // API spec defines two cases for displaying a wasm function name:
  //  1. the function name stands alone
  //  2. the function name precedes the location

  enum NameContext { Standalone, BeforeLocation };

  virtual bool getFuncName(NameContext ctx, uint32_t funcIndex,
                           UTF8Bytes* name) const;

  bool getFuncNameStandalone(uint32_t funcIndex, UTF8Bytes* name) const {
    return getFuncName(NameContext::Standalone, funcIndex, name);
  }
  bool getFuncNameBeforeLocation(uint32_t funcIndex, UTF8Bytes* name) const {
    return getFuncName(NameContext::BeforeLocation, funcIndex, name);
  }

  size_t sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) const;
  WASM_DECLARE_FRIEND_SERIALIZE(Metadata);
};

using MutableMetadata = RefPtr<Metadata>;
using SharedMetadata = RefPtr<const Metadata>;
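
// Illustrative sketch (not part of the original header): the two NameContext
// cases correspond to the spec's display conventions. A hypothetical caller
// formatting a stack-frame label might do this:
#if 0  // illustrative only; not compiled
static bool ExampleFrameLabel(const Metadata& metadata, uint32_t funcIndex,
                              UTF8Bytes* out) {
  // "funcName" standing alone vs. "funcName (url:line:col)"; here the caller
  // will append a location afterwards, so BeforeLocation is the right case.
  return metadata.getFuncNameBeforeLocation(funcIndex, out);
}
#endif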
struct MetadataTier {
  explicit MetadataTier(Tier tier = Tier::Serialized)
      : tier(tier), debugTrapOffset(0) {}

  const Tier tier;

  Uint32Vector funcToCodeRange;
  CodeRangeVector codeRanges;
  CallSiteVector callSites;
  TrapSiteVectorArray trapSites;
  FuncImportVector funcImports;
  FuncExportVector funcExports;
  StackMaps stackMaps;
  TryNoteVector tryNotes;
  CodeRangeUnwindInfoVector codeRangeUnwindInfos;

  // Debug information, not serialized.
  uint32_t debugTrapOffset;

  FuncExport& lookupFuncExport(uint32_t funcIndex,
                               size_t* funcExportIndex = nullptr);
  const FuncExport& lookupFuncExport(uint32_t funcIndex,
                                     size_t* funcExportIndex = nullptr) const;

  const CodeRange& codeRange(const FuncExport& funcExport) const {
    return codeRanges[funcToCodeRange[funcExport.funcIndex()]];
  }

  size_t sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) const;
};

using UniqueMetadataTier = UniquePtr<MetadataTier>;
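
// Illustrative sketch (not part of the original header): resolving a
// function's machine-code range goes through two levels of indirection. A
// hypothetical caller:
#if 0  // illustrative only; not compiled
static const CodeRange& ExampleCodeRangeForExport(const MetadataTier& mdTier,
                                                  uint32_t funcIndex) {
  // lookupFuncExport() binary-searches the sorted funcExports vector;
  // codeRange() then maps the function index through funcToCodeRange into
  // codeRanges.
  const FuncExport& fe = mdTier.lookupFuncExport(funcIndex);
  return mdTier.codeRange(fe);
}
#endif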
// LazyStubSegment is a code segment lazily generated for function entry stubs
// (both interpreter and jit ones).
//
// Because a stub is usually small (a few KiB) and an executable code segment
// isn't (64KiB), a given stub segment can contain entry stubs of many
// functions.

using UniqueLazyStubSegment = UniquePtr<LazyStubSegment>;
using LazyStubSegmentVector =
    Vector<UniqueLazyStubSegment, 0, SystemAllocPolicy>;
class LazyStubSegment : public CodeSegment {
  CodeRangeVector codeRanges_;
  size_t usedBytes_;

 public:
  LazyStubSegment(UniqueCodeBytes bytes, size_t length)
      : CodeSegment(std::move(bytes), length, CodeSegment::Kind::LazyStubs),
        usedBytes_(0) {}

  static UniqueLazyStubSegment create(const CodeTier& codeTier,
                                      size_t length);

  static size_t AlignBytesNeeded(size_t bytes) {
    return AlignBytes(bytes, gc::SystemPageSize());
  }

  bool hasSpace(size_t bytes) const;
  [[nodiscard]] bool addStubs(const Metadata& metadata, size_t codeLength,
                              const Uint32Vector& funcExportIndices,
                              const FuncExportVector& funcExports,
                              const CodeRangeVector& codeRanges,
                              uint8_t** codePtr,
                              size_t* indexFirstInsertedCodeRange);

  const CodeRangeVector& codeRanges() const { return codeRanges_; }
  [[nodiscard]] const CodeRange* lookupRange(const void* pc) const;

  void addSizeOfMisc(MallocSizeOf mallocSizeOf, size_t* code,
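                     size_t* data) const;
};

// Worked example (illustrative, not from the original header): with a 4 KiB
// system page, AlignBytesNeeded(100) == 4096 and AlignBytesNeeded(5000) ==
// 8192, so a stub segment always occupies whole pages regardless of how small
// the stubs it holds are.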
// LazyFuncExport helps to efficiently look up a CodeRange from a given
// function index. It is inserted into a vector sorted by function index so
// that it can be found by binary search later.

struct LazyFuncExport {
  size_t funcIndex;
  size_t lazyStubSegmentIndex;
  size_t funcCodeRangeIndex;
  LazyFuncExport(size_t funcIndex, size_t lazyStubSegmentIndex,
                 size_t funcCodeRangeIndex)
      : funcIndex(funcIndex),
        lazyStubSegmentIndex(lazyStubSegmentIndex),
        funcCodeRangeIndex(funcCodeRangeIndex) {}
};

using LazyFuncExportVector = Vector<LazyFuncExport, 0, SystemAllocPolicy>;
// LazyStubTier contains all the necessary information for lazy function entry
// stubs that are generated at runtime. None of its data is ever serialized.
//
// It must be protected by a lock, because the main thread can both read and
// write lazy stubs at any time while a background thread can regenerate lazy
// stubs for tier2 at any time.

class LazyStubTier {
  LazyStubSegmentVector stubSegments_;
  LazyFuncExportVector exports_;
  size_t lastStubSegmentIndex_;

  [[nodiscard]] bool createManyEntryStubs(const Uint32Vector& funcExportIndices,
                                          const Metadata& metadata,
                                          const CodeTier& codeTier,
                                          size_t* stubSegmentIndex);

 public:
  LazyStubTier() : lastStubSegmentIndex_(0) {}

  // Creates one lazy stub for the exported function, for which the jit entry
  // will be set to the lazily-generated one.
  [[nodiscard]] bool createOneEntryStub(uint32_t funcExportIndex,
                                        const Metadata& metadata,
                                        const CodeTier& codeTier);

  bool entryStubsEmpty() const { return stubSegments_.empty(); }
  bool hasEntryStub(uint32_t funcIndex) const;

  // Returns a pointer to the raw interpreter entry of a given function for
  // which stubs have been lazily generated.
  [[nodiscard]] void* lookupInterpEntry(uint32_t funcIndex) const;

  // Create one lazy stub for all the functions in funcExportIndices, putting
  // them in a single stub. Jit entries won't be used until setJitEntries() is
  // actually called, after the Code owner has committed the tier.
  [[nodiscard]] bool createTier2(const Uint32Vector& funcExportIndices,
                                 const Metadata& metadata,
                                 const CodeTier& codeTier,
                                 Maybe<size_t>* stubSegmentIndex);
  void setJitEntries(const Maybe<size_t>& stubSegmentIndex, const Code& code);

  void addSizeOfMisc(MallocSizeOf mallocSizeOf, size_t* code,
                     size_t* data) const;
};
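
// Illustrative sketch (not part of the original header): per the comment
// above, all access to a LazyStubTier goes through its owning CodeTier's
// RWExclusiveData lock. A hypothetical read-side lookup:
#if 0  // illustrative only; not compiled
static void* ExampleLookupInterpEntry(const CodeTier& codeTier,
                                      uint32_t funcIndex) {
  // readLock() excludes writers (e.g. a background tier-2 stub regeneration)
  // for the duration of the lookup.
  auto stubs = codeTier.lazyStubs().readLock();
  return stubs->lookupInterpEntry(funcIndex);
}
#endif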
// CodeTier contains all the data related to a given compilation tier. It is
// built during module generation and then immutably stored in a Code.

using UniqueCodeTier = UniquePtr<CodeTier>;
using UniqueConstCodeTier = UniquePtr<const CodeTier>;
class CodeTier {
  const Code* code_;

  // Serialized information.
  const UniqueMetadataTier metadata_;
  const UniqueModuleSegment segment_;

  // Lazy stubs, not serialized.
  RWExclusiveData<LazyStubTier> lazyStubs_;

  static const MutexId& mutexForTier(Tier tier) {
    if (tier == Tier::Baseline) {
      return mutexid::WasmLazyStubsTier1;
    }
    MOZ_ASSERT(tier == Tier::Optimized);
    return mutexid::WasmLazyStubsTier2;
  }

 public:
  CodeTier(UniqueMetadataTier metadata, UniqueModuleSegment segment)
      : code_(nullptr),
        metadata_(std::move(metadata)),
        segment_(std::move(segment)),
        lazyStubs_(mutexForTier(segment_->tier())) {}

  bool initialized() const { return !!code_ && segment_->initialized(); }
  bool initialize(const Code& code, const LinkData& linkData,
                  const Metadata& metadata);

  Tier tier() const { return segment_->tier(); }
  const RWExclusiveData<LazyStubTier>& lazyStubs() const { return lazyStubs_; }
  const MetadataTier& metadata() const { return *metadata_.get(); }
  const ModuleSegment& segment() const { return *segment_.get(); }
  const Code& code() const {
    MOZ_ASSERT(initialized());
    return *code_;
  }

  const CodeRange* lookupRange(const void* pc) const;
  const TryNote* lookupTryNote(const void* pc) const;

  void addSizeOfMisc(MallocSizeOf mallocSizeOf, size_t* code,
                     size_t* data) const;

  WASM_DECLARE_FRIEND_SERIALIZE_ARGS(CodeTier, const wasm::LinkData& data);
};
// Jump tables that implement function tiering and fast js-to-wasm calls.
//
// There is one JumpTable object per Code object, holding two jump tables: the
// tiering jump table and the jit-entry jump table. The JumpTable is not
// serialized with its Code, but is a run-time entity only. At run-time it is
// shared across threads with its owning Code (and the Module that owns the
// Code). Values in the JumpTable /must/ /always/ be JSContext-agnostic and
// Instance-agnostic, because of this sharing.
//
// Both jump tables have a number of entries equal to the number of functions
// in their Module, including imports. In the tiering table, the elements
// corresponding to the Module's imported functions are unused; in the
// jit-entry table, the elements corresponding to the Module's non-exported
// functions are unused. (Functions can be exported explicitly via the exports
// section or implicitly via a mention of their indices outside function
// bodies.) See comments at JumpTables::init() and
// WasmInstanceObject::getExportedFunction(). The entries are void*. Unused
// entries are null.
//
// The tiering jump table.
//
// This table holds code pointers that are used by baseline functions to enter
// optimized code. See the large comment block in WasmCompile.cpp for
// information about how tiering works.
//
// The jit-entry jump table.
//
// The jit-entry jump table entry for a function holds a stub that allows
// Jitted JS code to call wasm using the JS JIT ABI. See the large comment
// block at WasmInstanceObject::getExportedFunction() for more about exported
// functions and stubs and the lifecycle of the entries in the jit-entry
// table - there are complex invariants.
class JumpTables {
  using TablePointer = mozilla::UniquePtr<void*[], JS::FreePolicy>;

  CompileMode mode_;
  TablePointer tiering_;
  TablePointer jit_;
  size_t numFuncs_;

  static_assert(
      JumpTableJitEntryOffset == 0,
      "Each jit entry in table must have compatible layout with BaseScript "
      "and SelfHostedLazyScript");

 public:
  bool init(CompileMode mode, const ModuleSegment& ms,
            const CodeRangeVector& codeRanges);

  void setJitEntry(size_t i, void* target) const {
    // Make sure that write is atomic; see comment in wasm::Module::finishTier2
    // to that effect.
    MOZ_ASSERT(i < numFuncs_);
    jit_.get()[i] = target;
  }
  void setJitEntryIfNull(size_t i, void* target) const {
    // Make sure that compare-and-write is atomic; see comment in
    // wasm::Module::finishTier2 to that effect.
    MOZ_ASSERT(i < numFuncs_);
    void* expected = nullptr;
    (void)__atomic_compare_exchange_n(&jit_.get()[i], &expected, target,
                                      /*weak=*/false, __ATOMIC_RELAXED,
                                      __ATOMIC_RELAXED);
  }
  void** getAddressOfJitEntry(size_t i) const {
    MOZ_ASSERT(i < numFuncs_);
    MOZ_ASSERT(jit_.get()[i]);
    return &jit_.get()[i];
  }
  size_t funcIndexFromJitEntry(void** target) const {
    MOZ_ASSERT(target >= &jit_.get()[0]);
    MOZ_ASSERT(target <= &(jit_.get()[numFuncs_ - 1]));
    return (intptr_t*)target - (intptr_t*)&jit_.get()[0];
  }

  void setTieringEntry(size_t i, void* target) const {
    MOZ_ASSERT(i < numFuncs_);
    // See comment in wasm::Module::finishTier2.
    if (mode_ == CompileMode::Tier1) {
      tiering_.get()[i] = target;
    }
  }
  void** tiering() const { return tiering_.get(); }

  size_t sizeOfMiscExcludingThis() const {
    // 2 words per function for the jit entry table, plus maybe 1 per
    // function if we're tiering.
    return sizeof(void*) * (2 + (tiering_ ? 1 : 0)) * numFuncs_;
  }
};
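
// Illustrative sketch (not part of the original header): when tier-2 code
// becomes available, each function's tiering entry is repointed from the
// baseline stub to the optimized code. A hypothetical publication loop:
#if 0  // illustrative only; not compiled
static void ExamplePublishTier2(const JumpTables& tables, size_t numFuncs,
                                void** optimizedEntries) {
  for (size_t i = 0; i < numFuncs; i++) {
    // Plain word-sized store; racing baseline callers observe either the old
    // or the new target, both of which are valid entry points.
    tables.setTieringEntry(i, optimizedEntries[i]);
  }
}
#endif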
// Code objects own executable code and the metadata that describes it. A
// single Code object is normally shared between a module and all its
// instances.
//
// profilingLabels_ is lazily initialized, but behind a lock.

using SharedCode = RefPtr<const Code>;
using MutableCode = RefPtr<Code>;
using MetadataAnalysisHashMap =
    HashMap<const char*, uint32_t, mozilla::CStringHasher, SystemAllocPolicy>;
class Code : public ShareableBase<Code> {
  UniqueCodeTier tier1_;

  // [SMDOC] Tier-2 data
  //
  // hasTier2_ and tier2_ implement a three-state protocol for broadcasting
  // tier-2 data; this also amounts to a single-writer/multiple-reader setup.
  //
  // Initially hasTier2_ is false and tier2_ is null.
  //
  // While hasTier2_ is false, *no* thread may read tier2_, but one thread may
  // make tier2_ non-null (this will be the tier-2 compiler thread). That same
  // thread must then later set hasTier2_ to true to broadcast the tier2_ value
  // and its availability. Note that the writing thread may not itself read
  // tier2_ before setting hasTier2_, in order to simplify reasoning about
  // global invariants.
  //
  // Once hasTier2_ is true, *no* thread may write tier2_ and *no* thread may
  // read tier2_ without having observed hasTier2_ as true first. Once
  // hasTier2_ is true, it stays true.
  mutable UniqueConstCodeTier tier2_;
  mutable Atomic<bool> hasTier2_;

  SharedMetadata metadata_;
  ExclusiveData<CacheableCharsVector> profilingLabels_;
  JumpTables jumpTables_;

 public:
  Code(UniqueCodeTier tier1, const Metadata& metadata,
       JumpTables&& maybeJumpTables);
  bool initialized() const { return tier1_->initialized(); }

  bool initialize(const LinkData& linkData);

  void setTieringEntry(size_t i, void* target) const {
    jumpTables_.setTieringEntry(i, target);
  }
  void** tieringJumpTable() const { return jumpTables_.tiering(); }

  void setJitEntry(size_t i, void* target) const {
    jumpTables_.setJitEntry(i, target);
  }
  void setJitEntryIfNull(size_t i, void* target) const {
    jumpTables_.setJitEntryIfNull(i, target);
  }
  void** getAddressOfJitEntry(size_t i) const {
    return jumpTables_.getAddressOfJitEntry(i);
  }
  uint32_t getFuncIndex(JSFunction* fun) const;

  // Install the tier2 code without committing it. To maintain the invariant
  // that tier2_ is never accessed without the tier having been committed, this
  // returns a pointer to the installed tier that the caller can use for
  // subsequent operations.
  bool setAndBorrowTier2(UniqueCodeTier tier2, const LinkData& linkData,
                         const CodeTier** borrowedTier) const;
  void commitTier2() const;

  bool hasTier2() const { return hasTier2_; }
  Tiers tiers() const;
  bool hasTier(Tier t) const;

  Tier stableTier() const;  // This is stable during a run
  Tier bestTier() const;  // This may transition from Baseline -> Ion at any time

  const CodeTier& codeTier(Tier tier) const;
  const Metadata& metadata() const { return *metadata_; }

  const ModuleSegment& segment(Tier iter) const {
    return codeTier(iter).segment();
  }
  const MetadataTier& metadata(Tier iter) const {
    return codeTier(iter).metadata();
  }

  // Metadata lookup functions:

  const CallSite* lookupCallSite(void* returnAddress) const;
  const CodeRange* lookupFuncRange(void* pc) const;
  const StackMap* lookupStackMap(uint8_t* nextPC) const;
  const TryNote* lookupTryNote(void* pc, Tier* tier) const;
  bool containsCodePC(const void* pc) const;
  bool lookupTrap(void* pc, Trap* trap, BytecodeOffset* bytecode) const;
  const CodeRangeUnwindInfo* lookupUnwindInfo(void* pc) const;

  // To save memory, profilingLabels_ are generated lazily when profiling mode
  // is enabled.

  void ensureProfilingLabels(bool profilingEnabled) const;
  const char* profilingLabel(uint32_t funcIndex) const;

  // Wasm disassembly support

  void disassemble(JSContext* cx, Tier tier, int kindSelection,
                   PrintCallback printString) const;

  // Wasm metadata size analysis
  MetadataAnalysisHashMap metadataAnalysis(JSContext* cx) const;

  // about:memory reporting:

  void addSizeOfMiscIfNotSeen(MallocSizeOf mallocSizeOf,
                              Metadata::SeenSet* seenMetadata,
                              Code::SeenSet* seenCode, size_t* code,
                              size_t* data) const;

  WASM_DECLARE_FRIEND_SERIALIZE_ARGS(SharedCode, const wasm::LinkData& data);
};
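
// Illustrative sketch (not part of the original header): the hasTier2_/tier2_
// protocol described in the [SMDOC] above means a reader must observe the
// atomic flag before consulting any tier-2 state, e.g.:
#if 0  // illustrative only; not compiled
static Tier ExampleBestTier(const Code& code) {
  // hasTier2() reads the atomic flag; only if it is observed true may any
  // tier-2 data be touched. Otherwise only the baseline tier is valid.
  return code.hasTier2() ? Tier::Optimized : Tier::Baseline;
}
#endif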
void PatchDebugSymbolicAccesses(uint8_t* codeBase, jit::MacroAssembler& masm);

}  // namespace wasm
}  // namespace js

#endif  // wasm_code_h