/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
 * vim: set ts=8 sts=2 et sw=2 tw=80:
 *
 * Copyright 2021 Mozilla Foundation
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include "mozilla/DebugOnly.h"
#include "mozilla/Maybe.h"

#include <climits>
#include <cstring>
#include <type_traits>

#include "js/WasmFeatures.h"

#include "wasm/WasmCompile.h"
#include "wasm/WasmCompileArgs.h"
#include "wasm/WasmConstants.h"
#include "wasm/WasmTypeDecls.h"
#include "wasm/WasmTypeDef.h"
#include "wasm/WasmValType.h"
39 using mozilla::DebugOnly
;
42 struct ModuleEnvironment
;
44 // The Opcode compactly and safely represents the primary opcode plus any
45 // extension, with convenient predicates and accessors.
51 MOZ_IMPLICIT
Opcode(Op op
) : bits_(uint32_t(op
)) {
52 static_assert(size_t(Op::Limit
) == 256, "fits");
53 MOZ_ASSERT(size_t(op
) < size_t(Op::Limit
));
55 MOZ_IMPLICIT
Opcode(MiscOp op
)
56 : bits_((uint32_t(op
) << 8) | uint32_t(Op::MiscPrefix
)) {
57 static_assert(size_t(MiscOp::Limit
) <= 0xFFFFFF, "fits");
58 MOZ_ASSERT(size_t(op
) < size_t(MiscOp::Limit
));
60 MOZ_IMPLICIT
Opcode(ThreadOp op
)
61 : bits_((uint32_t(op
) << 8) | uint32_t(Op::ThreadPrefix
)) {
62 static_assert(size_t(ThreadOp::Limit
) <= 0xFFFFFF, "fits");
63 MOZ_ASSERT(size_t(op
) < size_t(ThreadOp::Limit
));
65 MOZ_IMPLICIT
Opcode(MozOp op
)
66 : bits_((uint32_t(op
) << 8) | uint32_t(Op::MozPrefix
)) {
67 static_assert(size_t(MozOp::Limit
) <= 0xFFFFFF, "fits");
68 MOZ_ASSERT(size_t(op
) < size_t(MozOp::Limit
));
70 MOZ_IMPLICIT
Opcode(SimdOp op
)
71 : bits_((uint32_t(op
) << 8) | uint32_t(Op::SimdPrefix
)) {
72 static_assert(size_t(SimdOp::Limit
) <= 0xFFFFFF, "fits");
73 MOZ_ASSERT(size_t(op
) < size_t(SimdOp::Limit
));
76 bool isOp() const { return bits_
< uint32_t(Op::FirstPrefix
); }
77 bool isMisc() const { return (bits_
& 255) == uint32_t(Op::MiscPrefix
); }
78 bool isThread() const { return (bits_
& 255) == uint32_t(Op::ThreadPrefix
); }
79 bool isMoz() const { return (bits_
& 255) == uint32_t(Op::MozPrefix
); }
80 bool isSimd() const { return (bits_
& 255) == uint32_t(Op::SimdPrefix
); }
86 MiscOp
asMisc() const {
88 return MiscOp(bits_
>> 8);
90 ThreadOp
asThread() const {
91 MOZ_ASSERT(isThread());
92 return ThreadOp(bits_
>> 8);
96 return MozOp(bits_
>> 8);
98 SimdOp
asSimd() const {
100 return SimdOp(bits_
>> 8);
103 uint32_t bits() const { return bits_
; }
105 bool operator==(const Opcode
& that
) const { return bits_
== that
.bits_
; }
106 bool operator!=(const Opcode
& that
) const { return bits_
!= that
.bits_
; }
109 // This struct captures the bytecode offset of a section's payload (so not
110 // including the header) and the size of the payload.
// This struct captures the bytecode offset of a section's payload (so not
// including the header) and the size of the payload.
// NOTE(review): the start/size member declarations were missing from the
// garbled view; they are forced by end() and operator== below.
struct SectionRange {
  uint32_t start;
  uint32_t size;

  // One past the last payload byte.
  uint32_t end() const { return start + size; }
  bool operator==(const SectionRange& rhs) const {
    return start == rhs.start && size == rhs.size;
  }
};
122 using MaybeSectionRange
= Maybe
<SectionRange
>;
124 // The Encoder class appends bytes to the Bytes object it is given during
125 // construction. The client is responsible for the Bytes's lifetime and must
126 // keep the Bytes alive as long as the Encoder is used.
132 [[nodiscard
]] bool write(const T
& v
) {
133 return bytes_
.append(reinterpret_cast<const uint8_t*>(&v
), sizeof(T
));
136 template <typename UInt
>
137 [[nodiscard
]] bool writeVarU(UInt i
) {
139 uint8_t byte
= i
& 0x7f;
144 if (!bytes_
.append(byte
)) {
151 template <typename SInt
>
152 [[nodiscard
]] bool writeVarS(SInt i
) {
155 uint8_t byte
= i
& 0x7f;
157 done
= ((i
== 0) && !(byte
& 0x40)) || ((i
== -1) && (byte
& 0x40));
161 if (!bytes_
.append(byte
)) {
168 void patchVarU32(size_t offset
, uint32_t patchBits
, uint32_t assertBits
) {
170 uint8_t assertByte
= assertBits
& 0x7f;
171 uint8_t patchByte
= patchBits
& 0x7f;
174 if (assertBits
!= 0) {
178 MOZ_ASSERT(assertByte
== bytes_
[offset
]);
180 bytes_
[offset
] = patchByte
;
182 } while (assertBits
!= 0);
185 void patchFixedU7(size_t offset
, uint8_t patchBits
, uint8_t assertBits
) {
186 MOZ_ASSERT(patchBits
<= uint8_t(INT8_MAX
));
187 patchFixedU8(offset
, patchBits
, assertBits
);
190 void patchFixedU8(size_t offset
, uint8_t patchBits
, uint8_t assertBits
) {
191 MOZ_ASSERT(bytes_
[offset
] == assertBits
);
192 bytes_
[offset
] = patchBits
;
195 uint32_t varU32ByteLength(size_t offset
) const {
196 size_t start
= offset
;
197 while (bytes_
[offset
] & 0x80) {
200 return offset
- start
+ 1;
204 explicit Encoder(Bytes
& bytes
) : bytes_(bytes
) { MOZ_ASSERT(empty()); }
206 size_t currentOffset() const { return bytes_
.length(); }
207 bool empty() const { return currentOffset() == 0; }
209 // Fixed-size encoding operations simply copy the literal bytes (without
210 // attempting to align).
212 [[nodiscard
]] bool writeFixedU7(uint8_t i
) {
213 MOZ_ASSERT(i
<= uint8_t(INT8_MAX
));
214 return writeFixedU8(i
);
216 [[nodiscard
]] bool writeFixedU8(uint8_t i
) { return write
<uint8_t>(i
); }
217 [[nodiscard
]] bool writeFixedU32(uint32_t i
) { return write
<uint32_t>(i
); }
218 [[nodiscard
]] bool writeFixedF32(float f
) { return write
<float>(f
); }
219 [[nodiscard
]] bool writeFixedF64(double d
) { return write
<double>(d
); }
221 // Variable-length encodings that all use LEB128.
223 [[nodiscard
]] bool writeVarU32(uint32_t i
) { return writeVarU
<uint32_t>(i
); }
224 [[nodiscard
]] bool writeVarS32(int32_t i
) { return writeVarS
<int32_t>(i
); }
225 [[nodiscard
]] bool writeVarU64(uint64_t i
) { return writeVarU
<uint64_t>(i
); }
226 [[nodiscard
]] bool writeVarS64(int64_t i
) { return writeVarS
<int64_t>(i
); }
227 [[nodiscard
]] bool writeValType(ValType type
) {
228 static_assert(size_t(TypeCode::Limit
) <= UINT8_MAX
, "fits");
229 // writeValType is only used by asm.js, which doesn't use type
231 MOZ_RELEASE_ASSERT(!type
.isTypeRef(), "NYI");
232 TypeCode tc
= type
.packed().typeCode();
233 MOZ_ASSERT(size_t(tc
) < size_t(TypeCode::Limit
));
234 return writeFixedU8(uint8_t(tc
));
236 [[nodiscard
]] bool writeOp(Opcode opcode
) {
237 // The Opcode constructor has asserted that `opcode` is meaningful, so no
238 // further correctness checking is necessary here.
239 uint32_t bits
= opcode
.bits();
240 if (!writeFixedU8(bits
& 255)) {
246 return writeVarU32(bits
>> 8);
249 // Fixed-length encodings that allow back-patching.
251 [[nodiscard
]] bool writePatchableFixedU7(size_t* offset
) {
252 *offset
= bytes_
.length();
253 return writeFixedU8(UINT8_MAX
);
255 void patchFixedU7(size_t offset
, uint8_t patchBits
) {
256 return patchFixedU7(offset
, patchBits
, UINT8_MAX
);
259 // Variable-length encodings that allow back-patching.
261 [[nodiscard
]] bool writePatchableVarU32(size_t* offset
) {
262 *offset
= bytes_
.length();
263 return writeVarU32(UINT32_MAX
);
265 void patchVarU32(size_t offset
, uint32_t patchBits
) {
266 return patchVarU32(offset
, patchBits
, UINT32_MAX
);
269 // Byte ranges start with an LEB128 length followed by an arbitrary sequence
270 // of bytes. When used for strings, bytes are to be interpreted as utf8.
272 [[nodiscard
]] bool writeBytes(const void* bytes
, uint32_t numBytes
) {
273 return writeVarU32(numBytes
) &&
274 bytes_
.append(reinterpret_cast<const uint8_t*>(bytes
), numBytes
);
277 // A "section" is a contiguous range of bytes that stores its own size so
278 // that it may be trivially skipped without examining the payload. Sections
279 // require backpatching since the size of the section is only known at the
280 // end while the size's varU32 must be stored at the beginning. Immediately
281 // after the section length is the string id of the section.
283 [[nodiscard
]] bool startSection(SectionId id
, size_t* offset
) {
284 MOZ_ASSERT(uint32_t(id
) < 128);
285 return writeVarU32(uint32_t(id
)) && writePatchableVarU32(offset
);
287 void finishSection(size_t offset
) {
288 return patchVarU32(offset
,
289 bytes_
.length() - offset
- varU32ByteLength(offset
));
293 // The Decoder class decodes the bytes in the range it is given during
294 // construction. The client is responsible for keeping the byte range alive as
295 // long as the Decoder is used.
298 const uint8_t* const beg_
;
299 const uint8_t* const end_
;
301 const size_t offsetInModule_
;
303 UniqueCharsVector
* warnings_
;
307 [[nodiscard
]] bool read(T
* out
) {
308 if (bytesRemain() < sizeof(T
)) {
311 memcpy((void*)out
, cur_
, sizeof(T
));
318 MOZ_ASSERT(bytesRemain() >= sizeof(T
));
320 memcpy(&ret
, cur_
, sizeof(T
));
326 void uncheckedRead(T
* ret
) {
327 MOZ_ASSERT(bytesRemain() >= sizeof(T
));
328 memcpy(ret
, cur_
, sizeof(T
));
332 template <typename UInt
>
333 [[nodiscard
]] bool readVarU(UInt
* out
) {
334 DebugOnly
<const uint8_t*> before
= cur_
;
335 const unsigned numBits
= sizeof(UInt
) * CHAR_BIT
;
336 const unsigned remainderBits
= numBits
% 7;
337 const unsigned numBitsInSevens
= numBits
- remainderBits
;
342 if (!readFixedU8(&byte
)) {
345 if (!(byte
& 0x80)) {
346 *out
= u
| UInt(byte
) << shift
;
349 u
|= UInt(byte
& 0x7F) << shift
;
351 } while (shift
!= numBitsInSevens
);
352 if (!readFixedU8(&byte
) || (byte
& (unsigned(-1) << remainderBits
))) {
355 *out
= u
| (UInt(byte
) << numBitsInSevens
);
356 MOZ_ASSERT_IF(sizeof(UInt
) == 4,
357 unsigned(cur_
- before
) <= MaxVarU32DecodedBytes
);
361 template <typename SInt
>
362 [[nodiscard
]] bool readVarS(SInt
* out
) {
363 using UInt
= std::make_unsigned_t
<SInt
>;
364 const unsigned numBits
= sizeof(SInt
) * CHAR_BIT
;
365 const unsigned remainderBits
= numBits
% 7;
366 const unsigned numBitsInSevens
= numBits
- remainderBits
;
371 if (!readFixedU8(&byte
)) {
374 s
|= SInt(byte
& 0x7f) << shift
;
376 if (!(byte
& 0x80)) {
378 s
|= UInt(-1) << shift
;
383 } while (shift
< numBitsInSevens
);
384 if (!remainderBits
|| !readFixedU8(&byte
) || (byte
& 0x80)) {
387 uint8_t mask
= 0x7f & (uint8_t(-1) << remainderBits
);
388 if ((byte
& mask
) != ((byte
& (1 << (remainderBits
- 1))) ? mask
: 0)) {
391 *out
= s
| UInt(byte
) << shift
;
396 Decoder(const uint8_t* begin
, const uint8_t* end
, size_t offsetInModule
,
397 UniqueChars
* error
, UniqueCharsVector
* warnings
= nullptr,
398 bool resilientMode
= false)
402 offsetInModule_(offsetInModule
),
405 resilientMode_(resilientMode
) {
406 MOZ_ASSERT(begin
<= end
);
408 explicit Decoder(const Bytes
& bytes
, size_t offsetInModule
= 0,
409 UniqueChars
* error
= nullptr,
410 UniqueCharsVector
* warnings
= nullptr)
411 : beg_(bytes
.begin()),
414 offsetInModule_(offsetInModule
),
417 resilientMode_(false) {}
419 // These convenience functions use currentOffset() as the errorOffset.
420 bool fail(const char* msg
) { return fail(currentOffset(), msg
); }
421 bool failf(const char* msg
, ...) MOZ_FORMAT_PRINTF(2, 3);
422 void warnf(const char* msg
, ...) MOZ_FORMAT_PRINTF(2, 3);
424 // Report an error at the given offset (relative to the whole module).
425 bool fail(size_t errorOffset
, const char* msg
);
427 UniqueChars
* error() { return error_
; }
436 MOZ_ASSERT(cur_
<= end_
);
439 bool resilientMode() const { return resilientMode_
; }
441 size_t bytesRemain() const {
442 MOZ_ASSERT(end_
>= cur_
);
443 return size_t(end_
- cur_
);
445 // pos must be a value previously returned from currentPosition.
446 void rollbackPosition(const uint8_t* pos
) { cur_
= pos
; }
447 const uint8_t* currentPosition() const { return cur_
; }
448 size_t beginOffset() const { return offsetInModule_
; }
449 size_t currentOffset() const { return offsetInModule_
+ (cur_
- beg_
); }
450 const uint8_t* begin() const { return beg_
; }
451 const uint8_t* end() const { return end_
; }
453 // Peek at the next byte, if it exists, without advancing the position.
455 bool peekByte(uint8_t* byte
) {
463 // Fixed-size encoding operations simply copy the literal bytes (without
464 // attempting to align).
466 [[nodiscard
]] bool readFixedU8(uint8_t* i
) { return read
<uint8_t>(i
); }
467 [[nodiscard
]] bool readFixedU32(uint32_t* u
) { return read
<uint32_t>(u
); }
468 [[nodiscard
]] bool readFixedF32(float* f
) { return read
<float>(f
); }
469 [[nodiscard
]] bool readFixedF64(double* d
) { return read
<double>(d
); }
470 #ifdef ENABLE_WASM_SIMD
471 [[nodiscard
]] bool readFixedV128(V128
* d
) {
472 for (unsigned i
= 0; i
< 16; i
++) {
473 if (!read
<uint8_t>(d
->bytes
+ i
)) {
481 // Variable-length encodings that all use LEB128.
483 [[nodiscard
]] bool readVarU32(uint32_t* out
) {
484 return readVarU
<uint32_t>(out
);
486 [[nodiscard
]] bool readVarS32(int32_t* out
) { return readVarS
<int32_t>(out
); }
487 [[nodiscard
]] bool readVarU64(uint64_t* out
) {
488 return readVarU
<uint64_t>(out
);
490 [[nodiscard
]] bool readVarS64(int64_t* out
) { return readVarS
<int64_t>(out
); }
492 // Value and reference types
494 [[nodiscard
]] ValType
uncheckedReadValType(const TypeContext
& types
);
497 [[nodiscard
]] bool readPackedType(const TypeContext
& types
,
498 const FeatureArgs
& features
, T
* type
);
500 [[nodiscard
]] bool readValType(const TypeContext
& types
,
501 const FeatureArgs
& features
, ValType
* type
);
503 [[nodiscard
]] bool readFieldType(const TypeContext
& types
,
504 const FeatureArgs
& features
,
507 [[nodiscard
]] bool readHeapType(const TypeContext
& types
,
508 const FeatureArgs
& features
, bool nullable
,
511 [[nodiscard
]] bool readRefType(const TypeContext
& types
,
512 const FeatureArgs
& features
, RefType
* type
);
514 // Instruction opcode
516 [[nodiscard
]] bool readOp(OpBytes
* op
);
518 // Instruction immediates for constant instructions
520 [[nodiscard
]] bool readBinary() { return true; }
521 [[nodiscard
]] bool readTypeIndex(uint32_t* typeIndex
);
522 [[nodiscard
]] bool readGlobalIndex(uint32_t* globalIndex
);
523 [[nodiscard
]] bool readFuncIndex(uint32_t* funcIndex
);
524 [[nodiscard
]] bool readI32Const(int32_t* i32
);
525 [[nodiscard
]] bool readI64Const(int64_t* i64
);
526 [[nodiscard
]] bool readF32Const(float* f32
);
527 [[nodiscard
]] bool readF64Const(double* f64
);
528 #ifdef ENABLE_WASM_SIMD
529 [[nodiscard
]] bool readV128Const(V128
* value
);
531 [[nodiscard
]] bool readRefNull(const TypeContext
& types
,
532 const FeatureArgs
& features
, RefType
* type
);
534 // See writeBytes comment.
536 [[nodiscard
]] bool readBytes(uint32_t numBytes
,
537 const uint8_t** bytes
= nullptr) {
541 if (bytesRemain() < numBytes
) {
548 // See "section" description in Encoder.
550 [[nodiscard
]] bool readSectionHeader(uint8_t* id
, SectionRange
* range
);
552 [[nodiscard
]] bool startSection(SectionId id
, ModuleEnvironment
* env
,
553 MaybeSectionRange
* range
,
554 const char* sectionName
);
555 [[nodiscard
]] bool finishSection(const SectionRange
& range
,
556 const char* sectionName
);
558 // Custom sections do not cause validation errors unless the error is in
559 // the section header itself.
561 [[nodiscard
]] bool startCustomSection(const char* expected
,
562 size_t expectedLength
,
563 ModuleEnvironment
* env
,
564 MaybeSectionRange
* range
);
566 template <size_t NameSizeWith0
>
567 [[nodiscard
]] bool startCustomSection(const char (&name
)[NameSizeWith0
],
568 ModuleEnvironment
* env
,
569 MaybeSectionRange
* range
) {
570 MOZ_ASSERT(name
[NameSizeWith0
- 1] == '\0');
571 return startCustomSection(name
, NameSizeWith0
- 1, env
, range
);
574 void finishCustomSection(const char* name
, const SectionRange
& range
);
575 void skipAndFinishCustomSection(const SectionRange
& range
);
577 [[nodiscard
]] bool skipCustomSection(ModuleEnvironment
* env
);
579 // The Name section has its own optional subsections.
581 [[nodiscard
]] bool startNameSubsection(NameType nameType
,
582 Maybe
<uint32_t>* endOffset
);
583 [[nodiscard
]] bool finishNameSubsection(uint32_t endOffset
);
584 [[nodiscard
]] bool skipNameSubsection();
586 // The infallible "unchecked" decoding functions can be used when we are
587 // sure that the bytes are well-formed (by construction or due to previous
590 uint8_t uncheckedReadFixedU8() { return uncheckedRead
<uint8_t>(); }
591 uint32_t uncheckedReadFixedU32() { return uncheckedRead
<uint32_t>(); }
592 void uncheckedReadFixedF32(float* out
) { uncheckedRead
<float>(out
); }
593 void uncheckedReadFixedF64(double* out
) { uncheckedRead
<double>(out
); }
594 template <typename UInt
>
595 UInt
uncheckedReadVarU() {
596 static const unsigned numBits
= sizeof(UInt
) * CHAR_BIT
;
597 static const unsigned remainderBits
= numBits
% 7;
598 static const unsigned numBitsInSevens
= numBits
- remainderBits
;
602 uint8_t byte
= *cur_
++;
603 if (!(byte
& 0x80)) {
604 return decoded
| (UInt(byte
) << shift
);
606 decoded
|= UInt(byte
& 0x7f) << shift
;
608 } while (shift
!= numBitsInSevens
);
609 uint8_t byte
= *cur_
++;
610 MOZ_ASSERT(!(byte
& 0xf0));
611 return decoded
| (UInt(byte
) << numBitsInSevens
);
613 uint32_t uncheckedReadVarU32() { return uncheckedReadVarU
<uint32_t>(); }
614 int32_t uncheckedReadVarS32() {
616 MOZ_ALWAYS_TRUE(readVarS32(&i32
));
619 uint64_t uncheckedReadVarU64() { return uncheckedReadVarU
<uint64_t>(); }
620 int64_t uncheckedReadVarS64() {
622 MOZ_ALWAYS_TRUE(readVarS64(&i64
));
625 Op
uncheckedReadOp() {
626 static_assert(size_t(Op::Limit
) == 256, "fits");
627 uint8_t u8
= uncheckedReadFixedU8();
628 return u8
!= UINT8_MAX
? Op(u8
) : Op(uncheckedReadFixedU8() + UINT8_MAX
);
632 // Value and reference types
634 inline ValType
Decoder::uncheckedReadValType(const TypeContext
& types
) {
635 uint8_t code
= uncheckedReadFixedU8();
637 case uint8_t(TypeCode::FuncRef
):
638 case uint8_t(TypeCode::ExternRef
):
639 return RefType::fromTypeCode(TypeCode(code
), true);
640 case uint8_t(TypeCode::Ref
):
641 case uint8_t(TypeCode::NullableRef
): {
642 bool nullable
= code
== uint8_t(TypeCode::NullableRef
);
647 if ((nextByte
& SLEB128SignMask
) == SLEB128SignBit
) {
648 uint8_t code
= uncheckedReadFixedU8();
649 return RefType::fromTypeCode(TypeCode(code
), nullable
);
652 int32_t x
= uncheckedReadVarS32();
653 const TypeDef
* typeDef
= &types
.type(x
);
654 return RefType::fromTypeDef(typeDef
, nullable
);
657 return ValType::fromNonRefTypeCode(TypeCode(code
));
662 inline bool Decoder::readPackedType(const TypeContext
& types
,
663 const FeatureArgs
& features
, T
* type
) {
664 static_assert(uint8_t(TypeCode::Limit
) <= UINT8_MAX
, "fits");
666 if (!readFixedU8(&code
)) {
667 return fail("expected type code");
670 case uint8_t(TypeCode::V128
): {
671 #ifdef ENABLE_WASM_SIMD
672 if (!features
.simd
) {
673 return fail("v128 not enabled");
675 *type
= T::fromNonRefTypeCode(TypeCode(code
));
681 case uint8_t(TypeCode::FuncRef
):
682 case uint8_t(TypeCode::ExternRef
): {
683 *type
= RefType::fromTypeCode(TypeCode(code
), true);
686 case uint8_t(TypeCode::Ref
):
687 case uint8_t(TypeCode::NullableRef
): {
688 #ifdef ENABLE_WASM_FUNCTION_REFERENCES
689 if (!features
.functionReferences
) {
690 return fail("(ref T) types not enabled");
692 bool nullable
= code
== uint8_t(TypeCode::NullableRef
);
694 if (!readHeapType(types
, features
, nullable
, &refType
)) {
703 case uint8_t(TypeCode::AnyRef
):
704 case uint8_t(TypeCode::I31Ref
):
705 case uint8_t(TypeCode::EqRef
):
706 case uint8_t(TypeCode::StructRef
):
707 case uint8_t(TypeCode::ArrayRef
):
708 case uint8_t(TypeCode::NullFuncRef
):
709 case uint8_t(TypeCode::NullExternRef
):
710 case uint8_t(TypeCode::NullAnyRef
): {
711 #ifdef ENABLE_WASM_GC
713 return fail("gc types not enabled");
715 *type
= RefType::fromTypeCode(TypeCode(code
), true);
722 if (!T::isValidTypeCode(TypeCode(code
))) {
725 *type
= T::fromNonRefTypeCode(TypeCode(code
));
729 return fail("bad type");
732 inline bool Decoder::readValType(const TypeContext
& types
,
733 const FeatureArgs
& features
, ValType
* type
) {
734 return readPackedType
<ValType
>(types
, features
, type
);
737 inline bool Decoder::readFieldType(const TypeContext
& types
,
738 const FeatureArgs
& features
,
740 return readPackedType
<FieldType
>(types
, features
, type
);
743 inline bool Decoder::readHeapType(const TypeContext
& types
,
744 const FeatureArgs
& features
, bool nullable
,
747 if (!peekByte(&nextByte
)) {
748 return fail("expected heap type code");
751 if ((nextByte
& SLEB128SignMask
) == SLEB128SignBit
) {
753 if (!readFixedU8(&code
)) {
758 case uint8_t(TypeCode::FuncRef
):
759 case uint8_t(TypeCode::ExternRef
):
760 *type
= RefType::fromTypeCode(TypeCode(code
), nullable
);
762 #ifdef ENABLE_WASM_GC
763 case uint8_t(TypeCode::AnyRef
):
764 case uint8_t(TypeCode::I31Ref
):
765 case uint8_t(TypeCode::EqRef
):
766 case uint8_t(TypeCode::StructRef
):
767 case uint8_t(TypeCode::ArrayRef
):
768 case uint8_t(TypeCode::NullFuncRef
):
769 case uint8_t(TypeCode::NullExternRef
):
770 case uint8_t(TypeCode::NullAnyRef
):
772 return fail("gc types not enabled");
774 *type
= RefType::fromTypeCode(TypeCode(code
), nullable
);
778 return fail("invalid heap type");
782 #ifdef ENABLE_WASM_FUNCTION_REFERENCES
783 if (features
.functionReferences
) {
785 if (!readVarS32(&x
) || x
< 0 || uint32_t(x
) >= types
.length()) {
786 return fail("invalid heap type index");
788 const TypeDef
* typeDef
= &types
.type(x
);
789 *type
= RefType::fromTypeDef(typeDef
, nullable
);
793 return fail("invalid heap type");
796 inline bool Decoder::readRefType(const TypeContext
& types
,
797 const FeatureArgs
& features
, RefType
* type
) {
799 if (!readValType(types
, features
, &valType
)) {
802 if (!valType
.isRefType()) {
803 return fail("bad type");
805 *type
= valType
.refType();
809 // Instruction opcode
811 inline bool Decoder::readOp(OpBytes
* op
) {
812 static_assert(size_t(Op::Limit
) == 256, "fits");
814 if (!readFixedU8(&u8
)) {
818 if (MOZ_LIKELY(!IsPrefixByte(u8
))) {
821 return readVarU32(&op
->b1
);
824 // Instruction immediates for constant instructions
826 inline bool Decoder::readTypeIndex(uint32_t* typeIndex
) {
827 if (!readVarU32(typeIndex
)) {
828 return fail("unable to read type index");
833 inline bool Decoder::readGlobalIndex(uint32_t* globalIndex
) {
834 if (!readVarU32(globalIndex
)) {
835 return fail("unable to read global index");
840 inline bool Decoder::readFuncIndex(uint32_t* funcIndex
) {
841 if (!readVarU32(funcIndex
)) {
842 return fail("unable to read function index");
847 inline bool Decoder::readI32Const(int32_t* i32
) {
848 if (!readVarS32(i32
)) {
849 return fail("failed to read I32 constant");
854 inline bool Decoder::readI64Const(int64_t* i64
) {
855 if (!readVarS64(i64
)) {
856 return fail("failed to read I64 constant");
861 inline bool Decoder::readF32Const(float* f32
) {
862 if (!readFixedF32(f32
)) {
863 return fail("failed to read F32 constant");
868 inline bool Decoder::readF64Const(double* f64
) {
869 if (!readFixedF64(f64
)) {
870 return fail("failed to read F64 constant");
#ifdef ENABLE_WASM_SIMD
inline bool Decoder::readV128Const(V128* value) {
  if (!readFixedV128(value)) {
    return fail("unable to read V128 constant");
  }
  return true;
}
#endif
884 inline bool Decoder::readRefNull(const TypeContext
& types
,
885 const FeatureArgs
& features
, RefType
* type
) {
886 return readHeapType(types
, features
, true, type
);
#endif  // wasm_binary_h