/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
 * vim: set ts=8 sts=2 et sw=2 tw=80:
 *
 * Copyright 2021 Mozilla Foundation
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef wasm_binary_h
#define wasm_binary_h

#include "mozilla/DebugOnly.h"
#include "mozilla/Maybe.h"

#include <type_traits>

#include "js/WasmFeatures.h"

#include "wasm/WasmCompile.h"
#include "wasm/WasmCompileArgs.h"
#include "wasm/WasmConstants.h"
#include "wasm/WasmTypeDecls.h"
#include "wasm/WasmTypeDef.h"
#include "wasm/WasmValType.h"

namespace js {
namespace wasm {

using mozilla::DebugOnly;
using mozilla::Maybe;

struct ModuleEnvironment;

// The Opcode compactly and safely represents the primary opcode plus any
// extension, with convenient predicates and accessors.

class Opcode {
  uint32_t bits_;

 public:
  MOZ_IMPLICIT Opcode(Op op) : bits_(uint32_t(op)) {
    static_assert(size_t(Op::Limit) == 256, "fits");
    MOZ_ASSERT(size_t(op) < size_t(Op::Limit));
  }
  MOZ_IMPLICIT Opcode(MiscOp op)
      : bits_((uint32_t(op) << 8) | uint32_t(Op::MiscPrefix)) {
    static_assert(size_t(MiscOp::Limit) <= 0xFFFFFF, "fits");
    MOZ_ASSERT(size_t(op) < size_t(MiscOp::Limit));
  }
  MOZ_IMPLICIT Opcode(ThreadOp op)
      : bits_((uint32_t(op) << 8) | uint32_t(Op::ThreadPrefix)) {
    static_assert(size_t(ThreadOp::Limit) <= 0xFFFFFF, "fits");
    MOZ_ASSERT(size_t(op) < size_t(ThreadOp::Limit));
  }
  MOZ_IMPLICIT Opcode(MozOp op)
      : bits_((uint32_t(op) << 8) | uint32_t(Op::MozPrefix)) {
    static_assert(size_t(MozOp::Limit) <= 0xFFFFFF, "fits");
    MOZ_ASSERT(size_t(op) < size_t(MozOp::Limit));
  }
  MOZ_IMPLICIT Opcode(SimdOp op)
      : bits_((uint32_t(op) << 8) | uint32_t(Op::SimdPrefix)) {
    static_assert(size_t(SimdOp::Limit) <= 0xFFFFFF, "fits");
    MOZ_ASSERT(size_t(op) < size_t(SimdOp::Limit));
  }
  MOZ_IMPLICIT Opcode(GcOp op)
      : bits_((uint32_t(op) << 8) | uint32_t(Op::GcPrefix)) {
    static_assert(size_t(GcOp::Limit) <= 0xFFFFFF, "fits");
    MOZ_ASSERT(size_t(op) < size_t(GcOp::Limit));
  }

  bool isOp() const { return bits_ < uint32_t(Op::FirstPrefix); }
  bool isMisc() const { return (bits_ & 255) == uint32_t(Op::MiscPrefix); }
  bool isThread() const { return (bits_ & 255) == uint32_t(Op::ThreadPrefix); }
  bool isMoz() const { return (bits_ & 255) == uint32_t(Op::MozPrefix); }
  bool isSimd() const { return (bits_ & 255) == uint32_t(Op::SimdPrefix); }
  bool isGc() const { return (bits_ & 255) == uint32_t(Op::GcPrefix); }

  Op asOp() const {
    MOZ_ASSERT(isOp());
    return Op(bits_);
  }
  MiscOp asMisc() const {
    MOZ_ASSERT(isMisc());
    return MiscOp(bits_ >> 8);
  }
  ThreadOp asThread() const {
    MOZ_ASSERT(isThread());
    return ThreadOp(bits_ >> 8);
  }
  MozOp asMoz() const {
    MOZ_ASSERT(isMoz());
    return MozOp(bits_ >> 8);
  }
  SimdOp asSimd() const {
    MOZ_ASSERT(isSimd());
    return SimdOp(bits_ >> 8);
  }
  GcOp asGc() const {
    MOZ_ASSERT(isGc());
    return GcOp(bits_ >> 8);
  }

  uint32_t bits() const { return bits_; }

  bool operator==(const Opcode& that) const { return bits_ == that.bits_; }
  bool operator!=(const Opcode& that) const { return bits_ != that.bits_; }
};
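
// For example, an Opcode built from a MiscOp stores uint32_t(Op::MiscPrefix)
// in the low byte of bits_ and the MiscOp value in the upper 24 bits, so
// isMisc() tests only the low byte and asMisc() recovers the MiscOp value by
// shifting right by 8.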

// This struct captures the bytecode offset of a section's payload (so not
// including the header) and the size of the payload.

struct SectionRange {
  uint32_t start;
  uint32_t size;

  uint32_t end() const { return start + size; }
  bool operator==(const SectionRange& rhs) const {
    return start == rhs.start && size == rhs.size;
  }
};

using MaybeSectionRange = Maybe<SectionRange>;

// The Encoder class appends bytes to the Bytes object it is given during
// construction. The client is responsible for the Bytes's lifetime and must
// keep the Bytes alive as long as the Encoder is used.

class Encoder {
  Bytes& bytes_;
  const TypeContext* types_;

  template <class T>
  [[nodiscard]] bool write(const T& v) {
    return bytes_.append(reinterpret_cast<const uint8_t*>(&v), sizeof(T));
  }
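
  // writeVarU/writeVarS below emit (unsigned/signed) LEB128: each byte holds
  // seven payload bits and the 0x80 bit marks continuation. For example, the
  // unsigned value 624485 (0x98765) is written as the three bytes
  // 0xE5 0x8E 0x26.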

  template <typename UInt>
  [[nodiscard]] bool writeVarU(UInt i) {
    do {
      uint8_t byte = i & 0x7f;
      i >>= 7;
      if (i != 0) {
        byte |= 0x80;
      }
      if (!bytes_.append(byte)) {
        return false;
      }
    } while (i != 0);
    return true;
  }

  template <typename SInt>
  [[nodiscard]] bool writeVarS(SInt i) {
    bool done;
    do {
      uint8_t byte = i & 0x7f;
      i >>= 7;
      done = ((i == 0) && !(byte & 0x40)) || ((i == -1) && (byte & 0x40));
      if (!done) {
        byte |= 0x80;
      }
      if (!bytes_.append(byte)) {
        return false;
      }
    } while (!done);
    return true;
  }

  void patchVarU32(size_t offset, uint32_t patchBits, uint32_t assertBits) {
    do {
      uint8_t assertByte = assertBits & 0x7f;
      uint8_t patchByte = patchBits & 0x7f;
      assertBits >>= 7;
      patchBits >>= 7;
      if (assertBits != 0) {
        assertByte |= 0x80;
        patchByte |= 0x80;
      }
      MOZ_ASSERT(assertByte == bytes_[offset]);
      (void)assertByte;
      bytes_[offset] = patchByte;
      offset++;
    } while (assertBits != 0);
  }

  void patchFixedU7(size_t offset, uint8_t patchBits, uint8_t assertBits) {
    MOZ_ASSERT(patchBits <= uint8_t(INT8_MAX));
    patchFixedU8(offset, patchBits, assertBits);
  }

  void patchFixedU8(size_t offset, uint8_t patchBits, uint8_t assertBits) {
    MOZ_ASSERT(bytes_[offset] == assertBits);
    bytes_[offset] = patchBits;
  }

  uint32_t varU32ByteLength(size_t offset) const {
    size_t start = offset;
    while (bytes_[offset] & 0x80) {
      offset++;
    }
    return offset - start + 1;
  }

 public:
  explicit Encoder(Bytes& bytes) : bytes_(bytes), types_(nullptr) {
    MOZ_ASSERT(empty());
  }
  explicit Encoder(Bytes& bytes, const TypeContext& types)
      : bytes_(bytes), types_(&types) {
    MOZ_ASSERT(empty());
  }

  size_t currentOffset() const { return bytes_.length(); }
  bool empty() const { return currentOffset() == 0; }

  // Fixed-size encoding operations simply copy the literal bytes (without
  // attempting to align).

  [[nodiscard]] bool writeFixedU7(uint8_t i) {
    MOZ_ASSERT(i <= uint8_t(INT8_MAX));
    return writeFixedU8(i);
  }
  [[nodiscard]] bool writeFixedU8(uint8_t i) { return write<uint8_t>(i); }
  [[nodiscard]] bool writeFixedU32(uint32_t i) { return write<uint32_t>(i); }
  [[nodiscard]] bool writeFixedF32(float f) { return write<float>(f); }
  [[nodiscard]] bool writeFixedF64(double d) { return write<double>(d); }

  // Variable-length encodings that all use LEB128.

  [[nodiscard]] bool writeVarU32(uint32_t i) { return writeVarU<uint32_t>(i); }
  [[nodiscard]] bool writeVarS32(int32_t i) { return writeVarS<int32_t>(i); }
  [[nodiscard]] bool writeVarU64(uint64_t i) { return writeVarU<uint64_t>(i); }
  [[nodiscard]] bool writeVarS64(int64_t i) { return writeVarS<int64_t>(i); }
  [[nodiscard]] bool writeValType(ValType type) {
    static_assert(size_t(TypeCode::Limit) <= UINT8_MAX, "fits");
    if (type.isTypeRef()) {
      MOZ_RELEASE_ASSERT(types_,
                         "writeValType is used, but types were not specified.");
      if (!writeFixedU8(uint8_t(type.isNullable() ? TypeCode::NullableRef
                                                  : TypeCode::Ref))) {
        return false;
      }
      uint32_t typeIndex = types_->indexOf(*type.typeDef());
      // Encode positive LEB S33 as S64.
      return writeVarS64(typeIndex);
    }
    TypeCode tc = type.packed().typeCode();
    MOZ_ASSERT(size_t(tc) < size_t(TypeCode::Limit));
    return writeFixedU8(uint8_t(tc));
  }
  [[nodiscard]] bool writeOp(Opcode opcode) {
    // The Opcode constructor has asserted that `opcode` is meaningful, so no
    // further correctness checking is necessary here.
    uint32_t bits = opcode.bits();
    if (!writeFixedU8(bits & 255)) {
      return false;
    }
    if (opcode.isOp()) {
      return true;
    }
    return writeVarU32(bits >> 8);
  }

  // Fixed-length encodings that allow back-patching.

  [[nodiscard]] bool writePatchableFixedU7(size_t* offset) {
    *offset = bytes_.length();
    return writeFixedU8(UINT8_MAX);
  }
  void patchFixedU7(size_t offset, uint8_t patchBits) {
    return patchFixedU7(offset, patchBits, UINT8_MAX);
  }

  // Variable-length encodings that allow back-patching.

  [[nodiscard]] bool writePatchableVarU32(size_t* offset) {
    *offset = bytes_.length();
    return writeVarU32(UINT32_MAX);
  }
  void patchVarU32(size_t offset, uint32_t patchBits) {
    return patchVarU32(offset, patchBits, UINT32_MAX);
  }

  // Byte ranges start with an LEB128 length followed by an arbitrary sequence
  // of bytes. When used for strings, bytes are to be interpreted as utf8.

  [[nodiscard]] bool writeBytes(const void* bytes, uint32_t numBytes) {
    return writeVarU32(numBytes) &&
           bytes_.append(reinterpret_cast<const uint8_t*>(bytes), numBytes);
  }

  // A "section" is a contiguous range of bytes that stores its own size so
  // that it may be trivially skipped without examining the payload. Sections
  // require backpatching since the size of the section is only known at the
  // end while the size's varU32 must be stored at the beginning. Immediately
  // after the section length is the string id of the section.

  [[nodiscard]] bool startSection(SectionId id, size_t* offset) {
    MOZ_ASSERT(uint32_t(id) < 128);
    return writeVarU32(uint32_t(id)) && writePatchableVarU32(offset);
  }
  void finishSection(size_t offset) {
    return patchVarU32(offset,
                       bytes_.length() - offset - varU32ByteLength(offset));
  }
};
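
// Example usage of the Encoder (an illustrative sketch, not code from this
// header; SectionId is assumed to come from WasmConstants.h): the section's
// payload size is reserved by startSection() and backpatched by
// finishSection() once the payload has been written.
//
//   Bytes bytes;
//   Encoder e(bytes);
//   size_t sizeAt;
//   if (!e.startSection(SectionId::Custom, &sizeAt) || !e.writeVarU32(42)) {
//     return false;  // out of memory
//   }
//   e.finishSection(sizeAt);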

// The Decoder class decodes the bytes in the range it is given during
// construction. The client is responsible for keeping the byte range alive as
// long as the Decoder is used.

class Decoder {
  const uint8_t* const beg_;
  const uint8_t* const end_;
  const uint8_t* cur_;
  const size_t offsetInModule_;
  UniqueChars* error_;
  UniqueCharsVector* warnings_;
  bool resilientMode_;

  template <class T>
  [[nodiscard]] bool read(T* out) {
    if (bytesRemain() < sizeof(T)) {
      return false;
    }
    memcpy((void*)out, cur_, sizeof(T));
    cur_ += sizeof(T);
    return true;
  }

  template <class T>
  T uncheckedRead() {
    MOZ_ASSERT(bytesRemain() >= sizeof(T));
    T ret;
    memcpy(&ret, cur_, sizeof(T));
    cur_ += sizeof(T);
    return ret;
  }

  template <class T>
  void uncheckedRead(T* ret) {
    MOZ_ASSERT(bytesRemain() >= sizeof(T));
    memcpy(ret, cur_, sizeof(T));
    cur_ += sizeof(T);
  }
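
  // readVarU/readVarS below decode LEB128: payload bytes are accumulated
  // seven bits at a time until a byte without the 0x80 continuation bit is
  // seen, and the final byte is checked so that no bits beyond the width of
  // the target integer type can be set.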

  template <typename UInt>
  [[nodiscard]] bool readVarU(UInt* out) {
    DebugOnly<const uint8_t*> before = cur_;
    const unsigned numBits = sizeof(UInt) * CHAR_BIT;
    const unsigned remainderBits = numBits % 7;
    const unsigned numBitsInSevens = numBits - remainderBits;
    UInt u = 0;
    uint8_t byte;
    UInt shift = 0;
    do {
      if (!readFixedU8(&byte)) {
        return false;
      }
      if (!(byte & 0x80)) {
        *out = u | UInt(byte) << shift;
        return true;
      }
      u |= UInt(byte & 0x7F) << shift;
      shift += 7;
    } while (shift != numBitsInSevens);
    if (!readFixedU8(&byte) || (byte & (unsigned(-1) << remainderBits))) {
      return false;
    }
    *out = u | (UInt(byte) << numBitsInSevens);
    MOZ_ASSERT_IF(sizeof(UInt) == 4,
                  unsigned(cur_ - before) <= MaxVarU32DecodedBytes);
    return true;
  }

  template <typename SInt>
  [[nodiscard]] bool readVarS(SInt* out) {
    using UInt = std::make_unsigned_t<SInt>;
    const unsigned numBits = sizeof(SInt) * CHAR_BIT;
    const unsigned remainderBits = numBits % 7;
    const unsigned numBitsInSevens = numBits - remainderBits;
    SInt s = 0;
    uint8_t byte;
    unsigned shift = 0;
    do {
      if (!readFixedU8(&byte)) {
        return false;
      }
      s |= SInt(byte & 0x7f) << shift;
      shift += 7;
      if (!(byte & 0x80)) {
        if (byte & 0x40) {
          s |= UInt(-1) << shift;
        }
        *out = s;
        return true;
      }
    } while (shift < numBitsInSevens);
    if (!remainderBits || !readFixedU8(&byte) || (byte & 0x80)) {
      return false;
    }
    uint8_t mask = 0x7f & (uint8_t(-1) << remainderBits);
    if ((byte & mask) != ((byte & (1 << (remainderBits - 1))) ? mask : 0)) {
      return false;
    }
    *out = s | UInt(byte) << shift;
    return true;
  }
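
  // As a worked signed example (standard SLEB128): -123456 decodes from the
  // bytes 0xC0 0xBB 0x78; the last byte has its 0x40 bit set, which triggers
  // the sign extension in readVarS above.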

 public:
  Decoder(const uint8_t* begin, const uint8_t* end, size_t offsetInModule,
          UniqueChars* error, UniqueCharsVector* warnings = nullptr,
          bool resilientMode = false)
      : beg_(begin),
        end_(end),
        cur_(begin),
        offsetInModule_(offsetInModule),
        error_(error),
        warnings_(warnings),
        resilientMode_(resilientMode) {
    MOZ_ASSERT(begin <= end);
  }
  explicit Decoder(const Bytes& bytes, size_t offsetInModule = 0,
                   UniqueChars* error = nullptr,
                   UniqueCharsVector* warnings = nullptr)
      : beg_(bytes.begin()),
        end_(bytes.end()),
        cur_(bytes.begin()),
        offsetInModule_(offsetInModule),
        error_(error),
        warnings_(warnings),
        resilientMode_(false) {}

  // These convenience functions use currentOffset() as the errorOffset.
  bool fail(const char* msg) { return fail(currentOffset(), msg); }
  bool failf(const char* msg, ...) MOZ_FORMAT_PRINTF(2, 3);
  void warnf(const char* msg, ...) MOZ_FORMAT_PRINTF(2, 3);

  // Report an error at the given offset (relative to the whole module).
  bool fail(size_t errorOffset, const char* msg);

  UniqueChars* error() { return error_; }

  void clearError() {
    if (error_) {
      error_->reset();
    }
  }

  bool done() const {
    MOZ_ASSERT(cur_ <= end_);
    return cur_ == end_;
  }
  bool resilientMode() const { return resilientMode_; }

  size_t bytesRemain() const {
    MOZ_ASSERT(end_ >= cur_);
    return size_t(end_ - cur_);
  }
  // pos must be a value previously returned from currentPosition.
  void rollbackPosition(const uint8_t* pos) { cur_ = pos; }
  const uint8_t* currentPosition() const { return cur_; }
  size_t beginOffset() const { return offsetInModule_; }
  size_t currentOffset() const { return offsetInModule_ + (cur_ - beg_); }
  const uint8_t* begin() const { return beg_; }
  const uint8_t* end() const { return end_; }

  // Peek at the next byte, if it exists, without advancing the position.

  bool peekByte(uint8_t* byte) {
    if (done()) {
      return false;
    }
    *byte = *cur_;
    return true;
  }

  // Fixed-size encoding operations simply copy the literal bytes (without
  // attempting to align).

  [[nodiscard]] bool readFixedU8(uint8_t* i) { return read<uint8_t>(i); }
  [[nodiscard]] bool readFixedU32(uint32_t* u) { return read<uint32_t>(u); }
  [[nodiscard]] bool readFixedF32(float* f) { return read<float>(f); }
  [[nodiscard]] bool readFixedF64(double* d) { return read<double>(d); }
#ifdef ENABLE_WASM_SIMD
  [[nodiscard]] bool readFixedV128(V128* d) {
    for (unsigned i = 0; i < 16; i++) {
      if (!read<uint8_t>(d->bytes + i)) {
        return false;
      }
    }
    return true;
  }
#endif

  // Variable-length encodings that all use LEB128.

  [[nodiscard]] bool readVarU32(uint32_t* out) {
    return readVarU<uint32_t>(out);
  }
  [[nodiscard]] bool readVarS32(int32_t* out) { return readVarS<int32_t>(out); }
  [[nodiscard]] bool readVarU64(uint64_t* out) {
    return readVarU<uint64_t>(out);
  }
  [[nodiscard]] bool readVarS64(int64_t* out) { return readVarS<int64_t>(out); }

  // Value and reference types

  [[nodiscard]] ValType uncheckedReadValType(const TypeContext& types);

  template <class T>
  [[nodiscard]] bool readPackedType(const TypeContext& types,
                                    const FeatureArgs& features, T* type);

  [[nodiscard]] bool readValType(const TypeContext& types,
                                 const FeatureArgs& features, ValType* type);

  [[nodiscard]] bool readStorageType(const TypeContext& types,
                                     const FeatureArgs& features,
                                     StorageType* type);

  [[nodiscard]] bool readHeapType(const TypeContext& types,
                                  const FeatureArgs& features, bool nullable,
                                  RefType* type);

  [[nodiscard]] bool readRefType(const TypeContext& types,
                                 const FeatureArgs& features, RefType* type);

  // Instruction opcode

  [[nodiscard]] bool readOp(OpBytes* op);

  // Instruction immediates for constant instructions

  [[nodiscard]] bool readBinary() { return true; }
  [[nodiscard]] bool readTypeIndex(uint32_t* typeIndex);
  [[nodiscard]] bool readGlobalIndex(uint32_t* globalIndex);
  [[nodiscard]] bool readFuncIndex(uint32_t* funcIndex);
  [[nodiscard]] bool readI32Const(int32_t* i32);
  [[nodiscard]] bool readI64Const(int64_t* i64);
  [[nodiscard]] bool readF32Const(float* f32);
  [[nodiscard]] bool readF64Const(double* f64);
#ifdef ENABLE_WASM_SIMD
  [[nodiscard]] bool readV128Const(V128* value);
#endif
  [[nodiscard]] bool readRefNull(const TypeContext& types,
                                 const FeatureArgs& features, RefType* type);

  // See writeBytes comment.

  [[nodiscard]] bool readBytes(uint32_t numBytes,
                               const uint8_t** bytes = nullptr) {
    if (bytes) {
      *bytes = cur_;
    }
    if (bytesRemain() < numBytes) {
      return false;
    }
    cur_ += numBytes;
    return true;
  }

  // See "section" description in Encoder.

  [[nodiscard]] bool readSectionHeader(uint8_t* id, SectionRange* range);

  [[nodiscard]] bool startSection(SectionId id, ModuleEnvironment* env,
                                  MaybeSectionRange* range,
                                  const char* sectionName);
  [[nodiscard]] bool finishSection(const SectionRange& range,
                                   const char* sectionName);

  // Custom sections do not cause validation errors unless the error is in
  // the section header itself.

  [[nodiscard]] bool startCustomSection(const char* expected,
                                        size_t expectedLength,
                                        ModuleEnvironment* env,
                                        MaybeSectionRange* range);

  template <size_t NameSizeWith0>
  [[nodiscard]] bool startCustomSection(const char (&name)[NameSizeWith0],
                                        ModuleEnvironment* env,
                                        MaybeSectionRange* range) {
    MOZ_ASSERT(name[NameSizeWith0 - 1] == '\0');
    return startCustomSection(name, NameSizeWith0 - 1, env, range);
  }

  void finishCustomSection(const char* name, const SectionRange& range);
  void skipAndFinishCustomSection(const SectionRange& range);

  [[nodiscard]] bool skipCustomSection(ModuleEnvironment* env);

  // The Name section has its own optional subsections.

  [[nodiscard]] bool startNameSubsection(NameType nameType,
                                         Maybe<uint32_t>* endOffset);
  [[nodiscard]] bool finishNameSubsection(uint32_t endOffset);
  [[nodiscard]] bool skipNameSubsection();

  // The infallible "unchecked" decoding functions can be used when we are
  // sure that the bytes are well-formed (by construction or due to previous
  // validation).

  uint8_t uncheckedReadFixedU8() { return uncheckedRead<uint8_t>(); }
  uint32_t uncheckedReadFixedU32() { return uncheckedRead<uint32_t>(); }
  void uncheckedReadFixedF32(float* out) { uncheckedRead<float>(out); }
  void uncheckedReadFixedF64(double* out) { uncheckedRead<double>(out); }
  template <typename UInt>
  UInt uncheckedReadVarU() {
    static const unsigned numBits = sizeof(UInt) * CHAR_BIT;
    static const unsigned remainderBits = numBits % 7;
    static const unsigned numBitsInSevens = numBits - remainderBits;
    UInt decoded = 0;
    uint32_t shift = 0;
    do {
      uint8_t byte = *cur_++;
      if (!(byte & 0x80)) {
        return decoded | (UInt(byte) << shift);
      }
      decoded |= UInt(byte & 0x7f) << shift;
      shift += 7;
    } while (shift != numBitsInSevens);
    uint8_t byte = *cur_++;
    MOZ_ASSERT(!(byte & 0xf0));
    return decoded | (UInt(byte) << numBitsInSevens);
  }
  uint32_t uncheckedReadVarU32() { return uncheckedReadVarU<uint32_t>(); }
  int32_t uncheckedReadVarS32() {
    int32_t i32 = 0;
    MOZ_ALWAYS_TRUE(readVarS32(&i32));
    return i32;
  }
  uint64_t uncheckedReadVarU64() { return uncheckedReadVarU<uint64_t>(); }
  int64_t uncheckedReadVarS64() {
    int64_t i64 = 0;
    MOZ_ALWAYS_TRUE(readVarS64(&i64));
    return i64;
  }
  Op uncheckedReadOp() {
    static_assert(size_t(Op::Limit) == 256, "fits");
    uint8_t u8 = uncheckedReadFixedU8();
    return u8 != UINT8_MAX ? Op(u8) : Op(uncheckedReadFixedU8() + UINT8_MAX);
  }
};
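
// Example usage of the Decoder (an illustrative sketch, not code from this
// header; MagicNumber and EncodingVersion are assumed to come from
// WasmConstants.h): checking the 8-byte module preamble.
//
//   Decoder d(bytes, 0, &error);
//   uint32_t magic, version;
//   if (!d.readFixedU32(&magic) || magic != MagicNumber ||
//       !d.readFixedU32(&version) || version != EncodingVersion) {
//     return d.fail("bad module preamble");
//   }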

// Value and reference types

inline ValType Decoder::uncheckedReadValType(const TypeContext& types) {
  uint8_t code = uncheckedReadFixedU8();
  switch (code) {
    case uint8_t(TypeCode::FuncRef):
    case uint8_t(TypeCode::ExternRef):
    case uint8_t(TypeCode::ExnRef):
      return RefType::fromTypeCode(TypeCode(code), true);
    case uint8_t(TypeCode::Ref):
    case uint8_t(TypeCode::NullableRef): {
      bool nullable = code == uint8_t(TypeCode::NullableRef);

      uint8_t nextByte;
      peekByte(&nextByte);

      if ((nextByte & SLEB128SignMask) == SLEB128SignBit) {
        uint8_t code = uncheckedReadFixedU8();
        return RefType::fromTypeCode(TypeCode(code), nullable);
      }

      int32_t x = uncheckedReadVarS32();
      const TypeDef* typeDef = &types.type(x);
      return RefType::fromTypeDef(typeDef, nullable);
    }
    default:
      return ValType::fromNonRefTypeCode(TypeCode(code));
  }
}

template <class T>
inline bool Decoder::readPackedType(const TypeContext& types,
                                    const FeatureArgs& features, T* type) {
  static_assert(uint8_t(TypeCode::Limit) <= UINT8_MAX, "fits");
  uint8_t code;
  if (!readFixedU8(&code)) {
    return fail("expected type code");
  }
  switch (code) {
    case uint8_t(TypeCode::V128): {
#ifdef ENABLE_WASM_SIMD
      if (!features.simd) {
        return fail("v128 not enabled");
      }
      *type = T::fromNonRefTypeCode(TypeCode(code));
      return true;
#else
      break;
#endif
    }
    case uint8_t(TypeCode::FuncRef):
    case uint8_t(TypeCode::ExternRef): {
      *type = RefType::fromTypeCode(TypeCode(code), true);
      return true;
    }
    case uint8_t(TypeCode::ExnRef): {
      if (!features.exnref) {
        return fail("exnref not enabled");
      }
      *type = RefType::fromTypeCode(TypeCode(code), true);
      return true;
    }
    case uint8_t(TypeCode::Ref):
    case uint8_t(TypeCode::NullableRef): {
#ifdef ENABLE_WASM_FUNCTION_REFERENCES
      if (!features.functionReferences) {
        return fail("(ref T) types not enabled");
      }
      bool nullable = code == uint8_t(TypeCode::NullableRef);
      RefType refType;
      if (!readHeapType(types, features, nullable, &refType)) {
        return false;
      }
      *type = refType;
      return true;
#else
      break;
#endif
    }
    case uint8_t(TypeCode::AnyRef):
    case uint8_t(TypeCode::I31Ref):
    case uint8_t(TypeCode::EqRef):
    case uint8_t(TypeCode::StructRef):
    case uint8_t(TypeCode::ArrayRef):
    case uint8_t(TypeCode::NullFuncRef):
    case uint8_t(TypeCode::NullExternRef):
    case uint8_t(TypeCode::NullAnyRef): {
#ifdef ENABLE_WASM_GC
      if (!features.gc) {
        return fail("gc types not enabled");
      }
      *type = RefType::fromTypeCode(TypeCode(code), true);
      return true;
#else
      break;
#endif
    }
    default: {
      if (!T::isValidTypeCode(TypeCode(code))) {
        break;
      }
      *type = T::fromNonRefTypeCode(TypeCode(code));
      return true;
    }
  }
  return fail("bad type");
}

inline bool Decoder::readValType(const TypeContext& types,
                                 const FeatureArgs& features, ValType* type) {
  return readPackedType<ValType>(types, features, type);
}

inline bool Decoder::readStorageType(const TypeContext& types,
                                     const FeatureArgs& features,
                                     StorageType* type) {
  return readPackedType<StorageType>(types, features, type);
}
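
// A heap type is encoded as a signed LEB128 (s33) value: abstract heap types
// are negative values whose first byte therefore has the sign bit set (the
// SLEB128SignMask test below), while non-negative values are indices into the
// module's type section.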

inline bool Decoder::readHeapType(const TypeContext& types,
                                  const FeatureArgs& features, bool nullable,
                                  RefType* type) {
  uint8_t nextByte;
  if (!peekByte(&nextByte)) {
    return fail("expected heap type code");
  }

  if ((nextByte & SLEB128SignMask) == SLEB128SignBit) {
    uint8_t code;
    if (!readFixedU8(&code)) {
      return false;
    }

    switch (code) {
      case uint8_t(TypeCode::FuncRef):
      case uint8_t(TypeCode::ExternRef):
        *type = RefType::fromTypeCode(TypeCode(code), nullable);
        return true;
      case uint8_t(TypeCode::ExnRef): {
        if (!features.exnref) {
          return fail("exnref not enabled");
        }
        *type = RefType::fromTypeCode(TypeCode(code), nullable);
        return true;
      }
#ifdef ENABLE_WASM_GC
      case uint8_t(TypeCode::AnyRef):
      case uint8_t(TypeCode::I31Ref):
      case uint8_t(TypeCode::EqRef):
      case uint8_t(TypeCode::StructRef):
      case uint8_t(TypeCode::ArrayRef):
      case uint8_t(TypeCode::NullFuncRef):
      case uint8_t(TypeCode::NullExternRef):
      case uint8_t(TypeCode::NullAnyRef):
        if (!features.gc) {
          return fail("gc types not enabled");
        }
        *type = RefType::fromTypeCode(TypeCode(code), nullable);
        return true;
#endif
      default:
        return fail("invalid heap type");
    }
  }

#ifdef ENABLE_WASM_FUNCTION_REFERENCES
  if (features.functionReferences) {
    int32_t x;
    if (!readVarS32(&x) || x < 0 || uint32_t(x) >= types.length()) {
      return fail("invalid heap type index");
    }
    const TypeDef* typeDef = &types.type(x);
    *type = RefType::fromTypeDef(typeDef, nullable);
    return true;
  }
#endif
  return fail("invalid heap type");
}

inline bool Decoder::readRefType(const TypeContext& types,
                                 const FeatureArgs& features, RefType* type) {
  ValType valType;
  if (!readValType(types, features, &valType)) {
    return false;
  }
  if (!valType.isRefType()) {
    return fail("bad type");
  }
  *type = valType.refType();
  return true;
}

// Instruction opcode

inline bool Decoder::readOp(OpBytes* op) {
  static_assert(size_t(Op::Limit) == 256, "fits");
  uint8_t u8;
  if (!readFixedU8(&u8)) {
    return false;
  }
  op->b0 = u8;
  if (MOZ_LIKELY(!IsPrefixByte(u8))) {
    return true;
  }
  return readVarU32(&op->b1);
}

// Instruction immediates for constant instructions

inline bool Decoder::readTypeIndex(uint32_t* typeIndex) {
  if (!readVarU32(typeIndex)) {
    return fail("unable to read type index");
  }
  return true;
}

inline bool Decoder::readGlobalIndex(uint32_t* globalIndex) {
  if (!readVarU32(globalIndex)) {
    return fail("unable to read global index");
  }
  return true;
}

inline bool Decoder::readFuncIndex(uint32_t* funcIndex) {
  if (!readVarU32(funcIndex)) {
    return fail("unable to read function index");
  }
  return true;
}

inline bool Decoder::readI32Const(int32_t* i32) {
  if (!readVarS32(i32)) {
    return fail("failed to read I32 constant");
  }
  return true;
}

inline bool Decoder::readI64Const(int64_t* i64) {
  if (!readVarS64(i64)) {
    return fail("failed to read I64 constant");
  }
  return true;
}

inline bool Decoder::readF32Const(float* f32) {
  if (!readFixedF32(f32)) {
    return fail("failed to read F32 constant");
  }
  return true;
}

inline bool Decoder::readF64Const(double* f64) {
  if (!readFixedF64(f64)) {
    return fail("failed to read F64 constant");
  }
  return true;
}

#ifdef ENABLE_WASM_SIMD
inline bool Decoder::readV128Const(V128* value) {
  if (!readFixedV128(value)) {
    return fail("unable to read V128 constant");
  }
  return true;
}
#endif

inline bool Decoder::readRefNull(const TypeContext& types,
                                 const FeatureArgs& features, RefType* type) {
  return readHeapType(types, features, true, type);
}

}  // namespace wasm
}  // namespace js

#endif  // wasm_binary_h