[gecko.git] js/src/wasm/WasmValidate.cpp
1 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
2 * vim: set ts=8 sts=2 et sw=2 tw=80:
4 * Copyright 2016 Mozilla Foundation
6 * Licensed under the Apache License, Version 2.0 (the "License");
7 * you may not use this file except in compliance with the License.
8 * You may obtain a copy of the License at
10 * http://www.apache.org/licenses/LICENSE-2.0
12 * Unless required by applicable law or agreed to in writing, software
13 * distributed under the License is distributed on an "AS IS" BASIS,
14 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15 * See the License for the specific language governing permissions and
16 * limitations under the License.
19 #include "wasm/WasmValidate.h"
21 #include "mozilla/CheckedInt.h"
22 #include "mozilla/Span.h"
23 #include "mozilla/Utf8.h"
25 #include "js/Printf.h"
26 #include "js/String.h" // JS::MaxStringLength
27 #include "vm/JSContext.h"
28 #include "vm/Realm.h"
29 #include "wasm/WasmInitExpr.h"
30 #include "wasm/WasmOpIter.h"
31 #include "wasm/WasmTypeDecls.h"
33 using namespace js;
34 using namespace js::jit;
35 using namespace js::wasm;
37 using mozilla::AsChars;
38 using mozilla::CheckedInt;
39 using mozilla::CheckedInt32;
40 using mozilla::IsUtf8;
41 using mozilla::Span;
43 // Misc helpers.
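// EncodeLocalEntries writes the function-body "locals" prefix: consecutive
// locals of the same type are grouped into run-length entries, each encoded
// as a count followed by the value type.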
45 bool wasm::EncodeLocalEntries(Encoder& e, const ValTypeVector& locals) {
46 if (locals.length() > MaxLocals) {
47 return false;
50 uint32_t numLocalEntries = 0;
51 if (locals.length()) {
52 ValType prev = locals[0];
53 numLocalEntries++;
54 for (ValType t : locals) {
55 if (t != prev) {
56 numLocalEntries++;
57 prev = t;
62 if (!e.writeVarU32(numLocalEntries)) {
63 return false;
66 if (numLocalEntries) {
67 ValType prev = locals[0];
68 uint32_t count = 1;
69 for (uint32_t i = 1; i < locals.length(); i++, count++) {
70 if (prev != locals[i]) {
71 if (!e.writeVarU32(count)) {
72 return false;
74 if (!e.writeValType(prev)) {
75 return false;
77 prev = locals[i];
78 count = 0;
81 if (!e.writeVarU32(count)) {
82 return false;
84 if (!e.writeValType(prev)) {
85 return false;
89 return true;
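// DecodeLocalEntries reads the locals prefix during validation: an entry
// count followed by (count, value type) pairs, appending the expanded types
// to `locals` and failing if the total would exceed MaxLocals.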
92 bool wasm::DecodeLocalEntries(Decoder& d, const TypeContext& types,
93 const FeatureArgs& features,
94 ValTypeVector* locals) {
95 uint32_t numLocalEntries;
96 if (!d.readVarU32(&numLocalEntries)) {
97 return d.fail("failed to read number of local entries");
100 for (uint32_t i = 0; i < numLocalEntries; i++) {
101 uint32_t count;
102 if (!d.readVarU32(&count)) {
103 return d.fail("failed to read local entry count");
106 if (MaxLocals - locals->length() < count) {
107 return d.fail("too many locals");
110 ValType type;
111 if (!d.readValType(types, features, &type)) {
112 return false;
115 if (!locals->appendN(type, count)) {
116 return false;
120 return true;
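// DecodeValidatedLocalEntries re-reads a locals prefix that has already been
// validated, so it uses unchecked reads and asserts where the validating
// variant would report errors; only an append (OOM) failure can make it
// return false.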
123 bool wasm::DecodeValidatedLocalEntries(const TypeContext& types, Decoder& d,
124 ValTypeVector* locals) {
125 uint32_t numLocalEntries;
126 MOZ_ALWAYS_TRUE(d.readVarU32(&numLocalEntries));
128 for (uint32_t i = 0; i < numLocalEntries; i++) {
129 uint32_t count = d.uncheckedReadVarU32();
130 MOZ_ASSERT(MaxLocals - locals->length() >= count);
131 if (!locals->appendN(d.uncheckedReadValType(types), count)) {
132 return false;
136 return true;
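// CheckIsSubtypeOf succeeds if subType is a subtype of superType; otherwise
// it reports a "type mismatch" error at opcodeOffset naming both types.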
139 bool wasm::CheckIsSubtypeOf(Decoder& d, const ModuleEnvironment& env,
140 size_t opcodeOffset, FieldType subType,
141 FieldType superType) {
142 if (FieldType::isSubTypeOf(subType, superType)) {
143 return true;
146 UniqueChars subText = ToString(subType, env.types);
147 if (!subText) {
148 return false;
151 UniqueChars superText = ToString(superType, env.types);
152 if (!superText) {
153 return false;
156 UniqueChars error(
157 JS_smprintf("type mismatch: expression has type %s but expected %s",
158 subText.get(), superText.get()));
159 if (!error) {
160 return false;
163 return d.fail(opcodeOffset, error.get());
166 // Function body validation.
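// DecodeFunctionBodyExprs drives a ValidatingOpIter over the function's
// opcode stream. Each case reads an opcode's immediates and operands into
// throwaway slots purely for their validation side effects; decoding stops
// when the `end` matching the implicit function block empties the control
// stack, at which point endFunction checks the body ends exactly at bodyEnd.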
168 static bool DecodeFunctionBodyExprs(const ModuleEnvironment& env,
169 uint32_t funcIndex,
170 const ValTypeVector& locals,
171 const uint8_t* bodyEnd, Decoder* d) {
172 ValidatingOpIter iter(env, *d);
174 if (!iter.startFunction(funcIndex, locals)) {
175 return false;
178 #define CHECK(c) \
179 if (!(c)) return false; \
180 break
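// CHECK returns false from the enclosing function if a read fails, and
// otherwise breaks out of the opcode switch so the loop reads the next op.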
182 while (true) {
183 OpBytes op;
184 if (!iter.readOp(&op)) {
185 return false;
188 Nothing nothing;
189 NothingVector nothings{};
190 ResultType unusedType;
192 switch (op.b0) {
193 case uint16_t(Op::End): {
194 LabelKind unusedKind;
195 if (!iter.readEnd(&unusedKind, &unusedType, &nothings, &nothings)) {
196 return false;
198 iter.popEnd();
199 if (iter.controlStackEmpty()) {
200 return iter.endFunction(bodyEnd);
202 break;
204 case uint16_t(Op::Nop):
205 CHECK(iter.readNop());
206 case uint16_t(Op::Drop):
207 CHECK(iter.readDrop());
208 case uint16_t(Op::Call): {
209 uint32_t unusedIndex;
210 NothingVector unusedArgs{};
211 CHECK(iter.readCall(&unusedIndex, &unusedArgs));
213 case uint16_t(Op::CallIndirect): {
214 uint32_t unusedIndex, unusedIndex2;
215 NothingVector unusedArgs{};
216 CHECK(iter.readCallIndirect(&unusedIndex, &unusedIndex2, &nothing,
217 &unusedArgs));
219 #ifdef ENABLE_WASM_TAIL_CALLS
220 case uint16_t(Op::ReturnCall): {
221 if (!env.tailCallsEnabled()) {
222 return iter.unrecognizedOpcode(&op);
224 uint32_t unusedIndex;
225 NothingVector unusedArgs{};
226 CHECK(iter.readReturnCall(&unusedIndex, &unusedArgs));
228 case uint16_t(Op::ReturnCallIndirect): {
229 if (!env.tailCallsEnabled()) {
230 return iter.unrecognizedOpcode(&op);
232 uint32_t unusedIndex, unusedIndex2;
233 NothingVector unusedArgs{};
234 CHECK(iter.readReturnCallIndirect(&unusedIndex, &unusedIndex2, &nothing,
235 &unusedArgs));
237 #endif
238 #ifdef ENABLE_WASM_FUNCTION_REFERENCES
239 case uint16_t(Op::CallRef): {
240 if (!env.functionReferencesEnabled()) {
241 return iter.unrecognizedOpcode(&op);
243 const FuncType* unusedType;
244 NothingVector unusedArgs{};
245 CHECK(iter.readCallRef(&unusedType, &nothing, &unusedArgs));
247 # ifdef ENABLE_WASM_TAIL_CALLS
248 case uint16_t(Op::ReturnCallRef): {
249 if (!env.functionReferencesEnabled() || !env.tailCallsEnabled()) {
250 return iter.unrecognizedOpcode(&op);
252 const FuncType* unusedType;
253 NothingVector unusedArgs{};
254 CHECK(iter.readReturnCallRef(&unusedType, &nothing, &unusedArgs));
256 # endif
257 #endif
258 case uint16_t(Op::I32Const): {
259 int32_t unused;
260 CHECK(iter.readI32Const(&unused));
262 case uint16_t(Op::I64Const): {
263 int64_t unused;
264 CHECK(iter.readI64Const(&unused));
266 case uint16_t(Op::F32Const): {
267 float unused;
268 CHECK(iter.readF32Const(&unused));
270 case uint16_t(Op::F64Const): {
271 double unused;
272 CHECK(iter.readF64Const(&unused));
274 case uint16_t(Op::LocalGet): {
275 uint32_t unused;
276 CHECK(iter.readGetLocal(locals, &unused));
278 case uint16_t(Op::LocalSet): {
279 uint32_t unused;
280 CHECK(iter.readSetLocal(locals, &unused, &nothing));
282 case uint16_t(Op::LocalTee): {
283 uint32_t unused;
284 CHECK(iter.readTeeLocal(locals, &unused, &nothing));
286 case uint16_t(Op::GlobalGet): {
287 uint32_t unused;
288 CHECK(iter.readGetGlobal(&unused));
290 case uint16_t(Op::GlobalSet): {
291 uint32_t unused;
292 CHECK(iter.readSetGlobal(&unused, &nothing));
294 case uint16_t(Op::TableGet): {
295 uint32_t unusedTableIndex;
296 CHECK(iter.readTableGet(&unusedTableIndex, &nothing));
298 case uint16_t(Op::TableSet): {
299 uint32_t unusedTableIndex;
300 CHECK(iter.readTableSet(&unusedTableIndex, &nothing, &nothing));
302 case uint16_t(Op::SelectNumeric): {
303 StackType unused;
304 CHECK(iter.readSelect(/*typed*/ false, &unused, &nothing, &nothing,
305 &nothing));
307 case uint16_t(Op::SelectTyped): {
308 StackType unused;
309 CHECK(iter.readSelect(/*typed*/ true, &unused, &nothing, &nothing,
310 &nothing));
312 case uint16_t(Op::Block):
313 CHECK(iter.readBlock(&unusedType));
314 case uint16_t(Op::Loop):
315 CHECK(iter.readLoop(&unusedType));
316 case uint16_t(Op::If):
317 CHECK(iter.readIf(&unusedType, &nothing));
318 case uint16_t(Op::Else):
319 CHECK(iter.readElse(&unusedType, &unusedType, &nothings));
320 case uint16_t(Op::I32Clz):
321 case uint16_t(Op::I32Ctz):
322 case uint16_t(Op::I32Popcnt):
323 CHECK(iter.readUnary(ValType::I32, &nothing));
324 case uint16_t(Op::I64Clz):
325 case uint16_t(Op::I64Ctz):
326 case uint16_t(Op::I64Popcnt):
327 CHECK(iter.readUnary(ValType::I64, &nothing));
328 case uint16_t(Op::F32Abs):
329 case uint16_t(Op::F32Neg):
330 case uint16_t(Op::F32Ceil):
331 case uint16_t(Op::F32Floor):
332 case uint16_t(Op::F32Sqrt):
333 case uint16_t(Op::F32Trunc):
334 case uint16_t(Op::F32Nearest):
335 CHECK(iter.readUnary(ValType::F32, &nothing));
336 case uint16_t(Op::F64Abs):
337 case uint16_t(Op::F64Neg):
338 case uint16_t(Op::F64Ceil):
339 case uint16_t(Op::F64Floor):
340 case uint16_t(Op::F64Sqrt):
341 case uint16_t(Op::F64Trunc):
342 case uint16_t(Op::F64Nearest):
343 CHECK(iter.readUnary(ValType::F64, &nothing));
344 case uint16_t(Op::I32Add):
345 case uint16_t(Op::I32Sub):
346 case uint16_t(Op::I32Mul):
347 case uint16_t(Op::I32DivS):
348 case uint16_t(Op::I32DivU):
349 case uint16_t(Op::I32RemS):
350 case uint16_t(Op::I32RemU):
351 case uint16_t(Op::I32And):
352 case uint16_t(Op::I32Or):
353 case uint16_t(Op::I32Xor):
354 case uint16_t(Op::I32Shl):
355 case uint16_t(Op::I32ShrS):
356 case uint16_t(Op::I32ShrU):
357 case uint16_t(Op::I32Rotl):
358 case uint16_t(Op::I32Rotr):
359 CHECK(iter.readBinary(ValType::I32, &nothing, &nothing));
360 case uint16_t(Op::I64Add):
361 case uint16_t(Op::I64Sub):
362 case uint16_t(Op::I64Mul):
363 case uint16_t(Op::I64DivS):
364 case uint16_t(Op::I64DivU):
365 case uint16_t(Op::I64RemS):
366 case uint16_t(Op::I64RemU):
367 case uint16_t(Op::I64And):
368 case uint16_t(Op::I64Or):
369 case uint16_t(Op::I64Xor):
370 case uint16_t(Op::I64Shl):
371 case uint16_t(Op::I64ShrS):
372 case uint16_t(Op::I64ShrU):
373 case uint16_t(Op::I64Rotl):
374 case uint16_t(Op::I64Rotr):
375 CHECK(iter.readBinary(ValType::I64, &nothing, &nothing));
376 case uint16_t(Op::F32Add):
377 case uint16_t(Op::F32Sub):
378 case uint16_t(Op::F32Mul):
379 case uint16_t(Op::F32Div):
380 case uint16_t(Op::F32Min):
381 case uint16_t(Op::F32Max):
382 case uint16_t(Op::F32CopySign):
383 CHECK(iter.readBinary(ValType::F32, &nothing, &nothing));
384 case uint16_t(Op::F64Add):
385 case uint16_t(Op::F64Sub):
386 case uint16_t(Op::F64Mul):
387 case uint16_t(Op::F64Div):
388 case uint16_t(Op::F64Min):
389 case uint16_t(Op::F64Max):
390 case uint16_t(Op::F64CopySign):
391 CHECK(iter.readBinary(ValType::F64, &nothing, &nothing));
392 case uint16_t(Op::I32Eq):
393 case uint16_t(Op::I32Ne):
394 case uint16_t(Op::I32LtS):
395 case uint16_t(Op::I32LtU):
396 case uint16_t(Op::I32LeS):
397 case uint16_t(Op::I32LeU):
398 case uint16_t(Op::I32GtS):
399 case uint16_t(Op::I32GtU):
400 case uint16_t(Op::I32GeS):
401 case uint16_t(Op::I32GeU):
402 CHECK(iter.readComparison(ValType::I32, &nothing, &nothing));
403 case uint16_t(Op::I64Eq):
404 case uint16_t(Op::I64Ne):
405 case uint16_t(Op::I64LtS):
406 case uint16_t(Op::I64LtU):
407 case uint16_t(Op::I64LeS):
408 case uint16_t(Op::I64LeU):
409 case uint16_t(Op::I64GtS):
410 case uint16_t(Op::I64GtU):
411 case uint16_t(Op::I64GeS):
412 case uint16_t(Op::I64GeU):
413 CHECK(iter.readComparison(ValType::I64, &nothing, &nothing));
414 case uint16_t(Op::F32Eq):
415 case uint16_t(Op::F32Ne):
416 case uint16_t(Op::F32Lt):
417 case uint16_t(Op::F32Le):
418 case uint16_t(Op::F32Gt):
419 case uint16_t(Op::F32Ge):
420 CHECK(iter.readComparison(ValType::F32, &nothing, &nothing));
421 case uint16_t(Op::F64Eq):
422 case uint16_t(Op::F64Ne):
423 case uint16_t(Op::F64Lt):
424 case uint16_t(Op::F64Le):
425 case uint16_t(Op::F64Gt):
426 case uint16_t(Op::F64Ge):
427 CHECK(iter.readComparison(ValType::F64, &nothing, &nothing));
428 case uint16_t(Op::I32Eqz):
429 CHECK(iter.readConversion(ValType::I32, ValType::I32, &nothing));
430 case uint16_t(Op::I64Eqz):
431 case uint16_t(Op::I32WrapI64):
432 CHECK(iter.readConversion(ValType::I64, ValType::I32, &nothing));
433 case uint16_t(Op::I32TruncF32S):
434 case uint16_t(Op::I32TruncF32U):
435 case uint16_t(Op::I32ReinterpretF32):
436 CHECK(iter.readConversion(ValType::F32, ValType::I32, &nothing));
437 case uint16_t(Op::I32TruncF64S):
438 case uint16_t(Op::I32TruncF64U):
439 CHECK(iter.readConversion(ValType::F64, ValType::I32, &nothing));
440 case uint16_t(Op::I64ExtendI32S):
441 case uint16_t(Op::I64ExtendI32U):
442 CHECK(iter.readConversion(ValType::I32, ValType::I64, &nothing));
443 case uint16_t(Op::I64TruncF32S):
444 case uint16_t(Op::I64TruncF32U):
445 CHECK(iter.readConversion(ValType::F32, ValType::I64, &nothing));
446 case uint16_t(Op::I64TruncF64S):
447 case uint16_t(Op::I64TruncF64U):
448 case uint16_t(Op::I64ReinterpretF64):
449 CHECK(iter.readConversion(ValType::F64, ValType::I64, &nothing));
450 case uint16_t(Op::F32ConvertI32S):
451 case uint16_t(Op::F32ConvertI32U):
452 case uint16_t(Op::F32ReinterpretI32):
453 CHECK(iter.readConversion(ValType::I32, ValType::F32, &nothing));
454 case uint16_t(Op::F32ConvertI64S):
455 case uint16_t(Op::F32ConvertI64U):
456 CHECK(iter.readConversion(ValType::I64, ValType::F32, &nothing));
457 case uint16_t(Op::F32DemoteF64):
458 CHECK(iter.readConversion(ValType::F64, ValType::F32, &nothing));
459 case uint16_t(Op::F64ConvertI32S):
460 case uint16_t(Op::F64ConvertI32U):
461 CHECK(iter.readConversion(ValType::I32, ValType::F64, &nothing));
462 case uint16_t(Op::F64ConvertI64S):
463 case uint16_t(Op::F64ConvertI64U):
464 case uint16_t(Op::F64ReinterpretI64):
465 CHECK(iter.readConversion(ValType::I64, ValType::F64, &nothing));
466 case uint16_t(Op::F64PromoteF32):
467 CHECK(iter.readConversion(ValType::F32, ValType::F64, &nothing));
468 case uint16_t(Op::I32Extend8S):
469 case uint16_t(Op::I32Extend16S):
470 CHECK(iter.readConversion(ValType::I32, ValType::I32, &nothing));
471 case uint16_t(Op::I64Extend8S):
472 case uint16_t(Op::I64Extend16S):
473 case uint16_t(Op::I64Extend32S):
474 CHECK(iter.readConversion(ValType::I64, ValType::I64, &nothing));
475 case uint16_t(Op::I32Load8S):
476 case uint16_t(Op::I32Load8U): {
477 LinearMemoryAddress<Nothing> addr;
478 CHECK(iter.readLoad(ValType::I32, 1, &addr));
480 case uint16_t(Op::I32Load16S):
481 case uint16_t(Op::I32Load16U): {
482 LinearMemoryAddress<Nothing> addr;
483 CHECK(iter.readLoad(ValType::I32, 2, &addr));
485 case uint16_t(Op::I32Load): {
486 LinearMemoryAddress<Nothing> addr;
487 CHECK(iter.readLoad(ValType::I32, 4, &addr));
489 case uint16_t(Op::I64Load8S):
490 case uint16_t(Op::I64Load8U): {
491 LinearMemoryAddress<Nothing> addr;
492 CHECK(iter.readLoad(ValType::I64, 1, &addr));
494 case uint16_t(Op::I64Load16S):
495 case uint16_t(Op::I64Load16U): {
496 LinearMemoryAddress<Nothing> addr;
497 CHECK(iter.readLoad(ValType::I64, 2, &addr));
499 case uint16_t(Op::I64Load32S):
500 case uint16_t(Op::I64Load32U): {
501 LinearMemoryAddress<Nothing> addr;
502 CHECK(iter.readLoad(ValType::I64, 4, &addr));
504 case uint16_t(Op::I64Load): {
505 LinearMemoryAddress<Nothing> addr;
506 CHECK(iter.readLoad(ValType::I64, 8, &addr));
508 case uint16_t(Op::F32Load): {
509 LinearMemoryAddress<Nothing> addr;
510 CHECK(iter.readLoad(ValType::F32, 4, &addr));
512 case uint16_t(Op::F64Load): {
513 LinearMemoryAddress<Nothing> addr;
514 CHECK(iter.readLoad(ValType::F64, 8, &addr));
516 case uint16_t(Op::I32Store8): {
517 LinearMemoryAddress<Nothing> addr;
518 CHECK(iter.readStore(ValType::I32, 1, &addr, &nothing));
520 case uint16_t(Op::I32Store16): {
521 LinearMemoryAddress<Nothing> addr;
522 CHECK(iter.readStore(ValType::I32, 2, &addr, &nothing));
524 case uint16_t(Op::I32Store): {
525 LinearMemoryAddress<Nothing> addr;
526 CHECK(iter.readStore(ValType::I32, 4, &addr, &nothing));
528 case uint16_t(Op::I64Store8): {
529 LinearMemoryAddress<Nothing> addr;
530 CHECK(iter.readStore(ValType::I64, 1, &addr, &nothing));
532 case uint16_t(Op::I64Store16): {
533 LinearMemoryAddress<Nothing> addr;
534 CHECK(iter.readStore(ValType::I64, 2, &addr, &nothing));
536 case uint16_t(Op::I64Store32): {
537 LinearMemoryAddress<Nothing> addr;
538 CHECK(iter.readStore(ValType::I64, 4, &addr, &nothing));
540 case uint16_t(Op::I64Store): {
541 LinearMemoryAddress<Nothing> addr;
542 CHECK(iter.readStore(ValType::I64, 8, &addr, &nothing));
544 case uint16_t(Op::F32Store): {
545 LinearMemoryAddress<Nothing> addr;
546 CHECK(iter.readStore(ValType::F32, 4, &addr, &nothing));
548 case uint16_t(Op::F64Store): {
549 LinearMemoryAddress<Nothing> addr;
550 CHECK(iter.readStore(ValType::F64, 8, &addr, &nothing));
552 case uint16_t(Op::MemoryGrow): {
553 uint32_t memoryIndex;
554 CHECK(iter.readMemoryGrow(&memoryIndex, &nothing));
556 case uint16_t(Op::MemorySize): {
557 uint32_t memoryIndex;
558 CHECK(iter.readMemorySize(&memoryIndex));
560 case uint16_t(Op::Br): {
561 uint32_t unusedDepth;
562 CHECK(iter.readBr(&unusedDepth, &unusedType, &nothings));
564 case uint16_t(Op::BrIf): {
565 uint32_t unusedDepth;
566 CHECK(iter.readBrIf(&unusedDepth, &unusedType, &nothings, &nothing));
568 case uint16_t(Op::BrTable): {
569 Uint32Vector unusedDepths;
570 uint32_t unusedDefault;
571 CHECK(iter.readBrTable(&unusedDepths, &unusedDefault, &unusedType,
572 &nothings, &nothing));
574 case uint16_t(Op::Return):
575 CHECK(iter.readReturn(&nothings));
576 case uint16_t(Op::Unreachable):
577 CHECK(iter.readUnreachable());
578 #ifdef ENABLE_WASM_GC
579 case uint16_t(Op::GcPrefix): {
580 if (!env.gcEnabled()) {
581 return iter.unrecognizedOpcode(&op);
583 switch (op.b1) {
584 case uint32_t(GcOp::StructNew): {
585 uint32_t unusedUint;
586 NothingVector unusedArgs{};
587 CHECK(iter.readStructNew(&unusedUint, &unusedArgs));
589 case uint32_t(GcOp::StructNewDefault): {
590 uint32_t unusedUint;
591 CHECK(iter.readStructNewDefault(&unusedUint));
593 case uint32_t(GcOp::StructGet): {
594 uint32_t unusedUint1, unusedUint2;
595 CHECK(iter.readStructGet(&unusedUint1, &unusedUint2,
596 FieldWideningOp::None, &nothing));
598 case uint32_t(GcOp::StructGetS): {
599 uint32_t unusedUint1, unusedUint2;
600 CHECK(iter.readStructGet(&unusedUint1, &unusedUint2,
601 FieldWideningOp::Signed, &nothing));
603 case uint32_t(GcOp::StructGetU): {
604 uint32_t unusedUint1, unusedUint2;
605 CHECK(iter.readStructGet(&unusedUint1, &unusedUint2,
606 FieldWideningOp::Unsigned, &nothing));
608 case uint32_t(GcOp::StructSet): {
609 uint32_t unusedUint1, unusedUint2;
610 CHECK(iter.readStructSet(&unusedUint1, &unusedUint2, &nothing,
611 &nothing));
613 case uint32_t(GcOp::ArrayNew): {
614 uint32_t unusedUint;
615 CHECK(iter.readArrayNew(&unusedUint, &nothing, &nothing));
617 case uint32_t(GcOp::ArrayNewFixed): {
618 uint32_t unusedUint1, unusedUint2;
619 CHECK(
620 iter.readArrayNewFixed(&unusedUint1, &unusedUint2, &nothings));
622 case uint32_t(GcOp::ArrayNewDefault): {
623 uint32_t unusedUint;
624 CHECK(iter.readArrayNewDefault(&unusedUint, &nothing));
626 case uint32_t(GcOp::ArrayNewData): {
627 uint32_t unusedUint1, unusedUint2;
628 CHECK(iter.readArrayNewData(&unusedUint1, &unusedUint2, &nothing,
629 &nothing));
631 case uint32_t(GcOp::ArrayNewElem): {
632 uint32_t unusedUint1, unusedUint2;
633 CHECK(iter.readArrayNewElem(&unusedUint1, &unusedUint2, &nothing,
634 &nothing));
636 case uint32_t(GcOp::ArrayInitData): {
637 uint32_t unusedUint1, unusedUint2;
638 CHECK(iter.readArrayInitData(&unusedUint1, &unusedUint2, &nothing,
639 &nothing, &nothing, &nothing));
641 case uint32_t(GcOp::ArrayInitElem): {
642 uint32_t unusedUint1, unusedUint2;
643 CHECK(iter.readArrayInitElem(&unusedUint1, &unusedUint2, &nothing,
644 &nothing, &nothing, &nothing));
646 case uint32_t(GcOp::ArrayGet): {
647 uint32_t unusedUint1;
648 CHECK(iter.readArrayGet(&unusedUint1, FieldWideningOp::None,
649 &nothing, &nothing));
651 case uint32_t(GcOp::ArrayGetS): {
652 uint32_t unusedUint1;
653 CHECK(iter.readArrayGet(&unusedUint1, FieldWideningOp::Signed,
654 &nothing, &nothing));
656 case uint32_t(GcOp::ArrayGetU): {
657 uint32_t unusedUint1;
658 CHECK(iter.readArrayGet(&unusedUint1, FieldWideningOp::Unsigned,
659 &nothing, &nothing));
661 case uint32_t(GcOp::ArraySet): {
662 uint32_t unusedUint1;
663 CHECK(
664 iter.readArraySet(&unusedUint1, &nothing, &nothing, &nothing));
666 case uint32_t(GcOp::ArrayLen): {
667 CHECK(iter.readArrayLen(&nothing));
669 case uint32_t(GcOp::ArrayCopy): {
670 int32_t unusedInt;
671 bool unusedBool;
672 CHECK(iter.readArrayCopy(&unusedInt, &unusedBool, &nothing,
673 &nothing, &nothing, &nothing, &nothing));
675 case uint32_t(GcOp::ArrayFill): {
676 uint32_t unusedTypeIndex;
677 CHECK(iter.readArrayFill(&unusedTypeIndex, &nothing, &nothing,
678 &nothing, &nothing));
680 case uint32_t(GcOp::RefI31): {
681 CHECK(iter.readConversion(ValType::I32,
682 ValType(RefType::i31().asNonNullable()),
683 &nothing));
685 case uint32_t(GcOp::I31GetS): {
686 CHECK(iter.readConversion(ValType(RefType::i31()), ValType::I32,
687 &nothing));
689 case uint32_t(GcOp::I31GetU): {
690 CHECK(iter.readConversion(ValType(RefType::i31()), ValType::I32,
691 &nothing));
693 case uint16_t(GcOp::RefTest): {
694 RefType unusedSourceType;
695 RefType unusedDestType;
696 CHECK(iter.readRefTest(false, &unusedSourceType, &unusedDestType,
697 &nothing));
699 case uint16_t(GcOp::RefTestNull): {
700 RefType unusedSourceType;
701 RefType unusedDestType;
702 CHECK(iter.readRefTest(true, &unusedSourceType, &unusedDestType,
703 &nothing));
705 case uint16_t(GcOp::RefCast): {
706 RefType unusedSourceType;
707 RefType unusedDestType;
708 CHECK(iter.readRefCast(false, &unusedSourceType, &unusedDestType,
709 &nothing));
711 case uint16_t(GcOp::RefCastNull): {
712 RefType unusedSourceType;
713 RefType unusedDestType;
714 CHECK(iter.readRefCast(true, &unusedSourceType, &unusedDestType,
715 &nothing));
717 case uint16_t(GcOp::BrOnCast): {
718 uint32_t unusedRelativeDepth;
719 RefType unusedSourceType;
720 RefType unusedDestType;
721 CHECK(iter.readBrOnCast(true, &unusedRelativeDepth,
722 &unusedSourceType, &unusedDestType,
723 &unusedType, &nothings));
725 case uint16_t(GcOp::BrOnCastFail): {
726 uint32_t unusedRelativeDepth;
727 RefType unusedSourceType;
728 RefType unusedDestType;
729 CHECK(iter.readBrOnCast(false, &unusedRelativeDepth,
730 &unusedSourceType, &unusedDestType,
731 &unusedType, &nothings));
733 case uint16_t(GcOp::AnyConvertExtern): {
734 CHECK(iter.readRefConversion(RefType::extern_(), RefType::any(),
735 &nothing));
737 case uint16_t(GcOp::ExternConvertAny): {
738 CHECK(iter.readRefConversion(RefType::any(), RefType::extern_(),
739 &nothing));
741 default:
742 return iter.unrecognizedOpcode(&op);
744 break;
746 #endif
748 #ifdef ENABLE_WASM_SIMD
749 case uint16_t(Op::SimdPrefix): {
750 if (!env.simdAvailable()) {
751 return iter.unrecognizedOpcode(&op);
753 uint32_t noIndex;
754 switch (op.b1) {
755 case uint32_t(SimdOp::I8x16ExtractLaneS):
756 case uint32_t(SimdOp::I8x16ExtractLaneU):
757 CHECK(iter.readExtractLane(ValType::I32, 16, &noIndex, &nothing));
758 case uint32_t(SimdOp::I16x8ExtractLaneS):
759 case uint32_t(SimdOp::I16x8ExtractLaneU):
760 CHECK(iter.readExtractLane(ValType::I32, 8, &noIndex, &nothing));
761 case uint32_t(SimdOp::I32x4ExtractLane):
762 CHECK(iter.readExtractLane(ValType::I32, 4, &noIndex, &nothing));
763 case uint32_t(SimdOp::I64x2ExtractLane):
764 CHECK(iter.readExtractLane(ValType::I64, 2, &noIndex, &nothing));
765 case uint32_t(SimdOp::F32x4ExtractLane):
766 CHECK(iter.readExtractLane(ValType::F32, 4, &noIndex, &nothing));
767 case uint32_t(SimdOp::F64x2ExtractLane):
768 CHECK(iter.readExtractLane(ValType::F64, 2, &noIndex, &nothing));
770 case uint32_t(SimdOp::I8x16Splat):
771 case uint32_t(SimdOp::I16x8Splat):
772 case uint32_t(SimdOp::I32x4Splat):
773 CHECK(iter.readConversion(ValType::I32, ValType::V128, &nothing));
774 case uint32_t(SimdOp::I64x2Splat):
775 CHECK(iter.readConversion(ValType::I64, ValType::V128, &nothing));
776 case uint32_t(SimdOp::F32x4Splat):
777 CHECK(iter.readConversion(ValType::F32, ValType::V128, &nothing));
778 case uint32_t(SimdOp::F64x2Splat):
779 CHECK(iter.readConversion(ValType::F64, ValType::V128, &nothing));
781 case uint32_t(SimdOp::V128AnyTrue):
782 case uint32_t(SimdOp::I8x16AllTrue):
783 case uint32_t(SimdOp::I16x8AllTrue):
784 case uint32_t(SimdOp::I32x4AllTrue):
785 case uint32_t(SimdOp::I64x2AllTrue):
786 case uint32_t(SimdOp::I8x16Bitmask):
787 case uint32_t(SimdOp::I16x8Bitmask):
788 case uint32_t(SimdOp::I32x4Bitmask):
789 case uint32_t(SimdOp::I64x2Bitmask):
790 CHECK(iter.readConversion(ValType::V128, ValType::I32, &nothing));
792 case uint32_t(SimdOp::I8x16ReplaceLane):
793 CHECK(iter.readReplaceLane(ValType::I32, 16, &noIndex, &nothing,
794 &nothing));
795 case uint32_t(SimdOp::I16x8ReplaceLane):
796 CHECK(iter.readReplaceLane(ValType::I32, 8, &noIndex, &nothing,
797 &nothing));
798 case uint32_t(SimdOp::I32x4ReplaceLane):
799 CHECK(iter.readReplaceLane(ValType::I32, 4, &noIndex, &nothing,
800 &nothing));
801 case uint32_t(SimdOp::I64x2ReplaceLane):
802 CHECK(iter.readReplaceLane(ValType::I64, 2, &noIndex, &nothing,
803 &nothing));
804 case uint32_t(SimdOp::F32x4ReplaceLane):
805 CHECK(iter.readReplaceLane(ValType::F32, 4, &noIndex, &nothing,
806 &nothing));
807 case uint32_t(SimdOp::F64x2ReplaceLane):
808 CHECK(iter.readReplaceLane(ValType::F64, 2, &noIndex, &nothing,
809 &nothing));
811 case uint32_t(SimdOp::I8x16Eq):
812 case uint32_t(SimdOp::I8x16Ne):
813 case uint32_t(SimdOp::I8x16LtS):
814 case uint32_t(SimdOp::I8x16LtU):
815 case uint32_t(SimdOp::I8x16GtS):
816 case uint32_t(SimdOp::I8x16GtU):
817 case uint32_t(SimdOp::I8x16LeS):
818 case uint32_t(SimdOp::I8x16LeU):
819 case uint32_t(SimdOp::I8x16GeS):
820 case uint32_t(SimdOp::I8x16GeU):
821 case uint32_t(SimdOp::I16x8Eq):
822 case uint32_t(SimdOp::I16x8Ne):
823 case uint32_t(SimdOp::I16x8LtS):
824 case uint32_t(SimdOp::I16x8LtU):
825 case uint32_t(SimdOp::I16x8GtS):
826 case uint32_t(SimdOp::I16x8GtU):
827 case uint32_t(SimdOp::I16x8LeS):
828 case uint32_t(SimdOp::I16x8LeU):
829 case uint32_t(SimdOp::I16x8GeS):
830 case uint32_t(SimdOp::I16x8GeU):
831 case uint32_t(SimdOp::I32x4Eq):
832 case uint32_t(SimdOp::I32x4Ne):
833 case uint32_t(SimdOp::I32x4LtS):
834 case uint32_t(SimdOp::I32x4LtU):
835 case uint32_t(SimdOp::I32x4GtS):
836 case uint32_t(SimdOp::I32x4GtU):
837 case uint32_t(SimdOp::I32x4LeS):
838 case uint32_t(SimdOp::I32x4LeU):
839 case uint32_t(SimdOp::I32x4GeS):
840 case uint32_t(SimdOp::I32x4GeU):
841 case uint32_t(SimdOp::I64x2Eq):
842 case uint32_t(SimdOp::I64x2Ne):
843 case uint32_t(SimdOp::I64x2LtS):
844 case uint32_t(SimdOp::I64x2GtS):
845 case uint32_t(SimdOp::I64x2LeS):
846 case uint32_t(SimdOp::I64x2GeS):
847 case uint32_t(SimdOp::F32x4Eq):
848 case uint32_t(SimdOp::F32x4Ne):
849 case uint32_t(SimdOp::F32x4Lt):
850 case uint32_t(SimdOp::F32x4Gt):
851 case uint32_t(SimdOp::F32x4Le):
852 case uint32_t(SimdOp::F32x4Ge):
853 case uint32_t(SimdOp::F64x2Eq):
854 case uint32_t(SimdOp::F64x2Ne):
855 case uint32_t(SimdOp::F64x2Lt):
856 case uint32_t(SimdOp::F64x2Gt):
857 case uint32_t(SimdOp::F64x2Le):
858 case uint32_t(SimdOp::F64x2Ge):
859 case uint32_t(SimdOp::V128And):
860 case uint32_t(SimdOp::V128Or):
861 case uint32_t(SimdOp::V128Xor):
862 case uint32_t(SimdOp::V128AndNot):
863 case uint32_t(SimdOp::I8x16AvgrU):
864 case uint32_t(SimdOp::I16x8AvgrU):
865 case uint32_t(SimdOp::I8x16Add):
866 case uint32_t(SimdOp::I8x16AddSatS):
867 case uint32_t(SimdOp::I8x16AddSatU):
868 case uint32_t(SimdOp::I8x16Sub):
869 case uint32_t(SimdOp::I8x16SubSatS):
870 case uint32_t(SimdOp::I8x16SubSatU):
871 case uint32_t(SimdOp::I8x16MinS):
872 case uint32_t(SimdOp::I8x16MinU):
873 case uint32_t(SimdOp::I8x16MaxS):
874 case uint32_t(SimdOp::I8x16MaxU):
875 case uint32_t(SimdOp::I16x8Add):
876 case uint32_t(SimdOp::I16x8AddSatS):
877 case uint32_t(SimdOp::I16x8AddSatU):
878 case uint32_t(SimdOp::I16x8Sub):
879 case uint32_t(SimdOp::I16x8SubSatS):
880 case uint32_t(SimdOp::I16x8SubSatU):
881 case uint32_t(SimdOp::I16x8Mul):
882 case uint32_t(SimdOp::I16x8MinS):
883 case uint32_t(SimdOp::I16x8MinU):
884 case uint32_t(SimdOp::I16x8MaxS):
885 case uint32_t(SimdOp::I16x8MaxU):
886 case uint32_t(SimdOp::I32x4Add):
887 case uint32_t(SimdOp::I32x4Sub):
888 case uint32_t(SimdOp::I32x4Mul):
889 case uint32_t(SimdOp::I32x4MinS):
890 case uint32_t(SimdOp::I32x4MinU):
891 case uint32_t(SimdOp::I32x4MaxS):
892 case uint32_t(SimdOp::I32x4MaxU):
893 case uint32_t(SimdOp::I64x2Add):
894 case uint32_t(SimdOp::I64x2Sub):
895 case uint32_t(SimdOp::I64x2Mul):
896 case uint32_t(SimdOp::F32x4Add):
897 case uint32_t(SimdOp::F32x4Sub):
898 case uint32_t(SimdOp::F32x4Mul):
899 case uint32_t(SimdOp::F32x4Div):
900 case uint32_t(SimdOp::F32x4Min):
901 case uint32_t(SimdOp::F32x4Max):
902 case uint32_t(SimdOp::F64x2Add):
903 case uint32_t(SimdOp::F64x2Sub):
904 case uint32_t(SimdOp::F64x2Mul):
905 case uint32_t(SimdOp::F64x2Div):
906 case uint32_t(SimdOp::F64x2Min):
907 case uint32_t(SimdOp::F64x2Max):
908 case uint32_t(SimdOp::I8x16NarrowI16x8S):
909 case uint32_t(SimdOp::I8x16NarrowI16x8U):
910 case uint32_t(SimdOp::I16x8NarrowI32x4S):
911 case uint32_t(SimdOp::I16x8NarrowI32x4U):
912 case uint32_t(SimdOp::I8x16Swizzle):
913 case uint32_t(SimdOp::F32x4PMax):
914 case uint32_t(SimdOp::F32x4PMin):
915 case uint32_t(SimdOp::F64x2PMax):
916 case uint32_t(SimdOp::F64x2PMin):
917 case uint32_t(SimdOp::I32x4DotI16x8S):
918 case uint32_t(SimdOp::I16x8ExtmulLowI8x16S):
919 case uint32_t(SimdOp::I16x8ExtmulHighI8x16S):
920 case uint32_t(SimdOp::I16x8ExtmulLowI8x16U):
921 case uint32_t(SimdOp::I16x8ExtmulHighI8x16U):
922 case uint32_t(SimdOp::I32x4ExtmulLowI16x8S):
923 case uint32_t(SimdOp::I32x4ExtmulHighI16x8S):
924 case uint32_t(SimdOp::I32x4ExtmulLowI16x8U):
925 case uint32_t(SimdOp::I32x4ExtmulHighI16x8U):
926 case uint32_t(SimdOp::I64x2ExtmulLowI32x4S):
927 case uint32_t(SimdOp::I64x2ExtmulHighI32x4S):
928 case uint32_t(SimdOp::I64x2ExtmulLowI32x4U):
929 case uint32_t(SimdOp::I64x2ExtmulHighI32x4U):
930 case uint32_t(SimdOp::I16x8Q15MulrSatS):
931 CHECK(iter.readBinary(ValType::V128, &nothing, &nothing));
933 case uint32_t(SimdOp::I8x16Neg):
934 case uint32_t(SimdOp::I16x8Neg):
935 case uint32_t(SimdOp::I16x8ExtendLowI8x16S):
936 case uint32_t(SimdOp::I16x8ExtendHighI8x16S):
937 case uint32_t(SimdOp::I16x8ExtendLowI8x16U):
938 case uint32_t(SimdOp::I16x8ExtendHighI8x16U):
939 case uint32_t(SimdOp::I32x4Neg):
940 case uint32_t(SimdOp::I32x4ExtendLowI16x8S):
941 case uint32_t(SimdOp::I32x4ExtendHighI16x8S):
942 case uint32_t(SimdOp::I32x4ExtendLowI16x8U):
943 case uint32_t(SimdOp::I32x4ExtendHighI16x8U):
944 case uint32_t(SimdOp::I32x4TruncSatF32x4S):
945 case uint32_t(SimdOp::I32x4TruncSatF32x4U):
946 case uint32_t(SimdOp::I64x2Neg):
947 case uint32_t(SimdOp::I64x2ExtendLowI32x4S):
948 case uint32_t(SimdOp::I64x2ExtendHighI32x4S):
949 case uint32_t(SimdOp::I64x2ExtendLowI32x4U):
950 case uint32_t(SimdOp::I64x2ExtendHighI32x4U):
951 case uint32_t(SimdOp::F32x4Abs):
952 case uint32_t(SimdOp::F32x4Neg):
953 case uint32_t(SimdOp::F32x4Sqrt):
954 case uint32_t(SimdOp::F32x4ConvertI32x4S):
955 case uint32_t(SimdOp::F32x4ConvertI32x4U):
956 case uint32_t(SimdOp::F64x2Abs):
957 case uint32_t(SimdOp::F64x2Neg):
958 case uint32_t(SimdOp::F64x2Sqrt):
959 case uint32_t(SimdOp::V128Not):
960 case uint32_t(SimdOp::I8x16Popcnt):
961 case uint32_t(SimdOp::I8x16Abs):
962 case uint32_t(SimdOp::I16x8Abs):
963 case uint32_t(SimdOp::I32x4Abs):
964 case uint32_t(SimdOp::I64x2Abs):
965 case uint32_t(SimdOp::F32x4Ceil):
966 case uint32_t(SimdOp::F32x4Floor):
967 case uint32_t(SimdOp::F32x4Trunc):
968 case uint32_t(SimdOp::F32x4Nearest):
969 case uint32_t(SimdOp::F64x2Ceil):
970 case uint32_t(SimdOp::F64x2Floor):
971 case uint32_t(SimdOp::F64x2Trunc):
972 case uint32_t(SimdOp::F64x2Nearest):
973 case uint32_t(SimdOp::F32x4DemoteF64x2Zero):
974 case uint32_t(SimdOp::F64x2PromoteLowF32x4):
975 case uint32_t(SimdOp::F64x2ConvertLowI32x4S):
976 case uint32_t(SimdOp::F64x2ConvertLowI32x4U):
977 case uint32_t(SimdOp::I32x4TruncSatF64x2SZero):
978 case uint32_t(SimdOp::I32x4TruncSatF64x2UZero):
979 case uint32_t(SimdOp::I16x8ExtaddPairwiseI8x16S):
980 case uint32_t(SimdOp::I16x8ExtaddPairwiseI8x16U):
981 case uint32_t(SimdOp::I32x4ExtaddPairwiseI16x8S):
982 case uint32_t(SimdOp::I32x4ExtaddPairwiseI16x8U):
983 CHECK(iter.readUnary(ValType::V128, &nothing));
985 case uint32_t(SimdOp::I8x16Shl):
986 case uint32_t(SimdOp::I8x16ShrS):
987 case uint32_t(SimdOp::I8x16ShrU):
988 case uint32_t(SimdOp::I16x8Shl):
989 case uint32_t(SimdOp::I16x8ShrS):
990 case uint32_t(SimdOp::I16x8ShrU):
991 case uint32_t(SimdOp::I32x4Shl):
992 case uint32_t(SimdOp::I32x4ShrS):
993 case uint32_t(SimdOp::I32x4ShrU):
994 case uint32_t(SimdOp::I64x2Shl):
995 case uint32_t(SimdOp::I64x2ShrS):
996 case uint32_t(SimdOp::I64x2ShrU):
997 CHECK(iter.readVectorShift(&nothing, &nothing));
999 case uint32_t(SimdOp::V128Bitselect):
1000 CHECK(
1001 iter.readTernary(ValType::V128, &nothing, &nothing, &nothing));
1003 case uint32_t(SimdOp::I8x16Shuffle): {
1004 V128 mask;
1005 CHECK(iter.readVectorShuffle(&nothing, &nothing, &mask));
1008 case uint32_t(SimdOp::V128Const): {
1009 V128 noVector;
1010 CHECK(iter.readV128Const(&noVector));
1013 case uint32_t(SimdOp::V128Load): {
1014 LinearMemoryAddress<Nothing> addr;
1015 CHECK(iter.readLoad(ValType::V128, 16, &addr));
1018 case uint32_t(SimdOp::V128Load8Splat): {
1019 LinearMemoryAddress<Nothing> addr;
1020 CHECK(iter.readLoadSplat(1, &addr));
1023 case uint32_t(SimdOp::V128Load16Splat): {
1024 LinearMemoryAddress<Nothing> addr;
1025 CHECK(iter.readLoadSplat(2, &addr));
1028 case uint32_t(SimdOp::V128Load32Splat): {
1029 LinearMemoryAddress<Nothing> addr;
1030 CHECK(iter.readLoadSplat(4, &addr));
1033 case uint32_t(SimdOp::V128Load64Splat): {
1034 LinearMemoryAddress<Nothing> addr;
1035 CHECK(iter.readLoadSplat(8, &addr));
1038 case uint32_t(SimdOp::V128Load8x8S):
1039 case uint32_t(SimdOp::V128Load8x8U): {
1040 LinearMemoryAddress<Nothing> addr;
1041 CHECK(iter.readLoadExtend(&addr));
1044 case uint32_t(SimdOp::V128Load16x4S):
1045 case uint32_t(SimdOp::V128Load16x4U): {
1046 LinearMemoryAddress<Nothing> addr;
1047 CHECK(iter.readLoadExtend(&addr));
1050 case uint32_t(SimdOp::V128Load32x2S):
1051 case uint32_t(SimdOp::V128Load32x2U): {
1052 LinearMemoryAddress<Nothing> addr;
1053 CHECK(iter.readLoadExtend(&addr));
1056 case uint32_t(SimdOp::V128Store): {
1057 LinearMemoryAddress<Nothing> addr;
1058 CHECK(iter.readStore(ValType::V128, 16, &addr, &nothing));
1061 case uint32_t(SimdOp::V128Load32Zero): {
1062 LinearMemoryAddress<Nothing> addr;
1063 CHECK(iter.readLoadSplat(4, &addr));
1066 case uint32_t(SimdOp::V128Load64Zero): {
1067 LinearMemoryAddress<Nothing> addr;
1068 CHECK(iter.readLoadSplat(8, &addr));
1071 case uint32_t(SimdOp::V128Load8Lane): {
1072 LinearMemoryAddress<Nothing> addr;
1073 CHECK(iter.readLoadLane(1, &addr, &noIndex, &nothing));
1076 case uint32_t(SimdOp::V128Load16Lane): {
1077 LinearMemoryAddress<Nothing> addr;
1078 CHECK(iter.readLoadLane(2, &addr, &noIndex, &nothing));
1081 case uint32_t(SimdOp::V128Load32Lane): {
1082 LinearMemoryAddress<Nothing> addr;
1083 CHECK(iter.readLoadLane(4, &addr, &noIndex, &nothing));
1086 case uint32_t(SimdOp::V128Load64Lane): {
1087 LinearMemoryAddress<Nothing> addr;
1088 CHECK(iter.readLoadLane(8, &addr, &noIndex, &nothing));
1091 case uint32_t(SimdOp::V128Store8Lane): {
1092 LinearMemoryAddress<Nothing> addr;
1093 CHECK(iter.readStoreLane(1, &addr, &noIndex, &nothing));
1096 case uint32_t(SimdOp::V128Store16Lane): {
1097 LinearMemoryAddress<Nothing> addr;
1098 CHECK(iter.readStoreLane(2, &addr, &noIndex, &nothing));
1101 case uint32_t(SimdOp::V128Store32Lane): {
1102 LinearMemoryAddress<Nothing> addr;
1103 CHECK(iter.readStoreLane(4, &addr, &noIndex, &nothing));
1106 case uint32_t(SimdOp::V128Store64Lane): {
1107 LinearMemoryAddress<Nothing> addr;
1108 CHECK(iter.readStoreLane(8, &addr, &noIndex, &nothing));
1111 # ifdef ENABLE_WASM_RELAXED_SIMD
1112 case uint32_t(SimdOp::F32x4RelaxedMadd):
1113 case uint32_t(SimdOp::F32x4RelaxedNmadd):
1114 case uint32_t(SimdOp::F64x2RelaxedMadd):
1115 case uint32_t(SimdOp::F64x2RelaxedNmadd):
1116 case uint32_t(SimdOp::I8x16RelaxedLaneSelect):
1117 case uint32_t(SimdOp::I16x8RelaxedLaneSelect):
1118 case uint32_t(SimdOp::I32x4RelaxedLaneSelect):
1119 case uint32_t(SimdOp::I64x2RelaxedLaneSelect):
1120 case uint32_t(SimdOp::I32x4DotI8x16I7x16AddS): {
1121 if (!env.v128RelaxedEnabled()) {
1122 return iter.unrecognizedOpcode(&op);
1124 CHECK(
1125 iter.readTernary(ValType::V128, &nothing, &nothing, &nothing));
1127 case uint32_t(SimdOp::F32x4RelaxedMin):
1128 case uint32_t(SimdOp::F32x4RelaxedMax):
1129 case uint32_t(SimdOp::F64x2RelaxedMin):
1130 case uint32_t(SimdOp::F64x2RelaxedMax):
1131 case uint32_t(SimdOp::I16x8RelaxedQ15MulrS):
1132 case uint32_t(SimdOp::I16x8DotI8x16I7x16S): {
1133 if (!env.v128RelaxedEnabled()) {
1134 return iter.unrecognizedOpcode(&op);
1136 CHECK(iter.readBinary(ValType::V128, &nothing, &nothing));
1138 case uint32_t(SimdOp::I32x4RelaxedTruncF32x4S):
1139 case uint32_t(SimdOp::I32x4RelaxedTruncF32x4U):
1140 case uint32_t(SimdOp::I32x4RelaxedTruncF64x2SZero):
1141 case uint32_t(SimdOp::I32x4RelaxedTruncF64x2UZero): {
1142 if (!env.v128RelaxedEnabled()) {
1143 return iter.unrecognizedOpcode(&op);
1145 CHECK(iter.readUnary(ValType::V128, &nothing));
1147 case uint32_t(SimdOp::I8x16RelaxedSwizzle): {
1148 if (!env.v128RelaxedEnabled()) {
1149 return iter.unrecognizedOpcode(&op);
1151 CHECK(iter.readBinary(ValType::V128, &nothing, &nothing));
1153 # endif
1155 default:
1156 return iter.unrecognizedOpcode(&op);
1158 break;
1160 #endif // ENABLE_WASM_SIMD
1162 case uint16_t(Op::MiscPrefix): {
1163 switch (op.b1) {
1164 case uint32_t(MiscOp::I32TruncSatF32S):
1165 case uint32_t(MiscOp::I32TruncSatF32U):
1166 CHECK(iter.readConversion(ValType::F32, ValType::I32, &nothing));
1167 case uint32_t(MiscOp::I32TruncSatF64S):
1168 case uint32_t(MiscOp::I32TruncSatF64U):
1169 CHECK(iter.readConversion(ValType::F64, ValType::I32, &nothing));
1170 case uint32_t(MiscOp::I64TruncSatF32S):
1171 case uint32_t(MiscOp::I64TruncSatF32U):
1172 CHECK(iter.readConversion(ValType::F32, ValType::I64, &nothing));
1173 case uint32_t(MiscOp::I64TruncSatF64S):
1174 case uint32_t(MiscOp::I64TruncSatF64U):
1175 CHECK(iter.readConversion(ValType::F64, ValType::I64, &nothing));
1176 case uint32_t(MiscOp::MemoryCopy): {
1177 uint32_t unusedDestMemIndex;
1178 uint32_t unusedSrcMemIndex;
1179 CHECK(iter.readMemOrTableCopy(/*isMem=*/true, &unusedDestMemIndex,
1180 &nothing, &unusedSrcMemIndex,
1181 &nothing, &nothing));
1183 case uint32_t(MiscOp::DataDrop): {
1184 uint32_t unusedSegIndex;
1185 CHECK(iter.readDataOrElemDrop(/*isData=*/true, &unusedSegIndex));
1187 case uint32_t(MiscOp::MemoryFill): {
1188 uint32_t memoryIndex;
1189 CHECK(iter.readMemFill(&memoryIndex, &nothing, &nothing, &nothing));
1191 case uint32_t(MiscOp::MemoryInit): {
1192 uint32_t unusedSegIndex;
1193 uint32_t unusedMemoryIndex;
1194 CHECK(iter.readMemOrTableInit(/*isMem=*/true, &unusedSegIndex,
1195 &unusedMemoryIndex, &nothing,
1196 &nothing, &nothing));
1198 case uint32_t(MiscOp::TableCopy): {
1199 uint32_t unusedDestTableIndex;
1200 uint32_t unusedSrcTableIndex;
1201 CHECK(iter.readMemOrTableCopy(
1202 /*isMem=*/false, &unusedDestTableIndex, &nothing,
1203 &unusedSrcTableIndex, &nothing, &nothing));
1205 case uint32_t(MiscOp::ElemDrop): {
1206 uint32_t unusedSegIndex;
1207 CHECK(iter.readDataOrElemDrop(/*isData=*/false, &unusedSegIndex));
1209 case uint32_t(MiscOp::TableInit): {
1210 uint32_t unusedSegIndex;
1211 uint32_t unusedTableIndex;
1212 CHECK(iter.readMemOrTableInit(/*isMem=*/false, &unusedSegIndex,
1213 &unusedTableIndex, &nothing, &nothing,
1214 &nothing));
1216 case uint32_t(MiscOp::TableFill): {
1217 uint32_t unusedTableIndex;
1218 CHECK(iter.readTableFill(&unusedTableIndex, &nothing, &nothing,
1219 &nothing));
1221 #ifdef ENABLE_WASM_MEMORY_CONTROL
1222 case uint32_t(MiscOp::MemoryDiscard): {
1223 if (!env.memoryControlEnabled()) {
1224 return iter.unrecognizedOpcode(&op);
1226 uint32_t unusedMemoryIndex;
1227 CHECK(iter.readMemDiscard(&unusedMemoryIndex, &nothing, &nothing));
1229 #endif
1230 case uint32_t(MiscOp::TableGrow): {
1231 uint32_t unusedTableIndex;
1232 CHECK(iter.readTableGrow(&unusedTableIndex, &nothing, &nothing));
1234 case uint32_t(MiscOp::TableSize): {
1235 uint32_t unusedTableIndex;
1236 CHECK(iter.readTableSize(&unusedTableIndex));
1238 default:
1239 return iter.unrecognizedOpcode(&op);
1241 break;
1243 #ifdef ENABLE_WASM_FUNCTION_REFERENCES
1244 case uint16_t(Op::RefAsNonNull): {
1245 if (!env.functionReferencesEnabled()) {
1246 return iter.unrecognizedOpcode(&op);
1248 CHECK(iter.readRefAsNonNull(&nothing));
1250 case uint16_t(Op::BrOnNull): {
1251 if (!env.functionReferencesEnabled()) {
1252 return iter.unrecognizedOpcode(&op);
1254 uint32_t unusedDepth;
1255 CHECK(
1256 iter.readBrOnNull(&unusedDepth, &unusedType, &nothings, &nothing));
1258 case uint16_t(Op::BrOnNonNull): {
1259 if (!env.functionReferencesEnabled()) {
1260 return iter.unrecognizedOpcode(&op);
1262 uint32_t unusedDepth;
1263 CHECK(iter.readBrOnNonNull(&unusedDepth, &unusedType, &nothings,
1264 &nothing));
1266 #endif
1267 #ifdef ENABLE_WASM_GC
1268 case uint16_t(Op::RefEq): {
1269 if (!env.gcEnabled()) {
1270 return iter.unrecognizedOpcode(&op);
1272 CHECK(iter.readComparison(RefType::eq(), &nothing, &nothing));
1274 #endif
1275 case uint16_t(Op::RefFunc): {
1276 uint32_t unusedIndex;
1277 CHECK(iter.readRefFunc(&unusedIndex));
1279 case uint16_t(Op::RefNull): {
1280 RefType type;
1281 CHECK(iter.readRefNull(&type));
1283 case uint16_t(Op::RefIsNull): {
1284 Nothing nothing;
1285 CHECK(iter.readRefIsNull(&nothing));
1287 case uint16_t(Op::Try):
1288 if (!env.exceptionsEnabled()) {
1289 return iter.unrecognizedOpcode(&op);
1291 CHECK(iter.readTry(&unusedType));
1292 case uint16_t(Op::Catch): {
1293 if (!env.exceptionsEnabled()) {
1294 return iter.unrecognizedOpcode(&op);
1296 LabelKind unusedKind;
1297 uint32_t unusedIndex;
1298 CHECK(iter.readCatch(&unusedKind, &unusedIndex, &unusedType,
1299 &unusedType, &nothings));
1301 case uint16_t(Op::CatchAll): {
1302 if (!env.exceptionsEnabled()) {
1303 return iter.unrecognizedOpcode(&op);
1305 LabelKind unusedKind;
1306 CHECK(iter.readCatchAll(&unusedKind, &unusedType, &unusedType,
1307 &nothings));
1309 case uint16_t(Op::Delegate): {
1310 if (!env.exceptionsEnabled()) {
1311 return iter.unrecognizedOpcode(&op);
1313 uint32_t unusedDepth;
1314 if (!iter.readDelegate(&unusedDepth, &unusedType, &nothings)) {
1315 return false;
1317 iter.popDelegate();
1318 break;
1320 case uint16_t(Op::Throw): {
1321 if (!env.exceptionsEnabled()) {
1322 return iter.unrecognizedOpcode(&op);
1324 uint32_t unusedIndex;
1325 CHECK(iter.readThrow(&unusedIndex, &nothings));
1327 case uint16_t(Op::Rethrow): {
1328 if (!env.exceptionsEnabled()) {
1329 return iter.unrecognizedOpcode(&op);
1331 uint32_t unusedDepth;
1332 CHECK(iter.readRethrow(&unusedDepth));
1334 case uint16_t(Op::ThreadPrefix): {
1335 // Though thread ops can be used on nonshared memories, we make them
1336 // unavailable if shared memory has been disabled in the prefs, for
1337 // maximum predictability and safety and consistency with JS.
1338 if (env.sharedMemoryEnabled() == Shareable::False) {
1339 return iter.unrecognizedOpcode(&op);
1341 switch (op.b1) {
1342 case uint32_t(ThreadOp::Wake): {
1343 LinearMemoryAddress<Nothing> addr;
1344 CHECK(iter.readWake(&addr, &nothing));
1346 case uint32_t(ThreadOp::I32Wait): {
1347 LinearMemoryAddress<Nothing> addr;
1348 CHECK(iter.readWait(&addr, ValType::I32, 4, &nothing, &nothing));
1350 case uint32_t(ThreadOp::I64Wait): {
1351 LinearMemoryAddress<Nothing> addr;
1352 CHECK(iter.readWait(&addr, ValType::I64, 8, &nothing, &nothing));
1354 case uint32_t(ThreadOp::Fence): {
1355 CHECK(iter.readFence());
1357 case uint32_t(ThreadOp::I32AtomicLoad): {
1358 LinearMemoryAddress<Nothing> addr;
1359 CHECK(iter.readAtomicLoad(&addr, ValType::I32, 4));
1361 case uint32_t(ThreadOp::I64AtomicLoad): {
1362 LinearMemoryAddress<Nothing> addr;
1363 CHECK(iter.readAtomicLoad(&addr, ValType::I64, 8));
1365 case uint32_t(ThreadOp::I32AtomicLoad8U): {
1366 LinearMemoryAddress<Nothing> addr;
1367 CHECK(iter.readAtomicLoad(&addr, ValType::I32, 1));
1369 case uint32_t(ThreadOp::I32AtomicLoad16U): {
1370 LinearMemoryAddress<Nothing> addr;
1371 CHECK(iter.readAtomicLoad(&addr, ValType::I32, 2));
1373 case uint32_t(ThreadOp::I64AtomicLoad8U): {
1374 LinearMemoryAddress<Nothing> addr;
1375 CHECK(iter.readAtomicLoad(&addr, ValType::I64, 1));
1377 case uint32_t(ThreadOp::I64AtomicLoad16U): {
1378 LinearMemoryAddress<Nothing> addr;
1379 CHECK(iter.readAtomicLoad(&addr, ValType::I64, 2));
1381 case uint32_t(ThreadOp::I64AtomicLoad32U): {
1382 LinearMemoryAddress<Nothing> addr;
1383 CHECK(iter.readAtomicLoad(&addr, ValType::I64, 4));
1385 case uint32_t(ThreadOp::I32AtomicStore): {
1386 LinearMemoryAddress<Nothing> addr;
1387 CHECK(iter.readAtomicStore(&addr, ValType::I32, 4, &nothing));
1389 case uint32_t(ThreadOp::I64AtomicStore): {
1390 LinearMemoryAddress<Nothing> addr;
1391 CHECK(iter.readAtomicStore(&addr, ValType::I64, 8, &nothing));
1393 case uint32_t(ThreadOp::I32AtomicStore8U): {
1394 LinearMemoryAddress<Nothing> addr;
1395 CHECK(iter.readAtomicStore(&addr, ValType::I32, 1, &nothing));
1397 case uint32_t(ThreadOp::I32AtomicStore16U): {
1398 LinearMemoryAddress<Nothing> addr;
1399 CHECK(iter.readAtomicStore(&addr, ValType::I32, 2, &nothing));
1401 case uint32_t(ThreadOp::I64AtomicStore8U): {
1402 LinearMemoryAddress<Nothing> addr;
1403 CHECK(iter.readAtomicStore(&addr, ValType::I64, 1, &nothing));
1405 case uint32_t(ThreadOp::I64AtomicStore16U): {
1406 LinearMemoryAddress<Nothing> addr;
1407 CHECK(iter.readAtomicStore(&addr, ValType::I64, 2, &nothing));
1409 case uint32_t(ThreadOp::I64AtomicStore32U): {
1410 LinearMemoryAddress<Nothing> addr;
1411 CHECK(iter.readAtomicStore(&addr, ValType::I64, 4, &nothing));
1413 case uint32_t(ThreadOp::I32AtomicAdd):
1414 case uint32_t(ThreadOp::I32AtomicSub):
1415 case uint32_t(ThreadOp::I32AtomicAnd):
1416 case uint32_t(ThreadOp::I32AtomicOr):
1417 case uint32_t(ThreadOp::I32AtomicXor):
1418 case uint32_t(ThreadOp::I32AtomicXchg): {
1419 LinearMemoryAddress<Nothing> addr;
1420 CHECK(iter.readAtomicRMW(&addr, ValType::I32, 4, &nothing));
1422 case uint32_t(ThreadOp::I64AtomicAdd):
1423 case uint32_t(ThreadOp::I64AtomicSub):
1424 case uint32_t(ThreadOp::I64AtomicAnd):
1425 case uint32_t(ThreadOp::I64AtomicOr):
1426 case uint32_t(ThreadOp::I64AtomicXor):
1427 case uint32_t(ThreadOp::I64AtomicXchg): {
1428 LinearMemoryAddress<Nothing> addr;
1429 CHECK(iter.readAtomicRMW(&addr, ValType::I64, 8, &nothing));
1431 case uint32_t(ThreadOp::I32AtomicAdd8U):
1432 case uint32_t(ThreadOp::I32AtomicSub8U):
1433 case uint32_t(ThreadOp::I32AtomicAnd8U):
1434 case uint32_t(ThreadOp::I32AtomicOr8U):
1435 case uint32_t(ThreadOp::I32AtomicXor8U):
1436 case uint32_t(ThreadOp::I32AtomicXchg8U): {
1437 LinearMemoryAddress<Nothing> addr;
1438 CHECK(iter.readAtomicRMW(&addr, ValType::I32, 1, &nothing));
1440 case uint32_t(ThreadOp::I32AtomicAdd16U):
1441 case uint32_t(ThreadOp::I32AtomicSub16U):
1442 case uint32_t(ThreadOp::I32AtomicAnd16U):
1443 case uint32_t(ThreadOp::I32AtomicOr16U):
1444 case uint32_t(ThreadOp::I32AtomicXor16U):
1445 case uint32_t(ThreadOp::I32AtomicXchg16U): {
1446 LinearMemoryAddress<Nothing> addr;
1447 CHECK(iter.readAtomicRMW(&addr, ValType::I32, 2, &nothing));
1449 case uint32_t(ThreadOp::I64AtomicAdd8U):
1450 case uint32_t(ThreadOp::I64AtomicSub8U):
1451 case uint32_t(ThreadOp::I64AtomicAnd8U):
1452 case uint32_t(ThreadOp::I64AtomicOr8U):
1453 case uint32_t(ThreadOp::I64AtomicXor8U):
1454 case uint32_t(ThreadOp::I64AtomicXchg8U): {
1455 LinearMemoryAddress<Nothing> addr;
1456 CHECK(iter.readAtomicRMW(&addr, ValType::I64, 1, &nothing));
1458 case uint32_t(ThreadOp::I64AtomicAdd16U):
1459 case uint32_t(ThreadOp::I64AtomicSub16U):
1460 case uint32_t(ThreadOp::I64AtomicAnd16U):
1461 case uint32_t(ThreadOp::I64AtomicOr16U):
1462 case uint32_t(ThreadOp::I64AtomicXor16U):
1463 case uint32_t(ThreadOp::I64AtomicXchg16U): {
1464 LinearMemoryAddress<Nothing> addr;
1465 CHECK(iter.readAtomicRMW(&addr, ValType::I64, 2, &nothing));
1467 case uint32_t(ThreadOp::I64AtomicAdd32U):
1468 case uint32_t(ThreadOp::I64AtomicSub32U):
1469 case uint32_t(ThreadOp::I64AtomicAnd32U):
1470 case uint32_t(ThreadOp::I64AtomicOr32U):
1471 case uint32_t(ThreadOp::I64AtomicXor32U):
1472 case uint32_t(ThreadOp::I64AtomicXchg32U): {
1473 LinearMemoryAddress<Nothing> addr;
1474 CHECK(iter.readAtomicRMW(&addr, ValType::I64, 4, &nothing));
1476 case uint32_t(ThreadOp::I32AtomicCmpXchg): {
1477 LinearMemoryAddress<Nothing> addr;
1478 CHECK(iter.readAtomicCmpXchg(&addr, ValType::I32, 4, &nothing,
1479 &nothing));
1481 case uint32_t(ThreadOp::I64AtomicCmpXchg): {
1482 LinearMemoryAddress<Nothing> addr;
1483 CHECK(iter.readAtomicCmpXchg(&addr, ValType::I64, 8, &nothing,
1484 &nothing));
1486 case uint32_t(ThreadOp::I32AtomicCmpXchg8U): {
1487 LinearMemoryAddress<Nothing> addr;
1488 CHECK(iter.readAtomicCmpXchg(&addr, ValType::I32, 1, &nothing,
1489 &nothing));
1491 case uint32_t(ThreadOp::I32AtomicCmpXchg16U): {
1492 LinearMemoryAddress<Nothing> addr;
1493 CHECK(iter.readAtomicCmpXchg(&addr, ValType::I32, 2, &nothing,
1494 &nothing));
1496 case uint32_t(ThreadOp::I64AtomicCmpXchg8U): {
1497 LinearMemoryAddress<Nothing> addr;
1498 CHECK(iter.readAtomicCmpXchg(&addr, ValType::I64, 1, &nothing,
1499 &nothing));
1501 case uint32_t(ThreadOp::I64AtomicCmpXchg16U): {
1502 LinearMemoryAddress<Nothing> addr;
1503 CHECK(iter.readAtomicCmpXchg(&addr, ValType::I64, 2, &nothing,
1504 &nothing));
1506 case uint32_t(ThreadOp::I64AtomicCmpXchg32U): {
1507 LinearMemoryAddress<Nothing> addr;
1508 CHECK(iter.readAtomicCmpXchg(&addr, ValType::I64, 4, &nothing,
1509 &nothing));
1511 default:
1512 return iter.unrecognizedOpcode(&op);
1514 break;
1516 case uint16_t(Op::MozPrefix):
1517 return iter.unrecognizedOpcode(&op);
1518 default:
1519 return iter.unrecognizedOpcode(&op);
1523 MOZ_CRASH("unreachable");
1525 #undef CHECK
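// ValidateFunctionBody seeds the locals vector with the function's argument
// types, decodes the declared local entries, and then validates the
// expression stream up to bodyBegin + bodySize.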
1528 bool wasm::ValidateFunctionBody(const ModuleEnvironment& env,
1529 uint32_t funcIndex, uint32_t bodySize,
1530 Decoder& d) {
1531 ValTypeVector locals;
1532 if (!locals.appendAll(env.funcs[funcIndex].type->args())) {
1533 return false;
1536 const uint8_t* bodyBegin = d.currentPosition();
1538 if (!DecodeLocalEntries(d, *env.types, env.features, &locals)) {
1539 return false;
1542 return DecodeFunctionBodyExprs(env, funcIndex, locals, bodyBegin + bodySize,
1543 &d);
1546 // Section macros.
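// DecodePreamble enforces the module size limit and then matches the fixed
// magic number and the expected binary version at the start of the module.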
1548 static bool DecodePreamble(Decoder& d) {
1549 if (d.bytesRemain() > MaxModuleBytes) {
1550 return d.fail("module too big");
1553 uint32_t u32;
1554 if (!d.readFixedU32(&u32) || u32 != MagicNumber) {
1555 return d.fail("failed to match magic number");
1558 if (!d.readFixedU32(&u32) || u32 != EncodingVersion) {
1559 return d.failf("binary version 0x%" PRIx32
1560 " does not match expected version 0x%" PRIx32,
1561 u32, EncodingVersion);
1564 return true;
1567 static bool DecodeValTypeVector(Decoder& d, ModuleEnvironment* env,
1568 uint32_t count, ValTypeVector* valTypes) {
1569 if (!valTypes->resize(count)) {
1570 return false;
1573 for (uint32_t i = 0; i < count; i++) {
1574 if (!d.readValType(*env->types, env->features, &(*valTypes)[i])) {
1575 return false;
1578 return true;
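// DecodeFuncType reads a function signature: a parameter count and types
// (bounded by MaxParams) followed by a result count and types (bounded by
// MaxResults).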
1581 static bool DecodeFuncType(Decoder& d, ModuleEnvironment* env,
1582 FuncType* funcType) {
1583 uint32_t numArgs;
1584 if (!d.readVarU32(&numArgs)) {
1585 return d.fail("bad number of function args");
1587 if (numArgs > MaxParams) {
1588 return d.fail("too many arguments in signature");
1590 ValTypeVector args;
1591 if (!DecodeValTypeVector(d, env, numArgs, &args)) {
1592 return false;
1595 uint32_t numResults;
1596 if (!d.readVarU32(&numResults)) {
1597 return d.fail("bad number of function returns");
1599 if (numResults > MaxResults) {
1600 return d.fail("too many returns in signature");
1602 ValTypeVector results;
1603 if (!DecodeValTypeVector(d, env, numResults, &results)) {
1604 return false;
1607 *funcType = FuncType(std::move(args), std::move(results));
1608 return true;
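// DecodeStructType reads a struct definition (GC proposal only): a field
// count bounded by MaxStructFields, then one field type plus a mutability
// flag per field; StructType::init computes the layout and rejects structs
// that are too large.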
1611 static bool DecodeStructType(Decoder& d, ModuleEnvironment* env,
1612 StructType* structType) {
1613 if (!env->gcEnabled()) {
1614 return d.fail("Structure types not enabled");
1617 uint32_t numFields;
1618 if (!d.readVarU32(&numFields)) {
1619 return d.fail("Bad number of fields");
1622 if (numFields > MaxStructFields) {
1623 return d.fail("too many fields in struct");
1626 StructFieldVector fields;
1627 if (!fields.resize(numFields)) {
1628 return false;
1631 for (uint32_t i = 0; i < numFields; i++) {
1632 if (!d.readFieldType(*env->types, env->features, &fields[i].type)) {
1633 return false;
1636 uint8_t flags;
1637 if (!d.readFixedU8(&flags)) {
1638 return d.fail("expected flag");
1640 if ((flags & ~uint8_t(FieldFlags::AllowedMask)) != 0) {
1641 return d.fail("garbage flag bits");
1643 fields[i].isMutable = flags & uint8_t(FieldFlags::Mutable);
1646 *structType = StructType(std::move(fields));
1648 // Compute the struct layout, and fail if the struct is too large
1649 if (!structType->init()) {
1650 return d.fail("too many fields in struct");
1652 return true;
1655 static bool DecodeArrayType(Decoder& d, ModuleEnvironment* env,
1656 ArrayType* arrayType) {
1657 if (!env->gcEnabled()) {
1658 return d.fail("gc types not enabled");
1661 FieldType elementType;
1662 if (!d.readFieldType(*env->types, env->features, &elementType)) {
1663 return false;
1666 uint8_t flags;
1667 if (!d.readFixedU8(&flags)) {
1668 return d.fail("expected flag");
1670 if ((flags & ~uint8_t(FieldFlags::AllowedMask)) != 0) {
1671 return d.fail("garbage flag bits");
1673 bool isMutable = flags & uint8_t(FieldFlags::Mutable);
1675 *arrayType = ArrayType(elementType, isMutable);
1676 return true;
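// DecodeTypeSection reads the type section as a sequence of recursion
// groups. With GC enabled, a group may carry an explicit recursion-group
// length prefix, and each type may carry a sub/final clause naming at most
// one supertype, which must precede it. Func, struct, and array forms are
// then decoded; supertype compatibility is checked in a second pass, and
// endRecGroup canonicalizes the group.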
1679 static bool DecodeTypeSection(Decoder& d, ModuleEnvironment* env) {
1680 MaybeSectionRange range;
1681 if (!d.startSection(SectionId::Type, env, &range, "type")) {
1682 return false;
1684 if (!range) {
1685 return true;
1688 uint32_t numRecGroups;
1689 if (!d.readVarU32(&numRecGroups)) {
1690 return d.fail("expected number of types");
1693 // Check if we've reached our implementation defined limit of recursion
1694 // groups.
1695 if (numRecGroups > MaxRecGroups) {
1696 return d.fail("too many types");
1699 for (uint32_t recGroupIndex = 0; recGroupIndex < numRecGroups;
1700 recGroupIndex++) {
1701 uint32_t recGroupLength = 1;
1703 // Decode an optional recursion group length, if the GC proposal is
1704 // enabled.
1705 if (env->gcEnabled()) {
1706 uint8_t firstTypeCode;
1707 if (!d.peekByte(&firstTypeCode)) {
1708 return d.fail("expected type form");
1711 if (firstTypeCode == (uint8_t)TypeCode::RecGroup) {
1712 // Skip over the prefix byte that was peeked.
1713 d.uncheckedReadFixedU8();
1715 // Read the number of types in this recursion group
1716 if (!d.readVarU32(&recGroupLength)) {
1717 return d.fail("expected recursion group length");
1722 // Start a recursion group. This will extend the type context with empty
1723 // type definitions to be filled.
1724 MutableRecGroup recGroup = env->types->startRecGroup(recGroupLength);
1725 if (!recGroup) {
1726 return false;
1729 // First, iterate over the types, validate them and set super types.
1730 // Subtyping relationship will be checked in a second iteration.
1731 for (uint32_t recGroupTypeIndex = 0; recGroupTypeIndex < recGroupLength;
1732 recGroupTypeIndex++) {
1733 uint32_t typeIndex =
1734 env->types->length() - recGroupLength + recGroupTypeIndex;
1736 // Check if we've reached our implementation defined limit of type
1737 // definitions.
1738 if (typeIndex >= MaxTypes) {
1739 return d.fail("too many types");
1742 uint8_t form;
1743 const TypeDef* superTypeDef = nullptr;
1745 // By default, all types are final unless the sub keyword is specified.
1746 bool finalTypeFlag = true;
1748 // Decode an optional declared super type index, if the GC proposal is
1749 // enabled.
1750 if (env->gcEnabled() && d.peekByte(&form) &&
1751 (form == (uint8_t)TypeCode::SubNoFinalType ||
1752 form == (uint8_t)TypeCode::SubFinalType)) {
1753 if (form == (uint8_t)TypeCode::SubNoFinalType) {
1754 finalTypeFlag = false;
1757 // Skip over the `sub` or `final` prefix byte we peeked.
1758 d.uncheckedReadFixedU8();
1760 // Decode the number of super types, which is currently limited to at
1761 // most one.
1762 uint32_t numSuperTypes;
1763 if (!d.readVarU32(&numSuperTypes)) {
1764 return d.fail("expected number of super types");
1766 if (numSuperTypes > 1) {
1767 return d.fail("too many super types");
1770 // Decode the super type, if any.
1771 if (numSuperTypes == 1) {
1772 uint32_t superTypeDefIndex;
1773 if (!d.readVarU32(&superTypeDefIndex)) {
1774 return d.fail("expected super type index");
1777 // A super type index must be strictly less than the current type
1778 // index in order to avoid cycles.
1779 if (superTypeDefIndex >= typeIndex) {
1780 return d.fail("invalid super type index");
1783 superTypeDef = &env->types->type(superTypeDefIndex);
1787 // Decode the kind of type definition
1788 if (!d.readFixedU8(&form)) {
1789 return d.fail("expected type form");
1792 TypeDef* typeDef = &recGroup->type(recGroupTypeIndex);
1793 switch (form) {
1794 case uint8_t(TypeCode::Func): {
1795 FuncType funcType;
1796 if (!DecodeFuncType(d, env, &funcType)) {
1797 return false;
1799 *typeDef = std::move(funcType);
1800 break;
1802 case uint8_t(TypeCode::Struct): {
1803 StructType structType;
1804 if (!DecodeStructType(d, env, &structType)) {
1805 return false;
1807 *typeDef = std::move(structType);
1808 break;
1810 case uint8_t(TypeCode::Array): {
1811 ArrayType arrayType;
1812 if (!DecodeArrayType(d, env, &arrayType)) {
1813 return false;
1815 *typeDef = std::move(arrayType);
1816 break;
1818 default:
1819 return d.fail("expected type form");
1822 typeDef->setFinal(finalTypeFlag);
1823 if (superTypeDef) {
1824 // Check that we aren't creating a subtyping chain that is too deep
1825 if (superTypeDef->subTypingDepth() >= MaxSubTypingDepth) {
1826 return d.fail("type is too deep");
1829 typeDef->setSuperTypeDef(superTypeDef);
1832 if (typeDef->isFuncType()) {
1833 typeDef->funcType().initImmediateTypeId(
1834 env->gcEnabled(), typeDef->isFinal(), superTypeDef, recGroupLength);
1838 // Check the super types to make sure they are compatible with their
1839 // subtypes. This is done in a second iteration to avoid dealing with
1840 // types that have not yet been loaded.
1841 for (uint32_t recGroupTypeIndex = 0; recGroupTypeIndex < recGroupLength;
1842 recGroupTypeIndex++) {
1843 TypeDef* typeDef = &recGroup->type(recGroupTypeIndex);
1844 if (typeDef->superTypeDef()) {
1845 // Check that the super type is compatible with this type
1846 if (!TypeDef::canBeSubTypeOf(typeDef, typeDef->superTypeDef())) {
1847 return d.fail("incompatible super type");
1852 // Finish the recursion group, which will canonicalize the types.
1853 if (!env->types->endRecGroup()) {
1854 return false;
1858 return d.finishSection(*range, "type");
1861 [[nodiscard]] static bool DecodeName(Decoder& d, CacheableName* name) {
1862 uint32_t numBytes;
1863 if (!d.readVarU32(&numBytes)) {
1864 return false;
1867 if (numBytes > MaxStringBytes) {
1868 return false;
1871 const uint8_t* bytes;
1872 if (!d.readBytes(numBytes, &bytes)) {
1873 return false;
1876 if (!IsUtf8(AsChars(Span(bytes, numBytes)))) {
1877 return false;
1880 UTF8Bytes utf8Bytes;
1881 if (!utf8Bytes.resizeUninitialized(numBytes)) {
1882 return false;
1884 memcpy(utf8Bytes.begin(), bytes, numBytes);
1886 *name = CacheableName(std::move(utf8Bytes));
1887 return true;
1890 static bool DecodeFuncTypeIndex(Decoder& d, const SharedTypeContext& types,
1891 uint32_t* funcTypeIndex) {
1892 if (!d.readVarU32(funcTypeIndex)) {
1893 return d.fail("expected signature index");
1896 if (*funcTypeIndex >= types->length()) {
1897 return d.fail("signature index out of range");
1900 const TypeDef& def = (*types)[*funcTypeIndex];
1902 if (!def.isFuncType()) {
1903 return d.fail("signature index references non-signature");
1906 return true;
1909 static bool DecodeLimitBound(Decoder& d, IndexType indexType, uint64_t* bound) {
1910 if (indexType == IndexType::I64) {
1911 return d.readVarU64(bound);
1914 // Spec tests assert that we only decode a LEB32 when the index type is I32.
1915 uint32_t bound32;
1916 if (!d.readVarU32(&bound32)) {
1917 return false;
1919 *bound = bound32;
1920 return true;
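// Informal summary of the limits flags byte decoded below (bit assignments
// per the core spec plus the threads and memory64 proposals; LimitsFlags and
// LimitsMask are the authoritative definitions in this tree):
//   bit 0 - a maximum bound follows the minimum
//   bit 1 - the memory is shared (memories only; requires a maximum)
//   bit 2 - the limits use a 64-bit index type (memories only, memory64)
// Tables accept only bit 0; any other bit is rejected via LimitsMask.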
1923 static bool DecodeLimits(Decoder& d, LimitsKind kind, Limits* limits) {
1924 uint8_t flags;
1925 if (!d.readFixedU8(&flags)) {
1926 return d.fail("expected flags");
1929 uint8_t mask = kind == LimitsKind::Memory ? uint8_t(LimitsMask::Memory)
1930 : uint8_t(LimitsMask::Table);
1932 if (flags & ~uint8_t(mask)) {
1933 return d.failf("unexpected bits set in flags: %" PRIu32,
1934 uint32_t(flags & ~uint8_t(mask)));
1937 // Memory limits may be shared or may specify an alternate index type.
1938 if (kind == LimitsKind::Memory) {
1939 if ((flags & uint8_t(LimitsFlags::IsShared)) &&
1940 !(flags & uint8_t(LimitsFlags::HasMaximum))) {
1941 return d.fail("maximum length required for shared memory");
1944 limits->shared = (flags & uint8_t(LimitsFlags::IsShared))
1945 ? Shareable::True
1946 : Shareable::False;
1948 #ifdef ENABLE_WASM_MEMORY64
1949 limits->indexType =
1950 (flags & uint8_t(LimitsFlags::IsI64)) ? IndexType::I64 : IndexType::I32;
1951 #else
1952 limits->indexType = IndexType::I32;
1953 if (flags & uint8_t(LimitsFlags::IsI64)) {
1954 return d.fail("i64 is not supported for memory limits");
1956 #endif
1957 } else {
1958 limits->shared = Shareable::False;
1959 limits->indexType = IndexType::I32;
1962 uint64_t initial;
1963 if (!DecodeLimitBound(d, limits->indexType, &initial)) {
1964 return d.fail("expected initial length");
1966 limits->initial = initial;
1968 if (flags & uint8_t(LimitsFlags::HasMaximum)) {
1969 uint64_t maximum;
1970 if (!DecodeLimitBound(d, limits->indexType, &maximum)) {
1971 return d.fail("expected maximum length");
1974 if (limits->initial > maximum) {
1975 return d.failf(
1976 "memory size minimum must not be greater than maximum; "
1977 "maximum length %" PRIu64 " is less than initial length %" PRIu64,
1978 maximum, limits->initial);
1981 limits->maximum.emplace(maximum);
1984 return true;
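// A table definition is normally `reftype limits`. With the
// function-references proposal it may instead be prefixed (a sketch, per that
// proposal's encoding) with 0x40 0x00, in which case an initializer
// expression for the table's elements follows the limits.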
1987 static bool DecodeTableTypeAndLimits(Decoder& d, ModuleEnvironment* env) {
1988 bool initExprPresent = false;
1989 uint8_t typeCode;
1990 if (!d.peekByte(&typeCode)) {
1991 return d.fail("expected type code");
1993 if (typeCode == (uint8_t)TypeCode::TableHasInitExpr) {
1994 d.uncheckedReadFixedU8();
1995 uint8_t flags;
1996 if (!d.readFixedU8(&flags) || flags != 0) {
1997 return d.fail("expected reserved byte to be 0");
1999 initExprPresent = true;
2002 RefType tableElemType;
2003 if (!d.readRefType(*env->types, env->features, &tableElemType)) {
2004 return false;
2007 Limits limits;
2008 if (!DecodeLimits(d, LimitsKind::Table, &limits)) {
2009 return false;
2012 // Table limits are only ever decoded with an i32 index type.
2013 MOZ_ASSERT(limits.indexType == IndexType::I32);
2015 // If there's a maximum, check it is in range. The check to exclude
2016 // initial > maximum is carried out by the DecodeLimits call above, so
2017 // we don't repeat it here.
2018 if (limits.initial > MaxTableLimitField ||
2019 ((limits.maximum.isSome() &&
2020 limits.maximum.value() > MaxTableLimitField))) {
2021 return d.fail("too many table elements");
2024 if (env->tables.length() >= MaxTables) {
2025 return d.fail("too many tables");
2028 // The rest of the runtime expects table limits to be within a 32-bit range.
2029 static_assert(MaxTableLimitField <= UINT32_MAX, "invariant");
2030 uint32_t initialLength = uint32_t(limits.initial);
2031 Maybe<uint32_t> maximumLength;
2032 if (limits.maximum) {
2033 maximumLength = Some(uint32_t(*limits.maximum));
2036 Maybe<InitExpr> initExpr;
2037 if (initExprPresent) {
2038 InitExpr initializer;
2039 if (!InitExpr::decodeAndValidate(d, env, tableElemType, &initializer)) {
2040 return false;
2042 initExpr = Some(std::move(initializer));
2043 } else {
2044 if (!tableElemType.isNullable()) {
2045 return d.fail("table with non-nullable references requires initializer");
2049 return env->tables.emplaceBack(tableElemType, initialLength, maximumLength,
2050 std::move(initExpr), /* isAsmJS */ false);
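// A global type is a value type followed by a flags byte: 0x00 for an
// immutable global, 0x01 for a mutable one. For example, `(global (mut i64))`
// is encoded as 0x7E 0x01.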
2053 static bool DecodeGlobalType(Decoder& d, const SharedTypeContext& types,
2054 const FeatureArgs& features, ValType* type,
2055 bool* isMutable) {
2056 if (!d.readValType(*types, features, type)) {
2057 return d.fail("expected global type");
2060 uint8_t flags;
2061 if (!d.readFixedU8(&flags)) {
2062 return d.fail("expected global flags");
2065 if (flags & ~uint8_t(GlobalTypeImmediate::AllowedMask)) {
2066 return d.fail("unexpected bits set in global flags");
2069 *isMutable = flags & uint8_t(GlobalTypeImmediate::IsMutable);
2070 return true;
2073 static bool DecodeMemoryTypeAndLimits(Decoder& d, ModuleEnvironment* env,
2074 MemoryDescVector* memories) {
2075 if (!env->features.multiMemory && env->numMemories() == 1) {
2076 return d.fail("already have default memory");
2079 if (env->numMemories() >= MaxMemories) {
2080 return d.fail("too many memories");
2083 Limits limits;
2084 if (!DecodeLimits(d, LimitsKind::Memory, &limits)) {
2085 return false;
2088 uint64_t maxField = MaxMemoryLimitField(limits.indexType);
2090 if (limits.initial > maxField) {
2091 return d.fail("initial memory size too big");
2094 if (limits.maximum && *limits.maximum > maxField) {
2095 return d.fail("maximum memory size too big");
2098 if (limits.shared == Shareable::True &&
2099 env->sharedMemoryEnabled() == Shareable::False) {
2100 return d.fail("shared memory is disabled");
2103 if (limits.indexType == IndexType::I64 && !env->memory64Enabled()) {
2104 return d.fail("memory64 is disabled");
2107 return memories->emplaceBack(MemoryDesc(limits));
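// A tag (exception-handling proposal) is an attribute, which must currently
// be 0 (meaning an exception), followed by the index of a function type whose
// result list is required to be empty.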
2110 static bool DecodeTag(Decoder& d, ModuleEnvironment* env, TagKind* tagKind,
2111 uint32_t* funcTypeIndex) {
2112 uint32_t tagCode;
2113 if (!d.readVarU32(&tagCode)) {
2114 return d.fail("expected tag kind");
2117 if (TagKind(tagCode) != TagKind::Exception) {
2118 return d.fail("illegal tag kind");
2120 *tagKind = TagKind(tagCode);
2122 if (!d.readVarU32(funcTypeIndex)) {
2123 return d.fail("expected function index in tag");
2125 if (*funcTypeIndex >= env->numTypes()) {
2126 return d.fail("function type index in tag out of bounds");
2128 if (!(*env->types)[*funcTypeIndex].isFuncType()) {
2129 return d.fail("function type index must index a function type");
2131 if ((*env->types)[*funcTypeIndex].funcType().results().length() != 0) {
2132 return d.fail("tag function types must not return anything");
2134 return true;
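// Each import entry is: a module name, a field name, a kind byte, and a
// kind-specific description. The kind bytes match DefinitionKind:
//   0x00 function (type index)      0x01 table (table type)
//   0x02 memory (limits)            0x03 global (global type)
//   0x04 tag (exception tag)
// (Byte values are per the spec; DefinitionKind is authoritative here.)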
2137 static bool DecodeImport(Decoder& d, ModuleEnvironment* env) {
2138 CacheableName moduleName;
2139 if (!DecodeName(d, &moduleName)) {
2140 return d.fail("expected valid import module name");
2143 CacheableName fieldName;
2144 if (!DecodeName(d, &fieldName)) {
2145 return d.fail("expected valid import field name");
2148 uint8_t rawImportKind;
2149 if (!d.readFixedU8(&rawImportKind)) {
2150 return d.fail("failed to read import kind");
2153 DefinitionKind importKind = DefinitionKind(rawImportKind);
2155 switch (importKind) {
2156 case DefinitionKind::Function: {
2157 uint32_t funcTypeIndex;
2158 if (!DecodeFuncTypeIndex(d, env->types, &funcTypeIndex)) {
2159 return false;
2161 if (!env->funcs.append(FuncDesc(
2162 &env->types->type(funcTypeIndex).funcType(), funcTypeIndex))) {
2163 return false;
2165 if (env->funcs.length() > MaxFuncs) {
2166 return d.fail("too many functions");
2168 break;
2170 case DefinitionKind::Table: {
2171 if (!DecodeTableTypeAndLimits(d, env)) {
2172 return false;
2174 env->tables.back().isImported = true;
2175 break;
2177 case DefinitionKind::Memory: {
2178 if (!DecodeMemoryTypeAndLimits(d, env, &env->memories)) {
2179 return false;
2181 break;
2183 case DefinitionKind::Global: {
2184 ValType type;
2185 bool isMutable;
2186 if (!DecodeGlobalType(d, env->types, env->features, &type, &isMutable)) {
2187 return false;
2189 if (!env->globals.append(
2190 GlobalDesc(type, isMutable, env->globals.length()))) {
2191 return false;
2193 if (env->globals.length() > MaxGlobals) {
2194 return d.fail("too many globals");
2196 break;
2198 case DefinitionKind::Tag: {
2199 TagKind tagKind;
2200 uint32_t funcTypeIndex;
2201 if (!DecodeTag(d, env, &tagKind, &funcTypeIndex)) {
2202 return false;
2204 ValTypeVector args;
2205 if (!args.appendAll((*env->types)[funcTypeIndex].funcType().args())) {
2206 return false;
2208 MutableTagType tagType = js_new<TagType>();
2209 if (!tagType || !tagType->initialize(std::move(args))) {
2210 return false;
2212 if (!env->tags.emplaceBack(tagKind, tagType)) {
2213 return false;
2215 if (env->tags.length() > MaxTags) {
2216 return d.fail("too many tags");
2218 break;
2220 default:
2221 return d.fail("unsupported import kind");
2224 return env->imports.emplaceBack(std::move(moduleName), std::move(fieldName),
2225 importKind);
2228 static bool CheckImportsAgainstBuiltinModules(Decoder& d,
2229 ModuleEnvironment* env) {
2230 const BuiltinModuleIds& builtinModules = env->features.builtinModules;
2232 // Skip this pass if no builtin modules are enabled
2233 if (builtinModules.hasNone()) {
2234 return true;
2237 // Allocate a type context for builtin types so we can canonicalize them
2238 // and use them in type comparisons
2239 RefPtr<TypeContext> builtinTypes = js_new<TypeContext>();
2240 if (!builtinTypes) {
2241 return false;
2244 uint32_t importFuncIndex = 0;
2245 for (auto& import : env->imports) {
2246 Maybe<BuiltinModuleId> builtinModule =
2247 ImportMatchesBuiltinModule(import.module.utf8Bytes(), builtinModules);
2249 switch (import.kind) {
2250 case DefinitionKind::Function: {
2251 const FuncDesc& func = env->funcs[importFuncIndex];
2252 importFuncIndex += 1;
2254 // Skip this import if it doesn't refer to a builtin module. We still have
2255 // to advance the import function index, though, hence the increment above.
2256 if (!builtinModule) {
2257 continue;
2260 // Check if this import refers to a builtin module function
2261 Maybe<const BuiltinModuleFunc*> builtinFunc =
2262 ImportMatchesBuiltinModuleFunc(import.field.utf8Bytes(),
2263 *builtinModule);
2264 if (!builtinFunc) {
2265 return d.fail("unrecognized builtin module field");
2268 // Get a canonicalized type definition for this builtin so we can
2269 // accurately compare it against the import type.
2270 FuncType builtinFuncType;
2271 if (!(*builtinFunc)->funcType(&builtinFuncType)) {
2272 return false;
2274 if (!builtinTypes->addType(builtinFuncType)) {
2275 return false;
2277 const TypeDef& builtinTypeDef =
2278 builtinTypes->type(builtinTypes->length() - 1);
2280 const TypeDef& importTypeDef = (*env->types)[func.typeIndex];
2281 if (!TypeDef::isSubTypeOf(&builtinTypeDef, &importTypeDef)) {
2282 return d.failf("type mismatch in %s", (*builtinFunc)->exportName);
2284 break;
2286 default: {
2287 if (!builtinModule) {
2288 continue;
2290 return d.fail("unrecognized builtin import");
2295 return true;
2298 static bool DecodeImportSection(Decoder& d, ModuleEnvironment* env) {
2299 MaybeSectionRange range;
2300 if (!d.startSection(SectionId::Import, env, &range, "import")) {
2301 return false;
2303 if (!range) {
2304 return true;
2307 uint32_t numImports;
2308 if (!d.readVarU32(&numImports)) {
2309 return d.fail("failed to read number of imports");
2312 if (numImports > MaxImports) {
2313 return d.fail("too many imports");
2316 for (uint32_t i = 0; i < numImports; i++) {
2317 if (!DecodeImport(d, env)) {
2318 return false;
2322 if (!d.finishSection(*range, "import")) {
2323 return false;
2326 env->numFuncImports = env->funcs.length();
2327 env->numGlobalImports = env->globals.length();
2328 return true;
2331 static bool DecodeFunctionSection(Decoder& d, ModuleEnvironment* env) {
2332 MaybeSectionRange range;
2333 if (!d.startSection(SectionId::Function, env, &range, "function")) {
2334 return false;
2336 if (!range) {
2337 return true;
2340 uint32_t numDefs;
2341 if (!d.readVarU32(&numDefs)) {
2342 return d.fail("expected number of function definitions");
2345 CheckedInt<uint32_t> numFuncs = env->funcs.length();
2346 numFuncs += numDefs;
2347 if (!numFuncs.isValid() || numFuncs.value() > MaxFuncs) {
2348 return d.fail("too many functions");
2351 if (!env->funcs.reserve(numFuncs.value())) {
2352 return false;
2355 for (uint32_t i = 0; i < numDefs; i++) {
2356 uint32_t funcTypeIndex;
2357 if (!DecodeFuncTypeIndex(d, env->types, &funcTypeIndex)) {
2358 return false;
2360 env->funcs.infallibleAppend(
2361 FuncDesc(&env->types->type(funcTypeIndex).funcType(), funcTypeIndex));
2364 return d.finishSection(*range, "function");
2367 static bool DecodeTableSection(Decoder& d, ModuleEnvironment* env) {
2368 MaybeSectionRange range;
2369 if (!d.startSection(SectionId::Table, env, &range, "table")) {
2370 return false;
2372 if (!range) {
2373 return true;
2376 uint32_t numTables;
2377 if (!d.readVarU32(&numTables)) {
2378 return d.fail("failed to read number of tables");
2381 for (uint32_t i = 0; i < numTables; ++i) {
2382 if (!DecodeTableTypeAndLimits(d, env)) {
2383 return false;
2387 return d.finishSection(*range, "table");
2390 static bool DecodeMemorySection(Decoder& d, ModuleEnvironment* env) {
2391 MaybeSectionRange range;
2392 if (!d.startSection(SectionId::Memory, env, &range, "memory")) {
2393 return false;
2395 if (!range) {
2396 return true;
2399 uint32_t numMemories;
2400 if (!d.readVarU32(&numMemories)) {
2401 return d.fail("failed to read number of memories");
2404 if (!env->features.multiMemory && numMemories > 1) {
2405 return d.fail("the number of memories must be at most one");
2408 for (uint32_t i = 0; i < numMemories; ++i) {
2409 if (!DecodeMemoryTypeAndLimits(d, env, &env->memories)) {
2410 return false;
2414 return d.finishSection(*range, "memory");
2417 static bool DecodeGlobalSection(Decoder& d, ModuleEnvironment* env) {
2418 MaybeSectionRange range;
2419 if (!d.startSection(SectionId::Global, env, &range, "global")) {
2420 return false;
2422 if (!range) {
2423 return true;
2426 uint32_t numDefs;
2427 if (!d.readVarU32(&numDefs)) {
2428 return d.fail("expected number of globals");
2431 CheckedInt<uint32_t> numGlobals = env->globals.length();
2432 numGlobals += numDefs;
2433 if (!numGlobals.isValid() || numGlobals.value() > MaxGlobals) {
2434 return d.fail("too many globals");
2437 if (!env->globals.reserve(numGlobals.value())) {
2438 return false;
2441 for (uint32_t i = 0; i < numDefs; i++) {
2442 ValType type;
2443 bool isMutable;
2444 if (!DecodeGlobalType(d, env->types, env->features, &type, &isMutable)) {
2445 return false;
2448 InitExpr initializer;
2449 if (!InitExpr::decodeAndValidate(d, env, type, &initializer)) {
2450 return false;
2453 env->globals.infallibleAppend(
2454 GlobalDesc(std::move(initializer), isMutable));
2457 return d.finishSection(*range, "global");
2460 static bool DecodeTagSection(Decoder& d, ModuleEnvironment* env) {
2461 MaybeSectionRange range;
2462 if (!d.startSection(SectionId::Tag, env, &range, "tag")) {
2463 return false;
2465 if (!range) {
2466 return true;
2469 if (!env->exceptionsEnabled()) {
2470 return d.fail("exceptions not enabled");
2473 uint32_t numDefs;
2474 if (!d.readVarU32(&numDefs)) {
2475 return d.fail("expected number of tags");
2478 CheckedInt<uint32_t> numTags = env->tags.length();
2479 numTags += numDefs;
2480 if (!numTags.isValid() || numTags.value() > MaxTags) {
2481 return d.fail("too many tags");
2484 if (!env->tags.reserve(numTags.value())) {
2485 return false;
2488 for (uint32_t i = 0; i < numDefs; i++) {
2489 TagKind tagKind;
2490 uint32_t funcTypeIndex;
2491 if (!DecodeTag(d, env, &tagKind, &funcTypeIndex)) {
2492 return false;
2494 ValTypeVector args;
2495 if (!args.appendAll((*env->types)[funcTypeIndex].funcType().args())) {
2496 return false;
2498 MutableTagType tagType = js_new<TagType>();
2499 if (!tagType || !tagType->initialize(std::move(args))) {
2500 return false;
2502 env->tags.infallibleEmplaceBack(tagKind, tagType);
2505 return d.finishSection(*range, "tag");
2508 using NameSet = HashSet<Span<char>, NameHasher, SystemAllocPolicy>;
2510 [[nodiscard]] static bool DecodeExportName(Decoder& d, NameSet* dupSet,
2511 CacheableName* exportName) {
2512 if (!DecodeName(d, exportName)) {
2513 d.fail("expected valid export name");
2514 return false;
2517 NameSet::AddPtr p = dupSet->lookupForAdd(exportName->utf8Bytes());
2518 if (p) {
2519 d.fail("duplicate export");
2520 return false;
2523 return dupSet->add(p, exportName->utf8Bytes());
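// Each export entry is: an export name (which must be unique within the
// module), a kind byte with the same DefinitionKind values used for imports,
// and the index of the exported function, table, memory, global, or tag.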
2526 static bool DecodeExport(Decoder& d, ModuleEnvironment* env, NameSet* dupSet) {
2527 CacheableName fieldName;
2528 if (!DecodeExportName(d, dupSet, &fieldName)) {
2529 return false;
2532 uint8_t exportKind;
2533 if (!d.readFixedU8(&exportKind)) {
2534 return d.fail("failed to read export kind");
2537 switch (DefinitionKind(exportKind)) {
2538 case DefinitionKind::Function: {
2539 uint32_t funcIndex;
2540 if (!d.readVarU32(&funcIndex)) {
2541 return d.fail("expected function index");
2544 if (funcIndex >= env->numFuncs()) {
2545 return d.fail("exported function index out of bounds");
2548 env->declareFuncExported(funcIndex, /* eager */ true,
2549 /* canRefFunc */ true);
2550 return env->exports.emplaceBack(std::move(fieldName), funcIndex,
2551 DefinitionKind::Function);
2553 case DefinitionKind::Table: {
2554 uint32_t tableIndex;
2555 if (!d.readVarU32(&tableIndex)) {
2556 return d.fail("expected table index");
2559 if (tableIndex >= env->tables.length()) {
2560 return d.fail("exported table index out of bounds");
2562 env->tables[tableIndex].isExported = true;
2563 return env->exports.emplaceBack(std::move(fieldName), tableIndex,
2564 DefinitionKind::Table);
2566 case DefinitionKind::Memory: {
2567 uint32_t memoryIndex;
2568 if (!d.readVarU32(&memoryIndex)) {
2569 return d.fail("expected memory index");
2572 if (memoryIndex >= env->numMemories()) {
2573 return d.fail("exported memory index out of bounds");
2576 return env->exports.emplaceBack(std::move(fieldName), memoryIndex,
2577 DefinitionKind::Memory);
2579 case DefinitionKind::Global: {
2580 uint32_t globalIndex;
2581 if (!d.readVarU32(&globalIndex)) {
2582 return d.fail("expected global index");
2585 if (globalIndex >= env->globals.length()) {
2586 return d.fail("exported global index out of bounds");
2589 GlobalDesc* global = &env->globals[globalIndex];
2590 global->setIsExport();
2592 return env->exports.emplaceBack(std::move(fieldName), globalIndex,
2593 DefinitionKind::Global);
2595 case DefinitionKind::Tag: {
2596 uint32_t tagIndex;
2597 if (!d.readVarU32(&tagIndex)) {
2598 return d.fail("expected tag index");
2600 if (tagIndex >= env->tags.length()) {
2601 return d.fail("exported tag index out of bounds");
2604 env->tags[tagIndex].isExport = true;
2605 return env->exports.emplaceBack(std::move(fieldName), tagIndex,
2606 DefinitionKind::Tag);
2608 default:
2609 return d.fail("unexpected export kind");
2612 MOZ_CRASH("unreachable");
2615 static bool DecodeExportSection(Decoder& d, ModuleEnvironment* env) {
2616 MaybeSectionRange range;
2617 if (!d.startSection(SectionId::Export, env, &range, "export")) {
2618 return false;
2620 if (!range) {
2621 return true;
2624 NameSet dupSet;
2626 uint32_t numExports;
2627 if (!d.readVarU32(&numExports)) {
2628 return d.fail("failed to read number of exports");
2631 if (numExports > MaxExports) {
2632 return d.fail("too many exports");
2635 for (uint32_t i = 0; i < numExports; i++) {
2636 if (!DecodeExport(d, env, &dupSet)) {
2637 return false;
2641 return d.finishSection(*range, "export");
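// The start section holds a single function index. The designated function
// must be nullary and must not return anything, e.g. a `(func $init)` used
// as `(start $init)`.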
2644 static bool DecodeStartSection(Decoder& d, ModuleEnvironment* env) {
2645 MaybeSectionRange range;
2646 if (!d.startSection(SectionId::Start, env, &range, "start")) {
2647 return false;
2649 if (!range) {
2650 return true;
2653 uint32_t funcIndex;
2654 if (!d.readVarU32(&funcIndex)) {
2655 return d.fail("failed to read start func index");
2658 if (funcIndex >= env->numFuncs()) {
2659 return d.fail("unknown start function");
2662 const FuncType& funcType = *env->funcs[funcIndex].type;
2663 if (funcType.results().length() > 0) {
2664 return d.fail("start function must not return anything");
2667 if (funcType.args().length()) {
2668 return d.fail("start function must be nullary");
2671 env->declareFuncExported(funcIndex, /* eager */ true, /* canRefFunc */ false);
2672 env->startFuncIndex = Some(funcIndex);
2674 return d.finishSection(*range, "start");
2677 static inline ModuleElemSegment::Kind NormalizeElemSegmentKind(
2678 ElemSegmentKind decodedKind) {
2679 switch (decodedKind) {
2680 case ElemSegmentKind::Active:
2681 case ElemSegmentKind::ActiveWithTableIndex: {
2682 return ModuleElemSegment::Kind::Active;
2684 case ElemSegmentKind::Passive: {
2685 return ModuleElemSegment::Kind::Passive;
2687 case ElemSegmentKind::Declared: {
2688 return ModuleElemSegment::Kind::Declared;
2691 MOZ_CRASH("unexpected elem segment kind");
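// Informal summary of the element-segment flags field decoded below (bit
// assignments per the core spec's bulk-memory encoding, mirrored by
// ElemSegmentFlags):
//   bit 0 - the segment is passive or declared rather than active
//   bit 1 - with bit 0 set: declared rather than passive;
//           with bit 0 clear: an explicit table index follows
//   bit 2 - the payload is a vector of constant expressions rather than a
//           vector of function indices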
2694 static bool DecodeElemSegment(Decoder& d, ModuleEnvironment* env) {
2695 uint32_t segmentFlags;
2696 if (!d.readVarU32(&segmentFlags)) {
2697 return d.fail("expected elem segment flags field");
2700 Maybe<ElemSegmentFlags> flags = ElemSegmentFlags::construct(segmentFlags);
2701 if (!flags) {
2702 return d.fail("invalid elem segment flags field");
2705 ModuleElemSegment seg = ModuleElemSegment();
2707 ElemSegmentKind segmentKind = flags->kind();
2708 seg.kind = NormalizeElemSegmentKind(segmentKind);
2710 if (segmentKind == ElemSegmentKind::Active ||
2711 segmentKind == ElemSegmentKind::ActiveWithTableIndex) {
2712 if (env->tables.length() == 0) {
2713 return d.fail("active elem segment requires a table");
2716 uint32_t tableIndex = 0;
2717 if (segmentKind == ElemSegmentKind::ActiveWithTableIndex &&
2718 !d.readVarU32(&tableIndex)) {
2719 return d.fail("expected table index");
2721 if (tableIndex >= env->tables.length()) {
2722 return d.fail("table index out of range for element segment");
2724 seg.tableIndex = tableIndex;
2726 InitExpr offset;
2727 if (!InitExpr::decodeAndValidate(d, env, ValType::I32, &offset)) {
2728 return false;
2730 seg.offsetIfActive.emplace(std::move(offset));
2731 } else {
2732 // Too many bugs result from keeping this value zero. For passive
2733 // or declared segments, there really is no table index, and we should
2734 // never touch the field.
2735 MOZ_ASSERT(segmentKind == ElemSegmentKind::Passive ||
2736 segmentKind == ElemSegmentKind::Declared);
2737 seg.tableIndex = (uint32_t)-1;
2740 ElemSegmentPayload payload = flags->payload();
2741 RefType elemType;
2743 // `ActiveWithTableIndex`, `Declared`, and `Passive` element segments encode
2744 // the type or definition kind of the payload. `Active` element segments are
2745 // restricted to MVP behavior, which assumes only function indices.
2746 if (segmentKind == ElemSegmentKind::Active) {
2747 elemType = RefType::func();
2748 } else {
2749 switch (payload) {
2750 case ElemSegmentPayload::Expressions: {
2751 if (!d.readRefType(*env->types, env->features, &elemType)) {
2752 return false;
2754 } break;
2755 case ElemSegmentPayload::Indices: {
2756 uint8_t elemKind;
2757 if (!d.readFixedU8(&elemKind)) {
2758 return d.fail("expected element kind");
2761 if (elemKind != uint8_t(DefinitionKind::Function)) {
2762 return d.fail("invalid element kind");
2764 elemType = RefType::func();
2765 } break;
2769 // For active segments, check if the element type is compatible with the
2770 // destination table type.
2771 if (seg.active()) {
2772 RefType tblElemType = env->tables[seg.tableIndex].elemType;
2773 if (!CheckIsSubtypeOf(d, *env, d.currentOffset(),
2774 ValType(elemType).fieldType(),
2775 ValType(tblElemType).fieldType())) {
2776 return false;
2779 seg.elemType = elemType;
2781 uint32_t numElems;
2782 if (!d.readVarU32(&numElems)) {
2783 return d.fail("expected element segment size");
2786 if (numElems > MaxElemSegmentLength) {
2787 return d.fail("too many elements in element segment");
2790 bool isAsmJS = seg.active() && env->tables[seg.tableIndex].isAsmJS;
2792 switch (payload) {
2793 case ElemSegmentPayload::Indices: {
2794 seg.encoding = ModuleElemSegment::Encoding::Indices;
2795 if (!seg.elemIndices.reserve(numElems)) {
2796 return false;
2799 for (uint32_t i = 0; i < numElems; i++) {
2800 uint32_t elemIndex;
2801 if (!d.readVarU32(&elemIndex)) {
2802 return d.fail("failed to read element index");
2804 // The only valid type of index right now is a function index.
2805 if (elemIndex >= env->numFuncs()) {
2806 return d.fail("element index out of range");
2809 seg.elemIndices.infallibleAppend(elemIndex);
2810 if (!isAsmJS) {
2811 env->declareFuncExported(elemIndex, /*eager=*/false,
2812 /*canRefFunc=*/true);
2815 } break;
2816 case ElemSegmentPayload::Expressions: {
2817 seg.encoding = ModuleElemSegment::Encoding::Expressions;
2818 const uint8_t* exprsStart = d.currentPosition();
2819 seg.elemExpressions.count = numElems;
2820 for (uint32_t i = 0; i < numElems; i++) {
2821 Maybe<LitVal> unusedLiteral;
2822 if (!DecodeConstantExpression(d, env, elemType, &unusedLiteral)) {
2823 return false;
2826 const uint8_t* exprsEnd = d.currentPosition();
2827 if (!seg.elemExpressions.exprBytes.append(exprsStart, exprsEnd)) {
2828 return false;
2830 } break;
2833 env->elemSegments.infallibleAppend(std::move(seg));
2834 return true;
2837 static bool DecodeElemSection(Decoder& d, ModuleEnvironment* env) {
2838 MaybeSectionRange range;
2839 if (!d.startSection(SectionId::Elem, env, &range, "elem")) {
2840 return false;
2842 if (!range) {
2843 return true;
2846 uint32_t numSegments;
2847 if (!d.readVarU32(&numSegments)) {
2848 return d.fail("failed to read number of elem segments");
2851 if (numSegments > MaxElemSegments) {
2852 return d.fail("too many elem segments");
2855 if (!env->elemSegments.reserve(numSegments)) {
2856 return false;
2859 for (uint32_t i = 0; i < numSegments; i++) {
2860 if (!DecodeElemSegment(d, env)) {
2861 return false;
2865 return d.finishSection(*range, "elem");
2868 static bool DecodeDataCountSection(Decoder& d, ModuleEnvironment* env) {
2869 MaybeSectionRange range;
2870 if (!d.startSection(SectionId::DataCount, env, &range, "datacount")) {
2871 return false;
2873 if (!range) {
2874 return true;
2877 uint32_t dataCount;
2878 if (!d.readVarU32(&dataCount)) {
2879 return d.fail("expected data segment count");
2882 env->dataCount.emplace(dataCount);
2884 return d.finishSection(*range, "datacount");
2887 bool wasm::StartsCodeSection(const uint8_t* begin, const uint8_t* end,
2888 SectionRange* codeSection) {
2889 UniqueChars unused;
2890 Decoder d(begin, end, 0, &unused);
2892 if (!DecodePreamble(d)) {
2893 return false;
2896 while (!d.done()) {
2897 uint8_t id;
2898 SectionRange range;
2899 if (!d.readSectionHeader(&id, &range)) {
2900 return false;
2903 if (id == uint8_t(SectionId::Code)) {
2904 *codeSection = range;
2905 return true;
2908 if (!d.readBytes(range.size)) {
2909 return false;
2913 return false;
2916 bool wasm::DecodeModuleEnvironment(Decoder& d, ModuleEnvironment* env) {
2917 if (!DecodePreamble(d)) {
2918 return false;
2921 if (!DecodeTypeSection(d, env)) {
2922 return false;
2925 if (!DecodeImportSection(d, env)) {
2926 return false;
2929 // Eagerly check imports against any known builtin module to catch what
2930 // would otherwise be link errors later.
2931 if (!CheckImportsAgainstBuiltinModules(d, env)) {
2932 return false;
2935 if (!DecodeFunctionSection(d, env)) {
2936 return false;
2939 if (!DecodeTableSection(d, env)) {
2940 return false;
2943 if (!DecodeMemorySection(d, env)) {
2944 return false;
2947 if (!DecodeTagSection(d, env)) {
2948 return false;
2951 if (!DecodeGlobalSection(d, env)) {
2952 return false;
2955 if (!DecodeExportSection(d, env)) {
2956 return false;
2959 if (!DecodeStartSection(d, env)) {
2960 return false;
2963 if (!DecodeElemSection(d, env)) {
2964 return false;
2967 if (!DecodeDataCountSection(d, env)) {
2968 return false;
2971 if (!d.startSection(SectionId::Code, env, &env->codeSection, "code")) {
2972 return false;
2975 if (env->codeSection && env->codeSection->size > MaxCodeSectionBytes) {
2976 return d.fail("code section too big");
2979 return true;
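// Each entry in the code section is a function body: a size in bytes,
// followed by the local declarations (a vector of (count, valtype) pairs,
// see DecodeLocalEntries), followed by the instruction sequence, which is
// terminated by an `end` opcode (0x0B).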
2982 static bool DecodeFunctionBody(Decoder& d, const ModuleEnvironment& env,
2983 uint32_t funcIndex) {
2984 uint32_t bodySize;
2985 if (!d.readVarU32(&bodySize)) {
2986 return d.fail("expected number of function body bytes");
2989 if (bodySize > MaxFunctionBytes) {
2990 return d.fail("function body too big");
2993 if (d.bytesRemain() < bodySize) {
2994 return d.fail("function body length too big");
2997 return ValidateFunctionBody(env, funcIndex, bodySize, d);
3000 static bool DecodeCodeSection(Decoder& d, ModuleEnvironment* env) {
3001 if (!env->codeSection) {
3002 if (env->numFuncDefs() != 0) {
3003 return d.fail("expected code section");
3005 return true;
3008 uint32_t numFuncDefs;
3009 if (!d.readVarU32(&numFuncDefs)) {
3010 return d.fail("expected function body count");
3013 if (numFuncDefs != env->numFuncDefs()) {
3014 return d.fail(
3015 "function body count does not match function signature count");
3018 for (uint32_t funcDefIndex = 0; funcDefIndex < numFuncDefs; funcDefIndex++) {
3019 if (!DecodeFunctionBody(d, *env, env->numFuncImports + funcDefIndex)) {
3020 return false;
3024 return d.finishSection(*env->codeSection, "code");
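// Informal summary of the data-segment kinds decoded below (per the
// bulk-memory encoding, mirrored by DataSegmentKind):
//   0 - active in memory 0: offset expression, then the bytes
//   1 - passive: just the bytes
//   2 - active with an explicit memory index: index, offset expression, bytes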
3027 static bool DecodeDataSection(Decoder& d, ModuleEnvironment* env) {
3028 MaybeSectionRange range;
3029 if (!d.startSection(SectionId::Data, env, &range, "data")) {
3030 return false;
3032 if (!range) {
3033 if (env->dataCount.isSome() && *env->dataCount > 0) {
3034 return d.fail("number of data segments does not match declared count");
3036 return true;
3039 uint32_t numSegments;
3040 if (!d.readVarU32(&numSegments)) {
3041 return d.fail("failed to read number of data segments");
3044 if (numSegments > MaxDataSegments) {
3045 return d.fail("too many data segments");
3048 if (env->dataCount.isSome() && numSegments != *env->dataCount) {
3049 return d.fail("number of data segments does not match declared count");
3052 for (uint32_t i = 0; i < numSegments; i++) {
3053 uint32_t initializerKindVal;
3054 if (!d.readVarU32(&initializerKindVal)) {
3055 return d.fail("expected data initializer-kind field");
3058 switch (initializerKindVal) {
3059 case uint32_t(DataSegmentKind::Active):
3060 case uint32_t(DataSegmentKind::Passive):
3061 case uint32_t(DataSegmentKind::ActiveWithMemoryIndex):
3062 break;
3063 default:
3064 return d.fail("invalid data initializer-kind field");
3067 DataSegmentKind initializerKind = DataSegmentKind(initializerKindVal);
3069 if (initializerKind != DataSegmentKind::Passive &&
3070 env->numMemories() == 0) {
3071 return d.fail("active data segment requires a memory section");
3074 DataSegmentEnv seg;
3075 if (initializerKind == DataSegmentKind::ActiveWithMemoryIndex) {
3076 if (!d.readVarU32(&seg.memoryIndex)) {
3077 return d.fail("expected memory index");
3079 } else if (initializerKind == DataSegmentKind::Active) {
3080 seg.memoryIndex = 0;
3081 } else {
3082 seg.memoryIndex = InvalidMemoryIndex;
3085 if (initializerKind == DataSegmentKind::Active ||
3086 initializerKind == DataSegmentKind::ActiveWithMemoryIndex) {
3087 if (seg.memoryIndex >= env->numMemories()) {
3088 return d.fail("invalid memory index");
3091 InitExpr segOffset;
3092 ValType exprType = ToValType(env->memories[seg.memoryIndex].indexType());
3093 if (!InitExpr::decodeAndValidate(d, env, exprType, &segOffset)) {
3094 return false;
3096 seg.offsetIfActive.emplace(std::move(segOffset));
3099 if (!d.readVarU32(&seg.length)) {
3100 return d.fail("expected segment size");
3103 if (seg.length > MaxDataSegmentLengthPages * PageSize) {
3104 return d.fail("segment size too big");
3107 seg.bytecodeOffset = d.currentOffset();
3109 if (!d.readBytes(seg.length)) {
3110 return d.fail("data segment shorter than declared");
3113 if (!env->dataSegments.append(std::move(seg))) {
3114 return false;
3118 return d.finishSection(*range, "data");
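// The name section is a custom section made up of subsections, each tagged
// with a NameType id and a byte length: 0 names the module, 1 names functions
// (an index-to-name map in ascending index order), 2 names locals. Only the
// module and function subsections are decoded here; the rest are skipped.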
3121 static bool DecodeModuleNameSubsection(Decoder& d,
3122 const CustomSectionEnv& nameSection,
3123 ModuleEnvironment* env) {
3124 Maybe<uint32_t> endOffset;
3125 if (!d.startNameSubsection(NameType::Module, &endOffset)) {
3126 return false;
3128 if (!endOffset) {
3129 return true;
3132 Name moduleName;
3133 if (!d.readVarU32(&moduleName.length)) {
3134 return d.fail("failed to read module name length");
3137 MOZ_ASSERT(d.currentOffset() >= nameSection.payloadOffset);
3138 moduleName.offsetInNamePayload =
3139 d.currentOffset() - nameSection.payloadOffset;
3141 const uint8_t* bytes;
3142 if (!d.readBytes(moduleName.length, &bytes)) {
3143 return d.fail("failed to read module name bytes");
3146 if (!d.finishNameSubsection(*endOffset)) {
3147 return false;
3150 // Only save the module name if the whole subsection validates.
3151 env->moduleName.emplace(moduleName);
3152 return true;
3155 static bool DecodeFunctionNameSubsection(Decoder& d,
3156 const CustomSectionEnv& nameSection,
3157 ModuleEnvironment* env) {
3158 Maybe<uint32_t> endOffset;
3159 if (!d.startNameSubsection(NameType::Function, &endOffset)) {
3160 return false;
3162 if (!endOffset) {
3163 return true;
3166 uint32_t nameCount = 0;
3167 if (!d.readVarU32(&nameCount) || nameCount > MaxFuncs) {
3168 return d.fail("bad function name count");
3171 NameVector funcNames;
3173 for (uint32_t i = 0; i < nameCount; ++i) {
3174 uint32_t funcIndex = 0;
3175 if (!d.readVarU32(&funcIndex)) {
3176 return d.fail("unable to read function index");
3179 // Names must refer to real functions and be given in ascending order.
3180 if (funcIndex >= env->numFuncs() || funcIndex < funcNames.length()) {
3181 return d.fail("invalid function index");
3184 Name funcName;
3185 if (!d.readVarU32(&funcName.length) ||
3186 funcName.length > JS::MaxStringLength) {
3187 return d.fail("unable to read function name length");
3190 if (!funcName.length) {
3191 continue;
3194 if (!funcNames.resize(funcIndex + 1)) {
3195 return false;
3198 MOZ_ASSERT(d.currentOffset() >= nameSection.payloadOffset);
3199 funcName.offsetInNamePayload =
3200 d.currentOffset() - nameSection.payloadOffset;
3202 if (!d.readBytes(funcName.length)) {
3203 return d.fail("unable to read function name bytes");
3206 funcNames[funcIndex] = funcName;
3209 if (!d.finishNameSubsection(*endOffset)) {
3210 return false;
3213 // To encourage fully valid function name subsections, only save the names
3214 // if the entire subsection decoded correctly.
3215 env->funcNames = std::move(funcNames);
3216 return true;
3219 static bool DecodeNameSection(Decoder& d, ModuleEnvironment* env) {
3220 MaybeSectionRange range;
3221 if (!d.startCustomSection(NameSectionName, env, &range)) {
3222 return false;
3224 if (!range) {
3225 return true;
3228 env->nameCustomSectionIndex = Some(env->customSections.length() - 1);
3229 const CustomSectionEnv& nameSection = env->customSections.back();
3231 // Once started, custom sections do not report validation errors.
3233 if (!DecodeModuleNameSubsection(d, nameSection, env)) {
3234 goto finish;
3237 if (!DecodeFunctionNameSubsection(d, nameSection, env)) {
3238 goto finish;
3241 while (d.currentOffset() < range->end()) {
3242 if (!d.skipNameSubsection()) {
3243 goto finish;
3247 finish:
3248 d.finishCustomSection(NameSectionName, *range);
3249 return true;
3252 bool wasm::DecodeModuleTail(Decoder& d, ModuleEnvironment* env) {
3253 if (!DecodeDataSection(d, env)) {
3254 return false;
3257 if (!DecodeNameSection(d, env)) {
3258 return false;
3261 while (!d.done()) {
3262 if (!d.skipCustomSection(env)) {
3263 if (d.resilientMode()) {
3264 d.clearError();
3265 return true;
3267 return false;
3271 return true;
3274 // Validate algorithm.
3276 bool wasm::Validate(JSContext* cx, const ShareableBytes& bytecode,
3277 const FeatureOptions& options, UniqueChars* error) {
3278 Decoder d(bytecode.bytes, 0, error);
3280 FeatureArgs features = FeatureArgs::build(cx, options);
3281 ModuleEnvironment env(features);
3282 if (!env.init()) {
3283 return false;
3286 if (!DecodeModuleEnvironment(d, &env)) {
3287 return false;
3290 if (!DecodeCodeSection(d, &env)) {
3291 return false;
3294 if (!DecodeModuleTail(d, &env)) {
3295 return false;
3298 MOZ_ASSERT(!*error, "unreported error in decoding");
3299 return true;