Bug 1867190 - Add prefs for PHC probablities r=glandium
[gecko.git] / js / src / wasm / WasmValidate.cpp
blobe964c11d04deefc0048f406ab4a867e080e68a47
1 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
2 * vim: set ts=8 sts=2 et sw=2 tw=80:
4 * Copyright 2016 Mozilla Foundation
6 * Licensed under the Apache License, Version 2.0 (the "License");
7 * you may not use this file except in compliance with the License.
8 * You may obtain a copy of the License at
10 * http://www.apache.org/licenses/LICENSE-2.0
12 * Unless required by applicable law or agreed to in writing, software
13 * distributed under the License is distributed on an "AS IS" BASIS,
14 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15 * See the License for the specific language governing permissions and
16 * limitations under the License.
19 #include "wasm/WasmValidate.h"
21 #include "mozilla/CheckedInt.h"
22 #include "mozilla/Span.h"
23 #include "mozilla/Utf8.h"
25 #include "js/Printf.h"
26 #include "js/String.h" // JS::MaxStringLength
27 #include "vm/JSContext.h"
28 #include "vm/Realm.h"
29 #include "wasm/WasmInitExpr.h"
30 #include "wasm/WasmOpIter.h"
31 #include "wasm/WasmTypeDecls.h"
33 using namespace js;
34 using namespace js::jit;
35 using namespace js::wasm;
37 using mozilla::AsChars;
38 using mozilla::CheckedInt;
39 using mozilla::CheckedInt32;
40 using mozilla::IsUtf8;
41 using mozilla::Span;
43 // Misc helpers.
45 bool wasm::EncodeLocalEntries(Encoder& e, const ValTypeVector& locals) {
46 if (locals.length() > MaxLocals) {
47 return false;
50 uint32_t numLocalEntries = 0;
51 if (locals.length()) {
52 ValType prev = locals[0];
53 numLocalEntries++;
54 for (ValType t : locals) {
55 if (t != prev) {
56 numLocalEntries++;
57 prev = t;
62 if (!e.writeVarU32(numLocalEntries)) {
63 return false;
66 if (numLocalEntries) {
67 ValType prev = locals[0];
68 uint32_t count = 1;
69 for (uint32_t i = 1; i < locals.length(); i++, count++) {
70 if (prev != locals[i]) {
71 if (!e.writeVarU32(count)) {
72 return false;
74 if (!e.writeValType(prev)) {
75 return false;
77 prev = locals[i];
78 count = 0;
81 if (!e.writeVarU32(count)) {
82 return false;
84 if (!e.writeValType(prev)) {
85 return false;
89 return true;
92 bool wasm::DecodeLocalEntries(Decoder& d, const TypeContext& types,
93 const FeatureArgs& features,
94 ValTypeVector* locals) {
95 uint32_t numLocalEntries;
96 if (!d.readVarU32(&numLocalEntries)) {
97 return d.fail("failed to read number of local entries");
100 for (uint32_t i = 0; i < numLocalEntries; i++) {
101 uint32_t count;
102 if (!d.readVarU32(&count)) {
103 return d.fail("failed to read local entry count");
106 if (MaxLocals - locals->length() < count) {
107 return d.fail("too many locals");
110 ValType type;
111 if (!d.readValType(types, features, &type)) {
112 return false;
115 if (!locals->appendN(type, count)) {
116 return false;
120 return true;
123 bool wasm::DecodeValidatedLocalEntries(const TypeContext& types, Decoder& d,
124 ValTypeVector* locals) {
125 uint32_t numLocalEntries;
126 MOZ_ALWAYS_TRUE(d.readVarU32(&numLocalEntries));
128 for (uint32_t i = 0; i < numLocalEntries; i++) {
129 uint32_t count = d.uncheckedReadVarU32();
130 MOZ_ASSERT(MaxLocals - locals->length() >= count);
131 if (!locals->appendN(d.uncheckedReadValType(types), count)) {
132 return false;
136 return true;
139 bool wasm::CheckIsSubtypeOf(Decoder& d, const ModuleEnvironment& env,
140 size_t opcodeOffset, StorageType subType,
141 StorageType superType) {
142 if (StorageType::isSubTypeOf(subType, superType)) {
143 return true;
146 UniqueChars subText = ToString(subType, env.types);
147 if (!subText) {
148 return false;
151 UniqueChars superText = ToString(superType, env.types);
152 if (!superText) {
153 return false;
156 UniqueChars error(
157 JS_smprintf("type mismatch: expression has type %s but expected %s",
158 subText.get(), superText.get()));
159 if (!error) {
160 return false;
163 return d.fail(opcodeOffset, error.get());
166 // Function body validation.
168 static bool DecodeFunctionBodyExprs(const ModuleEnvironment& env,
169 uint32_t funcIndex,
170 const ValTypeVector& locals,
171 const uint8_t* bodyEnd, Decoder* d) {
172 ValidatingOpIter iter(env, *d);
174 if (!iter.startFunction(funcIndex, locals)) {
175 return false;
178 #define CHECK(c) \
179 if (!(c)) return false; \
180 break
182 while (true) {
183 OpBytes op;
184 if (!iter.readOp(&op)) {
185 return false;
188 Nothing nothing;
189 NothingVector nothings{};
190 ResultType unusedType;
192 switch (op.b0) {
193 case uint16_t(Op::End): {
194 LabelKind unusedKind;
195 if (!iter.readEnd(&unusedKind, &unusedType, &nothings, &nothings)) {
196 return false;
198 iter.popEnd();
199 if (iter.controlStackEmpty()) {
200 return iter.endFunction(bodyEnd);
202 break;
204 case uint16_t(Op::Nop):
205 CHECK(iter.readNop());
206 case uint16_t(Op::Drop):
207 CHECK(iter.readDrop());
208 case uint16_t(Op::Call): {
209 uint32_t unusedIndex;
210 NothingVector unusedArgs{};
211 CHECK(iter.readCall(&unusedIndex, &unusedArgs));
213 case uint16_t(Op::CallIndirect): {
214 uint32_t unusedIndex, unusedIndex2;
215 NothingVector unusedArgs{};
216 CHECK(iter.readCallIndirect(&unusedIndex, &unusedIndex2, &nothing,
217 &unusedArgs));
219 #ifdef ENABLE_WASM_TAIL_CALLS
220 case uint16_t(Op::ReturnCall): {
221 if (!env.tailCallsEnabled()) {
222 return iter.unrecognizedOpcode(&op);
224 uint32_t unusedIndex;
225 NothingVector unusedArgs{};
226 CHECK(iter.readReturnCall(&unusedIndex, &unusedArgs));
228 case uint16_t(Op::ReturnCallIndirect): {
229 if (!env.tailCallsEnabled()) {
230 return iter.unrecognizedOpcode(&op);
232 uint32_t unusedIndex, unusedIndex2;
233 NothingVector unusedArgs{};
234 CHECK(iter.readReturnCallIndirect(&unusedIndex, &unusedIndex2, &nothing,
235 &unusedArgs));
237 #endif
238 #ifdef ENABLE_WASM_FUNCTION_REFERENCES
239 case uint16_t(Op::CallRef): {
240 if (!env.functionReferencesEnabled()) {
241 return iter.unrecognizedOpcode(&op);
243 const FuncType* unusedType;
244 NothingVector unusedArgs{};
245 CHECK(iter.readCallRef(&unusedType, &nothing, &unusedArgs));
247 # ifdef ENABLE_WASM_TAIL_CALLS
248 case uint16_t(Op::ReturnCallRef): {
249 if (!env.functionReferencesEnabled() || !env.tailCallsEnabled()) {
250 return iter.unrecognizedOpcode(&op);
252 const FuncType* unusedType;
253 NothingVector unusedArgs{};
254 CHECK(iter.readReturnCallRef(&unusedType, &nothing, &unusedArgs));
256 # endif
257 #endif
258 case uint16_t(Op::I32Const): {
259 int32_t unused;
260 CHECK(iter.readI32Const(&unused));
262 case uint16_t(Op::I64Const): {
263 int64_t unused;
264 CHECK(iter.readI64Const(&unused));
266 case uint16_t(Op::F32Const): {
267 float unused;
268 CHECK(iter.readF32Const(&unused));
270 case uint16_t(Op::F64Const): {
271 double unused;
272 CHECK(iter.readF64Const(&unused));
274 case uint16_t(Op::LocalGet): {
275 uint32_t unused;
276 CHECK(iter.readGetLocal(locals, &unused));
278 case uint16_t(Op::LocalSet): {
279 uint32_t unused;
280 CHECK(iter.readSetLocal(locals, &unused, &nothing));
282 case uint16_t(Op::LocalTee): {
283 uint32_t unused;
284 CHECK(iter.readTeeLocal(locals, &unused, &nothing));
286 case uint16_t(Op::GlobalGet): {
287 uint32_t unused;
288 CHECK(iter.readGetGlobal(&unused));
290 case uint16_t(Op::GlobalSet): {
291 uint32_t unused;
292 CHECK(iter.readSetGlobal(&unused, &nothing));
294 case uint16_t(Op::TableGet): {
295 uint32_t unusedTableIndex;
296 CHECK(iter.readTableGet(&unusedTableIndex, &nothing));
298 case uint16_t(Op::TableSet): {
299 uint32_t unusedTableIndex;
300 CHECK(iter.readTableSet(&unusedTableIndex, &nothing, &nothing));
302 case uint16_t(Op::SelectNumeric): {
303 StackType unused;
304 CHECK(iter.readSelect(/*typed*/ false, &unused, &nothing, &nothing,
305 &nothing));
307 case uint16_t(Op::SelectTyped): {
308 StackType unused;
309 CHECK(iter.readSelect(/*typed*/ true, &unused, &nothing, &nothing,
310 &nothing));
312 case uint16_t(Op::Block):
313 CHECK(iter.readBlock(&unusedType));
314 case uint16_t(Op::Loop):
315 CHECK(iter.readLoop(&unusedType));
316 case uint16_t(Op::If):
317 CHECK(iter.readIf(&unusedType, &nothing));
318 case uint16_t(Op::Else):
319 CHECK(iter.readElse(&unusedType, &unusedType, &nothings));
320 case uint16_t(Op::I32Clz):
321 case uint16_t(Op::I32Ctz):
322 case uint16_t(Op::I32Popcnt):
323 CHECK(iter.readUnary(ValType::I32, &nothing));
324 case uint16_t(Op::I64Clz):
325 case uint16_t(Op::I64Ctz):
326 case uint16_t(Op::I64Popcnt):
327 CHECK(iter.readUnary(ValType::I64, &nothing));
328 case uint16_t(Op::F32Abs):
329 case uint16_t(Op::F32Neg):
330 case uint16_t(Op::F32Ceil):
331 case uint16_t(Op::F32Floor):
332 case uint16_t(Op::F32Sqrt):
333 case uint16_t(Op::F32Trunc):
334 case uint16_t(Op::F32Nearest):
335 CHECK(iter.readUnary(ValType::F32, &nothing));
336 case uint16_t(Op::F64Abs):
337 case uint16_t(Op::F64Neg):
338 case uint16_t(Op::F64Ceil):
339 case uint16_t(Op::F64Floor):
340 case uint16_t(Op::F64Sqrt):
341 case uint16_t(Op::F64Trunc):
342 case uint16_t(Op::F64Nearest):
343 CHECK(iter.readUnary(ValType::F64, &nothing));
344 case uint16_t(Op::I32Add):
345 case uint16_t(Op::I32Sub):
346 case uint16_t(Op::I32Mul):
347 case uint16_t(Op::I32DivS):
348 case uint16_t(Op::I32DivU):
349 case uint16_t(Op::I32RemS):
350 case uint16_t(Op::I32RemU):
351 case uint16_t(Op::I32And):
352 case uint16_t(Op::I32Or):
353 case uint16_t(Op::I32Xor):
354 case uint16_t(Op::I32Shl):
355 case uint16_t(Op::I32ShrS):
356 case uint16_t(Op::I32ShrU):
357 case uint16_t(Op::I32Rotl):
358 case uint16_t(Op::I32Rotr):
359 CHECK(iter.readBinary(ValType::I32, &nothing, &nothing));
360 case uint16_t(Op::I64Add):
361 case uint16_t(Op::I64Sub):
362 case uint16_t(Op::I64Mul):
363 case uint16_t(Op::I64DivS):
364 case uint16_t(Op::I64DivU):
365 case uint16_t(Op::I64RemS):
366 case uint16_t(Op::I64RemU):
367 case uint16_t(Op::I64And):
368 case uint16_t(Op::I64Or):
369 case uint16_t(Op::I64Xor):
370 case uint16_t(Op::I64Shl):
371 case uint16_t(Op::I64ShrS):
372 case uint16_t(Op::I64ShrU):
373 case uint16_t(Op::I64Rotl):
374 case uint16_t(Op::I64Rotr):
375 CHECK(iter.readBinary(ValType::I64, &nothing, &nothing));
376 case uint16_t(Op::F32Add):
377 case uint16_t(Op::F32Sub):
378 case uint16_t(Op::F32Mul):
379 case uint16_t(Op::F32Div):
380 case uint16_t(Op::F32Min):
381 case uint16_t(Op::F32Max):
382 case uint16_t(Op::F32CopySign):
383 CHECK(iter.readBinary(ValType::F32, &nothing, &nothing));
384 case uint16_t(Op::F64Add):
385 case uint16_t(Op::F64Sub):
386 case uint16_t(Op::F64Mul):
387 case uint16_t(Op::F64Div):
388 case uint16_t(Op::F64Min):
389 case uint16_t(Op::F64Max):
390 case uint16_t(Op::F64CopySign):
391 CHECK(iter.readBinary(ValType::F64, &nothing, &nothing));
392 case uint16_t(Op::I32Eq):
393 case uint16_t(Op::I32Ne):
394 case uint16_t(Op::I32LtS):
395 case uint16_t(Op::I32LtU):
396 case uint16_t(Op::I32LeS):
397 case uint16_t(Op::I32LeU):
398 case uint16_t(Op::I32GtS):
399 case uint16_t(Op::I32GtU):
400 case uint16_t(Op::I32GeS):
401 case uint16_t(Op::I32GeU):
402 CHECK(iter.readComparison(ValType::I32, &nothing, &nothing));
403 case uint16_t(Op::I64Eq):
404 case uint16_t(Op::I64Ne):
405 case uint16_t(Op::I64LtS):
406 case uint16_t(Op::I64LtU):
407 case uint16_t(Op::I64LeS):
408 case uint16_t(Op::I64LeU):
409 case uint16_t(Op::I64GtS):
410 case uint16_t(Op::I64GtU):
411 case uint16_t(Op::I64GeS):
412 case uint16_t(Op::I64GeU):
413 CHECK(iter.readComparison(ValType::I64, &nothing, &nothing));
414 case uint16_t(Op::F32Eq):
415 case uint16_t(Op::F32Ne):
416 case uint16_t(Op::F32Lt):
417 case uint16_t(Op::F32Le):
418 case uint16_t(Op::F32Gt):
419 case uint16_t(Op::F32Ge):
420 CHECK(iter.readComparison(ValType::F32, &nothing, &nothing));
421 case uint16_t(Op::F64Eq):
422 case uint16_t(Op::F64Ne):
423 case uint16_t(Op::F64Lt):
424 case uint16_t(Op::F64Le):
425 case uint16_t(Op::F64Gt):
426 case uint16_t(Op::F64Ge):
427 CHECK(iter.readComparison(ValType::F64, &nothing, &nothing));
428 case uint16_t(Op::I32Eqz):
429 CHECK(iter.readConversion(ValType::I32, ValType::I32, &nothing));
430 case uint16_t(Op::I64Eqz):
431 case uint16_t(Op::I32WrapI64):
432 CHECK(iter.readConversion(ValType::I64, ValType::I32, &nothing));
433 case uint16_t(Op::I32TruncF32S):
434 case uint16_t(Op::I32TruncF32U):
435 case uint16_t(Op::I32ReinterpretF32):
436 CHECK(iter.readConversion(ValType::F32, ValType::I32, &nothing));
437 case uint16_t(Op::I32TruncF64S):
438 case uint16_t(Op::I32TruncF64U):
439 CHECK(iter.readConversion(ValType::F64, ValType::I32, &nothing));
440 case uint16_t(Op::I64ExtendI32S):
441 case uint16_t(Op::I64ExtendI32U):
442 CHECK(iter.readConversion(ValType::I32, ValType::I64, &nothing));
443 case uint16_t(Op::I64TruncF32S):
444 case uint16_t(Op::I64TruncF32U):
445 CHECK(iter.readConversion(ValType::F32, ValType::I64, &nothing));
446 case uint16_t(Op::I64TruncF64S):
447 case uint16_t(Op::I64TruncF64U):
448 case uint16_t(Op::I64ReinterpretF64):
449 CHECK(iter.readConversion(ValType::F64, ValType::I64, &nothing));
450 case uint16_t(Op::F32ConvertI32S):
451 case uint16_t(Op::F32ConvertI32U):
452 case uint16_t(Op::F32ReinterpretI32):
453 CHECK(iter.readConversion(ValType::I32, ValType::F32, &nothing));
454 case uint16_t(Op::F32ConvertI64S):
455 case uint16_t(Op::F32ConvertI64U):
456 CHECK(iter.readConversion(ValType::I64, ValType::F32, &nothing));
457 case uint16_t(Op::F32DemoteF64):
458 CHECK(iter.readConversion(ValType::F64, ValType::F32, &nothing));
459 case uint16_t(Op::F64ConvertI32S):
460 case uint16_t(Op::F64ConvertI32U):
461 CHECK(iter.readConversion(ValType::I32, ValType::F64, &nothing));
462 case uint16_t(Op::F64ConvertI64S):
463 case uint16_t(Op::F64ConvertI64U):
464 case uint16_t(Op::F64ReinterpretI64):
465 CHECK(iter.readConversion(ValType::I64, ValType::F64, &nothing));
466 case uint16_t(Op::F64PromoteF32):
467 CHECK(iter.readConversion(ValType::F32, ValType::F64, &nothing));
468 case uint16_t(Op::I32Extend8S):
469 case uint16_t(Op::I32Extend16S):
470 CHECK(iter.readConversion(ValType::I32, ValType::I32, &nothing));
471 case uint16_t(Op::I64Extend8S):
472 case uint16_t(Op::I64Extend16S):
473 case uint16_t(Op::I64Extend32S):
474 CHECK(iter.readConversion(ValType::I64, ValType::I64, &nothing));
475 case uint16_t(Op::I32Load8S):
476 case uint16_t(Op::I32Load8U): {
477 LinearMemoryAddress<Nothing> addr;
478 CHECK(iter.readLoad(ValType::I32, 1, &addr));
480 case uint16_t(Op::I32Load16S):
481 case uint16_t(Op::I32Load16U): {
482 LinearMemoryAddress<Nothing> addr;
483 CHECK(iter.readLoad(ValType::I32, 2, &addr));
485 case uint16_t(Op::I32Load): {
486 LinearMemoryAddress<Nothing> addr;
487 CHECK(iter.readLoad(ValType::I32, 4, &addr));
489 case uint16_t(Op::I64Load8S):
490 case uint16_t(Op::I64Load8U): {
491 LinearMemoryAddress<Nothing> addr;
492 CHECK(iter.readLoad(ValType::I64, 1, &addr));
494 case uint16_t(Op::I64Load16S):
495 case uint16_t(Op::I64Load16U): {
496 LinearMemoryAddress<Nothing> addr;
497 CHECK(iter.readLoad(ValType::I64, 2, &addr));
499 case uint16_t(Op::I64Load32S):
500 case uint16_t(Op::I64Load32U): {
501 LinearMemoryAddress<Nothing> addr;
502 CHECK(iter.readLoad(ValType::I64, 4, &addr));
504 case uint16_t(Op::I64Load): {
505 LinearMemoryAddress<Nothing> addr;
506 CHECK(iter.readLoad(ValType::I64, 8, &addr));
508 case uint16_t(Op::F32Load): {
509 LinearMemoryAddress<Nothing> addr;
510 CHECK(iter.readLoad(ValType::F32, 4, &addr));
512 case uint16_t(Op::F64Load): {
513 LinearMemoryAddress<Nothing> addr;
514 CHECK(iter.readLoad(ValType::F64, 8, &addr));
516 case uint16_t(Op::I32Store8): {
517 LinearMemoryAddress<Nothing> addr;
518 CHECK(iter.readStore(ValType::I32, 1, &addr, &nothing));
520 case uint16_t(Op::I32Store16): {
521 LinearMemoryAddress<Nothing> addr;
522 CHECK(iter.readStore(ValType::I32, 2, &addr, &nothing));
524 case uint16_t(Op::I32Store): {
525 LinearMemoryAddress<Nothing> addr;
526 CHECK(iter.readStore(ValType::I32, 4, &addr, &nothing));
528 case uint16_t(Op::I64Store8): {
529 LinearMemoryAddress<Nothing> addr;
530 CHECK(iter.readStore(ValType::I64, 1, &addr, &nothing));
532 case uint16_t(Op::I64Store16): {
533 LinearMemoryAddress<Nothing> addr;
534 CHECK(iter.readStore(ValType::I64, 2, &addr, &nothing));
536 case uint16_t(Op::I64Store32): {
537 LinearMemoryAddress<Nothing> addr;
538 CHECK(iter.readStore(ValType::I64, 4, &addr, &nothing));
540 case uint16_t(Op::I64Store): {
541 LinearMemoryAddress<Nothing> addr;
542 CHECK(iter.readStore(ValType::I64, 8, &addr, &nothing));
544 case uint16_t(Op::F32Store): {
545 LinearMemoryAddress<Nothing> addr;
546 CHECK(iter.readStore(ValType::F32, 4, &addr, &nothing));
548 case uint16_t(Op::F64Store): {
549 LinearMemoryAddress<Nothing> addr;
550 CHECK(iter.readStore(ValType::F64, 8, &addr, &nothing));
552 case uint16_t(Op::MemoryGrow): {
553 uint32_t memoryIndex;
554 CHECK(iter.readMemoryGrow(&memoryIndex, &nothing));
556 case uint16_t(Op::MemorySize): {
557 uint32_t memoryIndex;
558 CHECK(iter.readMemorySize(&memoryIndex));
560 case uint16_t(Op::Br): {
561 uint32_t unusedDepth;
562 CHECK(iter.readBr(&unusedDepth, &unusedType, &nothings));
564 case uint16_t(Op::BrIf): {
565 uint32_t unusedDepth;
566 CHECK(iter.readBrIf(&unusedDepth, &unusedType, &nothings, &nothing));
568 case uint16_t(Op::BrTable): {
569 Uint32Vector unusedDepths;
570 uint32_t unusedDefault;
571 CHECK(iter.readBrTable(&unusedDepths, &unusedDefault, &unusedType,
572 &nothings, &nothing));
574 case uint16_t(Op::Return):
575 CHECK(iter.readReturn(&nothings));
576 case uint16_t(Op::Unreachable):
577 CHECK(iter.readUnreachable());
578 #ifdef ENABLE_WASM_GC
579 case uint16_t(Op::GcPrefix): {
580 if (!env.gcEnabled()) {
581 return iter.unrecognizedOpcode(&op);
583 switch (op.b1) {
584 case uint32_t(GcOp::StructNew): {
585 uint32_t unusedUint;
586 NothingVector unusedArgs{};
587 CHECK(iter.readStructNew(&unusedUint, &unusedArgs));
589 case uint32_t(GcOp::StructNewDefault): {
590 uint32_t unusedUint;
591 CHECK(iter.readStructNewDefault(&unusedUint));
593 case uint32_t(GcOp::StructGet): {
594 uint32_t unusedUint1, unusedUint2;
595 CHECK(iter.readStructGet(&unusedUint1, &unusedUint2,
596 FieldWideningOp::None, &nothing));
598 case uint32_t(GcOp::StructGetS): {
599 uint32_t unusedUint1, unusedUint2;
600 CHECK(iter.readStructGet(&unusedUint1, &unusedUint2,
601 FieldWideningOp::Signed, &nothing));
603 case uint32_t(GcOp::StructGetU): {
604 uint32_t unusedUint1, unusedUint2;
605 CHECK(iter.readStructGet(&unusedUint1, &unusedUint2,
606 FieldWideningOp::Unsigned, &nothing));
608 case uint32_t(GcOp::StructSet): {
609 uint32_t unusedUint1, unusedUint2;
610 CHECK(iter.readStructSet(&unusedUint1, &unusedUint2, &nothing,
611 &nothing));
613 case uint32_t(GcOp::ArrayNew): {
614 uint32_t unusedUint;
615 CHECK(iter.readArrayNew(&unusedUint, &nothing, &nothing));
617 case uint32_t(GcOp::ArrayNewFixed): {
618 uint32_t unusedUint1, unusedUint2;
619 CHECK(
620 iter.readArrayNewFixed(&unusedUint1, &unusedUint2, &nothings));
622 case uint32_t(GcOp::ArrayNewDefault): {
623 uint32_t unusedUint;
624 CHECK(iter.readArrayNewDefault(&unusedUint, &nothing));
626 case uint32_t(GcOp::ArrayNewData): {
627 uint32_t unusedUint1, unusedUint2;
628 CHECK(iter.readArrayNewData(&unusedUint1, &unusedUint2, &nothing,
629 &nothing));
631 case uint32_t(GcOp::ArrayNewElem): {
632 uint32_t unusedUint1, unusedUint2;
633 CHECK(iter.readArrayNewElem(&unusedUint1, &unusedUint2, &nothing,
634 &nothing));
636 case uint32_t(GcOp::ArrayInitData): {
637 uint32_t unusedUint1, unusedUint2;
638 CHECK(iter.readArrayInitData(&unusedUint1, &unusedUint2, &nothing,
639 &nothing, &nothing, &nothing));
641 case uint32_t(GcOp::ArrayInitElem): {
642 uint32_t unusedUint1, unusedUint2;
643 CHECK(iter.readArrayInitElem(&unusedUint1, &unusedUint2, &nothing,
644 &nothing, &nothing, &nothing));
646 case uint32_t(GcOp::ArrayGet): {
647 uint32_t unusedUint1;
648 CHECK(iter.readArrayGet(&unusedUint1, FieldWideningOp::None,
649 &nothing, &nothing));
651 case uint32_t(GcOp::ArrayGetS): {
652 uint32_t unusedUint1;
653 CHECK(iter.readArrayGet(&unusedUint1, FieldWideningOp::Signed,
654 &nothing, &nothing));
656 case uint32_t(GcOp::ArrayGetU): {
657 uint32_t unusedUint1;
658 CHECK(iter.readArrayGet(&unusedUint1, FieldWideningOp::Unsigned,
659 &nothing, &nothing));
661 case uint32_t(GcOp::ArraySet): {
662 uint32_t unusedUint1;
663 CHECK(
664 iter.readArraySet(&unusedUint1, &nothing, &nothing, &nothing));
666 case uint32_t(GcOp::ArrayLen): {
667 CHECK(iter.readArrayLen(&nothing));
669 case uint32_t(GcOp::ArrayCopy): {
670 int32_t unusedInt;
671 bool unusedBool;
672 CHECK(iter.readArrayCopy(&unusedInt, &unusedBool, &nothing,
673 &nothing, &nothing, &nothing, &nothing));
675 case uint32_t(GcOp::ArrayFill): {
676 uint32_t unusedTypeIndex;
677 CHECK(iter.readArrayFill(&unusedTypeIndex, &nothing, &nothing,
678 &nothing, &nothing));
680 case uint32_t(GcOp::RefI31): {
681 CHECK(iter.readConversion(ValType::I32,
682 ValType(RefType::i31().asNonNullable()),
683 &nothing));
685 case uint32_t(GcOp::I31GetS): {
686 CHECK(iter.readConversion(ValType(RefType::i31()), ValType::I32,
687 &nothing));
689 case uint32_t(GcOp::I31GetU): {
690 CHECK(iter.readConversion(ValType(RefType::i31()), ValType::I32,
691 &nothing));
693 case uint16_t(GcOp::RefTest): {
694 RefType unusedSourceType;
695 RefType unusedDestType;
696 CHECK(iter.readRefTest(false, &unusedSourceType, &unusedDestType,
697 &nothing));
699 case uint16_t(GcOp::RefTestNull): {
700 RefType unusedSourceType;
701 RefType unusedDestType;
702 CHECK(iter.readRefTest(true, &unusedSourceType, &unusedDestType,
703 &nothing));
705 case uint16_t(GcOp::RefCast): {
706 RefType unusedSourceType;
707 RefType unusedDestType;
708 CHECK(iter.readRefCast(false, &unusedSourceType, &unusedDestType,
709 &nothing));
711 case uint16_t(GcOp::RefCastNull): {
712 RefType unusedSourceType;
713 RefType unusedDestType;
714 CHECK(iter.readRefCast(true, &unusedSourceType, &unusedDestType,
715 &nothing));
717 case uint16_t(GcOp::BrOnCast): {
718 uint32_t unusedRelativeDepth;
719 RefType unusedSourceType;
720 RefType unusedDestType;
721 CHECK(iter.readBrOnCast(true, &unusedRelativeDepth,
722 &unusedSourceType, &unusedDestType,
723 &unusedType, &nothings));
725 case uint16_t(GcOp::BrOnCastFail): {
726 uint32_t unusedRelativeDepth;
727 RefType unusedSourceType;
728 RefType unusedDestType;
729 CHECK(iter.readBrOnCast(false, &unusedRelativeDepth,
730 &unusedSourceType, &unusedDestType,
731 &unusedType, &nothings));
733 case uint16_t(GcOp::AnyConvertExtern): {
734 CHECK(iter.readRefConversion(RefType::extern_(), RefType::any(),
735 &nothing));
737 case uint16_t(GcOp::ExternConvertAny): {
738 CHECK(iter.readRefConversion(RefType::any(), RefType::extern_(),
739 &nothing));
741 default:
742 return iter.unrecognizedOpcode(&op);
744 break;
746 #endif
748 #ifdef ENABLE_WASM_SIMD
749 case uint16_t(Op::SimdPrefix): {
750 if (!env.simdAvailable()) {
751 return iter.unrecognizedOpcode(&op);
753 uint32_t noIndex;
754 switch (op.b1) {
755 case uint32_t(SimdOp::I8x16ExtractLaneS):
756 case uint32_t(SimdOp::I8x16ExtractLaneU):
757 CHECK(iter.readExtractLane(ValType::I32, 16, &noIndex, &nothing));
758 case uint32_t(SimdOp::I16x8ExtractLaneS):
759 case uint32_t(SimdOp::I16x8ExtractLaneU):
760 CHECK(iter.readExtractLane(ValType::I32, 8, &noIndex, &nothing));
761 case uint32_t(SimdOp::I32x4ExtractLane):
762 CHECK(iter.readExtractLane(ValType::I32, 4, &noIndex, &nothing));
763 case uint32_t(SimdOp::I64x2ExtractLane):
764 CHECK(iter.readExtractLane(ValType::I64, 2, &noIndex, &nothing));
765 case uint32_t(SimdOp::F32x4ExtractLane):
766 CHECK(iter.readExtractLane(ValType::F32, 4, &noIndex, &nothing));
767 case uint32_t(SimdOp::F64x2ExtractLane):
768 CHECK(iter.readExtractLane(ValType::F64, 2, &noIndex, &nothing));
770 case uint32_t(SimdOp::I8x16Splat):
771 case uint32_t(SimdOp::I16x8Splat):
772 case uint32_t(SimdOp::I32x4Splat):
773 CHECK(iter.readConversion(ValType::I32, ValType::V128, &nothing));
774 case uint32_t(SimdOp::I64x2Splat):
775 CHECK(iter.readConversion(ValType::I64, ValType::V128, &nothing));
776 case uint32_t(SimdOp::F32x4Splat):
777 CHECK(iter.readConversion(ValType::F32, ValType::V128, &nothing));
778 case uint32_t(SimdOp::F64x2Splat):
779 CHECK(iter.readConversion(ValType::F64, ValType::V128, &nothing));
781 case uint32_t(SimdOp::V128AnyTrue):
782 case uint32_t(SimdOp::I8x16AllTrue):
783 case uint32_t(SimdOp::I16x8AllTrue):
784 case uint32_t(SimdOp::I32x4AllTrue):
785 case uint32_t(SimdOp::I64x2AllTrue):
786 case uint32_t(SimdOp::I8x16Bitmask):
787 case uint32_t(SimdOp::I16x8Bitmask):
788 case uint32_t(SimdOp::I32x4Bitmask):
789 case uint32_t(SimdOp::I64x2Bitmask):
790 CHECK(iter.readConversion(ValType::V128, ValType::I32, &nothing));
792 case uint32_t(SimdOp::I8x16ReplaceLane):
793 CHECK(iter.readReplaceLane(ValType::I32, 16, &noIndex, &nothing,
794 &nothing));
795 case uint32_t(SimdOp::I16x8ReplaceLane):
796 CHECK(iter.readReplaceLane(ValType::I32, 8, &noIndex, &nothing,
797 &nothing));
798 case uint32_t(SimdOp::I32x4ReplaceLane):
799 CHECK(iter.readReplaceLane(ValType::I32, 4, &noIndex, &nothing,
800 &nothing));
801 case uint32_t(SimdOp::I64x2ReplaceLane):
802 CHECK(iter.readReplaceLane(ValType::I64, 2, &noIndex, &nothing,
803 &nothing));
804 case uint32_t(SimdOp::F32x4ReplaceLane):
805 CHECK(iter.readReplaceLane(ValType::F32, 4, &noIndex, &nothing,
806 &nothing));
807 case uint32_t(SimdOp::F64x2ReplaceLane):
808 CHECK(iter.readReplaceLane(ValType::F64, 2, &noIndex, &nothing,
809 &nothing));
811 case uint32_t(SimdOp::I8x16Eq):
812 case uint32_t(SimdOp::I8x16Ne):
813 case uint32_t(SimdOp::I8x16LtS):
814 case uint32_t(SimdOp::I8x16LtU):
815 case uint32_t(SimdOp::I8x16GtS):
816 case uint32_t(SimdOp::I8x16GtU):
817 case uint32_t(SimdOp::I8x16LeS):
818 case uint32_t(SimdOp::I8x16LeU):
819 case uint32_t(SimdOp::I8x16GeS):
820 case uint32_t(SimdOp::I8x16GeU):
821 case uint32_t(SimdOp::I16x8Eq):
822 case uint32_t(SimdOp::I16x8Ne):
823 case uint32_t(SimdOp::I16x8LtS):
824 case uint32_t(SimdOp::I16x8LtU):
825 case uint32_t(SimdOp::I16x8GtS):
826 case uint32_t(SimdOp::I16x8GtU):
827 case uint32_t(SimdOp::I16x8LeS):
828 case uint32_t(SimdOp::I16x8LeU):
829 case uint32_t(SimdOp::I16x8GeS):
830 case uint32_t(SimdOp::I16x8GeU):
831 case uint32_t(SimdOp::I32x4Eq):
832 case uint32_t(SimdOp::I32x4Ne):
833 case uint32_t(SimdOp::I32x4LtS):
834 case uint32_t(SimdOp::I32x4LtU):
835 case uint32_t(SimdOp::I32x4GtS):
836 case uint32_t(SimdOp::I32x4GtU):
837 case uint32_t(SimdOp::I32x4LeS):
838 case uint32_t(SimdOp::I32x4LeU):
839 case uint32_t(SimdOp::I32x4GeS):
840 case uint32_t(SimdOp::I32x4GeU):
841 case uint32_t(SimdOp::I64x2Eq):
842 case uint32_t(SimdOp::I64x2Ne):
843 case uint32_t(SimdOp::I64x2LtS):
844 case uint32_t(SimdOp::I64x2GtS):
845 case uint32_t(SimdOp::I64x2LeS):
846 case uint32_t(SimdOp::I64x2GeS):
847 case uint32_t(SimdOp::F32x4Eq):
848 case uint32_t(SimdOp::F32x4Ne):
849 case uint32_t(SimdOp::F32x4Lt):
850 case uint32_t(SimdOp::F32x4Gt):
851 case uint32_t(SimdOp::F32x4Le):
852 case uint32_t(SimdOp::F32x4Ge):
853 case uint32_t(SimdOp::F64x2Eq):
854 case uint32_t(SimdOp::F64x2Ne):
855 case uint32_t(SimdOp::F64x2Lt):
856 case uint32_t(SimdOp::F64x2Gt):
857 case uint32_t(SimdOp::F64x2Le):
858 case uint32_t(SimdOp::F64x2Ge):
859 case uint32_t(SimdOp::V128And):
860 case uint32_t(SimdOp::V128Or):
861 case uint32_t(SimdOp::V128Xor):
862 case uint32_t(SimdOp::V128AndNot):
863 case uint32_t(SimdOp::I8x16AvgrU):
864 case uint32_t(SimdOp::I16x8AvgrU):
865 case uint32_t(SimdOp::I8x16Add):
866 case uint32_t(SimdOp::I8x16AddSatS):
867 case uint32_t(SimdOp::I8x16AddSatU):
868 case uint32_t(SimdOp::I8x16Sub):
869 case uint32_t(SimdOp::I8x16SubSatS):
870 case uint32_t(SimdOp::I8x16SubSatU):
871 case uint32_t(SimdOp::I8x16MinS):
872 case uint32_t(SimdOp::I8x16MinU):
873 case uint32_t(SimdOp::I8x16MaxS):
874 case uint32_t(SimdOp::I8x16MaxU):
875 case uint32_t(SimdOp::I16x8Add):
876 case uint32_t(SimdOp::I16x8AddSatS):
877 case uint32_t(SimdOp::I16x8AddSatU):
878 case uint32_t(SimdOp::I16x8Sub):
879 case uint32_t(SimdOp::I16x8SubSatS):
880 case uint32_t(SimdOp::I16x8SubSatU):
881 case uint32_t(SimdOp::I16x8Mul):
882 case uint32_t(SimdOp::I16x8MinS):
883 case uint32_t(SimdOp::I16x8MinU):
884 case uint32_t(SimdOp::I16x8MaxS):
885 case uint32_t(SimdOp::I16x8MaxU):
886 case uint32_t(SimdOp::I32x4Add):
887 case uint32_t(SimdOp::I32x4Sub):
888 case uint32_t(SimdOp::I32x4Mul):
889 case uint32_t(SimdOp::I32x4MinS):
890 case uint32_t(SimdOp::I32x4MinU):
891 case uint32_t(SimdOp::I32x4MaxS):
892 case uint32_t(SimdOp::I32x4MaxU):
893 case uint32_t(SimdOp::I64x2Add):
894 case uint32_t(SimdOp::I64x2Sub):
895 case uint32_t(SimdOp::I64x2Mul):
896 case uint32_t(SimdOp::F32x4Add):
897 case uint32_t(SimdOp::F32x4Sub):
898 case uint32_t(SimdOp::F32x4Mul):
899 case uint32_t(SimdOp::F32x4Div):
900 case uint32_t(SimdOp::F32x4Min):
901 case uint32_t(SimdOp::F32x4Max):
902 case uint32_t(SimdOp::F64x2Add):
903 case uint32_t(SimdOp::F64x2Sub):
904 case uint32_t(SimdOp::F64x2Mul):
905 case uint32_t(SimdOp::F64x2Div):
906 case uint32_t(SimdOp::F64x2Min):
907 case uint32_t(SimdOp::F64x2Max):
908 case uint32_t(SimdOp::I8x16NarrowI16x8S):
909 case uint32_t(SimdOp::I8x16NarrowI16x8U):
910 case uint32_t(SimdOp::I16x8NarrowI32x4S):
911 case uint32_t(SimdOp::I16x8NarrowI32x4U):
912 case uint32_t(SimdOp::I8x16Swizzle):
913 case uint32_t(SimdOp::F32x4PMax):
914 case uint32_t(SimdOp::F32x4PMin):
915 case uint32_t(SimdOp::F64x2PMax):
916 case uint32_t(SimdOp::F64x2PMin):
917 case uint32_t(SimdOp::I32x4DotI16x8S):
918 case uint32_t(SimdOp::I16x8ExtmulLowI8x16S):
919 case uint32_t(SimdOp::I16x8ExtmulHighI8x16S):
920 case uint32_t(SimdOp::I16x8ExtmulLowI8x16U):
921 case uint32_t(SimdOp::I16x8ExtmulHighI8x16U):
922 case uint32_t(SimdOp::I32x4ExtmulLowI16x8S):
923 case uint32_t(SimdOp::I32x4ExtmulHighI16x8S):
924 case uint32_t(SimdOp::I32x4ExtmulLowI16x8U):
925 case uint32_t(SimdOp::I32x4ExtmulHighI16x8U):
926 case uint32_t(SimdOp::I64x2ExtmulLowI32x4S):
927 case uint32_t(SimdOp::I64x2ExtmulHighI32x4S):
928 case uint32_t(SimdOp::I64x2ExtmulLowI32x4U):
929 case uint32_t(SimdOp::I64x2ExtmulHighI32x4U):
930 case uint32_t(SimdOp::I16x8Q15MulrSatS):
931 CHECK(iter.readBinary(ValType::V128, &nothing, &nothing));
933 case uint32_t(SimdOp::I8x16Neg):
934 case uint32_t(SimdOp::I16x8Neg):
935 case uint32_t(SimdOp::I16x8ExtendLowI8x16S):
936 case uint32_t(SimdOp::I16x8ExtendHighI8x16S):
937 case uint32_t(SimdOp::I16x8ExtendLowI8x16U):
938 case uint32_t(SimdOp::I16x8ExtendHighI8x16U):
939 case uint32_t(SimdOp::I32x4Neg):
940 case uint32_t(SimdOp::I32x4ExtendLowI16x8S):
941 case uint32_t(SimdOp::I32x4ExtendHighI16x8S):
942 case uint32_t(SimdOp::I32x4ExtendLowI16x8U):
943 case uint32_t(SimdOp::I32x4ExtendHighI16x8U):
944 case uint32_t(SimdOp::I32x4TruncSatF32x4S):
945 case uint32_t(SimdOp::I32x4TruncSatF32x4U):
946 case uint32_t(SimdOp::I64x2Neg):
947 case uint32_t(SimdOp::I64x2ExtendLowI32x4S):
948 case uint32_t(SimdOp::I64x2ExtendHighI32x4S):
949 case uint32_t(SimdOp::I64x2ExtendLowI32x4U):
950 case uint32_t(SimdOp::I64x2ExtendHighI32x4U):
951 case uint32_t(SimdOp::F32x4Abs):
952 case uint32_t(SimdOp::F32x4Neg):
953 case uint32_t(SimdOp::F32x4Sqrt):
954 case uint32_t(SimdOp::F32x4ConvertI32x4S):
955 case uint32_t(SimdOp::F32x4ConvertI32x4U):
956 case uint32_t(SimdOp::F64x2Abs):
957 case uint32_t(SimdOp::F64x2Neg):
958 case uint32_t(SimdOp::F64x2Sqrt):
959 case uint32_t(SimdOp::V128Not):
960 case uint32_t(SimdOp::I8x16Popcnt):
961 case uint32_t(SimdOp::I8x16Abs):
962 case uint32_t(SimdOp::I16x8Abs):
963 case uint32_t(SimdOp::I32x4Abs):
964 case uint32_t(SimdOp::I64x2Abs):
965 case uint32_t(SimdOp::F32x4Ceil):
966 case uint32_t(SimdOp::F32x4Floor):
967 case uint32_t(SimdOp::F32x4Trunc):
968 case uint32_t(SimdOp::F32x4Nearest):
969 case uint32_t(SimdOp::F64x2Ceil):
970 case uint32_t(SimdOp::F64x2Floor):
971 case uint32_t(SimdOp::F64x2Trunc):
972 case uint32_t(SimdOp::F64x2Nearest):
973 case uint32_t(SimdOp::F32x4DemoteF64x2Zero):
974 case uint32_t(SimdOp::F64x2PromoteLowF32x4):
975 case uint32_t(SimdOp::F64x2ConvertLowI32x4S):
976 case uint32_t(SimdOp::F64x2ConvertLowI32x4U):
977 case uint32_t(SimdOp::I32x4TruncSatF64x2SZero):
978 case uint32_t(SimdOp::I32x4TruncSatF64x2UZero):
979 case uint32_t(SimdOp::I16x8ExtaddPairwiseI8x16S):
980 case uint32_t(SimdOp::I16x8ExtaddPairwiseI8x16U):
981 case uint32_t(SimdOp::I32x4ExtaddPairwiseI16x8S):
982 case uint32_t(SimdOp::I32x4ExtaddPairwiseI16x8U):
983 CHECK(iter.readUnary(ValType::V128, &nothing));
985 case uint32_t(SimdOp::I8x16Shl):
986 case uint32_t(SimdOp::I8x16ShrS):
987 case uint32_t(SimdOp::I8x16ShrU):
988 case uint32_t(SimdOp::I16x8Shl):
989 case uint32_t(SimdOp::I16x8ShrS):
990 case uint32_t(SimdOp::I16x8ShrU):
991 case uint32_t(SimdOp::I32x4Shl):
992 case uint32_t(SimdOp::I32x4ShrS):
993 case uint32_t(SimdOp::I32x4ShrU):
994 case uint32_t(SimdOp::I64x2Shl):
995 case uint32_t(SimdOp::I64x2ShrS):
996 case uint32_t(SimdOp::I64x2ShrU):
997 CHECK(iter.readVectorShift(&nothing, &nothing));
999 case uint32_t(SimdOp::V128Bitselect):
1000 CHECK(
1001 iter.readTernary(ValType::V128, &nothing, &nothing, &nothing));
1003 case uint32_t(SimdOp::I8x16Shuffle): {
1004 V128 mask;
1005 CHECK(iter.readVectorShuffle(&nothing, &nothing, &mask));
1008 case uint32_t(SimdOp::V128Const): {
1009 V128 noVector;
1010 CHECK(iter.readV128Const(&noVector));
1013 case uint32_t(SimdOp::V128Load): {
1014 LinearMemoryAddress<Nothing> addr;
1015 CHECK(iter.readLoad(ValType::V128, 16, &addr));
1018 case uint32_t(SimdOp::V128Load8Splat): {
1019 LinearMemoryAddress<Nothing> addr;
1020 CHECK(iter.readLoadSplat(1, &addr));
1023 case uint32_t(SimdOp::V128Load16Splat): {
1024 LinearMemoryAddress<Nothing> addr;
1025 CHECK(iter.readLoadSplat(2, &addr));
1028 case uint32_t(SimdOp::V128Load32Splat): {
1029 LinearMemoryAddress<Nothing> addr;
1030 CHECK(iter.readLoadSplat(4, &addr));
1033 case uint32_t(SimdOp::V128Load64Splat): {
1034 LinearMemoryAddress<Nothing> addr;
1035 CHECK(iter.readLoadSplat(8, &addr));
1038 case uint32_t(SimdOp::V128Load8x8S):
1039 case uint32_t(SimdOp::V128Load8x8U): {
1040 LinearMemoryAddress<Nothing> addr;
1041 CHECK(iter.readLoadExtend(&addr));
1044 case uint32_t(SimdOp::V128Load16x4S):
1045 case uint32_t(SimdOp::V128Load16x4U): {
1046 LinearMemoryAddress<Nothing> addr;
1047 CHECK(iter.readLoadExtend(&addr));
1050 case uint32_t(SimdOp::V128Load32x2S):
1051 case uint32_t(SimdOp::V128Load32x2U): {
1052 LinearMemoryAddress<Nothing> addr;
1053 CHECK(iter.readLoadExtend(&addr));
1056 case uint32_t(SimdOp::V128Store): {
1057 LinearMemoryAddress<Nothing> addr;
1058 CHECK(iter.readStore(ValType::V128, 16, &addr, &nothing));
1061 case uint32_t(SimdOp::V128Load32Zero): {
1062 LinearMemoryAddress<Nothing> addr;
1063 CHECK(iter.readLoadSplat(4, &addr));
1066 case uint32_t(SimdOp::V128Load64Zero): {
1067 LinearMemoryAddress<Nothing> addr;
1068 CHECK(iter.readLoadSplat(8, &addr));
1071 case uint32_t(SimdOp::V128Load8Lane): {
1072 LinearMemoryAddress<Nothing> addr;
1073 CHECK(iter.readLoadLane(1, &addr, &noIndex, &nothing));
1076 case uint32_t(SimdOp::V128Load16Lane): {
1077 LinearMemoryAddress<Nothing> addr;
1078 CHECK(iter.readLoadLane(2, &addr, &noIndex, &nothing));
1081 case uint32_t(SimdOp::V128Load32Lane): {
1082 LinearMemoryAddress<Nothing> addr;
1083 CHECK(iter.readLoadLane(4, &addr, &noIndex, &nothing));
1086 case uint32_t(SimdOp::V128Load64Lane): {
1087 LinearMemoryAddress<Nothing> addr;
1088 CHECK(iter.readLoadLane(8, &addr, &noIndex, &nothing));
1091 case uint32_t(SimdOp::V128Store8Lane): {
1092 LinearMemoryAddress<Nothing> addr;
1093 CHECK(iter.readStoreLane(1, &addr, &noIndex, &nothing));
1096 case uint32_t(SimdOp::V128Store16Lane): {
1097 LinearMemoryAddress<Nothing> addr;
1098 CHECK(iter.readStoreLane(2, &addr, &noIndex, &nothing));
1101 case uint32_t(SimdOp::V128Store32Lane): {
1102 LinearMemoryAddress<Nothing> addr;
1103 CHECK(iter.readStoreLane(4, &addr, &noIndex, &nothing));
1106 case uint32_t(SimdOp::V128Store64Lane): {
1107 LinearMemoryAddress<Nothing> addr;
1108 CHECK(iter.readStoreLane(8, &addr, &noIndex, &nothing));
1111 # ifdef ENABLE_WASM_RELAXED_SIMD
1112 case uint32_t(SimdOp::F32x4RelaxedMadd):
1113 case uint32_t(SimdOp::F32x4RelaxedNmadd):
1114 case uint32_t(SimdOp::F64x2RelaxedMadd):
1115 case uint32_t(SimdOp::F64x2RelaxedNmadd):
1116 case uint32_t(SimdOp::I8x16RelaxedLaneSelect):
1117 case uint32_t(SimdOp::I16x8RelaxedLaneSelect):
1118 case uint32_t(SimdOp::I32x4RelaxedLaneSelect):
1119 case uint32_t(SimdOp::I64x2RelaxedLaneSelect):
1120 case uint32_t(SimdOp::I32x4DotI8x16I7x16AddS): {
1121 if (!env.v128RelaxedEnabled()) {
1122 return iter.unrecognizedOpcode(&op);
1124 CHECK(
1125 iter.readTernary(ValType::V128, &nothing, &nothing, &nothing));
1127 case uint32_t(SimdOp::F32x4RelaxedMin):
1128 case uint32_t(SimdOp::F32x4RelaxedMax):
1129 case uint32_t(SimdOp::F64x2RelaxedMin):
1130 case uint32_t(SimdOp::F64x2RelaxedMax):
1131 case uint32_t(SimdOp::I16x8RelaxedQ15MulrS):
1132 case uint32_t(SimdOp::I16x8DotI8x16I7x16S): {
1133 if (!env.v128RelaxedEnabled()) {
1134 return iter.unrecognizedOpcode(&op);
1136 CHECK(iter.readBinary(ValType::V128, &nothing, &nothing));
1138 case uint32_t(SimdOp::I32x4RelaxedTruncF32x4S):
1139 case uint32_t(SimdOp::I32x4RelaxedTruncF32x4U):
1140 case uint32_t(SimdOp::I32x4RelaxedTruncF64x2SZero):
1141 case uint32_t(SimdOp::I32x4RelaxedTruncF64x2UZero): {
1142 if (!env.v128RelaxedEnabled()) {
1143 return iter.unrecognizedOpcode(&op);
1145 CHECK(iter.readUnary(ValType::V128, &nothing));
1147 case uint32_t(SimdOp::I8x16RelaxedSwizzle): {
1148 if (!env.v128RelaxedEnabled()) {
1149 return iter.unrecognizedOpcode(&op);
1151 CHECK(iter.readBinary(ValType::V128, &nothing, &nothing));
1153 # endif
1155 default:
1156 return iter.unrecognizedOpcode(&op);
1158 break;
1160 #endif // ENABLE_WASM_SIMD
1162 case uint16_t(Op::MiscPrefix): {
1163 switch (op.b1) {
1164 case uint32_t(MiscOp::I32TruncSatF32S):
1165 case uint32_t(MiscOp::I32TruncSatF32U):
1166 CHECK(iter.readConversion(ValType::F32, ValType::I32, &nothing));
1167 case uint32_t(MiscOp::I32TruncSatF64S):
1168 case uint32_t(MiscOp::I32TruncSatF64U):
1169 CHECK(iter.readConversion(ValType::F64, ValType::I32, &nothing));
1170 case uint32_t(MiscOp::I64TruncSatF32S):
1171 case uint32_t(MiscOp::I64TruncSatF32U):
1172 CHECK(iter.readConversion(ValType::F32, ValType::I64, &nothing));
1173 case uint32_t(MiscOp::I64TruncSatF64S):
1174 case uint32_t(MiscOp::I64TruncSatF64U):
1175 CHECK(iter.readConversion(ValType::F64, ValType::I64, &nothing));
1176 case uint32_t(MiscOp::MemoryCopy): {
1177 uint32_t unusedDestMemIndex;
1178 uint32_t unusedSrcMemIndex;
1179 CHECK(iter.readMemOrTableCopy(/*isMem=*/true, &unusedDestMemIndex,
1180 &nothing, &unusedSrcMemIndex,
1181 &nothing, &nothing));
1183 case uint32_t(MiscOp::DataDrop): {
1184 uint32_t unusedSegIndex;
1185 CHECK(iter.readDataOrElemDrop(/*isData=*/true, &unusedSegIndex));
1187 case uint32_t(MiscOp::MemoryFill): {
1188 uint32_t memoryIndex;
1189 CHECK(iter.readMemFill(&memoryIndex, &nothing, &nothing, &nothing));
1191 case uint32_t(MiscOp::MemoryInit): {
1192 uint32_t unusedSegIndex;
1193 uint32_t unusedMemoryIndex;
1194 CHECK(iter.readMemOrTableInit(/*isMem=*/true, &unusedSegIndex,
1195 &unusedMemoryIndex, &nothing,
1196 &nothing, &nothing));
1198 case uint32_t(MiscOp::TableCopy): {
1199 uint32_t unusedDestTableIndex;
1200 uint32_t unusedSrcTableIndex;
1201 CHECK(iter.readMemOrTableCopy(
1202 /*isMem=*/false, &unusedDestTableIndex, &nothing,
1203 &unusedSrcTableIndex, &nothing, &nothing));
1205 case uint32_t(MiscOp::ElemDrop): {
1206 uint32_t unusedSegIndex;
1207 CHECK(iter.readDataOrElemDrop(/*isData=*/false, &unusedSegIndex));
1209 case uint32_t(MiscOp::TableInit): {
1210 uint32_t unusedSegIndex;
1211 uint32_t unusedTableIndex;
1212 CHECK(iter.readMemOrTableInit(/*isMem=*/false, &unusedSegIndex,
1213 &unusedTableIndex, &nothing, &nothing,
1214 &nothing));
1216 case uint32_t(MiscOp::TableFill): {
1217 uint32_t unusedTableIndex;
1218 CHECK(iter.readTableFill(&unusedTableIndex, &nothing, &nothing,
1219 &nothing));
1221 #ifdef ENABLE_WASM_MEMORY_CONTROL
1222 case uint32_t(MiscOp::MemoryDiscard): {
1223 if (!env.memoryControlEnabled()) {
1224 return iter.unrecognizedOpcode(&op);
1226 uint32_t unusedMemoryIndex;
1227 CHECK(iter.readMemDiscard(&unusedMemoryIndex, &nothing, &nothing));
1229 #endif
1230 case uint32_t(MiscOp::TableGrow): {
1231 uint32_t unusedTableIndex;
1232 CHECK(iter.readTableGrow(&unusedTableIndex, &nothing, &nothing));
1234 case uint32_t(MiscOp::TableSize): {
1235 uint32_t unusedTableIndex;
1236 CHECK(iter.readTableSize(&unusedTableIndex));
1238 default:
1239 return iter.unrecognizedOpcode(&op);
1241 break;
1243 #ifdef ENABLE_WASM_FUNCTION_REFERENCES
1244 case uint16_t(Op::RefAsNonNull): {
1245 if (!env.functionReferencesEnabled()) {
1246 return iter.unrecognizedOpcode(&op);
1248 CHECK(iter.readRefAsNonNull(&nothing));
1250 case uint16_t(Op::BrOnNull): {
1251 if (!env.functionReferencesEnabled()) {
1252 return iter.unrecognizedOpcode(&op);
1254 uint32_t unusedDepth;
1255 CHECK(
1256 iter.readBrOnNull(&unusedDepth, &unusedType, &nothings, &nothing));
1258 case uint16_t(Op::BrOnNonNull): {
1259 if (!env.functionReferencesEnabled()) {
1260 return iter.unrecognizedOpcode(&op);
1262 uint32_t unusedDepth;
1263 CHECK(iter.readBrOnNonNull(&unusedDepth, &unusedType, &nothings,
1264 &nothing));
1266 #endif
1267 #ifdef ENABLE_WASM_GC
1268 case uint16_t(Op::RefEq): {
1269 if (!env.gcEnabled()) {
1270 return iter.unrecognizedOpcode(&op);
1272 CHECK(iter.readComparison(RefType::eq(), &nothing, &nothing));
1274 #endif
1275 case uint16_t(Op::RefFunc): {
1276 uint32_t unusedIndex;
1277 CHECK(iter.readRefFunc(&unusedIndex));
1279 case uint16_t(Op::RefNull): {
1280 RefType type;
1281 CHECK(iter.readRefNull(&type));
1283 case uint16_t(Op::RefIsNull): {
1284 Nothing nothing;
1285 CHECK(iter.readRefIsNull(&nothing));
1287 case uint16_t(Op::Try):
1288 if (!env.exceptionsEnabled()) {
1289 return iter.unrecognizedOpcode(&op);
1291 CHECK(iter.readTry(&unusedType));
1292 case uint16_t(Op::Catch): {
1293 if (!env.exceptionsEnabled()) {
1294 return iter.unrecognizedOpcode(&op);
1296 LabelKind unusedKind;
1297 uint32_t unusedIndex;
1298 CHECK(iter.readCatch(&unusedKind, &unusedIndex, &unusedType,
1299 &unusedType, &nothings));
1301 case uint16_t(Op::CatchAll): {
1302 if (!env.exceptionsEnabled()) {
1303 return iter.unrecognizedOpcode(&op);
1305 LabelKind unusedKind;
1306 CHECK(iter.readCatchAll(&unusedKind, &unusedType, &unusedType,
1307 &nothings));
1309 case uint16_t(Op::Delegate): {
1310 if (!env.exceptionsEnabled()) {
1311 return iter.unrecognizedOpcode(&op);
1313 uint32_t unusedDepth;
1314 if (!iter.readDelegate(&unusedDepth, &unusedType, &nothings)) {
1315 return false;
1317 iter.popDelegate();
1318 break;
1320 case uint16_t(Op::Throw): {
1321 if (!env.exceptionsEnabled()) {
1322 return iter.unrecognizedOpcode(&op);
1324 uint32_t unusedIndex;
1325 CHECK(iter.readThrow(&unusedIndex, &nothings));
1327 case uint16_t(Op::Rethrow): {
1328 if (!env.exceptionsEnabled()) {
1329 return iter.unrecognizedOpcode(&op);
1331 uint32_t unusedDepth;
1332 CHECK(iter.readRethrow(&unusedDepth));
1334 case uint16_t(Op::ThrowRef): {
1335 if (!env.exnrefEnabled()) {
1336 return iter.unrecognizedOpcode(&op);
1338 CHECK(iter.readThrowRef(&nothing));
1340 case uint16_t(Op::TryTable): {
1341 if (!env.exnrefEnabled()) {
1342 return iter.unrecognizedOpcode(&op);
1344 TryTableCatchVector catches;
1345 CHECK(iter.readTryTable(&unusedType, &catches));
1347 case uint16_t(Op::ThreadPrefix): {
1348 // Though thread ops can be used on nonshared memories, we make them
1349 // unavailable if shared memory has been disabled in the prefs, for
1350 // maximum predictability and safety and consistency with JS.
1351 if (env.sharedMemoryEnabled() == Shareable::False) {
1352 return iter.unrecognizedOpcode(&op);
1354 switch (op.b1) {
1355 case uint32_t(ThreadOp::Wake): {
1356 LinearMemoryAddress<Nothing> addr;
1357 CHECK(iter.readWake(&addr, &nothing));
1359 case uint32_t(ThreadOp::I32Wait): {
1360 LinearMemoryAddress<Nothing> addr;
1361 CHECK(iter.readWait(&addr, ValType::I32, 4, &nothing, &nothing));
1363 case uint32_t(ThreadOp::I64Wait): {
1364 LinearMemoryAddress<Nothing> addr;
1365 CHECK(iter.readWait(&addr, ValType::I64, 8, &nothing, &nothing));
1367 case uint32_t(ThreadOp::Fence): {
1368 CHECK(iter.readFence());
1370 case uint32_t(ThreadOp::I32AtomicLoad): {
1371 LinearMemoryAddress<Nothing> addr;
1372 CHECK(iter.readAtomicLoad(&addr, ValType::I32, 4));
1374 case uint32_t(ThreadOp::I64AtomicLoad): {
1375 LinearMemoryAddress<Nothing> addr;
1376 CHECK(iter.readAtomicLoad(&addr, ValType::I64, 8));
1378 case uint32_t(ThreadOp::I32AtomicLoad8U): {
1379 LinearMemoryAddress<Nothing> addr;
1380 CHECK(iter.readAtomicLoad(&addr, ValType::I32, 1));
1382 case uint32_t(ThreadOp::I32AtomicLoad16U): {
1383 LinearMemoryAddress<Nothing> addr;
1384 CHECK(iter.readAtomicLoad(&addr, ValType::I32, 2));
1386 case uint32_t(ThreadOp::I64AtomicLoad8U): {
1387 LinearMemoryAddress<Nothing> addr;
1388 CHECK(iter.readAtomicLoad(&addr, ValType::I64, 1));
1390 case uint32_t(ThreadOp::I64AtomicLoad16U): {
1391 LinearMemoryAddress<Nothing> addr;
1392 CHECK(iter.readAtomicLoad(&addr, ValType::I64, 2));
1394 case uint32_t(ThreadOp::I64AtomicLoad32U): {
1395 LinearMemoryAddress<Nothing> addr;
1396 CHECK(iter.readAtomicLoad(&addr, ValType::I64, 4));
1398 case uint32_t(ThreadOp::I32AtomicStore): {
1399 LinearMemoryAddress<Nothing> addr;
1400 CHECK(iter.readAtomicStore(&addr, ValType::I32, 4, &nothing));
1402 case uint32_t(ThreadOp::I64AtomicStore): {
1403 LinearMemoryAddress<Nothing> addr;
1404 CHECK(iter.readAtomicStore(&addr, ValType::I64, 8, &nothing));
1406 case uint32_t(ThreadOp::I32AtomicStore8U): {
1407 LinearMemoryAddress<Nothing> addr;
1408 CHECK(iter.readAtomicStore(&addr, ValType::I32, 1, &nothing));
1410 case uint32_t(ThreadOp::I32AtomicStore16U): {
1411 LinearMemoryAddress<Nothing> addr;
1412 CHECK(iter.readAtomicStore(&addr, ValType::I32, 2, &nothing));
1414 case uint32_t(ThreadOp::I64AtomicStore8U): {
1415 LinearMemoryAddress<Nothing> addr;
1416 CHECK(iter.readAtomicStore(&addr, ValType::I64, 1, &nothing));
1418 case uint32_t(ThreadOp::I64AtomicStore16U): {
1419 LinearMemoryAddress<Nothing> addr;
1420 CHECK(iter.readAtomicStore(&addr, ValType::I64, 2, &nothing));
1422 case uint32_t(ThreadOp::I64AtomicStore32U): {
1423 LinearMemoryAddress<Nothing> addr;
1424 CHECK(iter.readAtomicStore(&addr, ValType::I64, 4, &nothing));
1426 case uint32_t(ThreadOp::I32AtomicAdd):
1427 case uint32_t(ThreadOp::I32AtomicSub):
1428 case uint32_t(ThreadOp::I32AtomicAnd):
1429 case uint32_t(ThreadOp::I32AtomicOr):
1430 case uint32_t(ThreadOp::I32AtomicXor):
1431 case uint32_t(ThreadOp::I32AtomicXchg): {
1432 LinearMemoryAddress<Nothing> addr;
1433 CHECK(iter.readAtomicRMW(&addr, ValType::I32, 4, &nothing));
1435 case uint32_t(ThreadOp::I64AtomicAdd):
1436 case uint32_t(ThreadOp::I64AtomicSub):
1437 case uint32_t(ThreadOp::I64AtomicAnd):
1438 case uint32_t(ThreadOp::I64AtomicOr):
1439 case uint32_t(ThreadOp::I64AtomicXor):
1440 case uint32_t(ThreadOp::I64AtomicXchg): {
1441 LinearMemoryAddress<Nothing> addr;
1442 CHECK(iter.readAtomicRMW(&addr, ValType::I64, 8, &nothing));
1444 case uint32_t(ThreadOp::I32AtomicAdd8U):
1445 case uint32_t(ThreadOp::I32AtomicSub8U):
1446 case uint32_t(ThreadOp::I32AtomicAnd8U):
1447 case uint32_t(ThreadOp::I32AtomicOr8U):
1448 case uint32_t(ThreadOp::I32AtomicXor8U):
1449 case uint32_t(ThreadOp::I32AtomicXchg8U): {
1450 LinearMemoryAddress<Nothing> addr;
1451 CHECK(iter.readAtomicRMW(&addr, ValType::I32, 1, &nothing));
1453 case uint32_t(ThreadOp::I32AtomicAdd16U):
1454 case uint32_t(ThreadOp::I32AtomicSub16U):
1455 case uint32_t(ThreadOp::I32AtomicAnd16U):
1456 case uint32_t(ThreadOp::I32AtomicOr16U):
1457 case uint32_t(ThreadOp::I32AtomicXor16U):
1458 case uint32_t(ThreadOp::I32AtomicXchg16U): {
1459 LinearMemoryAddress<Nothing> addr;
1460 CHECK(iter.readAtomicRMW(&addr, ValType::I32, 2, &nothing));
1462 case uint32_t(ThreadOp::I64AtomicAdd8U):
1463 case uint32_t(ThreadOp::I64AtomicSub8U):
1464 case uint32_t(ThreadOp::I64AtomicAnd8U):
1465 case uint32_t(ThreadOp::I64AtomicOr8U):
1466 case uint32_t(ThreadOp::I64AtomicXor8U):
1467 case uint32_t(ThreadOp::I64AtomicXchg8U): {
1468 LinearMemoryAddress<Nothing> addr;
1469 CHECK(iter.readAtomicRMW(&addr, ValType::I64, 1, &nothing));
1471 case uint32_t(ThreadOp::I64AtomicAdd16U):
1472 case uint32_t(ThreadOp::I64AtomicSub16U):
1473 case uint32_t(ThreadOp::I64AtomicAnd16U):
1474 case uint32_t(ThreadOp::I64AtomicOr16U):
1475 case uint32_t(ThreadOp::I64AtomicXor16U):
1476 case uint32_t(ThreadOp::I64AtomicXchg16U): {
1477 LinearMemoryAddress<Nothing> addr;
1478 CHECK(iter.readAtomicRMW(&addr, ValType::I64, 2, &nothing));
1480 case uint32_t(ThreadOp::I64AtomicAdd32U):
1481 case uint32_t(ThreadOp::I64AtomicSub32U):
1482 case uint32_t(ThreadOp::I64AtomicAnd32U):
1483 case uint32_t(ThreadOp::I64AtomicOr32U):
1484 case uint32_t(ThreadOp::I64AtomicXor32U):
1485 case uint32_t(ThreadOp::I64AtomicXchg32U): {
1486 LinearMemoryAddress<Nothing> addr;
1487 CHECK(iter.readAtomicRMW(&addr, ValType::I64, 4, &nothing));
1489 case uint32_t(ThreadOp::I32AtomicCmpXchg): {
1490 LinearMemoryAddress<Nothing> addr;
1491 CHECK(iter.readAtomicCmpXchg(&addr, ValType::I32, 4, &nothing,
1492 &nothing));
1494 case uint32_t(ThreadOp::I64AtomicCmpXchg): {
1495 LinearMemoryAddress<Nothing> addr;
1496 CHECK(iter.readAtomicCmpXchg(&addr, ValType::I64, 8, &nothing,
1497 &nothing));
1499 case uint32_t(ThreadOp::I32AtomicCmpXchg8U): {
1500 LinearMemoryAddress<Nothing> addr;
1501 CHECK(iter.readAtomicCmpXchg(&addr, ValType::I32, 1, &nothing,
1502 &nothing));
1504 case uint32_t(ThreadOp::I32AtomicCmpXchg16U): {
1505 LinearMemoryAddress<Nothing> addr;
1506 CHECK(iter.readAtomicCmpXchg(&addr, ValType::I32, 2, &nothing,
1507 &nothing));
1509 case uint32_t(ThreadOp::I64AtomicCmpXchg8U): {
1510 LinearMemoryAddress<Nothing> addr;
1511 CHECK(iter.readAtomicCmpXchg(&addr, ValType::I64, 1, &nothing,
1512 &nothing));
1514 case uint32_t(ThreadOp::I64AtomicCmpXchg16U): {
1515 LinearMemoryAddress<Nothing> addr;
1516 CHECK(iter.readAtomicCmpXchg(&addr, ValType::I64, 2, &nothing,
1517 &nothing));
1519 case uint32_t(ThreadOp::I64AtomicCmpXchg32U): {
1520 LinearMemoryAddress<Nothing> addr;
1521 CHECK(iter.readAtomicCmpXchg(&addr, ValType::I64, 4, &nothing,
1522 &nothing));
1524 default:
1525 return iter.unrecognizedOpcode(&op);
1527 break;
1529 case uint16_t(Op::MozPrefix):
1530 return iter.unrecognizedOpcode(&op);
1531 default:
1532 return iter.unrecognizedOpcode(&op);
1536 MOZ_CRASH("unreachable");
1538 #undef CHECK
1541 bool wasm::ValidateFunctionBody(const ModuleEnvironment& env,
1542 uint32_t funcIndex, uint32_t bodySize,
1543 Decoder& d) {
1544 ValTypeVector locals;
1545 if (!locals.appendAll(env.funcs[funcIndex].type->args())) {
1546 return false;
1549 const uint8_t* bodyBegin = d.currentPosition();
1551 if (!DecodeLocalEntries(d, *env.types, env.features, &locals)) {
1552 return false;
1555 return DecodeFunctionBodyExprs(env, funcIndex, locals, bodyBegin + bodySize,
1556 &d);
1559 // Section macros.
1561 static bool DecodePreamble(Decoder& d) {
1562 if (d.bytesRemain() > MaxModuleBytes) {
1563 return d.fail("module too big");
1566 uint32_t u32;
1567 if (!d.readFixedU32(&u32) || u32 != MagicNumber) {
1568 return d.fail("failed to match magic number");
1571 if (!d.readFixedU32(&u32) || u32 != EncodingVersion) {
1572 return d.failf("binary version 0x%" PRIx32
1573 " does not match expected version 0x%" PRIx32,
1574 u32, EncodingVersion);
1577 return true;
1580 static bool DecodeValTypeVector(Decoder& d, ModuleEnvironment* env,
1581 uint32_t count, ValTypeVector* valTypes) {
1582 if (!valTypes->resize(count)) {
1583 return false;
1586 for (uint32_t i = 0; i < count; i++) {
1587 if (!d.readValType(*env->types, env->features, &(*valTypes)[i])) {
1588 return false;
1591 return true;
1594 static bool DecodeFuncType(Decoder& d, ModuleEnvironment* env,
1595 FuncType* funcType) {
1596 uint32_t numArgs;
1597 if (!d.readVarU32(&numArgs)) {
1598 return d.fail("bad number of function args");
1600 if (numArgs > MaxParams) {
1601 return d.fail("too many arguments in signature");
1603 ValTypeVector args;
1604 if (!DecodeValTypeVector(d, env, numArgs, &args)) {
1605 return false;
1608 uint32_t numResults;
1609 if (!d.readVarU32(&numResults)) {
1610 return d.fail("bad number of function returns");
1612 if (numResults > MaxResults) {
1613 return d.fail("too many returns in signature");
1615 ValTypeVector results;
1616 if (!DecodeValTypeVector(d, env, numResults, &results)) {
1617 return false;
1620 *funcType = FuncType(std::move(args), std::move(results));
1621 return true;
1624 static bool DecodeStructType(Decoder& d, ModuleEnvironment* env,
1625 StructType* structType) {
1626 if (!env->gcEnabled()) {
1627 return d.fail("Structure types not enabled");
1630 uint32_t numFields;
1631 if (!d.readVarU32(&numFields)) {
1632 return d.fail("Bad number of fields");
1635 if (numFields > MaxStructFields) {
1636 return d.fail("too many fields in struct");
1639 StructFieldVector fields;
1640 if (!fields.resize(numFields)) {
1641 return false;
1644 for (uint32_t i = 0; i < numFields; i++) {
1645 if (!d.readStorageType(*env->types, env->features, &fields[i].type)) {
1646 return false;
1649 uint8_t flags;
1650 if (!d.readFixedU8(&flags)) {
1651 return d.fail("expected flag");
1653 if ((flags & ~uint8_t(FieldFlags::AllowedMask)) != 0) {
1654 return d.fail("garbage flag bits");
1656 fields[i].isMutable = flags & uint8_t(FieldFlags::Mutable);
1659 *structType = StructType(std::move(fields));
1661 // Compute the struct layout, and fail if the struct is too large
1662 if (!structType->init()) {
1663 return d.fail("too many fields in struct");
1665 return true;
1668 static bool DecodeArrayType(Decoder& d, ModuleEnvironment* env,
1669 ArrayType* arrayType) {
1670 if (!env->gcEnabled()) {
1671 return d.fail("gc types not enabled");
1674 StorageType elementType;
1675 if (!d.readStorageType(*env->types, env->features, &elementType)) {
1676 return false;
1679 uint8_t flags;
1680 if (!d.readFixedU8(&flags)) {
1681 return d.fail("expected flag");
1683 if ((flags & ~uint8_t(FieldFlags::AllowedMask)) != 0) {
1684 return d.fail("garbage flag bits");
1686 bool isMutable = flags & uint8_t(FieldFlags::Mutable);
1688 *arrayType = ArrayType(elementType, isMutable);
1689 return true;
1692 static bool DecodeTypeSection(Decoder& d, ModuleEnvironment* env) {
1693 MaybeSectionRange range;
1694 if (!d.startSection(SectionId::Type, env, &range, "type")) {
1695 return false;
1697 if (!range) {
1698 return true;
1701 uint32_t numRecGroups;
1702 if (!d.readVarU32(&numRecGroups)) {
1703 return d.fail("expected number of types");
1706 // Check if we've reached our implementation defined limit of recursion
1707 // groups.
1708 if (numRecGroups > MaxRecGroups) {
1709 return d.fail("too many types");
1712 for (uint32_t recGroupIndex = 0; recGroupIndex < numRecGroups;
1713 recGroupIndex++) {
1714 uint32_t recGroupLength = 1;
1716 // Decode an optional recursion group length, if the GC proposal is
1717 // enabled.
1718 if (env->gcEnabled()) {
1719 uint8_t firstTypeCode;
1720 if (!d.peekByte(&firstTypeCode)) {
1721 return d.fail("expected type form");
1724 if (firstTypeCode == (uint8_t)TypeCode::RecGroup) {
1725 // Skip over the prefix byte that was peeked.
1726 d.uncheckedReadFixedU8();
1728 // Read the number of types in this recursion group
1729 if (!d.readVarU32(&recGroupLength)) {
1730 return d.fail("expected recursion group length");
1735 // Start a recursion group. This will extend the type context with empty
1736 // type definitions to be filled.
1737 MutableRecGroup recGroup = env->types->startRecGroup(recGroupLength);
1738 if (!recGroup) {
1739 return false;
1742 // First, iterate over the types, validate them and set super types.
1743 // Subtyping relationship will be checked in a second iteration.
1744 for (uint32_t recGroupTypeIndex = 0; recGroupTypeIndex < recGroupLength;
1745 recGroupTypeIndex++) {
1746 uint32_t typeIndex =
1747 env->types->length() - recGroupLength + recGroupTypeIndex;
1749 // Check if we've reached our implementation defined limit of type
1750 // definitions.
1751 if (typeIndex >= MaxTypes) {
1752 return d.fail("too many types");
1755 uint8_t form;
1756 const TypeDef* superTypeDef = nullptr;
1758 // By default, all types are final unless the sub keyword is specified.
1759 bool finalTypeFlag = true;
1761 // Decode an optional declared super type index, if the GC proposal is
1762 // enabled.
1763 if (env->gcEnabled() && d.peekByte(&form) &&
1764 (form == (uint8_t)TypeCode::SubNoFinalType ||
1765 form == (uint8_t)TypeCode::SubFinalType)) {
1766 if (form == (uint8_t)TypeCode::SubNoFinalType) {
1767 finalTypeFlag = false;
1770 // Skip over the `sub` or `final` prefix byte we peeked.
1771 d.uncheckedReadFixedU8();
1773 // Decode the number of super types, which is currently limited to at
1774 // most one.
1775 uint32_t numSuperTypes;
1776 if (!d.readVarU32(&numSuperTypes)) {
1777 return d.fail("expected number of super types");
1779 if (numSuperTypes > 1) {
1780 return d.fail("too many super types");
1783 // Decode the super type, if any.
1784 if (numSuperTypes == 1) {
1785 uint32_t superTypeDefIndex;
1786 if (!d.readVarU32(&superTypeDefIndex)) {
1787 return d.fail("expected super type index");
1790 // A super type index must be strictly less than the current type
1791 // index in order to avoid cycles.
1792 if (superTypeDefIndex >= typeIndex) {
1793 return d.fail("invalid super type index");
1796 superTypeDef = &env->types->type(superTypeDefIndex);
1800 // Decode the kind of type definition
1801 if (!d.readFixedU8(&form)) {
1802 return d.fail("expected type form");
1805 TypeDef* typeDef = &recGroup->type(recGroupTypeIndex);
1806 switch (form) {
1807 case uint8_t(TypeCode::Func): {
1808 FuncType funcType;
1809 if (!DecodeFuncType(d, env, &funcType)) {
1810 return false;
1812 *typeDef = std::move(funcType);
1813 break;
1815 case uint8_t(TypeCode::Struct): {
1816 StructType structType;
1817 if (!DecodeStructType(d, env, &structType)) {
1818 return false;
1820 *typeDef = std::move(structType);
1821 break;
1823 case uint8_t(TypeCode::Array): {
1824 ArrayType arrayType;
1825 if (!DecodeArrayType(d, env, &arrayType)) {
1826 return false;
1828 *typeDef = std::move(arrayType);
1829 break;
1831 default:
1832 return d.fail("expected type form");
1835 typeDef->setFinal(finalTypeFlag);
1836 if (superTypeDef) {
1837 // Check that we aren't creating a subtyping chain that is too deep.
1838 if (superTypeDef->subTypingDepth() >= MaxSubTypingDepth) {
1839 return d.fail("type is too deep");
1842 typeDef->setSuperTypeDef(superTypeDef);
1845 if (typeDef->isFuncType()) {
1846 typeDef->funcType().initImmediateTypeId(
1847 env->gcEnabled(), typeDef->isFinal(), superTypeDef, recGroupLength);
1851 // Check the super types to make sure they are compatible with their
1852 // subtypes. This is done in a second iteration to avoid dealing with
1853 // not-yet-loaded types.
1854 for (uint32_t recGroupTypeIndex = 0; recGroupTypeIndex < recGroupLength;
1855 recGroupTypeIndex++) {
1856 TypeDef* typeDef = &recGroup->type(recGroupTypeIndex);
1857 if (typeDef->superTypeDef()) {
1858 // Check that the super type is compatible with this type
1859 if (!TypeDef::canBeSubTypeOf(typeDef, typeDef->superTypeDef())) {
1860 return d.fail("incompatible super type");
1865 // Finish the recursion group, which will canonicalize the types.
1866 if (!env->types->endRecGroup()) {
1867 return false;
1871 return d.finishSection(*range, "type");
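// A sketch of the shapes this section accepts once the GC proposal is
// enabled (text format, illustrative only):
//
//   (rec
//     (type $A (sub (struct (field i32))))
//     (type $B (sub final $A (struct (field i32) (field i64)))))
//
// The `rec` prefix groups mutually recursive definitions, `sub` declares at
// most one super type, and types without a `sub`/`sub final` prefix default
// to final, matching the decoding above.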
1874 [[nodiscard]] static bool DecodeName(Decoder& d, CacheableName* name) {
1875 uint32_t numBytes;
1876 if (!d.readVarU32(&numBytes)) {
1877 return false;
1880 if (numBytes > MaxStringBytes) {
1881 return false;
1884 const uint8_t* bytes;
1885 if (!d.readBytes(numBytes, &bytes)) {
1886 return false;
1889 if (!IsUtf8(AsChars(Span(bytes, numBytes)))) {
1890 return false;
1893 UTF8Bytes utf8Bytes;
1894 if (!utf8Bytes.resizeUninitialized(numBytes)) {
1895 return false;
1897 memcpy(utf8Bytes.begin(), bytes, numBytes);
1899 *name = CacheableName(std::move(utf8Bytes));
1900 return true;
1903 static bool DecodeFuncTypeIndex(Decoder& d, const SharedTypeContext& types,
1904 uint32_t* funcTypeIndex) {
1905 if (!d.readVarU32(funcTypeIndex)) {
1906 return d.fail("expected signature index");
1909 if (*funcTypeIndex >= types->length()) {
1910 return d.fail("signature index out of range");
1913 const TypeDef& def = (*types)[*funcTypeIndex];
1915 if (!def.isFuncType()) {
1916 return d.fail("signature index references non-signature");
1919 return true;
1922 static bool DecodeLimitBound(Decoder& d, IndexType indexType, uint64_t* bound) {
1923 if (indexType == IndexType::I64) {
1924 return d.readVarU64(bound);
1927 // Spec tests assert that we only decode a LEB32 when the index type is I32.
1928 uint32_t bound32;
1929 if (!d.readVarU32(&bound32)) {
1930 return false;
1932 *bound = bound32;
1933 return true;
1936 static bool DecodeLimits(Decoder& d, LimitsKind kind, Limits* limits) {
1937 uint8_t flags;
1938 if (!d.readFixedU8(&flags)) {
1939 return d.fail("expected flags");
1942 uint8_t mask = kind == LimitsKind::Memory ? uint8_t(LimitsMask::Memory)
1943 : uint8_t(LimitsMask::Table);
1945 if (flags & ~uint8_t(mask)) {
1946 return d.failf("unexpected bits set in flags: %" PRIu32,
1947 uint32_t(flags & ~uint8_t(mask)));
1950 // Memory limits may be shared or may specify an alternate index type.
1951 if (kind == LimitsKind::Memory) {
1952 if ((flags & uint8_t(LimitsFlags::IsShared)) &&
1953 !(flags & uint8_t(LimitsFlags::HasMaximum))) {
1954 return d.fail("maximum length required for shared memory");
1957 limits->shared = (flags & uint8_t(LimitsFlags::IsShared))
1958 ? Shareable::True
1959 : Shareable::False;
1961 #ifdef ENABLE_WASM_MEMORY64
1962 limits->indexType =
1963 (flags & uint8_t(LimitsFlags::IsI64)) ? IndexType::I64 : IndexType::I32;
1964 #else
1965 limits->indexType = IndexType::I32;
1966 if (flags & uint8_t(LimitsFlags::IsI64)) {
1967 return d.fail("i64 is not supported for memory limits");
1969 #endif
1970 } else {
1971 limits->shared = Shareable::False;
1972 limits->indexType = IndexType::I32;
1975 uint64_t initial;
1976 if (!DecodeLimitBound(d, limits->indexType, &initial)) {
1977 return d.fail("expected initial length");
1979 limits->initial = initial;
1981 if (flags & uint8_t(LimitsFlags::HasMaximum)) {
1982 uint64_t maximum;
1983 if (!DecodeLimitBound(d, limits->indexType, &maximum)) {
1984 return d.fail("expected maximum length");
1987 if (limits->initial > maximum) {
1988 return d.failf(
1989 "memory size minimum must not be greater than maximum; "
1990 "maximum length %" PRIu64 " is less than initial length %" PRIu64,
1991 maximum, limits->initial);
1994 limits->maximum.emplace(maximum);
1997 return true;
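// A note on the flags byte decoded above: per the threads and memory64
// proposals, bit 0 requests a maximum bound, bit 1 a shared memory, and
// bit 2 an i64 index type, which is why LimitsMask differs between memories
// and tables (tables take neither shared nor i64 here). For example, a
// shared memory "(memory 1 2 shared)" uses flags 0x03 followed by the two
// LEB-encoded bounds. (Bit values given for illustration; the LimitsFlags
// definitions are authoritative.)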
2000 static bool DecodeTableTypeAndLimits(Decoder& d, ModuleEnvironment* env) {
2001 bool initExprPresent = false;
2002 uint8_t typeCode;
2003 if (!d.peekByte(&typeCode)) {
2004 return d.fail("expected type code");
2006 if (typeCode == (uint8_t)TypeCode::TableHasInitExpr) {
2007 d.uncheckedReadFixedU8();
2008 uint8_t flags;
2009 if (!d.readFixedU8(&flags) || flags != 0) {
2010 return d.fail("expected reserved byte to be 0");
2012 initExprPresent = true;
2015 RefType tableElemType;
2016 if (!d.readRefType(*env->types, env->features, &tableElemType)) {
2017 return false;
2020 Limits limits;
2021 if (!DecodeLimits(d, LimitsKind::Table, &limits)) {
2022 return false;
2025 // Decoding limits for a table only supports the i32 index type.
2026 MOZ_ASSERT(limits.indexType == IndexType::I32);
2028 // If there's a maximum, check it is in range. The check to exclude
2029 // initial > maximum is carried out by the DecodeLimits call above, so
2030 // we don't repeat it here.
2031 if (limits.initial > MaxTableLimitField ||
2032 ((limits.maximum.isSome() &&
2033 limits.maximum.value() > MaxTableLimitField))) {
2034 return d.fail("too many table elements");
2037 if (env->tables.length() >= MaxTables) {
2038 return d.fail("too many tables");
2041 // The rest of the runtime expects table limits to be within a 32-bit range.
2042 static_assert(MaxTableLimitField <= UINT32_MAX, "invariant");
2043 uint32_t initialLength = uint32_t(limits.initial);
2044 Maybe<uint32_t> maximumLength;
2045 if (limits.maximum) {
2046 maximumLength = Some(uint32_t(*limits.maximum));
2049 Maybe<InitExpr> initExpr;
2050 if (initExprPresent) {
2051 InitExpr initializer;
2052 if (!InitExpr::decodeAndValidate(d, env, tableElemType, &initializer)) {
2053 return false;
2055 initExpr = Some(std::move(initializer));
2056 } else {
2057 if (!tableElemType.isNullable()) {
2058 return d.fail("table with non-nullable references requires initializer");
2062 return env->tables.emplaceBack(tableElemType, initialLength, maximumLength,
2063 std::move(initExpr), /* isAsmJS */ false);
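// Tables therefore come in two encodings: the MVP form (a reference type
// followed by limits) and, with the function-references proposal, a prefixed
// form that additionally carries an initializer expression, roughly
// "(table 2 (ref $f) (ref.func $init))" in the text format. Non-nullable
// element types are only accepted in the latter form, as checked above.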
2066 static bool DecodeGlobalType(Decoder& d, const SharedTypeContext& types,
2067 const FeatureArgs& features, ValType* type,
2068 bool* isMutable) {
2069 if (!d.readValType(*types, features, type)) {
2070 return d.fail("expected global type");
2073 uint8_t flags;
2074 if (!d.readFixedU8(&flags)) {
2075 return d.fail("expected global flags");
2078 if (flags & ~uint8_t(GlobalTypeImmediate::AllowedMask)) {
2079 return d.fail("unexpected bits set in global flags");
2082 *isMutable = flags & uint8_t(GlobalTypeImmediate::IsMutable);
2083 return true;
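// A global type is thus a value type plus a flags byte; in the standard
// encoding 0x00 marks an immutable global and 0x01 a mutable one, e.g.
// "(global (mut i32) ...)" (byte values given for illustration).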
2086 static bool DecodeMemoryTypeAndLimits(Decoder& d, ModuleEnvironment* env,
2087 MemoryDescVector* memories) {
2088 if (!env->features.multiMemory && env->numMemories() == 1) {
2089 return d.fail("already have default memory");
2092 if (env->numMemories() >= MaxMemories) {
2093 return d.fail("too many memories");
2096 Limits limits;
2097 if (!DecodeLimits(d, LimitsKind::Memory, &limits)) {
2098 return false;
2101 uint64_t maxField = MaxMemoryLimitField(limits.indexType);
2103 if (limits.initial > maxField) {
2104 return d.fail("initial memory size too big");
2107 if (limits.maximum && *limits.maximum > maxField) {
2108 return d.fail("maximum memory size too big");
2111 if (limits.shared == Shareable::True &&
2112 env->sharedMemoryEnabled() == Shareable::False) {
2113 return d.fail("shared memory is disabled");
2116 if (limits.indexType == IndexType::I64 && !env->memory64Enabled()) {
2117 return d.fail("memory64 is disabled");
2120 return memories->emplaceBack(MemoryDesc(limits));
2123 static bool DecodeTag(Decoder& d, ModuleEnvironment* env, TagKind* tagKind,
2124 uint32_t* funcTypeIndex) {
2125 uint32_t tagCode;
2126 if (!d.readVarU32(&tagCode)) {
2127 return d.fail("expected tag kind");
2130 if (TagKind(tagCode) != TagKind::Exception) {
2131 return d.fail("illegal tag kind");
2133 *tagKind = TagKind(tagCode);
2135 if (!d.readVarU32(funcTypeIndex)) {
2136 return d.fail("expected function index in tag");
2138 if (*funcTypeIndex >= env->numTypes()) {
2139 return d.fail("function type index in tag out of bounds");
2141 if (!(*env->types)[*funcTypeIndex].isFuncType()) {
2142 return d.fail("function type index must index a function type");
2144 if ((*env->types)[*funcTypeIndex].funcType().results().length() != 0) {
2145 return d.fail("tag function types must not return anything");
2147 return true;
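// Per the exception-handling proposal, a tag is an attribute byte (only the
// exception kind exists today) plus the index of a function type whose
// result list must be empty, e.g. "(tag $ioError (param i32))" in the text
// format (illustrative only).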
2150 static bool DecodeImport(Decoder& d, ModuleEnvironment* env) {
2151 CacheableName moduleName;
2152 if (!DecodeName(d, &moduleName)) {
2153 return d.fail("expected valid import module name");
2156 CacheableName fieldName;
2157 if (!DecodeName(d, &fieldName)) {
2158 return d.fail("expected valid import field name");
2161 uint8_t rawImportKind;
2162 if (!d.readFixedU8(&rawImportKind)) {
2163 return d.fail("failed to read import kind");
2166 DefinitionKind importKind = DefinitionKind(rawImportKind);
2168 switch (importKind) {
2169 case DefinitionKind::Function: {
2170 uint32_t funcTypeIndex;
2171 if (!DecodeFuncTypeIndex(d, env->types, &funcTypeIndex)) {
2172 return false;
2174 if (!env->funcs.append(FuncDesc(
2175 &env->types->type(funcTypeIndex).funcType(), funcTypeIndex))) {
2176 return false;
2178 if (env->funcs.length() > MaxFuncs) {
2179 return d.fail("too many functions");
2181 break;
2183 case DefinitionKind::Table: {
2184 if (!DecodeTableTypeAndLimits(d, env)) {
2185 return false;
2187 env->tables.back().isImported = true;
2188 break;
2190 case DefinitionKind::Memory: {
2191 if (!DecodeMemoryTypeAndLimits(d, env, &env->memories)) {
2192 return false;
2194 break;
2196 case DefinitionKind::Global: {
2197 ValType type;
2198 bool isMutable;
2199 if (!DecodeGlobalType(d, env->types, env->features, &type, &isMutable)) {
2200 return false;
2202 if (!env->globals.append(
2203 GlobalDesc(type, isMutable, env->globals.length()))) {
2204 return false;
2206 if (env->globals.length() > MaxGlobals) {
2207 return d.fail("too many globals");
2209 break;
2211 case DefinitionKind::Tag: {
2212 TagKind tagKind;
2213 uint32_t funcTypeIndex;
2214 if (!DecodeTag(d, env, &tagKind, &funcTypeIndex)) {
2215 return false;
2217 ValTypeVector args;
2218 if (!args.appendAll((*env->types)[funcTypeIndex].funcType().args())) {
2219 return false;
2221 MutableTagType tagType = js_new<TagType>();
2222 if (!tagType || !tagType->initialize(std::move(args))) {
2223 return false;
2225 if (!env->tags.emplaceBack(tagKind, tagType)) {
2226 return false;
2228 if (env->tags.length() > MaxTags) {
2229 return d.fail("too many tags");
2231 break;
2233 default:
2234 return d.fail("unsupported import kind");
2237 return env->imports.emplaceBack(std::move(moduleName), std::move(fieldName),
2238 importKind);
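// Each import entry is therefore a module name, a field name, a definition
// kind byte, and a kind-specific description (type index, table type, memory
// type, global type, or tag), mirroring text-format imports such as
//   (import "env" "memory" (memory 1))
// (illustrative only).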
2241 static bool CheckImportsAgainstBuiltinModules(Decoder& d,
2242 ModuleEnvironment* env) {
2243 const BuiltinModuleIds& builtinModules = env->features.builtinModules;
2245 // Skip this pass if no builtin modules are enabled.
2246 if (builtinModules.hasNone()) {
2247 return true;
2250 // Allocate a type context for builtin types so we can canonicalize them
2251 // and use them in type comparisons.
2252 RefPtr<TypeContext> builtinTypes = js_new<TypeContext>();
2253 if (!builtinTypes) {
2254 return false;
2257 uint32_t importFuncIndex = 0;
2258 for (auto& import : env->imports) {
2259 Maybe<BuiltinModuleId> builtinModule =
2260 ImportMatchesBuiltinModule(import.module.utf8Bytes(), builtinModules);
2262 switch (import.kind) {
2263 case DefinitionKind::Function: {
2264 const FuncDesc& func = env->funcs[importFuncIndex];
2265 importFuncIndex += 1;
2267 // Skip this import if it doesn't refer to a builtin module. We do have
2268 // to increment the import function index regardless though.
2269 if (!builtinModule) {
2270 continue;
2273 // Check if this import refers to a builtin module function
2274 Maybe<const BuiltinModuleFunc*> builtinFunc =
2275 ImportMatchesBuiltinModuleFunc(import.field.utf8Bytes(),
2276 *builtinModule);
2277 if (!builtinFunc) {
2278 return d.fail("unrecognized builtin module field");
2281 // Get a canonicalized type definition for this builtin so we can
2282 // accurately compare it against the import type.
2283 FuncType builtinFuncType;
2284 if (!(*builtinFunc)->funcType(&builtinFuncType)) {
2285 return false;
2287 if (!builtinTypes->addType(builtinFuncType)) {
2288 return false;
2290 const TypeDef& builtinTypeDef =
2291 builtinTypes->type(builtinTypes->length() - 1);
2293 const TypeDef& importTypeDef = (*env->types)[func.typeIndex];
2294 if (!TypeDef::isSubTypeOf(&builtinTypeDef, &importTypeDef)) {
2295 return d.failf("type mismatch in %s", (*builtinFunc)->exportName);
2297 break;
2299 default: {
2300 if (!builtinModule) {
2301 continue;
2303 return d.fail("unrecognized builtin import");
2308 return true;
2311 static bool DecodeImportSection(Decoder& d, ModuleEnvironment* env) {
2312 MaybeSectionRange range;
2313 if (!d.startSection(SectionId::Import, env, &range, "import")) {
2314 return false;
2316 if (!range) {
2317 return true;
2320 uint32_t numImports;
2321 if (!d.readVarU32(&numImports)) {
2322 return d.fail("failed to read number of imports");
2325 if (numImports > MaxImports) {
2326 return d.fail("too many imports");
2329 for (uint32_t i = 0; i < numImports; i++) {
2330 if (!DecodeImport(d, env)) {
2331 return false;
2335 if (!d.finishSection(*range, "import")) {
2336 return false;
2339 env->numFuncImports = env->funcs.length();
2340 env->numGlobalImports = env->globals.length();
2341 return true;
2344 static bool DecodeFunctionSection(Decoder& d, ModuleEnvironment* env) {
2345 MaybeSectionRange range;
2346 if (!d.startSection(SectionId::Function, env, &range, "function")) {
2347 return false;
2349 if (!range) {
2350 return true;
2353 uint32_t numDefs;
2354 if (!d.readVarU32(&numDefs)) {
2355 return d.fail("expected number of function definitions");
2358 CheckedInt<uint32_t> numFuncs = env->funcs.length();
2359 numFuncs += numDefs;
2360 if (!numFuncs.isValid() || numFuncs.value() > MaxFuncs) {
2361 return d.fail("too many functions");
2364 if (!env->funcs.reserve(numFuncs.value())) {
2365 return false;
2368 for (uint32_t i = 0; i < numDefs; i++) {
2369 uint32_t funcTypeIndex;
2370 if (!DecodeFuncTypeIndex(d, env->types, &funcTypeIndex)) {
2371 return false;
2373 env->funcs.infallibleAppend(
2374 FuncDesc(&env->types->type(funcTypeIndex).funcType(), funcTypeIndex));
2377 return d.finishSection(*range, "function");
2380 static bool DecodeTableSection(Decoder& d, ModuleEnvironment* env) {
2381 MaybeSectionRange range;
2382 if (!d.startSection(SectionId::Table, env, &range, "table")) {
2383 return false;
2385 if (!range) {
2386 return true;
2389 uint32_t numTables;
2390 if (!d.readVarU32(&numTables)) {
2391 return d.fail("failed to read number of tables");
2394 for (uint32_t i = 0; i < numTables; ++i) {
2395 if (!DecodeTableTypeAndLimits(d, env)) {
2396 return false;
2400 return d.finishSection(*range, "table");
2403 static bool DecodeMemorySection(Decoder& d, ModuleEnvironment* env) {
2404 MaybeSectionRange range;
2405 if (!d.startSection(SectionId::Memory, env, &range, "memory")) {
2406 return false;
2408 if (!range) {
2409 return true;
2412 uint32_t numMemories;
2413 if (!d.readVarU32(&numMemories)) {
2414 return d.fail("failed to read number of memories");
2417 if (!env->features.multiMemory && numMemories > 1) {
2418 return d.fail("the number of memories must be at most one");
2421 for (uint32_t i = 0; i < numMemories; ++i) {
2422 if (!DecodeMemoryTypeAndLimits(d, env, &env->memories)) {
2423 return false;
2427 return d.finishSection(*range, "memory");
2430 static bool DecodeGlobalSection(Decoder& d, ModuleEnvironment* env) {
2431 MaybeSectionRange range;
2432 if (!d.startSection(SectionId::Global, env, &range, "global")) {
2433 return false;
2435 if (!range) {
2436 return true;
2439 uint32_t numDefs;
2440 if (!d.readVarU32(&numDefs)) {
2441 return d.fail("expected number of globals");
2444 CheckedInt<uint32_t> numGlobals = env->globals.length();
2445 numGlobals += numDefs;
2446 if (!numGlobals.isValid() || numGlobals.value() > MaxGlobals) {
2447 return d.fail("too many globals");
2450 if (!env->globals.reserve(numGlobals.value())) {
2451 return false;
2454 for (uint32_t i = 0; i < numDefs; i++) {
2455 ValType type;
2456 bool isMutable;
2457 if (!DecodeGlobalType(d, env->types, env->features, &type, &isMutable)) {
2458 return false;
2461 InitExpr initializer;
2462 if (!InitExpr::decodeAndValidate(d, env, type, &initializer)) {
2463 return false;
2466 env->globals.infallibleAppend(
2467 GlobalDesc(std::move(initializer), isMutable));
2470 return d.finishSection(*range, "global");
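// Each defined global is thus a global type followed by a constant
// initializer expression, e.g.
//   (global $g (mut i32) (i32.const 0))
// in the text format (illustrative only).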
2473 static bool DecodeTagSection(Decoder& d, ModuleEnvironment* env) {
2474 MaybeSectionRange range;
2475 if (!d.startSection(SectionId::Tag, env, &range, "tag")) {
2476 return false;
2478 if (!range) {
2479 return true;
2482 if (!env->exceptionsEnabled()) {
2483 return d.fail("exceptions not enabled");
2486 uint32_t numDefs;
2487 if (!d.readVarU32(&numDefs)) {
2488 return d.fail("expected number of tags");
2491 CheckedInt<uint32_t> numTags = env->tags.length();
2492 numTags += numDefs;
2493 if (!numTags.isValid() || numTags.value() > MaxTags) {
2494 return d.fail("too many tags");
2497 if (!env->tags.reserve(numTags.value())) {
2498 return false;
2501 for (uint32_t i = 0; i < numDefs; i++) {
2502 TagKind tagKind;
2503 uint32_t funcTypeIndex;
2504 if (!DecodeTag(d, env, &tagKind, &funcTypeIndex)) {
2505 return false;
2507 ValTypeVector args;
2508 if (!args.appendAll((*env->types)[funcTypeIndex].funcType().args())) {
2509 return false;
2511 MutableTagType tagType = js_new<TagType>();
2512 if (!tagType || !tagType->initialize(std::move(args))) {
2513 return false;
2515 env->tags.infallibleEmplaceBack(tagKind, tagType);
2518 return d.finishSection(*range, "tag");
2521 using NameSet = HashSet<Span<char>, NameHasher, SystemAllocPolicy>;
2523 [[nodiscard]] static bool DecodeExportName(Decoder& d, NameSet* dupSet,
2524 CacheableName* exportName) {
2525 if (!DecodeName(d, exportName)) {
2526 d.fail("expected valid export name");
2527 return false;
2530 NameSet::AddPtr p = dupSet->lookupForAdd(exportName->utf8Bytes());
2531 if (p) {
2532 d.fail("duplicate export");
2533 return false;
2536 return dupSet->add(p, exportName->utf8Bytes());
2539 static bool DecodeExport(Decoder& d, ModuleEnvironment* env, NameSet* dupSet) {
2540 CacheableName fieldName;
2541 if (!DecodeExportName(d, dupSet, &fieldName)) {
2542 return false;
2545 uint8_t exportKind;
2546 if (!d.readFixedU8(&exportKind)) {
2547 return d.fail("failed to read export kind");
2550 switch (DefinitionKind(exportKind)) {
2551 case DefinitionKind::Function: {
2552 uint32_t funcIndex;
2553 if (!d.readVarU32(&funcIndex)) {
2554 return d.fail("expected function index");
2557 if (funcIndex >= env->numFuncs()) {
2558 return d.fail("exported function index out of bounds");
2561 env->declareFuncExported(funcIndex, /* eager */ true,
2562 /* canRefFunc */ true);
2563 return env->exports.emplaceBack(std::move(fieldName), funcIndex,
2564 DefinitionKind::Function);
2566 case DefinitionKind::Table: {
2567 uint32_t tableIndex;
2568 if (!d.readVarU32(&tableIndex)) {
2569 return d.fail("expected table index");
2572 if (tableIndex >= env->tables.length()) {
2573 return d.fail("exported table index out of bounds");
2575 env->tables[tableIndex].isExported = true;
2576 return env->exports.emplaceBack(std::move(fieldName), tableIndex,
2577 DefinitionKind::Table);
2579 case DefinitionKind::Memory: {
2580 uint32_t memoryIndex;
2581 if (!d.readVarU32(&memoryIndex)) {
2582 return d.fail("expected memory index");
2585 if (memoryIndex >= env->numMemories()) {
2586 return d.fail("exported memory index out of bounds");
2589 return env->exports.emplaceBack(std::move(fieldName), memoryIndex,
2590 DefinitionKind::Memory);
2592 case DefinitionKind::Global: {
2593 uint32_t globalIndex;
2594 if (!d.readVarU32(&globalIndex)) {
2595 return d.fail("expected global index");
2598 if (globalIndex >= env->globals.length()) {
2599 return d.fail("exported global index out of bounds");
2602 GlobalDesc* global = &env->globals[globalIndex];
2603 global->setIsExport();
2605 return env->exports.emplaceBack(std::move(fieldName), globalIndex,
2606 DefinitionKind::Global);
2608 case DefinitionKind::Tag: {
2609 uint32_t tagIndex;
2610 if (!d.readVarU32(&tagIndex)) {
2611 return d.fail("expected tag index");
2613 if (tagIndex >= env->tags.length()) {
2614 return d.fail("exported tag index out of bounds");
2617 env->tags[tagIndex].isExport = true;
2618 return env->exports.emplaceBack(std::move(fieldName), tagIndex,
2619 DefinitionKind::Tag);
2621 default:
2622 return d.fail("unexpected export kind");
2625 MOZ_CRASH("unreachable");
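// Export entries mirror imports: a name, a definition kind byte, and the
// index of the exported function, table, memory, global, or tag, e.g.
//   (export "main" (func $main))
// in the text format (illustrative only). Duplicate export names are
// rejected via the NameSet built by DecodeExportName.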
2628 static bool DecodeExportSection(Decoder& d, ModuleEnvironment* env) {
2629 MaybeSectionRange range;
2630 if (!d.startSection(SectionId::Export, env, &range, "export")) {
2631 return false;
2633 if (!range) {
2634 return true;
2637 NameSet dupSet;
2639 uint32_t numExports;
2640 if (!d.readVarU32(&numExports)) {
2641 return d.fail("failed to read number of exports");
2644 if (numExports > MaxExports) {
2645 return d.fail("too many exports");
2648 for (uint32_t i = 0; i < numExports; i++) {
2649 if (!DecodeExport(d, env, &dupSet)) {
2650 return false;
2654 return d.finishSection(*range, "export");
2657 static bool DecodeStartSection(Decoder& d, ModuleEnvironment* env) {
2658 MaybeSectionRange range;
2659 if (!d.startSection(SectionId::Start, env, &range, "start")) {
2660 return false;
2662 if (!range) {
2663 return true;
2666 uint32_t funcIndex;
2667 if (!d.readVarU32(&funcIndex)) {
2668 return d.fail("failed to read start func index");
2671 if (funcIndex >= env->numFuncs()) {
2672 return d.fail("unknown start function");
2675 const FuncType& funcType = *env->funcs[funcIndex].type;
2676 if (funcType.results().length() > 0) {
2677 return d.fail("start function must not return anything");
2680 if (funcType.args().length()) {
2681 return d.fail("start function must be nullary");
2684 env->declareFuncExported(funcIndex, /* eager */ true, /* canRefFunc */ false);
2685 env->startFuncIndex = Some(funcIndex);
2687 return d.finishSection(*range, "start");
2690 static inline ModuleElemSegment::Kind NormalizeElemSegmentKind(
2691 ElemSegmentKind decodedKind) {
2692 switch (decodedKind) {
2693 case ElemSegmentKind::Active:
2694 case ElemSegmentKind::ActiveWithTableIndex: {
2695 return ModuleElemSegment::Kind::Active;
2697 case ElemSegmentKind::Passive: {
2698 return ModuleElemSegment::Kind::Passive;
2700 case ElemSegmentKind::Declared: {
2701 return ModuleElemSegment::Kind::Declared;
2704 MOZ_CRASH("unexpected elem segment kind");
2707 static bool DecodeElemSegment(Decoder& d, ModuleEnvironment* env) {
2708 uint32_t segmentFlags;
2709 if (!d.readVarU32(&segmentFlags)) {
2710 return d.fail("expected elem segment flags field");
2713 Maybe<ElemSegmentFlags> flags = ElemSegmentFlags::construct(segmentFlags);
2714 if (!flags) {
2715 return d.fail("invalid elem segment flags field");
2718 ModuleElemSegment seg = ModuleElemSegment();
2720 ElemSegmentKind segmentKind = flags->kind();
2721 seg.kind = NormalizeElemSegmentKind(segmentKind);
2723 if (segmentKind == ElemSegmentKind::Active ||
2724 segmentKind == ElemSegmentKind::ActiveWithTableIndex) {
2725 if (env->tables.length() == 0) {
2726 return d.fail("active elem segment requires a table");
2729 uint32_t tableIndex = 0;
2730 if (segmentKind == ElemSegmentKind::ActiveWithTableIndex &&
2731 !d.readVarU32(&tableIndex)) {
2732 return d.fail("expected table index");
2734 if (tableIndex >= env->tables.length()) {
2735 return d.fail("table index out of range for element segment");
2737 seg.tableIndex = tableIndex;
2739 InitExpr offset;
2740 if (!InitExpr::decodeAndValidate(d, env, ValType::I32, &offset)) {
2741 return false;
2743 seg.offsetIfActive.emplace(std::move(offset));
2744 } else {
2745 // Too many bugs result from keeping this value zero. For passive
2746 // or declared segments, there really is no table index, and we should
2747 // never touch the field.
2748 MOZ_ASSERT(segmentKind == ElemSegmentKind::Passive ||
2749 segmentKind == ElemSegmentKind::Declared);
2750 seg.tableIndex = (uint32_t)-1;
2753 ElemSegmentPayload payload = flags->payload();
2754 RefType elemType;
2756 // `ActiveWithTableIndex`, `Declared`, and `Passive` element segments encode
2757 // the type or definition kind of the payload. `Active` element segments are
2758 // restricted to MVP behavior, which assumes only function indices.
2759 if (segmentKind == ElemSegmentKind::Active) {
2760 elemType = RefType::func();
2761 } else {
2762 switch (payload) {
2763 case ElemSegmentPayload::Expressions: {
2764 if (!d.readRefType(*env->types, env->features, &elemType)) {
2765 return false;
2767 } break;
2768 case ElemSegmentPayload::Indices: {
2769 uint8_t elemKind;
2770 if (!d.readFixedU8(&elemKind)) {
2771 return d.fail("expected element kind");
2774 if (elemKind != uint8_t(DefinitionKind::Function)) {
2775 return d.fail("invalid element kind");
2777 elemType = RefType::func();
2778 } break;
2782 // For active segments, check if the element type is compatible with the
2783 // destination table type.
2784 if (seg.active()) {
2785 RefType tblElemType = env->tables[seg.tableIndex].elemType;
2786 if (!CheckIsSubtypeOf(d, *env, d.currentOffset(),
2787 ValType(elemType).storageType(),
2788 ValType(tblElemType).storageType())) {
2789 return false;
2792 seg.elemType = elemType;
2794 uint32_t numElems;
2795 if (!d.readVarU32(&numElems)) {
2796 return d.fail("expected element segment size");
2799 if (numElems > MaxElemSegmentLength) {
2800 return d.fail("too many elements in element segment");
2803 bool isAsmJS = seg.active() && env->tables[seg.tableIndex].isAsmJS;
2805 switch (payload) {
2806 case ElemSegmentPayload::Indices: {
2807 seg.encoding = ModuleElemSegment::Encoding::Indices;
2808 if (!seg.elemIndices.reserve(numElems)) {
2809 return false;
2812 for (uint32_t i = 0; i < numElems; i++) {
2813 uint32_t elemIndex;
2814 if (!d.readVarU32(&elemIndex)) {
2815 return d.fail("failed to read element index");
2817 // The only valid type of index right now is a function index.
2818 if (elemIndex >= env->numFuncs()) {
2819 return d.fail("element index out of range");
2822 seg.elemIndices.infallibleAppend(elemIndex);
2823 if (!isAsmJS) {
2824 env->declareFuncExported(elemIndex, /*eager=*/false,
2825 /*canRefFunc=*/true);
2828 } break;
2829 case ElemSegmentPayload::Expressions: {
2830 seg.encoding = ModuleElemSegment::Encoding::Expressions;
2831 const uint8_t* exprsStart = d.currentPosition();
2832 seg.elemExpressions.count = numElems;
2833 for (uint32_t i = 0; i < numElems; i++) {
2834 Maybe<LitVal> unusedLiteral;
2835 if (!DecodeConstantExpression(d, env, elemType, &unusedLiteral)) {
2836 return false;
2839 const uint8_t* exprsEnd = d.currentPosition();
2840 if (!seg.elemExpressions.exprBytes.append(exprsStart, exprsEnd)) {
2841 return false;
2843 } break;
2846 env->elemSegments.infallibleAppend(std::move(seg));
2847 return true;
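// A sketch of the eight standard element-segment flag values (0..7) handled
// above, per the reference-types/bulk-memory encoding (for illustration):
//
//   0  active, table 0, function indices
//   1  passive, elem kind + function indices
//   2  active, explicit table index, elem kind + function indices
//   3  declared, elem kind + function indices
//   4  active, table 0, element expressions (funcref)
//   5  passive, ref type + element expressions
//   6  active, explicit table index, ref type + element expressions
//   7  declared, ref type + element expressions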
2850 static bool DecodeElemSection(Decoder& d, ModuleEnvironment* env) {
2851 MaybeSectionRange range;
2852 if (!d.startSection(SectionId::Elem, env, &range, "elem")) {
2853 return false;
2855 if (!range) {
2856 return true;
2859 uint32_t numSegments;
2860 if (!d.readVarU32(&numSegments)) {
2861 return d.fail("failed to read number of elem segments");
2864 if (numSegments > MaxElemSegments) {
2865 return d.fail("too many elem segments");
2868 if (!env->elemSegments.reserve(numSegments)) {
2869 return false;
2872 for (uint32_t i = 0; i < numSegments; i++) {
2873 if (!DecodeElemSegment(d, env)) {
2874 return false;
2878 return d.finishSection(*range, "elem");
2881 static bool DecodeDataCountSection(Decoder& d, ModuleEnvironment* env) {
2882 MaybeSectionRange range;
2883 if (!d.startSection(SectionId::DataCount, env, &range, "datacount")) {
2884 return false;
2886 if (!range) {
2887 return true;
2890 uint32_t dataCount;
2891 if (!d.readVarU32(&dataCount)) {
2892 return d.fail("expected data segment count");
2895 env->dataCount.emplace(dataCount);
2897 return d.finishSection(*range, "datacount");
2900 bool wasm::StartsCodeSection(const uint8_t* begin, const uint8_t* end,
2901 SectionRange* codeSection) {
2902 UniqueChars unused;
2903 Decoder d(begin, end, 0, &unused);
2905 if (!DecodePreamble(d)) {
2906 return false;
2909 while (!d.done()) {
2910 uint8_t id;
2911 SectionRange range;
2912 if (!d.readSectionHeader(&id, &range)) {
2913 return false;
2916 if (id == uint8_t(SectionId::Code)) {
2917 *codeSection = range;
2918 return true;
2921 if (!d.readBytes(range.size)) {
2922 return false;
2926 return false;
2929 bool wasm::DecodeModuleEnvironment(Decoder& d, ModuleEnvironment* env) {
2930 if (!DecodePreamble(d)) {
2931 return false;
2934 if (!DecodeTypeSection(d, env)) {
2935 return false;
2938 if (!DecodeImportSection(d, env)) {
2939 return false;
2942 // Eagerly check imports against any known builtin modules to catch
2943 // future link errors.
2944 if (!CheckImportsAgainstBuiltinModules(d, env)) {
2945 return false;
2948 if (!DecodeFunctionSection(d, env)) {
2949 return false;
2952 if (!DecodeTableSection(d, env)) {
2953 return false;
2956 if (!DecodeMemorySection(d, env)) {
2957 return false;
2960 if (!DecodeTagSection(d, env)) {
2961 return false;
2964 if (!DecodeGlobalSection(d, env)) {
2965 return false;
2968 if (!DecodeExportSection(d, env)) {
2969 return false;
2972 if (!DecodeStartSection(d, env)) {
2973 return false;
2976 if (!DecodeElemSection(d, env)) {
2977 return false;
2980 if (!DecodeDataCountSection(d, env)) {
2981 return false;
2984 if (!d.startSection(SectionId::Code, env, &env->codeSection, "code")) {
2985 return false;
2988 if (env->codeSection && env->codeSection->size > MaxCodeSectionBytes) {
2989 return d.fail("code section too big");
2992 return true;
2995 static bool DecodeFunctionBody(Decoder& d, const ModuleEnvironment& env,
2996 uint32_t funcIndex) {
2997 uint32_t bodySize;
2998 if (!d.readVarU32(&bodySize)) {
2999 return d.fail("expected number of function body bytes");
3002 if (bodySize > MaxFunctionBytes) {
3003 return d.fail("function body too big");
3006 if (d.bytesRemain() < bodySize) {
3007 return d.fail("function body length too big");
3010 return ValidateFunctionBody(env, funcIndex, bodySize, d);
3013 static bool DecodeCodeSection(Decoder& d, ModuleEnvironment* env) {
3014 if (!env->codeSection) {
3015 if (env->numFuncDefs() != 0) {
3016 return d.fail("expected code section");
3018 return true;
3021 uint32_t numFuncDefs;
3022 if (!d.readVarU32(&numFuncDefs)) {
3023 return d.fail("expected function body count");
3026 if (numFuncDefs != env->numFuncDefs()) {
3027 return d.fail(
3028 "function body count does not match function signature count");
3031 for (uint32_t funcDefIndex = 0; funcDefIndex < numFuncDefs; funcDefIndex++) {
3032 if (!DecodeFunctionBody(d, *env, env->numFuncImports + funcDefIndex)) {
3033 return false;
3037 return d.finishSection(*env->codeSection, "code");
3040 static bool DecodeDataSection(Decoder& d, ModuleEnvironment* env) {
3041 MaybeSectionRange range;
3042 if (!d.startSection(SectionId::Data, env, &range, "data")) {
3043 return false;
3045 if (!range) {
3046 if (env->dataCount.isSome() && *env->dataCount > 0) {
3047 return d.fail("number of data segments does not match declared count");
3049 return true;
3052 uint32_t numSegments;
3053 if (!d.readVarU32(&numSegments)) {
3054 return d.fail("failed to read number of data segments");
3057 if (numSegments > MaxDataSegments) {
3058 return d.fail("too many data segments");
3061 if (env->dataCount.isSome() && numSegments != *env->dataCount) {
3062 return d.fail("number of data segments does not match declared count");
3065 for (uint32_t i = 0; i < numSegments; i++) {
3066 uint32_t initializerKindVal;
3067 if (!d.readVarU32(&initializerKindVal)) {
3068 return d.fail("expected data initializer-kind field");
3071 switch (initializerKindVal) {
3072 case uint32_t(DataSegmentKind::Active):
3073 case uint32_t(DataSegmentKind::Passive):
3074 case uint32_t(DataSegmentKind::ActiveWithMemoryIndex):
3075 break;
3076 default:
3077 return d.fail("invalid data initializer-kind field");
3080 DataSegmentKind initializerKind = DataSegmentKind(initializerKindVal);
3082 if (initializerKind != DataSegmentKind::Passive &&
3083 env->numMemories() == 0) {
3084 return d.fail("active data segment requires a memory section");
3087 DataSegmentEnv seg;
3088 if (initializerKind == DataSegmentKind::ActiveWithMemoryIndex) {
3089 if (!d.readVarU32(&seg.memoryIndex)) {
3090 return d.fail("expected memory index");
3092 } else if (initializerKind == DataSegmentKind::Active) {
3093 seg.memoryIndex = 0;
3094 } else {
3095 seg.memoryIndex = InvalidMemoryIndex;
3098 if (initializerKind == DataSegmentKind::Active ||
3099 initializerKind == DataSegmentKind::ActiveWithMemoryIndex) {
3100 if (seg.memoryIndex >= env->numMemories()) {
3101 return d.fail("invalid memory index");
3104 InitExpr segOffset;
3105 ValType exprType = ToValType(env->memories[seg.memoryIndex].indexType());
3106 if (!InitExpr::decodeAndValidate(d, env, exprType, &segOffset)) {
3107 return false;
3109 seg.offsetIfActive.emplace(std::move(segOffset));
3112 if (!d.readVarU32(&seg.length)) {
3113 return d.fail("expected segment size");
3116 if (seg.length > MaxDataSegmentLengthPages * PageSize) {
3117 return d.fail("segment size too big");
3120 seg.bytecodeOffset = d.currentOffset();
3122 if (!d.readBytes(seg.length)) {
3123 return d.fail("data segment shorter than declared");
3126 if (!env->dataSegments.append(std::move(seg))) {
3127 return false;
3131 return d.finishSection(*range, "data");
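// The three initializer kinds accepted above match the standard encoding:
// 0 is an active segment for memory 0, 1 is a passive segment, and 2 is an
// active segment with an explicit memory index, e.g. (text format,
// illustrative only):
//
//   (data (i32.const 8) "hello")   ;; active, memory 0
//   (data "payload")               ;; passive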
3134 static bool DecodeModuleNameSubsection(Decoder& d,
3135 const CustomSectionEnv& nameSection,
3136 ModuleEnvironment* env) {
3137 Maybe<uint32_t> endOffset;
3138 if (!d.startNameSubsection(NameType::Module, &endOffset)) {
3139 return false;
3141 if (!endOffset) {
3142 return true;
3145 Name moduleName;
3146 if (!d.readVarU32(&moduleName.length)) {
3147 return d.fail("failed to read module name length");
3150 MOZ_ASSERT(d.currentOffset() >= nameSection.payloadOffset);
3151 moduleName.offsetInNamePayload =
3152 d.currentOffset() - nameSection.payloadOffset;
3154 const uint8_t* bytes;
3155 if (!d.readBytes(moduleName.length, &bytes)) {
3156 return d.fail("failed to read module name bytes");
3159 if (!d.finishNameSubsection(*endOffset)) {
3160 return false;
3163 // Only save the module name if the whole subsection validates.
3164 env->moduleName.emplace(moduleName);
3165 return true;
3168 static bool DecodeFunctionNameSubsection(Decoder& d,
3169 const CustomSectionEnv& nameSection,
3170 ModuleEnvironment* env) {
3171 Maybe<uint32_t> endOffset;
3172 if (!d.startNameSubsection(NameType::Function, &endOffset)) {
3173 return false;
3175 if (!endOffset) {
3176 return true;
3179 uint32_t nameCount = 0;
3180 if (!d.readVarU32(&nameCount) || nameCount > MaxFuncs) {
3181 return d.fail("bad function name count");
3184 NameVector funcNames;
3186 for (uint32_t i = 0; i < nameCount; ++i) {
3187 uint32_t funcIndex = 0;
3188 if (!d.readVarU32(&funcIndex)) {
3189 return d.fail("unable to read function index");
3192 // Names must refer to real functions and be given in ascending order.
3193 if (funcIndex >= env->numFuncs() || funcIndex < funcNames.length()) {
3194 return d.fail("invalid function index");
3197 Name funcName;
3198 if (!d.readVarU32(&funcName.length) ||
3199 funcName.length > JS::MaxStringLength) {
3200 return d.fail("unable to read function name length");
3203 if (!funcName.length) {
3204 continue;
3207 if (!funcNames.resize(funcIndex + 1)) {
3208 return false;
3211 MOZ_ASSERT(d.currentOffset() >= nameSection.payloadOffset);
3212 funcName.offsetInNamePayload =
3213 d.currentOffset() - nameSection.payloadOffset;
3215 if (!d.readBytes(funcName.length)) {
3216 return d.fail("unable to read function name bytes");
3219 funcNames[funcIndex] = funcName;
3222 if (!d.finishNameSubsection(*endOffset)) {
3223 return false;
3226 // To encourage fully valid function name subsections, only save names if
3227 // the entire subsection decoded correctly.
3228 env->funcNames = std::move(funcNames);
3229 return true;
3232 static bool DecodeNameSection(Decoder& d, ModuleEnvironment* env) {
3233 MaybeSectionRange range;
3234 if (!d.startCustomSection(NameSectionName, env, &range)) {
3235 return false;
3237 if (!range) {
3238 return true;
3241 env->nameCustomSectionIndex = Some(env->customSections.length() - 1);
3242 const CustomSectionEnv& nameSection = env->customSections.back();
3244 // Once started, custom sections do not report validation errors.
3246 if (!DecodeModuleNameSubsection(d, nameSection, env)) {
3247 goto finish;
3250 if (!DecodeFunctionNameSubsection(d, nameSection, env)) {
3251 goto finish;
3254 while (d.currentOffset() < range->end()) {
3255 if (!d.skipNameSubsection()) {
3256 goto finish;
3260 finish:
3261 d.finishCustomSection(NameSectionName, *range);
3262 return true;
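// Only the module-name (id 0) and function-name (id 1) subsections are
// decoded here; remaining subsections such as local names (id 2, per the
// standard name-section layout) are skipped, and since this is a custom
// section, malformed content is tolerated rather than reported as a
// validation error.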
3265 bool wasm::DecodeModuleTail(Decoder& d, ModuleEnvironment* env) {
3266 if (!DecodeDataSection(d, env)) {
3267 return false;
3270 if (!DecodeNameSection(d, env)) {
3271 return false;
3274 while (!d.done()) {
3275 if (!d.skipCustomSection(env)) {
3276 if (d.resilientMode()) {
3277 d.clearError();
3278 return true;
3280 return false;
3284 return true;
3287 // Validate algorithm.
3289 bool wasm::Validate(JSContext* cx, const ShareableBytes& bytecode,
3290 const FeatureOptions& options, UniqueChars* error) {
3291 Decoder d(bytecode.bytes, 0, error);
3293 FeatureArgs features = FeatureArgs::build(cx, options);
3294 ModuleEnvironment env(features);
3295 if (!env.init()) {
3296 return false;
3299 if (!DecodeModuleEnvironment(d, &env)) {
3300 return false;
3303 if (!DecodeCodeSection(d, &env)) {
3304 return false;
3307 if (!DecodeModuleTail(d, &env)) {
3308 return false;
3311 MOZ_ASSERT(!*error, "unreported error in decoding");
3312 return true;