/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
 * vim: set ts=8 sts=2 et sw=2 tw=80:
 *
 * Copyright 2016 Mozilla Foundation
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
19 #include "wasm/WasmValidate.h"
21 #include "mozilla/CheckedInt.h"
22 #include "mozilla/Span.h"
23 #include "mozilla/Utf8.h"
25 #include "js/Printf.h"
26 #include "js/String.h" // JS::MaxStringLength
27 #include "vm/JSContext.h"
29 #include "wasm/WasmInitExpr.h"
30 #include "wasm/WasmOpIter.h"
31 #include "wasm/WasmTypeDecls.h"
34 using namespace js::jit
;
35 using namespace js::wasm
;
37 using mozilla::AsChars
;
38 using mozilla::CheckedInt
;
39 using mozilla::CheckedInt32
;
40 using mozilla::IsUtf8
;
45 bool wasm::EncodeLocalEntries(Encoder
& e
, const ValTypeVector
& locals
) {
46 if (locals
.length() > MaxLocals
) {
50 uint32_t numLocalEntries
= 0;
51 if (locals
.length()) {
52 ValType prev
= locals
[0];
54 for (ValType t
: locals
) {
62 if (!e
.writeVarU32(numLocalEntries
)) {
66 if (numLocalEntries
) {
67 ValType prev
= locals
[0];
69 for (uint32_t i
= 1; i
< locals
.length(); i
++, count
++) {
70 if (prev
!= locals
[i
]) {
71 if (!e
.writeVarU32(count
)) {
74 if (!e
.writeValType(prev
)) {
81 if (!e
.writeVarU32(count
)) {
84 if (!e
.writeValType(prev
)) {
92 bool wasm::DecodeLocalEntries(Decoder
& d
, const TypeContext
& types
,
93 const FeatureArgs
& features
,
94 ValTypeVector
* locals
) {
95 uint32_t numLocalEntries
;
96 if (!d
.readVarU32(&numLocalEntries
)) {
97 return d
.fail("failed to read number of local entries");
100 for (uint32_t i
= 0; i
< numLocalEntries
; i
++) {
102 if (!d
.readVarU32(&count
)) {
103 return d
.fail("failed to read local entry count");
106 if (MaxLocals
- locals
->length() < count
) {
107 return d
.fail("too many locals");
111 if (!d
.readValType(types
, features
, &type
)) {
115 if (!locals
->appendN(type
, count
)) {
123 bool wasm::DecodeValidatedLocalEntries(const TypeContext
& types
, Decoder
& d
,
124 ValTypeVector
* locals
) {
125 uint32_t numLocalEntries
;
126 MOZ_ALWAYS_TRUE(d
.readVarU32(&numLocalEntries
));
128 for (uint32_t i
= 0; i
< numLocalEntries
; i
++) {
129 uint32_t count
= d
.uncheckedReadVarU32();
130 MOZ_ASSERT(MaxLocals
- locals
->length() >= count
);
131 if (!locals
->appendN(d
.uncheckedReadValType(types
), count
)) {
139 bool wasm::CheckIsSubtypeOf(Decoder
& d
, const ModuleEnvironment
& env
,
140 size_t opcodeOffset
, FieldType subType
,
141 FieldType superType
) {
142 if (FieldType::isSubTypeOf(subType
, superType
)) {
146 UniqueChars subText
= ToString(subType
, env
.types
);
151 UniqueChars superText
= ToString(superType
, env
.types
);
157 JS_smprintf("type mismatch: expression has type %s but expected %s",
158 subText
.get(), superText
.get()));
163 return d
.fail(opcodeOffset
, error
.get());
166 // Function body validation.
168 static bool DecodeFunctionBodyExprs(const ModuleEnvironment
& env
,
170 const ValTypeVector
& locals
,
171 const uint8_t* bodyEnd
, Decoder
* d
) {
172 ValidatingOpIter
iter(env
, *d
);
174 if (!iter
.startFunction(funcIndex
, locals
)) {
179 if (!(c)) return false; \
184 if (!iter
.readOp(&op
)) {
189 NothingVector nothings
{};
190 ResultType unusedType
;
193 case uint16_t(Op::End
): {
194 LabelKind unusedKind
;
195 if (!iter
.readEnd(&unusedKind
, &unusedType
, ¬hings
, ¬hings
)) {
199 if (iter
.controlStackEmpty()) {
200 return iter
.endFunction(bodyEnd
);
204 case uint16_t(Op::Nop
):
205 CHECK(iter
.readNop());
206 case uint16_t(Op::Drop
):
207 CHECK(iter
.readDrop());
208 case uint16_t(Op::Call
): {
209 uint32_t unusedIndex
;
210 NothingVector unusedArgs
{};
211 CHECK(iter
.readCall(&unusedIndex
, &unusedArgs
));
213 case uint16_t(Op::CallIndirect
): {
214 uint32_t unusedIndex
, unusedIndex2
;
215 NothingVector unusedArgs
{};
216 CHECK(iter
.readCallIndirect(&unusedIndex
, &unusedIndex2
, ¬hing
,
219 #ifdef ENABLE_WASM_TAIL_CALLS
220 case uint16_t(Op::ReturnCall
): {
221 if (!env
.tailCallsEnabled()) {
222 return iter
.unrecognizedOpcode(&op
);
224 uint32_t unusedIndex
;
225 NothingVector unusedArgs
{};
226 CHECK(iter
.readReturnCall(&unusedIndex
, &unusedArgs
));
228 case uint16_t(Op::ReturnCallIndirect
): {
229 if (!env
.tailCallsEnabled()) {
230 return iter
.unrecognizedOpcode(&op
);
232 uint32_t unusedIndex
, unusedIndex2
;
233 NothingVector unusedArgs
{};
234 CHECK(iter
.readReturnCallIndirect(&unusedIndex
, &unusedIndex2
, ¬hing
,
238 #ifdef ENABLE_WASM_FUNCTION_REFERENCES
239 case uint16_t(Op::CallRef
): {
240 if (!env
.functionReferencesEnabled()) {
241 return iter
.unrecognizedOpcode(&op
);
243 const FuncType
* unusedType
;
244 NothingVector unusedArgs
{};
245 CHECK(iter
.readCallRef(&unusedType
, ¬hing
, &unusedArgs
));
247 # ifdef ENABLE_WASM_TAIL_CALLS
248 case uint16_t(Op::ReturnCallRef
): {
249 if (!env
.functionReferencesEnabled() || !env
.tailCallsEnabled()) {
250 return iter
.unrecognizedOpcode(&op
);
252 const FuncType
* unusedType
;
253 NothingVector unusedArgs
{};
254 CHECK(iter
.readReturnCallRef(&unusedType
, ¬hing
, &unusedArgs
));
258 case uint16_t(Op::I32Const
): {
260 CHECK(iter
.readI32Const(&unused
));
262 case uint16_t(Op::I64Const
): {
264 CHECK(iter
.readI64Const(&unused
));
266 case uint16_t(Op::F32Const
): {
268 CHECK(iter
.readF32Const(&unused
));
270 case uint16_t(Op::F64Const
): {
272 CHECK(iter
.readF64Const(&unused
));
274 case uint16_t(Op::LocalGet
): {
276 CHECK(iter
.readGetLocal(locals
, &unused
));
278 case uint16_t(Op::LocalSet
): {
280 CHECK(iter
.readSetLocal(locals
, &unused
, ¬hing
));
282 case uint16_t(Op::LocalTee
): {
284 CHECK(iter
.readTeeLocal(locals
, &unused
, ¬hing
));
286 case uint16_t(Op::GlobalGet
): {
288 CHECK(iter
.readGetGlobal(&unused
));
290 case uint16_t(Op::GlobalSet
): {
292 CHECK(iter
.readSetGlobal(&unused
, ¬hing
));
294 case uint16_t(Op::TableGet
): {
295 uint32_t unusedTableIndex
;
296 CHECK(iter
.readTableGet(&unusedTableIndex
, ¬hing
));
298 case uint16_t(Op::TableSet
): {
299 uint32_t unusedTableIndex
;
300 CHECK(iter
.readTableSet(&unusedTableIndex
, ¬hing
, ¬hing
));
302 case uint16_t(Op::SelectNumeric
): {
304 CHECK(iter
.readSelect(/*typed*/ false, &unused
, ¬hing
, ¬hing
,
307 case uint16_t(Op::SelectTyped
): {
309 CHECK(iter
.readSelect(/*typed*/ true, &unused
, ¬hing
, ¬hing
,
312 case uint16_t(Op::Block
):
313 CHECK(iter
.readBlock(&unusedType
));
314 case uint16_t(Op::Loop
):
315 CHECK(iter
.readLoop(&unusedType
));
316 case uint16_t(Op::If
):
317 CHECK(iter
.readIf(&unusedType
, ¬hing
));
318 case uint16_t(Op::Else
):
319 CHECK(iter
.readElse(&unusedType
, &unusedType
, ¬hings
));
320 case uint16_t(Op::I32Clz
):
321 case uint16_t(Op::I32Ctz
):
322 case uint16_t(Op::I32Popcnt
):
323 CHECK(iter
.readUnary(ValType::I32
, ¬hing
));
324 case uint16_t(Op::I64Clz
):
325 case uint16_t(Op::I64Ctz
):
326 case uint16_t(Op::I64Popcnt
):
327 CHECK(iter
.readUnary(ValType::I64
, ¬hing
));
328 case uint16_t(Op::F32Abs
):
329 case uint16_t(Op::F32Neg
):
330 case uint16_t(Op::F32Ceil
):
331 case uint16_t(Op::F32Floor
):
332 case uint16_t(Op::F32Sqrt
):
333 case uint16_t(Op::F32Trunc
):
334 case uint16_t(Op::F32Nearest
):
335 CHECK(iter
.readUnary(ValType::F32
, ¬hing
));
336 case uint16_t(Op::F64Abs
):
337 case uint16_t(Op::F64Neg
):
338 case uint16_t(Op::F64Ceil
):
339 case uint16_t(Op::F64Floor
):
340 case uint16_t(Op::F64Sqrt
):
341 case uint16_t(Op::F64Trunc
):
342 case uint16_t(Op::F64Nearest
):
343 CHECK(iter
.readUnary(ValType::F64
, ¬hing
));
344 case uint16_t(Op::I32Add
):
345 case uint16_t(Op::I32Sub
):
346 case uint16_t(Op::I32Mul
):
347 case uint16_t(Op::I32DivS
):
348 case uint16_t(Op::I32DivU
):
349 case uint16_t(Op::I32RemS
):
350 case uint16_t(Op::I32RemU
):
351 case uint16_t(Op::I32And
):
352 case uint16_t(Op::I32Or
):
353 case uint16_t(Op::I32Xor
):
354 case uint16_t(Op::I32Shl
):
355 case uint16_t(Op::I32ShrS
):
356 case uint16_t(Op::I32ShrU
):
357 case uint16_t(Op::I32Rotl
):
358 case uint16_t(Op::I32Rotr
):
359 CHECK(iter
.readBinary(ValType::I32
, ¬hing
, ¬hing
));
360 case uint16_t(Op::I64Add
):
361 case uint16_t(Op::I64Sub
):
362 case uint16_t(Op::I64Mul
):
363 case uint16_t(Op::I64DivS
):
364 case uint16_t(Op::I64DivU
):
365 case uint16_t(Op::I64RemS
):
366 case uint16_t(Op::I64RemU
):
367 case uint16_t(Op::I64And
):
368 case uint16_t(Op::I64Or
):
369 case uint16_t(Op::I64Xor
):
370 case uint16_t(Op::I64Shl
):
371 case uint16_t(Op::I64ShrS
):
372 case uint16_t(Op::I64ShrU
):
373 case uint16_t(Op::I64Rotl
):
374 case uint16_t(Op::I64Rotr
):
375 CHECK(iter
.readBinary(ValType::I64
, ¬hing
, ¬hing
));
376 case uint16_t(Op::F32Add
):
377 case uint16_t(Op::F32Sub
):
378 case uint16_t(Op::F32Mul
):
379 case uint16_t(Op::F32Div
):
380 case uint16_t(Op::F32Min
):
381 case uint16_t(Op::F32Max
):
382 case uint16_t(Op::F32CopySign
):
383 CHECK(iter
.readBinary(ValType::F32
, ¬hing
, ¬hing
));
384 case uint16_t(Op::F64Add
):
385 case uint16_t(Op::F64Sub
):
386 case uint16_t(Op::F64Mul
):
387 case uint16_t(Op::F64Div
):
388 case uint16_t(Op::F64Min
):
389 case uint16_t(Op::F64Max
):
390 case uint16_t(Op::F64CopySign
):
391 CHECK(iter
.readBinary(ValType::F64
, ¬hing
, ¬hing
));
392 case uint16_t(Op::I32Eq
):
393 case uint16_t(Op::I32Ne
):
394 case uint16_t(Op::I32LtS
):
395 case uint16_t(Op::I32LtU
):
396 case uint16_t(Op::I32LeS
):
397 case uint16_t(Op::I32LeU
):
398 case uint16_t(Op::I32GtS
):
399 case uint16_t(Op::I32GtU
):
400 case uint16_t(Op::I32GeS
):
401 case uint16_t(Op::I32GeU
):
402 CHECK(iter
.readComparison(ValType::I32
, ¬hing
, ¬hing
));
403 case uint16_t(Op::I64Eq
):
404 case uint16_t(Op::I64Ne
):
405 case uint16_t(Op::I64LtS
):
406 case uint16_t(Op::I64LtU
):
407 case uint16_t(Op::I64LeS
):
408 case uint16_t(Op::I64LeU
):
409 case uint16_t(Op::I64GtS
):
410 case uint16_t(Op::I64GtU
):
411 case uint16_t(Op::I64GeS
):
412 case uint16_t(Op::I64GeU
):
413 CHECK(iter
.readComparison(ValType::I64
, ¬hing
, ¬hing
));
414 case uint16_t(Op::F32Eq
):
415 case uint16_t(Op::F32Ne
):
416 case uint16_t(Op::F32Lt
):
417 case uint16_t(Op::F32Le
):
418 case uint16_t(Op::F32Gt
):
419 case uint16_t(Op::F32Ge
):
420 CHECK(iter
.readComparison(ValType::F32
, ¬hing
, ¬hing
));
421 case uint16_t(Op::F64Eq
):
422 case uint16_t(Op::F64Ne
):
423 case uint16_t(Op::F64Lt
):
424 case uint16_t(Op::F64Le
):
425 case uint16_t(Op::F64Gt
):
426 case uint16_t(Op::F64Ge
):
427 CHECK(iter
.readComparison(ValType::F64
, ¬hing
, ¬hing
));
428 case uint16_t(Op::I32Eqz
):
429 CHECK(iter
.readConversion(ValType::I32
, ValType::I32
, ¬hing
));
430 case uint16_t(Op::I64Eqz
):
431 case uint16_t(Op::I32WrapI64
):
432 CHECK(iter
.readConversion(ValType::I64
, ValType::I32
, ¬hing
));
433 case uint16_t(Op::I32TruncF32S
):
434 case uint16_t(Op::I32TruncF32U
):
435 case uint16_t(Op::I32ReinterpretF32
):
436 CHECK(iter
.readConversion(ValType::F32
, ValType::I32
, ¬hing
));
437 case uint16_t(Op::I32TruncF64S
):
438 case uint16_t(Op::I32TruncF64U
):
439 CHECK(iter
.readConversion(ValType::F64
, ValType::I32
, ¬hing
));
440 case uint16_t(Op::I64ExtendI32S
):
441 case uint16_t(Op::I64ExtendI32U
):
442 CHECK(iter
.readConversion(ValType::I32
, ValType::I64
, ¬hing
));
443 case uint16_t(Op::I64TruncF32S
):
444 case uint16_t(Op::I64TruncF32U
):
445 CHECK(iter
.readConversion(ValType::F32
, ValType::I64
, ¬hing
));
446 case uint16_t(Op::I64TruncF64S
):
447 case uint16_t(Op::I64TruncF64U
):
448 case uint16_t(Op::I64ReinterpretF64
):
449 CHECK(iter
.readConversion(ValType::F64
, ValType::I64
, ¬hing
));
450 case uint16_t(Op::F32ConvertI32S
):
451 case uint16_t(Op::F32ConvertI32U
):
452 case uint16_t(Op::F32ReinterpretI32
):
453 CHECK(iter
.readConversion(ValType::I32
, ValType::F32
, ¬hing
));
454 case uint16_t(Op::F32ConvertI64S
):
455 case uint16_t(Op::F32ConvertI64U
):
456 CHECK(iter
.readConversion(ValType::I64
, ValType::F32
, ¬hing
));
457 case uint16_t(Op::F32DemoteF64
):
458 CHECK(iter
.readConversion(ValType::F64
, ValType::F32
, ¬hing
));
459 case uint16_t(Op::F64ConvertI32S
):
460 case uint16_t(Op::F64ConvertI32U
):
461 CHECK(iter
.readConversion(ValType::I32
, ValType::F64
, ¬hing
));
462 case uint16_t(Op::F64ConvertI64S
):
463 case uint16_t(Op::F64ConvertI64U
):
464 case uint16_t(Op::F64ReinterpretI64
):
465 CHECK(iter
.readConversion(ValType::I64
, ValType::F64
, ¬hing
));
466 case uint16_t(Op::F64PromoteF32
):
467 CHECK(iter
.readConversion(ValType::F32
, ValType::F64
, ¬hing
));
468 case uint16_t(Op::I32Extend8S
):
469 case uint16_t(Op::I32Extend16S
):
470 CHECK(iter
.readConversion(ValType::I32
, ValType::I32
, ¬hing
));
471 case uint16_t(Op::I64Extend8S
):
472 case uint16_t(Op::I64Extend16S
):
473 case uint16_t(Op::I64Extend32S
):
474 CHECK(iter
.readConversion(ValType::I64
, ValType::I64
, ¬hing
));
475 case uint16_t(Op::I32Load8S
):
476 case uint16_t(Op::I32Load8U
): {
477 LinearMemoryAddress
<Nothing
> addr
;
478 CHECK(iter
.readLoad(ValType::I32
, 1, &addr
));
480 case uint16_t(Op::I32Load16S
):
481 case uint16_t(Op::I32Load16U
): {
482 LinearMemoryAddress
<Nothing
> addr
;
483 CHECK(iter
.readLoad(ValType::I32
, 2, &addr
));
485 case uint16_t(Op::I32Load
): {
486 LinearMemoryAddress
<Nothing
> addr
;
487 CHECK(iter
.readLoad(ValType::I32
, 4, &addr
));
489 case uint16_t(Op::I64Load8S
):
490 case uint16_t(Op::I64Load8U
): {
491 LinearMemoryAddress
<Nothing
> addr
;
492 CHECK(iter
.readLoad(ValType::I64
, 1, &addr
));
494 case uint16_t(Op::I64Load16S
):
495 case uint16_t(Op::I64Load16U
): {
496 LinearMemoryAddress
<Nothing
> addr
;
497 CHECK(iter
.readLoad(ValType::I64
, 2, &addr
));
499 case uint16_t(Op::I64Load32S
):
500 case uint16_t(Op::I64Load32U
): {
501 LinearMemoryAddress
<Nothing
> addr
;
502 CHECK(iter
.readLoad(ValType::I64
, 4, &addr
));
504 case uint16_t(Op::I64Load
): {
505 LinearMemoryAddress
<Nothing
> addr
;
506 CHECK(iter
.readLoad(ValType::I64
, 8, &addr
));
508 case uint16_t(Op::F32Load
): {
509 LinearMemoryAddress
<Nothing
> addr
;
510 CHECK(iter
.readLoad(ValType::F32
, 4, &addr
));
512 case uint16_t(Op::F64Load
): {
513 LinearMemoryAddress
<Nothing
> addr
;
514 CHECK(iter
.readLoad(ValType::F64
, 8, &addr
));
516 case uint16_t(Op::I32Store8
): {
517 LinearMemoryAddress
<Nothing
> addr
;
518 CHECK(iter
.readStore(ValType::I32
, 1, &addr
, ¬hing
));
520 case uint16_t(Op::I32Store16
): {
521 LinearMemoryAddress
<Nothing
> addr
;
522 CHECK(iter
.readStore(ValType::I32
, 2, &addr
, ¬hing
));
524 case uint16_t(Op::I32Store
): {
525 LinearMemoryAddress
<Nothing
> addr
;
526 CHECK(iter
.readStore(ValType::I32
, 4, &addr
, ¬hing
));
528 case uint16_t(Op::I64Store8
): {
529 LinearMemoryAddress
<Nothing
> addr
;
530 CHECK(iter
.readStore(ValType::I64
, 1, &addr
, ¬hing
));
532 case uint16_t(Op::I64Store16
): {
533 LinearMemoryAddress
<Nothing
> addr
;
534 CHECK(iter
.readStore(ValType::I64
, 2, &addr
, ¬hing
));
536 case uint16_t(Op::I64Store32
): {
537 LinearMemoryAddress
<Nothing
> addr
;
538 CHECK(iter
.readStore(ValType::I64
, 4, &addr
, ¬hing
));
540 case uint16_t(Op::I64Store
): {
541 LinearMemoryAddress
<Nothing
> addr
;
542 CHECK(iter
.readStore(ValType::I64
, 8, &addr
, ¬hing
));
544 case uint16_t(Op::F32Store
): {
545 LinearMemoryAddress
<Nothing
> addr
;
546 CHECK(iter
.readStore(ValType::F32
, 4, &addr
, ¬hing
));
548 case uint16_t(Op::F64Store
): {
549 LinearMemoryAddress
<Nothing
> addr
;
550 CHECK(iter
.readStore(ValType::F64
, 8, &addr
, ¬hing
));
552 case uint16_t(Op::MemoryGrow
): {
553 uint32_t memoryIndex
;
554 CHECK(iter
.readMemoryGrow(&memoryIndex
, ¬hing
));
556 case uint16_t(Op::MemorySize
): {
557 uint32_t memoryIndex
;
558 CHECK(iter
.readMemorySize(&memoryIndex
));
560 case uint16_t(Op::Br
): {
561 uint32_t unusedDepth
;
562 CHECK(iter
.readBr(&unusedDepth
, &unusedType
, ¬hings
));
564 case uint16_t(Op::BrIf
): {
565 uint32_t unusedDepth
;
566 CHECK(iter
.readBrIf(&unusedDepth
, &unusedType
, ¬hings
, ¬hing
));
568 case uint16_t(Op::BrTable
): {
569 Uint32Vector unusedDepths
;
570 uint32_t unusedDefault
;
571 CHECK(iter
.readBrTable(&unusedDepths
, &unusedDefault
, &unusedType
,
572 ¬hings
, ¬hing
));
574 case uint16_t(Op::Return
):
575 CHECK(iter
.readReturn(¬hings
));
576 case uint16_t(Op::Unreachable
):
577 CHECK(iter
.readUnreachable());
578 #ifdef ENABLE_WASM_GC
579 case uint16_t(Op::GcPrefix
): {
580 if (!env
.gcEnabled()) {
581 return iter
.unrecognizedOpcode(&op
);
584 case uint32_t(GcOp::StructNew
): {
586 NothingVector unusedArgs
{};
587 CHECK(iter
.readStructNew(&unusedUint
, &unusedArgs
));
589 case uint32_t(GcOp::StructNewDefault
): {
591 CHECK(iter
.readStructNewDefault(&unusedUint
));
593 case uint32_t(GcOp::StructGet
): {
594 uint32_t unusedUint1
, unusedUint2
;
595 CHECK(iter
.readStructGet(&unusedUint1
, &unusedUint2
,
596 FieldWideningOp::None
, ¬hing
));
598 case uint32_t(GcOp::StructGetS
): {
599 uint32_t unusedUint1
, unusedUint2
;
600 CHECK(iter
.readStructGet(&unusedUint1
, &unusedUint2
,
601 FieldWideningOp::Signed
, ¬hing
));
603 case uint32_t(GcOp::StructGetU
): {
604 uint32_t unusedUint1
, unusedUint2
;
605 CHECK(iter
.readStructGet(&unusedUint1
, &unusedUint2
,
606 FieldWideningOp::Unsigned
, ¬hing
));
608 case uint32_t(GcOp::StructSet
): {
609 uint32_t unusedUint1
, unusedUint2
;
610 CHECK(iter
.readStructSet(&unusedUint1
, &unusedUint2
, ¬hing
,
613 case uint32_t(GcOp::ArrayNew
): {
615 CHECK(iter
.readArrayNew(&unusedUint
, ¬hing
, ¬hing
));
617 case uint32_t(GcOp::ArrayNewFixed
): {
618 uint32_t unusedUint1
, unusedUint2
;
620 iter
.readArrayNewFixed(&unusedUint1
, &unusedUint2
, ¬hings
));
622 case uint32_t(GcOp::ArrayNewDefault
): {
624 CHECK(iter
.readArrayNewDefault(&unusedUint
, ¬hing
));
626 case uint32_t(GcOp::ArrayNewData
): {
627 uint32_t unusedUint1
, unusedUint2
;
628 CHECK(iter
.readArrayNewData(&unusedUint1
, &unusedUint2
, ¬hing
,
631 case uint32_t(GcOp::ArrayNewElem
): {
632 uint32_t unusedUint1
, unusedUint2
;
633 CHECK(iter
.readArrayNewElem(&unusedUint1
, &unusedUint2
, ¬hing
,
636 case uint32_t(GcOp::ArrayInitData
): {
637 uint32_t unusedUint1
, unusedUint2
;
638 CHECK(iter
.readArrayInitData(&unusedUint1
, &unusedUint2
, ¬hing
,
639 ¬hing
, ¬hing
, ¬hing
));
641 case uint32_t(GcOp::ArrayInitElem
): {
642 uint32_t unusedUint1
, unusedUint2
;
643 CHECK(iter
.readArrayInitElem(&unusedUint1
, &unusedUint2
, ¬hing
,
644 ¬hing
, ¬hing
, ¬hing
));
646 case uint32_t(GcOp::ArrayGet
): {
647 uint32_t unusedUint1
;
648 CHECK(iter
.readArrayGet(&unusedUint1
, FieldWideningOp::None
,
649 ¬hing
, ¬hing
));
651 case uint32_t(GcOp::ArrayGetS
): {
652 uint32_t unusedUint1
;
653 CHECK(iter
.readArrayGet(&unusedUint1
, FieldWideningOp::Signed
,
654 ¬hing
, ¬hing
));
656 case uint32_t(GcOp::ArrayGetU
): {
657 uint32_t unusedUint1
;
658 CHECK(iter
.readArrayGet(&unusedUint1
, FieldWideningOp::Unsigned
,
659 ¬hing
, ¬hing
));
661 case uint32_t(GcOp::ArraySet
): {
662 uint32_t unusedUint1
;
664 iter
.readArraySet(&unusedUint1
, ¬hing
, ¬hing
, ¬hing
));
666 case uint32_t(GcOp::ArrayLen
): {
667 CHECK(iter
.readArrayLen(¬hing
));
669 case uint32_t(GcOp::ArrayCopy
): {
672 CHECK(iter
.readArrayCopy(&unusedInt
, &unusedBool
, ¬hing
,
673 ¬hing
, ¬hing
, ¬hing
, ¬hing
));
675 case uint32_t(GcOp::ArrayFill
): {
676 uint32_t unusedTypeIndex
;
677 CHECK(iter
.readArrayFill(&unusedTypeIndex
, ¬hing
, ¬hing
,
678 ¬hing
, ¬hing
));
680 case uint32_t(GcOp::RefI31
): {
681 CHECK(iter
.readConversion(ValType::I32
,
682 ValType(RefType::i31().asNonNullable()),
685 case uint32_t(GcOp::I31GetS
): {
686 CHECK(iter
.readConversion(ValType(RefType::i31()), ValType::I32
,
689 case uint32_t(GcOp::I31GetU
): {
690 CHECK(iter
.readConversion(ValType(RefType::i31()), ValType::I32
,
693 case uint16_t(GcOp::RefTest
): {
694 RefType unusedSourceType
;
695 RefType unusedDestType
;
696 CHECK(iter
.readRefTest(false, &unusedSourceType
, &unusedDestType
,
699 case uint16_t(GcOp::RefTestNull
): {
700 RefType unusedSourceType
;
701 RefType unusedDestType
;
702 CHECK(iter
.readRefTest(true, &unusedSourceType
, &unusedDestType
,
705 case uint16_t(GcOp::RefCast
): {
706 RefType unusedSourceType
;
707 RefType unusedDestType
;
708 CHECK(iter
.readRefCast(false, &unusedSourceType
, &unusedDestType
,
711 case uint16_t(GcOp::RefCastNull
): {
712 RefType unusedSourceType
;
713 RefType unusedDestType
;
714 CHECK(iter
.readRefCast(true, &unusedSourceType
, &unusedDestType
,
717 case uint16_t(GcOp::BrOnCast
): {
718 uint32_t unusedRelativeDepth
;
719 RefType unusedSourceType
;
720 RefType unusedDestType
;
721 CHECK(iter
.readBrOnCast(true, &unusedRelativeDepth
,
722 &unusedSourceType
, &unusedDestType
,
723 &unusedType
, ¬hings
));
725 case uint16_t(GcOp::BrOnCastFail
): {
726 uint32_t unusedRelativeDepth
;
727 RefType unusedSourceType
;
728 RefType unusedDestType
;
729 CHECK(iter
.readBrOnCast(false, &unusedRelativeDepth
,
730 &unusedSourceType
, &unusedDestType
,
731 &unusedType
, ¬hings
));
733 case uint16_t(GcOp::AnyConvertExtern
): {
734 CHECK(iter
.readRefConversion(RefType::extern_(), RefType::any(),
737 case uint16_t(GcOp::ExternConvertAny
): {
738 CHECK(iter
.readRefConversion(RefType::any(), RefType::extern_(),
742 return iter
.unrecognizedOpcode(&op
);
748 #ifdef ENABLE_WASM_SIMD
749 case uint16_t(Op::SimdPrefix
): {
750 if (!env
.simdAvailable()) {
751 return iter
.unrecognizedOpcode(&op
);
755 case uint32_t(SimdOp::I8x16ExtractLaneS
):
756 case uint32_t(SimdOp::I8x16ExtractLaneU
):
757 CHECK(iter
.readExtractLane(ValType::I32
, 16, &noIndex
, ¬hing
));
758 case uint32_t(SimdOp::I16x8ExtractLaneS
):
759 case uint32_t(SimdOp::I16x8ExtractLaneU
):
760 CHECK(iter
.readExtractLane(ValType::I32
, 8, &noIndex
, ¬hing
));
761 case uint32_t(SimdOp::I32x4ExtractLane
):
762 CHECK(iter
.readExtractLane(ValType::I32
, 4, &noIndex
, ¬hing
));
763 case uint32_t(SimdOp::I64x2ExtractLane
):
764 CHECK(iter
.readExtractLane(ValType::I64
, 2, &noIndex
, ¬hing
));
765 case uint32_t(SimdOp::F32x4ExtractLane
):
766 CHECK(iter
.readExtractLane(ValType::F32
, 4, &noIndex
, ¬hing
));
767 case uint32_t(SimdOp::F64x2ExtractLane
):
768 CHECK(iter
.readExtractLane(ValType::F64
, 2, &noIndex
, ¬hing
));
770 case uint32_t(SimdOp::I8x16Splat
):
771 case uint32_t(SimdOp::I16x8Splat
):
772 case uint32_t(SimdOp::I32x4Splat
):
773 CHECK(iter
.readConversion(ValType::I32
, ValType::V128
, ¬hing
));
774 case uint32_t(SimdOp::I64x2Splat
):
775 CHECK(iter
.readConversion(ValType::I64
, ValType::V128
, ¬hing
));
776 case uint32_t(SimdOp::F32x4Splat
):
777 CHECK(iter
.readConversion(ValType::F32
, ValType::V128
, ¬hing
));
778 case uint32_t(SimdOp::F64x2Splat
):
779 CHECK(iter
.readConversion(ValType::F64
, ValType::V128
, ¬hing
));
781 case uint32_t(SimdOp::V128AnyTrue
):
782 case uint32_t(SimdOp::I8x16AllTrue
):
783 case uint32_t(SimdOp::I16x8AllTrue
):
784 case uint32_t(SimdOp::I32x4AllTrue
):
785 case uint32_t(SimdOp::I64x2AllTrue
):
786 case uint32_t(SimdOp::I8x16Bitmask
):
787 case uint32_t(SimdOp::I16x8Bitmask
):
788 case uint32_t(SimdOp::I32x4Bitmask
):
789 case uint32_t(SimdOp::I64x2Bitmask
):
790 CHECK(iter
.readConversion(ValType::V128
, ValType::I32
, ¬hing
));
792 case uint32_t(SimdOp::I8x16ReplaceLane
):
793 CHECK(iter
.readReplaceLane(ValType::I32
, 16, &noIndex
, ¬hing
,
795 case uint32_t(SimdOp::I16x8ReplaceLane
):
796 CHECK(iter
.readReplaceLane(ValType::I32
, 8, &noIndex
, ¬hing
,
798 case uint32_t(SimdOp::I32x4ReplaceLane
):
799 CHECK(iter
.readReplaceLane(ValType::I32
, 4, &noIndex
, ¬hing
,
801 case uint32_t(SimdOp::I64x2ReplaceLane
):
802 CHECK(iter
.readReplaceLane(ValType::I64
, 2, &noIndex
, ¬hing
,
804 case uint32_t(SimdOp::F32x4ReplaceLane
):
805 CHECK(iter
.readReplaceLane(ValType::F32
, 4, &noIndex
, ¬hing
,
807 case uint32_t(SimdOp::F64x2ReplaceLane
):
808 CHECK(iter
.readReplaceLane(ValType::F64
, 2, &noIndex
, ¬hing
,
811 case uint32_t(SimdOp::I8x16Eq
):
812 case uint32_t(SimdOp::I8x16Ne
):
813 case uint32_t(SimdOp::I8x16LtS
):
814 case uint32_t(SimdOp::I8x16LtU
):
815 case uint32_t(SimdOp::I8x16GtS
):
816 case uint32_t(SimdOp::I8x16GtU
):
817 case uint32_t(SimdOp::I8x16LeS
):
818 case uint32_t(SimdOp::I8x16LeU
):
819 case uint32_t(SimdOp::I8x16GeS
):
820 case uint32_t(SimdOp::I8x16GeU
):
821 case uint32_t(SimdOp::I16x8Eq
):
822 case uint32_t(SimdOp::I16x8Ne
):
823 case uint32_t(SimdOp::I16x8LtS
):
824 case uint32_t(SimdOp::I16x8LtU
):
825 case uint32_t(SimdOp::I16x8GtS
):
826 case uint32_t(SimdOp::I16x8GtU
):
827 case uint32_t(SimdOp::I16x8LeS
):
828 case uint32_t(SimdOp::I16x8LeU
):
829 case uint32_t(SimdOp::I16x8GeS
):
830 case uint32_t(SimdOp::I16x8GeU
):
831 case uint32_t(SimdOp::I32x4Eq
):
832 case uint32_t(SimdOp::I32x4Ne
):
833 case uint32_t(SimdOp::I32x4LtS
):
834 case uint32_t(SimdOp::I32x4LtU
):
835 case uint32_t(SimdOp::I32x4GtS
):
836 case uint32_t(SimdOp::I32x4GtU
):
837 case uint32_t(SimdOp::I32x4LeS
):
838 case uint32_t(SimdOp::I32x4LeU
):
839 case uint32_t(SimdOp::I32x4GeS
):
840 case uint32_t(SimdOp::I32x4GeU
):
841 case uint32_t(SimdOp::I64x2Eq
):
842 case uint32_t(SimdOp::I64x2Ne
):
843 case uint32_t(SimdOp::I64x2LtS
):
844 case uint32_t(SimdOp::I64x2GtS
):
845 case uint32_t(SimdOp::I64x2LeS
):
846 case uint32_t(SimdOp::I64x2GeS
):
847 case uint32_t(SimdOp::F32x4Eq
):
848 case uint32_t(SimdOp::F32x4Ne
):
849 case uint32_t(SimdOp::F32x4Lt
):
850 case uint32_t(SimdOp::F32x4Gt
):
851 case uint32_t(SimdOp::F32x4Le
):
852 case uint32_t(SimdOp::F32x4Ge
):
853 case uint32_t(SimdOp::F64x2Eq
):
854 case uint32_t(SimdOp::F64x2Ne
):
855 case uint32_t(SimdOp::F64x2Lt
):
856 case uint32_t(SimdOp::F64x2Gt
):
857 case uint32_t(SimdOp::F64x2Le
):
858 case uint32_t(SimdOp::F64x2Ge
):
859 case uint32_t(SimdOp::V128And
):
860 case uint32_t(SimdOp::V128Or
):
861 case uint32_t(SimdOp::V128Xor
):
862 case uint32_t(SimdOp::V128AndNot
):
863 case uint32_t(SimdOp::I8x16AvgrU
):
864 case uint32_t(SimdOp::I16x8AvgrU
):
865 case uint32_t(SimdOp::I8x16Add
):
866 case uint32_t(SimdOp::I8x16AddSatS
):
867 case uint32_t(SimdOp::I8x16AddSatU
):
868 case uint32_t(SimdOp::I8x16Sub
):
869 case uint32_t(SimdOp::I8x16SubSatS
):
870 case uint32_t(SimdOp::I8x16SubSatU
):
871 case uint32_t(SimdOp::I8x16MinS
):
872 case uint32_t(SimdOp::I8x16MinU
):
873 case uint32_t(SimdOp::I8x16MaxS
):
874 case uint32_t(SimdOp::I8x16MaxU
):
875 case uint32_t(SimdOp::I16x8Add
):
876 case uint32_t(SimdOp::I16x8AddSatS
):
877 case uint32_t(SimdOp::I16x8AddSatU
):
878 case uint32_t(SimdOp::I16x8Sub
):
879 case uint32_t(SimdOp::I16x8SubSatS
):
880 case uint32_t(SimdOp::I16x8SubSatU
):
881 case uint32_t(SimdOp::I16x8Mul
):
882 case uint32_t(SimdOp::I16x8MinS
):
883 case uint32_t(SimdOp::I16x8MinU
):
884 case uint32_t(SimdOp::I16x8MaxS
):
885 case uint32_t(SimdOp::I16x8MaxU
):
886 case uint32_t(SimdOp::I32x4Add
):
887 case uint32_t(SimdOp::I32x4Sub
):
888 case uint32_t(SimdOp::I32x4Mul
):
889 case uint32_t(SimdOp::I32x4MinS
):
890 case uint32_t(SimdOp::I32x4MinU
):
891 case uint32_t(SimdOp::I32x4MaxS
):
892 case uint32_t(SimdOp::I32x4MaxU
):
893 case uint32_t(SimdOp::I64x2Add
):
894 case uint32_t(SimdOp::I64x2Sub
):
895 case uint32_t(SimdOp::I64x2Mul
):
896 case uint32_t(SimdOp::F32x4Add
):
897 case uint32_t(SimdOp::F32x4Sub
):
898 case uint32_t(SimdOp::F32x4Mul
):
899 case uint32_t(SimdOp::F32x4Div
):
900 case uint32_t(SimdOp::F32x4Min
):
901 case uint32_t(SimdOp::F32x4Max
):
902 case uint32_t(SimdOp::F64x2Add
):
903 case uint32_t(SimdOp::F64x2Sub
):
904 case uint32_t(SimdOp::F64x2Mul
):
905 case uint32_t(SimdOp::F64x2Div
):
906 case uint32_t(SimdOp::F64x2Min
):
907 case uint32_t(SimdOp::F64x2Max
):
908 case uint32_t(SimdOp::I8x16NarrowI16x8S
):
909 case uint32_t(SimdOp::I8x16NarrowI16x8U
):
910 case uint32_t(SimdOp::I16x8NarrowI32x4S
):
911 case uint32_t(SimdOp::I16x8NarrowI32x4U
):
912 case uint32_t(SimdOp::I8x16Swizzle
):
913 case uint32_t(SimdOp::F32x4PMax
):
914 case uint32_t(SimdOp::F32x4PMin
):
915 case uint32_t(SimdOp::F64x2PMax
):
916 case uint32_t(SimdOp::F64x2PMin
):
917 case uint32_t(SimdOp::I32x4DotI16x8S
):
918 case uint32_t(SimdOp::I16x8ExtmulLowI8x16S
):
919 case uint32_t(SimdOp::I16x8ExtmulHighI8x16S
):
920 case uint32_t(SimdOp::I16x8ExtmulLowI8x16U
):
921 case uint32_t(SimdOp::I16x8ExtmulHighI8x16U
):
922 case uint32_t(SimdOp::I32x4ExtmulLowI16x8S
):
923 case uint32_t(SimdOp::I32x4ExtmulHighI16x8S
):
924 case uint32_t(SimdOp::I32x4ExtmulLowI16x8U
):
925 case uint32_t(SimdOp::I32x4ExtmulHighI16x8U
):
926 case uint32_t(SimdOp::I64x2ExtmulLowI32x4S
):
927 case uint32_t(SimdOp::I64x2ExtmulHighI32x4S
):
928 case uint32_t(SimdOp::I64x2ExtmulLowI32x4U
):
929 case uint32_t(SimdOp::I64x2ExtmulHighI32x4U
):
930 case uint32_t(SimdOp::I16x8Q15MulrSatS
):
931 CHECK(iter
.readBinary(ValType::V128
, ¬hing
, ¬hing
));
933 case uint32_t(SimdOp::I8x16Neg
):
934 case uint32_t(SimdOp::I16x8Neg
):
935 case uint32_t(SimdOp::I16x8ExtendLowI8x16S
):
936 case uint32_t(SimdOp::I16x8ExtendHighI8x16S
):
937 case uint32_t(SimdOp::I16x8ExtendLowI8x16U
):
938 case uint32_t(SimdOp::I16x8ExtendHighI8x16U
):
939 case uint32_t(SimdOp::I32x4Neg
):
940 case uint32_t(SimdOp::I32x4ExtendLowI16x8S
):
941 case uint32_t(SimdOp::I32x4ExtendHighI16x8S
):
942 case uint32_t(SimdOp::I32x4ExtendLowI16x8U
):
943 case uint32_t(SimdOp::I32x4ExtendHighI16x8U
):
944 case uint32_t(SimdOp::I32x4TruncSatF32x4S
):
945 case uint32_t(SimdOp::I32x4TruncSatF32x4U
):
946 case uint32_t(SimdOp::I64x2Neg
):
947 case uint32_t(SimdOp::I64x2ExtendLowI32x4S
):
948 case uint32_t(SimdOp::I64x2ExtendHighI32x4S
):
949 case uint32_t(SimdOp::I64x2ExtendLowI32x4U
):
950 case uint32_t(SimdOp::I64x2ExtendHighI32x4U
):
951 case uint32_t(SimdOp::F32x4Abs
):
952 case uint32_t(SimdOp::F32x4Neg
):
953 case uint32_t(SimdOp::F32x4Sqrt
):
954 case uint32_t(SimdOp::F32x4ConvertI32x4S
):
955 case uint32_t(SimdOp::F32x4ConvertI32x4U
):
956 case uint32_t(SimdOp::F64x2Abs
):
957 case uint32_t(SimdOp::F64x2Neg
):
958 case uint32_t(SimdOp::F64x2Sqrt
):
959 case uint32_t(SimdOp::V128Not
):
960 case uint32_t(SimdOp::I8x16Popcnt
):
961 case uint32_t(SimdOp::I8x16Abs
):
962 case uint32_t(SimdOp::I16x8Abs
):
963 case uint32_t(SimdOp::I32x4Abs
):
964 case uint32_t(SimdOp::I64x2Abs
):
965 case uint32_t(SimdOp::F32x4Ceil
):
966 case uint32_t(SimdOp::F32x4Floor
):
967 case uint32_t(SimdOp::F32x4Trunc
):
968 case uint32_t(SimdOp::F32x4Nearest
):
969 case uint32_t(SimdOp::F64x2Ceil
):
970 case uint32_t(SimdOp::F64x2Floor
):
971 case uint32_t(SimdOp::F64x2Trunc
):
972 case uint32_t(SimdOp::F64x2Nearest
):
973 case uint32_t(SimdOp::F32x4DemoteF64x2Zero
):
974 case uint32_t(SimdOp::F64x2PromoteLowF32x4
):
975 case uint32_t(SimdOp::F64x2ConvertLowI32x4S
):
976 case uint32_t(SimdOp::F64x2ConvertLowI32x4U
):
977 case uint32_t(SimdOp::I32x4TruncSatF64x2SZero
):
978 case uint32_t(SimdOp::I32x4TruncSatF64x2UZero
):
979 case uint32_t(SimdOp::I16x8ExtaddPairwiseI8x16S
):
980 case uint32_t(SimdOp::I16x8ExtaddPairwiseI8x16U
):
981 case uint32_t(SimdOp::I32x4ExtaddPairwiseI16x8S
):
982 case uint32_t(SimdOp::I32x4ExtaddPairwiseI16x8U
):
983 CHECK(iter
.readUnary(ValType::V128
, ¬hing
));
985 case uint32_t(SimdOp::I8x16Shl
):
986 case uint32_t(SimdOp::I8x16ShrS
):
987 case uint32_t(SimdOp::I8x16ShrU
):
988 case uint32_t(SimdOp::I16x8Shl
):
989 case uint32_t(SimdOp::I16x8ShrS
):
990 case uint32_t(SimdOp::I16x8ShrU
):
991 case uint32_t(SimdOp::I32x4Shl
):
992 case uint32_t(SimdOp::I32x4ShrS
):
993 case uint32_t(SimdOp::I32x4ShrU
):
994 case uint32_t(SimdOp::I64x2Shl
):
995 case uint32_t(SimdOp::I64x2ShrS
):
996 case uint32_t(SimdOp::I64x2ShrU
):
997 CHECK(iter
.readVectorShift(¬hing
, ¬hing
));
999 case uint32_t(SimdOp::V128Bitselect
):
1001 iter
.readTernary(ValType::V128
, ¬hing
, ¬hing
, ¬hing
));
1003 case uint32_t(SimdOp::I8x16Shuffle
): {
1005 CHECK(iter
.readVectorShuffle(¬hing
, ¬hing
, &mask
));
1008 case uint32_t(SimdOp::V128Const
): {
1010 CHECK(iter
.readV128Const(&noVector
));
1013 case uint32_t(SimdOp::V128Load
): {
1014 LinearMemoryAddress
<Nothing
> addr
;
1015 CHECK(iter
.readLoad(ValType::V128
, 16, &addr
));
1018 case uint32_t(SimdOp::V128Load8Splat
): {
1019 LinearMemoryAddress
<Nothing
> addr
;
1020 CHECK(iter
.readLoadSplat(1, &addr
));
1023 case uint32_t(SimdOp::V128Load16Splat
): {
1024 LinearMemoryAddress
<Nothing
> addr
;
1025 CHECK(iter
.readLoadSplat(2, &addr
));
1028 case uint32_t(SimdOp::V128Load32Splat
): {
1029 LinearMemoryAddress
<Nothing
> addr
;
1030 CHECK(iter
.readLoadSplat(4, &addr
));
1033 case uint32_t(SimdOp::V128Load64Splat
): {
1034 LinearMemoryAddress
<Nothing
> addr
;
1035 CHECK(iter
.readLoadSplat(8, &addr
));
1038 case uint32_t(SimdOp::V128Load8x8S
):
1039 case uint32_t(SimdOp::V128Load8x8U
): {
1040 LinearMemoryAddress
<Nothing
> addr
;
1041 CHECK(iter
.readLoadExtend(&addr
));
1044 case uint32_t(SimdOp::V128Load16x4S
):
1045 case uint32_t(SimdOp::V128Load16x4U
): {
1046 LinearMemoryAddress
<Nothing
> addr
;
1047 CHECK(iter
.readLoadExtend(&addr
));
1050 case uint32_t(SimdOp::V128Load32x2S
):
1051 case uint32_t(SimdOp::V128Load32x2U
): {
1052 LinearMemoryAddress
<Nothing
> addr
;
1053 CHECK(iter
.readLoadExtend(&addr
));
1056 case uint32_t(SimdOp::V128Store
): {
1057 LinearMemoryAddress
<Nothing
> addr
;
1058 CHECK(iter
.readStore(ValType::V128
, 16, &addr
, ¬hing
));
1061 case uint32_t(SimdOp::V128Load32Zero
): {
1062 LinearMemoryAddress
<Nothing
> addr
;
1063 CHECK(iter
.readLoadSplat(4, &addr
));
1066 case uint32_t(SimdOp::V128Load64Zero
): {
1067 LinearMemoryAddress
<Nothing
> addr
;
1068 CHECK(iter
.readLoadSplat(8, &addr
));
1071 case uint32_t(SimdOp::V128Load8Lane
): {
1072 LinearMemoryAddress
<Nothing
> addr
;
1073 CHECK(iter
.readLoadLane(1, &addr
, &noIndex
, ¬hing
));
1076 case uint32_t(SimdOp::V128Load16Lane
): {
1077 LinearMemoryAddress
<Nothing
> addr
;
1078 CHECK(iter
.readLoadLane(2, &addr
, &noIndex
, ¬hing
));
1081 case uint32_t(SimdOp::V128Load32Lane
): {
1082 LinearMemoryAddress
<Nothing
> addr
;
1083 CHECK(iter
.readLoadLane(4, &addr
, &noIndex
, ¬hing
));
1086 case uint32_t(SimdOp::V128Load64Lane
): {
1087 LinearMemoryAddress
<Nothing
> addr
;
1088 CHECK(iter
.readLoadLane(8, &addr
, &noIndex
, ¬hing
));
1091 case uint32_t(SimdOp::V128Store8Lane
): {
1092 LinearMemoryAddress
<Nothing
> addr
;
1093 CHECK(iter
.readStoreLane(1, &addr
, &noIndex
, ¬hing
));
1096 case uint32_t(SimdOp::V128Store16Lane
): {
1097 LinearMemoryAddress
<Nothing
> addr
;
1098 CHECK(iter
.readStoreLane(2, &addr
, &noIndex
, ¬hing
));
1101 case uint32_t(SimdOp::V128Store32Lane
): {
1102 LinearMemoryAddress
<Nothing
> addr
;
1103 CHECK(iter
.readStoreLane(4, &addr
, &noIndex
, ¬hing
));
1106 case uint32_t(SimdOp::V128Store64Lane
): {
1107 LinearMemoryAddress
<Nothing
> addr
;
1108 CHECK(iter
.readStoreLane(8, &addr
, &noIndex
, ¬hing
));
1111 # ifdef ENABLE_WASM_RELAXED_SIMD
1112 case uint32_t(SimdOp::F32x4RelaxedMadd
):
1113 case uint32_t(SimdOp::F32x4RelaxedNmadd
):
1114 case uint32_t(SimdOp::F64x2RelaxedMadd
):
1115 case uint32_t(SimdOp::F64x2RelaxedNmadd
):
1116 case uint32_t(SimdOp::I8x16RelaxedLaneSelect
):
1117 case uint32_t(SimdOp::I16x8RelaxedLaneSelect
):
1118 case uint32_t(SimdOp::I32x4RelaxedLaneSelect
):
1119 case uint32_t(SimdOp::I64x2RelaxedLaneSelect
):
1120 case uint32_t(SimdOp::I32x4DotI8x16I7x16AddS
): {
1121 if (!env
.v128RelaxedEnabled()) {
1122 return iter
.unrecognizedOpcode(&op
);
1125 iter
.readTernary(ValType::V128
, ¬hing
, ¬hing
, ¬hing
));
1127 case uint32_t(SimdOp::F32x4RelaxedMin
):
1128 case uint32_t(SimdOp::F32x4RelaxedMax
):
1129 case uint32_t(SimdOp::F64x2RelaxedMin
):
1130 case uint32_t(SimdOp::F64x2RelaxedMax
):
1131 case uint32_t(SimdOp::I16x8RelaxedQ15MulrS
):
1132 case uint32_t(SimdOp::I16x8DotI8x16I7x16S
): {
1133 if (!env
.v128RelaxedEnabled()) {
1134 return iter
.unrecognizedOpcode(&op
);
1136 CHECK(iter
.readBinary(ValType::V128
, ¬hing
, ¬hing
));
1138 case uint32_t(SimdOp::I32x4RelaxedTruncF32x4S
):
1139 case uint32_t(SimdOp::I32x4RelaxedTruncF32x4U
):
1140 case uint32_t(SimdOp::I32x4RelaxedTruncF64x2SZero
):
1141 case uint32_t(SimdOp::I32x4RelaxedTruncF64x2UZero
): {
1142 if (!env
.v128RelaxedEnabled()) {
1143 return iter
.unrecognizedOpcode(&op
);
1145 CHECK(iter
.readUnary(ValType::V128
, ¬hing
));
1147 case uint32_t(SimdOp::I8x16RelaxedSwizzle
): {
1148 if (!env
.v128RelaxedEnabled()) {
1149 return iter
.unrecognizedOpcode(&op
);
1151 CHECK(iter
.readBinary(ValType::V128
, ¬hing
, ¬hing
));
1156 return iter
.unrecognizedOpcode(&op
);
1160 #endif // ENABLE_WASM_SIMD
1162 case uint16_t(Op::MiscPrefix
): {
1164 case uint32_t(MiscOp::I32TruncSatF32S
):
1165 case uint32_t(MiscOp::I32TruncSatF32U
):
1166 CHECK(iter
.readConversion(ValType::F32
, ValType::I32
, ¬hing
));
1167 case uint32_t(MiscOp::I32TruncSatF64S
):
1168 case uint32_t(MiscOp::I32TruncSatF64U
):
1169 CHECK(iter
.readConversion(ValType::F64
, ValType::I32
, ¬hing
));
1170 case uint32_t(MiscOp::I64TruncSatF32S
):
1171 case uint32_t(MiscOp::I64TruncSatF32U
):
1172 CHECK(iter
.readConversion(ValType::F32
, ValType::I64
, ¬hing
));
1173 case uint32_t(MiscOp::I64TruncSatF64S
):
1174 case uint32_t(MiscOp::I64TruncSatF64U
):
1175 CHECK(iter
.readConversion(ValType::F64
, ValType::I64
, ¬hing
));
1176 case uint32_t(MiscOp::MemoryCopy
): {
1177 uint32_t unusedDestMemIndex
;
1178 uint32_t unusedSrcMemIndex
;
1179 CHECK(iter
.readMemOrTableCopy(/*isMem=*/true, &unusedDestMemIndex
,
1180 ¬hing
, &unusedSrcMemIndex
,
1181 ¬hing
, ¬hing
));
1183 case uint32_t(MiscOp::DataDrop
): {
1184 uint32_t unusedSegIndex
;
1185 CHECK(iter
.readDataOrElemDrop(/*isData=*/true, &unusedSegIndex
));
1187 case uint32_t(MiscOp::MemoryFill
): {
1188 uint32_t memoryIndex
;
1189 CHECK(iter
.readMemFill(&memoryIndex
, ¬hing
, ¬hing
, ¬hing
));
1191 case uint32_t(MiscOp::MemoryInit
): {
1192 uint32_t unusedSegIndex
;
1193 uint32_t unusedMemoryIndex
;
1194 CHECK(iter
.readMemOrTableInit(/*isMem=*/true, &unusedSegIndex
,
1195 &unusedMemoryIndex
, ¬hing
,
1196 ¬hing
, ¬hing
));
1198 case uint32_t(MiscOp::TableCopy
): {
1199 uint32_t unusedDestTableIndex
;
1200 uint32_t unusedSrcTableIndex
;
1201 CHECK(iter
.readMemOrTableCopy(
1202 /*isMem=*/false, &unusedDestTableIndex
, ¬hing
,
1203 &unusedSrcTableIndex
, ¬hing
, ¬hing
));
1205 case uint32_t(MiscOp::ElemDrop
): {
1206 uint32_t unusedSegIndex
;
1207 CHECK(iter
.readDataOrElemDrop(/*isData=*/false, &unusedSegIndex
));
1209 case uint32_t(MiscOp::TableInit
): {
1210 uint32_t unusedSegIndex
;
1211 uint32_t unusedTableIndex
;
1212 CHECK(iter
.readMemOrTableInit(/*isMem=*/false, &unusedSegIndex
,
1213 &unusedTableIndex
, ¬hing
, ¬hing
,
1216 case uint32_t(MiscOp::TableFill
): {
1217 uint32_t unusedTableIndex
;
1218 CHECK(iter
.readTableFill(&unusedTableIndex
, ¬hing
, ¬hing
,
1221 #ifdef ENABLE_WASM_MEMORY_CONTROL
1222 case uint32_t(MiscOp::MemoryDiscard
): {
1223 if (!env
.memoryControlEnabled()) {
1224 return iter
.unrecognizedOpcode(&op
);
1226 uint32_t unusedMemoryIndex
;
1227 CHECK(iter
.readMemDiscard(&unusedMemoryIndex
, ¬hing
, ¬hing
));
1230 case uint32_t(MiscOp::TableGrow
): {
1231 uint32_t unusedTableIndex
;
1232 CHECK(iter
.readTableGrow(&unusedTableIndex
, ¬hing
, ¬hing
));
1234 case uint32_t(MiscOp::TableSize
): {
1235 uint32_t unusedTableIndex
;
1236 CHECK(iter
.readTableSize(&unusedTableIndex
));
1239 return iter
.unrecognizedOpcode(&op
);
1243 #ifdef ENABLE_WASM_FUNCTION_REFERENCES
1244 case uint16_t(Op::RefAsNonNull
): {
1245 if (!env
.functionReferencesEnabled()) {
1246 return iter
.unrecognizedOpcode(&op
);
1248 CHECK(iter
.readRefAsNonNull(¬hing
));
1250 case uint16_t(Op::BrOnNull
): {
1251 if (!env
.functionReferencesEnabled()) {
1252 return iter
.unrecognizedOpcode(&op
);
1254 uint32_t unusedDepth
;
1256 iter
.readBrOnNull(&unusedDepth
, &unusedType
, ¬hings
, ¬hing
));
1258 case uint16_t(Op::BrOnNonNull
): {
1259 if (!env
.functionReferencesEnabled()) {
1260 return iter
.unrecognizedOpcode(&op
);
1262 uint32_t unusedDepth
;
1263 CHECK(iter
.readBrOnNonNull(&unusedDepth
, &unusedType
, ¬hings
,
1267 #ifdef ENABLE_WASM_GC
1268 case uint16_t(Op::RefEq
): {
1269 if (!env
.gcEnabled()) {
1270 return iter
.unrecognizedOpcode(&op
);
1272 CHECK(iter
.readComparison(RefType::eq(), ¬hing
, ¬hing
));
1275 case uint16_t(Op::RefFunc
): {
1276 uint32_t unusedIndex
;
1277 CHECK(iter
.readRefFunc(&unusedIndex
));
1279 case uint16_t(Op::RefNull
): {
1281 CHECK(iter
.readRefNull(&type
));
1283 case uint16_t(Op::RefIsNull
): {
1285 CHECK(iter
.readRefIsNull(¬hing
));
1287 case uint16_t(Op::Try
):
1288 if (!env
.exceptionsEnabled()) {
1289 return iter
.unrecognizedOpcode(&op
);
1291 CHECK(iter
.readTry(&unusedType
));
1292 case uint16_t(Op::Catch
): {
1293 if (!env
.exceptionsEnabled()) {
1294 return iter
.unrecognizedOpcode(&op
);
1296 LabelKind unusedKind
;
1297 uint32_t unusedIndex
;
1298 CHECK(iter
.readCatch(&unusedKind
, &unusedIndex
, &unusedType
,
1299 &unusedType
, ¬hings
));
1301 case uint16_t(Op::CatchAll
): {
1302 if (!env
.exceptionsEnabled()) {
1303 return iter
.unrecognizedOpcode(&op
);
1305 LabelKind unusedKind
;
1306 CHECK(iter
.readCatchAll(&unusedKind
, &unusedType
, &unusedType
,
1309 case uint16_t(Op::Delegate
): {
1310 if (!env
.exceptionsEnabled()) {
1311 return iter
.unrecognizedOpcode(&op
);
1313 uint32_t unusedDepth
;
1314 if (!iter
.readDelegate(&unusedDepth
, &unusedType
, ¬hings
)) {
1320 case uint16_t(Op::Throw
): {
1321 if (!env
.exceptionsEnabled()) {
1322 return iter
.unrecognizedOpcode(&op
);
1324 uint32_t unusedIndex
;
1325 CHECK(iter
.readThrow(&unusedIndex
, ¬hings
));
1327 case uint16_t(Op::Rethrow
): {
1328 if (!env
.exceptionsEnabled()) {
1329 return iter
.unrecognizedOpcode(&op
);
1331 uint32_t unusedDepth
;
1332 CHECK(iter
.readRethrow(&unusedDepth
));
1334 case uint16_t(Op::ThreadPrefix
): {
1335 // Though thread ops can be used on nonshared memories, we make them
1336 // unavailable if shared memory has been disabled in the prefs, for
1337 // maximum predictability and safety and consistency with JS.
1338 if (env
.sharedMemoryEnabled() == Shareable::False
) {
1339 return iter
.unrecognizedOpcode(&op
);
1342 case uint32_t(ThreadOp::Wake
): {
1343 LinearMemoryAddress
<Nothing
> addr
;
1344 CHECK(iter
.readWake(&addr
, ¬hing
));
1346 case uint32_t(ThreadOp::I32Wait
): {
1347 LinearMemoryAddress
<Nothing
> addr
;
1348 CHECK(iter
.readWait(&addr
, ValType::I32
, 4, ¬hing
, ¬hing
));
1350 case uint32_t(ThreadOp::I64Wait
): {
1351 LinearMemoryAddress
<Nothing
> addr
;
1352 CHECK(iter
.readWait(&addr
, ValType::I64
, 8, ¬hing
, ¬hing
));
1354 case uint32_t(ThreadOp::Fence
): {
1355 CHECK(iter
.readFence());
1357 case uint32_t(ThreadOp::I32AtomicLoad
): {
1358 LinearMemoryAddress
<Nothing
> addr
;
1359 CHECK(iter
.readAtomicLoad(&addr
, ValType::I32
, 4));
1361 case uint32_t(ThreadOp::I64AtomicLoad
): {
1362 LinearMemoryAddress
<Nothing
> addr
;
1363 CHECK(iter
.readAtomicLoad(&addr
, ValType::I64
, 8));
1365 case uint32_t(ThreadOp::I32AtomicLoad8U
): {
1366 LinearMemoryAddress
<Nothing
> addr
;
1367 CHECK(iter
.readAtomicLoad(&addr
, ValType::I32
, 1));
1369 case uint32_t(ThreadOp::I32AtomicLoad16U
): {
1370 LinearMemoryAddress
<Nothing
> addr
;
1371 CHECK(iter
.readAtomicLoad(&addr
, ValType::I32
, 2));
1373 case uint32_t(ThreadOp::I64AtomicLoad8U
): {
1374 LinearMemoryAddress
<Nothing
> addr
;
1375 CHECK(iter
.readAtomicLoad(&addr
, ValType::I64
, 1));
1377 case uint32_t(ThreadOp::I64AtomicLoad16U
): {
1378 LinearMemoryAddress
<Nothing
> addr
;
1379 CHECK(iter
.readAtomicLoad(&addr
, ValType::I64
, 2));
1381 case uint32_t(ThreadOp::I64AtomicLoad32U
): {
1382 LinearMemoryAddress
<Nothing
> addr
;
1383 CHECK(iter
.readAtomicLoad(&addr
, ValType::I64
, 4));
1385 case uint32_t(ThreadOp::I32AtomicStore
): {
1386 LinearMemoryAddress
<Nothing
> addr
;
1387 CHECK(iter
.readAtomicStore(&addr
, ValType::I32
, 4, ¬hing
));
1389 case uint32_t(ThreadOp::I64AtomicStore
): {
1390 LinearMemoryAddress
<Nothing
> addr
;
1391 CHECK(iter
.readAtomicStore(&addr
, ValType::I64
, 8, ¬hing
));
1393 case uint32_t(ThreadOp::I32AtomicStore8U
): {
1394 LinearMemoryAddress
<Nothing
> addr
;
1395 CHECK(iter
.readAtomicStore(&addr
, ValType::I32
, 1, ¬hing
));
1397 case uint32_t(ThreadOp::I32AtomicStore16U
): {
1398 LinearMemoryAddress
<Nothing
> addr
;
1399 CHECK(iter
.readAtomicStore(&addr
, ValType::I32
, 2, ¬hing
));
1401 case uint32_t(ThreadOp::I64AtomicStore8U
): {
1402 LinearMemoryAddress
<Nothing
> addr
;
1403 CHECK(iter
.readAtomicStore(&addr
, ValType::I64
, 1, ¬hing
));
1405 case uint32_t(ThreadOp::I64AtomicStore16U
): {
1406 LinearMemoryAddress
<Nothing
> addr
;
1407 CHECK(iter
.readAtomicStore(&addr
, ValType::I64
, 2, ¬hing
));
1409 case uint32_t(ThreadOp::I64AtomicStore32U
): {
1410 LinearMemoryAddress
<Nothing
> addr
;
1411 CHECK(iter
.readAtomicStore(&addr
, ValType::I64
, 4, ¬hing
));
1413 case uint32_t(ThreadOp::I32AtomicAdd
):
1414 case uint32_t(ThreadOp::I32AtomicSub
):
1415 case uint32_t(ThreadOp::I32AtomicAnd
):
1416 case uint32_t(ThreadOp::I32AtomicOr
):
1417 case uint32_t(ThreadOp::I32AtomicXor
):
1418 case uint32_t(ThreadOp::I32AtomicXchg
): {
1419 LinearMemoryAddress
<Nothing
> addr
;
1420 CHECK(iter
.readAtomicRMW(&addr
, ValType::I32
, 4, ¬hing
));
1422 case uint32_t(ThreadOp::I64AtomicAdd
):
1423 case uint32_t(ThreadOp::I64AtomicSub
):
1424 case uint32_t(ThreadOp::I64AtomicAnd
):
1425 case uint32_t(ThreadOp::I64AtomicOr
):
1426 case uint32_t(ThreadOp::I64AtomicXor
):
1427 case uint32_t(ThreadOp::I64AtomicXchg
): {
1428 LinearMemoryAddress
<Nothing
> addr
;
1429 CHECK(iter
.readAtomicRMW(&addr
, ValType::I64
, 8, ¬hing
));
1431 case uint32_t(ThreadOp::I32AtomicAdd8U
):
1432 case uint32_t(ThreadOp::I32AtomicSub8U
):
1433 case uint32_t(ThreadOp::I32AtomicAnd8U
):
1434 case uint32_t(ThreadOp::I32AtomicOr8U
):
1435 case uint32_t(ThreadOp::I32AtomicXor8U
):
1436 case uint32_t(ThreadOp::I32AtomicXchg8U
): {
1437 LinearMemoryAddress
<Nothing
> addr
;
1438 CHECK(iter
.readAtomicRMW(&addr
, ValType::I32
, 1, ¬hing
));
1440 case uint32_t(ThreadOp::I32AtomicAdd16U
):
1441 case uint32_t(ThreadOp::I32AtomicSub16U
):
1442 case uint32_t(ThreadOp::I32AtomicAnd16U
):
1443 case uint32_t(ThreadOp::I32AtomicOr16U
):
1444 case uint32_t(ThreadOp::I32AtomicXor16U
):
1445 case uint32_t(ThreadOp::I32AtomicXchg16U
): {
1446 LinearMemoryAddress
<Nothing
> addr
;
1447 CHECK(iter
.readAtomicRMW(&addr
, ValType::I32
, 2, ¬hing
));
1449 case uint32_t(ThreadOp::I64AtomicAdd8U
):
1450 case uint32_t(ThreadOp::I64AtomicSub8U
):
1451 case uint32_t(ThreadOp::I64AtomicAnd8U
):
1452 case uint32_t(ThreadOp::I64AtomicOr8U
):
1453 case uint32_t(ThreadOp::I64AtomicXor8U
):
1454 case uint32_t(ThreadOp::I64AtomicXchg8U
): {
1455 LinearMemoryAddress
<Nothing
> addr
;
1456 CHECK(iter
.readAtomicRMW(&addr
, ValType::I64
, 1, ¬hing
));
1458 case uint32_t(ThreadOp::I64AtomicAdd16U
):
1459 case uint32_t(ThreadOp::I64AtomicSub16U
):
1460 case uint32_t(ThreadOp::I64AtomicAnd16U
):
1461 case uint32_t(ThreadOp::I64AtomicOr16U
):
1462 case uint32_t(ThreadOp::I64AtomicXor16U
):
1463 case uint32_t(ThreadOp::I64AtomicXchg16U
): {
1464 LinearMemoryAddress
<Nothing
> addr
;
1465 CHECK(iter
.readAtomicRMW(&addr
, ValType::I64
, 2, ¬hing
));
1467 case uint32_t(ThreadOp::I64AtomicAdd32U
):
1468 case uint32_t(ThreadOp::I64AtomicSub32U
):
1469 case uint32_t(ThreadOp::I64AtomicAnd32U
):
1470 case uint32_t(ThreadOp::I64AtomicOr32U
):
1471 case uint32_t(ThreadOp::I64AtomicXor32U
):
1472 case uint32_t(ThreadOp::I64AtomicXchg32U
): {
1473 LinearMemoryAddress
<Nothing
> addr
;
1474 CHECK(iter
.readAtomicRMW(&addr
, ValType::I64
, 4, ¬hing
));
1476 case uint32_t(ThreadOp::I32AtomicCmpXchg
): {
1477 LinearMemoryAddress
<Nothing
> addr
;
1478 CHECK(iter
.readAtomicCmpXchg(&addr
, ValType::I32
, 4, ¬hing
,
1481 case uint32_t(ThreadOp::I64AtomicCmpXchg
): {
1482 LinearMemoryAddress
<Nothing
> addr
;
1483 CHECK(iter
.readAtomicCmpXchg(&addr
, ValType::I64
, 8, ¬hing
,
1486 case uint32_t(ThreadOp::I32AtomicCmpXchg8U
): {
1487 LinearMemoryAddress
<Nothing
> addr
;
1488 CHECK(iter
.readAtomicCmpXchg(&addr
, ValType::I32
, 1, ¬hing
,
1491 case uint32_t(ThreadOp::I32AtomicCmpXchg16U
): {
1492 LinearMemoryAddress
<Nothing
> addr
;
1493 CHECK(iter
.readAtomicCmpXchg(&addr
, ValType::I32
, 2, ¬hing
,
1496 case uint32_t(ThreadOp::I64AtomicCmpXchg8U
): {
1497 LinearMemoryAddress
<Nothing
> addr
;
1498 CHECK(iter
.readAtomicCmpXchg(&addr
, ValType::I64
, 1, ¬hing
,
1501 case uint32_t(ThreadOp::I64AtomicCmpXchg16U
): {
1502 LinearMemoryAddress
<Nothing
> addr
;
1503 CHECK(iter
.readAtomicCmpXchg(&addr
, ValType::I64
, 2, ¬hing
,
1506 case uint32_t(ThreadOp::I64AtomicCmpXchg32U
): {
1507 LinearMemoryAddress
<Nothing
> addr
;
1508 CHECK(iter
.readAtomicCmpXchg(&addr
, ValType::I64
, 4, ¬hing
,
1512 return iter
.unrecognizedOpcode(&op
);
1516 case uint16_t(Op::MozPrefix
):
1517 return iter
.unrecognizedOpcode(&op
);
1519 return iter
.unrecognizedOpcode(&op
);
1523 MOZ_CRASH("unreachable");
1528 bool wasm::ValidateFunctionBody(const ModuleEnvironment
& env
,
1529 uint32_t funcIndex
, uint32_t bodySize
,
1531 ValTypeVector locals
;
1532 if (!locals
.appendAll(env
.funcs
[funcIndex
].type
->args())) {
1536 const uint8_t* bodyBegin
= d
.currentPosition();
1538 if (!DecodeLocalEntries(d
, *env
.types
, env
.features
, &locals
)) {
1542 return DecodeFunctionBodyExprs(env
, funcIndex
, locals
, bodyBegin
+ bodySize
,
1548 static bool DecodePreamble(Decoder
& d
) {
1549 if (d
.bytesRemain() > MaxModuleBytes
) {
1550 return d
.fail("module too big");
1554 if (!d
.readFixedU32(&u32
) || u32
!= MagicNumber
) {
1555 return d
.fail("failed to match magic number");
1558 if (!d
.readFixedU32(&u32
) || u32
!= EncodingVersion
) {
1559 return d
.failf("binary version 0x%" PRIx32
1560 " does not match expected version 0x%" PRIx32
,
1561 u32
, EncodingVersion
);
1567 static bool DecodeValTypeVector(Decoder
& d
, ModuleEnvironment
* env
,
1568 uint32_t count
, ValTypeVector
* valTypes
) {
1569 if (!valTypes
->resize(count
)) {
1573 for (uint32_t i
= 0; i
< count
; i
++) {
1574 if (!d
.readValType(*env
->types
, env
->features
, &(*valTypes
)[i
])) {
1581 static bool DecodeFuncType(Decoder
& d
, ModuleEnvironment
* env
,
1582 FuncType
* funcType
) {
1584 if (!d
.readVarU32(&numArgs
)) {
1585 return d
.fail("bad number of function args");
1587 if (numArgs
> MaxParams
) {
1588 return d
.fail("too many arguments in signature");
1591 if (!DecodeValTypeVector(d
, env
, numArgs
, &args
)) {
1595 uint32_t numResults
;
1596 if (!d
.readVarU32(&numResults
)) {
1597 return d
.fail("bad number of function returns");
1599 if (numResults
> MaxResults
) {
1600 return d
.fail("too many returns in signature");
1602 ValTypeVector results
;
1603 if (!DecodeValTypeVector(d
, env
, numResults
, &results
)) {
1607 *funcType
= FuncType(std::move(args
), std::move(results
));
1611 static bool DecodeStructType(Decoder
& d
, ModuleEnvironment
* env
,
1612 StructType
* structType
) {
1613 if (!env
->gcEnabled()) {
1614 return d
.fail("Structure types not enabled");
1618 if (!d
.readVarU32(&numFields
)) {
1619 return d
.fail("Bad number of fields");
1622 if (numFields
> MaxStructFields
) {
1623 return d
.fail("too many fields in struct");
1626 StructFieldVector fields
;
1627 if (!fields
.resize(numFields
)) {
1631 for (uint32_t i
= 0; i
< numFields
; i
++) {
1632 if (!d
.readFieldType(*env
->types
, env
->features
, &fields
[i
].type
)) {
1637 if (!d
.readFixedU8(&flags
)) {
1638 return d
.fail("expected flag");
1640 if ((flags
& ~uint8_t(FieldFlags::AllowedMask
)) != 0) {
1641 return d
.fail("garbage flag bits");
1643 fields
[i
].isMutable
= flags
& uint8_t(FieldFlags::Mutable
);
1646 *structType
= StructType(std::move(fields
));
1648 // Compute the struct layout, and fail if the struct is too large
1649 if (!structType
->init()) {
1650 return d
.fail("too many fields in struct");
1655 static bool DecodeArrayType(Decoder
& d
, ModuleEnvironment
* env
,
1656 ArrayType
* arrayType
) {
1657 if (!env
->gcEnabled()) {
1658 return d
.fail("gc types not enabled");
1661 FieldType elementType
;
1662 if (!d
.readFieldType(*env
->types
, env
->features
, &elementType
)) {
1667 if (!d
.readFixedU8(&flags
)) {
1668 return d
.fail("expected flag");
1670 if ((flags
& ~uint8_t(FieldFlags::AllowedMask
)) != 0) {
1671 return d
.fail("garbage flag bits");
1673 bool isMutable
= flags
& uint8_t(FieldFlags::Mutable
);
1675 *arrayType
= ArrayType(elementType
, isMutable
);
1679 static bool DecodeTypeSection(Decoder
& d
, ModuleEnvironment
* env
) {
1680 MaybeSectionRange range
;
1681 if (!d
.startSection(SectionId::Type
, env
, &range
, "type")) {
1688 uint32_t numRecGroups
;
1689 if (!d
.readVarU32(&numRecGroups
)) {
1690 return d
.fail("expected number of types");
1693 // Check if we've reached our implementation defined limit of recursion
1695 if (numRecGroups
> MaxRecGroups
) {
1696 return d
.fail("too many types");
1699 for (uint32_t recGroupIndex
= 0; recGroupIndex
< numRecGroups
;
1701 uint32_t recGroupLength
= 1;
1703 // Decode an optional recursion group length, if the GC proposal is
1705 if (env
->gcEnabled()) {
1706 uint8_t firstTypeCode
;
1707 if (!d
.peekByte(&firstTypeCode
)) {
1708 return d
.fail("expected type form");
1711 if (firstTypeCode
== (uint8_t)TypeCode::RecGroup
) {
1712 // Skip over the prefix byte that was peeked.
1713 d
.uncheckedReadFixedU8();
1715 // Read the number of types in this recursion group
1716 if (!d
.readVarU32(&recGroupLength
)) {
1717 return d
.fail("expected recursion group length");
1722 // Start a recursion group. This will extend the type context with empty
1723 // type definitions to be filled.
1724 MutableRecGroup recGroup
= env
->types
->startRecGroup(recGroupLength
);
1729 // First, iterate over the types, validate them and set super types.
1730 // Subtyping relationship will be checked in a second iteration.
1731 for (uint32_t recGroupTypeIndex
= 0; recGroupTypeIndex
< recGroupLength
;
1732 recGroupTypeIndex
++) {
1733 uint32_t typeIndex
=
1734 env
->types
->length() - recGroupLength
+ recGroupTypeIndex
;
1736 // Check if we've reached our implementation defined limit of type
1738 if (typeIndex
>= MaxTypes
) {
1739 return d
.fail("too many types");
1743 const TypeDef
* superTypeDef
= nullptr;
1745 // By default, all types are final unless the sub keyword is specified.
1746 bool finalTypeFlag
= true;
1748 // Decode an optional declared super type index, if the GC proposal is
1750 if (env
->gcEnabled() && d
.peekByte(&form
) &&
1751 (form
== (uint8_t)TypeCode::SubNoFinalType
||
1752 form
== (uint8_t)TypeCode::SubFinalType
)) {
1753 if (form
== (uint8_t)TypeCode::SubNoFinalType
) {
1754 finalTypeFlag
= false;
1757 // Skip over the `sub` or `final` prefix byte we peeked.
1758 d
.uncheckedReadFixedU8();
1760 // Decode the number of super types, which is currently limited to at
1762 uint32_t numSuperTypes
;
1763 if (!d
.readVarU32(&numSuperTypes
)) {
1764 return d
.fail("expected number of super types");
1766 if (numSuperTypes
> 1) {
1767 return d
.fail("too many super types");
1770 // Decode the super type, if any.
1771 if (numSuperTypes
== 1) {
1772 uint32_t superTypeDefIndex
;
1773 if (!d
.readVarU32(&superTypeDefIndex
)) {
1774 return d
.fail("expected super type index");
1777 // A super type index must be strictly less than the current type
1778 // index in order to avoid cycles.
1779 if (superTypeDefIndex
>= typeIndex
) {
1780 return d
.fail("invalid super type index");
1783 superTypeDef
= &env
->types
->type(superTypeDefIndex
);
1787 // Decode the kind of type definition
1788 if (!d
.readFixedU8(&form
)) {
1789 return d
.fail("expected type form");
1792 TypeDef
* typeDef
= &recGroup
->type(recGroupTypeIndex
);
1794 case uint8_t(TypeCode::Func
): {
1796 if (!DecodeFuncType(d
, env
, &funcType
)) {
1799 *typeDef
= std::move(funcType
);
1802 case uint8_t(TypeCode::Struct
): {
1803 StructType structType
;
1804 if (!DecodeStructType(d
, env
, &structType
)) {
1807 *typeDef
= std::move(structType
);
1810 case uint8_t(TypeCode::Array
): {
1811 ArrayType arrayType
;
1812 if (!DecodeArrayType(d
, env
, &arrayType
)) {
1815 *typeDef
= std::move(arrayType
);
1819 return d
.fail("expected type form");
1822 typeDef
->setFinal(finalTypeFlag
);
1824 // Check that we aren't creating too deep of a subtyping chain
1825 if (superTypeDef
->subTypingDepth() >= MaxSubTypingDepth
) {
1826 return d
.fail("type is too deep");
1829 typeDef
->setSuperTypeDef(superTypeDef
);
1832 if (typeDef
->isFuncType()) {
1833 typeDef
->funcType().initImmediateTypeId(
1834 env
->gcEnabled(), typeDef
->isFinal(), superTypeDef
, recGroupLength
);
1838 // Check the super types to make sure they are compatible with their
1839 // subtypes. This is done in a second iteration to avoid dealing with not
1840 // yet loaded types.
1841 for (uint32_t recGroupTypeIndex
= 0; recGroupTypeIndex
< recGroupLength
;
1842 recGroupTypeIndex
++) {
1843 TypeDef
* typeDef
= &recGroup
->type(recGroupTypeIndex
);
1844 if (typeDef
->superTypeDef()) {
1845 // Check that the super type is compatible with this type
1846 if (!TypeDef::canBeSubTypeOf(typeDef
, typeDef
->superTypeDef())) {
1847 return d
.fail("incompatible super type");
1852 // Finish the recursion group, which will canonicalize the types.
1853 if (!env
->types
->endRecGroup()) {
1858 return d
.finishSection(*range
, "type");
1861 [[nodiscard
]] static bool DecodeName(Decoder
& d
, CacheableName
* name
) {
1863 if (!d
.readVarU32(&numBytes
)) {
1867 if (numBytes
> MaxStringBytes
) {
1871 const uint8_t* bytes
;
1872 if (!d
.readBytes(numBytes
, &bytes
)) {
1876 if (!IsUtf8(AsChars(Span(bytes
, numBytes
)))) {
1880 UTF8Bytes utf8Bytes
;
1881 if (!utf8Bytes
.resizeUninitialized(numBytes
)) {
1884 memcpy(utf8Bytes
.begin(), bytes
, numBytes
);
1886 *name
= CacheableName(std::move(utf8Bytes
));
1890 static bool DecodeFuncTypeIndex(Decoder
& d
, const SharedTypeContext
& types
,
1891 uint32_t* funcTypeIndex
) {
1892 if (!d
.readVarU32(funcTypeIndex
)) {
1893 return d
.fail("expected signature index");
1896 if (*funcTypeIndex
>= types
->length()) {
1897 return d
.fail("signature index out of range");
1900 const TypeDef
& def
= (*types
)[*funcTypeIndex
];
1902 if (!def
.isFuncType()) {
1903 return d
.fail("signature index references non-signature");
1909 static bool DecodeLimitBound(Decoder
& d
, IndexType indexType
, uint64_t* bound
) {
1910 if (indexType
== IndexType::I64
) {
1911 return d
.readVarU64(bound
);
1914 // Spec tests assert that we only decode a LEB32 when index type is I32.
1916 if (!d
.readVarU32(&bound32
)) {
1923 static bool DecodeLimits(Decoder
& d
, LimitsKind kind
, Limits
* limits
) {
1925 if (!d
.readFixedU8(&flags
)) {
1926 return d
.fail("expected flags");
1929 uint8_t mask
= kind
== LimitsKind::Memory
? uint8_t(LimitsMask::Memory
)
1930 : uint8_t(LimitsMask::Table
);
1932 if (flags
& ~uint8_t(mask
)) {
1933 return d
.failf("unexpected bits set in flags: %" PRIu32
,
1934 uint32_t(flags
& ~uint8_t(mask
)));
1937 // Memory limits may be shared or specify an alternate index type
1938 if (kind
== LimitsKind::Memory
) {
1939 if ((flags
& uint8_t(LimitsFlags::IsShared
)) &&
1940 !(flags
& uint8_t(LimitsFlags::HasMaximum
))) {
1941 return d
.fail("maximum length required for shared memory");
1944 limits
->shared
= (flags
& uint8_t(LimitsFlags::IsShared
))
1948 #ifdef ENABLE_WASM_MEMORY64
1950 (flags
& uint8_t(LimitsFlags::IsI64
)) ? IndexType::I64
: IndexType::I32
;
1952 limits
->indexType
= IndexType::I32
;
1953 if (flags
& uint8_t(LimitsFlags::IsI64
)) {
1954 return d
.fail("i64 is not supported for memory limits");
1958 limits
->shared
= Shareable::False
;
1959 limits
->indexType
= IndexType::I32
;
1963 if (!DecodeLimitBound(d
, limits
->indexType
, &initial
)) {
1964 return d
.fail("expected initial length");
1966 limits
->initial
= initial
;
1968 if (flags
& uint8_t(LimitsFlags::HasMaximum
)) {
1970 if (!DecodeLimitBound(d
, limits
->indexType
, &maximum
)) {
1971 return d
.fail("expected maximum length");
1974 if (limits
->initial
> maximum
) {
1976 "memory size minimum must not be greater than maximum; "
1977 "maximum length %" PRIu64
" is less than initial length %" PRIu64
,
1978 maximum
, limits
->initial
);
1981 limits
->maximum
.emplace(maximum
);
1987 static bool DecodeTableTypeAndLimits(Decoder
& d
, ModuleEnvironment
* env
) {
1988 bool initExprPresent
= false;
1990 if (!d
.peekByte(&typeCode
)) {
1991 return d
.fail("expected type code");
1993 if (typeCode
== (uint8_t)TypeCode::TableHasInitExpr
) {
1994 d
.uncheckedReadFixedU8();
1996 if (!d
.readFixedU8(&flags
) || flags
!= 0) {
1997 return d
.fail("expected reserved byte to be 0");
1999 initExprPresent
= true;
2002 RefType tableElemType
;
2003 if (!d
.readRefType(*env
->types
, env
->features
, &tableElemType
)) {
2008 if (!DecodeLimits(d
, LimitsKind::Table
, &limits
)) {
2012 // Decoding limits for a table only supports i32
2013 MOZ_ASSERT(limits
.indexType
== IndexType::I32
);
2015 // If there's a maximum, check it is in range. The check to exclude
2016 // initial > maximum is carried out by the DecodeLimits call above, so
2017 // we don't repeat it here.
2018 if (limits
.initial
> MaxTableLimitField
||
2019 ((limits
.maximum
.isSome() &&
2020 limits
.maximum
.value() > MaxTableLimitField
))) {
2021 return d
.fail("too many table elements");
2024 if (env
->tables
.length() >= MaxTables
) {
2025 return d
.fail("too many tables");
2028 // The rest of the runtime expects table limits to be within a 32-bit range.
2029 static_assert(MaxTableLimitField
<= UINT32_MAX
, "invariant");
2030 uint32_t initialLength
= uint32_t(limits
.initial
);
2031 Maybe
<uint32_t> maximumLength
;
2032 if (limits
.maximum
) {
2033 maximumLength
= Some(uint32_t(*limits
.maximum
));
2036 Maybe
<InitExpr
> initExpr
;
2037 if (initExprPresent
) {
2038 InitExpr initializer
;
2039 if (!InitExpr::decodeAndValidate(d
, env
, tableElemType
, &initializer
)) {
2042 initExpr
= Some(std::move(initializer
));
2044 if (!tableElemType
.isNullable()) {
2045 return d
.fail("table with non-nullable references requires initializer");
2049 return env
->tables
.emplaceBack(tableElemType
, initialLength
, maximumLength
,
2050 std::move(initExpr
), /* isAsmJS */ false);
2053 static bool DecodeGlobalType(Decoder
& d
, const SharedTypeContext
& types
,
2054 const FeatureArgs
& features
, ValType
* type
,
2056 if (!d
.readValType(*types
, features
, type
)) {
2057 return d
.fail("expected global type");
2061 if (!d
.readFixedU8(&flags
)) {
2062 return d
.fail("expected global flags");
2065 if (flags
& ~uint8_t(GlobalTypeImmediate::AllowedMask
)) {
2066 return d
.fail("unexpected bits set in global flags");
2069 *isMutable
= flags
& uint8_t(GlobalTypeImmediate::IsMutable
);
2073 static bool DecodeMemoryTypeAndLimits(Decoder
& d
, ModuleEnvironment
* env
,
2074 MemoryDescVector
* memories
) {
2075 if (!env
->features
.multiMemory
&& env
->numMemories() == 1) {
2076 return d
.fail("already have default memory");
2079 if (env
->numMemories() >= MaxMemories
) {
2080 return d
.fail("too many memories");
2084 if (!DecodeLimits(d
, LimitsKind::Memory
, &limits
)) {
2088 uint64_t maxField
= MaxMemoryLimitField(limits
.indexType
);
2090 if (limits
.initial
> maxField
) {
2091 return d
.fail("initial memory size too big");
2094 if (limits
.maximum
&& *limits
.maximum
> maxField
) {
2095 return d
.fail("maximum memory size too big");
2098 if (limits
.shared
== Shareable::True
&&
2099 env
->sharedMemoryEnabled() == Shareable::False
) {
2100 return d
.fail("shared memory is disabled");
2103 if (limits
.indexType
== IndexType::I64
&& !env
->memory64Enabled()) {
2104 return d
.fail("memory64 is disabled");
2107 return memories
->emplaceBack(MemoryDesc(limits
));
2110 static bool DecodeTag(Decoder
& d
, ModuleEnvironment
* env
, TagKind
* tagKind
,
2111 uint32_t* funcTypeIndex
) {
2113 if (!d
.readVarU32(&tagCode
)) {
2114 return d
.fail("expected tag kind");
2117 if (TagKind(tagCode
) != TagKind::Exception
) {
2118 return d
.fail("illegal tag kind");
2120 *tagKind
= TagKind(tagCode
);
2122 if (!d
.readVarU32(funcTypeIndex
)) {
2123 return d
.fail("expected function index in tag");
2125 if (*funcTypeIndex
>= env
->numTypes()) {
2126 return d
.fail("function type index in tag out of bounds");
2128 if (!(*env
->types
)[*funcTypeIndex
].isFuncType()) {
2129 return d
.fail("function type index must index a function type");
2131 if ((*env
->types
)[*funcTypeIndex
].funcType().results().length() != 0) {
2132 return d
.fail("tag function types must not return anything");
2137 static bool DecodeImport(Decoder
& d
, ModuleEnvironment
* env
) {
2138 CacheableName moduleName
;
2139 if (!DecodeName(d
, &moduleName
)) {
2140 return d
.fail("expected valid import module name");
2143 CacheableName fieldName
;
2144 if (!DecodeName(d
, &fieldName
)) {
2145 return d
.fail("expected valid import field name");
2148 uint8_t rawImportKind
;
2149 if (!d
.readFixedU8(&rawImportKind
)) {
2150 return d
.fail("failed to read import kind");
2153 DefinitionKind importKind
= DefinitionKind(rawImportKind
);
2155 switch (importKind
) {
2156 case DefinitionKind::Function
: {
2157 uint32_t funcTypeIndex
;
2158 if (!DecodeFuncTypeIndex(d
, env
->types
, &funcTypeIndex
)) {
2161 if (!env
->funcs
.append(FuncDesc(
2162 &env
->types
->type(funcTypeIndex
).funcType(), funcTypeIndex
))) {
2165 if (env
->funcs
.length() > MaxFuncs
) {
2166 return d
.fail("too many functions");
2170 case DefinitionKind::Table
: {
2171 if (!DecodeTableTypeAndLimits(d
, env
)) {
2174 env
->tables
.back().isImported
= true;
2177 case DefinitionKind::Memory
: {
2178 if (!DecodeMemoryTypeAndLimits(d
, env
, &env
->memories
)) {
2183 case DefinitionKind::Global
: {
2186 if (!DecodeGlobalType(d
, env
->types
, env
->features
, &type
, &isMutable
)) {
2189 if (!env
->globals
.append(
2190 GlobalDesc(type
, isMutable
, env
->globals
.length()))) {
2193 if (env
->globals
.length() > MaxGlobals
) {
2194 return d
.fail("too many globals");
2198 case DefinitionKind::Tag
: {
2200 uint32_t funcTypeIndex
;
2201 if (!DecodeTag(d
, env
, &tagKind
, &funcTypeIndex
)) {
2205 if (!args
.appendAll((*env
->types
)[funcTypeIndex
].funcType().args())) {
2208 MutableTagType tagType
= js_new
<TagType
>();
2209 if (!tagType
|| !tagType
->initialize(std::move(args
))) {
2212 if (!env
->tags
.emplaceBack(tagKind
, tagType
)) {
2215 if (env
->tags
.length() > MaxTags
) {
2216 return d
.fail("too many tags");
2221 return d
.fail("unsupported import kind");
2224 return env
->imports
.emplaceBack(std::move(moduleName
), std::move(fieldName
),
2228 static bool CheckImportsAgainstBuiltinModules(Decoder
& d
,
2229 ModuleEnvironment
* env
) {
2230 const BuiltinModuleIds
& builtinModules
= env
->features
.builtinModules
;
2232 // Skip this pass if there are no builtin modules enabled
2233 if (builtinModules
.hasNone()) {
2237 // Allocate a type context for builtin types so we can canonicalize them
2238 // and use them in type comparisons
2239 RefPtr
<TypeContext
> builtinTypes
= js_new
<TypeContext
>();
2240 if (!builtinTypes
) {
2244 uint32_t importFuncIndex
= 0;
2245 for (auto& import
: env
->imports
) {
2246 Maybe
<BuiltinModuleId
> builtinModule
=
2247 ImportMatchesBuiltinModule(import
.module
.utf8Bytes(), builtinModules
);
2249 switch (import
.kind
) {
2250 case DefinitionKind::Function
: {
2251 const FuncDesc
& func
= env
->funcs
[importFuncIndex
];
2252 importFuncIndex
+= 1;
2254 // Skip this import if it doesn't refer to a builtin module. We do have
2255 // to increment the import function index regardless though.
2256 if (!builtinModule
) {
2260 // Check if this import refers to a builtin module function
2261 Maybe
<const BuiltinModuleFunc
*> builtinFunc
=
2262 ImportMatchesBuiltinModuleFunc(import
.field
.utf8Bytes(),
2265 return d
.fail("unrecognized builtin module field");
2268 // Get a canonicalized type definition for this builtin so we can
2269 // accurately compare it against the import type.
2270 FuncType builtinFuncType
;
2271 if (!(*builtinFunc
)->funcType(&builtinFuncType
)) {
2274 if (!builtinTypes
->addType(builtinFuncType
)) {
2277 const TypeDef
& builtinTypeDef
=
2278 builtinTypes
->type(builtinTypes
->length() - 1);
2280 const TypeDef
& importTypeDef
= (*env
->types
)[func
.typeIndex
];
2281 if (!TypeDef::isSubTypeOf(&builtinTypeDef
, &importTypeDef
)) {
2282 return d
.failf("type mismatch in %s", (*builtinFunc
)->exportName
);
2287 if (!builtinModule
) {
2290 return d
.fail("unrecognized builtin import");
2298 static bool DecodeImportSection(Decoder
& d
, ModuleEnvironment
* env
) {
2299 MaybeSectionRange range
;
2300 if (!d
.startSection(SectionId::Import
, env
, &range
, "import")) {
2307 uint32_t numImports
;
2308 if (!d
.readVarU32(&numImports
)) {
2309 return d
.fail("failed to read number of imports");
2312 if (numImports
> MaxImports
) {
2313 return d
.fail("too many imports");
2316 for (uint32_t i
= 0; i
< numImports
; i
++) {
2317 if (!DecodeImport(d
, env
)) {
2322 if (!d
.finishSection(*range
, "import")) {
2326 env
->numFuncImports
= env
->funcs
.length();
2327 env
->numGlobalImports
= env
->globals
.length();
2331 static bool DecodeFunctionSection(Decoder
& d
, ModuleEnvironment
* env
) {
2332 MaybeSectionRange range
;
2333 if (!d
.startSection(SectionId::Function
, env
, &range
, "function")) {
2341 if (!d
.readVarU32(&numDefs
)) {
2342 return d
.fail("expected number of function definitions");
2345 CheckedInt
<uint32_t> numFuncs
= env
->funcs
.length();
2346 numFuncs
+= numDefs
;
2347 if (!numFuncs
.isValid() || numFuncs
.value() > MaxFuncs
) {
2348 return d
.fail("too many functions");
2351 if (!env
->funcs
.reserve(numFuncs
.value())) {
2355 for (uint32_t i
= 0; i
< numDefs
; i
++) {
2356 uint32_t funcTypeIndex
;
2357 if (!DecodeFuncTypeIndex(d
, env
->types
, &funcTypeIndex
)) {
2360 env
->funcs
.infallibleAppend(
2361 FuncDesc(&env
->types
->type(funcTypeIndex
).funcType(), funcTypeIndex
));
2364 return d
.finishSection(*range
, "function");
2367 static bool DecodeTableSection(Decoder
& d
, ModuleEnvironment
* env
) {
2368 MaybeSectionRange range
;
2369 if (!d
.startSection(SectionId::Table
, env
, &range
, "table")) {
2377 if (!d
.readVarU32(&numTables
)) {
2378 return d
.fail("failed to read number of tables");
2381 for (uint32_t i
= 0; i
< numTables
; ++i
) {
2382 if (!DecodeTableTypeAndLimits(d
, env
)) {
2387 return d
.finishSection(*range
, "table");
2390 static bool DecodeMemorySection(Decoder
& d
, ModuleEnvironment
* env
) {
2391 MaybeSectionRange range
;
2392 if (!d
.startSection(SectionId::Memory
, env
, &range
, "memory")) {
2399 uint32_t numMemories
;
2400 if (!d
.readVarU32(&numMemories
)) {
2401 return d
.fail("failed to read number of memories");
2404 if (!env
->features
.multiMemory
&& numMemories
> 1) {
2405 return d
.fail("the number of memories must be at most one");
2408 for (uint32_t i
= 0; i
< numMemories
; ++i
) {
2409 if (!DecodeMemoryTypeAndLimits(d
, env
, &env
->memories
)) {
2414 return d
.finishSection(*range
, "memory");
2417 static bool DecodeGlobalSection(Decoder
& d
, ModuleEnvironment
* env
) {
2418 MaybeSectionRange range
;
2419 if (!d
.startSection(SectionId::Global
, env
, &range
, "global")) {
2427 if (!d
.readVarU32(&numDefs
)) {
2428 return d
.fail("expected number of globals");
2431 CheckedInt
<uint32_t> numGlobals
= env
->globals
.length();
2432 numGlobals
+= numDefs
;
2433 if (!numGlobals
.isValid() || numGlobals
.value() > MaxGlobals
) {
2434 return d
.fail("too many globals");
2437 if (!env
->globals
.reserve(numGlobals
.value())) {
2441 for (uint32_t i
= 0; i
< numDefs
; i
++) {
2444 if (!DecodeGlobalType(d
, env
->types
, env
->features
, &type
, &isMutable
)) {
2448 InitExpr initializer
;
2449 if (!InitExpr::decodeAndValidate(d
, env
, type
, &initializer
)) {
2453 env
->globals
.infallibleAppend(
2454 GlobalDesc(std::move(initializer
), isMutable
));
2457 return d
.finishSection(*range
, "global");
2460 static bool DecodeTagSection(Decoder
& d
, ModuleEnvironment
* env
) {
2461 MaybeSectionRange range
;
2462 if (!d
.startSection(SectionId::Tag
, env
, &range
, "tag")) {
2469 if (!env
->exceptionsEnabled()) {
2470 return d
.fail("exceptions not enabled");
2474 if (!d
.readVarU32(&numDefs
)) {
2475 return d
.fail("expected number of tags");
2478 CheckedInt
<uint32_t> numTags
= env
->tags
.length();
2480 if (!numTags
.isValid() || numTags
.value() > MaxTags
) {
2481 return d
.fail("too many tags");
2484 if (!env
->tags
.reserve(numTags
.value())) {
2488 for (uint32_t i
= 0; i
< numDefs
; i
++) {
2490 uint32_t funcTypeIndex
;
2491 if (!DecodeTag(d
, env
, &tagKind
, &funcTypeIndex
)) {
2495 if (!args
.appendAll((*env
->types
)[funcTypeIndex
].funcType().args())) {
2498 MutableTagType tagType
= js_new
<TagType
>();
2499 if (!tagType
|| !tagType
->initialize(std::move(args
))) {
2502 env
->tags
.infallibleEmplaceBack(tagKind
, tagType
);
2505 return d
.finishSection(*range
, "tag");
// Set of export names already seen while decoding the export section; used
// by DecodeExportName to reject duplicate exports.
using NameSet = HashSet<Span<char>, NameHasher, SystemAllocPolicy>;
2510 [[nodiscard
]] static bool DecodeExportName(Decoder
& d
, NameSet
* dupSet
,
2511 CacheableName
* exportName
) {
2512 if (!DecodeName(d
, exportName
)) {
2513 d
.fail("expected valid export name");
2517 NameSet::AddPtr p
= dupSet
->lookupForAdd(exportName
->utf8Bytes());
2519 d
.fail("duplicate export");
2523 return dupSet
->add(p
, exportName
->utf8Bytes());
2526 static bool DecodeExport(Decoder
& d
, ModuleEnvironment
* env
, NameSet
* dupSet
) {
2527 CacheableName fieldName
;
2528 if (!DecodeExportName(d
, dupSet
, &fieldName
)) {
2533 if (!d
.readFixedU8(&exportKind
)) {
2534 return d
.fail("failed to read export kind");
2537 switch (DefinitionKind(exportKind
)) {
2538 case DefinitionKind::Function
: {
2540 if (!d
.readVarU32(&funcIndex
)) {
2541 return d
.fail("expected function index");
2544 if (funcIndex
>= env
->numFuncs()) {
2545 return d
.fail("exported function index out of bounds");
2548 env
->declareFuncExported(funcIndex
, /* eager */ true,
2549 /* canRefFunc */ true);
2550 return env
->exports
.emplaceBack(std::move(fieldName
), funcIndex
,
2551 DefinitionKind::Function
);
2553 case DefinitionKind::Table
: {
2554 uint32_t tableIndex
;
2555 if (!d
.readVarU32(&tableIndex
)) {
2556 return d
.fail("expected table index");
2559 if (tableIndex
>= env
->tables
.length()) {
2560 return d
.fail("exported table index out of bounds");
2562 env
->tables
[tableIndex
].isExported
= true;
2563 return env
->exports
.emplaceBack(std::move(fieldName
), tableIndex
,
2564 DefinitionKind::Table
);
2566 case DefinitionKind::Memory
: {
2567 uint32_t memoryIndex
;
2568 if (!d
.readVarU32(&memoryIndex
)) {
2569 return d
.fail("expected memory index");
2572 if (memoryIndex
>= env
->numMemories()) {
2573 return d
.fail("exported memory index out of bounds");
2576 return env
->exports
.emplaceBack(std::move(fieldName
), memoryIndex
,
2577 DefinitionKind::Memory
);
2579 case DefinitionKind::Global
: {
2580 uint32_t globalIndex
;
2581 if (!d
.readVarU32(&globalIndex
)) {
2582 return d
.fail("expected global index");
2585 if (globalIndex
>= env
->globals
.length()) {
2586 return d
.fail("exported global index out of bounds");
2589 GlobalDesc
* global
= &env
->globals
[globalIndex
];
2590 global
->setIsExport();
2592 return env
->exports
.emplaceBack(std::move(fieldName
), globalIndex
,
2593 DefinitionKind::Global
);
2595 case DefinitionKind::Tag
: {
2597 if (!d
.readVarU32(&tagIndex
)) {
2598 return d
.fail("expected tag index");
2600 if (tagIndex
>= env
->tags
.length()) {
2601 return d
.fail("exported tag index out of bounds");
2604 env
->tags
[tagIndex
].isExport
= true;
2605 return env
->exports
.emplaceBack(std::move(fieldName
), tagIndex
,
2606 DefinitionKind::Tag
);
2609 return d
.fail("unexpected export kind");
2612 MOZ_CRASH("unreachable");
2615 static bool DecodeExportSection(Decoder
& d
, ModuleEnvironment
* env
) {
2616 MaybeSectionRange range
;
2617 if (!d
.startSection(SectionId::Export
, env
, &range
, "export")) {
2626 uint32_t numExports
;
2627 if (!d
.readVarU32(&numExports
)) {
2628 return d
.fail("failed to read number of exports");
2631 if (numExports
> MaxExports
) {
2632 return d
.fail("too many exports");
2635 for (uint32_t i
= 0; i
< numExports
; i
++) {
2636 if (!DecodeExport(d
, env
, &dupSet
)) {
2641 return d
.finishSection(*range
, "export");
2644 static bool DecodeStartSection(Decoder
& d
, ModuleEnvironment
* env
) {
2645 MaybeSectionRange range
;
2646 if (!d
.startSection(SectionId::Start
, env
, &range
, "start")) {
2654 if (!d
.readVarU32(&funcIndex
)) {
2655 return d
.fail("failed to read start func index");
2658 if (funcIndex
>= env
->numFuncs()) {
2659 return d
.fail("unknown start function");
2662 const FuncType
& funcType
= *env
->funcs
[funcIndex
].type
;
2663 if (funcType
.results().length() > 0) {
2664 return d
.fail("start function must not return anything");
2667 if (funcType
.args().length()) {
2668 return d
.fail("start function must be nullary");
2671 env
->declareFuncExported(funcIndex
, /* eager */ true, /* canFuncRef */ false);
2672 env
->startFuncIndex
= Some(funcIndex
);
2674 return d
.finishSection(*range
, "start");
2677 static inline ModuleElemSegment::Kind
NormalizeElemSegmentKind(
2678 ElemSegmentKind decodedKind
) {
2679 switch (decodedKind
) {
2680 case ElemSegmentKind::Active
:
2681 case ElemSegmentKind::ActiveWithTableIndex
: {
2682 return ModuleElemSegment::Kind::Active
;
2684 case ElemSegmentKind::Passive
: {
2685 return ModuleElemSegment::Kind::Passive
;
2687 case ElemSegmentKind::Declared
: {
2688 return ModuleElemSegment::Kind::Declared
;
2691 MOZ_CRASH("unexpected elem segment kind");
2694 static bool DecodeElemSegment(Decoder
& d
, ModuleEnvironment
* env
) {
2695 uint32_t segmentFlags
;
2696 if (!d
.readVarU32(&segmentFlags
)) {
2697 return d
.fail("expected elem segment flags field");
2700 Maybe
<ElemSegmentFlags
> flags
= ElemSegmentFlags::construct(segmentFlags
);
2702 return d
.fail("invalid elem segment flags field");
2705 ModuleElemSegment seg
= ModuleElemSegment();
2707 ElemSegmentKind segmentKind
= flags
->kind();
2708 seg
.kind
= NormalizeElemSegmentKind(segmentKind
);
2710 if (segmentKind
== ElemSegmentKind::Active
||
2711 segmentKind
== ElemSegmentKind::ActiveWithTableIndex
) {
2712 if (env
->tables
.length() == 0) {
2713 return d
.fail("active elem segment requires a table");
2716 uint32_t tableIndex
= 0;
2717 if (segmentKind
== ElemSegmentKind::ActiveWithTableIndex
&&
2718 !d
.readVarU32(&tableIndex
)) {
2719 return d
.fail("expected table index");
2721 if (tableIndex
>= env
->tables
.length()) {
2722 return d
.fail("table index out of range for element segment");
2724 seg
.tableIndex
= tableIndex
;
2727 if (!InitExpr::decodeAndValidate(d
, env
, ValType::I32
, &offset
)) {
2730 seg
.offsetIfActive
.emplace(std::move(offset
));
2732 // Too many bugs result from keeping this value zero. For passive
2733 // or declared segments, there really is no table index, and we should
2734 // never touch the field.
2735 MOZ_ASSERT(segmentKind
== ElemSegmentKind::Passive
||
2736 segmentKind
== ElemSegmentKind::Declared
);
2737 seg
.tableIndex
= (uint32_t)-1;
2740 ElemSegmentPayload payload
= flags
->payload();
2743 // `ActiveWithTableIndex`, `Declared`, and `Passive` element segments encode
2744 // the type or definition kind of the payload. `Active` element segments are
2745 // restricted to MVP behavior, which assumes only function indices.
2746 if (segmentKind
== ElemSegmentKind::Active
) {
2747 elemType
= RefType::func();
2750 case ElemSegmentPayload::Expressions
: {
2751 if (!d
.readRefType(*env
->types
, env
->features
, &elemType
)) {
2755 case ElemSegmentPayload::Indices
: {
2757 if (!d
.readFixedU8(&elemKind
)) {
2758 return d
.fail("expected element kind");
2761 if (elemKind
!= uint8_t(DefinitionKind::Function
)) {
2762 return d
.fail("invalid element kind");
2764 elemType
= RefType::func();
2769 // For active segments, check if the element type is compatible with the
2770 // destination table type.
2772 RefType tblElemType
= env
->tables
[seg
.tableIndex
].elemType
;
2773 if (!CheckIsSubtypeOf(d
, *env
, d
.currentOffset(),
2774 ValType(elemType
).fieldType(),
2775 ValType(tblElemType
).fieldType())) {
2779 seg
.elemType
= elemType
;
2782 if (!d
.readVarU32(&numElems
)) {
2783 return d
.fail("expected element segment size");
2786 if (numElems
> MaxElemSegmentLength
) {
2787 return d
.fail("too many elements in element segment");
2790 bool isAsmJS
= seg
.active() && env
->tables
[seg
.tableIndex
].isAsmJS
;
2793 case ElemSegmentPayload::Indices
: {
2794 seg
.encoding
= ModuleElemSegment::Encoding::Indices
;
2795 if (!seg
.elemIndices
.reserve(numElems
)) {
2799 for (uint32_t i
= 0; i
< numElems
; i
++) {
2801 if (!d
.readVarU32(&elemIndex
)) {
2802 return d
.fail("failed to read element index");
2804 // The only valid type of index right now is a function index.
2805 if (elemIndex
>= env
->numFuncs()) {
2806 return d
.fail("element index out of range");
2809 seg
.elemIndices
.infallibleAppend(elemIndex
);
2811 env
->declareFuncExported(elemIndex
, /*eager=*/false,
2812 /*canRefFunc=*/true);
2816 case ElemSegmentPayload::Expressions
: {
2817 seg
.encoding
= ModuleElemSegment::Encoding::Expressions
;
2818 const uint8_t* exprsStart
= d
.currentPosition();
2819 seg
.elemExpressions
.count
= numElems
;
2820 for (uint32_t i
= 0; i
< numElems
; i
++) {
2821 Maybe
<LitVal
> unusedLiteral
;
2822 if (!DecodeConstantExpression(d
, env
, elemType
, &unusedLiteral
)) {
2826 const uint8_t* exprsEnd
= d
.currentPosition();
2827 if (!seg
.elemExpressions
.exprBytes
.append(exprsStart
, exprsEnd
)) {
2833 env
->elemSegments
.infallibleAppend(std::move(seg
));
2837 static bool DecodeElemSection(Decoder
& d
, ModuleEnvironment
* env
) {
2838 MaybeSectionRange range
;
2839 if (!d
.startSection(SectionId::Elem
, env
, &range
, "elem")) {
2846 uint32_t numSegments
;
2847 if (!d
.readVarU32(&numSegments
)) {
2848 return d
.fail("failed to read number of elem segments");
2851 if (numSegments
> MaxElemSegments
) {
2852 return d
.fail("too many elem segments");
2855 if (!env
->elemSegments
.reserve(numSegments
)) {
2859 for (uint32_t i
= 0; i
< numSegments
; i
++) {
2860 if (!DecodeElemSegment(d
, env
)) {
2865 return d
.finishSection(*range
, "elem");
2868 static bool DecodeDataCountSection(Decoder
& d
, ModuleEnvironment
* env
) {
2869 MaybeSectionRange range
;
2870 if (!d
.startSection(SectionId::DataCount
, env
, &range
, "datacount")) {
2878 if (!d
.readVarU32(&dataCount
)) {
2879 return d
.fail("expected data segment count");
2882 env
->dataCount
.emplace(dataCount
);
2884 return d
.finishSection(*range
, "datacount");
2887 bool wasm::StartsCodeSection(const uint8_t* begin
, const uint8_t* end
,
2888 SectionRange
* codeSection
) {
2890 Decoder
d(begin
, end
, 0, &unused
);
2892 if (!DecodePreamble(d
)) {
2899 if (!d
.readSectionHeader(&id
, &range
)) {
2903 if (id
== uint8_t(SectionId::Code
)) {
2904 *codeSection
= range
;
2908 if (!d
.readBytes(range
.size
)) {
2916 bool wasm::DecodeModuleEnvironment(Decoder
& d
, ModuleEnvironment
* env
) {
2917 if (!DecodePreamble(d
)) {
2921 if (!DecodeTypeSection(d
, env
)) {
2925 if (!DecodeImportSection(d
, env
)) {
2929 // Eagerly check imports for future link errors against any known builtin
2931 if (!CheckImportsAgainstBuiltinModules(d
, env
)) {
2935 if (!DecodeFunctionSection(d
, env
)) {
2939 if (!DecodeTableSection(d
, env
)) {
2943 if (!DecodeMemorySection(d
, env
)) {
2947 if (!DecodeTagSection(d
, env
)) {
2951 if (!DecodeGlobalSection(d
, env
)) {
2955 if (!DecodeExportSection(d
, env
)) {
2959 if (!DecodeStartSection(d
, env
)) {
2963 if (!DecodeElemSection(d
, env
)) {
2967 if (!DecodeDataCountSection(d
, env
)) {
2971 if (!d
.startSection(SectionId::Code
, env
, &env
->codeSection
, "code")) {
2975 if (env
->codeSection
&& env
->codeSection
->size
> MaxCodeSectionBytes
) {
2976 return d
.fail("code section too big");
2982 static bool DecodeFunctionBody(Decoder
& d
, const ModuleEnvironment
& env
,
2983 uint32_t funcIndex
) {
2985 if (!d
.readVarU32(&bodySize
)) {
2986 return d
.fail("expected number of function body bytes");
2989 if (bodySize
> MaxFunctionBytes
) {
2990 return d
.fail("function body too big");
2993 if (d
.bytesRemain() < bodySize
) {
2994 return d
.fail("function body length too big");
2997 return ValidateFunctionBody(env
, funcIndex
, bodySize
, d
);
3000 static bool DecodeCodeSection(Decoder
& d
, ModuleEnvironment
* env
) {
3001 if (!env
->codeSection
) {
3002 if (env
->numFuncDefs() != 0) {
3003 return d
.fail("expected code section");
3008 uint32_t numFuncDefs
;
3009 if (!d
.readVarU32(&numFuncDefs
)) {
3010 return d
.fail("expected function body count");
3013 if (numFuncDefs
!= env
->numFuncDefs()) {
3015 "function body count does not match function signature count");
3018 for (uint32_t funcDefIndex
= 0; funcDefIndex
< numFuncDefs
; funcDefIndex
++) {
3019 if (!DecodeFunctionBody(d
, *env
, env
->numFuncImports
+ funcDefIndex
)) {
3024 return d
.finishSection(*env
->codeSection
, "code");
3027 static bool DecodeDataSection(Decoder
& d
, ModuleEnvironment
* env
) {
3028 MaybeSectionRange range
;
3029 if (!d
.startSection(SectionId::Data
, env
, &range
, "data")) {
3033 if (env
->dataCount
.isSome() && *env
->dataCount
> 0) {
3034 return d
.fail("number of data segments does not match declared count");
3039 uint32_t numSegments
;
3040 if (!d
.readVarU32(&numSegments
)) {
3041 return d
.fail("failed to read number of data segments");
3044 if (numSegments
> MaxDataSegments
) {
3045 return d
.fail("too many data segments");
3048 if (env
->dataCount
.isSome() && numSegments
!= *env
->dataCount
) {
3049 return d
.fail("number of data segments does not match declared count");
3052 for (uint32_t i
= 0; i
< numSegments
; i
++) {
3053 uint32_t initializerKindVal
;
3054 if (!d
.readVarU32(&initializerKindVal
)) {
3055 return d
.fail("expected data initializer-kind field");
3058 switch (initializerKindVal
) {
3059 case uint32_t(DataSegmentKind::Active
):
3060 case uint32_t(DataSegmentKind::Passive
):
3061 case uint32_t(DataSegmentKind::ActiveWithMemoryIndex
):
3064 return d
.fail("invalid data initializer-kind field");
3067 DataSegmentKind initializerKind
= DataSegmentKind(initializerKindVal
);
3069 if (initializerKind
!= DataSegmentKind::Passive
&&
3070 env
->numMemories() == 0) {
3071 return d
.fail("active data segment requires a memory section");
3075 if (initializerKind
== DataSegmentKind::ActiveWithMemoryIndex
) {
3076 if (!d
.readVarU32(&seg
.memoryIndex
)) {
3077 return d
.fail("expected memory index");
3079 } else if (initializerKind
== DataSegmentKind::Active
) {
3080 seg
.memoryIndex
= 0;
3082 seg
.memoryIndex
= InvalidMemoryIndex
;
3085 if (initializerKind
== DataSegmentKind::Active
||
3086 initializerKind
== DataSegmentKind::ActiveWithMemoryIndex
) {
3087 if (seg
.memoryIndex
>= env
->numMemories()) {
3088 return d
.fail("invalid memory index");
3092 ValType exprType
= ToValType(env
->memories
[seg
.memoryIndex
].indexType());
3093 if (!InitExpr::decodeAndValidate(d
, env
, exprType
, &segOffset
)) {
3096 seg
.offsetIfActive
.emplace(std::move(segOffset
));
3099 if (!d
.readVarU32(&seg
.length
)) {
3100 return d
.fail("expected segment size");
3103 if (seg
.length
> MaxDataSegmentLengthPages
* PageSize
) {
3104 return d
.fail("segment size too big");
3107 seg
.bytecodeOffset
= d
.currentOffset();
3109 if (!d
.readBytes(seg
.length
)) {
3110 return d
.fail("data segment shorter than declared");
3113 if (!env
->dataSegments
.append(std::move(seg
))) {
3118 return d
.finishSection(*range
, "data");
3121 static bool DecodeModuleNameSubsection(Decoder
& d
,
3122 const CustomSectionEnv
& nameSection
,
3123 ModuleEnvironment
* env
) {
3124 Maybe
<uint32_t> endOffset
;
3125 if (!d
.startNameSubsection(NameType::Module
, &endOffset
)) {
3133 if (!d
.readVarU32(&moduleName
.length
)) {
3134 return d
.fail("failed to read module name length");
3137 MOZ_ASSERT(d
.currentOffset() >= nameSection
.payloadOffset
);
3138 moduleName
.offsetInNamePayload
=
3139 d
.currentOffset() - nameSection
.payloadOffset
;
3141 const uint8_t* bytes
;
3142 if (!d
.readBytes(moduleName
.length
, &bytes
)) {
3143 return d
.fail("failed to read module name bytes");
3146 if (!d
.finishNameSubsection(*endOffset
)) {
3150 // Only save the module name if the whole subsection validates.
3151 env
->moduleName
.emplace(moduleName
);
3155 static bool DecodeFunctionNameSubsection(Decoder
& d
,
3156 const CustomSectionEnv
& nameSection
,
3157 ModuleEnvironment
* env
) {
3158 Maybe
<uint32_t> endOffset
;
3159 if (!d
.startNameSubsection(NameType::Function
, &endOffset
)) {
3166 uint32_t nameCount
= 0;
3167 if (!d
.readVarU32(&nameCount
) || nameCount
> MaxFuncs
) {
3168 return d
.fail("bad function name count");
3171 NameVector funcNames
;
3173 for (uint32_t i
= 0; i
< nameCount
; ++i
) {
3174 uint32_t funcIndex
= 0;
3175 if (!d
.readVarU32(&funcIndex
)) {
3176 return d
.fail("unable to read function index");
3179 // Names must refer to real functions and be given in ascending order.
3180 if (funcIndex
>= env
->numFuncs() || funcIndex
< funcNames
.length()) {
3181 return d
.fail("invalid function index");
3185 if (!d
.readVarU32(&funcName
.length
) ||
3186 funcName
.length
> JS::MaxStringLength
) {
3187 return d
.fail("unable to read function name length");
3190 if (!funcName
.length
) {
3194 if (!funcNames
.resize(funcIndex
+ 1)) {
3198 MOZ_ASSERT(d
.currentOffset() >= nameSection
.payloadOffset
);
3199 funcName
.offsetInNamePayload
=
3200 d
.currentOffset() - nameSection
.payloadOffset
;
3202 if (!d
.readBytes(funcName
.length
)) {
3203 return d
.fail("unable to read function name bytes");
3206 funcNames
[funcIndex
] = funcName
;
3209 if (!d
.finishNameSubsection(*endOffset
)) {
3213 // To encourage fully valid function names subsections; only save names if
3214 // the entire subsection decoded correctly.
3215 env
->funcNames
= std::move(funcNames
);
3219 static bool DecodeNameSection(Decoder
& d
, ModuleEnvironment
* env
) {
3220 MaybeSectionRange range
;
3221 if (!d
.startCustomSection(NameSectionName
, env
, &range
)) {
3228 env
->nameCustomSectionIndex
= Some(env
->customSections
.length() - 1);
3229 const CustomSectionEnv
& nameSection
= env
->customSections
.back();
3231 // Once started, custom sections do not report validation errors.
3233 if (!DecodeModuleNameSubsection(d
, nameSection
, env
)) {
3237 if (!DecodeFunctionNameSubsection(d
, nameSection
, env
)) {
3241 while (d
.currentOffset() < range
->end()) {
3242 if (!d
.skipNameSubsection()) {
3248 d
.finishCustomSection(NameSectionName
, *range
);
3252 bool wasm::DecodeModuleTail(Decoder
& d
, ModuleEnvironment
* env
) {
3253 if (!DecodeDataSection(d
, env
)) {
3257 if (!DecodeNameSection(d
, env
)) {
3262 if (!d
.skipCustomSection(env
)) {
3263 if (d
.resilientMode()) {
3274 // Validate algorithm.
3276 bool wasm::Validate(JSContext
* cx
, const ShareableBytes
& bytecode
,
3277 const FeatureOptions
& options
, UniqueChars
* error
) {
3278 Decoder
d(bytecode
.bytes
, 0, error
);
3280 FeatureArgs features
= FeatureArgs::build(cx
, options
);
3281 ModuleEnvironment
env(features
);
3286 if (!DecodeModuleEnvironment(d
, &env
)) {
3290 if (!DecodeCodeSection(d
, &env
)) {
3294 if (!DecodeModuleTail(d
, &env
)) {
3298 MOZ_ASSERT(!*error
, "unreported error in decoding");