/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
 * vim: set ts=8 sts=2 et sw=2 tw=80:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "vm/SharedArrayObject.h"

#include "mozilla/Atomics.h"
#include "mozilla/DebugOnly.h"
#include "mozilla/TaggedAnonymousMemory.h"

#include "gc/GCContext.h"
#include "gc/Memory.h"
#include "jit/AtomicOperations.h"
#include "js/friend/ErrorMessages.h"  // js::GetErrorMessage, JSMSG_*
#include "js/PropertySpec.h"
#include "js/SharedArrayBuffer.h"
#include "util/Memory.h"
#include "util/WindowsWrapper.h"
#include "vm/SharedMem.h"
#include "wasm/WasmConstants.h"
#include "wasm/WasmMemory.h"

#include "vm/ArrayBufferObject-inl.h"
#include "vm/JSObject-inl.h"
#include "vm/NativeObject-inl.h"

using js::wasm::Pages;
using mozilla::DebugOnly;
using mozilla::Maybe;
using mozilla::Nothing;
using mozilla::Some;

using namespace js;
using namespace js::jit;
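// Size of the accessible region for a Wasm shared buffer of the given byte
// length, rounded up to whole system pages.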
static size_t WasmSharedArrayAccessibleSize(size_t length) {
  return AlignBytes(length, gc::SystemPageSize());
}
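// Total heap allocation size for a plain (non-Wasm) shared buffer: the
// SharedArrayRawBuffer header followed directly by the data.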
static size_t NonWasmSharedArrayAllocSize(size_t length) {
  MOZ_ASSERT(length <= ArrayBufferObject::MaxByteLength);
  return sizeof(SharedArrayRawBuffer) + length;
}

// The mapped size for a plain shared array buffer, used only for tracking
// memory usage. This is incorrect for some WASM cases, and for hypothetical
// callers of js::SharedArrayBufferObject::createFromNewRawBuffer that do not
// currently exist, but it's fine as a signal of GC pressure.
static size_t SharedArrayMappedSize(bool isWasm, size_t length) {
  // Wasm buffers use MapBufferMemory and allocate a full page for the header.
  // Non-Wasm buffers use malloc.
  if (isWasm) {
    return WasmSharedArrayAccessibleSize(length) + gc::SystemPageSize();
  }
  return NonWasmSharedArrayAllocSize(length);
}
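// Allocate a plain (non-Wasm) shared buffer. The header and the
// zero-initialized data come from a single calloc'd block, with the data
// starting immediately after the header.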
SharedArrayRawBuffer* SharedArrayRawBuffer::Allocate(size_t length) {
  MOZ_RELEASE_ASSERT(length <= ArrayBufferObject::MaxByteLength);

  size_t allocSize = NonWasmSharedArrayAllocSize(length);
  uint8_t* p = js_pod_calloc<uint8_t>(allocSize);
  if (!p) {
    return nullptr;
  }

  uint8_t* buffer = p + sizeof(SharedArrayRawBuffer);
  return new (p) SharedArrayRawBuffer(/* isWasm = */ false, buffer, length);
}
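// Allocate a Wasm shared buffer. The whole mapped range, plus one leading
// system page, is reserved with MapBufferMemory and only the accessible
// prefix is committed. The WasmSharedArrayRawBuffer header lives at the end
// of that leading page, immediately before the data.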
WasmSharedArrayRawBuffer* WasmSharedArrayRawBuffer::AllocateWasm(
    wasm::IndexType indexType, Pages initialPages, wasm::Pages clampedMaxPages,
    const mozilla::Maybe<wasm::Pages>& sourceMaxPages,
    const mozilla::Maybe<size_t>& mappedSize) {
  // Prior code has asserted that initial pages is within our implementation
  // limits (wasm::MaxMemoryPages()) and we can assume it is a valid size_t.
  MOZ_ASSERT(initialPages.hasByteLength());
  size_t length = initialPages.byteLength();

  MOZ_RELEASE_ASSERT(length <= ArrayBufferObject::MaxByteLength);

  size_t accessibleSize = WasmSharedArrayAccessibleSize(length);
  if (accessibleSize < length) {
    return nullptr;
  }

  size_t computedMappedSize = mappedSize.isSome()
                                  ? *mappedSize
                                  : wasm::ComputeMappedSize(clampedMaxPages);
  MOZ_ASSERT(accessibleSize <= computedMappedSize);

  uint64_t mappedSizeWithHeader = computedMappedSize + gc::SystemPageSize();
  uint64_t accessibleSizeWithHeader = accessibleSize + gc::SystemPageSize();

  void* p = MapBufferMemory(indexType, mappedSizeWithHeader,
                            accessibleSizeWithHeader);
  if (!p) {
    return nullptr;
  }

  uint8_t* buffer = reinterpret_cast<uint8_t*>(p) + gc::SystemPageSize();
  uint8_t* base = buffer - sizeof(WasmSharedArrayRawBuffer);
  return new (base) WasmSharedArrayRawBuffer(
      buffer, length, indexType, clampedMaxPages,
      sourceMaxPages.valueOr(Pages(0)), computedMappedSize);
}
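// Best-effort: raise the clamped maximum page count and extend the existing
// mapping in place. If the mapping cannot be extended, the current limits are
// left unchanged.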
void WasmSharedArrayRawBuffer::tryGrowMaxPagesInPlace(Pages deltaMaxPages) {
  Pages newMaxPages = clampedMaxPages_;
  DebugOnly<bool> valid = newMaxPages.checkedIncrement(deltaMaxPages);
  // Caller must ensure increment does not overflow or increase over the
  // specified maximum pages.
  MOZ_ASSERT(valid);
  MOZ_ASSERT(newMaxPages <= sourceMaxPages_);

  size_t newMappedSize = wasm::ComputeMappedSize(newMaxPages);
  MOZ_ASSERT(mappedSize_ <= newMappedSize);
  if (mappedSize_ == newMappedSize) {
    return;
  }

  if (!ExtendBufferMapping(basePointer(), mappedSize_, newMappedSize)) {
    return;
  }

  mappedSize_ = newMappedSize;
  clampedMaxPages_ = newMaxPages;
}
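// Grow the committed length to newPages within the existing mapping. Fails if
// newPages exceeds the clamped maximum or if committing the extra pages
// fails.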
bool WasmSharedArrayRawBuffer::wasmGrowToPagesInPlace(const Lock&,
                                                      wasm::IndexType t,
                                                      wasm::Pages newPages) {
  // Check that the new pages is within our allowable range. This will
  // simultaneously check against the maximum specified in source and our
  // implementation limits.
  if (newPages > clampedMaxPages_) {
    return false;
  }
  MOZ_ASSERT(newPages <= wasm::MaxMemoryPages(t) &&
             newPages.byteLength() <= ArrayBufferObject::MaxByteLength);

  // We have checked against the clamped maximum and so we know we can convert
  // to byte lengths now.
  size_t newLength = newPages.byteLength();

  MOZ_ASSERT(newLength >= length_);

  if (newLength == length_) {
    return true;
  }

  size_t delta = newLength - length_;
  MOZ_ASSERT(delta % wasm::PageSize == 0);

  uint8_t* dataEnd = dataPointerShared().unwrap(/* for resize */) + length_;
  MOZ_ASSERT(uintptr_t(dataEnd) % gc::SystemPageSize() == 0);

  if (!CommitBufferMemory(dataEnd, delta)) {
    return false;
  }

  // We rely on CommitBufferMemory (and therefore memmap/VirtualAlloc) to only
  // return once it has committed memory for all threads. We only update with a
  // new length once this has occurred.
  length_ = newLength;

  return true;
}
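// Zero the given page-aligned range and, where the platform allows it, return
// the backing physical pages to the OS. The range stays mapped and remains
// accessible to all threads.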
void WasmSharedArrayRawBuffer::discard(size_t byteOffset, size_t byteLen) {
  SharedMem<uint8_t*> memBase = dataPointerShared();

  // The caller is responsible for ensuring these conditions are met; see this
  // function's comment in SharedArrayObject.h.
  MOZ_ASSERT(byteOffset % wasm::PageSize == 0);
  MOZ_ASSERT(byteLen % wasm::PageSize == 0);
  MOZ_ASSERT(wasm::MemoryBoundsCheck(uint64_t(byteOffset), uint64_t(byteLen),
                                     volatileByteLength()));

  // Discarding zero bytes "succeeds" with no effect.
  if (byteLen == 0) {
    return;
  }

  SharedMem<uint8_t*> addr = memBase + uintptr_t(byteOffset);

  // On POSIX-ish platforms, we discard memory by overwriting previously-mapped
  // pages with freshly-mapped pages (which are all zeroed). The operating
  // system recognizes this and decreases the process RSS, and eventually
  // collects the abandoned physical pages.
  //
  // On Windows, committing over previously-committed pages has no effect. We
  // could decommit and recommit, but this doesn't work for shared memories
  // since other threads could access decommitted memory - causing a trap.
  // Instead, we simply zero memory (memset 0), and then VirtualUnlock(), which
  // for Historical Reasons immediately removes the pages from the working set.
  // And then, because the pages were zeroed, Windows will actually reclaim the
  // memory entirely instead of paging it out to disk. Naturally this behavior
  // is not officially documented, but a Raymond Chen blog post is basically as
  // good as MSDN, right?
  //
  // https://devblogs.microsoft.com/oldnewthing/20170113-00/?p=95185

#ifdef XP_WIN
  // Discarding the entire region at once causes us to page the entire region
  // into the working set, only to throw it out again. This can be actually
  // disastrous when discarding already-discarded memory. To mitigate this, we
  // discard a chunk of memory at a time - this comes at a small performance
  // cost from syscalls and potentially less-optimal memsets.
  size_t numPages = byteLen / wasm::PageSize;
  for (size_t i = 0; i < numPages; i++) {
    AtomicOperations::memsetSafeWhenRacy(addr + (i * wasm::PageSize), 0,
                                         wasm::PageSize);
    DebugOnly<bool> result =
        VirtualUnlock(addr.unwrap() + (i * wasm::PageSize), wasm::PageSize);
    MOZ_ASSERT(!result);  // this always "fails" when unlocking unlocked
                          // memory...which is the only case we care about
  }
#elif defined(__wasi__)
  AtomicOperations::memsetSafeWhenRacy(addr, 0, byteLen);
#else  // !XP_WIN
  void* data = MozTaggedAnonymousMmap(
      addr.unwrap(), byteLen, PROT_READ | PROT_WRITE,
      MAP_PRIVATE | MAP_ANON | MAP_FIXED, -1, 0, "wasm-reserved");
  if (data == MAP_FAILED) {
    MOZ_CRASH("failed to discard wasm memory; memory mappings may be broken");
  }
#endif
}
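// Take another strong reference. Returns false only if doing so would
// overflow the 32-bit refcount; a CAS loop keeps the check race-free.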
bool SharedArrayRawBuffer::addReference() {
  MOZ_RELEASE_ASSERT(refcount_ > 0);

  // Be careful never to overflow the refcount field.
  for (;;) {
    uint32_t old_refcount = refcount_;
    uint32_t new_refcount = old_refcount + 1;
    if (new_refcount == 0) {
      return false;
    }
    if (refcount_.compareExchange(old_refcount, new_refcount)) {
      return true;
    }
  }
}

void SharedArrayRawBuffer::dropReference() {
  // Normally if the refcount is zero then the memory will have been unmapped
  // and this test may just crash, but if the memory has been retained for any
  // reason we will catch the underflow here.
  MOZ_RELEASE_ASSERT(refcount_ > 0);

  // Drop the reference to the buffer.
  uint32_t new_refcount = --refcount_;  // Atomic.
  if (new_refcount) {
    return;
  }

  // This was the final reference, so release the buffer.
  if (isWasm()) {
    WasmSharedArrayRawBuffer* wasmBuf = toWasmBuffer();
    wasm::IndexType indexType = wasmBuf->wasmIndexType();
    uint8_t* basePointer = wasmBuf->basePointer();
    size_t mappedSizeWithHeader = wasmBuf->mappedSize() + gc::SystemPageSize();
    // Call the destructor to destroy the growLock_ Mutex.
    wasmBuf->~WasmSharedArrayRawBuffer();
    UnmapBufferMemory(indexType, basePointer, mappedSizeWithHeader);
  } else {
    js_delete(this);
  }
}

static bool IsSharedArrayBuffer(HandleValue v) {
  return v.isObject() && v.toObject().is<SharedArrayBufferObject>();
}

MOZ_ALWAYS_INLINE bool SharedArrayBufferObject::byteLengthGetterImpl(
    JSContext* cx, const CallArgs& args) {
  MOZ_ASSERT(IsSharedArrayBuffer(args.thisv()));
  auto* buffer = &args.thisv().toObject().as<SharedArrayBufferObject>();
  args.rval().setNumber(buffer->byteLength());
  return true;
}

bool SharedArrayBufferObject::byteLengthGetter(JSContext* cx, unsigned argc,
                                               Value* vp) {
  CallArgs args = CallArgsFromVp(argc, vp);
  return CallNonGenericMethod<IsSharedArrayBuffer, byteLengthGetterImpl>(cx,
                                                                         args);
}
// ES2017 draft rev 6390c2f1b34b309895d31d8c0512eac8660a0210
// 24.2.2.1 SharedArrayBuffer( length )
bool SharedArrayBufferObject::class_constructor(JSContext* cx, unsigned argc,
                                                Value* vp) {
  CallArgs args = CallArgsFromVp(argc, vp);

  // Step 1.
  if (!ThrowIfNotConstructing(cx, args, "SharedArrayBuffer")) {
    return false;
  }

  // Step 2.
  uint64_t byteLength;
  if (!ToIndex(cx, args.get(0), &byteLength)) {
    return false;
  }

  // Step 3 (Inlined 24.2.1.1 AllocateSharedArrayBuffer).
  // 24.2.1.1, step 1 (Inlined 9.1.14 OrdinaryCreateFromConstructor).
  RootedObject proto(cx);
  if (!GetPrototypeFromBuiltinConstructor(cx, args, JSProto_SharedArrayBuffer,
                                          &proto)) {
    return false;
  }

  // 24.2.1.1, step 3 (Inlined 6.2.7.2 CreateSharedByteDataBlock, step 2).
  // Refuse to allocate too large buffers.
  if (byteLength > ArrayBufferObject::MaxByteLength) {
    JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr,
                              JSMSG_SHARED_ARRAY_BAD_LENGTH);
    return false;
  }

  // 24.2.1.1, steps 1 and 4-6.
  JSObject* bufobj = New(cx, byteLength, proto);
  if (!bufobj) {
    return false;
  }
  args.rval().setObject(*bufobj);
  return true;
}
SharedArrayBufferObject* SharedArrayBufferObject::New(JSContext* cx,
                                                      size_t length,
                                                      HandleObject proto) {
  SharedArrayRawBuffer* buffer = SharedArrayRawBuffer::Allocate(length);
  if (!buffer) {
    js::ReportOutOfMemory(cx);
    return nullptr;
  }

  SharedArrayBufferObject* obj = New(cx, buffer, length, proto);
  if (!obj) {
    buffer->dropReference();
    return nullptr;
  }

  return obj;
}

SharedArrayBufferObject* SharedArrayBufferObject::New(
    JSContext* cx, SharedArrayRawBuffer* buffer, size_t length,
    HandleObject proto) {
  MOZ_ASSERT(cx->realm()->creationOptions().getSharedMemoryAndAtomicsEnabled());

  AutoSetNewObjectMetadata metadata(cx);
  Rooted<SharedArrayBufferObject*> obj(
      cx, NewObjectWithClassProto<SharedArrayBufferObject>(cx, proto));
  if (!obj) {
    return nullptr;
  }

  MOZ_ASSERT(obj->getClass() == &class_);

  cx->runtime()->incSABCount();

  if (!obj->acceptRawBuffer(buffer, length)) {
    js::ReportOutOfMemory(cx);
    return nullptr;
  }

  return obj;
}
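// Record the raw buffer in this object's reserved slots and register its
// mapped size with the zone's shared-memory accounting. Returns false if that
// registration fails (OOM).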
bool SharedArrayBufferObject::acceptRawBuffer(SharedArrayRawBuffer* buffer,
                                              size_t length) {
  if (!zone()->addSharedMemory(buffer,
                               SharedArrayMappedSize(buffer->isWasm(), length),
                               MemoryUse::SharedArrayRawBuffer)) {
    return false;
  }

  setFixedSlot(RAWBUF_SLOT, PrivateValue(buffer));
  setFixedSlot(LENGTH_SLOT, PrivateValue(length));
  return true;
}
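// Reverse of acceptRawBuffer: deregister from the zone's accounting, drop this
// object's reference to the raw buffer, and clear the slot.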
void SharedArrayBufferObject::dropRawBuffer() {
  size_t size = SharedArrayMappedSize(isWasm(), byteLength());
  zoneFromAnyThread()->removeSharedMemory(rawBufferObject(), size,
                                          MemoryUse::SharedArrayRawBuffer);
  rawBufferObject()->dropReference();
  setFixedSlot(RAWBUF_SLOT, UndefinedValue());
}

SharedArrayRawBuffer* SharedArrayBufferObject::rawBufferObject() const {
  Value v = getFixedSlot(RAWBUF_SLOT);
  MOZ_ASSERT(!v.isUndefined());
  return reinterpret_cast<SharedArrayRawBuffer*>(v.toPrivate());
}

void SharedArrayBufferObject::Finalize(JS::GCContext* gcx, JSObject* obj) {
  // Must be foreground finalizable so that we can account for the object.
  MOZ_ASSERT(gcx->onMainThread());
  gcx->runtime()->decSABCount();

  SharedArrayBufferObject& buf = obj->as<SharedArrayBufferObject>();

  // Detect the case of failure during SharedArrayBufferObject creation,
  // which causes a SharedArrayRawBuffer to never be attached.
  Value v = buf.getFixedSlot(RAWBUF_SLOT);
  if (!v.isUndefined()) {
    buf.dropRawBuffer();
  }
}
/* static */
void SharedArrayBufferObject::addSizeOfExcludingThis(
    JSObject* obj, mozilla::MallocSizeOf mallocSizeOf, JS::ClassInfo* info,
    JS::RuntimeSizes* runtimeSizes) {
  // Divide the buffer size by the refcount to get the fraction of the buffer
  // owned by this thread. It's conceivable that the refcount might change in
  // the middle of memory reporting, in which case the amount reported for
  // some threads might be too high (if the refcount goes up) or too low (if
  // the refcount goes down). But that's unlikely and hard to avoid, so we
  // just live with the risk.
  const SharedArrayBufferObject& buf = obj->as<SharedArrayBufferObject>();
  size_t owned = buf.byteLength() / buf.rawBufferObject()->refcount();
  if (buf.isWasm()) {
    info->objectsNonHeapElementsWasmShared += owned;
    if (runtimeSizes) {
      size_t ownedGuardPages = (buf.wasmMappedSize() - buf.byteLength()) /
                               buf.rawBufferObject()->refcount();
      runtimeSizes->wasmGuardPages += ownedGuardPages;
    }
  } else {
    info->objectsNonHeapElementsShared += owned;
  }
}
/* static */
void SharedArrayBufferObject::copyData(
    Handle<ArrayBufferObjectMaybeShared*> toBuffer, size_t toIndex,
    Handle<ArrayBufferObjectMaybeShared*> fromBuffer, size_t fromIndex,
    size_t count) {
  MOZ_ASSERT(toBuffer->byteLength() >= count);
  MOZ_ASSERT(toBuffer->byteLength() >= toIndex + count);
  MOZ_ASSERT(fromBuffer->byteLength() >= fromIndex);
  MOZ_ASSERT(fromBuffer->byteLength() >= fromIndex + count);

  jit::AtomicOperations::memcpySafeWhenRacy(
      toBuffer->dataPointerEither() + toIndex,
      fromBuffer->dataPointerEither() + fromIndex, count);
}
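// Wrap an already-allocated Wasm raw buffer in a new SharedArrayBufferObject.
// The caller's reference to the buffer is transferred to the new object on
// success and dropped on failure.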
SharedArrayBufferObject* SharedArrayBufferObject::createFromNewRawBuffer(
    JSContext* cx, WasmSharedArrayRawBuffer* buffer, size_t initialSize) {
  MOZ_ASSERT(cx->realm()->creationOptions().getSharedMemoryAndAtomicsEnabled());

  AutoSetNewObjectMetadata metadata(cx);
  SharedArrayBufferObject* obj =
      NewBuiltinClassInstance<SharedArrayBufferObject>(cx);
  if (!obj) {
    buffer->dropReference();
    return nullptr;
  }

  cx->runtime()->incSABCount();

  if (!obj->acceptRawBuffer(buffer, initialSize)) {
    buffer->dropReference();
    return nullptr;
  }

  return obj;
}

/* static */
void SharedArrayBufferObject::wasmDiscard(Handle<SharedArrayBufferObject*> buf,
                                          uint64_t byteOffset,
                                          uint64_t byteLen) {
  MOZ_ASSERT(buf->isWasm());
  buf->rawWasmBufferObject()->discard(byteOffset, byteLen);
}
static const JSClassOps SharedArrayBufferObjectClassOps = {
    nullptr,                            // addProperty
    nullptr,                            // delProperty
    nullptr,                            // enumerate
    nullptr,                            // newEnumerate
    nullptr,                            // resolve
    nullptr,                            // mayResolve
    SharedArrayBufferObject::Finalize,  // finalize
    nullptr,                            // call
    nullptr,                            // construct
    nullptr,                            // trace
};

static const JSFunctionSpec sharedarray_functions[] = {JS_FS_END};

static const JSPropertySpec sharedarray_properties[] = {
    JS_SELF_HOSTED_SYM_GET(species, "$SharedArrayBufferSpecies", 0), JS_PS_END};

static const JSFunctionSpec sharedarray_proto_functions[] = {
    JS_SELF_HOSTED_FN("slice", "SharedArrayBufferSlice", 2, 0), JS_FS_END};

static const JSPropertySpec sharedarray_proto_properties[] = {
    JS_PSG("byteLength", SharedArrayBufferObject::byteLengthGetter, 0),
    JS_STRING_SYM_PS(toStringTag, "SharedArrayBuffer", JSPROP_READONLY),
    JS_PS_END};

static const ClassSpec SharedArrayBufferObjectClassSpec = {
    GenericCreateConstructor<SharedArrayBufferObject::class_constructor, 1,
                             gc::AllocKind::FUNCTION>,
    GenericCreatePrototype<SharedArrayBufferObject>,
    sharedarray_functions,
    sharedarray_properties,
    sharedarray_proto_functions,
    sharedarray_proto_properties};
const JSClass SharedArrayBufferObject::class_ = {
    "SharedArrayBuffer",
    JSCLASS_DELAY_METADATA_BUILDER |
        JSCLASS_HAS_RESERVED_SLOTS(SharedArrayBufferObject::RESERVED_SLOTS) |
        JSCLASS_HAS_CACHED_PROTO(JSProto_SharedArrayBuffer) |
        JSCLASS_FOREGROUND_FINALIZE,
    &SharedArrayBufferObjectClassOps, &SharedArrayBufferObjectClassSpec,
    JS_NULL_CLASS_EXT};

const JSClass SharedArrayBufferObject::protoClass_ = {
    "SharedArrayBuffer.prototype",
    JSCLASS_HAS_CACHED_PROTO(JSProto_SharedArrayBuffer), JS_NULL_CLASS_OPS,
    &SharedArrayBufferObjectClassSpec};

JS_PUBLIC_API size_t JS::GetSharedArrayBufferByteLength(JSObject* obj) {
  auto* aobj = obj->maybeUnwrapAs<SharedArrayBufferObject>();
  return aobj ? aobj->byteLength() : 0;
}

JS_PUBLIC_API void JS::GetSharedArrayBufferLengthAndData(JSObject* obj,
                                                         size_t* length,
                                                         bool* isSharedMemory,
                                                         uint8_t** data) {
  MOZ_ASSERT(obj->is<SharedArrayBufferObject>());
  *length = obj->as<SharedArrayBufferObject>().byteLength();
  *data = obj->as<SharedArrayBufferObject>().dataPointerShared().unwrap(
      /*safe - caller knows*/);
  *isSharedMemory = true;
}

JS_PUBLIC_API JSObject* JS::NewSharedArrayBuffer(JSContext* cx, size_t nbytes) {
  MOZ_ASSERT(cx->realm()->creationOptions().getSharedMemoryAndAtomicsEnabled());

  if (nbytes > ArrayBufferObject::MaxByteLength) {
    JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr,
                              JSMSG_SHARED_ARRAY_BAD_LENGTH);
    return nullptr;
  }

  return SharedArrayBufferObject::New(cx, nbytes,
                                      /* proto = */ nullptr);
}

JS_PUBLIC_API bool JS::IsSharedArrayBufferObject(JSObject* obj) {
  return obj->canUnwrapAs<SharedArrayBufferObject>();
}

JS_PUBLIC_API uint8_t* JS::GetSharedArrayBufferData(
    JSObject* obj, bool* isSharedMemory, const JS::AutoRequireNoGC&) {
  auto* aobj = obj->maybeUnwrapAs<SharedArrayBufferObject>();
  if (!aobj) {
    return nullptr;
  }
  *isSharedMemory = true;
  return aobj->dataPointerShared().unwrap(/*safe - caller knows*/);
}

JS_PUBLIC_API bool JS::ContainsSharedArrayBuffer(JSContext* cx) {
  return cx->runtime()->hasLiveSABs();
}