/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
 * vim: set ts=8 sts=2 et sw=2 tw=80:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#ifndef vm_TypedArrayObject_inl_h
#define vm_TypedArrayObject_inl_h

/* Utilities and common inline code for TypedArray */

#include "vm/TypedArrayObject.h"

#include "mozilla/Assertions.h"
#include "mozilla/FloatingPoint.h"

#include <algorithm>
#include <type_traits>

#include "jsnum.h"

#include "gc/Zone.h"
#include "jit/AtomicOperations.h"
#include "js/Conversions.h"
#include "js/ScalarType.h"  // js::Scalar::Type
#include "js/Value.h"
#include "util/DifferentialTesting.h"
#include "util/Memory.h"
#include "vm/ArrayObject.h"
#include "vm/BigIntType.h"
#include "vm/NativeObject.h"
#include "vm/Uint8Clamped.h"

#include "gc/ObjectKind-inl.h"
#include "vm/NativeObject-inl.h"
#include "vm/ObjectOperations-inl.h"

namespace js {
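
// Convert a number of type |From| to typed-array element type |To|. The
// specializations below cover every floating-point source paired with an
// integer destination, using the JS conversion routines (JS::ToInt8,
// JS::ToUint32, and so on); the generic overload at the end statically
// rejects float-to-int conversions so they can't bypass the specializations.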
template <typename To, typename From>
inline To ConvertNumber(From src);

template <>
inline int8_t ConvertNumber<int8_t, float>(float src) {
  return JS::ToInt8(src);
}

template <>
inline uint8_t ConvertNumber<uint8_t, float>(float src) {
  return JS::ToUint8(src);
}

template <>
inline uint8_clamped ConvertNumber<uint8_clamped, float>(float src) {
  return uint8_clamped(src);
}

template <>
inline int16_t ConvertNumber<int16_t, float>(float src) {
  return JS::ToInt16(src);
}

template <>
inline uint16_t ConvertNumber<uint16_t, float>(float src) {
  return JS::ToUint16(src);
}

template <>
inline int32_t ConvertNumber<int32_t, float>(float src) {
  return JS::ToInt32(src);
}

template <>
inline uint32_t ConvertNumber<uint32_t, float>(float src) {
  return JS::ToUint32(src);
}

template <>
inline int64_t ConvertNumber<int64_t, float>(float src) {
  return JS::ToInt64(src);
}

template <>
inline uint64_t ConvertNumber<uint64_t, float>(float src) {
  return JS::ToUint64(src);
}

template <>
inline int8_t ConvertNumber<int8_t, double>(double src) {
  return JS::ToInt8(src);
}

template <>
inline uint8_t ConvertNumber<uint8_t, double>(double src) {
  return JS::ToUint8(src);
}

template <>
inline uint8_clamped ConvertNumber<uint8_clamped, double>(double src) {
  return uint8_clamped(src);
}

template <>
inline int16_t ConvertNumber<int16_t, double>(double src) {
  return JS::ToInt16(src);
}

template <>
inline uint16_t ConvertNumber<uint16_t, double>(double src) {
  return JS::ToUint16(src);
}

template <>
inline int32_t ConvertNumber<int32_t, double>(double src) {
  return JS::ToInt32(src);
}

template <>
inline uint32_t ConvertNumber<uint32_t, double>(double src) {
  return JS::ToUint32(src);
}

template <>
inline int64_t ConvertNumber<int64_t, double>(double src) {
  return JS::ToInt64(src);
}

template <>
inline uint64_t ConvertNumber<uint64_t, double>(double src) {
  return JS::ToUint64(src);
}

template <typename To, typename From>
inline To ConvertNumber(From src) {
  static_assert(
      !std::is_floating_point_v<From> ||
          (std::is_floating_point_v<From> && std::is_floating_point_v<To>),
      "conversion from floating point to int should have been handled by "
      "specializations above");
  return To(src);
}
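
// Maps a native element type to its Scalar::Type id and to the JSProtoKey
// of the matching typed-array constructor.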
template <typename NativeType>
struct TypeIDOfType;
template <>
struct TypeIDOfType<int8_t> {
  static const Scalar::Type id = Scalar::Int8;
  static const JSProtoKey protoKey = JSProto_Int8Array;
};
template <>
struct TypeIDOfType<uint8_t> {
  static const Scalar::Type id = Scalar::Uint8;
  static const JSProtoKey protoKey = JSProto_Uint8Array;
};
template <>
struct TypeIDOfType<int16_t> {
  static const Scalar::Type id = Scalar::Int16;
  static const JSProtoKey protoKey = JSProto_Int16Array;
};
template <>
struct TypeIDOfType<uint16_t> {
  static const Scalar::Type id = Scalar::Uint16;
  static const JSProtoKey protoKey = JSProto_Uint16Array;
};
template <>
struct TypeIDOfType<int32_t> {
  static const Scalar::Type id = Scalar::Int32;
  static const JSProtoKey protoKey = JSProto_Int32Array;
};
template <>
struct TypeIDOfType<uint32_t> {
  static const Scalar::Type id = Scalar::Uint32;
  static const JSProtoKey protoKey = JSProto_Uint32Array;
};
template <>
struct TypeIDOfType<int64_t> {
  static const Scalar::Type id = Scalar::BigInt64;
  static const JSProtoKey protoKey = JSProto_BigInt64Array;
};
template <>
struct TypeIDOfType<uint64_t> {
  static const Scalar::Type id = Scalar::BigUint64;
  static const JSProtoKey protoKey = JSProto_BigUint64Array;
};
template <>
struct TypeIDOfType<float> {
  static const Scalar::Type id = Scalar::Float32;
  static const JSProtoKey protoKey = JSProto_Float32Array;
};
template <>
struct TypeIDOfType<double> {
  static const Scalar::Type id = Scalar::Float64;
  static const JSProtoKey protoKey = JSProto_Float64Array;
};
template <>
struct TypeIDOfType<uint8_clamped> {
  static const Scalar::Type id = Scalar::Uint8Clamped;
  static const JSProtoKey protoKey = JSProto_Uint8ClampedArray;
};
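
// Memory operations for typed arrays whose buffer may be shared memory:
// every access goes through the race-safe jit::AtomicOperations primitives.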
class SharedOps {
 public:
  template <typename T>
  static T load(SharedMem<T*> addr) {
    return js::jit::AtomicOperations::loadSafeWhenRacy(addr);
  }

  template <typename T>
  static void store(SharedMem<T*> addr, T value) {
    js::jit::AtomicOperations::storeSafeWhenRacy(addr, value);
  }

  template <typename T>
  static void memcpy(SharedMem<T*> dest, SharedMem<T*> src, size_t size) {
    js::jit::AtomicOperations::memcpySafeWhenRacy(dest, src, size);
  }

  template <typename T>
  static void memmove(SharedMem<T*> dest, SharedMem<T*> src, size_t size) {
    js::jit::AtomicOperations::memmoveSafeWhenRacy(dest, src, size);
  }

  template <typename T>
  static void podCopy(SharedMem<T*> dest, SharedMem<T*> src, size_t nelem) {
    js::jit::AtomicOperations::podCopySafeWhenRacy(dest, src, nelem);
  }

  template <typename T>
  static void podMove(SharedMem<T*> dest, SharedMem<T*> src, size_t nelem) {
    js::jit::AtomicOperations::podMoveSafeWhenRacy(dest, src, nelem);
  }

  static SharedMem<void*> extract(TypedArrayObject* obj) {
    return obj->dataPointerEither();
  }
};
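
// Memory operations for typed arrays whose buffer is never shared across
// threads, so plain loads, stores, and std:: algorithms suffice.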
class UnsharedOps {
 public:
  template <typename T>
  static T load(SharedMem<T*> addr) {
    return *addr.unwrapUnshared();
  }

  template <typename T>
  static void store(SharedMem<T*> addr, T value) {
    *addr.unwrapUnshared() = value;
  }

  template <typename T>
  static void memcpy(SharedMem<T*> dest, SharedMem<T*> src, size_t size) {
    ::memcpy(dest.unwrapUnshared(), src.unwrapUnshared(), size);
  }

  template <typename T>
  static void memmove(SharedMem<T*> dest, SharedMem<T*> src, size_t size) {
    ::memmove(dest.unwrapUnshared(), src.unwrapUnshared(), size);
  }

  template <typename T>
  static void podCopy(SharedMem<T*> dest, SharedMem<T*> src, size_t nelem) {
    // std::copy_n better matches the argument values/types of this
    // function, but as noted below it allows the input/output ranges to
    // overlap. std::copy does not, so use it so the compiler has extra
    // ability to optimize.
    const auto* first = src.unwrapUnshared();
    const auto* last = first + nelem;
    auto* result = dest.unwrapUnshared();
    std::copy(first, last, result);
  }

  template <typename T>
  static void podMove(SharedMem<T*> dest, SharedMem<T*> src, size_t n) {
    // std::copy_n copies from |src| to |dest| starting from |src|, so
    // input/output ranges *may* permissibly overlap, as this function
    // allows.
    const auto* start = src.unwrapUnshared();
    auto* result = dest.unwrapUnshared();
    std::copy_n(start, n, result);
  }

  static SharedMem<void*> extract(TypedArrayObject* obj) {
    return SharedMem<void*>::unshared(obj->dataPointerUnshared());
  }
};
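
// Copy-and-convert operations specialized on the native element type |T|.
// |Ops| is SharedOps or UnsharedOps depending on whether the target's
// buffer may be shared memory.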
template <typename T, typename Ops>
class ElementSpecific {
 public:
  /*
   * Copy |source|'s elements into |target|, starting at |target[offset]|.
   * Act as if the assignments occurred from a fresh copy of |source|, in
   * case the two memory ranges overlap.
   */
  static bool setFromTypedArray(Handle<TypedArrayObject*> target,
                                size_t targetLength,
                                Handle<TypedArrayObject*> source,
                                size_t sourceLength, size_t offset) {
    // WARNING: |source| may be an unwrapped typed array from a different
    // compartment. Proceed with caution!

    MOZ_ASSERT(TypeIDOfType<T>::id == target->type(),
               "calling wrong setFromTypedArray specialization");
    MOZ_ASSERT(!target->hasDetachedBuffer(), "target isn't detached");
    MOZ_ASSERT(!source->hasDetachedBuffer(), "source isn't detached");
    MOZ_ASSERT(*target->length() >= targetLength, "target isn't shrunk");
    MOZ_ASSERT(*source->length() >= sourceLength, "source isn't shrunk");

    MOZ_ASSERT(offset <= targetLength);
    MOZ_ASSERT(sourceLength <= targetLength - offset);

    if (TypedArrayObject::sameBuffer(target, source)) {
      return setFromOverlappingTypedArray(target, targetLength, source,
                                          sourceLength, offset);
    }

    SharedMem<T*> dest =
        target->dataPointerEither().template cast<T*>() + offset;
    size_t count = sourceLength;

    if (source->type() == target->type()) {
      Ops::podCopy(dest, source->dataPointerEither().template cast<T*>(),
                   count);
      return true;
    }

    SharedMem<void*> data = Ops::extract(source);
    switch (source->type()) {
      case Scalar::Int8: {
        SharedMem<int8_t*> src = data.cast<int8_t*>();
        for (size_t i = 0; i < count; ++i) {
          Ops::store(dest++, ConvertNumber<T>(Ops::load(src++)));
        }
        break;
      }
      case Scalar::Uint8:
      case Scalar::Uint8Clamped: {
        SharedMem<uint8_t*> src = data.cast<uint8_t*>();
        for (size_t i = 0; i < count; ++i) {
          Ops::store(dest++, ConvertNumber<T>(Ops::load(src++)));
        }
        break;
      }
      case Scalar::Int16: {
        SharedMem<int16_t*> src = data.cast<int16_t*>();
        for (size_t i = 0; i < count; ++i) {
          Ops::store(dest++, ConvertNumber<T>(Ops::load(src++)));
        }
        break;
      }
      case Scalar::Uint16: {
        SharedMem<uint16_t*> src = data.cast<uint16_t*>();
        for (size_t i = 0; i < count; ++i) {
          Ops::store(dest++, ConvertNumber<T>(Ops::load(src++)));
        }
        break;
      }
      case Scalar::Int32: {
        SharedMem<int32_t*> src = data.cast<int32_t*>();
        for (size_t i = 0; i < count; ++i) {
          Ops::store(dest++, ConvertNumber<T>(Ops::load(src++)));
        }
        break;
      }
      case Scalar::Uint32: {
        SharedMem<uint32_t*> src = data.cast<uint32_t*>();
        for (size_t i = 0; i < count; ++i) {
          Ops::store(dest++, ConvertNumber<T>(Ops::load(src++)));
        }
        break;
      }
      case Scalar::BigInt64: {
        SharedMem<int64_t*> src = data.cast<int64_t*>();
        for (size_t i = 0; i < count; ++i) {
          Ops::store(dest++, ConvertNumber<T>(Ops::load(src++)));
        }
        break;
      }
      case Scalar::BigUint64: {
        SharedMem<uint64_t*> src = data.cast<uint64_t*>();
        for (size_t i = 0; i < count; ++i) {
          Ops::store(dest++, ConvertNumber<T>(Ops::load(src++)));
        }
        break;
      }
      case Scalar::Float32: {
        SharedMem<float*> src = data.cast<float*>();
        for (size_t i = 0; i < count; ++i) {
          Ops::store(dest++, ConvertNumber<T>(Ops::load(src++)));
        }
        break;
      }
      case Scalar::Float64: {
        SharedMem<double*> src = data.cast<double*>();
        for (size_t i = 0; i < count; ++i) {
          Ops::store(dest++, ConvertNumber<T>(Ops::load(src++)));
        }
        break;
      }
      default:
        MOZ_CRASH("setFromTypedArray with a typed array with bogus type");
    }

    return true;
  }

  /*
   * Copy |source[0]| to |source[len]| (exclusive) elements into the typed
   * array |target|, starting at index |offset|. |source| must not be a
   * typed array.
   */
  static bool setFromNonTypedArray(JSContext* cx,
                                   Handle<TypedArrayObject*> target,
                                   HandleObject source, size_t len,
                                   size_t offset = 0) {
    MOZ_ASSERT(target->type() == TypeIDOfType<T>::id,
               "target type and NativeType must match");
    MOZ_ASSERT(!source->is<TypedArrayObject>(),
               "use setFromTypedArray instead of this method");
    MOZ_ASSERT_IF(target->hasDetachedBuffer(), target->length().isNothing());

    size_t i = 0;
    if (source->is<NativeObject>()) {
      size_t targetLength = target->length().valueOr(0);
      if (offset <= targetLength && len <= targetLength - offset) {
        // Attempt fast-path infallible conversion of dense elements up to
        // the first potentially side-effectful lookup or conversion.
        size_t bound = std::min<size_t>(
            source->as<NativeObject>().getDenseInitializedLength(), len);

        SharedMem<T*> dest =
            target->dataPointerEither().template cast<T*>() + offset;

        MOZ_ASSERT(!canConvertInfallibly(MagicValue(JS_ELEMENTS_HOLE)),
                   "the following loop must abort on holes");

        const Value* srcValues = source->as<NativeObject>().getDenseElements();
        for (; i < bound; i++) {
          if (!canConvertInfallibly(srcValues[i])) {
            break;
          }
          Ops::store(dest + i, infallibleValueToNative(srcValues[i]));
        }
        if (i == len) {
          return true;
        }
      }
    }

    // Convert and copy any remaining elements generically.
    RootedValue v(cx);
    for (; i < len; i++) {
      if constexpr (sizeof(i) == sizeof(uint32_t)) {
        if (!GetElement(cx, source, source, uint32_t(i), &v)) {
          return false;
        }
      } else {
        if (!GetElementLargeIndex(cx, source, source, i, &v)) {
          return false;
        }
      }

      T n;
      if (!valueToNative(cx, v, &n)) {
        return false;
      }

      // Ignore out-of-bounds writes, but still execute getElement/valueToNative
      // because of observable side-effects.
      if (offset + i >= target->length().valueOr(0)) {
        continue;
      }

      MOZ_ASSERT(!target->hasDetachedBuffer());

      // Compute every iteration in case getElement/valueToNative
      // detaches the underlying array buffer or GC moves the data.
      SharedMem<T*> dest =
          target->dataPointerEither().template cast<T*>() + offset + i;
      Ops::store(dest, n);
    }

    return true;
  }

  /*
   * Copy |source| into the typed array |target|.
   */
  static bool initFromIterablePackedArray(
      JSContext* cx, Handle<FixedLengthTypedArrayObject*> target,
      Handle<ArrayObject*> source) {
    MOZ_ASSERT(target->type() == TypeIDOfType<T>::id,
               "target type and NativeType must match");
    MOZ_ASSERT(!target->hasDetachedBuffer(), "target isn't detached");
    MOZ_ASSERT(IsPackedArray(source), "source array must be packed");
    MOZ_ASSERT(source->getDenseInitializedLength() <= target->length());

    size_t len = source->getDenseInitializedLength();
    size_t i = 0;

    // Attempt fast-path infallible conversion of dense elements up to the
    // first potentially side-effectful conversion.

    SharedMem<T*> dest = target->dataPointerEither().template cast<T*>();

    const Value* srcValues = source->getDenseElements();
    for (; i < len; i++) {
      if (!canConvertInfallibly(srcValues[i])) {
        break;
      }
      Ops::store(dest + i, infallibleValueToNative(srcValues[i]));
    }
    if (i == len) {
      return true;
    }

    // Convert any remaining elements by first collecting them into a
    // temporary list, and then copying them into the typed array.
    RootedValueVector values(cx);
    if (!values.append(srcValues + i, len - i)) {
      return false;
    }

    RootedValue v(cx);
    for (size_t j = 0; j < values.length(); i++, j++) {
      v = values[j];

      T n;
      if (!valueToNative(cx, v, &n)) {
        return false;
      }

      // |target| is a newly allocated typed array and not yet visible to
      // content script, so valueToNative can't detach the underlying
      // buffer.
      MOZ_ASSERT(i < target->length());

      // Compute every iteration in case GC moves the data.
      SharedMem<T*> newDest = target->dataPointerEither().template cast<T*>();
      Ops::store(newDest + i, n);
    }

    return true;
  }

 private:
  static bool setFromOverlappingTypedArray(Handle<TypedArrayObject*> target,
                                           size_t targetLength,
                                           Handle<TypedArrayObject*> source,
                                           size_t sourceLength,
                                           size_t offset) {
    // WARNING: |source| may be an unwrapped typed array from a different
    // compartment. Proceed with caution!

    MOZ_ASSERT(TypeIDOfType<T>::id == target->type(),
               "calling wrong setFromTypedArray specialization");
    MOZ_ASSERT(!target->hasDetachedBuffer(), "target isn't detached");
    MOZ_ASSERT(!source->hasDetachedBuffer(), "source isn't detached");
    MOZ_ASSERT(*target->length() >= targetLength, "target isn't shrunk");
    MOZ_ASSERT(*source->length() >= sourceLength, "source isn't shrunk");
    MOZ_ASSERT(TypedArrayObject::sameBuffer(target, source),
               "the provided arrays don't actually overlap, so it's "
               "undesirable to use this method");

    MOZ_ASSERT(offset <= targetLength);
    MOZ_ASSERT(sourceLength <= targetLength - offset);

    SharedMem<T*> dest =
        target->dataPointerEither().template cast<T*>() + offset;
    size_t len = sourceLength;

    if (source->type() == target->type()) {
      SharedMem<T*> src = source->dataPointerEither().template cast<T*>();
      Ops::podMove(dest, src, len);
      return true;
    }

    // Copy |source| in case it overlaps the target elements being set.
    size_t sourceByteLen = len * source->bytesPerElement();
    void* data = target->zone()->template pod_malloc<uint8_t>(sourceByteLen);
    if (!data) {
      return false;
    }
    Ops::memcpy(SharedMem<void*>::unshared(data), source->dataPointerEither(),
                sourceByteLen);

    switch (source->type()) {
      case Scalar::Int8: {
        int8_t* src = static_cast<int8_t*>(data);
        for (size_t i = 0; i < len; ++i) {
          Ops::store(dest++, ConvertNumber<T>(*src++));
        }
        break;
      }
      case Scalar::Uint8:
      case Scalar::Uint8Clamped: {
        uint8_t* src = static_cast<uint8_t*>(data);
        for (size_t i = 0; i < len; ++i) {
          Ops::store(dest++, ConvertNumber<T>(*src++));
        }
        break;
      }
      case Scalar::Int16: {
        int16_t* src = static_cast<int16_t*>(data);
        for (size_t i = 0; i < len; ++i) {
          Ops::store(dest++, ConvertNumber<T>(*src++));
        }
        break;
      }
      case Scalar::Uint16: {
        uint16_t* src = static_cast<uint16_t*>(data);
        for (size_t i = 0; i < len; ++i) {
          Ops::store(dest++, ConvertNumber<T>(*src++));
        }
        break;
      }
      case Scalar::Int32: {
        int32_t* src = static_cast<int32_t*>(data);
        for (size_t i = 0; i < len; ++i) {
          Ops::store(dest++, ConvertNumber<T>(*src++));
        }
        break;
      }
      case Scalar::Uint32: {
        uint32_t* src = static_cast<uint32_t*>(data);
        for (size_t i = 0; i < len; ++i) {
          Ops::store(dest++, ConvertNumber<T>(*src++));
        }
        break;
      }
      case Scalar::BigInt64: {
        int64_t* src = static_cast<int64_t*>(data);
        for (size_t i = 0; i < len; ++i) {
          Ops::store(dest++, ConvertNumber<T>(*src++));
        }
        break;
      }
      case Scalar::BigUint64: {
        uint64_t* src = static_cast<uint64_t*>(data);
        for (size_t i = 0; i < len; ++i) {
          Ops::store(dest++, ConvertNumber<T>(*src++));
        }
        break;
      }
      case Scalar::Float32: {
        float* src = static_cast<float*>(data);
        for (size_t i = 0; i < len; ++i) {
          Ops::store(dest++, ConvertNumber<T>(*src++));
        }
        break;
      }
      case Scalar::Float64: {
        double* src = static_cast<double*>(data);
        for (size_t i = 0; i < len; ++i) {
          Ops::store(dest++, ConvertNumber<T>(*src++));
        }
        break;
      }
      default:
        MOZ_CRASH(
            "setFromOverlappingTypedArray with a typed array with bogus type");
    }

    js_free(data);
    return true;
  }
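
  // Returns whether |v| converts to the native type without executing user
  // code, throwing, or reporting OOM.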
  static bool canConvertInfallibly(const Value& v) {
    if (TypeIDOfType<T>::id == Scalar::BigInt64 ||
        TypeIDOfType<T>::id == Scalar::BigUint64) {
      // Numbers, Null, Undefined, and Symbols throw a TypeError. Strings may
      // OOM and Objects may have side-effects.
      return v.isBigInt() || v.isBoolean();
    }
    // BigInts and Symbols throw a TypeError. Strings may OOM and Objects may
    // have side-effects.
    return v.isNumber() || v.isBoolean() || v.isNull() || v.isUndefined();
  }
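
  // Converts |v| to the native type. Callers must first verify that the
  // conversion is infallible with canConvertInfallibly.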
  static T infallibleValueToNative(const Value& v) {
    if (TypeIDOfType<T>::id == Scalar::BigInt64) {
      if (v.isBigInt()) {
        return T(BigInt::toInt64(v.toBigInt()));
      }
      return T(v.toBoolean());
    }
    if (TypeIDOfType<T>::id == Scalar::BigUint64) {
      if (v.isBigInt()) {
        return T(BigInt::toUint64(v.toBigInt()));
      }
      return T(v.toBoolean());
    }
    if (v.isInt32()) {
      return T(v.toInt32());
    }
    if (v.isDouble()) {
      return doubleToNative(v.toDouble());
    }
    if (v.isBoolean()) {
      return T(v.toBoolean());
    }
    if (v.isNull()) {
      return T(0);
    }

    MOZ_ASSERT(v.isUndefined());
    return TypeIsFloatingPoint<T>() ? T(JS::GenericNaN()) : T(0);
  }
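
  // Converts |v| to the native type. Unlike infallibleValueToNative, this
  // may run fallible, side-effectful conversions for strings, objects,
  // symbols, and BigInts.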
  static bool valueToNative(JSContext* cx, HandleValue v, T* result) {
    MOZ_ASSERT(!v.isMagic());

    if (MOZ_LIKELY(canConvertInfallibly(v))) {
      *result = infallibleValueToNative(v);
      return true;
    }

    if (std::is_same_v<T, int64_t>) {
      JS_TRY_VAR_OR_RETURN_FALSE(cx, *result, ToBigInt64(cx, v));
      return true;
    }

    if (std::is_same_v<T, uint64_t>) {
      JS_TRY_VAR_OR_RETURN_FALSE(cx, *result, ToBigUint64(cx, v));
      return true;
    }

    double d;
    MOZ_ASSERT(v.isString() || v.isObject() || v.isSymbol() || v.isBigInt());
    if (!(v.isString() ? StringToNumber(cx, v.toString(), &d)
                       : ToNumber(cx, v, &d))) {
      return false;
    }

    *result = doubleToNative(d);
    return true;
  }
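
  // Converts a double to the native element type: NaN becomes 0 for all
  // integer types, Uint8Clamped clamps via its constructor, and other
  // integer types truncate via JS::ToInt32/JS::ToUint32.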
  static T doubleToNative(double d) {
    if (TypeIsFloatingPoint<T>()) {
      // The JS spec doesn't distinguish among different NaN values, and
      // it deliberately doesn't specify the bit pattern written to a
      // typed array when NaN is written into it. This bit-pattern
      // inconsistency could confuse differential testing, so always
      // canonicalize NaN values in differential testing.
      if (js::SupportDifferentialTesting()) {
        d = JS::CanonicalizeNaN(d);
      }
      return T(d);
    }
    if (MOZ_UNLIKELY(std::isnan(d))) {
      return T(0);
    }
    if (TypeIDOfType<T>::id == Scalar::Uint8Clamped) {
      return T(d);
    }
    if (TypeIsUnsigned<T>()) {
      return T(JS::ToUint32(d));
    }
    return T(JS::ToInt32(d));
  }
};

inline gc::AllocKind js::FixedLengthTypedArrayObject::allocKindForTenure()
    const {
  // Fixed length typed arrays in the nursery may have a lazily allocated
  // buffer. Make sure there is room for the array's fixed data when moving
  // the array.

  if (hasBuffer()) {
    return NativeObject::allocKindForTenure();
  }

  gc::AllocKind allocKind;
  if (hasInlineElements()) {
    allocKind = AllocKindForLazyBuffer(byteLength());
  } else {
    allocKind = gc::GetGCObjectKind(getClass());
  }

  return gc::ForegroundToBackgroundAllocKind(allocKind);
}
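
// Returns the GC AllocKind with enough fixed slots after FIXED_DATA_START to
// hold |nbytes| of lazily allocated inline element data, rounded up to whole
// Values.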
/* static */ gc::AllocKind
js::FixedLengthTypedArrayObject::AllocKindForLazyBuffer(size_t nbytes) {
  MOZ_ASSERT(nbytes <= INLINE_BUFFER_LIMIT);
  if (nbytes == 0) {
    nbytes += sizeof(uint8_t);
  }
  size_t dataSlots = AlignBytes(nbytes, sizeof(Value)) / sizeof(Value);
  MOZ_ASSERT(nbytes <= dataSlots * sizeof(Value));
  return gc::GetGCObjectKind(FIXED_DATA_START + dataSlots);
}

}  // namespace js

#endif  // vm_TypedArrayObject_inl_h