# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.

# This script generates jit/AtomicOperationsGenerated.h
#
# See the big comment in jit/AtomicOperations.h for an explanation.

import buildconfig

is_64bit = "JS_64BIT" in buildconfig.defines
cpu_arch = buildconfig.substs["TARGET_CPU"]
is_gcc = buildconfig.substs["CC_TYPE"] == "gcc"


# Wrap a single instruction as a quoted C string ending in "\n\t", so that
# consecutive calls concatenate into the instruction list of an asm() block.
def fmt_insn(s):
    return '"' + s + '\\n\\t"\n'
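
# For illustration only (not used by the build): fmt_insn("mfence") returns the
# quoted C fragment
#   "mfence\n\t"
# followed by a newline, i.e. one string fragment per generated instruction.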


def gen_seqcst(fun_name):
    if cpu_arch in ("x86", "x86_64"):
        return r"""
        INLINE_ATTR void %(fun_name)s() {
            asm volatile ("mfence\n\t" ::: "memory");
        }""" % {
            "fun_name": fun_name,
        }
    if cpu_arch == "aarch64":
        return r"""
        INLINE_ATTR void %(fun_name)s() {
            asm volatile ("dmb ish\n\t" ::: "memory");
        }""" % {
            "fun_name": fun_name,
        }
    if cpu_arch == "arm":
        return r"""
        INLINE_ATTR void %(fun_name)s() {
            asm volatile ("dmb sy\n\t" ::: "memory");
        }""" % {
            "fun_name": fun_name,
        }
    raise Exception("Unexpected arch")
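
# For illustration: on x86/x86_64 the template above expands
# gen_seqcst("AtomicFenceSeqCst") to roughly
#
#   INLINE_ATTR void AtomicFenceSeqCst() {
#       asm volatile ("mfence\n\t" ::: "memory");
#   }
#
# (INLINE_ATTR is later rewritten to `inline`, or `MOZ_NEVER_INLINE inline` for
# GCC on 32-bit x86, in generate_atomics_header below.)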


def gen_load(fun_name, cpp_type, size, barrier):
    # NOTE: the assembly code must match the generated code in:
    # - CacheIRCompiler::emitAtomicsLoadResult
    # - LIRGenerator::visitLoadUnboxedScalar
    # - CodeGenerator::visitAtomicLoad64 (on 64-bit platforms)
    # - MacroAssembler::wasmLoad
    if cpu_arch in ("x86", "x86_64"):
        insns = ""
        if size == 8:
            insns += fmt_insn("movb (%[arg]), %[res]")
        elif size == 16:
            insns += fmt_insn("movw (%[arg]), %[res]")
        elif size == 32:
            insns += fmt_insn("movl (%[arg]), %[res]")
        else:
            assert size == 64
            insns += fmt_insn("movq (%[arg]), %[res]")
        return """
        INLINE_ATTR %(cpp_type)s %(fun_name)s(const %(cpp_type)s* arg) {
            %(cpp_type)s res;
            asm volatile (%(insns)s
                          : [res] "=r" (res)
                          : [arg] "r" (arg)
                          : "memory");
            return res;
        }""" % {
            "cpp_type": cpp_type,
            "fun_name": fun_name,
            "insns": insns,
        }
    if cpu_arch == "aarch64":
        insns = ""
        if size == 8:
            insns += fmt_insn("ldrb %w[res], [%x[arg]]")
        elif size == 16:
            insns += fmt_insn("ldrh %w[res], [%x[arg]]")
        elif size == 32:
            insns += fmt_insn("ldr %w[res], [%x[arg]]")
        else:
            assert size == 64
            insns += fmt_insn("ldr %x[res], [%x[arg]]")
        if barrier:
            insns += fmt_insn("dmb ish")
        return """
        INLINE_ATTR %(cpp_type)s %(fun_name)s(const %(cpp_type)s* arg) {
            %(cpp_type)s res;
            asm volatile (%(insns)s
                          : [res] "=r" (res)
                          : [arg] "r" (arg)
                          : "memory");
            return res;
        }""" % {
            "cpp_type": cpp_type,
            "fun_name": fun_name,
            "insns": insns,
        }
    if cpu_arch == "arm":
        insns = ""
        if size == 8:
            insns += fmt_insn("ldrb %[res], [%[arg]]")
        elif size == 16:
            insns += fmt_insn("ldrh %[res], [%[arg]]")
        else:
            assert size == 32
            insns += fmt_insn("ldr %[res], [%[arg]]")
        if barrier:
            insns += fmt_insn("dmb sy")
        return """
        INLINE_ATTR %(cpp_type)s %(fun_name)s(const %(cpp_type)s* arg) {
            %(cpp_type)s res;
            asm volatile (%(insns)s
                          : [res] "=r" (res)
                          : [arg] "r" (arg)
                          : "memory");
            return res;
        }""" % {
            "cpp_type": cpp_type,
            "fun_name": fun_name,
            "insns": insns,
        }
    raise Exception("Unexpected arch")
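
# For illustration: gen_load("AtomicLoad32SeqCst", "uint32_t", 32, True) on
# x86_64 yields roughly
#
#   INLINE_ATTR uint32_t AtomicLoad32SeqCst(const uint32_t* arg) {
#       uint32_t res;
#       asm volatile ("movl (%[arg]), %[res]\n\t"
#                     : [res] "=r" (res)
#                     : [arg] "r" (arg)
#                     : "memory");
#       return res;
#   }
#
# A plain MOV suffices on x86 because TSO already orders loads; the `barrier`
# flag only appends a trailing DMB on ARM/ARM64.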


def gen_store(fun_name, cpp_type, size, barrier):
    # NOTE: the assembly code must match the generated code in:
    # - CacheIRCompiler::emitAtomicsStoreResult
    # - LIRGenerator::visitStoreUnboxedScalar
    # - CodeGenerator::visitAtomicStore64 (on 64-bit platforms)
    # - MacroAssembler::wasmStore
    if cpu_arch in ("x86", "x86_64"):
        insns = ""
        if size == 8:
            insns += fmt_insn("movb %[val], (%[addr])")
        elif size == 16:
            insns += fmt_insn("movw %[val], (%[addr])")
        elif size == 32:
            insns += fmt_insn("movl %[val], (%[addr])")
        else:
            assert size == 64
            insns += fmt_insn("movq %[val], (%[addr])")
        if barrier:
            insns += fmt_insn("mfence")
        return """
        INLINE_ATTR void %(fun_name)s(%(cpp_type)s* addr, %(cpp_type)s val) {
            asm volatile (%(insns)s
                          :
                          : [addr] "r" (addr), [val] "r"(val)
                          : "memory");
        }""" % {
            "cpp_type": cpp_type,
            "fun_name": fun_name,
            "insns": insns,
        }
    if cpu_arch == "aarch64":
        insns = ""
        if barrier:
            insns += fmt_insn("dmb ish")
        if size == 8:
            insns += fmt_insn("strb %w[val], [%x[addr]]")
        elif size == 16:
            insns += fmt_insn("strh %w[val], [%x[addr]]")
        elif size == 32:
            insns += fmt_insn("str %w[val], [%x[addr]]")
        else:
            assert size == 64
            insns += fmt_insn("str %x[val], [%x[addr]]")
        if barrier:
            insns += fmt_insn("dmb ish")
        return """
        INLINE_ATTR void %(fun_name)s(%(cpp_type)s* addr, %(cpp_type)s val) {
            asm volatile (%(insns)s
                          :
                          : [addr] "r" (addr), [val] "r"(val)
                          : "memory");
        }""" % {
            "cpp_type": cpp_type,
            "fun_name": fun_name,
            "insns": insns,
        }
    if cpu_arch == "arm":
        insns = ""
        if barrier:
            insns += fmt_insn("dmb sy")
        if size == 8:
            insns += fmt_insn("strb %[val], [%[addr]]")
        elif size == 16:
            insns += fmt_insn("strh %[val], [%[addr]]")
        else:
            assert size == 32
            insns += fmt_insn("str %[val], [%[addr]]")
        if barrier:
            insns += fmt_insn("dmb sy")
        return """
        INLINE_ATTR void %(fun_name)s(%(cpp_type)s* addr, %(cpp_type)s val) {
            asm volatile (%(insns)s
                          :
                          : [addr] "r" (addr), [val] "r"(val)
                          : "memory");
        }""" % {
            "cpp_type": cpp_type,
            "fun_name": fun_name,
            "insns": insns,
        }
    raise Exception("Unexpected arch")
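
# For illustration: seq-cst stores need an explicit fence on x86 (MFENCE after
# the MOV), while on ARM/ARM64 the store is bracketed by DMB barriers. E.g.
# gen_store("AtomicStore8SeqCst", "uint8_t", 8, True) on x86_64 yields roughly
#
#   INLINE_ATTR void AtomicStore8SeqCst(uint8_t* addr, uint8_t val) {
#       asm volatile ("movb %[val], (%[addr])\n\t"
#                     "mfence\n\t"
#                     :
#                     : [addr] "r" (addr), [val] "r"(val)
#                     : "memory");
#   }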


def gen_exchange(fun_name, cpp_type, size):
    # NOTE: the assembly code must match the generated code in:
    # - MacroAssembler::atomicExchange
    # - MacroAssembler::atomicExchange64 (on 64-bit platforms)
    if cpu_arch in ("x86", "x86_64"):
        # Request an input/output register for `val` so that we can simply XCHG
        # it with *addr.
        insns = ""
        if size == 8:
            insns += fmt_insn("xchgb %[val], (%[addr])")
        elif size == 16:
            insns += fmt_insn("xchgw %[val], (%[addr])")
        elif size == 32:
            insns += fmt_insn("xchgl %[val], (%[addr])")
        else:
            assert size == 64
            insns += fmt_insn("xchgq %[val], (%[addr])")
        return """
        INLINE_ATTR %(cpp_type)s %(fun_name)s(%(cpp_type)s* addr, %(cpp_type)s val) {
            asm volatile (%(insns)s
                          : [val] "+r" (val)
                          : [addr] "r" (addr)
                          : "memory");
            return val;
        }""" % {
            "cpp_type": cpp_type,
            "fun_name": fun_name,
            "insns": insns,
        }
    if cpu_arch == "aarch64":
        insns = ""
        insns += fmt_insn("dmb ish")
        insns += fmt_insn("0:")
        if size == 8:
            insns += fmt_insn("ldxrb %w[res], [%x[addr]]")
            insns += fmt_insn("stxrb %w[scratch], %w[val], [%x[addr]]")
        elif size == 16:
            insns += fmt_insn("ldxrh %w[res], [%x[addr]]")
            insns += fmt_insn("stxrh %w[scratch], %w[val], [%x[addr]]")
        elif size == 32:
            insns += fmt_insn("ldxr %w[res], [%x[addr]]")
            insns += fmt_insn("stxr %w[scratch], %w[val], [%x[addr]]")
        else:
            assert size == 64
            insns += fmt_insn("ldxr %x[res], [%x[addr]]")
            insns += fmt_insn("stxr %w[scratch], %x[val], [%x[addr]]")
        insns += fmt_insn("cbnz %w[scratch], 0b")
        insns += fmt_insn("dmb ish")
        return """
        INLINE_ATTR %(cpp_type)s %(fun_name)s(%(cpp_type)s* addr, %(cpp_type)s val) {
            %(cpp_type)s res;
            uint32_t scratch;
            asm volatile (%(insns)s
                          : [res] "=&r"(res), [scratch] "=&r"(scratch)
                          : [addr] "r" (addr), [val] "r"(val)
                          : "memory");
            return res;
        }""" % {
            "cpp_type": cpp_type,
            "fun_name": fun_name,
            "insns": insns,
        }
    if cpu_arch == "arm":
        insns = ""
        insns += fmt_insn("dmb sy")
        insns += fmt_insn("0:")
        if size == 8:
            insns += fmt_insn("ldrexb %[res], [%[addr]]")
            insns += fmt_insn("strexb %[scratch], %[val], [%[addr]]")
        elif size == 16:
            insns += fmt_insn("ldrexh %[res], [%[addr]]")
            insns += fmt_insn("strexh %[scratch], %[val], [%[addr]]")
        else:
            assert size == 32
            insns += fmt_insn("ldrex %[res], [%[addr]]")
            insns += fmt_insn("strex %[scratch], %[val], [%[addr]]")
        insns += fmt_insn("cmp %[scratch], #1")
        insns += fmt_insn("beq 0b")
        insns += fmt_insn("dmb sy")
        return """
        INLINE_ATTR %(cpp_type)s %(fun_name)s(%(cpp_type)s* addr, %(cpp_type)s val) {
            %(cpp_type)s res;
            uint32_t scratch;
            asm volatile (%(insns)s
                          : [res] "=&r"(res), [scratch] "=&r"(scratch)
                          : [addr] "r" (addr), [val] "r"(val)
                          : "memory", "cc");
            return res;
        }""" % {
            "cpp_type": cpp_type,
            "fun_name": fun_name,
            "insns": insns,
        }
    raise Exception("Unexpected arch")
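
# For illustration: on ARM/ARM64 the exchange is an LL/SC retry loop bracketed
# by DMB barriers; on x86 a single XCHG (which is implicitly locked) suffices,
# e.g. gen_exchange("AtomicExchange32SeqCst", "uint32_t", 32) on x86_64 yields
# roughly
#
#   INLINE_ATTR uint32_t AtomicExchange32SeqCst(uint32_t* addr, uint32_t val) {
#       asm volatile ("xchgl %[val], (%[addr])\n\t"
#                     : [val] "+r" (val)
#                     : [addr] "r" (addr)
#                     : "memory");
#       return val;
#   }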


def gen_cmpxchg(fun_name, cpp_type, size):
    # NOTE: the assembly code must match the generated code in:
    # - MacroAssembler::compareExchange
    # - MacroAssembler::compareExchange64
    if cpu_arch == "x86" and size == 64:
        # Use a +A constraint to load `oldval` into EDX:EAX as input/output.
        # `newval` is loaded into ECX:EBX.
        return r"""
        INLINE_ATTR %(cpp_type)s %(fun_name)s(%(cpp_type)s* addr,
                                              %(cpp_type)s oldval,
                                              %(cpp_type)s newval) {
            asm volatile ("lock; cmpxchg8b (%%[addr])\n\t"
                          : "+A" (oldval)
                          : [addr] "r" (addr),
                            "b" (uint32_t(newval & 0xffff'ffff)),
                            "c" (uint32_t(newval >> 32))
                          : "memory", "cc");
            return oldval;
        }""" % {
            "cpp_type": cpp_type,
            "fun_name": fun_name,
        }
    if cpu_arch == "arm" and size == 64:
        return r"""
        INLINE_ATTR %(cpp_type)s %(fun_name)s(%(cpp_type)s* addr,
                                              %(cpp_type)s oldval,
                                              %(cpp_type)s newval) {
            uint32_t oldval0 = oldval & 0xffff'ffff;
            uint32_t oldval1 = oldval >> 32;
            uint32_t newval0 = newval & 0xffff'ffff;
            uint32_t newval1 = newval >> 32;
            asm volatile (
                "dmb sy\n\t"
                "0: ldrexd r0, r1, [%%[addr]]\n\t"
                "cmp r0, %%[oldval0]\n\t"
                "bne 1f\n\t"
                "cmp r1, %%[oldval1]\n\t"
                "bne 1f\n\t"
                "mov r2, %%[newval0]\n\t"
                "mov r3, %%[newval1]\n\t"
                "strexd r4, r2, r3, [%%[addr]]\n\t"
                "cmp r4, #1\n\t"
                "beq 0b\n\t"
                "1: dmb sy\n\t"
                "mov %%[oldval0], r0\n\t"
                "mov %%[oldval1], r1\n\t"
                : [oldval0] "+&r" (oldval0), [oldval1] "+&r"(oldval1)
                : [addr] "r" (addr), [newval0] "r" (newval0), [newval1] "r" (newval1)
                : "memory", "cc", "r0", "r1", "r2", "r3", "r4");
            return uint64_t(oldval0) | (uint64_t(oldval1) << 32);
        }""" % {
            "cpp_type": cpp_type,
            "fun_name": fun_name,
        }
    if cpu_arch in ("x86", "x86_64"):
        # Use a +a constraint to load `oldval` into RAX as input/output register.
        insns = ""
        if size == 8:
            insns += fmt_insn("lock; cmpxchgb %[newval], (%[addr])")
        elif size == 16:
            insns += fmt_insn("lock; cmpxchgw %[newval], (%[addr])")
        elif size == 32:
            insns += fmt_insn("lock; cmpxchgl %[newval], (%[addr])")
        else:
            assert size == 64
            insns += fmt_insn("lock; cmpxchgq %[newval], (%[addr])")
        return """
        INLINE_ATTR %(cpp_type)s %(fun_name)s(%(cpp_type)s* addr,
                                              %(cpp_type)s oldval,
                                              %(cpp_type)s newval) {
            asm volatile (%(insns)s
                          : [oldval] "+a" (oldval)
                          : [addr] "r" (addr), [newval] "r" (newval)
                          : "memory", "cc");
            return oldval;
        }""" % {
            "cpp_type": cpp_type,
            "fun_name": fun_name,
            "insns": insns,
        }
    if cpu_arch == "aarch64":
        insns = ""
        insns += fmt_insn("dmb ish")
        insns += fmt_insn("0:")
        if size == 8:
            insns += fmt_insn("uxtb %w[scratch], %w[oldval]")
            insns += fmt_insn("ldxrb %w[res], [%x[addr]]")
            insns += fmt_insn("cmp %w[res], %w[scratch]")
            insns += fmt_insn("b.ne 1f")
            insns += fmt_insn("stxrb %w[scratch], %w[newval], [%x[addr]]")
        elif size == 16:
            insns += fmt_insn("uxth %w[scratch], %w[oldval]")
            insns += fmt_insn("ldxrh %w[res], [%x[addr]]")
            insns += fmt_insn("cmp %w[res], %w[scratch]")
            insns += fmt_insn("b.ne 1f")
            insns += fmt_insn("stxrh %w[scratch], %w[newval], [%x[addr]]")
        elif size == 32:
            insns += fmt_insn("mov %w[scratch], %w[oldval]")
            insns += fmt_insn("ldxr %w[res], [%x[addr]]")
            insns += fmt_insn("cmp %w[res], %w[scratch]")
            insns += fmt_insn("b.ne 1f")
            insns += fmt_insn("stxr %w[scratch], %w[newval], [%x[addr]]")
        else:
            assert size == 64
            insns += fmt_insn("mov %x[scratch], %x[oldval]")
            insns += fmt_insn("ldxr %x[res], [%x[addr]]")
            insns += fmt_insn("cmp %x[res], %x[scratch]")
            insns += fmt_insn("b.ne 1f")
            insns += fmt_insn("stxr %w[scratch], %x[newval], [%x[addr]]")
        insns += fmt_insn("cbnz %w[scratch], 0b")
        insns += fmt_insn("1: dmb ish")
        return """
        INLINE_ATTR %(cpp_type)s %(fun_name)s(%(cpp_type)s* addr,
                                              %(cpp_type)s oldval,
                                              %(cpp_type)s newval) {
            %(cpp_type)s res, scratch;
            asm volatile (%(insns)s
                          : [res] "=&r" (res), [scratch] "=&r" (scratch)
                          : [addr] "r" (addr), [oldval] "r"(oldval), [newval] "r" (newval)
                          : "memory", "cc");
            return res;
        }""" % {
            "cpp_type": cpp_type,
            "fun_name": fun_name,
            "insns": insns,
        }
    if cpu_arch == "arm":
        insns = ""
        insns += fmt_insn("dmb sy")
        insns += fmt_insn("0:")
        if size == 8:
            insns += fmt_insn("uxtb %[scratch], %[oldval]")
            insns += fmt_insn("ldrexb %[res], [%[addr]]")
            insns += fmt_insn("cmp %[res], %[scratch]")
            insns += fmt_insn("bne 1f")
            insns += fmt_insn("strexb %[scratch], %[newval], [%[addr]]")
        elif size == 16:
            insns += fmt_insn("uxth %[scratch], %[oldval]")
            insns += fmt_insn("ldrexh %[res], [%[addr]]")
            insns += fmt_insn("cmp %[res], %[scratch]")
            insns += fmt_insn("bne 1f")
            insns += fmt_insn("strexh %[scratch], %[newval], [%[addr]]")
        else:
            assert size == 32
            insns += fmt_insn("mov %[scratch], %[oldval]")
            insns += fmt_insn("ldrex %[res], [%[addr]]")
            insns += fmt_insn("cmp %[res], %[scratch]")
            insns += fmt_insn("bne 1f")
            insns += fmt_insn("strex %[scratch], %[newval], [%[addr]]")
        insns += fmt_insn("cmp %[scratch], #1")
        insns += fmt_insn("beq 0b")
        insns += fmt_insn("1: dmb sy")
        return """
        INLINE_ATTR %(cpp_type)s %(fun_name)s(%(cpp_type)s* addr,
                                              %(cpp_type)s oldval,
                                              %(cpp_type)s newval) {
            %(cpp_type)s res, scratch;
            asm volatile (%(insns)s
                          : [res] "=&r" (res), [scratch] "=&r" (scratch)
                          : [addr] "r" (addr), [oldval] "r"(oldval), [newval] "r" (newval)
                          : "memory", "cc");
            return res;
        }""" % {
            "cpp_type": cpp_type,
            "fun_name": fun_name,
            "insns": insns,
        }
    raise Exception("Unexpected arch")
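
# For illustration: the two 64-bit special cases above exist because 32-bit x86
# needs CMPXCHG8B (with `oldval` pinned to EDX:EAX via the "+A" constraint) and
# 32-bit ARM needs LDREXD/STREXD on a register pair; every other combination
# uses the generic CMPXCHG or LL/SC loop. On x86_64,
# gen_cmpxchg("AtomicCmpXchg8SeqCst", "uint8_t", 8) yields roughly
#
#   INLINE_ATTR uint8_t AtomicCmpXchg8SeqCst(uint8_t* addr,
#                                            uint8_t oldval,
#                                            uint8_t newval) {
#       asm volatile ("lock; cmpxchgb %[newval], (%[addr])\n\t"
#                     : [oldval] "+a" (oldval)
#                     : [addr] "r" (addr), [newval] "r" (newval)
#                     : "memory", "cc");
#       return oldval;
#   }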


def gen_fetchop(fun_name, cpp_type, size, op):
    # NOTE: the assembly code must match the generated code in:
    # - MacroAssembler::atomicFetchOp
    # - MacroAssembler::atomicFetchOp64 (on 64-bit platforms)
    if cpu_arch in ("x86", "x86_64"):
        # The `add` operation can be optimized with XADD.
        if op == "add":
            insns = ""
            if size == 8:
                insns += fmt_insn("lock; xaddb %[val], (%[addr])")
            elif size == 16:
                insns += fmt_insn("lock; xaddw %[val], (%[addr])")
            elif size == 32:
                insns += fmt_insn("lock; xaddl %[val], (%[addr])")
            else:
                assert size == 64
                insns += fmt_insn("lock; xaddq %[val], (%[addr])")
            return """
        INLINE_ATTR %(cpp_type)s %(fun_name)s(%(cpp_type)s* addr, %(cpp_type)s val) {
            asm volatile (%(insns)s
                          : [val] "+&r" (val)
                          : [addr] "r" (addr)
                          : "memory", "cc");
            return val;
        }""" % {
                "cpp_type": cpp_type,
                "fun_name": fun_name,
                "insns": insns,
            }
        # Use a +a constraint to ensure `res` is stored in RAX. This is required
        # for the CMPXCHG instruction.
        insns = ""
        if size == 8:
            insns += fmt_insn("movb (%[addr]), %[res]")
            insns += fmt_insn("0: movb %[res], %[scratch]")
            insns += fmt_insn("OPb %[val], %[scratch]")
            insns += fmt_insn("lock; cmpxchgb %[scratch], (%[addr])")
        elif size == 16:
            insns += fmt_insn("movw (%[addr]), %[res]")
            insns += fmt_insn("0: movw %[res], %[scratch]")
            insns += fmt_insn("OPw %[val], %[scratch]")
            insns += fmt_insn("lock; cmpxchgw %[scratch], (%[addr])")
        elif size == 32:
            insns += fmt_insn("movl (%[addr]), %[res]")
            insns += fmt_insn("0: movl %[res], %[scratch]")
            insns += fmt_insn("OPl %[val], %[scratch]")
            insns += fmt_insn("lock; cmpxchgl %[scratch], (%[addr])")
        else:
            assert size == 64
            insns += fmt_insn("movq (%[addr]), %[res]")
            insns += fmt_insn("0: movq %[res], %[scratch]")
            insns += fmt_insn("OPq %[val], %[scratch]")
            insns += fmt_insn("lock; cmpxchgq %[scratch], (%[addr])")
        insns = insns.replace("OP", op)
        insns += fmt_insn("jnz 0b")
        return """
        INLINE_ATTR %(cpp_type)s %(fun_name)s(%(cpp_type)s* addr, %(cpp_type)s val) {
            %(cpp_type)s res, scratch;
            asm volatile (%(insns)s
                          : [res] "=&a" (res), [scratch] "=&r" (scratch)
                          : [addr] "r" (addr), [val] "r"(val)
                          : "memory", "cc");
            return res;
        }""" % {
            "cpp_type": cpp_type,
            "fun_name": fun_name,
            "insns": insns,
        }
    if cpu_arch == "aarch64":
        insns = ""
        insns += fmt_insn("dmb ish")
        insns += fmt_insn("0:")
        if size == 8:
            insns += fmt_insn("ldxrb %w[res], [%x[addr]]")
            insns += fmt_insn("OP %x[scratch1], %x[res], %x[val]")
            insns += fmt_insn("stxrb %w[scratch2], %w[scratch1], [%x[addr]]")
        elif size == 16:
            insns += fmt_insn("ldxrh %w[res], [%x[addr]]")
            insns += fmt_insn("OP %x[scratch1], %x[res], %x[val]")
            insns += fmt_insn("stxrh %w[scratch2], %w[scratch1], [%x[addr]]")
        elif size == 32:
            insns += fmt_insn("ldxr %w[res], [%x[addr]]")
            insns += fmt_insn("OP %x[scratch1], %x[res], %x[val]")
            insns += fmt_insn("stxr %w[scratch2], %w[scratch1], [%x[addr]]")
        else:
            assert size == 64
            insns += fmt_insn("ldxr %x[res], [%x[addr]]")
            insns += fmt_insn("OP %x[scratch1], %x[res], %x[val]")
            insns += fmt_insn("stxr %w[scratch2], %x[scratch1], [%x[addr]]")
        cpu_op = op
        if cpu_op == "or":
            cpu_op = "orr"
        if cpu_op == "xor":
            cpu_op = "eor"
        insns = insns.replace("OP", cpu_op)
        insns += fmt_insn("cbnz %w[scratch2], 0b")
        insns += fmt_insn("dmb ish")
        return """
        INLINE_ATTR %(cpp_type)s %(fun_name)s(%(cpp_type)s* addr, %(cpp_type)s val) {
            %(cpp_type)s res;
            uintptr_t scratch1, scratch2;
            asm volatile (%(insns)s
                          : [res] "=&r" (res), [scratch1] "=&r" (scratch1), [scratch2] "=&r"(scratch2)
                          : [addr] "r" (addr), [val] "r"(val)
                          : "memory", "cc");
            return res;
        }""" % {
            "cpp_type": cpp_type,
            "fun_name": fun_name,
            "insns": insns,
        }
    if cpu_arch == "arm":
        insns = ""
        insns += fmt_insn("dmb sy")
        insns += fmt_insn("0:")
        if size == 8:
            insns += fmt_insn("ldrexb %[res], [%[addr]]")
            insns += fmt_insn("OP %[scratch1], %[res], %[val]")
            insns += fmt_insn("strexb %[scratch2], %[scratch1], [%[addr]]")
        elif size == 16:
            insns += fmt_insn("ldrexh %[res], [%[addr]]")
            insns += fmt_insn("OP %[scratch1], %[res], %[val]")
            insns += fmt_insn("strexh %[scratch2], %[scratch1], [%[addr]]")
        else:
            assert size == 32
            insns += fmt_insn("ldrex %[res], [%[addr]]")
            insns += fmt_insn("OP %[scratch1], %[res], %[val]")
            insns += fmt_insn("strex %[scratch2], %[scratch1], [%[addr]]")
        cpu_op = op
        if cpu_op == "or":
            cpu_op = "orr"
        if cpu_op == "xor":
            cpu_op = "eor"
        insns = insns.replace("OP", cpu_op)
        insns += fmt_insn("cmp %[scratch2], #1")
        insns += fmt_insn("beq 0b")
        insns += fmt_insn("dmb sy")
        return """
        INLINE_ATTR %(cpp_type)s %(fun_name)s(%(cpp_type)s* addr, %(cpp_type)s val) {
            %(cpp_type)s res;
            uintptr_t scratch1, scratch2;
            asm volatile (%(insns)s
                          : [res] "=&r" (res), [scratch1] "=&r" (scratch1), [scratch2] "=&r"(scratch2)
                          : [addr] "r" (addr), [val] "r"(val)
                          : "memory", "cc");
            return res;
        }""" % {
            "cpp_type": cpp_type,
            "fun_name": fun_name,
            "insns": insns,
        }
    raise Exception("Unexpected arch")
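
# For illustration: gen_fetchop("AtomicAdd32SeqCst", "uint32_t", 32, "add") on
# x86_64 takes the XADD fast path and yields roughly
#
#   INLINE_ATTR uint32_t AtomicAdd32SeqCst(uint32_t* addr, uint32_t val) {
#       asm volatile ("lock; xaddl %[val], (%[addr])\n\t"
#                     : [val] "+&r" (val)
#                     : [addr] "r" (addr)
#                     : "memory", "cc");
#       return val;
#   }
#
# and/or/xor instead use a `lock; cmpxchg` retry loop on x86, and LL/SC loops
# (with `or`/`xor` renamed to `orr`/`eor`) on ARM/ARM64.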


def gen_copy(fun_name, cpp_type, size, unroll, direction):
    assert direction in ("down", "up")
    offset = 0
    if direction == "up":
        offset = unroll - 1
    insns = ""
    for i in range(unroll):
        if cpu_arch in ("x86", "x86_64"):
            if size == 1:
                insns += fmt_insn("movb OFFSET(%[src]), %[scratch]")
                insns += fmt_insn("movb %[scratch], OFFSET(%[dst])")
            elif size == 4:
                insns += fmt_insn("movl OFFSET(%[src]), %[scratch]")
                insns += fmt_insn("movl %[scratch], OFFSET(%[dst])")
            else:
                assert size == 8
                insns += fmt_insn("movq OFFSET(%[src]), %[scratch]")
                insns += fmt_insn("movq %[scratch], OFFSET(%[dst])")
        elif cpu_arch == "aarch64":
            if size == 1:
                insns += fmt_insn("ldrb %w[scratch], [%x[src], OFFSET]")
                insns += fmt_insn("strb %w[scratch], [%x[dst], OFFSET]")
            else:
                assert size == 8
                insns += fmt_insn("ldr %x[scratch], [%x[src], OFFSET]")
                insns += fmt_insn("str %x[scratch], [%x[dst], OFFSET]")
        elif cpu_arch == "arm":
            if size == 1:
                insns += fmt_insn("ldrb %[scratch], [%[src], #OFFSET]")
                insns += fmt_insn("strb %[scratch], [%[dst], #OFFSET]")
            else:
                assert size == 4
                insns += fmt_insn("ldr %[scratch], [%[src], #OFFSET]")
                insns += fmt_insn("str %[scratch], [%[dst], #OFFSET]")
        else:
            raise Exception("Unexpected arch")
        insns = insns.replace("OFFSET", str(offset * size))

        if direction == "down":
            offset += 1
        else:
            offset -= 1

    return """
        INLINE_ATTR void %(fun_name)s(uint8_t* dst, const uint8_t* src) {
            %(cpp_type)s* dst_ = reinterpret_cast<%(cpp_type)s*>(dst);
            const %(cpp_type)s* src_ = reinterpret_cast<const %(cpp_type)s*>(src);
            %(cpp_type)s scratch;
            asm volatile (%(insns)s
                          : [scratch] "=&r" (scratch)
                          : [dst] "r" (dst_), [src] "r"(src_)
                          : "memory");
        }""" % {
        "cpp_type": cpp_type,
        "fun_name": fun_name,
        "insns": insns,
    }
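
# For illustration: gen_copy unrolls `unroll` scalar moves of `size` bytes each,
# walking offsets down or up. With wordsize 8 on x86_64,
# gen_copy("AtomicCopyWordUnsynchronized", "uintptr_t", 8, 1, "down") yields
# roughly
#
#   INLINE_ATTR void AtomicCopyWordUnsynchronized(uint8_t* dst, const uint8_t* src) {
#       uintptr_t* dst_ = reinterpret_cast<uintptr_t*>(dst);
#       const uintptr_t* src_ = reinterpret_cast<const uintptr_t*>(src);
#       uintptr_t scratch;
#       asm volatile ("movq 0(%[src]), %[scratch]\n\t"
#                     "movq %[scratch], 0(%[dst])\n\t"
#                     : [scratch] "=&r" (scratch)
#                     : [dst] "r" (dst_), [src] "r"(src_)
#                     : "memory");
#   }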


HEADER_TEMPLATE = """\
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#ifndef jit_AtomicOperationsGenerated_h
#define jit_AtomicOperationsGenerated_h

/* This file is generated by jit/GenerateAtomicOperations.py. Do not edit! */

#include "mozilla/Attributes.h"

namespace js {
namespace jit {
%(contents)s
} // namespace jit
} // namespace js

#endif // jit_AtomicOperationsGenerated_h
"""


def generate_atomics_header(c_out):
    contents = ""
    if cpu_arch in ("x86", "x86_64", "aarch64") or (
        cpu_arch == "arm" and int(buildconfig.substs["ARM_ARCH"]) >= 7
    ):
        contents += "#define JS_HAVE_GENERATED_ATOMIC_OPS 1"

        # `fence` performs a full memory barrier.
        contents += gen_seqcst("AtomicFenceSeqCst")

        contents += gen_load("AtomicLoad8SeqCst", "uint8_t", 8, True)
        contents += gen_load("AtomicLoad16SeqCst", "uint16_t", 16, True)
        contents += gen_load("AtomicLoad32SeqCst", "uint32_t", 32, True)
        if is_64bit:
            contents += gen_load("AtomicLoad64SeqCst", "uint64_t", 64, True)

        # These are access-atomic up to sizeof(uintptr_t).
        contents += gen_load("AtomicLoad8Unsynchronized", "uint8_t", 8, False)
        contents += gen_load("AtomicLoad16Unsynchronized", "uint16_t", 16, False)
        contents += gen_load("AtomicLoad32Unsynchronized", "uint32_t", 32, False)
        if is_64bit:
            contents += gen_load("AtomicLoad64Unsynchronized", "uint64_t", 64, False)

        contents += gen_store("AtomicStore8SeqCst", "uint8_t", 8, True)
        contents += gen_store("AtomicStore16SeqCst", "uint16_t", 16, True)
        contents += gen_store("AtomicStore32SeqCst", "uint32_t", 32, True)
        if is_64bit:
            contents += gen_store("AtomicStore64SeqCst", "uint64_t", 64, True)

        # These are access-atomic up to sizeof(uintptr_t).
        contents += gen_store("AtomicStore8Unsynchronized", "uint8_t", 8, False)
        contents += gen_store("AtomicStore16Unsynchronized", "uint16_t", 16, False)
        contents += gen_store("AtomicStore32Unsynchronized", "uint32_t", 32, False)
        if is_64bit:
            contents += gen_store("AtomicStore64Unsynchronized", "uint64_t", 64, False)

        # `exchange` takes a cell address and a value. It stores it in the cell and
        # returns the value previously in the cell.
        contents += gen_exchange("AtomicExchange8SeqCst", "uint8_t", 8)
        contents += gen_exchange("AtomicExchange16SeqCst", "uint16_t", 16)
        contents += gen_exchange("AtomicExchange32SeqCst", "uint32_t", 32)
        if is_64bit:
            contents += gen_exchange("AtomicExchange64SeqCst", "uint64_t", 64)

        # `cmpxchg` takes a cell address, an expected value and a replacement value.
        # If the value in the cell equals the expected value then the replacement value
        # is stored in the cell. It always returns the value previously in the cell.
        contents += gen_cmpxchg("AtomicCmpXchg8SeqCst", "uint8_t", 8)
        contents += gen_cmpxchg("AtomicCmpXchg16SeqCst", "uint16_t", 16)
        contents += gen_cmpxchg("AtomicCmpXchg32SeqCst", "uint32_t", 32)
        contents += gen_cmpxchg("AtomicCmpXchg64SeqCst", "uint64_t", 64)

        # `add` adds a value atomically to the cell and returns the old value in the
        # cell. (There is no `sub`; just add the negated value.)
        contents += gen_fetchop("AtomicAdd8SeqCst", "uint8_t", 8, "add")
        contents += gen_fetchop("AtomicAdd16SeqCst", "uint16_t", 16, "add")
        contents += gen_fetchop("AtomicAdd32SeqCst", "uint32_t", 32, "add")
        if is_64bit:
            contents += gen_fetchop("AtomicAdd64SeqCst", "uint64_t", 64, "add")

        # `and` bitwise-ands a value atomically into the cell and returns the old value
        # in the cell.
        contents += gen_fetchop("AtomicAnd8SeqCst", "uint8_t", 8, "and")
        contents += gen_fetchop("AtomicAnd16SeqCst", "uint16_t", 16, "and")
        contents += gen_fetchop("AtomicAnd32SeqCst", "uint32_t", 32, "and")
        if is_64bit:
            contents += gen_fetchop("AtomicAnd64SeqCst", "uint64_t", 64, "and")

        # `or` bitwise-ors a value atomically into the cell and returns the old value
        # in the cell.
        contents += gen_fetchop("AtomicOr8SeqCst", "uint8_t", 8, "or")
        contents += gen_fetchop("AtomicOr16SeqCst", "uint16_t", 16, "or")
        contents += gen_fetchop("AtomicOr32SeqCst", "uint32_t", 32, "or")
        if is_64bit:
            contents += gen_fetchop("AtomicOr64SeqCst", "uint64_t", 64, "or")

        # `xor` bitwise-xors a value atomically into the cell and returns the old value
        # in the cell.
        contents += gen_fetchop("AtomicXor8SeqCst", "uint8_t", 8, "xor")
        contents += gen_fetchop("AtomicXor16SeqCst", "uint16_t", 16, "xor")
        contents += gen_fetchop("AtomicXor32SeqCst", "uint32_t", 32, "xor")
        if is_64bit:
            contents += gen_fetchop("AtomicXor64SeqCst", "uint64_t", 64, "xor")

        # See comment in jit/AtomicOperations-shared-jit.cpp for an explanation.
        wordsize = 8 if is_64bit else 4
        words_in_block = 8
        blocksize = words_in_block * wordsize

        contents += gen_copy(
            "AtomicCopyUnalignedBlockDownUnsynchronized",
            "uint8_t",
            1,
            blocksize,
            "down",
        )
        contents += gen_copy(
            "AtomicCopyUnalignedBlockUpUnsynchronized", "uint8_t", 1, blocksize, "up"
        )
        contents += gen_copy(
            "AtomicCopyUnalignedWordDownUnsynchronized", "uint8_t", 1, wordsize, "down"
        )
        contents += gen_copy(
            "AtomicCopyUnalignedWordUpUnsynchronized", "uint8_t", 1, wordsize, "up"
        )

        contents += gen_copy(
            "AtomicCopyBlockDownUnsynchronized",
            "uintptr_t",
            wordsize,
            words_in_block,
            "down",
        )
        contents += gen_copy(
            "AtomicCopyBlockUpUnsynchronized",
            "uintptr_t",
            wordsize,
            words_in_block,
            "up",
        )

        contents += gen_copy(
            "AtomicCopyWordUnsynchronized", "uintptr_t", wordsize, 1, "down"
        )
        contents += gen_copy("AtomicCopyByteUnsynchronized", "uint8_t", 1, 1, "down")

        contents += "\n"
        contents += (
            "constexpr size_t JS_GENERATED_ATOMICS_BLOCKSIZE = "
            + str(blocksize)
            + ";\n"
        )
        contents += (
            "constexpr size_t JS_GENERATED_ATOMICS_WORDSIZE = " + str(wordsize) + ";\n"
        )

        # Work around a GCC issue on 32-bit x86 by adding MOZ_NEVER_INLINE.
        if is_gcc and cpu_arch == "x86":
            contents = contents.replace("INLINE_ATTR", "MOZ_NEVER_INLINE inline")
        else:
            contents = contents.replace("INLINE_ATTR", "inline")

    c_out.write(
        HEADER_TEMPLATE
        % {
            "contents": contents,
        }
    )
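

if __name__ == "__main__":
    # Illustrative local entry point (an assumption, not part of the original
    # build integration): the build system normally calls
    # generate_atomics_header() with the output file object for
    # jit/AtomicOperationsGenerated.h, so running this module directly only
    # works inside a mach environment where `buildconfig` is importable.
    import sys

    generate_atomics_header(sys.stdout)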