;; GCC machine description for i386 synchronization instructions.
;; Copyright (C) 2005-2015 Free Software Foundation, Inc.
;;
;; This file is part of GCC.
;;
;; GCC is free software; you can redistribute it and/or modify
;; it under the terms of the GNU General Public License as published by
;; the Free Software Foundation; either version 3, or (at your option)
;; any later version.
;;
;; GCC is distributed in the hope that it will be useful,
;; but WITHOUT ANY WARRANTY; without even the implied warranty of
;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
;; GNU General Public License for more details.
;;
;; You should have received a copy of the GNU General Public License
;; along with GCC; see the file COPYING3.  If not see
;; <http://www.gnu.org/licenses/>.

(define_c_enum "unspec" [
  UNSPEC_LFENCE
  UNSPEC_SFENCE
  UNSPEC_MFENCE

  UNSPEC_FILD_ATOMIC
  UNSPEC_FIST_ATOMIC

  ;; __atomic support
  UNSPEC_LDA
  UNSPEC_STA
])

(define_c_enum "unspecv" [
  UNSPECV_CMPXCHG
  UNSPECV_XCHG
  UNSPECV_LOCK
])

(define_expand "sse2_lfence"
  [(set (match_dup 0)
	(unspec:BLK [(match_dup 0)] UNSPEC_LFENCE))]
  "TARGET_SSE2"
{
  operands[0] = gen_rtx_MEM (BLKmode, gen_rtx_SCRATCH (Pmode));
  MEM_VOLATILE_P (operands[0]) = 1;
})

(define_insn "*sse2_lfence"
  [(set (match_operand:BLK 0)
	(unspec:BLK [(match_dup 0)] UNSPEC_LFENCE))]
  "TARGET_SSE2"
  "lfence"
  [(set_attr "type" "sse")
   (set_attr "length_address" "0")
   (set_attr "atom_sse_attr" "lfence")
   (set_attr "memory" "unknown")])

(define_expand "sse_sfence"
  [(set (match_dup 0)
	(unspec:BLK [(match_dup 0)] UNSPEC_SFENCE))]
  "TARGET_SSE || TARGET_3DNOW_A"
{
  operands[0] = gen_rtx_MEM (BLKmode, gen_rtx_SCRATCH (Pmode));
  MEM_VOLATILE_P (operands[0]) = 1;
})

(define_insn "*sse_sfence"
  [(set (match_operand:BLK 0)
	(unspec:BLK [(match_dup 0)] UNSPEC_SFENCE))]
  "TARGET_SSE || TARGET_3DNOW_A"
  "sfence"
  [(set_attr "type" "sse")
   (set_attr "length_address" "0")
   (set_attr "atom_sse_attr" "fence")
   (set_attr "memory" "unknown")])

(define_expand "sse2_mfence"
  [(set (match_dup 0)
	(unspec:BLK [(match_dup 0)] UNSPEC_MFENCE))]
  "TARGET_SSE2"
{
  operands[0] = gen_rtx_MEM (BLKmode, gen_rtx_SCRATCH (Pmode));
  MEM_VOLATILE_P (operands[0]) = 1;
})

(define_insn "mfence_sse2"
  [(set (match_operand:BLK 0)
	(unspec:BLK [(match_dup 0)] UNSPEC_MFENCE))]
  "TARGET_64BIT || TARGET_SSE2"
  "mfence"
  [(set_attr "type" "sse")
   (set_attr "length_address" "0")
   (set_attr "atom_sse_attr" "fence")
   (set_attr "memory" "unknown")])

(define_insn "mfence_nosse"
  [(set (match_operand:BLK 0)
	(unspec:BLK [(match_dup 0)] UNSPEC_MFENCE))
   (clobber (reg:CC FLAGS_REG))]
  "!(TARGET_64BIT || TARGET_SSE2)"
  "lock{%;} or{l}\t{$0, (%%esp)|DWORD PTR [esp], 0}"
  [(set_attr "memory" "unknown")])

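;; A hedged illustration (not from this file): compiled for a pre-SSE2
;; 32-bit target, a C11 sequentially consistent fence such as
;;
;;   #include <stdatomic.h>
;;   void full_barrier (void)
;;   {
;;     atomic_thread_fence (memory_order_seq_cst);
;;   }
;;
;; is expected to use the "lock or" pattern above: a locked
;; read-modify-write of the top of the stack serves as a full barrier
;; when mfence is unavailable.
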
(define_expand "mem_thread_fence"
  [(match_operand:SI 0 "const_int_operand")]	;; model
  ""
{
  enum memmodel model = (enum memmodel) (INTVAL (operands[0]) & MEMMODEL_MASK);

  /* Unless this is a SEQ_CST fence, the i386 memory model is strong
     enough not to require barriers of any kind.  */
  if (model == MEMMODEL_SEQ_CST)
    {
      rtx (*mfence_insn)(rtx);
      rtx mem;

      if (TARGET_64BIT || TARGET_SSE2)
	mfence_insn = gen_mfence_sse2;
      else
	mfence_insn = gen_mfence_nosse;

      mem = gen_rtx_MEM (BLKmode, gen_rtx_SCRATCH (Pmode));
      MEM_VOLATILE_P (mem) = 1;

      emit_insn (mfence_insn (mem));
    }
  DONE;
})

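;; Sketch of the expander's effect (assumed lowering, not verbatim
;; compiler output): only the seq-cst fence costs an instruction.
;;
;;   #include <stdatomic.h>
;;   void fences (void)
;;   {
;;     atomic_thread_fence (memory_order_acquire);  /* no insn emitted */
;;     atomic_thread_fence (memory_order_release);  /* no insn emitted */
;;     atomic_thread_fence (memory_order_seq_cst);  /* mfence or lock or */
;;   }
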
;; ??? From volume 3 section 8.1.1 Guaranteed Atomic Operations,
;; only beginning at Pentium family processors do we get any guarantee of
;; atomicity in aligned 64-bit quantities.  Beginning at P6, we get a
;; guarantee for 64-bit accesses that do not cross a cacheline boundary.
;;
;; Note that the TARGET_CMPXCHG8B test below is a stand-in for "Pentium".
;;
;; Importantly, *no* processor makes atomicity guarantees for larger
;; accesses.  In particular, there's no way to perform an atomic TImode
;; move, despite the apparent applicability of MOVDQA et al.

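;; For instance (a sketch; the exact lowering depends on options and
;; compiler version), a 16-byte atomic load such as
;;
;;   __int128 v = __atomic_load_n (p, __ATOMIC_RELAXED);
;;
;; cannot be a single MOVDQA; it is expected to go through a
;; cmpxchg16b-based sequence when TARGET_CMPXCHG16B is available, or
;; to be left to libatomic otherwise.
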
(define_mode_iterator ATOMIC
   [QI HI SI
    (DI "TARGET_64BIT || (TARGET_CMPXCHG8B && (TARGET_80387 || TARGET_SSE))")
   ])

(define_expand "atomic_load<mode>"
  [(set (match_operand:ATOMIC 0 "nonimmediate_operand")
	(unspec:ATOMIC [(match_operand:ATOMIC 1 "memory_operand")
			(match_operand:SI 2 "const_int_operand")]
		       UNSPEC_LDA))]
  ""
{
  /* For DImode on 32-bit, we can use the FPU to perform the load.  */
  if (<MODE>mode == DImode && !TARGET_64BIT)
    emit_insn (gen_atomic_loaddi_fpu
	       (operands[0], operands[1],
		assign_386_stack_local (DImode, SLOT_TEMP)));
  else
    {
      rtx dst = operands[0];

      if (MEM_P (dst))
	dst = gen_reg_rtx (<MODE>mode);

      emit_move_insn (dst, operands[1]);

      /* Fix up the destination if needed.  */
      if (dst != operands[0])
	emit_move_insn (operands[0], dst);
    }
  DONE;
})

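;; Usage sketch (assumed lowering, not verbatim output): on 32-bit x86,
;;
;;   long long v = __atomic_load_n (&x64, __ATOMIC_RELAXED);
;;
;; goes through atomic_loaddi_fpu below, i.e. an fild/fistp (or SSE)
;; round trip via the SLOT_TEMP stack slot, since no 32-bit integer
;; instruction can read 8 bytes atomically.
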
(define_insn_and_split "atomic_loaddi_fpu"
  [(set (match_operand:DI 0 "nonimmediate_operand" "=x,m,?r")
	(unspec:DI [(match_operand:DI 1 "memory_operand" "m,m,m")]
		   UNSPEC_LDA))
   (clobber (match_operand:DI 2 "memory_operand" "=X,X,m"))
   (clobber (match_scratch:DF 3 "=X,xf,xf"))]
  "!TARGET_64BIT && (TARGET_80387 || TARGET_SSE)"
  "#"
  "&& reload_completed"
  [(const_int 0)]
{
  rtx dst = operands[0], src = operands[1];
  rtx mem = operands[2], tmp = operands[3];

  if (SSE_REG_P (dst))
    emit_move_insn (dst, src);
  else
    {
      if (MEM_P (dst))
	mem = dst;

      if (STACK_REG_P (tmp))
	{
	  emit_insn (gen_loaddi_via_fpu (tmp, src));
	  emit_insn (gen_storedi_via_fpu (mem, tmp));
	}
      else
	{
	  adjust_reg_mode (tmp, DImode);
	  emit_move_insn (tmp, src);
	  emit_move_insn (mem, tmp);
	}

      if (mem != dst)
	emit_move_insn (dst, mem);
    }
  DONE;
})

(define_expand "atomic_store<mode>"
  [(set (match_operand:ATOMIC 0 "memory_operand")
	(unspec:ATOMIC [(match_operand:ATOMIC 1 "nonimmediate_operand")
			(match_operand:SI 2 "const_int_operand")]
		       UNSPEC_STA))]
  ""
{
  enum memmodel model = (enum memmodel) (INTVAL (operands[2]) & MEMMODEL_MASK);

  if (<MODE>mode == DImode && !TARGET_64BIT)
    {
      /* For DImode on 32-bit, we can use the FPU to perform the store.  */
      /* Note that while we could perform a cmpxchg8b loop, that turns
	 out to be significantly larger than this plus a barrier.  */
      emit_insn (gen_atomic_storedi_fpu
		 (operands[0], operands[1],
		  assign_386_stack_local (DImode, SLOT_TEMP)));
    }
  else
    {
      operands[1] = force_reg (<MODE>mode, operands[1]);

      /* For seq-cst stores, when we lack MFENCE, use XCHG.  */
      if (model == MEMMODEL_SEQ_CST && !(TARGET_64BIT || TARGET_SSE2))
	{
	  emit_insn (gen_atomic_exchange<mode> (gen_reg_rtx (<MODE>mode),
						operands[0], operands[1],
						operands[2]));
	  DONE;
	}

      /* Otherwise use a store.  */
      emit_insn (gen_atomic_store<mode>_1 (operands[0], operands[1],
					   operands[2]));
    }
  /* ... followed by an MFENCE, if required.  */
  if (model == MEMMODEL_SEQ_CST)
    emit_insn (gen_mem_thread_fence (operands[2]));
  DONE;
})

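;; Sketch of the resulting code (assumptions, not verbatim output):
;;
;;   __atomic_store_n (&x, 1, __ATOMIC_SEQ_CST);
;;
;; becomes "mov $1, x" followed by mfence when mfence exists, or a
;; single xchg (itself a full barrier) on pre-SSE2 targets; relaxed
;; and release stores are just the plain mov from atomic_store<mode>_1.
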
(define_insn "atomic_store<mode>_1"
  [(set (match_operand:SWI 0 "memory_operand" "=m")
	(unspec:SWI [(match_operand:SWI 1 "<nonmemory_operand>" "<r><i>")
		     (match_operand:SI 2 "const_int_operand")]
		    UNSPEC_STA))]
  ""
  "%K2mov{<imodesuffix>}\t{%1, %0|%0, %1}")

(define_insn_and_split "atomic_storedi_fpu"
  [(set (match_operand:DI 0 "memory_operand" "=m,m,m")
	(unspec:DI [(match_operand:DI 1 "nonimmediate_operand" "x,m,?r")]
		   UNSPEC_STA))
   (clobber (match_operand:DI 2 "memory_operand" "=X,X,m"))
   (clobber (match_scratch:DF 3 "=X,xf,xf"))]
  "!TARGET_64BIT && (TARGET_80387 || TARGET_SSE)"
  "#"
  "&& reload_completed"
  [(const_int 0)]
{
  rtx dst = operands[0], src = operands[1];
  rtx mem = operands[2], tmp = operands[3];

  if (!SSE_REG_P (src))
    {
      if (REG_P (src))
	{
	  emit_move_insn (mem, src);
	  src = mem;
	}

      if (STACK_REG_P (tmp))
	{
	  emit_insn (gen_loaddi_via_fpu (tmp, src));
	  emit_insn (gen_storedi_via_fpu (dst, tmp));
	  DONE;
	}
      else
	{
	  adjust_reg_mode (tmp, DImode);
	  emit_move_insn (tmp, src);
	  src = tmp;
	}
    }
  emit_move_insn (dst, src);
  DONE;
})

;; ??? You'd think that we'd be able to perform this via FLOAT + FIX_TRUNC
;; operations.  But the fix_trunc patterns want way more setup than we want
;; to provide.  Note that the scratch is DFmode instead of XFmode in order
;; to make it easy to allocate a scratch in either SSE or FP_REGs above.

(define_insn "loaddi_via_fpu"
  [(set (match_operand:DF 0 "register_operand" "=f")
	(unspec:DF [(match_operand:DI 1 "memory_operand" "m")]
		   UNSPEC_FILD_ATOMIC))]
  "TARGET_80387"
  "fild%Z1\t%1"
  [(set_attr "type" "fmov")
   (set_attr "mode" "DF")
   (set_attr "fp_int_src" "true")])

(define_insn "storedi_via_fpu"
  [(set (match_operand:DI 0 "memory_operand" "=m")
	(unspec:DI [(match_operand:DF 1 "register_operand" "f")]
		   UNSPEC_FIST_ATOMIC))]
  "TARGET_80387"
{
  gcc_assert (find_regno_note (insn, REG_DEAD, FIRST_STACK_REG) != NULL_RTX);

  return "fistp%Z0\t%0";
}
  [(set_attr "type" "fmov")
   (set_attr "mode" "DI")])

(define_expand "atomic_compare_and_swap<mode>"
  [(match_operand:QI 0 "register_operand")	;; bool success output
   (match_operand:SWI124 1 "register_operand")	;; oldval output
   (match_operand:SWI124 2 "memory_operand")	;; memory
   (match_operand:SWI124 3 "register_operand")	;; expected input
   (match_operand:SWI124 4 "register_operand")	;; newval input
   (match_operand:SI 5 "const_int_operand")	;; is_weak
   (match_operand:SI 6 "const_int_operand")	;; success model
   (match_operand:SI 7 "const_int_operand")]	;; failure model
  "TARGET_CMPXCHG"
{
  emit_insn
   (gen_atomic_compare_and_swap<mode>_1
    (operands[1], operands[2], operands[3], operands[4], operands[6]));
  ix86_expand_setcc (operands[0], EQ, gen_rtx_REG (CCZmode, FLAGS_REG),
		     const0_rtx);
  DONE;
})

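;; Hedged example of the intended expansion:
;;
;;   bool ok = __atomic_compare_exchange_n (&x, &expected, desired, 0,
;;                                          __ATOMIC_SEQ_CST,
;;                                          __ATOMIC_SEQ_CST);
;;
;; yields "lock cmpxchg" (the old value lands in the accumulator,
;; e.g. %eax for SImode) followed by a sete to materialize the boolean
;; from ZF, per ix86_expand_setcc above.
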
(define_mode_iterator CASMODE
  [(DI "TARGET_64BIT || TARGET_CMPXCHG8B")
   (TI "TARGET_64BIT && TARGET_CMPXCHG16B")])
(define_mode_attr CASHMODE [(DI "SI") (TI "DI")])

(define_expand "atomic_compare_and_swap<mode>"
  [(match_operand:QI 0 "register_operand")	;; bool success output
   (match_operand:CASMODE 1 "register_operand")	;; oldval output
   (match_operand:CASMODE 2 "memory_operand")	;; memory
   (match_operand:CASMODE 3 "register_operand")	;; expected input
   (match_operand:CASMODE 4 "register_operand")	;; newval input
   (match_operand:SI 5 "const_int_operand")	;; is_weak
   (match_operand:SI 6 "const_int_operand")	;; success model
   (match_operand:SI 7 "const_int_operand")]	;; failure model
  "TARGET_CMPXCHG"
{
  if (<MODE>mode == DImode && TARGET_64BIT)
    {
      emit_insn
       (gen_atomic_compare_and_swapdi_1
	(operands[1], operands[2], operands[3], operands[4], operands[6]));
    }
  else
    {
      machine_mode hmode = <CASHMODE>mode;

      emit_insn
       (gen_atomic_compare_and_swap<mode>_doubleword
	(operands[1], operands[2], operands[3],
	 gen_lowpart (hmode, operands[4]), gen_highpart (hmode, operands[4]),
	 operands[6]));
    }

  ix86_expand_setcc (operands[0], EQ, gen_rtx_REG (CCZmode, FLAGS_REG),
		     const0_rtx);
  DONE;
})

;; For double-word compare and swap, we are obliged to play tricks with
;; the input newval (op3:op4) because the Intel register numbering does
;; not match the gcc register numbering, so the pair must be CX:BX.

(define_mode_attr doublemodesuffix [(SI "8") (DI "16")])

(define_insn "atomic_compare_and_swap<dwi>_doubleword"
  [(set (match_operand:<DWI> 0 "register_operand" "=A")
	(unspec_volatile:<DWI>
	  [(match_operand:<DWI> 1 "memory_operand" "+m")
	   (match_operand:<DWI> 2 "register_operand" "0")
	   (match_operand:DWIH 3 "register_operand" "b")
	   (match_operand:DWIH 4 "register_operand" "c")
	   (match_operand:SI 5 "const_int_operand")]
	  UNSPECV_CMPXCHG))
   (set (match_dup 1)
	(unspec_volatile:<DWI> [(const_int 0)] UNSPECV_CMPXCHG))
   (set (reg:CCZ FLAGS_REG)
        (unspec_volatile:CCZ [(const_int 0)] UNSPECV_CMPXCHG))]
  "TARGET_CMPXCHG<doublemodesuffix>B"
  "lock{%;} %K5cmpxchg<doublemodesuffix>b\t%1")

(define_insn "atomic_compare_and_swap<mode>_1"
  [(set (match_operand:SWI 0 "register_operand" "=a")
	(unspec_volatile:SWI
	  [(match_operand:SWI 1 "memory_operand" "+m")
	   (match_operand:SWI 2 "register_operand" "0")
	   (match_operand:SWI 3 "register_operand" "<r>")
	   (match_operand:SI 4 "const_int_operand")]
	  UNSPECV_CMPXCHG))
   (set (match_dup 1)
	(unspec_volatile:SWI [(const_int 0)] UNSPECV_CMPXCHG))
   (set (reg:CCZ FLAGS_REG)
        (unspec_volatile:CCZ [(const_int 0)] UNSPECV_CMPXCHG))]
  "TARGET_CMPXCHG"
  "lock{%;} %K4cmpxchg{<imodesuffix>}\t{%3, %1|%1, %3}")

;; For operand 2, the nonmemory_operand predicate is used instead of
;; register_operand so that the combiner can better optimize atomic
;; additions of constants.
(define_insn "atomic_fetch_add<mode>"
  [(set (match_operand:SWI 0 "register_operand" "=<r>")
	(unspec_volatile:SWI
	  [(match_operand:SWI 1 "memory_operand" "+m")
	   (match_operand:SI 3 "const_int_operand")]	;; model
	  UNSPECV_XCHG))
   (set (match_dup 1)
	(plus:SWI (match_dup 1)
		  (match_operand:SWI 2 "nonmemory_operand" "0")))
   (clobber (reg:CC FLAGS_REG))]
  "TARGET_XADD"
  "lock{%;} %K3xadd{<imodesuffix>}\t{%0, %1|%1, %0}")

;; This peephole2 and the following insn optimize
;; __sync_fetch_and_add (x, -N) == N into just lock {add,sub,inc,dec}
;; followed by testing of flags instead of lock xadd and comparisons.
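;; For example (a sketch, not verbatim compiler output):
;;
;;   if (__sync_fetch_and_add (&x, -1) == 1)
;;     release (&x);	/* hypothetical callee */
;;
;; can be emitted as "lock dec" plus a branch on ZF instead of
;; mov $-1 / lock xadd / cmp $1 / branch.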
(define_peephole2
  [(set (match_operand:SWI 0 "register_operand")
	(match_operand:SWI 2 "const_int_operand"))
   (parallel [(set (match_dup 0)
		   (unspec_volatile:SWI
		     [(match_operand:SWI 1 "memory_operand")
		      (match_operand:SI 4 "const_int_operand")]
		     UNSPECV_XCHG))
	      (set (match_dup 1)
		   (plus:SWI (match_dup 1)
			     (match_dup 0)))
	      (clobber (reg:CC FLAGS_REG))])
   (set (reg:CCZ FLAGS_REG)
	(compare:CCZ (match_dup 0)
		     (match_operand:SWI 3 "const_int_operand")))]
  "peep2_reg_dead_p (3, operands[0])
   && (unsigned HOST_WIDE_INT) INTVAL (operands[2])
      == -(unsigned HOST_WIDE_INT) INTVAL (operands[3])
   && !reg_overlap_mentioned_p (operands[0], operands[1])"
  [(parallel [(set (reg:CCZ FLAGS_REG)
		   (compare:CCZ
		     (unspec_volatile:SWI [(match_dup 1) (match_dup 4)]
					  UNSPECV_XCHG)
		     (match_dup 3)))
	      (set (match_dup 1)
		   (plus:SWI (match_dup 1)
			     (match_dup 2)))])])

(define_insn "*atomic_fetch_add_cmp<mode>"
  [(set (reg:CCZ FLAGS_REG)
	(compare:CCZ
	  (unspec_volatile:SWI
	    [(match_operand:SWI 0 "memory_operand" "+m")
	     (match_operand:SI 3 "const_int_operand")]	;; model
	    UNSPECV_XCHG)
	  (match_operand:SWI 2 "const_int_operand" "i")))
   (set (match_dup 0)
	(plus:SWI (match_dup 0)
		  (match_operand:SWI 1 "const_int_operand" "i")))]
  "(unsigned HOST_WIDE_INT) INTVAL (operands[1])
   == -(unsigned HOST_WIDE_INT) INTVAL (operands[2])"
{
  if (incdec_operand (operands[1], <MODE>mode))
    {
      if (operands[1] == const1_rtx)
	return "lock{%;} %K3inc{<imodesuffix>}\t%0";
      else
	{
	  gcc_assert (operands[1] == constm1_rtx);
	  return "lock{%;} %K3dec{<imodesuffix>}\t%0";
	}
    }

  if (x86_maybe_negate_const_int (&operands[1], <MODE>mode))
    return "lock{%;} %K3sub{<imodesuffix>}\t{%1, %0|%0, %1}";

  return "lock{%;} %K3add{<imodesuffix>}\t{%1, %0|%0, %1}";
})

;; Recall that xchg implicitly sets LOCK#, so adding it again wastes space.
;; In addition, it is always a full barrier, so we can ignore the memory model.
(define_insn "atomic_exchange<mode>"
  [(set (match_operand:SWI 0 "register_operand" "=<r>")	;; output
	(unspec_volatile:SWI
	  [(match_operand:SWI 1 "memory_operand" "+m")		;; memory
	   (match_operand:SI 3 "const_int_operand")]		;; model
	  UNSPECV_XCHG))
   (set (match_dup 1)
	(match_operand:SWI 2 "register_operand" "0"))]		;; input
  ""
  "%K3xchg{<imodesuffix>}\t{%1, %0|%0, %1}")

(define_insn "atomic_add<mode>"
  [(set (match_operand:SWI 0 "memory_operand" "+m")
	(unspec_volatile:SWI
	  [(plus:SWI (match_dup 0)
		     (match_operand:SWI 1 "nonmemory_operand" "<r><i>"))
	   (match_operand:SI 2 "const_int_operand")]	;; model
	  UNSPECV_LOCK))
   (clobber (reg:CC FLAGS_REG))]
  ""
{
  if (incdec_operand (operands[1], <MODE>mode))
    {
      if (operands[1] == const1_rtx)
	return "lock{%;} %K2inc{<imodesuffix>}\t%0";
      else
	{
	  gcc_assert (operands[1] == constm1_rtx);
	  return "lock{%;} %K2dec{<imodesuffix>}\t%0";
	}
    }

  if (x86_maybe_negate_const_int (&operands[1], <MODE>mode))
    return "lock{%;} %K2sub{<imodesuffix>}\t{%1, %0|%0, %1}";

  return "lock{%;} %K2add{<imodesuffix>}\t{%1, %0|%0, %1}";
})

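;; Usage sketch (assumed): when the result is unused,
;;
;;   __atomic_fetch_add (&x, 1, __ATOMIC_RELAXED);
;;
;; matches this pattern instead of lock xadd and emits "lock inc"
;; ("lock add"/"lock sub" for other constants).
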
(define_insn "atomic_sub<mode>"
  [(set (match_operand:SWI 0 "memory_operand" "+m")
	(unspec_volatile:SWI
	  [(minus:SWI (match_dup 0)
		      (match_operand:SWI 1 "nonmemory_operand" "<r><i>"))
	   (match_operand:SI 2 "const_int_operand")]	;; model
	  UNSPECV_LOCK))
   (clobber (reg:CC FLAGS_REG))]
  ""
{
  if (incdec_operand (operands[1], <MODE>mode))
    {
      if (operands[1] == const1_rtx)
	return "lock{%;} %K2dec{<imodesuffix>}\t%0";
      else
	{
	  gcc_assert (operands[1] == constm1_rtx);
	  return "lock{%;} %K2inc{<imodesuffix>}\t%0";
	}
    }

  if (x86_maybe_negate_const_int (&operands[1], <MODE>mode))
    return "lock{%;} %K2add{<imodesuffix>}\t{%1, %0|%0, %1}";

  return "lock{%;} %K2sub{<imodesuffix>}\t{%1, %0|%0, %1}";
})

(define_insn "atomic_<logic><mode>"
  [(set (match_operand:SWI 0 "memory_operand" "+m")
	(unspec_volatile:SWI
	  [(any_logic:SWI (match_dup 0)
			  (match_operand:SWI 1 "nonmemory_operand" "<r><i>"))
	   (match_operand:SI 2 "const_int_operand")]	;; model
	  UNSPECV_LOCK))
   (clobber (reg:CC FLAGS_REG))]
  ""
  "lock{%;} %K2<logic>{<imodesuffix>}\t{%1, %0|%0, %1}")
