1 ;; GCC machine description for i386 synchronization instructions.
2 ;; Copyright (C) 2005-2017 Free Software Foundation, Inc.
4 ;; This file is part of GCC.
6 ;; GCC is free software; you can redistribute it and/or modify
7 ;; it under the terms of the GNU General Public License as published by
8 ;; the Free Software Foundation; either version 3, or (at your option)
11 ;; GCC is distributed in the hope that it will be useful,
12 ;; but WITHOUT ANY WARRANTY; without even the implied warranty of
13 ;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 ;; GNU General Public License for more details.
16 ;; You should have received a copy of the GNU General Public License
17 ;; along with GCC; see the file COPYING3. If not see
18 ;; <http://www.gnu.org/licenses/>.
;; Enumerations of the unspec and unspec_volatile codes referenced by the
;; patterns below (UNSPEC_LFENCE/SFENCE/MFENCE, UNSPECV_CMPXCHG, etc.).
;; NOTE(review): the enum member lists appear to have been dropped from this
;; extraction (gaps in the embedded numbering); confirm against upstream.
20 (define_c_enum "unspec" [
36 (define_c_enum "unspecv" [
;; Expander for the SSE2 LFENCE builtin.  The fence is modeled as an unspec
;; write to a volatile BLKmode MEM of a scratch address, so the RTL
;; optimizers treat it as touching all of memory and will not move memory
;; accesses across it.
;; NOTE(review): interior lines appear missing (numbering gaps).
42 (define_expand "sse2_lfence"
44 (unspec:BLK [(match_dup 0)] UNSPEC_LFENCE))]
47 operands[0] = gen_rtx_MEM (BLKmode, gen_rtx_SCRATCH (Pmode));
48 MEM_VOLATILE_P (operands[0]) = 1;
;; Matcher for the LFENCE fence pattern created by the expander above.
;; "memory" is "unknown" so the scheduler assumes arbitrary memory effects.
;; NOTE(review): the insn condition and output template lines appear missing
;; from this extraction (numbering gaps); confirm against upstream.
51 (define_insn "*sse2_lfence"
52 [(set (match_operand:BLK 0)
53 (unspec:BLK [(match_dup 0)] UNSPEC_LFENCE))]
56 [(set_attr "type" "sse")
57 (set_attr "length_address" "0")
58 (set_attr "atom_sse_attr" "lfence")
59 (set_attr "memory" "unknown")])
;; Expander for the SSE SFENCE (store fence) builtin; also available on
;; 3DNow!-A parts per the visible condition.  Same volatile-BLK-MEM barrier
;; modeling as the lfence expander.
;; NOTE(review): interior lines appear missing (numbering gaps).
61 (define_expand "sse_sfence"
63 (unspec:BLK [(match_dup 0)] UNSPEC_SFENCE))]
64 "TARGET_SSE || TARGET_3DNOW_A"
66 operands[0] = gen_rtx_MEM (BLKmode, gen_rtx_SCRATCH (Pmode));
67 MEM_VOLATILE_P (operands[0]) = 1;
;; Matcher for the SFENCE fence pattern; "memory" = "unknown" keeps the
;; scheduler from reordering memory accesses across it.
;; NOTE(review): the output template line appears missing (numbering gap).
70 (define_insn "*sse_sfence"
71 [(set (match_operand:BLK 0)
72 (unspec:BLK [(match_dup 0)] UNSPEC_SFENCE))]
73 "TARGET_SSE || TARGET_3DNOW_A"
75 [(set_attr "type" "sse")
76 (set_attr "length_address" "0")
77 (set_attr "atom_sse_attr" "fence")
78 (set_attr "memory" "unknown")])
;; Expander for the SSE2 MFENCE builtin.  Same volatile BLKmode scratch-MEM
;; trick as the lfence/sfence expanders above.
;; NOTE(review): interior lines appear missing (numbering gaps).
80 (define_expand "sse2_mfence"
82 (unspec:BLK [(match_dup 0)] UNSPEC_MFENCE))]
85 operands[0] = gen_rtx_MEM (BLKmode, gen_rtx_SCRATCH (Pmode));
86 MEM_VOLATILE_P (operands[0]) = 1;
;; Full memory barrier via the MFENCE instruction, usable when SSE2 (or
;; 64-bit mode, which implies it) is available.  Named, rather than "*"-
;; internal, because mem_thread_fence below emits it directly.
;; NOTE(review): the output template line appears missing (numbering gap).
89 (define_insn "mfence_sse2"
90 [(set (match_operand:BLK 0)
91 (unspec:BLK [(match_dup 0)] UNSPEC_MFENCE))]
92 "TARGET_64BIT || TARGET_SSE2"
94 [(set_attr "type" "sse")
95 (set_attr "length_address" "0")
96 (set_attr "atom_sse_attr" "fence")
97 (set_attr "memory" "unknown")])
;; Full memory barrier for pre-SSE2 targets: a locked OR of $0 into the
;; top-of-stack word, which is a serializing locked RMW with no visible
;; data effect.  Clobbers the flags, hence the CC clobber in the pattern.
99 (define_insn "mfence_nosse"
100 [(set (match_operand:BLK 0)
101 (unspec:BLK [(match_dup 0)] UNSPEC_MFENCE))
102 (clobber (reg:CC FLAGS_REG))]
103 "!(TARGET_64BIT || TARGET_SSE2)"
104 "lock{%;} or{l}\t{$0, (%%esp)|DWORD PTR [esp], 0}"
105 [(set_attr "memory" "unknown")])
;; Standard-name expander for __atomic_thread_fence.  Operand 0 is the C++11
;; memory-model constant.  Only a SEQ_CST fence emits code: the x86 hardware
;; memory model (TSO) already orders everything weaker.  Chooses mfence_sse2
;; or the locked-or fallback based on ISA availability.
;; NOTE(review): some statement lines appear missing (numbering gaps).
107 (define_expand "mem_thread_fence"
108 [(match_operand:SI 0 "const_int_operand")] ;; model
111 enum memmodel model = memmodel_from_int (INTVAL (operands[0]));
113 /* Unless this is a SEQ_CST fence, the i386 memory model is strong
114 enough not to require barriers of any kind. */
115 if (is_mm_seq_cst (model))
117 rtx (*mfence_insn)(rtx);
120 if (TARGET_64BIT || TARGET_SSE2)
121 mfence_insn = gen_mfence_sse2;
123 mfence_insn = gen_mfence_nosse;
125 mem = gen_rtx_MEM (BLKmode, gen_rtx_SCRATCH (Pmode));
126 MEM_VOLATILE_P (mem) = 1;
128 emit_insn (mfence_insn (mem));
133 ;; ??? From volume 3 section 8.1.1 Guaranteed Atomic Operations,
134 ;; Only beginning at Pentium family processors do we get any guarantee of
135 ;; atomicity in aligned 64-bit quantities. Beginning at P6, we get a
136 ;; guarantee for 64-bit accesses that do not cross a cacheline boundary.
138 ;; Note that the TARGET_CMPXCHG8B test below is a stand-in for "Pentium".
140 ;; Importantly, *no* processor makes atomicity guarantees for larger
141 ;; accesses. In particular, there's no way to perform an atomic TImode
142 ;; move, despite the apparent applicability of MOVDQA et al.
;; Modes for which plain loads/stores are atomic.  DImode is conditional:
;; on 32-bit it needs CMPXCHG8B-era hardware plus an FPU or SSE register
;; wide enough to move 64 bits in one access (see the fpu/sse helpers below).
;; NOTE(review): the smaller-mode entries (QI/HI/SI) and closing bracket
;; appear missing from this extraction.
144 (define_mode_iterator ATOMIC
146 (DI "TARGET_64BIT || (TARGET_CMPXCHG8B && (TARGET_80387 || TARGET_SSE))")
;; Standard-name expander for __atomic_load.  Operand 2 is the memory model
;; (ignored here: aligned loads of these modes are naturally atomic on x86).
;; The DImode-on-32-bit case routes through the FPU/SSE helper with a stack
;; temporary; otherwise a plain move suffices, with a pseudo used as an
;; intermediate destination and copied out at the end if needed.
;; NOTE(review): some lines appear missing (numbering gaps).
149 (define_expand "atomic_load<mode>"
150 [(set (match_operand:ATOMIC 0 "nonimmediate_operand")
151 (unspec:ATOMIC [(match_operand:ATOMIC 1 "memory_operand")
152 (match_operand:SI 2 "const_int_operand")]
156 /* For DImode on 32-bit, we can use the FPU to perform the load. */
157 if (<MODE>mode == DImode && !TARGET_64BIT)
158 emit_insn (gen_atomic_loaddi_fpu
159 (operands[0], operands[1],
160 assign_386_stack_local (DImode, SLOT_TEMP)));
163 rtx dst = operands[0];
166 dst = gen_reg_rtx (<MODE>mode);
168 emit_move_insn (dst, operands[1]);
170 /* Fix up the destination if needed. */
171 if (dst != operands[0])
172 emit_move_insn (operands[0], dst);
;; 32-bit atomic DImode load via one 64-bit FPU (fild) or SSE (movq) access.
;; Alternatives: 0 = SSE reg dest (direct), 1 = memory dest via fp/sse temp,
;; 2 = integer regs via the stack temporary in operand 2.  Splits after
;; reload into the via_fpu/via_sse helper insns defined later in this file.
;; NOTE(review): several lines (split condition, braces, control flow)
;; appear missing from this extraction; confirm against upstream.
177 (define_insn_and_split "atomic_loaddi_fpu"
178 [(set (match_operand:DI 0 "nonimmediate_operand" "=x,m,?r")
179 (unspec:DI [(match_operand:DI 1 "memory_operand" "m,m,m")]
181 (clobber (match_operand:DI 2 "memory_operand" "=X,X,m"))
182 (clobber (match_scratch:DF 3 "=X,xf,xf"))]
183 "!TARGET_64BIT && (TARGET_80387 || TARGET_SSE)"
185 "&& reload_completed"
188 rtx dst = operands[0], src = operands[1];
189 rtx mem = operands[2], tmp = operands[3];
192 emit_move_insn (dst, src);
198 if (STACK_REG_P (tmp))
200 emit_insn (gen_loaddi_via_fpu (tmp, src));
201 emit_insn (gen_storedi_via_fpu (mem, tmp));
205 emit_insn (gen_loaddi_via_sse (tmp, src));
206 emit_insn (gen_storedi_via_sse (mem, tmp));
210 emit_move_insn (dst, mem);
;; Peephole: a FILD_ATOMIC load into an FP reg, a FIST_ATOMIC store of it to
;; a stack slot, then a DFmode reload from that slot collapses to a single
;; DFmode load from the original memory, when the intermediate reg dies and
;; the reload reads exactly the stored slot.
;; NOTE(review): the "(define_peephole2" header line and some unspec code
;; lines appear missing from this extraction; code left byte-identical.
216 [(set (match_operand:DF 0 "fp_register_operand")
217 (unspec:DF [(match_operand:DI 1 "memory_operand")]
219 (set (match_operand:DI 2 "memory_operand")
220 (unspec:DI [(match_dup 0)]
222 (set (match_operand:DF 3 "fp_register_operand")
223 (match_operand:DF 4 "memory_operand"))]
225 && peep2_reg_dead_p (2, operands[0])
226 && rtx_equal_p (operands[4], adjust_address_nv (operands[2], DFmode, 0))"
227 [(set (match_dup 3) (match_dup 5))]
228 "operands[5] = gen_lowpart (DFmode, operands[1]);")
;; Same collapse as the preceding FPU peephole, but for the SSE-register
;; variant of the atomic DImode load (sse_reg_operand intermediate).
;; NOTE(review): the "(define_peephole2" header line and some unspec code
;; lines appear missing from this extraction; code left byte-identical.
231 [(set (match_operand:DF 0 "sse_reg_operand")
232 (unspec:DF [(match_operand:DI 1 "memory_operand")]
234 (set (match_operand:DI 2 "memory_operand")
235 (unspec:DI [(match_dup 0)]
237 (set (match_operand:DF 3 "fp_register_operand")
238 (match_operand:DF 4 "memory_operand"))]
240 && peep2_reg_dead_p (2, operands[0])
241 && rtx_equal_p (operands[4], adjust_address_nv (operands[2], DFmode, 0))"
242 [(set (match_dup 3) (match_dup 5))]
243 "operands[5] = gen_lowpart (DFmode, operands[1]);")
;; Standard-name expander for __atomic_store.  Operand 2 is the memory
;; model.  Cases visible in the code: DImode on 32-bit goes through the
;; FPU/SSE store helper; a seq-cst store without MFENCE support is done as
;; an XCHG (which is a full barrier); otherwise a plain store, followed by
;; mem_thread_fence when the model is seq-cst.
;; NOTE(review): some lines appear missing (numbering gaps).
245 (define_expand "atomic_store<mode>"
246 [(set (match_operand:ATOMIC 0 "memory_operand")
247 (unspec:ATOMIC [(match_operand:ATOMIC 1 "nonimmediate_operand")
248 (match_operand:SI 2 "const_int_operand")]
252 enum memmodel model = memmodel_from_int (INTVAL (operands[2]));
254 if (<MODE>mode == DImode && !TARGET_64BIT)
256 /* For DImode on 32-bit, we can use the FPU to perform the store. */
257 /* Note that while we could perform a cmpxchg8b loop, that turns
258 out to be significantly larger than this plus a barrier. */
259 emit_insn (gen_atomic_storedi_fpu
260 (operands[0], operands[1],
261 assign_386_stack_local (DImode, SLOT_TEMP)));
265 operands[1] = force_reg (<MODE>mode, operands[1]);
267 /* For seq-cst stores, when we lack MFENCE, use XCHG. */
268 if (is_mm_seq_cst (model) && !(TARGET_64BIT || TARGET_SSE2))
270 emit_insn (gen_atomic_exchange<mode> (gen_reg_rtx (<MODE>mode),
271 operands[0], operands[1],
276 /* Otherwise use a store. */
277 emit_insn (gen_atomic_store<mode>_1 (operands[0], operands[1],
280 /* ... followed by an MFENCE, if required. */
281 if (is_mm_seq_cst (model))
282 emit_insn (gen_mem_thread_fence (operands[2]));
;; The plain-store form of an atomic store: a single mov, wrapped in an
;; unspec so it cannot be torn or combined away.  %K2 prints any memory-
;; model-dependent prefix/annotation for operand 2.
;; NOTE(review): the unspec code and insn condition lines appear missing.
286 (define_insn "atomic_store<mode>_1"
287 [(set (match_operand:SWI 0 "memory_operand" "=m")
288 (unspec:SWI [(match_operand:SWI 1 "<nonmemory_operand>" "<r><i>")
289 (match_operand:SI 2 "const_int_operand")]
292 "%K2mov{<imodesuffix>}\t{%1, %0|%0, %1}")
;; 32-bit atomic DImode store via one 64-bit FPU (fistp) or SSE (movq)
;; access.  Alternatives: 0 = source already in an SSE reg, 1 = memory
;; source, 2 = integer-reg source staged through the stack slot (operand 2).
;; Splits after reload into the via_fpu/via_sse helpers below.
;; NOTE(review): several lines (split condition, braces, control flow)
;; appear missing from this extraction; confirm against upstream.
294 (define_insn_and_split "atomic_storedi_fpu"
295 [(set (match_operand:DI 0 "memory_operand" "=m,m,m")
296 (unspec:DI [(match_operand:DI 1 "nonimmediate_operand" "x,m,?r")]
298 (clobber (match_operand:DI 2 "memory_operand" "=X,X,m"))
299 (clobber (match_scratch:DF 3 "=X,xf,xf"))]
300 "!TARGET_64BIT && (TARGET_80387 || TARGET_SSE)"
302 "&& reload_completed"
305 rtx dst = operands[0], src = operands[1];
306 rtx mem = operands[2], tmp = operands[3];
309 emit_move_insn (dst, src);
314 emit_move_insn (mem, src);
318 if (STACK_REG_P (tmp))
320 emit_insn (gen_loaddi_via_fpu (tmp, src));
321 emit_insn (gen_storedi_via_fpu (dst, tmp));
325 emit_insn (gen_loaddi_via_sse (tmp, src));
326 emit_insn (gen_storedi_via_sse (dst, tmp));
;; Peephole: a DFmode spill to the staging slot, a FILD_ATOMIC reload from
;; it, and the FIST_ATOMIC store collapse into one DFmode store to the final
;; destination, when the intermediate reg dies and the slot addresses match.
;; NOTE(review): the "(define_peephole2" header line and some unspec code
;; lines appear missing from this extraction; code left byte-identical.
333 [(set (match_operand:DF 0 "memory_operand")
334 (match_operand:DF 1 "fp_register_operand"))
335 (set (match_operand:DF 2 "fp_register_operand")
336 (unspec:DF [(match_operand:DI 3 "memory_operand")]
338 (set (match_operand:DI 4 "memory_operand")
339 (unspec:DI [(match_dup 2)]
340 UNSPEC_FIST_ATOMIC))]
342 && peep2_reg_dead_p (3, operands[2])
343 && rtx_equal_p (operands[0], adjust_address_nv (operands[3], DFmode, 0))"
344 [(set (match_dup 5) (match_dup 1))]
345 "operands[5] = gen_lowpart (DFmode, operands[4]);")
;; Same collapse as the preceding store peephole, but for the SSE-register
;; intermediate (sse_reg_operand on operand 2).
;; NOTE(review): the "(define_peephole2" header line and some unspec code
;; lines appear missing from this extraction; code left byte-identical.
348 [(set (match_operand:DF 0 "memory_operand")
349 (match_operand:DF 1 "fp_register_operand"))
350 (set (match_operand:DF 2 "sse_reg_operand")
351 (unspec:DF [(match_operand:DI 3 "memory_operand")]
353 (set (match_operand:DI 4 "memory_operand")
354 (unspec:DI [(match_dup 2)]
357 && peep2_reg_dead_p (3, operands[2])
358 && rtx_equal_p (operands[0], adjust_address_nv (operands[3], DFmode, 0))"
359 [(set (match_dup 5) (match_dup 1))]
360 "operands[5] = gen_lowpart (DFmode, operands[4]);")
362 ;; ??? You'd think that we'd be able to perform this via FLOAT + FIX_TRUNC
363 ;; operations. But the fix_trunc patterns want way more setup than we want
364 ;; to provide. Note that the scratch is DFmode instead of XFmode in order
365 ;; to make it easy to allocate a scratch in either SSE or FP_REGs above.
;; One 64-bit FPU load (integer load, fild) of the DImode source into an
;; x87 register, represented as a DFmode unspec.  The DFmode scratch choice
;; is explained in the comment block preceding these helpers.
;; NOTE(review): the insn condition and template lines appear missing.
367 (define_insn "loaddi_via_fpu"
368 [(set (match_operand:DF 0 "register_operand" "=f")
369 (unspec:DF [(match_operand:DI 1 "memory_operand" "m")]
370 UNSPEC_FILD_ATOMIC))]
373 [(set_attr "type" "fmov")
374 (set_attr "mode" "DF")
375 (set_attr "fp_int_src" "true")])
;; One 64-bit FPU store (fistp) of the x87 register to the DImode
;; destination.  The assert checks the source value dies here (fistp pops
;; the x87 stack, so the register must not be live afterwards).
;; NOTE(review): some brace/condition lines appear missing (numbering gaps).
377 (define_insn "storedi_via_fpu"
378 [(set (match_operand:DI 0 "memory_operand" "=m")
379 (unspec:DI [(match_operand:DF 1 "register_operand" "f")]
380 UNSPEC_FIST_ATOMIC))]
383 gcc_assert (find_regno_note (insn, REG_DEAD, FIRST_STACK_REG) != NULL_RTX);
385 return "fistp%Z0\t%0";
387 [(set_attr "type" "fmov")
388 (set_attr "mode" "DI")])
;; One 64-bit SSE load of the DImode source into an SSE register: movq when
;; SSE2 is available, movlps otherwise (the visible two return statements).
;; NOTE(review): the unspec code, condition and branching lines appear
;; missing from this extraction; code left byte-identical.
390 (define_insn "loaddi_via_sse"
391 [(set (match_operand:DF 0 "register_operand" "=x")
392 (unspec:DF [(match_operand:DI 1 "memory_operand" "m")]
397 return "%vmovq\t{%1, %0|%0, %1}";
398 return "movlps\t{%1, %0|%0, %1}";
400 [(set_attr "type" "ssemov")
401 (set_attr "mode" "DI")])
;; One 64-bit SSE store of the SSE register to the DImode destination:
;; movq when SSE2 is available, movlps otherwise.
;; NOTE(review): the unspec code, condition and branching lines appear
;; missing from this extraction; code left byte-identical.
403 (define_insn "storedi_via_sse"
404 [(set (match_operand:DI 0 "memory_operand" "=m")
405 (unspec:DI [(match_operand:DF 1 "register_operand" "x")]
410 return "%vmovq\t{%1, %0|%0, %1}";
411 return "movlps\t{%1, %0|%0, %1}";
413 [(set_attr "type" "ssemov")
414 (set_attr "mode" "DI")])
;; Standard-name expander for __atomic_compare_exchange on the small
;; integer modes (SWI124).  Emits the cmpxchg insn pattern, then converts
;; the resulting ZF into the boolean success output via setcc.  The weak
;; flag (operand 5) and failure model (operand 7) are not used by x86.
;; NOTE(review): some lines appear missing (numbering gaps).
416 (define_expand "atomic_compare_and_swap<mode>"
417 [(match_operand:QI 0 "register_operand") ;; bool success output
418 (match_operand:SWI124 1 "register_operand") ;; oldval output
419 (match_operand:SWI124 2 "memory_operand") ;; memory
420 (match_operand:SWI124 3 "register_operand") ;; expected input
421 (match_operand:SWI124 4 "register_operand") ;; newval input
422 (match_operand:SI 5 "const_int_operand") ;; is_weak
423 (match_operand:SI 6 "const_int_operand") ;; success model
424 (match_operand:SI 7 "const_int_operand")] ;; failure model
428 (gen_atomic_compare_and_swap<mode>_1
429 (operands[1], operands[2], operands[3], operands[4], operands[6]));
430 ix86_expand_setcc (operands[0], EQ, gen_rtx_REG (CCZmode, FLAGS_REG),
;; Wide modes handled by the second compare-and-swap expander: DImode needs
;; CMPXCHG8B on 32-bit, TImode needs CMPXCHG16B on 64-bit.  CASHMODE maps
;; each to its half-width mode used for the doubleword register pair.
435 (define_mode_iterator CASMODE
436 [(DI "TARGET_64BIT || TARGET_CMPXCHG8B")
437 (TI "TARGET_64BIT && TARGET_CMPXCHG16B")])
438 (define_mode_attr CASHMODE [(DI "SI") (TI "DI")])
;; Compare-and-swap expander for the wide modes.  DImode on 64-bit uses the
;; ordinary cmpxchg pattern; otherwise the doubleword cmpxchg8b/16b pattern
;; is used, with newval split into low/high half-mode registers.  Success
;; output comes from ZF via setcc, as in the SWI124 expander.
;; NOTE(review): some lines appear missing (numbering gaps).
440 (define_expand "atomic_compare_and_swap<mode>"
441 [(match_operand:QI 0 "register_operand") ;; bool success output
442 (match_operand:CASMODE 1 "register_operand") ;; oldval output
443 (match_operand:CASMODE 2 "memory_operand") ;; memory
444 (match_operand:CASMODE 3 "register_operand") ;; expected input
445 (match_operand:CASMODE 4 "register_operand") ;; newval input
446 (match_operand:SI 5 "const_int_operand") ;; is_weak
447 (match_operand:SI 6 "const_int_operand") ;; success model
448 (match_operand:SI 7 "const_int_operand")] ;; failure model
451 if (<MODE>mode == DImode && TARGET_64BIT)
454 (gen_atomic_compare_and_swapdi_1
455 (operands[1], operands[2], operands[3], operands[4], operands[6]));
459 machine_mode hmode = <CASHMODE>mode;
462 (gen_atomic_compare_and_swap<mode>_doubleword
463 (operands[1], operands[2], operands[3],
464 gen_lowpart (hmode, operands[4]), gen_highpart (hmode, operands[4]),
468 ix86_expand_setcc (operands[0], EQ, gen_rtx_REG (CCZmode, FLAGS_REG),
473 ;; For double-word compare and swap, we are obliged to play tricks with
474 ;; the input newval (op3:op4) because the Intel register numbering does
475 ;; not match the gcc register numbering, so the pair must be CX:BX.
477 (define_mode_attr doublemodesuffix [(SI "8") (DI "16")])
;; Double-word CAS via lock cmpxchg8b/cmpxchg16b.  "=A" ties the result to
;; the DX:AX (or RDX:RAX) pair; constraints "b"/"c" pin the newval halves
;; to BX and CX as the instruction requires.  ZF is modeled through the
;; CCZ set so the expanders above can read success from the flags.
479 (define_insn "atomic_compare_and_swap<dwi>_doubleword"
480 [(set (match_operand:<DWI> 0 "register_operand" "=A")
481 (unspec_volatile:<DWI>
482 [(match_operand:<DWI> 1 "memory_operand" "+m")
483 (match_operand:<DWI> 2 "register_operand" "0")
484 (match_operand:DWIH 3 "register_operand" "b")
485 (match_operand:DWIH 4 "register_operand" "c")
486 (match_operand:SI 5 "const_int_operand")]
489 (unspec_volatile:<DWI> [(const_int 0)] UNSPECV_CMPXCHG))
490 (set (reg:CCZ FLAGS_REG)
491 (unspec_volatile:CCZ [(const_int 0)] UNSPECV_CMPXCHG))]
492 "TARGET_CMPXCHG<doublemodesuffix>B"
493 "lock{%;} %K5cmpxchg<doublemodesuffix>b\t%1")
;; Single-word CAS via lock cmpxchg.  "=a"/"0" tie the old value through
;; AX as the instruction requires; the CCZ set exposes ZF (success) to the
;; expanders.  %K4 prints any memory-model-dependent annotation.
;; NOTE(review): some unspec code and condition lines appear missing.
495 (define_insn "atomic_compare_and_swap<mode>_1"
496 [(set (match_operand:SWI 0 "register_operand" "=a")
498 [(match_operand:SWI 1 "memory_operand" "+m")
499 (match_operand:SWI 2 "register_operand" "0")
500 (match_operand:SWI 3 "register_operand" "<r>")
501 (match_operand:SI 4 "const_int_operand")]
504 (unspec_volatile:SWI [(const_int 0)] UNSPECV_CMPXCHG))
505 (set (reg:CCZ FLAGS_REG)
506 (unspec_volatile:CCZ [(const_int 0)] UNSPECV_CMPXCHG))]
508 "lock{%;} %K4cmpxchg{<imodesuffix>}\t{%3, %1|%1, %3}")
510 ;; For operand 2 nonmemory_operand predicate is used instead of
511 ;; register_operand to allow combiner to better optimize atomic
512 ;; additions of constants.
;; __atomic_fetch_add via lock xadd: returns the old memory value in
;; operand 0 while adding operand 2 in place.  Flags are clobbered.
;; NOTE(review): some unspec code and condition lines appear missing.
513 (define_insn "atomic_fetch_add<mode>"
514 [(set (match_operand:SWI 0 "register_operand" "=<r>")
516 [(match_operand:SWI 1 "memory_operand" "+m")
517 (match_operand:SI 3 "const_int_operand")] ;; model
520 (plus:SWI (match_dup 1)
521 (match_operand:SWI 2 "nonmemory_operand" "0")))
522 (clobber (reg:CC FLAGS_REG))]
524 "lock{%;} %K3xadd{<imodesuffix>}\t{%0, %1|%1, %0}")
526 ;; This peephole2 and following insn optimize
527 ;; __sync_fetch_and_add (x, -N) == N into just lock {add,sub,inc,dec}
528 ;; followed by testing of flags instead of lock xadd and comparisons.
;; Pattern: const load into reg, lock-xadd into that reg, then compare of
;; the reg against the negated constant.  When the reg dies and the two
;; constants are exact negatives, replace with *atomic_fetch_add_cmp below.
;; NOTE(review): the "(define_peephole2" header and several interior lines
;; appear missing from this extraction; code left byte-identical.
530 [(set (match_operand:SWI 0 "register_operand")
531 (match_operand:SWI 2 "const_int_operand"))
532 (parallel [(set (match_dup 0)
534 [(match_operand:SWI 1 "memory_operand")
535 (match_operand:SI 4 "const_int_operand")]
538 (plus:SWI (match_dup 1)
540 (clobber (reg:CC FLAGS_REG))])
541 (set (reg:CCZ FLAGS_REG)
542 (compare:CCZ (match_dup 0)
543 (match_operand:SWI 3 "const_int_operand")))]
544 "peep2_reg_dead_p (3, operands[0])
545 && (unsigned HOST_WIDE_INT) INTVAL (operands[2])
546 == -(unsigned HOST_WIDE_INT) INTVAL (operands[3])
547 && !reg_overlap_mentioned_p (operands[0], operands[1])"
548 [(parallel [(set (reg:CCZ FLAGS_REG)
550 (unspec_volatile:SWI [(match_dup 1) (match_dup 4)]
554 (plus:SWI (match_dup 1)
557 ;; Likewise, but for the -Os special case of *mov<mode>_or.
;; Same transformation as the previous peephole, except the initial -1 load
;; is the flag-clobbering *mov<mode>_or form emitted at -Os (constm1).
;; NOTE(review): the "(define_peephole2" header and several interior lines
;; appear missing from this extraction; code left byte-identical.
559 [(parallel [(set (match_operand:SWI 0 "register_operand")
560 (match_operand:SWI 2 "constm1_operand"))
561 (clobber (reg:CC FLAGS_REG))])
562 (parallel [(set (match_dup 0)
564 [(match_operand:SWI 1 "memory_operand")
565 (match_operand:SI 4 "const_int_operand")]
568 (plus:SWI (match_dup 1)
570 (clobber (reg:CC FLAGS_REG))])
571 (set (reg:CCZ FLAGS_REG)
572 (compare:CCZ (match_dup 0)
573 (match_operand:SWI 3 "const_int_operand")))]
574 "peep2_reg_dead_p (3, operands[0])
575 && (unsigned HOST_WIDE_INT) INTVAL (operands[2])
576 == -(unsigned HOST_WIDE_INT) INTVAL (operands[3])
577 && !reg_overlap_mentioned_p (operands[0], operands[1])"
578 [(parallel [(set (reg:CCZ FLAGS_REG)
580 (unspec_volatile:SWI [(match_dup 1) (match_dup 4)]
584 (plus:SWI (match_dup 1)
;; Target of the two peepholes above: a locked add/sub/inc/dec whose ZF
;; result replaces the xadd+compare sequence.  Chooses inc/dec for +/-1,
;; sub when the constant negates cheaply, add otherwise.  The insn
;; condition requires the two constants to be exact negatives.
;; NOTE(review): some lines (braces, compare rtx) appear missing.
587 (define_insn "*atomic_fetch_add_cmp<mode>"
588 [(set (reg:CCZ FLAGS_REG)
591 [(match_operand:SWI 0 "memory_operand" "+m")
592 (match_operand:SI 3 "const_int_operand")] ;; model
594 (match_operand:SWI 2 "const_int_operand" "i")))
596 (plus:SWI (match_dup 0)
597 (match_operand:SWI 1 "const_int_operand" "i")))]
598 "(unsigned HOST_WIDE_INT) INTVAL (operands[1])
599 == -(unsigned HOST_WIDE_INT) INTVAL (operands[2])"
601 if (incdec_operand (operands[1], <MODE>mode))
603 if (operands[1] == const1_rtx)
604 return "lock{%;} %K3inc{<imodesuffix>}\t%0";
607 gcc_assert (operands[1] == constm1_rtx);
608 return "lock{%;} %K3dec{<imodesuffix>}\t%0";
612 if (x86_maybe_negate_const_int (&operands[1], <MODE>mode))
613 return "lock{%;} %K3sub{<imodesuffix>}\t{%1, %0|%0, %1}";
615 return "lock{%;} %K3add{<imodesuffix>}\t{%1, %0|%0, %1}";
618 ;; Recall that xchg implicitly sets LOCK#, so adding it again wastes space.
619 ;; In addition, it is always a full barrier, so we can ignore the memory model.
;; __atomic_exchange via xchg (no explicit lock prefix needed, per above).
;; NOTE(review): some unspec code lines appear missing (numbering gaps).
620 (define_insn "atomic_exchange<mode>"
621 [(set (match_operand:SWI 0 "register_operand" "=<r>") ;; output
623 [(match_operand:SWI 1 "memory_operand" "+m") ;; memory
624 (match_operand:SI 3 "const_int_operand")] ;; model
627 (match_operand:SWI 2 "register_operand" "0"))] ;; input
629 "%K3xchg{<imodesuffix>}\t{%1, %0|%0, %1}")
;; __atomic_add with unused result: locked RMW straight into memory.
;; +/-1 become lock inc/dec; a negatable constant becomes lock sub;
;; otherwise lock add.  Flags are clobbered.
;; NOTE(review): some brace/condition lines appear missing.
631 (define_insn "atomic_add<mode>"
632 [(set (match_operand:SWI 0 "memory_operand" "+m")
634 [(plus:SWI (match_dup 0)
635 (match_operand:SWI 1 "nonmemory_operand" "<r><i>"))
636 (match_operand:SI 2 "const_int_operand")] ;; model
638 (clobber (reg:CC FLAGS_REG))]
641 if (incdec_operand (operands[1], <MODE>mode))
643 if (operands[1] == const1_rtx)
644 return "lock{%;} %K2inc{<imodesuffix>}\t%0";
647 gcc_assert (operands[1] == constm1_rtx);
648 return "lock{%;} %K2dec{<imodesuffix>}\t%0";
652 if (x86_maybe_negate_const_int (&operands[1], <MODE>mode))
653 return "lock{%;} %K2sub{<imodesuffix>}\t{%1, %0|%0, %1}";
655 return "lock{%;} %K2add{<imodesuffix>}\t{%1, %0|%0, %1}";
;; __atomic_sub with unused result: mirror of atomic_add with the inc/dec
;; and add/sub choices swapped (subtracting 1 is lock dec, etc.).
;; NOTE(review): some brace/condition lines appear missing.
658 (define_insn "atomic_sub<mode>"
659 [(set (match_operand:SWI 0 "memory_operand" "+m")
661 [(minus:SWI (match_dup 0)
662 (match_operand:SWI 1 "nonmemory_operand" "<r><i>"))
663 (match_operand:SI 2 "const_int_operand")] ;; model
665 (clobber (reg:CC FLAGS_REG))]
668 if (incdec_operand (operands[1], <MODE>mode))
670 if (operands[1] == const1_rtx)
671 return "lock{%;} %K2dec{<imodesuffix>}\t%0";
674 gcc_assert (operands[1] == constm1_rtx);
675 return "lock{%;} %K2inc{<imodesuffix>}\t%0";
679 if (x86_maybe_negate_const_int (&operands[1], <MODE>mode))
680 return "lock{%;} %K2add{<imodesuffix>}\t{%1, %0|%0, %1}";
682 return "lock{%;} %K2sub{<imodesuffix>}\t{%1, %0|%0, %1}";
;; __atomic_{and,or,xor} with unused result: one locked logic instruction
;; into memory, iterated over any_logic.  Flags are clobbered.
;; NOTE(review): some unspec code lines appear missing (numbering gaps).
685 (define_insn "atomic_<logic><mode>"
686 [(set (match_operand:SWI 0 "memory_operand" "+m")
688 [(any_logic:SWI (match_dup 0)
689 (match_operand:SWI 1 "nonmemory_operand" "<r><i>"))
690 (match_operand:SI 2 "const_int_operand")] ;; model
692 (clobber (reg:CC FLAGS_REG))]
694 "lock{%;} %K2<logic>{<imodesuffix>}\t{%1, %0|%0, %1}")
;; Expander for the bit-test-and-set optab: emits lock bts via the _1 insn,
;; turns the resulting carry flag into a 0/1 value with setcc, and (when
;; operand 4 is zero) shifts it back to the tested bit position.
;; NOTE(review): some lines appear missing (numbering gaps).
696 (define_expand "atomic_bit_test_and_set<mode>"
697 [(match_operand:SWI248 0 "register_operand")
698 (match_operand:SWI248 1 "memory_operand")
699 (match_operand:SWI248 2 "nonmemory_operand")
700 (match_operand:SI 3 "const_int_operand") ;; model
701 (match_operand:SI 4 "const_int_operand")]
704 emit_insn (gen_atomic_bit_test_and_set<mode>_1 (operands[1], operands[2],
706 rtx tem = gen_reg_rtx (QImode);
707 ix86_expand_setcc (tem, EQ, gen_rtx_REG (CCCmode, FLAGS_REG), const0_rtx);
708 rtx result = convert_modes (<MODE>mode, QImode, tem, 1);
709 if (operands[4] == const0_rtx)
710 result = expand_simple_binop (<MODE>mode, ASHIFT, result,
711 operands[2], operands[0], 0, OPTAB_DIRECT);
712 if (result != operands[0])
713 emit_move_insn (operands[0], result);
;; lock bts: sets CF from the old bit (modeled via CCC) and sets the bit in
;; memory through the zero_extract.
;; NOTE(review): several interior lines appear missing (numbering gaps).
717 (define_insn "atomic_bit_test_and_set<mode>_1"
718 [(set (reg:CCC FLAGS_REG)
720 (unspec_volatile:SWI248
721 [(match_operand:SWI248 0 "memory_operand" "+m")
722 (match_operand:SI 2 "const_int_operand")] ;; model
725 (set (zero_extract:SWI248 (match_dup 0)
727 (match_operand:SWI248 1 "nonmemory_operand" "rN"))
730 "lock{%;} %K2bts{<imodesuffix>}\t{%1, %0|%0, %1}")
;; Expander for bit-test-and-complement: same setcc/shift fixup scheme as
;; atomic_bit_test_and_set above, but emitting lock btc via its _1 insn.
;; NOTE(review): some lines appear missing (numbering gaps).
732 (define_expand "atomic_bit_test_and_complement<mode>"
733 [(match_operand:SWI248 0 "register_operand")
734 (match_operand:SWI248 1 "memory_operand")
735 (match_operand:SWI248 2 "nonmemory_operand")
736 (match_operand:SI 3 "const_int_operand") ;; model
737 (match_operand:SI 4 "const_int_operand")]
740 emit_insn (gen_atomic_bit_test_and_complement<mode>_1 (operands[1],
743 rtx tem = gen_reg_rtx (QImode);
744 ix86_expand_setcc (tem, EQ, gen_rtx_REG (CCCmode, FLAGS_REG), const0_rtx);
745 rtx result = convert_modes (<MODE>mode, QImode, tem, 1);
746 if (operands[4] == const0_rtx)
747 result = expand_simple_binop (<MODE>mode, ASHIFT, result,
748 operands[2], operands[0], 0, OPTAB_DIRECT);
749 if (result != operands[0])
750 emit_move_insn (operands[0], result);
;; lock btc: sets CF from the old bit (via CCC) and complements the bit in
;; memory (the not of the zero_extract visible below).
;; NOTE(review): several interior lines appear missing (numbering gaps).
754 (define_insn "atomic_bit_test_and_complement<mode>_1"
755 [(set (reg:CCC FLAGS_REG)
757 (unspec_volatile:SWI248
758 [(match_operand:SWI248 0 "memory_operand" "+m")
759 (match_operand:SI 2 "const_int_operand")] ;; model
762 (set (zero_extract:SWI248 (match_dup 0)
764 (match_operand:SWI248 1 "nonmemory_operand" "rN"))
765 (not:SWI248 (zero_extract:SWI248 (match_dup 0)
769 "lock{%;} %K2btc{<imodesuffix>}\t{%1, %0|%0, %1}")
;; Expander for bit-test-and-reset: same setcc/shift fixup scheme as the
;; set/complement expanders above, emitting lock btr via its _1 insn.
;; NOTE(review): some lines appear missing (numbering gaps).
771 (define_expand "atomic_bit_test_and_reset<mode>"
772 [(match_operand:SWI248 0 "register_operand")
773 (match_operand:SWI248 1 "memory_operand")
774 (match_operand:SWI248 2 "nonmemory_operand")
775 (match_operand:SI 3 "const_int_operand") ;; model
776 (match_operand:SI 4 "const_int_operand")]
779 emit_insn (gen_atomic_bit_test_and_reset<mode>_1 (operands[1], operands[2],
781 rtx tem = gen_reg_rtx (QImode);
782 ix86_expand_setcc (tem, EQ, gen_rtx_REG (CCCmode, FLAGS_REG), const0_rtx);
783 rtx result = convert_modes (<MODE>mode, QImode, tem, 1);
784 if (operands[4] == const0_rtx)
785 result = expand_simple_binop (<MODE>mode, ASHIFT, result,
786 operands[2], operands[0], 0, OPTAB_DIRECT);
787 if (result != operands[0])
788 emit_move_insn (operands[0], result);
;; lock btr: sets CF from the old bit (via CCC) and clears the bit in
;; memory through the zero_extract.
;; NOTE(review): several interior lines appear missing (numbering gaps).
792 (define_insn "atomic_bit_test_and_reset<mode>_1"
793 [(set (reg:CCC FLAGS_REG)
795 (unspec_volatile:SWI248
796 [(match_operand:SWI248 0 "memory_operand" "+m")
797 (match_operand:SI 2 "const_int_operand")] ;; model
800 (set (zero_extract:SWI248 (match_dup 0)
802 (match_operand:SWI248 1 "nonmemory_operand" "rN"))
805 "lock{%;} %K2btr{<imodesuffix>}\t{%1, %0|%0, %1}")