1 ;; Machine description for PowerPC synchronization instructions.
2 ;; Copyright (C) 2005-2014 Free Software Foundation, Inc.
3 ;; Contributed by Geoffrey Keating.
5 ;; This file is part of GCC.
7 ;; GCC is free software; you can redistribute it and/or modify it
8 ;; under the terms of the GNU General Public License as published
9 ;; by the Free Software Foundation; either version 3, or (at your
10 ;; option) any later version.
12 ;; GCC is distributed in the hope that it will be useful, but WITHOUT
13 ;; ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14 ;; or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
15 ;; License for more details.
17 ;; You should have received a copy of the GNU General Public License
18 ;; along with GCC; see the file COPYING3. If not see
19 ;; <http://www.gnu.org/licenses/>.
21 (define_mode_attr larx [(QI "lbarx")
27 (define_mode_attr stcx [(QI "stbcx.")
;; RTL codes for the read-modify-write operations that the
;; atomic_<fetchop_name>* expanders below are instantiated over.
33 (define_code_iterator FETCHOP [plus minus ior xor and])
;; Pattern-name fragment for each FETCHOP code, giving pattern names
;; such as atomic_add<mode> and atomic_fetch_sub<mode>.
34 (define_code_attr fetchop_name
35 [(plus "add") (minus "sub") (ior "or") (xor "xor") (and "and")])
;; Predicate validating the non-memory operand of each FETCHOP code;
;; chosen to admit whatever immediate forms the corresponding PowerPC
;; arithmetic/logical instruction can encode (add: signed 16-bit,
;; or/xor/and: logical masks, sub: register only).
36 (define_code_attr fetchop_pred
37 [(plus "add_operand") (minus "int_reg_operand")
38 (ior "logical_operand") (xor "logical_operand") (and "and_operand")])
40 (define_expand "mem_thread_fence"
41 [(match_operand:SI 0 "const_int_operand" "")] ;; model
44 enum memmodel model = (enum memmodel) INTVAL (operands[0]);
47 case MEMMODEL_RELAXED:
49 case MEMMODEL_CONSUME:
50 case MEMMODEL_ACQUIRE:
51 case MEMMODEL_RELEASE:
52 case MEMMODEL_ACQ_REL:
53 emit_insn (gen_lwsync ());
55 case MEMMODEL_SEQ_CST:
56 emit_insn (gen_hwsync ());
64 (define_expand "hwsync"
66 (unspec:BLK [(match_dup 0)] UNSPEC_SYNC))]
69 operands[0] = gen_rtx_MEM (BLKmode, gen_rtx_SCRATCH (Pmode));
70 MEM_VOLATILE_P (operands[0]) = 1;
73 (define_insn "*hwsync"
74 [(set (match_operand:BLK 0 "" "")
75 (unspec:BLK [(match_dup 0)] UNSPEC_SYNC))]
78 [(set_attr "type" "sync")])
80 (define_expand "lwsync"
82 (unspec:BLK [(match_dup 0)] UNSPEC_LWSYNC))]
85 operands[0] = gen_rtx_MEM (BLKmode, gen_rtx_SCRATCH (Pmode));
86 MEM_VOLATILE_P (operands[0]) = 1;
89 (define_insn "*lwsync"
90 [(set (match_operand:BLK 0 "" "")
91 (unspec:BLK [(match_dup 0)] UNSPEC_LWSYNC))]
94 /* Some AIX assemblers don't accept lwsync, so we use a .long. */
97 else if (TARGET_LWSYNC_INSTRUCTION)
100 return ".long 0x7c2004ac";
102 [(set_attr "type" "sync")])
105 [(unspec_volatile:BLK [(const_int 0)] UNSPECV_ISYNC)]
108 [(set_attr "type" "isync")])
110 ;; The control dependency used for load dependency described
111 ;; in B.2.3 of the Power ISA 2.06B.
112 (define_insn "loadsync_<mode>"
113 [(unspec_volatile:BLK [(match_operand:INT1 0 "register_operand" "r")]
115 (clobber (match_scratch:CC 1 "=y"))]
117 "cmpw %1,%0,%0\;bne- %1,$+4\;isync"
118 [(set_attr "type" "isync")
119 (set_attr "length" "12")])
121 (define_expand "atomic_load<mode>"
122 [(set (match_operand:INT1 0 "register_operand" "") ;; output
123 (match_operand:INT1 1 "memory_operand" "")) ;; memory
124 (use (match_operand:SI 2 "const_int_operand" ""))] ;; model
127 enum memmodel model = (enum memmodel) INTVAL (operands[2]);
129 if (model == MEMMODEL_SEQ_CST)
130 emit_insn (gen_hwsync ());
132 emit_move_insn (operands[0], operands[1]);
136 case MEMMODEL_RELAXED:
138 case MEMMODEL_CONSUME:
139 case MEMMODEL_ACQUIRE:
140 case MEMMODEL_SEQ_CST:
141 emit_insn (gen_loadsync_<mode> (operands[0]));
149 (define_expand "atomic_store<mode>"
150 [(set (match_operand:INT1 0 "memory_operand" "") ;; memory
151 (match_operand:INT1 1 "register_operand" "")) ;; input
152 (use (match_operand:SI 2 "const_int_operand" ""))] ;; model
155 enum memmodel model = (enum memmodel) INTVAL (operands[2]);
158 case MEMMODEL_RELAXED:
160 case MEMMODEL_RELEASE:
161 emit_insn (gen_lwsync ());
163 case MEMMODEL_SEQ_CST:
164 emit_insn (gen_hwsync ());
169 emit_move_insn (operands[0], operands[1]);
173 ;; Any supported integer mode that has atomic l<x>arx/st<x>cx. instructions
174 ;; other than the quad memory operations, which have special restrictions.
175 ;; Byte/halfword atomic instructions were added in ISA 2.06B, but were phased
176 ;; in and did not show up until power8. TImode atomic lqarx/stqcx. require
177 ;; special handling due to even/odd register requirements.
178 (define_mode_iterator ATOMIC [(QI "TARGET_SYNC_HI_QI")
179 (HI "TARGET_SYNC_HI_QI")
181 (DI "TARGET_POWERPC64")])
183 ;; Types that we should provide atomic instructions for.
185 (define_mode_iterator AINT [QI
188 (DI "TARGET_POWERPC64")
189 (TI "TARGET_SYNC_TI")])
191 (define_insn "load_locked<mode>"
192 [(set (match_operand:ATOMIC 0 "int_reg_operand" "=r")
193 (unspec_volatile:ATOMIC
194 [(match_operand:ATOMIC 1 "memory_operand" "Z")] UNSPECV_LL))]
197 [(set_attr "type" "load_l")])
199 (define_insn "load_locked<QHI:mode>_si"
200 [(set (match_operand:SI 0 "int_reg_operand" "=r")
202 [(match_operand:QHI 1 "memory_operand" "Z")] UNSPECV_LL))]
205 [(set_attr "type" "load_l")])
207 ;; Use PTImode to get even/odd register pairs.
208 ;; Use a temporary register to force getting an even register for the
209 ;; lqarx/stqcx. instructions.  Normal optimizations will eliminate this extra
210 ;; copy on big endian systems.
212 ;; On little endian systems where non-atomic quad word load/store instructions
213 ;; are not used, the address can be register+offset, so make sure the address
214 ;; is indexed or indirect before register allocation.
216 (define_expand "load_lockedti"
217 [(use (match_operand:TI 0 "quad_int_reg_operand" ""))
218 (use (match_operand:TI 1 "memory_operand" ""))]
221 rtx op0 = operands[0];
222 rtx op1 = operands[1];
223 rtx pti = gen_reg_rtx (PTImode);
225 if (!indexed_or_indirect_operand (op1, TImode))
227 rtx old_addr = XEXP (op1, 0);
228 rtx new_addr = force_reg (Pmode, old_addr);
229 operands[1] = op1 = change_address (op1, TImode, new_addr);
232 emit_insn (gen_load_lockedpti (pti, op1));
233 if (WORDS_BIG_ENDIAN)
234 emit_move_insn (op0, gen_lowpart (TImode, pti));
237 emit_move_insn (gen_lowpart (DImode, op0), gen_highpart (DImode, pti));
238 emit_move_insn (gen_highpart (DImode, op0), gen_lowpart (DImode, pti));
243 (define_insn "load_lockedpti"
244 [(set (match_operand:PTI 0 "quad_int_reg_operand" "=&r")
246 [(match_operand:TI 1 "indexed_or_indirect_operand" "Z")] UNSPECV_LL))]
248 && !reg_mentioned_p (operands[0], operands[1])
249 && quad_int_reg_operand (operands[0], PTImode)"
251 [(set_attr "type" "load_l")])
253 (define_insn "store_conditional<mode>"
254 [(set (match_operand:CC 0 "cc_reg_operand" "=x")
255 (unspec_volatile:CC [(const_int 0)] UNSPECV_SC))
256 (set (match_operand:ATOMIC 1 "memory_operand" "=Z")
257 (match_operand:ATOMIC 2 "int_reg_operand" "r"))]
260 [(set_attr "type" "store_c")])
262 ;; Use a temporary register to force getting an even register for the
263 ;; lqarx/stqcx. instructions.  Normal optimizations will eliminate this extra
264 ;; copy on big endian systems.
266 ;; On little endian systems where non-atomic quad word load/store instructions
267 ;; are not used, the address can be register+offset, so make sure the address
268 ;; is indexed or indirect before register allocation.
270 (define_expand "store_conditionalti"
271 [(use (match_operand:CC 0 "cc_reg_operand" ""))
272 (use (match_operand:TI 1 "memory_operand" ""))
273 (use (match_operand:TI 2 "quad_int_reg_operand" ""))]
276 rtx op0 = operands[0];
277 rtx op1 = operands[1];
278 rtx op2 = operands[2];
279 rtx addr = XEXP (op1, 0);
283 if (!indexed_or_indirect_operand (op1, TImode))
285 rtx new_addr = force_reg (Pmode, addr);
286 operands[1] = op1 = change_address (op1, TImode, new_addr);
290 pti_mem = change_address (op1, PTImode, addr);
291 pti_reg = gen_reg_rtx (PTImode);
293 if (WORDS_BIG_ENDIAN)
294 emit_move_insn (pti_reg, gen_lowpart (PTImode, op2));
297 emit_move_insn (gen_lowpart (DImode, pti_reg), gen_highpart (DImode, op2));
298 emit_move_insn (gen_highpart (DImode, pti_reg), gen_lowpart (DImode, op2));
301 emit_insn (gen_store_conditionalpti (op0, pti_mem, pti_reg));
305 (define_insn "store_conditionalpti"
306 [(set (match_operand:CC 0 "cc_reg_operand" "=x")
307 (unspec_volatile:CC [(const_int 0)] UNSPECV_SC))
308 (set (match_operand:PTI 1 "indexed_or_indirect_operand" "=Z")
309 (match_operand:PTI 2 "quad_int_reg_operand" "r"))]
310 "TARGET_SYNC_TI && quad_int_reg_operand (operands[2], PTImode)"
312 [(set_attr "type" "store_c")])
314 (define_expand "atomic_compare_and_swap<mode>"
315 [(match_operand:SI 0 "int_reg_operand" "") ;; bool out
316 (match_operand:AINT 1 "int_reg_operand" "") ;; val out
317 (match_operand:AINT 2 "memory_operand" "") ;; memory
318 (match_operand:AINT 3 "reg_or_short_operand" "") ;; expected
319 (match_operand:AINT 4 "int_reg_operand" "") ;; desired
320 (match_operand:SI 5 "const_int_operand" "") ;; is_weak
321 (match_operand:SI 6 "const_int_operand" "") ;; model succ
322 (match_operand:SI 7 "const_int_operand" "")] ;; model fail
325 rs6000_expand_atomic_compare_and_swap (operands);
329 (define_expand "atomic_exchange<mode>"
330 [(match_operand:AINT 0 "int_reg_operand" "") ;; output
331 (match_operand:AINT 1 "memory_operand" "") ;; memory
332 (match_operand:AINT 2 "int_reg_operand" "") ;; input
333 (match_operand:SI 3 "const_int_operand" "")] ;; model
336 rs6000_expand_atomic_exchange (operands);
340 (define_expand "atomic_<fetchop_name><mode>"
341 [(match_operand:AINT 0 "memory_operand" "") ;; memory
342 (FETCHOP:AINT (match_dup 0)
343 (match_operand:AINT 1 "<fetchop_pred>" "")) ;; operand
344 (match_operand:SI 2 "const_int_operand" "")] ;; model
347 rs6000_expand_atomic_op (<CODE>, operands[0], operands[1],
348 NULL_RTX, NULL_RTX, operands[2]);
352 (define_expand "atomic_nand<mode>"
353 [(match_operand:AINT 0 "memory_operand" "") ;; memory
354 (match_operand:AINT 1 "int_reg_operand" "") ;; operand
355 (match_operand:SI 2 "const_int_operand" "")] ;; model
358 rs6000_expand_atomic_op (NOT, operands[0], operands[1],
359 NULL_RTX, NULL_RTX, operands[2]);
363 (define_expand "atomic_fetch_<fetchop_name><mode>"
364 [(match_operand:AINT 0 "int_reg_operand" "") ;; output
365 (match_operand:AINT 1 "memory_operand" "") ;; memory
366 (FETCHOP:AINT (match_dup 1)
367 (match_operand:AINT 2 "<fetchop_pred>" "")) ;; operand
368 (match_operand:SI 3 "const_int_operand" "")] ;; model
371 rs6000_expand_atomic_op (<CODE>, operands[1], operands[2],
372 operands[0], NULL_RTX, operands[3]);
376 (define_expand "atomic_fetch_nand<mode>"
377 [(match_operand:AINT 0 "int_reg_operand" "") ;; output
378 (match_operand:AINT 1 "memory_operand" "") ;; memory
379 (match_operand:AINT 2 "int_reg_operand" "") ;; operand
380 (match_operand:SI 3 "const_int_operand" "")] ;; model
383 rs6000_expand_atomic_op (NOT, operands[1], operands[2],
384 operands[0], NULL_RTX, operands[3]);
388 (define_expand "atomic_<fetchop_name>_fetch<mode>"
389 [(match_operand:AINT 0 "int_reg_operand" "") ;; output
390 (match_operand:AINT 1 "memory_operand" "") ;; memory
391 (FETCHOP:AINT (match_dup 1)
392 (match_operand:AINT 2 "<fetchop_pred>" "")) ;; operand
393 (match_operand:SI 3 "const_int_operand" "")] ;; model
396 rs6000_expand_atomic_op (<CODE>, operands[1], operands[2],
397 NULL_RTX, operands[0], operands[3]);
401 (define_expand "atomic_nand_fetch<mode>"
402 [(match_operand:AINT 0 "int_reg_operand" "") ;; output
403 (match_operand:AINT 1 "memory_operand" "") ;; memory
404 (match_operand:AINT 2 "int_reg_operand" "") ;; operand
405 (match_operand:SI 3 "const_int_operand" "")] ;; model
408 rs6000_expand_atomic_op (NOT, operands[1], operands[2],
409 NULL_RTX, operands[0], operands[3]);