1 ;; Machine Description for Renesas RL78 processors
2 ;; Copyright (C) 2011-2013 Free Software Foundation, Inc.
3 ;; Contributed by Red Hat.
5 ;; This file is part of GCC.
7 ;; GCC is free software; you can redistribute it and/or modify
8 ;; it under the terms of the GNU General Public License as published by
9 ;; the Free Software Foundation; either version 3, or (at your option)
12 ;; GCC is distributed in the hope that it will be useful,
13 ;; but WITHOUT ANY WARRANTY; without even the implied warranty of
14 ;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 ;; GNU General Public License for more details.
17 ;; You should have received a copy of the GNU General Public License
18 ;; along with GCC; see the file COPYING3. If not see
19 ;; <http://www.gnu.org/licenses/>.
21 ;; In this MD file, we define those insn patterns that involve
22 ;; registers, where such registers are virtual until allocated to a
23 ;; physical register. All of these insns need to be conditional on
24 ;; rl78_virt_insns_ok () being true.
26 ;; This tells the physical register allocator what method to use to
27 ;; allocate registers. Basically, this defines the template of the
28 ;; instruction - op1 is of the form "a = op(b)", op2 is "a = b op c"
;; Per-insn classification consumed by the RL78 devirtualization pass
;; to choose how physical registers are assigned (see the comment
;; above: op1 = "a = op(b)", op2 = "a = b op c"; ro1/cmp/umul/macax
;; cover the read-only-operand, compare, multiply and AX/BC-based
;; patterns below).
;; NOTE(review): the attribute's default value / closing paren is not
;; visible in this extract.
31 (define_attr "valloc" "op1,op2,ro1,cmp,umul,macax"
34 ;;---------- Moving ------------------------
;; QImode move between virtual registers (v), immediates, near memory
;; (Y) and far memory (Wfr).  Only enabled while rl78_virt_insns_ok ()
;; is true, i.e. before devirtualization rewrites these into real insns.
;; NOTE(review): the output-template line is missing from this extract.
36 (define_insn "*movqi_virt"
37   [(set (match_operand:QI 0 "nonimmediate_operand" "=vY,v,Wfr")
38 	(match_operand 1 "general_operand" "vInt8JY,Wfr,vInt8J"))]
39   "rl78_virt_insns_ok ()"
41   [(set_attr "valloc" "op1")]
;; HImode move, mirroring *movqi_virt: virtual regs, immediates,
;; near (Y/S) and far (Wfr) memory.  Valid only pre-devirtualization.
;; NOTE(review): the output-template line is missing from this extract.
44 (define_insn "*movhi_virt"
45   [(set (match_operand:HI 0 "nonimmediate_operand" "=vYS,v,Wfr")
46 	(match_operand:HI 1 "general_operand" "viYS,Wfr,v"))]
47   "rl78_virt_insns_ok ()"
49   [(set_attr "valloc" "op1")]
52 ;;---------- Conversions ------------------------
;; Zero-extend QI -> HI, emitted as the virtual pseudo-op
;; "v.zero_extend"; the devirtualization pass later expands it into
;; real RL78 instructions.
54 (define_insn "*zero_extendqihi2_virt"
55   [(set (match_operand:HI 0 "rl78_nonfar_nonimm_operand" "=vm")
56 	(zero_extend:HI (match_operand:QI 1 "general_operand" "vim")))]
57   "rl78_virt_insns_ok ()"
58   "v.zero_extend\t%0, %1"
59   [(set_attr "valloc" "op1")]
;; Sign-extend QI -> HI via the virtual pseudo-op "v.sign_extend";
;; same scheme as *zero_extendqihi2_virt above.
62 (define_insn "*extendqihi2_virt"
63   [(set (match_operand:HI 0 "rl78_nonfar_nonimm_operand" "=vm")
64 	(sign_extend:HI (match_operand:QI 1 "general_operand" "vim")))]
65   "rl78_virt_insns_ok ()"
66   "v.sign_extend\t%0, %1"
67   [(set_attr "valloc" "op1")]
70 ;;---------- Arithmetic ------------------------
;; QImode/HImode add (QHI mode iterator) on virtual operands; second
;; alternative is the in-place stack-relative form (S, matching "0").
;; NOTE(review): the output template, valloc attribute and closing
;; lines are not visible in this extract.
72 (define_insn "*add<mode>3_virt"
73   [(set (match_operand:QHI 0 "rl78_nonfar_nonimm_operand" "=vY,S")
74 	(plus:QHI (match_operand:QHI 1 "rl78_nonfar_operand" "viY,0")
75 		  (match_operand:QHI 2 "general_operand" "vim,i")))
77   "rl78_virt_insns_ok ()"
;; QImode/HImode subtract, parallel in structure to *add<mode>3_virt.
;; NOTE(review): the output template, valloc attribute and closing
;; lines are not visible in this extract.
81 (define_insn "*sub<mode>3_virt"
82   [(set (match_operand:QHI 0 "rl78_nonfar_nonimm_operand" "=vm,S")
83 	(minus:QHI (match_operand:QHI 1 "rl78_nonfar_operand" "vim,0")
84 		   (match_operand:QHI 2 "general_operand" "vim,i")))
86   "rl78_virt_insns_ok ()"
;; Unsigned HImode multiply where operand 2 satisfies rl78_24_operand
;; ("Ni" constraint) — presumably constants implementable as shifts,
;; hence the "_shift" name; confirm against the rl78 predicate
;; definitions.  %-prefix on operand 1 marks it commutative.
;; NOTE(review): the output-template line is missing from this extract.
90 (define_insn "*umulhi3_shift_virt"
91   [(set (match_operand:HI 0 "register_operand" "=vm")
92         (mult:HI (match_operand:HI 1 "rl78_nonfar_operand" "%vim")
93                  (match_operand:HI 2 "rl78_24_operand" "Ni")))]
94   "rl78_virt_insns_ok ()"
96   [(set_attr "valloc" "umul")]
;; Widening unsigned multiply: QI x QI -> HI (both inputs
;; zero-extended).  Uses the "umul" allocation strategy.
;; NOTE(review): the output-template line is missing from this extract.
99 (define_insn "*umulqihi3_virt"
100   [(set (match_operand:HI 0 "register_operand" "=vm")
101         (mult:HI (zero_extend:HI (match_operand:QI 1 "rl78_nonfar_operand" "%vim"))
102                  (zero_extend:HI (match_operand:QI 2 "general_operand" "vim"))))]
103   "rl78_virt_insns_ok ()"
105   [(set_attr "valloc" "umul")]
;; QImode bitwise AND on virtual operands.
;; NOTE(review): the output template, valloc attribute and closing
;; lines are not visible in this extract.
108 (define_insn "*andqi3_virt"
109   [(set (match_operand:QI 0 "rl78_nonfar_nonimm_operand" "=vm")
110 	(and:QI (match_operand:QI 1 "rl78_nonfar_operand" "vim")
111 		(match_operand:QI 2 "general_operand" "vim")))
113   "rl78_virt_insns_ok ()"
;; QImode bitwise inclusive OR, parallel to *andqi3_virt.
;; NOTE(review): the output template, valloc attribute and closing
;; lines are not visible in this extract.
117 (define_insn "*iorqi3_virt"
118   [(set (match_operand:QI 0 "rl78_nonfar_nonimm_operand" "=vm")
119 	(ior:QI (match_operand:QI 1 "rl78_nonfar_operand" "vim")
120 		(match_operand:QI 2 "general_operand" "vim")))
122   "rl78_virt_insns_ok ()"
;; QImode bitwise XOR.  First alternative is the two-address
;; reg-xor-immediate form ("%0" ties operand 1 to the output).
;; NOTE(review): the name lacks the usual "qi" ("*xor3_virt" rather
;; than "*xorqi3_virt"), and the output template / closing lines are
;; not visible in this extract.
126 (define_insn "*xor3_virt"
127   [(set (match_operand:QI 0 "rl78_nonfar_nonimm_operand" "=v,vm,m")
128 	(xor:QI (match_operand:QI 1 "rl78_nonfar_operand" "%0,vm,vm")
129 		(match_operand 2 "general_operand" "i,vm,vim")))
131   "rl78_virt_insns_ok ()"
135 ;;---------- Shifts ------------------------
;; QImode/HImode left shift; count is a QImode general operand
;; (register, immediate or memory).
;; NOTE(review): the output template, valloc attribute and closing
;; lines are not visible in this extract.
137 (define_insn "*ashl<mode>3_virt"
138   [(set (match_operand:QHI 0 "rl78_nonfar_nonimm_operand" "=vm")
139 	(ashift:QHI (match_operand:QHI 1 "rl78_nonfar_operand" "vim")
140 		    (match_operand:QI 2 "general_operand" "vim")))
142   "rl78_virt_insns_ok ()"
;; QImode/HImode arithmetic right shift (sign-propagating).
;; NOTE(review): the output template, valloc attribute and closing
;; lines are not visible in this extract.
146 (define_insn "*ashr<mode>3_virt"
147   [(set (match_operand:QHI 0 "rl78_nonfar_nonimm_operand" "=vm")
148 	(ashiftrt:QHI (match_operand:QHI 1 "rl78_nonfar_operand" "vim")
149 		      (match_operand:QI 2 "general_operand" "vim")))
151   "rl78_virt_insns_ok ()"
;; QImode/HImode logical right shift (zero-filling).
;; NOTE(review): the output template, valloc attribute and closing
;; lines are not visible in this extract.
155 (define_insn "*lshr<mode>3_virt"
156   [(set (match_operand:QHI 0 "rl78_nonfar_nonimm_operand" "=vm")
157 	(lshiftrt:QHI (match_operand:QHI 1 "rl78_nonfar_operand" "vim")
158 		      (match_operand:QI 2 "general_operand" "vim")))
160   "rl78_virt_insns_ok ()"
164 ;; This is complex mostly because the RL78 has no SImode operations,
165 ;; and very limited HImode operations, and no variable shifts. This
166 ;; pattern is optimized for each constant shift count and operand
167 ;; types, so as to use a hand-optimized pattern. For readability, the
168 ;; usual \t\; syntax is not used here. Also, there's no easy way to
169 ;; constrain to avoid partial overlaps, hence the duplication.
;; 32-bit arithmetic right shift.  One hand-scheduled alternative per
;; shift-count range (see the column key in the name-line comment:
;; 0, 1, 2-7, 8, 9-15, 16, 17-23, 24, 25-31, variable).  Clobbers the
;; HImode X_REG; the variable-count alternative loops with b as the
;; counter (mov b,%2 ... dec b / bnz).  Sign is propagated with
;; "sarw ax,15" in the >=16 cases.
;; NOTE(review): the insn's condition, the templates for the first few
;; alternatives, and the closing lines are missing from this extract,
;; so the visible templates cannot be matched one-to-one against the
;; 17 constraint alternatives here.
170 (define_insn "ashrsi3_virt"                  ;;   0  1  2-7  8   9-15  16   17-23  24   25-31 var
171   [(set (match_operand:SI 0 "nonimmediate_operand" "=v,vU,&vU,v,  &vU,  &vU, v,     &vU, v,    &vU, &vU, vU,  v,&vU,  vU,  vU, vU")
172 	(ashiftrt:SI (match_operand:SI 1 "nonimmediate_operand" "0, 0, vU, 0,  vWab, U,   0,     vU,  0,    vWab,U,   vU,  0, vU,   vU,  vU, 0")
173 		     (match_operand:SI 2 "nonmemory_operand" "M,  K, K,  Int3,Int3, Int3,Iv08,  Iv08,Is09, Is09,Is09,Iv16,Is17,Is17,Iv24,Is25, iv")))
174    (clobber (reg:HI X_REG))
180    movw ax,%H1 \; sarw ax,1 \; movw %H0,ax \; mov a,%Q1 \; rorc a,1 \; mov %Q0,a \; mov a,%q1 \; rorc a,1 \; mov %q0,a
181    movw ax,%H1 \; sarw ax,1 \; movw %H0,ax \; mov a,%Q1 \; rorc a,1 \; mov %Q0,a \; mov a,%q1 \; rorc a,1 \; mov %q0,a
183    movw ax,%1 \; shlw ax,%r2 \; mov %0,a \; mov x,%Q1 \; mov a,%H1 \; shlw ax,%r2 \; mov %Q0,a \; movw ax,%H1 \; sarw ax,%u2 \; movw %H0,ax
184    movw ax,%1 \; shlw ax,%r2 \; mov %0,a \; mov x,%Q1 \; mov a,%H1 \; shlw ax,%r2 \; mov %Q0,a \; movw ax,%H1 \; sarw ax,%u2 \; movw %H0,ax
185    movw ax,%1 \; shlw ax,%r2 \; mov %0,a \; mov a,%Q1 \; mov x,a \; mov a,%H1 \; shlw ax,%r2 \; mov %Q0,a \; movw ax,%H1 \; sarw ax,%u2 \; movw %H0,ax
187    mov x,%Q1 \; mov a,%H1 \; movw %0,ax \; movw ax,%H1 \; sarw ax,8 \; movw %H0,ax
188    mov a,%Q1 \; mov x, a \; mov a,%H1 \; movw %0,ax \; movw ax,%H1 \; sarw ax,8 \; movw %H0,ax
190    mov x,%Q1 \; mov a,%H1 \; shlw ax,%r2 \; mov %0,a \; movw ax,%H1 \; shlw ax,%r2 \; mov %Q0,a \; movw ax,%H1 \; sarw ax,%u2 \; movw %H0,ax
191    mov x,%Q1 \; mov a,%H1 \; shlw ax,%r2 \; mov %0,a \; movw ax,%H1 \; shlw ax,%r2 \; mov %Q0,a \; movw ax,%H1 \; sarw ax,%u2 \; movw %H0,ax
192    mov a,%Q1 \; mov x,a \; mov a,%H1 \; shlw ax,%r2 \; mov %0,a \; movw ax,%H1 \; shlw ax,%r2 \; mov %Q0,a \; movw ax,%H1 \; sarw ax,%u2 \; movw %H0,ax
194    movw ax,%H1 \; movw %0,ax \; sarw ax,15 \; movw %H0,ax
196    movw ax,%H1 \; sarw ax,%S2 \; movw %0,ax \; sarw ax,15 \; movw %H0,ax
197    movw ax,%H1 \; sarw ax,%S2 \; movw %0,ax \; sarw ax,15 \; movw %H0,ax
199    movw ax,%H1 \; mov %0,a \; sarw ax,15 \; movw %H0,ax \; mov %Q0,a
201    movw ax,%H1 \; sar a,%s2 \; mov %0,a \; sarw ax,15 \; movw %H0,ax \; mov %Q0,a
203    mov b,%2 \; cmp0 b \; bz $2f \; 1: \; movw ax,%H1 \; sarw ax,1 \; movw %H0,ax \; mov a,%Q1 \; rorc a,1 \; mov %Q0,a \; mov a,%q1 \; rorc a,1 \; mov %q0,a \; dec b \; bnz $1b \; 2:"
204   [(set_attr "valloc" "macax")]
;; 32-bit logical right shift; identical alternative layout to
;; ashrsi3_virt above, but zero-fills the high bits ("movw ax,#0"
;; instead of "sarw ax,15") and uses shrw instead of sarw.  Clobbers
;; X_REG; the variable-count case loops with b as the counter.
;; NOTE(review): the insn's condition, the templates for the first few
;; alternatives, and the closing lines are missing from this extract.
208 (define_insn "lshrsi3_virt"                  ;;   0  1  2-7  8   9-15  16   17-23  24   25-31 var
209   [(set (match_operand:SI 0 "nonimmediate_operand" "=v,vU,&vU,v,  &vU,  &vU, v,     &vU, v,    &vU, &vU, vU,  v,&vU,  vU,  vU, vU")
210 	(lshiftrt:SI (match_operand:SI 1 "nonimmediate_operand" "0, 0, vU, 0,  vWab, U,   0,     vU,  0,    vWab,U,   vU,  0, vU,   vU,  vU, 0")
211 		     (match_operand:SI 2 "nonmemory_operand" "M,  K, K,  Int3,Int3, Int3,Iv08,  Iv08,Is09, Is09,Is09,Iv16,Is17,Is17,Iv24,Is25, iv")))
212    (clobber (reg:HI X_REG))
218    movw ax,%H1 \; shrw ax,1 \; movw %H0,ax \; mov a,%Q1 \; rorc a,1 \; mov %Q0,a \; mov a,%q1 \; rorc a,1 \; mov %q0,a
219    movw ax,%H1 \; shrw ax,1 \; movw %H0,ax \; mov a,%Q1 \; rorc a,1 \; mov %Q0,a \; mov a,%q1 \; rorc a,1 \; mov %q0,a
221    movw ax,%1 \; shlw ax,%r2 \; mov %0,a \; mov x,%Q1 \; mov a,%H1 \; shlw ax,%r2 \; mov %Q0,a \; movw ax,%H1 \; shrw ax,%u2 \; movw %H0,ax
222    movw ax,%1 \; shlw ax,%r2 \; mov %0,a \; mov x,%Q1 \; mov a,%H1 \; shlw ax,%r2 \; mov %Q0,a \; movw ax,%H1 \; shrw ax,%u2 \; movw %H0,ax
223    movw ax,%1 \; shlw ax,%r2 \; mov %0,a \; mov a,%Q1 \; mov x,a \; mov a,%H1 \; shlw ax,%r2 \; mov %Q0,a \; movw ax,%H1 \; shrw ax,%u2 \; movw %H0,ax
225    mov x,%Q1 \; mov a,%H1 \; movw %0,ax \; movw ax,%H1 \; shrw ax,8 \; movw %H0,ax
226    mov a,%Q1 \; mov x, a \; mov a,%H1 \; movw %0,ax \; movw ax,%H1 \; shrw ax,8 \; movw %H0,ax
228    mov x,%Q1 \; mov a,%H1 \; shlw ax,%r2 \; mov %0,a \; movw ax,%H1 \; shlw ax,%r2 \; mov %Q0,a \; movw ax,%H1 \; shrw ax,%u2 \; movw %H0,ax
229    mov x,%Q1 \; mov a,%H1 \; shlw ax,%r2 \; mov %0,a \; movw ax,%H1 \; shlw ax,%r2 \; mov %Q0,a \; movw ax,%H1 \; shrw ax,%u2 \; movw %H0,ax
230    mov a,%Q1 \; mov x,a \; mov a,%H1 \; shlw ax,%r2 \; mov %0,a \; movw ax,%H1 \; shlw ax,%r2 \; mov %Q0,a \; movw ax,%H1 \; shrw ax,%u2 \; movw %H0,ax
232    movw ax,%H1 \; movw %0,ax \; movw ax,#0 \; movw %H0,ax
234    movw ax,%H1 \; shrw ax,%S2 \; movw %0,ax \; movw ax,#0 \; movw %H0,ax
235    movw ax,%H1 \; shrw ax,%S2 \; movw %0,ax \; movw ax,#0 \; movw %H0,ax
237    movw ax,%H1 \; mov %0,a \; movw ax,#0 \; movw %H0,ax \; mov %Q0,a
239    movw ax,%H1 \; shr a,%s2 \; mov %0,a \; movw ax,#0 \; movw %H0,ax \; mov %Q0,a
241    mov b,%2 \; cmp0 b \; bz $2f \; 1: \; movw ax,%H1 \; shrw ax,1 \; movw %H0,ax \; mov a,%Q1 \; rorc a,1 \; mov %Q0,a \; mov a,%q1 \; rorc a,1 \; mov %q0,a \; dec b \; bnz $1b \; 2:"
242   [(set_attr "valloc" "macax")]
;; 32-bit left shift; more alternatives than the right shifts because
;; register and memory (U) destinations need different zero-fill
;; sequences for the >=16 cases.  Clobbers X_REG; the variable-count
;; alternatives loop shifting bc/ax pairs with d as the counter.
;; NOTE(review): the insn's condition, the templates for the first few
;; alternatives, and the closing lines are missing from this extract.
;; NOTE(review): the last two alternatives test "cmp0 0xFFEFD" — a
;; fixed address rather than a register; verify this is the intended
;; virtual-register location and not a typo for "cmp0 a" / a counter
;; register.  Also note the odd "movW" capitalization in two templates.
246 (define_insn "ashlsi3_virt"                  ;;   0  1  2-7  8   9-15  16   17-23  24   25-31 var
247   [(set (match_operand:SI 0 "nonimmediate_operand" "=v,vU,&vU,v,  &vU,  &vU, v,     &vU, v,    &vU, &vU, v,   U, v,&vU, v,  U, v,  U, vWab,vU, vU")
248 	(ashift:SI (match_operand:SI 1 "nonimmediate_operand" "0, 0, vU, 0,  vWab, U,   0,     vU,  0,    vWab,U,   vU,  vU, 0, vU, vU, vU, vU, vU, 0, vWab,U")
249 		   (match_operand:SI 2 "nonmemory_operand" "M,  K, K,  Int3,Int3, Int3,Iv08,  Iv08,Is09, Is09,Is09,Iv16,Iv16,Is17,Is17,Iv24,Iv24,Is25,Is25,iv, iv, iv")))
250    (clobber (reg:HI X_REG))
256    movw ax,%1 \; shlw ax,1 \; movw %0,ax \; movw ax,%H1 \; rolwc ax,1 \; movw %H0,ax
257    movw ax,%1 \; shlw ax,1 \; movw %0,ax \; movw ax,%H1 \; rolwc ax,1 \; movw %H0,ax
259    movw ax,%H1 \; shlw ax,%u2 \; mov %E0,a \; mov x,%Q1 \; mov a, %H1 \; shlw ax,%S2 \; mov %H0,a \; movw ax,%1 \; shlw ax,%u2 \; movw %0,ax
260    movw ax,%H1 \; shlw ax,%u2 \; mov %E0,a \; mov x,%Q1 \; mov a, %H1 \; shlw ax,%S2 \; mov %H0,a \; movw ax,%1 \; shlw ax,%u2 \; movw %0,ax
261    movw ax,%H1 \; shlw ax,%u2 \; mov %E0,a \; mov a,%Q1 \; mov x,a \; mov a, %H1 \; shlw ax,%S2 \; mov %H0,a \; movw ax,%1 \; shlw ax,%u2 \; movw %0,ax
263    mov x,%Q1 \; mov a,%H1 \; movw %H0,ax \; movw ax,%1 \; shlw ax,8 \; movw %0,ax
264    mov a,%Q1 \; mov x,a \; mov a,%H1 \; movw %H0,ax \; movw ax,%1 \; shlw ax,8 \; movw %0,ax
266    mov x,%Q1 \; mov a,%H1 \; shlw ax,%s2 \; movw %H0,ax \; movw ax,%1 \; shlw ax,%s2 \; mov %H0,a \; movw ax,%1 \; shlw ax,%u2 \; movw %0,ax
267    mov x,%Q1 \; mov a,%H1 \; shlw ax,%s2 \; movw %H0,ax \; movw ax,%1 \; shlw ax,%s2 \; mov %H0,a \; movw ax,%1 \; shlw ax,%u2 \; movw %0,ax
268    mov a,%Q1 \; mov x,a \; mov a,%H1 \; shlw ax,%s2 \; movw %H0,ax \; movw ax,%1 \; shlw ax,%s2 \; mov %H0,a \; movw ax,%1 \; shlw ax,%u2 \; movw %0,ax
270    movw ax,%1 \; movw %H0,ax \; movw %0,#0
271    movw ax,%1 \; movw %H0,ax \; movw ax,#0 \; movw %0,ax
273    movw ax,%1 \; shlw ax,%S2 \; movw %H0,ax \; movw %0,#0
274    movw ax,%1 \; shlw ax,%S2 \; movw %H0,ax \; movw ax,#0 \; movw %0,ax
276    mov a,%1 \; movw %H0,ax \; mov %H0,#0 \; movw %0,#0
277    mov a,%1 \; movw %H0,ax \; movw ax,#0 \; mov %H0,a \; movW %0,ax
279    mov a,%1 \; shl a,%s2 \; movw %H0,ax \; mov %H0,#0 \; movw %0,#0
280    mov a,%1 \; shl a,%s2 \; movw %H0,ax \; movw ax,#0 \; mov %H0,a \; movW %0,ax
282    mov a,%2 \; cmp0 a \; bz $2f \; mov d,a \; movw ax,%H1 \; movw bc,%1 \; 1: \; shlw bc,1 \; rolwc ax,1 \; dec d \; bnz $1b \; movw %H0,ax \; movw ax,bc \; movw %0,ax \; 2:
283    mov a,%2 \; mov d,a \; movw ax,%H1 \; movw bc,%1 \; cmp0 0xFFEFD \; bz $2f \; 1: \; shlw bc,1 \; rolwc ax,1 \; dec d \; bnz $1b \; 2: \; movw %H0,ax \; movw ax,bc \; movw %0,ax
284    mov a,%2 \; mov d,a \; movw ax,%1 \; movw bc,ax \; movw ax,%H1 \; cmp0 0xFFEFD \; bz $2f \; 1: \; shlw bc,1 \; rolwc ax,1 \; dec d \; bnz $1b \; 2: \; movw %H0,ax \; movw ax,bc \; movw %0,ax"
285   [(set_attr "valloc" "macax")]
288 ;;---------- Branching ------------------------
;; Indirect jump through a virtual register or memory operand.
;; NOTE(review): the "(set (pc) ..." wrapper line and the output
;; template are missing from this extract; only the operand, condition
;; and attribute are visible.
290 (define_insn "*indirect_jump_virt"
292 	     (match_operand:HI 0 "nonimmediate_operand" "vm"))]
293   "rl78_virt_insns_ok ()"
295   [(set_attr "valloc" "ro1")]
;; Call with unused result.  Wab allows direct-address targets, Wcv
;; allows calls through a virtual register.
;; NOTE(review): the output-template line is missing from this extract.
298 (define_insn "*call_virt"
299   [(call (match_operand:HI 0 "memory_operand" "Wab,Wcv")
300 	 (match_operand 1 "" ""))]
301   "rl78_virt_insns_ok ()"
303   [(set_attr "valloc" "ro1")]
;; Call whose return value lands in a virtual register; same target
;; constraints (Wab/Wcv) as *call_virt above.
;; NOTE(review): the output-template line is missing from this extract.
306 (define_insn "*call_value_virt"
307   [(set (match_operand 0 "register_operand" "=v,v")
308 	(call (match_operand:HI 1 "memory_operand" "Wab,Wcv")
309 	      (match_operand 2 "" "")))]
310   "rl78_virt_insns_ok ()"
312   [(set_attr "valloc" "op1")]
;; QImode signed compare-and-branch: virtual "v.cmp" followed by a
;; virtual conditional branch (%C0 expands the comparison code).
;; Signed comparisons restrict operand 2 to nonmemory (vi).
;; NOTE(review): the else-arm of the if_then_else and the closing
;; bracket line are missing from this extract.
315 (define_insn "*cbranchqi4_virt_signed"
316   [(set (pc) (if_then_else
317 	      (match_operator 0 "rl78_cmp_operator_signed"
318 			      [(match_operand:QI 1 "general_operand" "vim")
319 			       (match_operand:QI 2 "nonmemory_operand" "vi")])
320               (label_ref (match_operand 3 "" ""))
322   "rl78_virt_insns_ok ()"
323   "v.cmp\t%1, %2\\n\tv.b%C0\t%3"
324   [(set_attr "valloc" "cmp")]
;; QImode compare-and-branch for the remaining ("real") comparison
;; operators; unlike the signed variant, operand 2 may be memory.
;; NOTE(review): the else-arm of the if_then_else and the closing
;; bracket line are missing from this extract.
327 (define_insn "*cbranchqi4_virt"
328   [(set (pc) (if_then_else
329 	      (match_operator 0 "rl78_cmp_operator_real"
330 			      [(match_operand:QI 1 "general_operand" "vim")
331 			       (match_operand:QI 2 "general_operand" "vim")])
332               (label_ref (match_operand 3 "" ""))
334   "rl78_virt_insns_ok ()"
335   "v.cmp\t%1, %2\\n\tv.b%C0\t%3"
336   [(set_attr "valloc" "cmp")]
;; HImode signed compare-and-branch via the virtual "v.cmpw";
;; structure mirrors *cbranchqi4_virt_signed.
;; NOTE(review): the else-arm of the if_then_else and the closing
;; bracket line are missing from this extract.
339 (define_insn "*cbranchhi4_virt_signed"
340   [(set (pc) (if_then_else
341 	      (match_operator 0 "rl78_cmp_operator_signed"
342 			      [(match_operand:HI 1 "general_operand" "vim")
343 			       (match_operand:HI 2 "nonmemory_operand" "vi")])
344               (label_ref (match_operand 3 "" ""))
346   "rl78_virt_insns_ok ()"
347   "v.cmpw\t%1, %2\\n\tv.b%C0\t%3"
348   [(set_attr "valloc" "cmp")]
;; HImode compare-and-branch for the "real" comparison operators;
;; operand 2 may be memory, unlike the signed variant above.
;; NOTE(review): the else-arm of the if_then_else and the closing
;; bracket line are missing from this extract.
351 (define_insn "*cbranchhi4_virt"
352   [(set (pc) (if_then_else
353 	      (match_operator 0 "rl78_cmp_operator_real"
354 			      [(match_operand:HI 1 "general_operand" "vim")
355 			       (match_operand:HI 2 "general_operand" "vim")])
356               (label_ref (match_operand 3 "" ""))
358   "rl78_virt_insns_ok ()"
359   "v.cmpw\t%1, %2\\n\tv.b%C0\t%3"
360   [(set_attr "valloc" "cmp")]
;; SImode compare-and-branch via the virtual "v.cmpd"; clobbers AX_REG
;; (the expansion needs AX as scratch) and uses the macax allocation
;; strategy.
;; NOTE(review): the else-arm of the if_then_else and the closing
;; bracket line are missing from this extract.
363 (define_insn "*cbranchsi4_virt"
364   [(set (pc) (if_then_else
365 	      (match_operator 0 "rl78_cmp_operator"
366 			      [(match_operand:SI 1 "general_operand" "vim")
367 			       (match_operand:SI 2 "nonmemory_operand" "vi")])
368               (label_ref (match_operand 3 "" ""))
370    (clobber (reg:HI AX_REG))
372   "rl78_virt_insns_ok ()"
373   "v.cmpd\t%1, %2\\n\tv.b%C0\t%3"
374   [(set_attr "valloc" "macax")]
377 ;;---------- Peepholes ------------------------
;; Peephole: when rl78_peep_movhi_p approves, merge two adjacent
;; QImode moves into one HImode move (rl78_setup_peep_movhi builds the
;; combined operands).
;; NOTE(review): the "(define_peephole2" header line and the
;; replacement pattern are missing from this extract.
380   [(set (match_operand:QI 0 "" "")
381 	(match_operand:QI 1 "" ""))
382    (set (match_operand:QI 2 "" "")
383 	(match_operand:QI 3 "" ""))]
384   "rl78_peep_movhi_p (operands)"
387   "rl78_setup_peep_movhi (operands);"
;; Second movhi peephole variant; the visible replacement pattern
;; routes the merged HImode move through AX_REG.
;; NOTE(review): this definition is badly truncated in the extract —
;; its "(define_peephole2" header, several pattern lines, and the
;; closing lines are missing, so the full match/replacement shape
;; cannot be determined from here.
392 	(match_operand:QI 1 "" ""))
393    (set (match_operand:QI 0 "" "")
396 	(match_operand:QI 3 "" ""))
397    (set (match_operand:QI 2 "" "")
400   "rl78_peep_movhi_p (operands)"
401   [(set (reg:HI AX_REG)
406   "rl78_setup_peep_movhi (operands);"