1 ;; Machine description for RISC-V for GNU compiler.
2 ;; Copyright (C) 2011-2023 Free Software Foundation, Inc.
3 ;; Contributed by Andrew Waterman (andrew@sifive.com).
4 ;; Based on MIPS target for GNU compiler.
6 ;; This file is part of GCC.
8 ;; GCC is free software; you can redistribute it and/or modify
9 ;; it under the terms of the GNU General Public License as published by
10 ;; the Free Software Foundation; either version 3, or (at your option)
13 ;; GCC is distributed in the hope that it will be useful,
14 ;; but WITHOUT ANY WARRANTY; without even the implied warranty of
15 ;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 ;; GNU General Public License for more details.
18 ;; You should have received a copy of the GNU General Public License
19 ;; along with GCC; see the file COPYING3. If not see
20 ;; <http://www.gnu.org/licenses/>.
23 ;; Keep this list and the one above riscv_print_operand in sync.
24 ;; The special asm out single letter directives following a '%' are:
25 ;; h -- Print the high-part relocation associated with OP, after stripping
26 ;; any outermost HIGH.
27 ;; R -- Print the low-part relocation associated with OP.
28 ;; C -- Print the integer branch condition for comparison OP.
29 ;; A -- Print the atomic operation suffix for memory model OP.
30 ;; F -- Print a FENCE if the memory model requires a release.
31 ;; z -- Print x0 if OP is zero, otherwise print OP normally.
32 ;; i -- Print i if the operand is not a register.
33 ;; S -- Print shift-index of single-bit mask OP.
34 ;; T -- Print shift-index of inverted single-bit mask OP.
35 ;; ~ -- Print w if TARGET_64BIT is true; otherwise print nothing.
37 (define_c_enum "unspec" [
38 ;; Override return address for exception handling.
41 ;; Symbolic accesses. The order of this list must match that of
42 ;; enum riscv_symbol_type in riscv-protos.h.
51 ;; High part of PC-relative address.
54 ;; Floating-point unspecs.
75 (define_c_enum "unspecv" [
76 ;; Register save and restore.
80 ;; Floating-point unspecs.
85 ;; Interrupt handler instructions.
90 ;; Blockage and synchronization.
95 ;; Stack Smash Protector
106 ;; Zihintpause unspec
115 [(RETURN_ADDR_REGNUM 1)
142 (include "predicates.md")
143 (include "constraints.md")
144 (include "iterators.md")
146 ;; ....................
150 ;; ....................
;; Whether this pattern performs a GOT access: "unset" (no GOT access),
;; "xgot_high" (presumably the high part of an XGOT sequence — confirm
;; against riscv_print_operand/legitimize code) or "load" (a GOT load,
;; which the "type"/"length" attributes below treat as a load of 8 bytes).
152 (define_attr "got" "unset,xgot_high,load"
153 (const_string "unset"))
155 ;; Classification of moves, extensions and truncations. Most values
156 ;; are as for "type" (see below) but there are also the following
157 ;; move-specific values:
159 ;; andi a single ANDI instruction
160 ;; shift_shift a shift left followed by a shift right
162 ;; This attribute is used to determine the instruction's length and
163 ;; scheduling type. For doubleword moves, the attribute always describes
164 ;; the split instructions; in some cases, it is more appropriate for the
165 ;; scheduling type to be "multi" instead.
;; "rdvlenb" marks a read of the vector VLENB CSR (see "type" below).
166 (define_attr "move_type"
167 "unknown,load,fpload,store,fpstore,mtc,mfc,move,fmove,
168 const,logical,arith,andi,shift_shift,rdvlenb"
169 (const_string "unknown"))
171 ;; Main data type used by the insn
172 (define_attr "mode" "unknown,none,QI,HI,SI,DI,TI,HF,SF,DF,TF,
173 VNx1BI,VNx2BI,VNx4BI,VNx8BI,VNx16BI,VNx32BI,VNx64BI,VNx128BI,
174 VNx1QI,VNx2QI,VNx4QI,VNx8QI,VNx16QI,VNx32QI,VNx64QI,VNx128QI,
175 VNx1HI,VNx2HI,VNx4HI,VNx8HI,VNx16HI,VNx32HI,VNx64HI,
176 VNx1SI,VNx2SI,VNx4SI,VNx8SI,VNx16SI,VNx32SI,
177 VNx1DI,VNx2DI,VNx4DI,VNx8DI,VNx16DI,
178 VNx1HF,VNx2HF,VNx4HF,VNx8HF,VNx16HF,VNx32HF,VNx64HF,
179 VNx1SF,VNx2SF,VNx4SF,VNx8SF,VNx16SF,VNx32SF,
180 VNx1DF,VNx2DF,VNx4DF,VNx8DF,VNx16DF,
181 VNx2x64QI,VNx2x32QI,VNx3x32QI,VNx4x32QI,
182 VNx2x16QI,VNx3x16QI,VNx4x16QI,VNx5x16QI,VNx6x16QI,VNx7x16QI,VNx8x16QI,
183 VNx2x8QI,VNx3x8QI,VNx4x8QI,VNx5x8QI,VNx6x8QI,VNx7x8QI,VNx8x8QI,
184 VNx2x4QI,VNx3x4QI,VNx4x4QI,VNx5x4QI,VNx6x4QI,VNx7x4QI,VNx8x4QI,
185 VNx2x2QI,VNx3x2QI,VNx4x2QI,VNx5x2QI,VNx6x2QI,VNx7x2QI,VNx8x2QI,
186 VNx2x1QI,VNx3x1QI,VNx4x1QI,VNx5x1QI,VNx6x1QI,VNx7x1QI,VNx8x1QI,
187 VNx2x32HI,VNx2x16HI,VNx3x16HI,VNx4x16HI,
188 VNx2x8HI,VNx3x8HI,VNx4x8HI,VNx5x8HI,VNx6x8HI,VNx7x8HI,VNx8x8HI,
189 VNx2x4HI,VNx3x4HI,VNx4x4HI,VNx5x4HI,VNx6x4HI,VNx7x4HI,VNx8x4HI,
190 VNx2x2HI,VNx3x2HI,VNx4x2HI,VNx5x2HI,VNx6x2HI,VNx7x2HI,VNx8x2HI,
191 VNx2x1HI,VNx3x1HI,VNx4x1HI,VNx5x1HI,VNx6x1HI,VNx7x1HI,VNx8x1HI,
192 VNx2x16SI,VNx2x8SI,VNx3x8SI,VNx4x8SI,
193 VNx2x4SI,VNx3x4SI,VNx4x4SI,VNx5x4SI,VNx6x4SI,VNx7x4SI,VNx8x4SI,
194 VNx2x2SI,VNx3x2SI,VNx4x2SI,VNx5x2SI,VNx6x2SI,VNx7x2SI,VNx8x2SI,
195 VNx2x1SI,VNx3x1SI,VNx4x1SI,VNx5x1SI,VNx6x1SI,VNx7x1SI,VNx8x1SI,
196 VNx2x16SF,VNx2x8SF,VNx3x8SF,VNx4x8SF,
197 VNx2x4SF,VNx3x4SF,VNx4x4SF,VNx5x4SF,VNx6x4SF,VNx7x4SF,VNx8x4SF,
198 VNx2x2SF,VNx3x2SF,VNx4x2SF,VNx5x2SF,VNx6x2SF,VNx7x2SF,VNx8x2SF,
199 VNx2x1SF,VNx3x1SF,VNx4x1SF,VNx5x1SF,VNx6x1SF,VNx7x1SF,VNx8x1SF,
200 VNx2x8DI,VNx2x4DI,VNx3x4DI,VNx4x4DI,
201 VNx2x2DI,VNx3x2DI,VNx4x2DI,VNx5x2DI,VNx6x2DI,VNx7x2DI,VNx8x2DI,
202 VNx2x1DI,VNx3x1DI,VNx4x1DI,VNx5x1DI,VNx6x1DI,VNx7x1DI,VNx8x1DI,
203 VNx2x8DF,VNx2x4DF,VNx3x4DF,VNx4x4DF,
204 VNx2x2DF,VNx3x2DF,VNx4x2DF,VNx5x2DF,VNx6x2DF,VNx7x2DF,VNx8x2DF,
205 VNx2x1DF,VNx3x1DF,VNx4x1DF,VNx5x1DF,VNx6x1DF,VNx7x1DF,VNx8x1DF"
206 (const_string "unknown"))
208 ;; True if the main data type is twice the size of a word.
;; DI/DF data is a doubleword on RV32 (!TARGET_64BIT); TI/TF data is a
;; doubleword on RV64 (TARGET_64BIT).  Used below to decide which moves
;; must be split into two word-sized operations.
209 (define_attr "dword_mode" "no,yes"
210 (cond [(and (eq_attr "mode" "DI,DF")
211 (eq (symbol_ref "TARGET_64BIT") (const_int 0)))
214 (and (eq_attr "mode" "TI,TF")
215 (ne (symbol_ref "TARGET_64BIT") (const_int 0)))
216 (const_string "yes")]
217 (const_string "no")))
;; ISA extension a pattern belongs to: the base ISA, the F (single-float)
;; extension, the D (double-float) extension, or the vector extension.
;; Consumed by "ext_enabled" below to gate pattern availability.
220 (define_attr "ext" "base,f,d,vector"
221 (const_string "base"))
223 ;; True if the extension is enabled.
;; The base ISA is always available; F, D and vector patterns are gated on
;; the corresponding target flags.
224 (define_attr "ext_enabled" "no,yes"
225 (cond [(eq_attr "ext" "base")
228 (and (eq_attr "ext" "f")
229 (match_test "TARGET_HARD_FLOAT"))
232 (and (eq_attr "ext" "d")
233 (match_test "TARGET_DOUBLE_FLOAT"))
236 (and (eq_attr "ext" "vector")
237 (match_test "TARGET_VECTOR"))
240 (const_string "no")))
242 ;; Attribute to control enable or disable instructions.
;; A pattern is disabled whenever its required extension ("ext_enabled"
;; above) is not available on the target.
243 (define_attr "enabled" "no,yes"
244 (cond [(eq_attr "ext_enabled" "no")
246 (const_string "yes")))
248 ;; Classification of each insn.
249 ;; branch conditional branch
250 ;; jump unconditional jump
251 ;; call unconditional call
252 ;; load load instruction(s)
253 ;; fpload floating point load
254 ;; store store instruction(s)
255 ;; fpstore floating point store
256 ;; mtc transfer to coprocessor
257 ;; mfc transfer from coprocessor
258 ;; const load constant
259 ;; arith integer arithmetic instructions
260 ;; logical integer logical instructions
261 ;; shift integer shift instructions
262 ;; slt set less than instructions
263 ;; imul integer multiply
264 ;; idiv integer divide
265 ;; move integer register move (addi rd, rs1, 0)
266 ;; fmove floating point register move
267 ;; fadd floating point add/subtract
268 ;; fmul floating point multiply
269 ;; fmadd floating point multiply-add
270 ;; fdiv floating point divide
271 ;; fcmp floating point compare
272 ;; fcvt floating point convert
273 ;; fsqrt floating point square root
274 ;; multi multiword sequence (or user asm statements)
275 ;; auipc integer addition to PC
276 ;; sfb_alu SFB ALU instruction
278 ;; ghost an instruction that produces no real code
279 ;; bitmanip bit manipulation instructions
280 ;; clmul clmul, clmulh, clmulr
281 ;; rotate rotation instructions
282 ;; atomic atomic instructions
283 ;; condmove conditional moves
284 ;; crypto cryptography instructions
285 ;; Classification of RVV instructions which will be added to each RVV .md pattern and used by scheduler.
286 ;; rdvlenb vector byte length vlenb csrr read
287 ;; rdvl vector length vl csrr read
288 ;; wrvxrm vector fixed-point rounding mode write
289 ;; vsetvl       vector configuration-setting instructions
290 ;; 7. Vector Loads and Stores
291 ;; vlde vector unit-stride load instructions
292 ;; vste vector unit-stride store instructions
293 ;; vldm vector unit-stride mask load instructions
294 ;; vstm vector unit-stride mask store instructions
295 ;; vlds vector strided load instructions
296 ;; vsts vector strided store instructions
297 ;; vldux vector unordered indexed load instructions
298 ;; vldox vector ordered indexed load instructions
299 ;; vstux vector unordered indexed store instructions
300 ;; vstox vector ordered indexed store instructions
301 ;; vldff vector unit-stride fault-only-first load instructions
302 ;; vldr vector whole register load instructions
303 ;; vstr vector whole register store instructions
304 ;; vlsegde vector segment unit-stride load instructions
305 ;; vssegte vector segment unit-stride store instructions
306 ;; vlsegds vector segment strided load instructions
307 ;; vssegts vector segment strided store instructions
308 ;; vlsegdux vector segment unordered indexed load instructions
309 ;; vlsegdox vector segment ordered indexed load instructions
310 ;; vssegtux vector segment unordered indexed store instructions
311 ;; vssegtox vector segment ordered indexed store instructions
312 ;; vlsegdff vector segment unit-stride fault-only-first load instructions
313 ;; 11. Vector integer arithmetic instructions
314 ;; vialu        vector single-width integer add and subtract and logical instructions
315 ;; viwalu vector widening integer add/subtract
316 ;; vext vector integer extension
317 ;; vicalu vector arithmetic with carry or borrow instructions
318 ;; vshift vector single-width bit shift instructions
319 ;; vnshift vector narrowing integer shift instructions
320 ;; viminmax vector integer min/max instructions
321 ;; vicmp vector integer comparison instructions
322 ;; vimul vector single-width integer multiply instructions
323 ;; vidiv vector single-width integer divide instructions
324 ;; viwmul vector widening integer multiply instructions
325 ;; vimuladd vector single-width integer multiply-add instructions
326 ;; viwmuladd vector widening integer multiply-add instructions
327 ;; vimerge vector integer merge instructions
328 ;; vimov vector integer move vector instructions
329 ;; 12. Vector fixed-point arithmetic instructions
330 ;; vsalu vector single-width saturating add and subtract and logical instructions
331 ;; vaalu vector single-width averaging add and subtract and logical instructions
332 ;; vsmul vector single-width fractional multiply with rounding and saturation instructions
333 ;; vsshift vector single-width scaling shift instructions
334 ;; vnclip vector narrowing fixed-point clip instructions
335 ;; 13. Vector floating-point instructions
336 ;; vfalu vector single-width floating-point add/subtract instructions
337 ;; vfwalu vector widening floating-point add/subtract instructions
338 ;; vfmul vector single-width floating-point multiply instructions
339 ;; vfdiv vector single-width floating-point divide instructions
340 ;; vfwmul vector widening floating-point multiply instructions
341 ;; vfmuladd vector single-width floating-point multiply-add instructions
342 ;; vfwmuladd vector widening floating-point multiply-add instructions
343 ;; vfsqrt vector floating-point square-root instructions
344 ;; vfrecp vector floating-point reciprocal square-root instructions
345 ;; vfminmax vector floating-point min/max instructions
346 ;; vfcmp vector floating-point comparison instructions
347 ;; vfsgnj vector floating-point sign-injection instructions
348 ;; vfclass vector floating-point classify instruction
349 ;; vfmerge vector floating-point merge instruction
350 ;; vfmov vector floating-point move instruction
351 ;; vfcvtitof vector single-width integer to floating-point instruction
352 ;; vfcvtftoi vector single-width floating-point to integer instruction
353 ;; vfwcvtitof vector widening integer to floating-point instruction
354 ;; vfwcvtftoi vector widening floating-point to integer instruction
355 ;; vfwcvtftof vector widening floating-point to floating-point instruction
356 ;; vfncvtitof vector narrowing integer to floating-point instruction
357 ;; vfncvtftoi vector narrowing floating-point to integer instruction
358 ;; vfncvtftof vector narrowing floating-point to floating-point instruction
359 ;; 14. Vector reduction operations
360 ;; vired vector single-width integer reduction instructions
361 ;; viwred vector widening integer reduction instructions
362 ;; vfredu vector single-width floating-point un-ordered reduction instruction
363 ;; vfredo vector single-width floating-point ordered reduction instruction
364 ;; vfwredu vector widening floating-point un-ordered reduction instruction
365 ;; vfwredo vector widening floating-point ordered reduction instruction
366 ;; 15. Vector mask instructions
367 ;; vmalu vector mask-register logical instructions
368 ;; vmpop vector mask population count
369 ;; vmffs vector find-first-set mask bit
370 ;; vmsfs vector set mask bit
371 ;; vmiota vector iota
372 ;; vmidx vector element index instruction
373 ;; 16. Vector permutation instructions
374 ;; vimovvx integer scalar move instructions
375 ;; vimovxv integer scalar move instructions
376 ;; vfmovvf floating-point scalar move instructions
377 ;; vfmovfv floating-point scalar move instructions
378 ;; vslideup vector slide instructions
379 ;; vslidedown vector slide instructions
380 ;; vislide1up vector slide instructions
381 ;; vislide1down vector slide instructions
382 ;; vfslide1up vector slide instructions
383 ;; vfslide1down vector slide instructions
384 ;; vgather vector register gather instructions
385 ;; vcompress vector compress instruction
386 ;; vmov whole vector register move
388 "unknown,branch,jump,call,load,fpload,store,fpstore,
389 mtc,mfc,const,arith,logical,shift,slt,imul,idiv,move,fmove,fadd,fmul,
390 fmadd,fdiv,fcmp,fcvt,fsqrt,multi,auipc,sfb_alu,nop,ghost,bitmanip,rotate,
391 clmul,min,max,minu,maxu,clz,ctz,cpop,
392 atomic,condmove,crypto,rdvlenb,rdvl,wrvxrm,vsetvl,vlde,vste,vldm,vstm,vlds,vsts,
393 vldux,vldox,vstux,vstox,vldff,vldr,vstr,
394 vlsegde,vssegte,vlsegds,vssegts,vlsegdux,vlsegdox,vssegtux,vssegtox,vlsegdff,
395 vialu,viwalu,vext,vicalu,vshift,vnshift,vicmp,viminmax,
396 vimul,vidiv,viwmul,vimuladd,viwmuladd,vimerge,vimov,
397 vsalu,vaalu,vsmul,vsshift,vnclip,
398 vfalu,vfwalu,vfmul,vfdiv,vfwmul,vfmuladd,vfwmuladd,vfsqrt,vfrecp,
399 vfcmp,vfminmax,vfsgnj,vfclass,vfmerge,vfmov,
400 vfcvtitof,vfcvtftoi,vfwcvtitof,vfwcvtftoi,
401 vfwcvtftof,vfncvtitof,vfncvtftoi,vfncvtftof,
402 vired,viwred,vfredu,vfredo,vfwredu,vfwredo,
403 vmalu,vmpop,vmffs,vmsfs,vmiota,vmidx,vimovvx,vimovxv,vfmovvf,vfmovfv,
404 vslideup,vslidedown,vislide1up,vislide1down,vfslide1up,vfslide1down,
405 vgather,vcompress,vmov"
406 (cond [(eq_attr "got" "load") (const_string "load")
408 ;; If a doubleword move uses these expensive instructions,
409 ;; it is usually better to schedule them in the same way
410 ;; as the singleword form, rather than as "multi".
411 (eq_attr "move_type" "load") (const_string "load")
412 (eq_attr "move_type" "fpload") (const_string "fpload")
413 (eq_attr "move_type" "store") (const_string "store")
414 (eq_attr "move_type" "fpstore") (const_string "fpstore")
415 (eq_attr "move_type" "mtc") (const_string "mtc")
416 (eq_attr "move_type" "mfc") (const_string "mfc")
418 ;; These types of move are always single insns.
419 (eq_attr "move_type" "fmove") (const_string "fmove")
420 (eq_attr "move_type" "arith") (const_string "arith")
421 (eq_attr "move_type" "logical") (const_string "logical")
422 (eq_attr "move_type" "andi") (const_string "logical")
424 ;; These types of move are always split.
425 (eq_attr "move_type" "shift_shift")
426 (const_string "multi")
428 ;; These types of move are split for doubleword modes only.
429 (and (eq_attr "move_type" "move,const")
430 (eq_attr "dword_mode" "yes"))
431 (const_string "multi")
432 (eq_attr "move_type" "move") (const_string "move")
433 (eq_attr "move_type" "const") (const_string "const")
434 (eq_attr "move_type" "rdvlenb") (const_string "rdvlenb")]
435 (const_string "unknown")))
437 ;; Length of instruction in bytes.
438 (define_attr "length" ""
440 ;; Branches further than +/- 4 KiB require two instructions.
441 (eq_attr "type" "branch")
442 (if_then_else (and (le (minus (match_dup 0) (pc)) (const_int 4088))
443 (le (minus (pc) (match_dup 0)) (const_int 4092)))
447 ;; Conservatively assume calls take two instructions (AUIPC + JALR).
448 ;; The linker will opportunistically relax the sequence to JAL.
449 (eq_attr "type" "call") (const_int 8)
451 ;; "Ghost" instructions occupy no space.
452 (eq_attr "type" "ghost") (const_int 0)
454 (eq_attr "got" "load") (const_int 8)
456 ;; SHIFT_SHIFTs are decomposed into two separate instructions.
457 (eq_attr "move_type" "shift_shift")
460 ;; Check for doubleword moves that are decomposed into two
462 (and (eq_attr "move_type" "mtc,mfc,move")
463 (eq_attr "dword_mode" "yes"))
466 ;; Doubleword CONST{,N} moves are split into two word
468 (and (eq_attr "move_type" "const")
469 (eq_attr "dword_mode" "yes"))
470 (symbol_ref "riscv_split_const_insns (operands[1]) * 4")
472 ;; Otherwise, constants, loads and stores are handled by external
474 (eq_attr "move_type" "load,fpload")
475 (symbol_ref "riscv_load_store_insns (operands[1], insn) * 4")
476 (eq_attr "move_type" "store,fpstore")
477 (symbol_ref "riscv_load_store_insns (operands[0], insn) * 4")
480 ;; Is copying of this instruction disallowed?
;; Default "no"; set to "yes" on individual patterns — presumably ones
;; whose duplication would be unsafe (confirm at the use sites).
481 (define_attr "cannot_copy" "no,yes" (const_string "no"))
483 ;; Microarchitectures we know how to tune for.
484 ;; Keep this in sync with enum riscv_microarchitecture.
487 (const (symbol_ref "((enum attr_tune) riscv_microarchitecture)")))
489 ;; Describe a user's asm statement.
;; Inline asm may expand to an arbitrary multiword sequence, so classify
;; it as "multi" (see the "type" attribute comment above).
490 (define_asm_attributes
491 [(set_attr "type" "multi")])
493 ;; Ghost instructions produce no real code and introduce no hazards.
494 ;; They exist purely to express an effect on dataflow.
495 (define_insn_reservation "ghost" 0
496 (eq_attr "type" "ghost")
500 ;; ....................
504 ;; ....................
;; Floating-point addition: emits fadd.<fmt> rd,rs1,rs2 for each mode in
;; the ANYF iterator.  Available with hard float or Zfinx (FP operations
;; on integer registers; the "f" constraint maps accordingly).
507 (define_insn "add<mode>3"
508 [(set (match_operand:ANYF 0 "register_operand" "=f")
509 (plus:ANYF (match_operand:ANYF 1 "register_operand" " f")
510 (match_operand:ANYF 2 "register_operand" " f")))]
511 "TARGET_HARD_FLOAT || TARGET_ZFINX"
512 "fadd.<fmt>\t%0,%1,%2"
513 [(set_attr "type" "fadd")
514 (set_attr "mode" "<UNITMODE>")])
516 (define_insn "*addsi3"
517 [(set (match_operand:SI 0 "register_operand" "=r,r")
518 (plus:SI (match_operand:SI 1 "register_operand" " r,r")
519 (match_operand:SI 2 "arith_operand" " r,I")))]
522 [(set_attr "type" "arith")
523 (set_attr "mode" "SI")])
525 (define_expand "addsi3"
526 [(set (match_operand:SI 0 "register_operand" "=r,r")
527 (plus:SI (match_operand:SI 1 "register_operand" " r,r")
528 (match_operand:SI 2 "arith_operand" " r,I")))]
533 rtx t = gen_reg_rtx (DImode);
534 emit_insn (gen_addsi3_extended (t, operands[1], operands[2]));
535 t = gen_lowpart (SImode, t);
536 SUBREG_PROMOTED_VAR_P (t) = 1;
537 SUBREG_PROMOTED_SET (t, SRP_SIGNED);
538 emit_move_insn (operands[0], t);
543 (define_insn "adddi3"
544 [(set (match_operand:DI 0 "register_operand" "=r,r")
545 (plus:DI (match_operand:DI 1 "register_operand" " r,r")
546 (match_operand:DI 2 "arith_operand" " r,I")))]
549 [(set_attr "type" "arith")
550 (set_attr "mode" "DI")])
552 (define_expand "addv<mode>4"
553 [(set (match_operand:GPR 0 "register_operand" "=r,r")
554 (plus:GPR (match_operand:GPR 1 "register_operand" " r,r")
555 (match_operand:GPR 2 "arith_operand" " r,I")))
556 (label_ref (match_operand 3 "" ""))]
559 if (TARGET_64BIT && <MODE>mode == SImode)
561 rtx t3 = gen_reg_rtx (DImode);
562 rtx t4 = gen_reg_rtx (DImode);
563 rtx t5 = gen_reg_rtx (DImode);
564 rtx t6 = gen_reg_rtx (DImode);
566 riscv_emit_binary (PLUS, operands[0], operands[1], operands[2]);
567 if (GET_CODE (operands[1]) != CONST_INT)
568 emit_insn (gen_extend_insn (t4, operands[1], DImode, SImode, 0));
571 if (GET_CODE (operands[2]) != CONST_INT)
572 emit_insn (gen_extend_insn (t5, operands[2], DImode, SImode, 0));
575 emit_insn (gen_adddi3 (t3, t4, t5));
576 emit_insn (gen_extend_insn (t6, operands[0], DImode, SImode, 0));
578 riscv_expand_conditional_branch (operands[3], NE, t6, t3);
582 rtx t3 = gen_reg_rtx (<MODE>mode);
583 rtx t4 = gen_reg_rtx (<MODE>mode);
585 emit_insn (gen_add3_insn (operands[0], operands[1], operands[2]));
586 rtx cmp1 = gen_rtx_LT (<MODE>mode, operands[2], const0_rtx);
587 emit_insn (gen_cstore<mode>4 (t3, cmp1, operands[2], const0_rtx));
588 rtx cmp2 = gen_rtx_LT (<MODE>mode, operands[0], operands[1]);
590 emit_insn (gen_cstore<mode>4 (t4, cmp2, operands[0], operands[1]));
591 riscv_expand_conditional_branch (operands[3], NE, t3, t4);
596 (define_expand "uaddv<mode>4"
597 [(set (match_operand:GPR 0 "register_operand" "=r,r")
598 (plus:GPR (match_operand:GPR 1 "register_operand" " r,r")
599 (match_operand:GPR 2 "arith_operand" " r,I")))
600 (label_ref (match_operand 3 "" ""))]
603 if (TARGET_64BIT && <MODE>mode == SImode)
605 rtx t3 = gen_reg_rtx (DImode);
606 rtx t4 = gen_reg_rtx (DImode);
608 if (GET_CODE (operands[1]) != CONST_INT)
609 emit_insn (gen_extend_insn (t3, operands[1], DImode, SImode, 0));
612 riscv_emit_binary (PLUS, operands[0], operands[1], operands[2]);
613 emit_insn (gen_extend_insn (t4, operands[0], DImode, SImode, 0));
615 riscv_expand_conditional_branch (operands[3], LTU, t4, t3);
619 emit_insn (gen_add3_insn (operands[0], operands[1], operands[2]));
620 riscv_expand_conditional_branch (operands[3], LTU, operands[0],
627 (define_insn "addsi3_extended"
628 [(set (match_operand:DI 0 "register_operand" "=r,r")
630 (plus:SI (match_operand:SI 1 "register_operand" " r,r")
631 (match_operand:SI 2 "arith_operand" " r,I"))))]
634 [(set_attr "type" "arith")
635 (set_attr "mode" "SI")])
637 (define_insn "*addsi3_extended2"
638 [(set (match_operand:DI 0 "register_operand" "=r,r")
640 (match_operator:SI 3 "subreg_lowpart_operator"
641 [(plus:DI (match_operand:DI 1 "register_operand" " r,r")
642 (match_operand:DI 2 "arith_operand" " r,I"))])))]
645 [(set_attr "type" "arith")
646 (set_attr "mode" "SI")])
649 ;; ....................
653 ;; ....................
;; Floating-point subtraction: emits fsub.<fmt> rd,rs1,rs2 for each mode
;; in the ANYF iterator.  Shares the "fadd" scheduling type with addition.
656 (define_insn "sub<mode>3"
657 [(set (match_operand:ANYF 0 "register_operand" "=f")
658 (minus:ANYF (match_operand:ANYF 1 "register_operand" " f")
659 (match_operand:ANYF 2 "register_operand" " f")))]
660 "TARGET_HARD_FLOAT || TARGET_ZFINX"
661 "fsub.<fmt>\t%0,%1,%2"
662 [(set_attr "type" "fadd")
663 (set_attr "mode" "<UNITMODE>")])
665 (define_insn "subdi3"
666 [(set (match_operand:DI 0 "register_operand" "= r")
667 (minus:DI (match_operand:DI 1 "reg_or_0_operand" " rJ")
668 (match_operand:DI 2 "register_operand" " r")))]
671 [(set_attr "type" "arith")
672 (set_attr "mode" "DI")])
674 (define_insn "*subsi3"
675 [(set (match_operand:SI 0 "register_operand" "= r")
676 (minus:SI (match_operand:SI 1 "reg_or_0_operand" " rJ")
677 (match_operand:SI 2 "register_operand" " r")))]
680 [(set_attr "type" "arith")
681 (set_attr "mode" "SI")])
683 (define_expand "subsi3"
684 [(set (match_operand:SI 0 "register_operand" "= r")
685 (minus:SI (match_operand:SI 1 "reg_or_0_operand" " rJ")
686 (match_operand:SI 2 "register_operand" " r")))]
691 rtx t = gen_reg_rtx (DImode);
692 emit_insn (gen_subsi3_extended (t, operands[1], operands[2]));
693 t = gen_lowpart (SImode, t);
694 SUBREG_PROMOTED_VAR_P (t) = 1;
695 SUBREG_PROMOTED_SET (t, SRP_SIGNED);
696 emit_move_insn (operands[0], t);
701 (define_expand "subv<mode>4"
702 [(set (match_operand:GPR 0 "register_operand" "= r")
703 (minus:GPR (match_operand:GPR 1 "reg_or_0_operand" " rJ")
704 (match_operand:GPR 2 "register_operand" " r")))
705 (label_ref (match_operand 3 "" ""))]
708 if (TARGET_64BIT && <MODE>mode == SImode)
710 rtx t3 = gen_reg_rtx (DImode);
711 rtx t4 = gen_reg_rtx (DImode);
712 rtx t5 = gen_reg_rtx (DImode);
713 rtx t6 = gen_reg_rtx (DImode);
715 riscv_emit_binary (MINUS, operands[0], operands[1], operands[2]);
716 if (GET_CODE (operands[1]) != CONST_INT)
717 emit_insn (gen_extend_insn (t4, operands[1], DImode, SImode, 0));
720 if (GET_CODE (operands[2]) != CONST_INT)
721 emit_insn (gen_extend_insn (t5, operands[2], DImode, SImode, 0));
724 emit_insn (gen_subdi3 (t3, t4, t5));
725 emit_insn (gen_extend_insn (t6, operands[0], DImode, SImode, 0));
727 riscv_expand_conditional_branch (operands[3], NE, t6, t3);
731 rtx t3 = gen_reg_rtx (<MODE>mode);
732 rtx t4 = gen_reg_rtx (<MODE>mode);
734 emit_insn (gen_sub3_insn (operands[0], operands[1], operands[2]));
736 rtx cmp1 = gen_rtx_LT (<MODE>mode, operands[2], const0_rtx);
737 emit_insn (gen_cstore<mode>4 (t3, cmp1, operands[2], const0_rtx));
739 rtx cmp2 = gen_rtx_LT (<MODE>mode, operands[1], operands[0]);
740 emit_insn (gen_cstore<mode>4 (t4, cmp2, operands[1], operands[0]));
742 riscv_expand_conditional_branch (operands[3], NE, t3, t4);
748 (define_expand "usubv<mode>4"
749 [(set (match_operand:GPR 0 "register_operand" "= r")
750 (minus:GPR (match_operand:GPR 1 "reg_or_0_operand" " rJ")
751 (match_operand:GPR 2 "register_operand" " r")))
752 (label_ref (match_operand 3 "" ""))]
755 if (TARGET_64BIT && <MODE>mode == SImode)
757 rtx t3 = gen_reg_rtx (DImode);
758 rtx t4 = gen_reg_rtx (DImode);
760 if (GET_CODE (operands[1]) != CONST_INT)
761 emit_insn (gen_extend_insn (t3, operands[1], DImode, SImode, 0));
764 riscv_emit_binary (MINUS, operands[0], operands[1], operands[2]);
765 emit_insn (gen_extend_insn (t4, operands[0], DImode, SImode, 0));
767 riscv_expand_conditional_branch (operands[3], LTU, t3, t4);
771 emit_insn (gen_sub3_insn (operands[0], operands[1], operands[2]));
772 riscv_expand_conditional_branch (operands[3], LTU, operands[1],
780 (define_insn "subsi3_extended"
781 [(set (match_operand:DI 0 "register_operand" "= r")
783 (minus:SI (match_operand:SI 1 "reg_or_0_operand" " rJ")
784 (match_operand:SI 2 "register_operand" " r"))))]
787 [(set_attr "type" "arith")
788 (set_attr "mode" "SI")])
790 (define_insn "*subsi3_extended2"
791 [(set (match_operand:DI 0 "register_operand" "= r")
793 (match_operator:SI 3 "subreg_lowpart_operator"
794 [(minus:DI (match_operand:DI 1 "reg_or_0_operand" " rJ")
795 (match_operand:DI 2 "register_operand" " r"))])))]
798 [(set_attr "type" "arith")
799 (set_attr "mode" "SI")])
801 (define_insn "negdi2"
802 [(set (match_operand:DI 0 "register_operand" "=r")
803 (neg:DI (match_operand:DI 1 "register_operand" " r")))]
806 [(set_attr "type" "arith")
807 (set_attr "mode" "DI")])
809 (define_insn "*negsi2"
810 [(set (match_operand:SI 0 "register_operand" "=r")
811 (neg:SI (match_operand:SI 1 "register_operand" " r")))]
814 [(set_attr "type" "arith")
815 (set_attr "mode" "SI")])
817 (define_expand "negsi2"
818 [(set (match_operand:SI 0 "register_operand" "=r")
819 (neg:SI (match_operand:SI 1 "register_operand" " r")))]
824 rtx t = gen_reg_rtx (DImode);
825 emit_insn (gen_negsi2_extended (t, operands[1]));
826 t = gen_lowpart (SImode, t);
827 SUBREG_PROMOTED_VAR_P (t) = 1;
828 SUBREG_PROMOTED_SET (t, SRP_SIGNED);
829 emit_move_insn (operands[0], t);
834 (define_insn "negsi2_extended"
835 [(set (match_operand:DI 0 "register_operand" "=r")
837 (neg:SI (match_operand:SI 1 "register_operand" " r"))))]
840 [(set_attr "type" "arith")
841 (set_attr "mode" "SI")])
843 (define_insn "*negsi2_extended2"
844 [(set (match_operand:DI 0 "register_operand" "=r")
846 (match_operator:SI 2 "subreg_lowpart_operator"
847 [(neg:DI (match_operand:DI 1 "register_operand" " r"))])))]
850 [(set_attr "type" "arith")
851 (set_attr "mode" "SI")])
854 ;; ....................
858 ;; ....................
;; Floating-point multiplication: emits fmul.<fmt> rd,rs1,rs2 for each
;; mode in the ANYF iterator, under hard float or Zfinx.
861 (define_insn "mul<mode>3"
862 [(set (match_operand:ANYF 0 "register_operand" "=f")
863 (mult:ANYF (match_operand:ANYF 1 "register_operand" " f")
864 (match_operand:ANYF 2 "register_operand" " f")))]
865 "TARGET_HARD_FLOAT || TARGET_ZFINX"
866 "fmul.<fmt>\t%0,%1,%2"
867 [(set_attr "type" "fmul")
868 (set_attr "mode" "<UNITMODE>")])
870 (define_insn "*mulsi3"
871 [(set (match_operand:SI 0 "register_operand" "=r")
872 (mult:SI (match_operand:SI 1 "register_operand" " r")
873 (match_operand:SI 2 "register_operand" " r")))]
874 "TARGET_ZMMUL || TARGET_MUL"
876 [(set_attr "type" "imul")
877 (set_attr "mode" "SI")])
879 (define_expand "mulsi3"
880 [(set (match_operand:SI 0 "register_operand" "=r")
881 (mult:SI (match_operand:SI 1 "register_operand" " r")
882 (match_operand:SI 2 "register_operand" " r")))]
883 "TARGET_ZMMUL || TARGET_MUL"
887 rtx t = gen_reg_rtx (DImode);
888 emit_insn (gen_mulsi3_extended (t, operands[1], operands[2]));
889 t = gen_lowpart (SImode, t);
890 SUBREG_PROMOTED_VAR_P (t) = 1;
891 SUBREG_PROMOTED_SET (t, SRP_SIGNED);
892 emit_move_insn (operands[0], t);
897 (define_insn "muldi3"
898 [(set (match_operand:DI 0 "register_operand" "=r")
899 (mult:DI (match_operand:DI 1 "register_operand" " r")
900 (match_operand:DI 2 "register_operand" " r")))]
901 "(TARGET_ZMMUL || TARGET_MUL) && TARGET_64BIT"
903 [(set_attr "type" "imul")
904 (set_attr "mode" "DI")])
906 (define_expand "mulv<mode>4"
907 [(set (match_operand:GPR 0 "register_operand" "=r")
908 (mult:GPR (match_operand:GPR 1 "register_operand" " r")
909 (match_operand:GPR 2 "register_operand" " r")))
910 (label_ref (match_operand 3 "" ""))]
911 "TARGET_ZMMUL || TARGET_MUL"
913 if (TARGET_64BIT && <MODE>mode == SImode)
915 rtx t3 = gen_reg_rtx (DImode);
916 rtx t4 = gen_reg_rtx (DImode);
917 rtx t5 = gen_reg_rtx (DImode);
918 rtx t6 = gen_reg_rtx (DImode);
920 if (GET_CODE (operands[1]) != CONST_INT)
921 emit_insn (gen_extend_insn (t4, operands[1], DImode, SImode, 0));
924 if (GET_CODE (operands[2]) != CONST_INT)
925 emit_insn (gen_extend_insn (t5, operands[2], DImode, SImode, 0));
928 emit_insn (gen_muldi3 (t3, t4, t5));
930 emit_move_insn (operands[0], gen_lowpart (SImode, t3));
931 emit_insn (gen_extend_insn (t6, operands[0], DImode, SImode, 0));
933 riscv_expand_conditional_branch (operands[3], NE, t6, t3);
937 rtx hp = gen_reg_rtx (<MODE>mode);
938 rtx lp = gen_reg_rtx (<MODE>mode);
940 emit_insn (gen_smul<mode>3_highpart (hp, operands[1], operands[2]));
941 emit_insn (gen_mul<mode>3 (operands[0], operands[1], operands[2]));
942 riscv_emit_binary (ASHIFTRT, lp, operands[0],
943 GEN_INT (BITS_PER_WORD - 1));
945 riscv_expand_conditional_branch (operands[3], NE, hp, lp);
951 (define_expand "umulv<mode>4"
952 [(set (match_operand:GPR 0 "register_operand" "=r")
953 (mult:GPR (match_operand:GPR 1 "register_operand" " r")
954 (match_operand:GPR 2 "register_operand" " r")))
955 (label_ref (match_operand 3 "" ""))]
956 "TARGET_ZMMUL || TARGET_MUL"
958 if (TARGET_64BIT && <MODE>mode == SImode)
960 rtx t3 = gen_reg_rtx (DImode);
961 rtx t4 = gen_reg_rtx (DImode);
962 rtx t5 = gen_reg_rtx (DImode);
963 rtx t6 = gen_reg_rtx (DImode);
964 rtx t7 = gen_reg_rtx (DImode);
965 rtx t8 = gen_reg_rtx (DImode);
967 if (GET_CODE (operands[1]) != CONST_INT)
968 emit_insn (gen_extend_insn (t3, operands[1], DImode, SImode, 0));
971 if (GET_CODE (operands[2]) != CONST_INT)
972 emit_insn (gen_extend_insn (t4, operands[2], DImode, SImode, 0));
976 emit_insn (gen_ashldi3 (t5, t3, GEN_INT (32)));
977 emit_insn (gen_ashldi3 (t6, t4, GEN_INT (32)));
978 emit_insn (gen_umuldi3_highpart (t7, t5, t6));
979 emit_move_insn (operands[0], gen_lowpart (SImode, t7));
980 emit_insn (gen_lshrdi3 (t8, t7, GEN_INT (32)));
982 riscv_expand_conditional_branch (operands[3], NE, t8, const0_rtx);
986 rtx hp = gen_reg_rtx (<MODE>mode);
988 emit_insn (gen_umul<mode>3_highpart (hp, operands[1], operands[2]));
989 emit_insn (gen_mul<mode>3 (operands[0], operands[1], operands[2]));
991 riscv_expand_conditional_branch (operands[3], NE, hp, const0_rtx);
;; mulsi3_extended: 32-bit multiply whose DImode destination holds the
;; widened SImode product (RV64 only; requires M or Zmmul).
;; NOTE(review): the extraction appears to have dropped a wrapping
;; extend line (orig. 999) and the output template (orig. 1003) —
;; confirm against upstream riscv.md.
997 (define_insn "mulsi3_extended"
998 [(set (match_operand:DI 0 "register_operand" "=r")
1000 (mult:SI (match_operand:SI 1 "register_operand" " r")
1001 (match_operand:SI 2 "register_operand" " r"))))]
1002 "(TARGET_ZMMUL || TARGET_MUL) && TARGET_64BIT"
1004 [(set_attr "type" "imul")
1005 (set_attr "mode" "SI")])
;; *mulsi3_extended2: combine pattern matching a DImode multiply whose
;; lowpart subreg is then widened; lets combine fold subreg+mult into the
;; same word-multiply form as mulsi3_extended.
;; NOTE(review): lines orig. 1009 and 1014 appear missing here — verify.
1007 (define_insn "*mulsi3_extended2"
1008 [(set (match_operand:DI 0 "register_operand" "=r")
1010 (match_operator:SI 3 "subreg_lowpart_operator"
1011 [(mult:DI (match_operand:DI 1 "register_operand" " r")
1012 (match_operand:DI 2 "register_operand" " r"))])))]
1013 "(TARGET_ZMMUL || TARGET_MUL) && TARGET_64BIT"
1015 [(set_attr "type" "imul")
1016 (set_attr "mode" "SI")])
1019 ;; ........................
1021 ;; MULTIPLICATION HIGH-PART
1023 ;; ........................
;; <u>mulditi3: DI x DI -> TI widening multiply (signed or unsigned via
;; any_extend).  Expanded as two machine multiplies: mul for the low
;; word, mulh/mulhu for the high word.
1027 (define_expand "<u>mulditi3"
1028 [(set (match_operand:TI 0 "register_operand")
1029 (mult:TI (any_extend:TI (match_operand:DI 1 "register_operand"))
1030 (any_extend:TI (match_operand:DI 2 "register_operand"))))]
1031 "(TARGET_ZMMUL || TARGET_MUL) && TARGET_64BIT"
1033 rtx low = gen_reg_rtx (DImode);
1034 emit_insn (gen_muldi3 (low, operands[1], operands[2]));
1036 rtx high = gen_reg_rtx (DImode);
1037 emit_insn (gen_<su>muldi3_highpart (high, operands[1], operands[2]));
;; Assemble the TImode result from the two DImode halves.
1039 emit_move_insn (gen_lowpart (DImode, operands[0]), low);
1040 emit_move_insn (gen_highpart (DImode, operands[0]), high);
;; <su>muldi3_highpart: high 64 bits of the 128-bit product (mulh/mulhu).
1044 (define_insn "<su>muldi3_highpart"
1045 [(set (match_operand:DI 0 "register_operand" "=r")
1048 (mult:TI (any_extend:TI
1049 (match_operand:DI 1 "register_operand" " r"))
1051 (match_operand:DI 2 "register_operand" " r")))
1053 "(TARGET_ZMMUL || TARGET_MUL) && TARGET_64BIT"
1055 [(set_attr "type" "imul")
1056 (set_attr "mode" "DI")])
;; usmulditi3: mixed zero-extend x sign-extend widening multiply
;; (maps to mulhsu for the high part).
1058 (define_expand "usmulditi3"
1059 [(set (match_operand:TI 0 "register_operand")
1060 (mult:TI (zero_extend:TI (match_operand:DI 1 "register_operand"))
1061 (sign_extend:TI (match_operand:DI 2 "register_operand"))))]
1062 "(TARGET_ZMMUL || TARGET_MUL) && TARGET_64BIT"
1064 rtx low = gen_reg_rtx (DImode);
1065 emit_insn (gen_muldi3 (low, operands[1], operands[2]));
1067 rtx high = gen_reg_rtx (DImode);
1068 emit_insn (gen_usmuldi3_highpart (high, operands[1], operands[2]));
1070 emit_move_insn (gen_lowpart (DImode, operands[0]), low);
1071 emit_move_insn (gen_highpart (DImode, operands[0]), high);
;; usmuldi3_highpart: unsigned x signed highpart multiply.
1075 (define_insn "usmuldi3_highpart"
1076 [(set (match_operand:DI 0 "register_operand" "=r")
1079 (mult:TI (zero_extend:TI
1080 (match_operand:DI 1 "register_operand" "r"))
1082 (match_operand:DI 2 "register_operand" " r")))
1084 "(TARGET_ZMMUL || TARGET_MUL) && TARGET_64BIT"
1086 [(set_attr "type" "imul")
1087 (set_attr "mode" "DI")])
;; <u>mulsidi3: RV32 counterpart — SI x SI -> DI widening multiply,
;; built from mul (low word) + mulh/mulhu (high word) via subwords.
1089 (define_expand "<u>mulsidi3"
1090 [(set (match_operand:DI 0 "register_operand" "=r")
1091 (mult:DI (any_extend:DI
1092 (match_operand:SI 1 "register_operand" " r"))
1094 (match_operand:SI 2 "register_operand" " r"))))]
1095 "(TARGET_ZMMUL || TARGET_MUL) && !TARGET_64BIT"
1097 rtx temp = gen_reg_rtx (SImode);
1098 riscv_emit_binary (MULT, temp, operands[1], operands[2]);
1099 emit_insn (gen_<su>mulsi3_highpart (riscv_subword (operands[0], true),
1100 operands[1], operands[2]));
1101 emit_insn (gen_movsi (riscv_subword (operands[0], false), temp));
;; <su>mulsi3_highpart: high 32 bits of 64-bit product (RV32 mulh/mulhu).
1105 (define_insn "<su>mulsi3_highpart"
1106 [(set (match_operand:SI 0 "register_operand" "=r")
1109 (mult:DI (any_extend:DI
1110 (match_operand:SI 1 "register_operand" " r"))
1112 (match_operand:SI 2 "register_operand" " r")))
1114 "(TARGET_ZMMUL || TARGET_MUL) && !TARGET_64BIT"
1116 [(set_attr "type" "imul")
1117 (set_attr "mode" "SI")])
;; usmulsidi3: RV32 mixed-signedness widening multiply (mulhsu).
1120 (define_expand "usmulsidi3"
1121 [(set (match_operand:DI 0 "register_operand" "=r")
1122 (mult:DI (zero_extend:DI
1123 (match_operand:SI 1 "register_operand" " r"))
1125 (match_operand:SI 2 "register_operand" " r"))))]
1126 "(TARGET_ZMMUL || TARGET_MUL) && !TARGET_64BIT"
1128 rtx temp = gen_reg_rtx (SImode);
1129 riscv_emit_binary (MULT, temp, operands[1], operands[2]);
1130 emit_insn (gen_usmulsi3_highpart (riscv_subword (operands[0], true),
1131 operands[1], operands[2]));
1132 emit_insn (gen_movsi (riscv_subword (operands[0], false), temp));
;; usmulsi3_highpart: unsigned x signed highpart multiply, RV32.
1136 (define_insn "usmulsi3_highpart"
1137 [(set (match_operand:SI 0 "register_operand" "=r")
1140 (mult:DI (zero_extend:DI
1141 (match_operand:SI 1 "register_operand" " r"))
1143 (match_operand:SI 2 "register_operand" " r")))
1145 "(TARGET_ZMMUL || TARGET_MUL) && !TARGET_64BIT"
1147 [(set_attr "type" "idiv")
1148 (set_attr "mode" "SI")])
1151 ;; ....................
1153 ;; DIVISION and REMAINDER
1155 ;; ....................
;; *<optab>si3: SImode div/divu/rem/remu.  %i2 emits the immediate form
;; suffix when operand 2 is not a register; %~ appends "w" on RV64.
1158 (define_insn "*<optab>si3"
1159 [(set (match_operand:SI 0 "register_operand" "=r")
1160 (any_div:SI (match_operand:SI 1 "register_operand" " r")
1161 (match_operand:SI 2 "register_operand" " r")))]
1163 "<insn>%i2%~\t%0,%1,%2"
1164 [(set_attr "type" "idiv")
1165 (set_attr "mode" "SI")])
;; <optab>si3 expander: on RV64 route through the _extended pattern and
;; mark the lowpart subreg as sign-promoted so redundant extensions of
;; the divw/remw result can be eliminated.
1167 (define_expand "<optab>si3"
1168 [(set (match_operand:SI 0 "register_operand" "=r")
1169 (any_div:SI (match_operand:SI 1 "register_operand" " r")
1170 (match_operand:SI 2 "register_operand" " r")))]
1175 rtx t = gen_reg_rtx (DImode);
1176 emit_insn (gen_<optab>si3_extended (t, operands[1], operands[2]));
1177 t = gen_lowpart (SImode, t);
1178 SUBREG_PROMOTED_VAR_P (t) = 1;
1179 SUBREG_PROMOTED_SET (t, SRP_SIGNED);
1180 emit_move_insn (operands[0], t);
;; <optab>di3: DImode div/divu/rem/remu on RV64.
1185 (define_insn "<optab>di3"
1186 [(set (match_operand:DI 0 "register_operand" "=r")
1187 (any_div:DI (match_operand:DI 1 "register_operand" " r")
1188 (match_operand:DI 2 "register_operand" " r")))]
1189 "TARGET_DIV && TARGET_64BIT"
1190 "<insn>%i2\t%0,%1,%2"
1191 [(set_attr "type" "idiv")
1192 (set_attr "mode" "DI")])
;; <u>divmod<mode>4: combined quotient+remainder.  RISC-V has no single
;; divmod insn, so compute rem as n - (n/d)*d; gated on the uarch hook
;; riscv_use_divmod_expander.
1194 (define_expand "<u>divmod<mode>4"
1196 [(set (match_operand:GPR 0 "register_operand")
1197 (only_div:GPR (match_operand:GPR 1 "register_operand")
1198 (match_operand:GPR 2 "register_operand")))
1199 (set (match_operand:GPR 3 "register_operand")
1200 (<paired_mod>:GPR (match_dup 1) (match_dup 2)))])]
1201 "TARGET_DIV && riscv_use_divmod_expander ()"
1203 rtx tmp = gen_reg_rtx (<MODE>mode);
1204 emit_insn (gen_<u>div<GPR:mode>3 (operands[0], operands[1], operands[2]));
1205 emit_insn (gen_mul<GPR:mode>3 (tmp, operands[0], operands[2]));
1206 emit_insn (gen_sub<GPR:mode>3 (operands[3], operands[1], tmp));
;; <optab>si3_extended: 32-bit divide on RV64 producing the
;; sign-extended result in a DImode register (divw/remw semantics).
1210 (define_insn "<optab>si3_extended"
1211 [(set (match_operand:DI 0 "register_operand" "=r")
1213 (any_div:SI (match_operand:SI 1 "register_operand" " r")
1214 (match_operand:SI 2 "register_operand" " r"))))]
1215 "TARGET_DIV && TARGET_64BIT"
1216 "<insn>%i2w\t%0,%1,%2"
1217 [(set_attr "type" "idiv")
1218 (set_attr "mode" "DI")])
;; div<mode>3: floating-point divide (fdiv.s/fdiv.d/...); needs the
;; FDIV sub-feature in addition to hard float or Zfinx.
1220 (define_insn "div<mode>3"
1221 [(set (match_operand:ANYF 0 "register_operand" "=f")
1222 (div:ANYF (match_operand:ANYF 1 "register_operand" " f")
1223 (match_operand:ANYF 2 "register_operand" " f")))]
1224 "(TARGET_HARD_FLOAT || TARGET_ZFINX) && TARGET_FDIV"
1225 "fdiv.<fmt>\t%0,%1,%2"
1226 [(set_attr "type" "fdiv")
1227 (set_attr "mode" "<UNITMODE>")])
1230 ;; ....................
1234 ;; ....................
;; sqrt<mode>2: floating-point square root (fsqrt.<fmt>); shares the
;; FDIV gate with division.
1236 (define_insn "sqrt<mode>2"
1237 [(set (match_operand:ANYF 0 "register_operand" "=f")
1238 (sqrt:ANYF (match_operand:ANYF 1 "register_operand" " f")))]
1239 "(TARGET_HARD_FLOAT || TARGET_ZFINX) && TARGET_FDIV"
1241 return "fsqrt.<fmt>\t%0,%1";
1243 [(set_attr "type" "fsqrt")
1244 (set_attr "mode" "<UNITMODE>")])
1246 ;; Floating point multiply accumulate instructions.
;; fma<mode>4: fused a*b + c -> fmadd.
1249 (define_insn "fma<mode>4"
1250 [(set (match_operand:ANYF 0 "register_operand" "=f")
1251 (fma:ANYF (match_operand:ANYF 1 "register_operand" " f")
1252 (match_operand:ANYF 2 "register_operand" " f")
1253 (match_operand:ANYF 3 "register_operand" " f")))]
1254 "TARGET_HARD_FLOAT || TARGET_ZFINX"
1255 "fmadd.<fmt>\t%0,%1,%2,%3"
1256 [(set_attr "type" "fmadd")
1257 (set_attr "mode" "<UNITMODE>")])
;; fms<mode>4: fused a*b - c -> fmsub.
1260 (define_insn "fms<mode>4"
1261 [(set (match_operand:ANYF 0 "register_operand" "=f")
1262 (fma:ANYF (match_operand:ANYF 1 "register_operand" " f")
1263 (match_operand:ANYF 2 "register_operand" " f")
1264 (neg:ANYF (match_operand:ANYF 3 "register_operand" " f"))))]
1265 "TARGET_HARD_FLOAT || TARGET_ZFINX"
1266 "fmsub.<fmt>\t%0,%1,%2,%3"
1267 [(set_attr "type" "fmadd")
1268 (set_attr "mode" "<UNITMODE>")])
;; fnms<mode>4: fused (-a)*b - c -> fnmadd.
1271 (define_insn "fnms<mode>4"
1272 [(set (match_operand:ANYF 0 "register_operand" "=f")
1274 (neg:ANYF (match_operand:ANYF 1 "register_operand" " f"))
1275 (match_operand:ANYF 2 "register_operand" " f")
1276 (neg:ANYF (match_operand:ANYF 3 "register_operand" " f"))))]
1277 "TARGET_HARD_FLOAT || TARGET_ZFINX"
1278 "fnmadd.<fmt>\t%0,%1,%2,%3"
1279 [(set_attr "type" "fmadd")
1280 (set_attr "mode" "<UNITMODE>")])
;; fnma<mode>4: fused (-a)*b + c -> fnmsub.
1283 (define_insn "fnma<mode>4"
1284 [(set (match_operand:ANYF 0 "register_operand" "=f")
1286 (neg:ANYF (match_operand:ANYF 1 "register_operand" " f"))
1287 (match_operand:ANYF 2 "register_operand" " f")
1288 (match_operand:ANYF 3 "register_operand" " f")))]
1289 "TARGET_HARD_FLOAT || TARGET_ZFINX"
1290 "fnmsub.<fmt>\t%0,%1,%2,%3"
1291 [(set_attr "type" "fmadd")
1292 (set_attr "mode" "<UNITMODE>")])
;; The four "*" variants below match the negated forms that combine
;; produces; valid only when signed zeros need not be honored, since
;; -(x) rewriting can flip the sign of a zero result.
1294 ;; -(-a * b - c), modulo signed zeros
1295 (define_insn "*fma<mode>4"
1296 [(set (match_operand:ANYF 0 "register_operand" "=f")
1299 (neg:ANYF (match_operand:ANYF 1 "register_operand" " f"))
1300 (match_operand:ANYF 2 "register_operand" " f")
1301 (neg:ANYF (match_operand:ANYF 3 "register_operand" " f")))))]
1302 "(TARGET_HARD_FLOAT || TARGET_ZFINX) && !HONOR_SIGNED_ZEROS (<MODE>mode)"
1303 "fmadd.<fmt>\t%0,%1,%2,%3"
1304 [(set_attr "type" "fmadd")
1305 (set_attr "mode" "<UNITMODE>")])
1307 ;; -(-a * b + c), modulo signed zeros
1308 (define_insn "*fms<mode>4"
1309 [(set (match_operand:ANYF 0 "register_operand" "=f")
1312 (neg:ANYF (match_operand:ANYF 1 "register_operand" " f"))
1313 (match_operand:ANYF 2 "register_operand" " f")
1314 (match_operand:ANYF 3 "register_operand" " f"))))]
1315 "(TARGET_HARD_FLOAT || TARGET_ZFINX) && !HONOR_SIGNED_ZEROS (<MODE>mode)"
1316 "fmsub.<fmt>\t%0,%1,%2,%3"
1317 [(set_attr "type" "fmadd")
1318 (set_attr "mode" "<UNITMODE>")])
1320 ;; -(a * b + c), modulo signed zeros
1321 (define_insn "*fnms<mode>4"
1322 [(set (match_operand:ANYF 0 "register_operand" "=f")
1325 (match_operand:ANYF 1 "register_operand" " f")
1326 (match_operand:ANYF 2 "register_operand" " f")
1327 (match_operand:ANYF 3 "register_operand" " f"))))]
1328 "(TARGET_HARD_FLOAT || TARGET_ZFINX) && !HONOR_SIGNED_ZEROS (<MODE>mode)"
1329 "fnmadd.<fmt>\t%0,%1,%2,%3"
1330 [(set_attr "type" "fmadd")
1331 (set_attr "mode" "<UNITMODE>")])
1333 ;; -(a * b - c), modulo signed zeros
1334 (define_insn "*fnma<mode>4"
1335 [(set (match_operand:ANYF 0 "register_operand" "=f")
1338 (match_operand:ANYF 1 "register_operand" " f")
1339 (match_operand:ANYF 2 "register_operand" " f")
1340 (neg:ANYF (match_operand:ANYF 3 "register_operand" " f")))))]
1341 "(TARGET_HARD_FLOAT || TARGET_ZFINX) && !HONOR_SIGNED_ZEROS (<MODE>mode)"
1342 "fnmsub.<fmt>\t%0,%1,%2,%3"
1343 [(set_attr "type" "fmadd")
1344 (set_attr "mode" "<UNITMODE>")])
1347 ;; ....................
1351 ;; ....................
;; abs<mode>2: FP absolute value.
;; NOTE(review): output-template line appears dropped in this extract
;; (expected fabs.<fmt>) — confirm against upstream riscv.md.
1353 (define_insn "abs<mode>2"
1354 [(set (match_operand:ANYF 0 "register_operand" "=f")
1355 (abs:ANYF (match_operand:ANYF 1 "register_operand" " f")))]
1356 "TARGET_HARD_FLOAT || TARGET_ZFINX"
1358 [(set_attr "type" "fmove")
1359 (set_attr "mode" "<UNITMODE>")])
;; copysign<mode>3: copy sign of operand 2 onto operand 1 via fsgnj.
1361 (define_insn "copysign<mode>3"
1362 [(set (match_operand:ANYF 0 "register_operand" "=f")
1363 (unspec:ANYF [(match_operand:ANYF 1 "register_operand" " f")
1364 (match_operand:ANYF 2 "register_operand" " f")]
1366 "TARGET_HARD_FLOAT || TARGET_ZFINX"
1367 "fsgnj.<fmt>\t%0,%1,%2"
1368 [(set_attr "type" "fmove")
1369 (set_attr "mode" "<UNITMODE>")])
;; neg<mode>2: FP negate.
;; NOTE(review): output-template line appears dropped (expected
;; fneg.<fmt>) — confirm against upstream riscv.md.
1371 (define_insn "neg<mode>2"
1372 [(set (match_operand:ANYF 0 "register_operand" "=f")
1373 (neg:ANYF (match_operand:ANYF 1 "register_operand" " f")))]
1374 "TARGET_HARD_FLOAT || TARGET_ZFINX"
1376 [(set_attr "type" "fmove")
1377 (set_attr "mode" "<UNITMODE>")])
1380 ;; ....................
1384 ;; ....................
;; fmin<mode>3 / fmax<mode>3: IEEE-style min/max expressed as unspecs;
;; only valid when signaling NaNs need not be honored, since fmin/fmax
;; quiet sNaN inputs.
1386 (define_insn "fmin<mode>3"
1387 [(set (match_operand:ANYF 0 "register_operand" "=f")
1388 (unspec:ANYF [(use (match_operand:ANYF 1 "register_operand" " f"))
1389 (use (match_operand:ANYF 2 "register_operand" " f"))]
1391 "(TARGET_HARD_FLOAT || TARGET_ZFINX) && !HONOR_SNANS (<MODE>mode)"
1392 "fmin.<fmt>\t%0,%1,%2"
1393 [(set_attr "type" "fmove")
1394 (set_attr "mode" "<UNITMODE>")])
1396 (define_insn "fmax<mode>3"
1397 [(set (match_operand:ANYF 0 "register_operand" "=f")
1398 (unspec:ANYF [(use (match_operand:ANYF 1 "register_operand" " f"))
1399 (use (match_operand:ANYF 2 "register_operand" " f"))]
1401 "(TARGET_HARD_FLOAT || TARGET_ZFINX) && !HONOR_SNANS (<MODE>mode)"
1402 "fmax.<fmt>\t%0,%1,%2"
1403 [(set_attr "type" "fmove")
1404 (set_attr "mode" "<UNITMODE>")])
;; smin<mode>3 / smax<mode>3: rtl smin/smax forms (NaN behavior
;; unspecified at the rtl level), also mapped to fmin/fmax.
1406 (define_insn "smin<mode>3"
1407 [(set (match_operand:ANYF 0 "register_operand" "=f")
1408 (smin:ANYF (match_operand:ANYF 1 "register_operand" " f")
1409 (match_operand:ANYF 2 "register_operand" " f")))]
1410 "TARGET_HARD_FLOAT || TARGET_ZFINX"
1411 "fmin.<fmt>\t%0,%1,%2"
1412 [(set_attr "type" "fmove")
1413 (set_attr "mode" "<UNITMODE>")])
1415 (define_insn "smax<mode>3"
1416 [(set (match_operand:ANYF 0 "register_operand" "=f")
1417 (smax:ANYF (match_operand:ANYF 1 "register_operand" " f")
1418 (match_operand:ANYF 2 "register_operand" " f")))]
1419 "TARGET_HARD_FLOAT || TARGET_ZFINX"
1420 "fmax.<fmt>\t%0,%1,%2"
1421 [(set_attr "type" "fmove")
1422 (set_attr "mode" "<UNITMODE>")])
1425 ;; ....................
1429 ;; ....................
1432 ;; For RV64, we don't expose the SImode operations to the rtl expanders,
1433 ;; but SImode versions exist for combine.
;; and<mode>3: AND expander.  A constant mask equal to GET_MODE_MASK of
;; a narrower mode is rewritten as a zero-extension, which other
;; patterns handle more cheaply than a large AND immediate.
1435 (define_expand "and<mode>3"
1436 [(set (match_operand:X 0 "register_operand")
1437 (and:X (match_operand:X 1 "register_operand")
1438 (match_operand:X 2 "arith_operand_or_mode_mask")))]
1441 /* If the second operand is a mode mask, emit an extension
1443 if (CONST_INT_P (operands[2]))
1445 enum machine_mode tmode = VOIDmode;
1446 if (UINTVAL (operands[2]) == GET_MODE_MASK (HImode))
1448 else if (UINTVAL (operands[2]) == GET_MODE_MASK (SImode))
1451 if (tmode != VOIDmode)
;; Zero-extend (last arg 1 = unsigned) the narrow lowpart instead of
;; emitting an AND with the mask.
1453 rtx tmp = gen_lowpart (tmode, operands[1]);
1454 emit_insn (gen_extend_insn (operands[0], tmp, <MODE>mode, tmode, 1));
;; Fall through: plain AND.
1460 emit_move_insn (operands[0], gen_rtx_AND (<MODE>mode, operands[1], operands[2]));
;; *and<mode>3: register/immediate AND ("%" marks operand 1 commutative).
;; NOTE(review): the output template line appears dropped in this
;; extract (expected and%i2\t%0,%1,%2) — confirm upstream.
1465 (define_insn "*and<mode>3"
1466 [(set (match_operand:X 0 "register_operand" "=r,r")
1467 (and:X (match_operand:X 1 "register_operand" "%r,r")
1468 (match_operand:X 2 "arith_operand" " r,I")))]
1471 [(set_attr "type" "logical")
1472 (set_attr "mode" "<MODE>")])
;; <optab><mode>3: IOR/XOR, register or small-immediate form.
1474 (define_insn "<optab><mode>3"
1475 [(set (match_operand:X 0 "register_operand" "=r,r")
1476 (any_or:X (match_operand:X 1 "register_operand" "%r,r")
1477 (match_operand:X 2 "arith_operand" " r,I")))]
1479 "<insn>%i2\t%0,%1,%2"
1480 [(set_attr "type" "logical")
1481 (set_attr "mode" "<MODE>")])
;; *<optab>si3_internal: SImode bitwise ops kept around for combine on
;; RV64 (bitwise ops are width-agnostic, so no "w" form needed).
1483 (define_insn "*<optab>si3_internal"
1484 [(set (match_operand:SI 0 "register_operand" "=r,r")
1485 (any_bitwise:SI (match_operand:SI 1 "register_operand" "%r,r")
1486 (match_operand:SI 2 "arith_operand" " r,I")))]
1488 "<insn>%i2\t%0,%1,%2"
1489 [(set_attr "type" "logical")
1490 (set_attr "mode" "SI")])
;; one_cmpl<mode>2 / *one_cmplsi2_internal: bitwise NOT.
;; NOTE(review): output templates (expected not\t%0,%1) appear dropped.
1492 (define_insn "one_cmpl<mode>2"
1493 [(set (match_operand:X 0 "register_operand" "=r")
1494 (not:X (match_operand:X 1 "register_operand" " r")))]
1497 [(set_attr "type" "logical")
1498 (set_attr "mode" "<MODE>")])
1500 (define_insn "*one_cmplsi2_internal"
1501 [(set (match_operand:SI 0 "register_operand" "=r")
1502 (not:SI (match_operand:SI 1 "register_operand" " r")))]
1505 [(set_attr "type" "logical")
1506 (set_attr "mode" "SI")])
1509 ;; ....................
1513 ;; ....................
;; truncdfsf2: DF -> SF narrowing conversion.
;; NOTE(review): the float_truncate wrapper and output template lines
;; appear dropped in this extract (expected fcvt.s.d) — confirm.
1515 (define_insn "truncdfsf2"
1516 [(set (match_operand:SF 0 "register_operand" "=f")
1518 (match_operand:DF 1 "register_operand" " f")))]
1519 "TARGET_DOUBLE_FLOAT || TARGET_ZDINX"
1521 [(set_attr "type" "fcvt")
1522 (set_attr "mode" "SF")])
;; truncsfhf2: SF -> HF narrowing (Zfhmin/Zhinxmin).
1524 (define_insn "truncsfhf2"
1525 [(set (match_operand:HF 0 "register_operand" "=f")
1527 (match_operand:SF 1 "register_operand" " f")))]
1528 "TARGET_ZFHMIN || TARGET_ZHINXMIN"
1530 [(set_attr "type" "fcvt")
1531 (set_attr "mode" "HF")])
;; truncdfhf2: DF -> HF narrowing; needs both half- and double-float
;; support (register or in-X-register flavors).
1533 (define_insn "truncdfhf2"
1534 [(set (match_operand:HF 0 "register_operand" "=f")
1536 (match_operand:DF 1 "register_operand" " f")))]
1537 "(TARGET_ZFHMIN && TARGET_DOUBLE_FLOAT) ||
1538 (TARGET_ZHINXMIN && TARGET_ZDINX)"
1540 [(set_attr "type" "fcvt")
1541 (set_attr "mode" "HF")])
1544 ;; ....................
1548 ;; ....................
;; zero_extendsidi2: expander for SI -> DI zero extension (RV64).
1552 (define_expand "zero_extendsidi2"
1553 [(set (match_operand:DI 0 "register_operand")
1554 (zero_extend:DI (match_operand:SI 1 "nonimmediate_operand")))]
;; *zero_extendsidi2_internal: without Zba (zext.w), a register source
;; is split after reload into shift-left-32 / logical-shift-right-32;
;; a memory source uses an unsigned load.  VL_REGNUM sources are
;; excluded (vector length register is read specially).
1557 (define_insn_and_split "*zero_extendsidi2_internal"
1558 [(set (match_operand:DI 0 "register_operand" "=r,r")
1560 (match_operand:SI 1 "nonimmediate_operand" " r,m")))]
1561 "TARGET_64BIT && !TARGET_ZBA
1562 && !(register_operand (operands[1], SImode)
1563 && reg_or_subregno (operands[1]) == VL_REGNUM)"
1567 "&& reload_completed
1568 && REG_P (operands[1])
1569 && !paradoxical_subreg_p (operands[0])"
1571 (ashift:DI (match_dup 1) (const_int 32)))
1573 (lshiftrt:DI (match_dup 0) (const_int 32)))]
1574 { operands[1] = gen_lowpart (DImode, operands[1]); }
1575 [(set_attr "move_type" "shift_shift,load")
1576 (set_attr "mode" "DI")])
;; zero_extendhi<GPR:mode>2: expander for HI -> SI/DI zero extension.
1578 (define_expand "zero_extendhi<GPR:mode>2"
1579 [(set (match_operand:GPR 0 "register_operand")
1581 (match_operand:HI 1 "nonimmediate_operand")))]
;; *zero_extendhi<GPR:mode>2: register source splits into a shift pair
;; by (word size - 16); memory source loads unsigned.
1584 (define_insn_and_split "*zero_extendhi<GPR:mode>2"
1585 [(set (match_operand:GPR 0 "register_operand" "=r,r")
1587 (match_operand:HI 1 "nonimmediate_operand" " r,m")))]
1592 "&& reload_completed
1593 && REG_P (operands[1])
1594 && !paradoxical_subreg_p (operands[0])"
1596 (ashift:GPR (match_dup 1) (match_dup 2)))
1598 (lshiftrt:GPR (match_dup 0) (match_dup 2)))]
1600 operands[1] = gen_lowpart (<GPR:MODE>mode, operands[1]);
1601 operands[2] = GEN_INT(GET_MODE_BITSIZE(<GPR:MODE>mode) - 16);
1603 [(set_attr "move_type" "shift_shift,load")
1604 (set_attr "mode" "<GPR:MODE>")])
;; zero_extendqi<SUPERQI:mode>2: QI zero-extend; an 8-bit mask fits an
;; ANDI immediate, so no shift pair is needed (move_type "andi").
1606 (define_insn "zero_extendqi<SUPERQI:mode>2"
1607 [(set (match_operand:SUPERQI 0 "register_operand" "=r,r")
1608 (zero_extend:SUPERQI
1609 (match_operand:QI 1 "nonimmediate_operand" " r,m")))]
1614 [(set_attr "move_type" "andi,load")
1615 (set_attr "mode" "<SUPERQI:MODE>")])
1618 ;; ....................
1622 ;; ....................
;; extendsidi2: SI -> DI sign extension; a register-register move
;; suffices on RV64 (values are kept sign-extended), memory uses lw.
1624 (define_insn "extendsidi2"
1625 [(set (match_operand:DI 0 "register_operand" "=r,r")
1627 (match_operand:SI 1 "nonimmediate_operand" " r,m")))]
1632 [(set_attr "move_type" "move,load")
1633 (set_attr "mode" "DI")])
;; extend<SHORT:mode><SUPERQI:mode>2: expander for QI/HI sign extension.
1635 (define_expand "extend<SHORT:mode><SUPERQI:mode>2"
1636 [(set (match_operand:SUPERQI 0 "register_operand")
1637 (sign_extend:SUPERQI (match_operand:SHORT 1 "nonimmediate_operand")))]
;; *extend...: register source splits after reload into an
;; ashift/ashiftrt pair by (32 - source width); memory source uses a
;; signed load (l<size>).
1640 (define_insn_and_split "*extend<SHORT:mode><SUPERQI:mode>2"
1641 [(set (match_operand:SUPERQI 0 "register_operand" "=r,r")
1642 (sign_extend:SUPERQI
1643 (match_operand:SHORT 1 "nonimmediate_operand" " r,m")))]
1647 l<SHORT:size>\t%0,%1"
1648 "&& reload_completed
1649 && REG_P (operands[1])
1650 && !paradoxical_subreg_p (operands[0])"
1651 [(set (match_dup 0) (ashift:SI (match_dup 1) (match_dup 2)))
1652 (set (match_dup 0) (ashiftrt:SI (match_dup 0) (match_dup 2)))]
1654 operands[0] = gen_lowpart (SImode, operands[0]);
1655 operands[1] = gen_lowpart (SImode, operands[1]);
1656 operands[2] = GEN_INT (GET_MODE_BITSIZE (SImode)
1657 - GET_MODE_BITSIZE (<SHORT:MODE>mode));
1659 [(set_attr "move_type" "shift_shift,load")
1660 (set_attr "mode" "SI")])
;; extendhfsf2: HF -> SF widening (Zfhmin/Zhinxmin).
1662 (define_insn "extendhfsf2"
1663 [(set (match_operand:SF 0 "register_operand" "=f")
1665 (match_operand:HF 1 "register_operand" " f")))]
1666 "TARGET_ZFHMIN || TARGET_ZHINXMIN"
1668 [(set_attr "type" "fcvt")
1669 (set_attr "mode" "SF")])
;; extendsfdf2: SF -> DF widening.
1671 (define_insn "extendsfdf2"
1672 [(set (match_operand:DF 0 "register_operand" "=f")
1674 (match_operand:SF 1 "register_operand" " f")))]
1675 "TARGET_DOUBLE_FLOAT || TARGET_ZDINX"
1677 [(set_attr "type" "fcvt")
1678 (set_attr "mode" "DF")])
;; extendhfdf2: HF -> DF widening; needs half- and double-float support.
1680 (define_insn "extendhfdf2"
1681 [(set (match_operand:DF 0 "register_operand" "=f")
1683 (match_operand:HF 1 "register_operand" " f")))]
1684 "(TARGET_ZFHMIN && TARGET_DOUBLE_FLOAT) ||
1685 (TARGET_ZHINXMIN && TARGET_ZDINX)"
1687 [(set_attr "type" "fcvt")
1688 (set_attr "mode" "DF")])
1690 ;; 16-bit floating point moves
;; movhf: HFmode move expander; riscv_legitimize_move handles the cases
;; that need fixing up (returns nonzero when it emitted the move).
1691 (define_expand "movhf"
1692 [(set (match_operand:HF 0 "")
1693 (match_operand:HF 1 ""))]
1696 if (riscv_legitimize_move (HFmode, operands[0], operands[1]))
;; *movhf_hardfloat: HF moves when half-precision FP registers exist;
;; one constraint alternative per move_type in the attr list below.
1700 (define_insn "*movhf_hardfloat"
1701 [(set (match_operand:HF 0 "nonimmediate_operand" "=f,f,f,m,m,*f,*r, *r,*r,*m")
1702 (match_operand:HF 1 "move_operand" " f,G,m,f,G,*r,*f,*G*r,*m,*r"))]
1704 && (register_operand (operands[0], HFmode)
1705 || reg_or_0_operand (operands[1], HFmode))"
1706 { return riscv_output_move (operands[0], operands[1]); }
1707 [(set_attr "move_type" "fmove,mtc,fpload,fpstore,store,mtc,mfc,move,load,store")
1708 (set_attr "mode" "HF")])
;; *movhf_softfloat: HF moves through integer registers.
1710 (define_insn "*movhf_softfloat"
1711 [(set (match_operand:HF 0 "nonimmediate_operand" "=f, r,r,m,*f,*r")
1712 (match_operand:HF 1 "move_operand" " f,Gr,m,r,*r,*f"))]
1714 && (register_operand (operands[0], HFmode)
1715 || reg_or_0_operand (operands[1], HFmode))"
1716 { return riscv_output_move (operands[0], operands[1]); }
1717 [(set_attr "move_type" "fmove,move,load,store,mtc,mfc")
1718 (set_attr "mode" "HF")])
1721 ;; ....................
1725 ;; ....................
;; fix_trunc: FP -> signed integer, truncating (explicit rtz rounding).
1727 (define_insn "fix_trunc<ANYF:mode><GPR:mode>2"
1728 [(set (match_operand:GPR 0 "register_operand" "=r")
1730 (match_operand:ANYF 1 "register_operand" " f")))]
1731 "TARGET_HARD_FLOAT || TARGET_ZFINX"
1732 "fcvt.<GPR:ifmt>.<ANYF:fmt> %0,%1,rtz"
1733 [(set_attr "type" "fcvt")
1734 (set_attr "mode" "<ANYF:MODE>")])
;; fixuns_trunc: FP -> unsigned integer, truncating.
1736 (define_insn "fixuns_trunc<ANYF:mode><GPR:mode>2"
1737 [(set (match_operand:GPR 0 "register_operand" "=r")
1739 (match_operand:ANYF 1 "register_operand" " f")))]
1740 "TARGET_HARD_FLOAT || TARGET_ZFINX"
1741 "fcvt.<GPR:ifmt>u.<ANYF:fmt> %0,%1,rtz"
1742 [(set_attr "type" "fcvt")
1743 (set_attr "mode" "<ANYF:MODE>")])
;; float: signed integer -> FP; %z1 prints x0 for a zero operand.
1745 (define_insn "float<GPR:mode><ANYF:mode>2"
1746 [(set (match_operand:ANYF 0 "register_operand" "= f")
1748 (match_operand:GPR 1 "reg_or_0_operand" " rJ")))]
1749 "TARGET_HARD_FLOAT || TARGET_ZFINX"
1750 "fcvt.<ANYF:fmt>.<GPR:ifmt>\t%0,%z1"
1751 [(set_attr "type" "fcvt")
1752 (set_attr "mode" "<ANYF:MODE>")])
;; floatuns: unsigned integer -> FP.
1754 (define_insn "floatuns<GPR:mode><ANYF:mode>2"
1755 [(set (match_operand:ANYF 0 "register_operand" "= f")
1756 (unsigned_float:ANYF
1757 (match_operand:GPR 1 "reg_or_0_operand" " rJ")))]
1758 "TARGET_HARD_FLOAT || TARGET_ZFINX"
1759 "fcvt.<ANYF:fmt>.<GPR:ifmt>u\t%0,%z1"
1760 [(set_attr "type" "fcvt")
1761 (set_attr "mode" "<ANYF:MODE>")])
;; lrint/lround family: FP -> integer with the rounding mode selected by
;; the <rint_rm> iterator attribute.
1763 (define_insn "l<rint_pattern><ANYF:mode><GPR:mode>2"
1764 [(set (match_operand:GPR 0 "register_operand" "=r")
1766 [(match_operand:ANYF 1 "register_operand" " f")]
1768 "TARGET_HARD_FLOAT || TARGET_ZFINX"
1769 "fcvt.<GPR:ifmt>.<ANYF:fmt> %0,%1,<rint_rm>"
1770 [(set_attr "type" "fcvt")
1771 (set_attr "mode" "<ANYF:MODE>")])
1774 ;; ....................
1778 ;; ....................
1780 ;; Lower-level instructions for loading an address from the GOT.
1781 ;; We could use MEMs, but an unspec gives more optimization
;; got_load<mode>: load an address out of the GOT (unspec keeps the
;; access opaque so it isn't CSE'd incorrectly).
1784 (define_insn "got_load<mode>"
1785 [(set (match_operand:P 0 "register_operand" "=r")
1787 [(match_operand:P 1 "symbolic_operand" "")]
1791 [(set_attr "got" "load")
1792 (set_attr "mode" "<MODE>")])
;; tls_add_tp_le<mode>: local-exec TLS — add the thread pointer and the
;; %tprel_add relocation for symbol operand 3.
1794 (define_insn "tls_add_tp_le<mode>"
1795 [(set (match_operand:P 0 "register_operand" "=r")
1797 [(match_operand:P 1 "register_operand" "r")
1798 (match_operand:P 2 "register_operand" "r")
1799 (match_operand:P 3 "symbolic_operand" "")]
1802 "add\t%0,%1,%2,%%tprel_add(%3)"
1803 [(set_attr "type" "arith")
1804 (set_attr "mode" "<MODE>")])
;; got_load_tls_gd<mode>: general-dynamic TLS GOT load.
1806 (define_insn "got_load_tls_gd<mode>"
1807 [(set (match_operand:P 0 "register_operand" "=r")
1809 [(match_operand:P 1 "symbolic_operand" "")]
1813 [(set_attr "got" "load")
1814 (set_attr "mode" "<MODE>")])
;; got_load_tls_ie<mode>: initial-exec TLS GOT load.
1816 (define_insn "got_load_tls_ie<mode>"
1817 [(set (match_operand:P 0 "register_operand" "=r")
1819 [(match_operand:P 1 "symbolic_operand" "")]
1823 [(set_attr "got" "load")
1824 (set_attr "mode" "<MODE>")])
;; auipc<mode>: PC-relative high part.  The .LA%2 label anchors the
;; paired %pcrel_lo relocation; cannot_copy because duplicating the insn
;; would break the label/relocation pairing.
1826 (define_insn "auipc<mode>"
1827 [(set (match_operand:P 0 "register_operand" "=r")
1829 [(match_operand:P 1 "symbolic_operand" "")
1830 (match_operand:P 2 "const_int_operand")
1834 ".LA%2: auipc\t%0,%h1"
1835 [(set_attr "type" "auipc")
1836 (set_attr "cannot_copy" "yes")])
1838 ;; Instructions for adding the low 12 bits of an address to a register.
1839 ;; Operand 2 is the address: riscv_print_operand works out which relocation
1840 ;; should be applied.
;; *low<mode>: add the low 12 bits of an address (lo_sum); the
;; relocation kind is chosen by riscv_print_operand (see header note).
1842 (define_insn "*low<mode>"
1843 [(set (match_operand:P 0 "register_operand" "=r")
1844 (lo_sum:P (match_operand:P 1 "register_operand" " r")
1845 (match_operand:P 2 "symbolic_operand" "")))]
1848 [(set_attr "type" "arith")
1849 (set_attr "mode" "<MODE>")])
1851 ;; Allow combine to split complex const_int load sequences, using operand 2
1852 ;; to store the intermediate results. See move_operand for details.
1854 [(set (match_operand:GPR 0 "register_operand")
1855 (match_operand:GPR 1 "splittable_const_int_operand"))
1856 (clobber (match_operand:GPR 2 "register_operand"))]
;; Synthesize the constant via riscv_move_integer, scratching operand 2.
1860 riscv_move_integer (operands[2], operands[0], INTVAL (operands[1]),
1865 ;; Likewise, for symbolic operands.
1867 [(set (match_operand:P 0 "register_operand")
1868 (match_operand:P 1))
1869 (clobber (match_operand:P 2 "register_operand"))]
1870 "riscv_split_symbol (operands[2], operands[1], MAX_MACHINE_MODE, NULL)"
1871 [(set (match_dup 0) (match_dup 3))]
;; Second call actually produces the replacement rtx in operands[3].
1873 riscv_split_symbol (operands[2], operands[1],
1874 MAX_MACHINE_MODE, &operands[3]);
1877 ;; Pretend to have the ability to load complex const_int in order to get
1878 ;; better code generation around them.
1880 ;; But avoid constants that are special cased elsewhere.
1881 (define_insn_and_split "*mvconst_internal"
1882 [(set (match_operand:GPR 0 "register_operand" "=r")
1883 (match_operand:GPR 1 "splittable_const_int_operand" "i"))]
;; Exclude shifted 2^n-1 and high-mask constants, which dedicated
;; patterns elsewhere handle better.
1884 "!(p2m1_shift_operand (operands[1], <MODE>mode)
1885 || high_mask_shift_operand (operands[1], <MODE>mode))"
1890 riscv_move_integer (operands[0], operands[0], INTVAL (operands[1]),
1895 ;; 64-bit integer moves
;; movdi: DImode move expander; riscv_legitimize_move fixes up moves
;; that need scratch registers or splitting.
1897 (define_expand "movdi"
1898 [(set (match_operand:DI 0 "")
1899 (match_operand:DI 1 ""))]
1902 if (riscv_legitimize_move (DImode, operands[0], operands[1]))
;; *movdi_32bit: DImode moves on RV32 (register pairs); FP alternatives
;; require the D extension, "vp" reads the vector VL/VTYPE pseudo.
1906 (define_insn "*movdi_32bit"
1907 [(set (match_operand:DI 0 "nonimmediate_operand" "=r,r,r,m, *f,*f,*r,*f,*m,r")
1908 (match_operand:DI 1 "move_operand" " r,i,m,r,*J*r,*m,*f,*f,*f,vp"))]
1910 && (register_operand (operands[0], DImode)
1911 || reg_or_0_operand (operands[1], DImode))"
1912 { return riscv_output_move (operands[0], operands[1]); }
1913 [(set_attr "move_type" "move,const,load,store,mtc,fpload,mfc,fmove,fpstore,rdvlenb")
1914 (set_attr "mode" "DI")
1915 (set_attr "ext" "base,base,base,base,d,d,d,d,d,vector")])
;; *movdi_64bit: DImode moves on RV64 ("T" = constant move source).
1917 (define_insn "*movdi_64bit"
1918 [(set (match_operand:DI 0 "nonimmediate_operand" "=r,r,r, m, *f,*f,*r,*f,*m,r")
1919 (match_operand:DI 1 "move_operand" " r,T,m,rJ,*r*J,*m,*f,*f,*f,vp"))]
1921 && (register_operand (operands[0], DImode)
1922 || reg_or_0_operand (operands[1], DImode))"
1923 { return riscv_output_move (operands[0], operands[1]); }
1924 [(set_attr "move_type" "move,const,load,store,mtc,fpload,mfc,fmove,fpstore,rdvlenb")
1925 (set_attr "mode" "DI")
1926 (set_attr "ext" "base,base,base,base,d,d,d,d,d,vector")])
1928 ;; 32-bit Integer moves
;; mov<mode> (MOVE32): 32-bit integer move expander.
1930 (define_expand "mov<mode>"
1931 [(set (match_operand:MOVE32 0 "")
1932 (match_operand:MOVE32 1 ""))]
1935 if (riscv_legitimize_move (<MODE>mode, operands[0], operands[1]))
;; *movsi_internal: SImode moves; VL_REGNUM sources are excluded (the
;; vector-length register is not read with an ordinary move).
1939 (define_insn "*movsi_internal"
1940 [(set (match_operand:SI 0 "nonimmediate_operand" "=r,r,r, m, *f,*f,*r,*m,r")
1941 (match_operand:SI 1 "move_operand" " r,T,m,rJ,*r*J,*m,*f,*f,vp"))]
1942 "(register_operand (operands[0], SImode)
1943 || reg_or_0_operand (operands[1], SImode))
1944 && !(register_operand (operands[1], SImode)
1945 && reg_or_subregno (operands[1]) == VL_REGNUM)"
1946 { return riscv_output_move (operands[0], operands[1]); }
1947 [(set_attr "move_type" "move,const,load,store,mtc,fpload,mfc,fpstore,rdvlenb")
1948 (set_attr "mode" "SI")
1949 (set_attr "ext" "base,base,base,base,f,f,f,f,vector")])
1951 ;; 16-bit Integer moves
1953 ;; Unlike most other insns, the move insns can't be split with
1954 ;; different predicates, because register spilling and other parts of
1955 ;; the compiler, have memoized the insn number already.
1956 ;; Unsigned loads are used because LOAD_EXTEND_OP returns ZERO_EXTEND.
;; movhi: HImode move expander.
1958 (define_expand "movhi"
1959 [(set (match_operand:HI 0 "")
1960 (match_operand:HI 1 ""))]
1963 if (riscv_legitimize_move (HImode, operands[0], operands[1]))
;; *movhi_internal: HImode moves; FP alternatives need the F extension.
1967 (define_insn "*movhi_internal"
1968 [(set (match_operand:HI 0 "nonimmediate_operand" "=r,r,r, m, *f,*r,r")
1969 (match_operand:HI 1 "move_operand" " r,T,m,rJ,*r*J,*f,vp"))]
1970 "(register_operand (operands[0], HImode)
1971 || reg_or_0_operand (operands[1], HImode))"
1972 { return riscv_output_move (operands[0], operands[1]); }
1973 [(set_attr "move_type" "move,const,load,store,mtc,mfc,rdvlenb")
1974 (set_attr "mode" "HI")
1975 (set_attr "ext" "base,base,base,base,f,f,vector")])
1977 ;; HImode constant generation; see riscv_move_integer for details.
1978 ;; si+si->hi without truncation is legal because of
1979 ;; TARGET_TRULY_NOOP_TRUNCATION.
;; *add<mode>hi3: add with HI destination, SI/HI sources (see note
;; above about truncation-free narrowing).
1981 (define_insn "*add<mode>hi3"
1982 [(set (match_operand:HI 0 "register_operand" "=r,r")
1983 (plus:HI (match_operand:HISI 1 "register_operand" " r,r")
1984 (match_operand:HISI 2 "arith_operand" " r,I")))]
1986 "add%i2%~\t%0,%1,%2"
1987 [(set_attr "type" "arith")
1988 (set_attr "mode" "HI")])
;; *xor<mode>hi3: XOR with HI destination, SI/HI sources.
;; NOTE(review): output template line appears dropped in this extract.
1990 (define_insn "*xor<mode>hi3"
1991 [(set (match_operand:HI 0 "register_operand" "=r,r")
1992 (xor:HI (match_operand:HISI 1 "register_operand" " r,r")
1993 (match_operand:HISI 2 "arith_operand" " r,I")))]
1996 [(set_attr "type" "logical")
1997 (set_attr "mode" "HI")])
1999 ;; 8-bit Integer moves
;; movqi: QImode move expander.
2001 (define_expand "movqi"
2002 [(set (match_operand:QI 0 "")
2003 (match_operand:QI 1 ""))]
2006 if (riscv_legitimize_move (QImode, operands[0], operands[1]))
;; *movqi_internal: QImode moves ("I" = small-immediate constant).
2010 (define_insn "*movqi_internal"
2011 [(set (match_operand:QI 0 "nonimmediate_operand" "=r,r,r, m, *f,*r,r")
2012 (match_operand:QI 1 "move_operand" " r,I,m,rJ,*r*J,*f,vp"))]
2013 "(register_operand (operands[0], QImode)
2014 || reg_or_0_operand (operands[1], QImode))"
2015 { return riscv_output_move (operands[0], operands[1]); }
2016 [(set_attr "move_type" "move,const,load,store,mtc,mfc,rdvlenb")
2017 (set_attr "mode" "QI")
2018 (set_attr "ext" "base,base,base,base,f,f,vector")])
2020 ;; 32-bit floating point moves
;; movsf: SFmode move expander.
2022 (define_expand "movsf"
2023 [(set (match_operand:SF 0 "")
2024 (match_operand:SF 1 ""))]
2027 if (riscv_legitimize_move (SFmode, operands[0], operands[1]))
;; *movsf_hardfloat: SF moves with FP registers ("G" = FP zero).
2031 (define_insn "*movsf_hardfloat"
2032 [(set (match_operand:SF 0 "nonimmediate_operand" "=f,f,f,m,m,*f,*r, *r,*r,*m")
2033 (match_operand:SF 1 "move_operand" " f,G,m,f,G,*r,*f,*G*r,*m,*r"))]
2035 && (register_operand (operands[0], SFmode)
2036 || reg_or_0_operand (operands[1], SFmode))"
2037 { return riscv_output_move (operands[0], operands[1]); }
2038 [(set_attr "move_type" "fmove,mtc,fpload,fpstore,store,mtc,mfc,move,load,store")
2039 (set_attr "mode" "SF")])
;; *movsf_softfloat: SF moves through integer registers only.
2041 (define_insn "*movsf_softfloat"
2042 [(set (match_operand:SF 0 "nonimmediate_operand" "= r,r,m")
2043 (match_operand:SF 1 "move_operand" " Gr,m,r"))]
2045 && (register_operand (operands[0], SFmode)
2046 || reg_or_0_operand (operands[1], SFmode))"
2047 { return riscv_output_move (operands[0], operands[1]); }
2048 [(set_attr "move_type" "move,load,store")
2049 (set_attr "mode" "SF")])
2051 ;; 64-bit floating point moves
;; movdf: DFmode move expander.
2053 (define_expand "movdf"
2054 [(set (match_operand:DF 0 "")
2055 (match_operand:DF 1 ""))]
2058 if (riscv_legitimize_move (DFmode, operands[0], operands[1]))
2063 ;; In RV32, we lack fmv.x.d and fmv.d.x. Go through memory instead.
2064 ;; (However, we can still use fcvt.d.w to zero a floating-point register.)
;; *movdf_hardfloat_rv32: DF moves on RV32 with D; the th_f_fmv/th_r_fmv
;; alternatives are T-Head vendor-extension direct FP<->GPR moves.
2065 (define_insn "*movdf_hardfloat_rv32"
2066 [(set (match_operand:DF 0 "nonimmediate_operand" "=f,f,f,m,m,*th_f_fmv,*th_r_fmv, *r,*r,*m")
2067 (match_operand:DF 1 "move_operand" " f,G,m,f,G,*th_r_fmv,*th_f_fmv,*r*G,*m,*r"))]
2068 "!TARGET_64BIT && TARGET_DOUBLE_FLOAT
2069 && (register_operand (operands[0], DFmode)
2070 || reg_or_0_operand (operands[1], DFmode))"
2071 { return riscv_output_move (operands[0], operands[1]); }
2072 [(set_attr "move_type" "fmove,mtc,fpload,fpstore,store,mtc,mfc,move,load,store")
2073 (set_attr "mode" "DF")])
;; *movdf_hardfloat_rv64: DF moves on RV64 with D (fmv.x.d available).
2075 (define_insn "*movdf_hardfloat_rv64"
2076 [(set (match_operand:DF 0 "nonimmediate_operand" "=f,f,f,m,m,*f,*r, *r,*r,*m")
2077 (match_operand:DF 1 "move_operand" " f,G,m,f,G,*r,*f,*r*G,*m,*r"))]
2078 "TARGET_64BIT && TARGET_DOUBLE_FLOAT
2079 && (register_operand (operands[0], DFmode)
2080 || reg_or_0_operand (operands[1], DFmode))"
2081 { return riscv_output_move (operands[0], operands[1]); }
2082 [(set_attr "move_type" "fmove,mtc,fpload,fpstore,store,mtc,mfc,move,load,store")
2083 (set_attr "mode" "DF")])
;; *movdf_softfloat: DF moves without double-float hardware.
2085 (define_insn "*movdf_softfloat"
2086 [(set (match_operand:DF 0 "nonimmediate_operand" "= r,r, m")
2087 (match_operand:DF 1 "move_operand" " rG,m,rG"))]
2088 "!TARGET_DOUBLE_FLOAT
2089 && (register_operand (operands[0], DFmode)
2090 || reg_or_0_operand (operands[1], DFmode))"
2091 { return riscv_output_move (operands[0], operands[1]); }
2092 [(set_attr "move_type" "move,load,store")
2093 (set_attr "mode" "DF")])
;; Split of doubleword (MOVE64) moves into word-sized pieces when
;; riscv_split_64bit_move_p says a split is required.  NOTE(review): the
;; define_split header line is elided in this view.
2096 [(set (match_operand:MOVE64 0 "nonimmediate_operand")
2097 (match_operand:MOVE64 1 "move_operand"))]
2099 && riscv_split_64bit_move_p (operands[0], operands[1])"
2102 riscv_split_doubleword_move (operands[0], operands[1]);
;; Block copy expander; operand 2 is the byte count, operand 3 the
;; alignment.  Falls back if riscv_expand_block_move cannot handle it.
2106 (define_expand "cpymemsi"
2107 [(parallel [(set (match_operand:BLK 0 "general_operand")
2108 (match_operand:BLK 1 "general_operand"))
2109 (use (match_operand:SI 2 ""))
2110 (use (match_operand:SI 3 "const_int_operand"))])]
2113 if (riscv_expand_block_move (operands[0], operands[1], operands[2]))
2119 ;; Expand in-line code to clear the instruction cache between operand[0] and
;; operand[1]: call the OS icache-flush helper when one is configured,
;; otherwise emit a fence.i when the Zifencei extension is available.
2121 (define_expand "clear_cache"
2122 [(match_operand 0 "pmode_register_operand")
2123 (match_operand 1 "pmode_register_operand")]
2126 #ifdef ICACHE_FLUSH_FUNC
2127 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, ICACHE_FLUSH_FUNC),
2128 LCT_NORMAL, VOIDmode, operands[0], Pmode,
2129 operands[1], Pmode, const0_rtx, Pmode);
2131 if (TARGET_ZIFENCEI)
2132 emit_insn (gen_fence_i ());
;; Memory fence, instruction fence, and the Zihintpause pause hint.
2137 (define_insn "fence"
2138 [(unspec_volatile [(const_int 0)] UNSPECV_FENCE)]
2142 (define_insn "fence_i"
2143 [(unspec_volatile [(const_int 0)] UNSPECV_FENCE_I)]
2147 (define_insn "riscv_pause"
2148 [(unspec_volatile [(const_int 0)] UNSPECV_PAUSE)]
2153 ;; ....................
2157 ;; ....................
2159 ;; Use a QImode shift count, to avoid generating sign or zero extend
2160 ;; instructions for shift counts, and to avoid dropping subregs.
2161 ;; expand_shift_1 can do this automatically when SHIFT_COUNT_TRUNCATED is
2162 ;; defined, but use of that is discouraged.
;; SImode shift insn: constant counts are masked to the 5 low bits; the
;; %~ directive appends "w" on RV64 (sllw/srlw/sraw).
2164 (define_insn "*<optab>si3"
2165 [(set (match_operand:SI 0 "register_operand" "= r")
2167 (match_operand:SI 1 "register_operand" " r")
2168 (match_operand:QI 2 "arith_operand" " rI")))]
2171 if (GET_CODE (operands[2]) == CONST_INT)
2172 operands[2] = GEN_INT (INTVAL (operands[2])
2173 & (GET_MODE_BITSIZE (SImode) - 1));
2175 return "<insn>%i2%~\t%0,%1,%2";
2177 [(set_attr "type" "shift")
2178 (set_attr "mode" "SI")])
;; SImode shift expander: on RV64 it computes in DImode via
;; <optab>si3_extend and marks the lowpart subreg as sign-promoted so
;; later passes can elide redundant extensions.
2180 (define_expand "<optab>si3"
2181 [(set (match_operand:SI 0 "register_operand" "= r")
2182 (any_shift:SI (match_operand:SI 1 "register_operand" " r")
2183 (match_operand:QI 2 "arith_operand" " rI")))]
2188 rtx t = gen_reg_rtx (DImode);
2189 emit_insn (gen_<optab>si3_extend (t, operands[1], operands[2]));
2190 t = gen_lowpart (SImode, t);
2191 SUBREG_PROMOTED_VAR_P (t) = 1;
2192 SUBREG_PROMOTED_SET (t, SRP_SIGNED);
2193 emit_move_insn (operands[0], t);
;; DImode shift insn: constant counts are masked to the 6 low bits.
2198 (define_insn "<optab>di3"
2199 [(set (match_operand:DI 0 "register_operand" "= r")
2201 (match_operand:DI 1 "register_operand" " r")
2202 (match_operand:QI 2 "arith_operand" " rI")))]
2205 if (GET_CODE (operands[2]) == CONST_INT)
2206 operands[2] = GEN_INT (INTVAL (operands[2])
2207 & (GET_MODE_BITSIZE (DImode) - 1));
2209 return "<insn>%i2\t%0,%1,%2";
2211 [(set_attr "type" "shift")
2212 (set_attr "mode" "DI")])
;; Combine patterns that drop an explicit AND-mask on the shift count:
;; the hardware already truncates the count, so (shift x (and y mask))
;; can split to a plain register-count shift.
2214 (define_insn_and_split "*<optab><GPR:mode>3_mask_1"
2215 [(set (match_operand:GPR 0 "register_operand" "= r")
2217 (match_operand:GPR 1 "register_operand" " r")
2218 (match_operator 4 "subreg_lowpart_operator"
2220 (match_operand:GPR2 2 "register_operand" "r")
2221 (match_operand 3 "<GPR:shiftm1>"))])))]
2226 (any_shift:GPR (match_dup 1)
2228 "operands[2] = gen_lowpart (QImode, operands[2]);"
2229 [(set_attr "type" "shift")
2230 (set_attr "mode" "<GPR:MODE>")])
;; RV64: 32-bit shift whose result is sign-extended to 64 bits
;; (sllw/srlw/sraw).  Constant counts are masked to 5 bits.
2232 (define_insn "<optab>si3_extend"
2233 [(set (match_operand:DI 0 "register_operand" "= r")
2235 (any_shift:SI (match_operand:SI 1 "register_operand" " r")
2236 (match_operand:QI 2 "arith_operand" " rI"))))]
2239 if (GET_CODE (operands[2]) == CONST_INT)
2240 operands[2] = GEN_INT (INTVAL (operands[2]) & 0x1f);
2242 return "<insn>%i2w\t%0,%1,%2";
2244 [(set_attr "type" "shift")
2245 (set_attr "mode" "SI")])
;; Same count-mask elision for the sign-extending SImode shift above.
2247 (define_insn_and_split "*<optab>si3_extend_mask"
2248 [(set (match_operand:DI 0 "register_operand" "= r")
2251 (match_operand:SI 1 "register_operand" " r")
2252 (match_operator 4 "subreg_lowpart_operator"
2254 (match_operand:GPR 2 "register_operand" " r")
2255 (match_operand 3 "const_si_mask_operand"))]))))]
2261 (any_shift:SI (match_dup 1)
2263 "operands[2] = gen_lowpart (QImode, operands[2]);"
2264 [(set_attr "type" "shift")
2265 (set_attr "mode" "SI")])
2267 ;; Non-canonical, but can be formed by ree when combine is not successful at
2268 ;; producing one of the two canonical patterns below.
;; All three patterns emit srliw, whose result is implicitly
;; sign-extended, but with a nonzero shift the sign bit is clear, so the
;; effect is a zero-extended right shift.
2269 (define_insn "*lshrsi3_zero_extend_1"
2270 [(set (match_operand:DI 0 "register_operand" "=r")
2272 (lshiftrt:SI (match_operand:SI 1 "register_operand" " r")
2273 (match_operand 2 "const_int_operand"))))]
2274 "TARGET_64BIT && (INTVAL (operands[2]) & 0x1f) > 0"
2276 operands[2] = GEN_INT (INTVAL (operands[2]) & 0x1f);
2278 return "srliw\t%0,%1,%2";
2280 [(set_attr "type" "shift")
2281 (set_attr "mode" "SI")])
2283 ;; Canonical form for a zero-extend of a logical right shift.
;; zero_extract with width+position == 32 is srliw by the position.
2284 (define_insn "*lshrsi3_zero_extend_2"
2285 [(set (match_operand:DI 0 "register_operand" "=r")
2286 (zero_extract:DI (match_operand:DI 1 "register_operand" " r")
2287 (match_operand 2 "const_int_operand")
2288 (match_operand 3 "const_int_operand")))]
2289 "(TARGET_64BIT && (INTVAL (operands[3]) > 0)
2290 && (INTVAL (operands[2]) + INTVAL (operands[3]) == 32))"
2292 return "srliw\t%0,%1,%3"
2294 [(set_attr "type" "shift")
2295 (set_attr "mode" "SI")])
2297 ;; Canonical form for a zero-extend of a logical right shift when the
2298 ;; shift count is 31.
2299 (define_insn "*lshrsi3_zero_extend_3"
2300 [(set (match_operand:DI 0 "register_operand" "=r")
2301 (lt:DI (match_operand:SI 1 "register_operand" " r")
2305 return "srliw\t%0,%1,31";
2307 [(set_attr "type" "shift")
2308 (set_attr "mode" "SI")])
2310 ;; Handle AND with 2^N-1 for N from 12 to XLEN. This can be split into
2311 ;; two logical shifts. Otherwise it requires 3 instructions: lui,
2312 ;; xor/addi/srli, and.
2314 ;; Generating a temporary for the shift output gives better combiner results;
2315 ;; and also fixes a problem where op0 could be a paradoxical reg and shifting
2316 ;; by amounts larger than the size of the SUBREG_REG doesn't work.
;; Split (and x (2^N-1)) into shift-left then logical-shift-right by
;; (XLEN - N), using operand 3 as the scratch.
2318 [(set (match_operand:GPR 0 "register_operand")
2319 (and:GPR (match_operand:GPR 1 "register_operand")
2320 (match_operand:GPR 2 "p2m1_shift_operand")))
2321 (clobber (match_operand:GPR 3 "register_operand"))]
2324 (ashift:GPR (match_dup 1) (match_dup 2)))
2326 (lshiftrt:GPR (match_dup 3) (match_dup 2)))]
2328 /* Op2 is a VOIDmode constant, so get the mode size from op1. */
2329 operands[2] = GEN_INT (GET_MODE_BITSIZE (GET_MODE (operands[1])).to_constant ()
2330 - exact_log2 (INTVAL (operands[2]) + 1));
2333 ;; Handle AND with 0xF...F0...0 where there are 32 to 63 zeros. This can be
2334 ;; split into two shifts. Otherwise it requires 3 instructions: li, sll, and.
;; Split (and x high_mask) into srli then slli by the trailing-zero count.
2336 [(set (match_operand:DI 0 "register_operand")
2337 (and:DI (match_operand:DI 1 "register_operand")
2338 (match_operand:DI 2 "high_mask_shift_operand")))
2339 (clobber (match_operand:DI 3 "register_operand"))]
2342 (lshiftrt:DI (match_dup 1) (match_dup 2)))
2344 (ashift:DI (match_dup 3) (match_dup 2)))]
2346 operands[2] = GEN_INT (ctz_hwi (INTVAL (operands[2])));
2349 ;; Handle SImode to DImode zero-extend combined with a left shift. This can
2350 ;; occur when unsigned int is used for array indexing. Split this into two
2351 ;; shifts. Otherwise we can get 3 shifts.
;; Only without Zba (which has slli.uw); splits after reload into
;; slli-by-32 then srli by (32 - shift amount).
2353 (define_insn_and_split "zero_extendsidi2_shifted"
2354 [(set (match_operand:DI 0 "register_operand" "=r")
2355 (and:DI (ashift:DI (match_operand:DI 1 "register_operand" "r")
2356 (match_operand:QI 2 "immediate_operand" "I"))
2357 (match_operand 3 "immediate_operand" "")))
2358 (clobber (match_scratch:DI 4 "=&r"))]
2359 "TARGET_64BIT && !TARGET_ZBA
2360 && ((INTVAL (operands[3]) >> INTVAL (operands[2])) == 0xffffffff)"
2362 "&& reload_completed"
2364 (ashift:DI (match_dup 1) (const_int 32)))
2366 (lshiftrt:DI (match_dup 4) (match_dup 5)))]
2367 "operands[5] = GEN_INT (32 - (INTVAL (operands [2])));"
2368 [(set_attr "type" "shift")
2369 (set_attr "mode" "DI")])
2372 ;; ....................
2374 ;; CONDITIONAL BRANCHES
2376 ;; ....................
2378 ;; Conditional branches
;; Branch on (x & shifted-mask) ==/!= 0 when the mask is not a small
;; operand: split into srli + andi, then branch on the result.
2380 (define_insn_and_split "*branch<ANYI:mode>_shiftedarith_equals_zero"
2382 (if_then_else (match_operator 1 "equality_operator"
2383 [(and:ANYI (match_operand:ANYI 2 "register_operand" "r")
2384 (match_operand 3 "shifted_const_arith_operand" "i"))
2386 (label_ref (match_operand 0 "" ""))
2388 (clobber (match_scratch:X 4 "=&r"))]
2389 "!SMALL_OPERAND (INTVAL (operands[3]))"
2391 "&& reload_completed"
2392 [(set (match_dup 4) (lshiftrt:X (subreg:X (match_dup 2) 0) (match_dup 6)))
2393 (set (match_dup 4) (and:X (match_dup 4) (match_dup 7)))
2394 (set (pc) (if_then_else (match_op_dup 1 [(match_dup 4) (const_int 0)])
2395 (label_ref (match_dup 0)) (pc)))]
2397 HOST_WIDE_INT mask = INTVAL (operands[3]);
2398 int trailing = ctz_hwi (mask);
2400 operands[6] = GEN_INT (trailing);
2401 operands[7] = GEN_INT (mask >> trailing);
;; Same idea for a mask of consecutive bits: isolate the field with
;; slli (by the leading-zero count) then srli (leading + trailing).
2404 (define_insn_and_split "*branch<ANYI:mode>_shiftedmask_equals_zero"
2406 (if_then_else (match_operator 1 "equality_operator"
2407 [(and:ANYI (match_operand:ANYI 2 "register_operand" "r")
2408 (match_operand 3 "consecutive_bits_operand" "i"))
2410 (label_ref (match_operand 0 "" ""))
2412 (clobber (match_scratch:X 4 "=&r"))]
2413 "(INTVAL (operands[3]) >= 0 || !partial_subreg_p (operands[2]))
2414 && popcount_hwi (INTVAL (operands[3])) > 1
2415 && !SMALL_OPERAND (INTVAL (operands[3]))"
2417 "&& reload_completed"
2418 [(set (match_dup 4) (ashift:X (subreg:X (match_dup 2) 0) (match_dup 6)))
2419 (set (match_dup 4) (lshiftrt:X (match_dup 4) (match_dup 7)))
2420 (set (pc) (if_then_else (match_op_dup 1 [(match_dup 4) (const_int 0)])
2421 (label_ref (match_dup 0)) (pc)))]
2423 unsigned HOST_WIDE_INT mask = INTVAL (operands[3]);
2424 int leading = clz_hwi (mask);
2425 int trailing = ctz_hwi (mask);
2427 operands[6] = GEN_INT (leading);
2428 operands[7] = GEN_INT (leading + trailing);
;; Plain conditional branch comparing two registers (or reg vs x0).
2431 (define_insn "*branch<mode>"
2434 (match_operator 1 "order_operator"
2435 [(match_operand:X 2 "register_operand" "r")
2436 (match_operand:X 3 "reg_or_0_operand" "rJ")])
2437 (label_ref (match_operand 0 "" ""))
2441 [(set_attr "type" "branch")
2442 (set_attr "mode" "none")])
2444 ;; Patterns for implementations that optimize short forward branches.
;; Conditional-move expander for SFB-capable cores and XTheadCondMov.
2446 (define_expand "mov<mode>cc"
2447 [(set (match_operand:GPR 0 "register_operand")
2448 (if_then_else:GPR (match_operand 1 "comparison_operator")
2449 (match_operand:GPR 2 "reg_or_0_operand")
2450 (match_operand:GPR 3 "sfb_alu_operand")))]
2451 "TARGET_SFB_ALU || TARGET_XTHEADCONDMOV"
2453 if (riscv_expand_conditional_move (operands[0], operands[1],
2454 operands[2], operands[3]))
;; Short-forward-branch conditional move: branch over a one-insn mv/li.
2460 (define_insn "*mov<GPR:mode><X:mode>cc"
2461 [(set (match_operand:GPR 0 "register_operand" "=r,r")
2463 (match_operator 5 "order_operator"
2464 [(match_operand:X 1 "register_operand" "r,r")
2465 (match_operand:X 2 "reg_or_0_operand" "rJ,rJ")])
2466 (match_operand:GPR 3 "register_operand" "0,0")
2467 (match_operand:GPR 4 "sfb_alu_operand" "rJ,IL")))]
2470 b%C5\t%1,%z2,1f\t# movcc\;mv\t%0,%z4\n1:
2471 b%C5\t%1,%z2,1f\t# movcc\;li\t%0,%4\n1:"
2472 [(set_attr "length" "8")
2473 (set_attr "type" "sfb_alu")
2474 (set_attr "mode" "<GPR:MODE>")])
2476 ;; Used to implement built-in functions.
2477 (define_expand "condjump"
2479 (if_then_else (match_operand 0)
2480 (label_ref (match_operand 1))
;; Integer compare-and-branch expander; defers to
;; riscv_expand_conditional_branch.
2483 (define_expand "@cbranch<mode>4"
2485 (if_then_else (match_operator 0 "comparison_operator"
2486 [(match_operand:BR 1 "register_operand")
2487 (match_operand:BR 2 "nonmemory_operand")])
2488 (label_ref (match_operand 3 ""))
2492 riscv_expand_conditional_branch (operands[3], GET_CODE (operands[0]),
2493 operands[1], operands[2]);
;; Floating-point compare-and-branch expander (hard-float or Zfinx).
2497 (define_expand "@cbranch<mode>4"
2499 (if_then_else (match_operator 0 "fp_branch_comparison"
2500 [(match_operand:ANYF 1 "register_operand")
2501 (match_operand:ANYF 2 "register_operand")])
2502 (label_ref (match_operand 3 ""))
2504 "TARGET_HARD_FLOAT || TARGET_ZFINX"
2506 riscv_expand_conditional_branch (operands[3], GET_CODE (operands[0]),
2507 operands[1], operands[2]);
;; Branch on a single extracted bit: shift the bit into the sign
;; position, then branch on the sign (EQ becomes GE, NE becomes LT).
2511 (define_insn_and_split "*branch_on_bit<X:mode>"
2514 (match_operator 0 "equality_operator"
2515 [(zero_extract:X (match_operand:X 2 "register_operand" "r")
2517 (match_operand 3 "branch_on_bit_operand"))
2519 (label_ref (match_operand 1))
2521 (clobber (match_scratch:X 4 "=&r"))]
2526 (ashift:X (match_dup 2) (match_dup 3)))
2529 (match_op_dup 0 [(match_dup 4) (const_int 0)])
2530 (label_ref (match_operand 1))
2533 int shift = GET_MODE_BITSIZE (<MODE>mode) - 1 - INTVAL (operands[3]);
2534 operands[3] = GEN_INT (shift);
2536 if (GET_CODE (operands[0]) == EQ)
2537 operands[0] = gen_rtx_GE (<MODE>mode, operands[4], const0_rtx);
2539 operands[0] = gen_rtx_LT (<MODE>mode, operands[4], const0_rtx);
;; Branch on a bit range starting at bit 0: left-shift away the upper
;; bits and branch on the shifted value against zero.
2542 (define_insn_and_split "*branch_on_bit_range<X:mode>"
2545 (match_operator 0 "equality_operator"
2546 [(zero_extract:X (match_operand:X 2 "register_operand" "r")
2547 (match_operand 3 "branch_on_bit_operand")
2550 (label_ref (match_operand 1))
2552 (clobber (match_scratch:X 4 "=&r"))]
2557 (ashift:X (match_dup 2) (match_dup 3)))
2560 (match_op_dup 0 [(match_dup 4) (const_int 0)])
2561 (label_ref (match_operand 1))
2564 operands[3] = GEN_INT (GET_MODE_BITSIZE (<MODE>mode) - INTVAL (operands[3]));
2568 ;; ....................
2570 ;; SETTING A REGISTER FROM A COMPARISON
2572 ;; ....................
2574 ;; Destination is always set in SI mode.
;; Integer scc expander: delegates to riscv_expand_int_scc.
2576 (define_expand "cstore<mode>4"
2577 [(set (match_operand:SI 0 "register_operand")
2578 (match_operator:SI 1 "order_operator"
2579 [(match_operand:GPR 2 "register_operand")
2580 (match_operand:GPR 3 "nonmemory_operand")]))]
2583 riscv_expand_int_scc (operands[0], GET_CODE (operands[1]), operands[2],
;; Floating-point scc expander (hard-float or Zfinx).
2588 (define_expand "cstore<mode>4"
2589 [(set (match_operand:SI 0 "register_operand")
2590 (match_operator:SI 1 "fp_scc_comparison"
2591 [(match_operand:ANYF 2 "register_operand")
2592 (match_operand:ANYF 3 "register_operand")]))]
2593 "TARGET_HARD_FLOAT || TARGET_ZFINX"
2595 riscv_expand_float_scc (operands[0], GET_CODE (operands[1]), operands[2],
;; Native FP compare instructions (feq/flt/fle) setting a GPR.
2600 (define_insn "*cstore<ANYF:mode><X:mode>4"
2601 [(set (match_operand:X 0 "register_operand" "=r")
2602 (match_operator:X 1 "fp_native_comparison"
2603 [(match_operand:ANYF 2 "register_operand" " f")
2604 (match_operand:ANYF 3 "register_operand" " f")]))]
2605 "TARGET_HARD_FLOAT || TARGET_ZFINX"
2606 "f%C1.<fmt>\t%0,%2,%3"
2607 [(set_attr "type" "fcmp")
2608 (set_attr "mode" "<UNITMODE>")])
;; Quiet (non-trapping on qNaN) FP comparison: save FP flags, do the
;; compare, restore flags; with -fsignaling-nans also emit a signaling
;; feq to raise invalid on sNaN operands.
2610 (define_expand "f<quiet_pattern>_quiet<ANYF:mode><X:mode>4"
2611 [(set (match_operand:X 0 "register_operand")
2612 (unspec:X [(match_operand:ANYF 1 "register_operand")
2613 (match_operand:ANYF 2 "register_operand")]
2615 "TARGET_HARD_FLOAT || TARGET_ZFINX"
2617 rtx op0 = operands[0];
2618 rtx op1 = operands[1];
2619 rtx op2 = operands[2];
2620 rtx tmp = gen_reg_rtx (SImode);
2621 rtx cmp = gen_rtx_<QUIET_PATTERN> (<X:MODE>mode, op1, op2);
2622 rtx frflags = gen_rtx_UNSPEC_VOLATILE (SImode, gen_rtvec (1, const0_rtx),
2624 rtx fsflags = gen_rtx_UNSPEC_VOLATILE (SImode, gen_rtvec (1, tmp),
2627 emit_insn (gen_rtx_SET (tmp, frflags));
2628 emit_insn (gen_rtx_SET (op0, cmp));
2629 emit_insn (fsflags);
2630 if (HONOR_SNANS (<ANYF:MODE>mode))
2631 emit_insn (gen_rtx_UNSPEC_VOLATILE (<ANYF:MODE>mode,
2632 gen_rtvec (2, op1, op2),
;; Set-on-comparison insns built from slt/sltu and their immediate
;; forms (the %i2 directive selects the immediate variant).
2637 (define_insn "*seq_zero_<X:mode><GPR:mode>"
2638 [(set (match_operand:GPR 0 "register_operand" "=r")
2639 (eq:GPR (match_operand:X 1 "register_operand" " r")
2643 [(set_attr "type" "slt")
2644 (set_attr "mode" "<X:MODE>")])
2646 (define_insn "*sne_zero_<X:mode><GPR:mode>"
2647 [(set (match_operand:GPR 0 "register_operand" "=r")
2648 (ne:GPR (match_operand:X 1 "register_operand" " r")
2652 [(set_attr "type" "slt")
2653 (set_attr "mode" "<X:MODE>")])
;; x > y  ==>  slt(u) dest, y, x  (operands swapped).
2655 (define_insn "*sgt<u>_<X:mode><GPR:mode>"
2656 [(set (match_operand:GPR 0 "register_operand" "= r")
2657 (any_gt:GPR (match_operand:X 1 "register_operand" " r")
2658 (match_operand:X 2 "reg_or_0_operand" " rJ")))]
2661 [(set_attr "type" "slt")
2662 (set_attr "mode" "<X:MODE>")])
;; x >= 0 (or >= 1 after adjustment) via slt against zero.
2664 (define_insn "*sge<u>_<X:mode><GPR:mode>"
2665 [(set (match_operand:GPR 0 "register_operand" "=r")
2666 (any_ge:GPR (match_operand:X 1 "register_operand" " r")
2669 "slt%i2<u>\t%0,zero,%1"
2670 [(set_attr "type" "slt")
2671 (set_attr "mode" "<X:MODE>")])
2673 (define_insn "*slt<u>_<X:mode><GPR:mode>"
2674 [(set (match_operand:GPR 0 "register_operand" "= r")
2675 (any_lt:GPR (match_operand:X 1 "register_operand" " r")
2676 (match_operand:X 2 "arith_operand" " rI")))]
2678 "slt%i2<u>\t%0,%1,%2"
2679 [(set_attr "type" "slt")
2680 (set_attr "mode" "<X:MODE>")])
;; x <= C  ==>  x < C+1; sle_operand guarantees C+1 still fits.
2682 (define_insn "*sle<u>_<X:mode><GPR:mode>"
2683 [(set (match_operand:GPR 0 "register_operand" "=r")
2684 (any_le:GPR (match_operand:X 1 "register_operand" " r")
2685 (match_operand:X 2 "sle_operand" "")))]
2688 operands[2] = GEN_INT (INTVAL (operands[2]) + 1);
2689 return "slt%i2<u>\t%0,%1,%2";
2691 [(set_attr "type" "slt")
2692 (set_attr "mode" "<X:MODE>")])
2695 ;; ....................
2697 ;; UNCONDITIONAL BRANCHES
2699 ;; ....................
2701 ;; Unconditional branches.
2705 (label_ref (match_operand 0 "" "")))]
2708 [(set_attr "type" "jump")
2709 (set_attr "mode" "none")])
;; Indirect jump: force the target into a Pmode register and pick the
;; SImode or DImode insn accordingly.
2711 (define_expand "indirect_jump"
2712 [(set (pc) (match_operand 0 "register_operand"))]
2715 operands[0] = force_reg (Pmode, operands[0]);
2716 if (Pmode == SImode)
2717 emit_jump_insn (gen_indirect_jumpsi (operands[0]));
2719 emit_jump_insn (gen_indirect_jumpdi (operands[0]));
2723 (define_insn "indirect_jump<mode>"
2724 [(set (pc) (match_operand:P 0 "register_operand" "l"))]
2727 [(set_attr "type" "jump")
2728 (set_attr "mode" "none")])
;; Jump-table dispatch; with PC-relative case vectors the table offset
;; is added to the label address first.
2730 (define_expand "tablejump"
2731 [(set (pc) (match_operand 0 "register_operand" ""))
2732 (use (label_ref (match_operand 1 "" "")))]
2735 if (CASE_VECTOR_PC_RELATIVE)
2736 operands[0] = expand_simple_binop (Pmode, PLUS, operands[0],
2737 gen_rtx_LABEL_REF (Pmode, operands[1]),
2738 NULL_RTX, 0, OPTAB_DIRECT);
2740 if (CASE_VECTOR_PC_RELATIVE && Pmode == DImode)
2741 emit_jump_insn (gen_tablejumpdi (operands[0], operands[1]));
2743 emit_jump_insn (gen_tablejumpsi (operands[0], operands[1]));
2747 (define_insn "tablejump<mode>"
2748 [(set (pc) (match_operand:GPR 0 "register_operand" "l"))
2749 (use (label_ref (match_operand 1 "" "")))]
2752 [(set_attr "type" "jump")
2753 (set_attr "mode" "none")])
2756 ;; ....................
2758 ;; Function prologue/epilogue
2760 ;; ....................
;; Prologue/epilogue expanders hand off to the riscv.cc helpers.
2763 (define_expand "prologue"
2767 riscv_expand_prologue ();
2771 ;; Block any insns from being moved before this point, since the
2772 ;; profiling call to mcount can use various registers that aren't
2773 ;; saved or used to pass arguments.
2775 (define_insn "blockage"
2776 [(unspec_volatile [(const_int 0)] UNSPECV_BLOCKAGE)]
2779 [(set_attr "type" "ghost")
2780 (set_attr "mode" "none")])
2782 (define_expand "epilogue"
2786 riscv_expand_epilogue (NORMAL_RETURN);
2790 (define_expand "sibcall_epilogue"
2794 riscv_expand_epilogue (SIBCALL_RETURN);
2798 ;; Trivial return. Make it look like a normal return insn as that
2799 ;; allows jump optimizations to work better.
2801 (define_expand "return"
2803 "riscv_can_use_return_insn ()"
2806 (define_insn "simple_return"
2810 return riscv_output_return ();
2812 [(set_attr "type" "jump")
2813 (set_attr "mode" "none")])
;; Return through an explicit register (used by epilogue expansion).
2817 (define_insn "simple_return_internal"
2819 (use (match_operand 0 "pmode_register_operand" ""))]
2822 [(set_attr "type" "jump")
2823 (set_attr "mode" "none")])
2825 ;; This is used in compiling the unwind routines.
;; eh_return: store the handler address into the saved return-address
;; slot (mode-specific eh_set_lr insns, split later once the frame
;; layout is known), then return via the exception epilogue.
2826 (define_expand "eh_return"
2827 [(use (match_operand 0 "general_operand"))]
2830 if (GET_MODE (operands[0]) != word_mode)
2831 operands[0] = convert_to_mode (word_mode, operands[0], 0);
2833 emit_insn (gen_eh_set_lr_di (operands[0]));
2835 emit_insn (gen_eh_set_lr_si (operands[0]));
2837 emit_jump_insn (gen_eh_return_internal ());
2842 ;; Clobber the return address on the stack. We can't expand this
2843 ;; until we know where it will be put in the stack frame.
2845 (define_insn "eh_set_lr_si"
2846 [(unspec [(match_operand:SI 0 "register_operand" "r")] UNSPEC_EH_RETURN)
2847 (clobber (match_scratch:SI 1 "=&r"))]
2851 (define_insn "eh_set_lr_di"
2852 [(unspec [(match_operand:DI 0 "register_operand" "r")] UNSPEC_EH_RETURN)
2853 (clobber (match_scratch:DI 1 "=&r"))]
;; Split (post-frame-layout) that actually writes the return address.
2858 [(unspec [(match_operand 0 "register_operand")] UNSPEC_EH_RETURN)
2859 (clobber (match_scratch 1))]
2863 riscv_set_return_address (operands[0], operands[1]);
2867 (define_insn_and_split "eh_return_internal"
2871 "epilogue_completed"
2873 "riscv_expand_epilogue (EXCEPTION_RETURN); DONE;")
2876 ;; ....................
2880 ;; ....................
;; Sibling (tail) calls: the expander legitimizes the call address,
;; the insns require SIBLING_CALL_P and use the j/S/U constraints
;; (no-link jump targets).
2882 (define_expand "sibcall"
2883 [(parallel [(call (match_operand 0 "")
2884 (match_operand 1 ""))
2885 (use (match_operand 2 "")) ;; next_arg_reg
2886 (use (match_operand 3 ""))])] ;; struct_value_size_rtx
2889 rtx target = riscv_legitimize_call_address (XEXP (operands[0], 0));
2890 emit_call_insn (gen_sibcall_internal (target, operands[1]));
2894 (define_insn "sibcall_internal"
2895 [(call (mem:SI (match_operand 0 "call_insn_operand" "j,S,U"))
2896 (match_operand 1 "" ""))]
2897 "SIBLING_CALL_P (insn)"
2902 [(set_attr "type" "call")])
;; Value-returning variant of the above.
2904 (define_expand "sibcall_value"
2905 [(parallel [(set (match_operand 0 "")
2906 (call (match_operand 1 "")
2907 (match_operand 2 "")))
2908 (use (match_operand 3 ""))])] ;; next_arg_reg
2911 rtx target = riscv_legitimize_call_address (XEXP (operands[1], 0));
2912 emit_call_insn (gen_sibcall_value_internal (operands[0], target, operands[2]));
2916 (define_insn "sibcall_value_internal"
2917 [(set (match_operand 0 "" "")
2918 (call (mem:SI (match_operand 1 "call_insn_operand" "j,S,U"))
2919 (match_operand 2 "" "")))]
2920 "SIBLING_CALL_P (insn)"
2925 [(set_attr "type" "call")])
;; Normal calls: like the sibcall patterns but they clobber the return
;; address register (ra) and allow link-register targets ("l").
2927 (define_expand "call"
2928 [(parallel [(call (match_operand 0 "")
2929 (match_operand 1 ""))
2930 (use (match_operand 2 "")) ;; next_arg_reg
2931 (use (match_operand 3 ""))])] ;; struct_value_size_rtx
2934 rtx target = riscv_legitimize_call_address (XEXP (operands[0], 0));
2935 emit_call_insn (gen_call_internal (target, operands[1]));
2939 (define_insn "call_internal"
2940 [(call (mem:SI (match_operand 0 "call_insn_operand" "l,S,U"))
2941 (match_operand 1 "" ""))
2942 (clobber (reg:SI RETURN_ADDR_REGNUM))]
2948 [(set_attr "type" "call")])
;; Value-returning variant.
2950 (define_expand "call_value"
2951 [(parallel [(set (match_operand 0 "")
2952 (call (match_operand 1 "")
2953 (match_operand 2 "")))
2954 (use (match_operand 3 ""))])] ;; next_arg_reg
2957 rtx target = riscv_legitimize_call_address (XEXP (operands[1], 0));
2958 emit_call_insn (gen_call_value_internal (operands[0], target, operands[2]));
2962 (define_insn "call_value_internal"
2963 [(set (match_operand 0 "" "")
2964 (call (mem:SI (match_operand 1 "call_insn_operand" "l,S,U"))
2965 (match_operand 2 "" "")))
2966 (clobber (reg:SI RETURN_ADDR_REGNUM))]
2972 [(set_attr "type" "call")])
2974 ;; Call subroutine returning any type.
;; untyped_call: emit the call, copy each possible result register
;; named in operand 2, then a blockage so the copies are not moved.
2976 (define_expand "untyped_call"
2977 [(parallel [(call (match_operand 0 "")
2979 (match_operand 1 "")
2980 (match_operand 2 "")])]
2985 emit_call_insn (gen_call (operands[0], const0_rtx, NULL, const0_rtx));
2987 for (i = 0; i < XVECLEN (operands[2], 0); i++)
2989 rtx set = XVECEXP (operands[2], 0, i);
2990 riscv_emit_move (SET_DEST (set), SET_SRC (set));
2993 emit_insn (gen_blockage ());
3001 [(set_attr "type" "nop")
3002 (set_attr "mode" "none")])
3005 [(trap_if (const_int 1) (const_int 0))]
3009 ;; Must use the registers that we save to prevent the rename reg optimization
3010 ;; pass from using them before the gpr_save pattern when shrink wrapping
3011 ;; occurs. See bug 95252 for instance.
;; -msave-restore: call/tail the libgcc __riscv_save_N/__riscv_restore_N
;; millicode routines; operand 0 selects the N suffix.
3013 (define_insn "gpr_save"
3014 [(match_parallel 1 "gpr_save_operation"
3015 [(unspec_volatile [(match_operand 0 "const_int_operand")]
3016 UNSPECV_GPR_SAVE)])]
3018 "call\tt0,__riscv_save_%0")
3020 (define_insn "gpr_restore"
3021 [(unspec_volatile [(match_operand 0 "const_int_operand")] UNSPECV_GPR_RESTORE)]
3023 "tail\t__riscv_restore_%0")
3025 (define_insn "gpr_restore_return"
3027 (use (match_operand 0 "pmode_register_operand" ""))
;; Read/write the FP accrued-exception-flags CSR (fflags).
3032 (define_insn "riscv_frflags"
3033 [(set (match_operand:SI 0 "register_operand" "=r")
3034 (unspec_volatile [(const_int 0)] UNSPECV_FRFLAGS))]
3035 "TARGET_HARD_FLOAT || TARGET_ZFINX"
3038 (define_insn "riscv_fsflags"
3039 [(unspec_volatile [(match_operand:SI 0 "csr_operand" "rK")] UNSPECV_FSFLAGS)]
3040 "TARGET_HARD_FLOAT || TARGET_ZFINX"
;; feq with a discarded (zero) result, used only for its side effect of
;; raising the invalid flag on signaling-NaN operands.
3043 (define_insn "*riscv_fsnvsnan<mode>2"
3044 [(unspec_volatile [(match_operand:ANYF 0 "register_operand" "f")
3045 (match_operand:ANYF 1 "register_operand" "f")]
3047 "TARGET_HARD_FLOAT || TARGET_ZFINX"
3048 "feq.<fmt>\tzero,%0,%1"
3049 [(set_attr "type" "fcmp")
3050 (set_attr "mode" "<UNITMODE>")])
;; Privileged-mode trap returns (machine/supervisor/user).
3052 (define_insn "riscv_mret"
3054 (unspec_volatile [(const_int 0)] UNSPECV_MRET)]
3058 (define_insn "riscv_sret"
3060 (unspec_volatile [(const_int 0)] UNSPECV_SRET)]
3064 (define_insn "riscv_uret"
3066 (unspec_volatile [(const_int 0)] UNSPECV_URET)]
;; Zero-length pseudo insn tying two registers through a BLKmode store,
;; used to create an artificial dependency for the scheduler.
3070 (define_insn "stack_tie<mode>"
3071 [(set (mem:BLK (scratch))
3072 (unspec:BLK [(match_operand:X 0 "register_operand" "r")
3073 (match_operand:X 1 "register_operand" "r")]
3077 [(set_attr "length" "0")]
3080 ;; This fixes a failure with gcc.c-torture/execute/pr64242.c at -O2 for a
3081 ;; 32-bit target when using -mtune=sifive-7-series. The first sched pass
3082 ;; runs before register elimination, and we have a non-obvious dependency
3083 ;; between a use of the soft fp and a set of the hard fp. We fix this by
3084 ;; emitting a clobber using the hard fp between the two insns.
3085 (define_expand "restore_stack_nonlocal"
3086 [(match_operand 0 "register_operand")
3087 (match_operand 1 "memory_operand")]
3090 emit_move_insn (operands[0], operands[1]);
3091 /* Prevent the following hard fp restore from being moved before the move
3092 insn above which uses a copy of the soft fp reg. */
3093 emit_clobber (gen_rtx_MEM (BLKmode, hard_frame_pointer_rtx));
3097 ;; Named pattern for expanding thread pointer reference.
;; Copies the thread-pointer register (tp) into operand 0.
3098 (define_expand "get_thread_pointer<mode>"
3099 [(set (match_operand:P 0 "register_operand" "=r")
3104 ;; Named patterns for stack smashing protection.
;; Copy the canary into the frame slot.  With -mstack-protector-guard=tls
;; the guard address is tp + offset instead of the global symbol.
3106 (define_expand "stack_protect_set"
3107 [(match_operand 0 "memory_operand")
3108 (match_operand 1 "memory_operand")]
3111 machine_mode mode = GET_MODE (operands[0]);
3112 if (riscv_stack_protector_guard == SSP_TLS)
3114 rtx reg = gen_rtx_REG (Pmode, riscv_stack_protector_guard_reg);
3115 rtx offset = GEN_INT (riscv_stack_protector_guard_offset);
3116 rtx addr = gen_rtx_PLUS (Pmode, reg, offset);
3117 operands[1] = gen_rtx_MEM (Pmode, addr);
3120 emit_insn ((mode == DImode
3121 ? gen_stack_protect_set_di
3122 : gen_stack_protect_set_si) (operands[0], operands[1]));
3126 ;; DO NOT SPLIT THIS PATTERN. It is important for security reasons that the
3127 ;; canary value does not live beyond the life of this sequence.
3128 (define_insn "stack_protect_set_<mode>"
3129 [(set (match_operand:GPR 0 "memory_operand" "=m")
3130 (unspec:GPR [(match_operand:GPR 1 "memory_operand" "m")]
3132 (set (match_scratch:GPR 2 "=&r") (const_int 0))]
3134 "<load>\t%2, %1\;<store>\t%2, %0\;li\t%2, 0"
3135 [(set_attr "length" "12")])
;; Compare the frame canary against the guard and branch to operand 2
;; (the pass label) when they match; the xor-based insn below zeroes
;; the scratch so the canary never survives in a register.
3137 (define_expand "stack_protect_test"
3138 [(match_operand 0 "memory_operand")
3139 (match_operand 1 "memory_operand")
3144 machine_mode mode = GET_MODE (operands[0]);
3146 result = gen_reg_rtx(mode);
3147 if (riscv_stack_protector_guard == SSP_TLS)
3149 rtx reg = gen_rtx_REG (Pmode, riscv_stack_protector_guard_reg);
3150 rtx offset = GEN_INT (riscv_stack_protector_guard_offset);
3151 rtx addr = gen_rtx_PLUS (Pmode, reg, offset);
3152 operands[1] = gen_rtx_MEM (Pmode, addr);
3154 emit_insn ((mode == DImode
3155 ? gen_stack_protect_test_di
3156 : gen_stack_protect_test_si) (result,
3160 rtx cond = gen_rtx_EQ (VOIDmode, result, const0_rtx);
3161 emit_jump_insn (gen_cbranch4 (mode, cond, result, const0_rtx, operands[2]));
3166 (define_insn "stack_protect_test_<mode>"
3167 [(set (match_operand:GPR 0 "register_operand" "=r")
3168 (unspec:GPR [(match_operand:GPR 1 "memory_operand" "m")
3169 (match_operand:GPR 2 "memory_operand" "m")]
3171 (clobber (match_scratch:GPR 3 "=&r"))]
3173 "<load>\t%3, %1\;<load>\t%0, %2\;xor\t%0, %3, %0\;li\t%3, 0"
3174 [(set_attr "length" "12")])
;; Cache-management-operation builtins (Zicbom clean/flush/inval,
;; Zicboz zero) — each takes the target address in operand 0.
3176 (define_insn "riscv_clean_<mode>"
3177 [(unspec_volatile:X [(match_operand:X 0 "register_operand" "r")]
3183 (define_insn "riscv_flush_<mode>"
3184 [(unspec_volatile:X [(match_operand:X 0 "register_operand" "r")]
3190 (define_insn "riscv_inval_<mode>"
3191 [(unspec_volatile:X [(match_operand:X 0 "register_operand" "r")]
3197 (define_insn "riscv_zero_<mode>"
3198 [(unspec_volatile:X [(match_operand:X 0 "register_operand" "r")]
;; Zicbop data prefetch: operand 1 selects read (0) or write (1).
3204 (define_insn "prefetch"
3205 [(prefetch (match_operand 0 "address_operand" "r")
3206 (match_operand 1 "imm5_operand" "i")
3207 (match_operand 2 "const_int_operand" "n"))]
3210 switch (INTVAL (operands[1]))
3212 case 0: return "prefetch.r\t%a0";
3213 case 1: return "prefetch.w\t%a0";
3214 default: gcc_unreachable ();
;; Zicbop instruction prefetch builtin.
3218 (define_insn "riscv_prefetchi_<mode>"
3219 [(unspec_volatile:X [(match_operand:X 0 "address_operand" "r")
3220 (match_operand:X 1 "imm5_operand" "i")]
;; Bit-field extraction expanders; operand 2 is the width, operand 3
;; the starting position.  NOTE(review): their enabling conditions are
;; elided in this view — confirm against the full file.
3226 (define_expand "extv<mode>"
3227 [(set (match_operand:GPR 0 "register_operand" "=r")
3228 (sign_extract:GPR (match_operand:GPR 1 "register_operand" "r")
3229 (match_operand 2 "const_int_operand")
3230 (match_operand 3 "const_int_operand")))]
3234 (define_expand "extzv<mode>"
3235 [(set (match_operand:GPR 0 "register_operand" "=r")
3236 (zero_extract:GPR (match_operand:GPR 1 "register_operand" "r")
3237 (match_operand 2 "const_int_operand")
3238 (match_operand 3 "const_int_operand")))]
3242 && (INTVAL (operands[2]) < 8) && (INTVAL (operands[3]) == 0))
;; Widening HI*HI multiply-accumulate / multiply-subtract expanders.
3246 (define_expand "maddhisi4"
3247 [(set (match_operand:SI 0 "register_operand")
3249 (mult:SI (sign_extend:SI (match_operand:HI 1 "register_operand"))
3250 (sign_extend:SI (match_operand:HI 2 "register_operand")))
3251 (match_operand:SI 3 "register_operand")))]
3255 (define_expand "msubhisi4"
3256 [(set (match_operand:SI 0 "register_operand")
3258 (match_operand:SI 3 "register_operand")
3259 (mult:SI (sign_extend:SI (match_operand:HI 1 "register_operand"))
3260 (sign_extend:SI (match_operand:HI 2 "register_operand")))))]
;; Sub-machine-descriptions: extensions, peepholes, pipeline models,
;; and the vector (V extension) patterns.
3264 (include "bitmanip.md")
3265 (include "crypto.md")
3267 (include "peephole.md")
3269 (include "generic.md")
3270 (include "sifive-7.md")
3271 (include "thead.md")
3272 (include "vector.md")