1 ;; Machine description for RISC-V for GNU compiler.
2 ;; Copyright (C) 2011-2024 Free Software Foundation, Inc.
3 ;; Contributed by Andrew Waterman (andrew@sifive.com).
4 ;; Based on MIPS target for GNU compiler.
6 ;; This file is part of GCC.
8 ;; GCC is free software; you can redistribute it and/or modify
9 ;; it under the terms of the GNU General Public License as published by
10 ;; the Free Software Foundation; either version 3, or (at your option)
13 ;; GCC is distributed in the hope that it will be useful,
14 ;; but WITHOUT ANY WARRANTY; without even the implied warranty of
15 ;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 ;; GNU General Public License for more details.
18 ;; You should have received a copy of the GNU General Public License
19 ;; along with GCC; see the file COPYING3. If not see
20 ;; <http://www.gnu.org/licenses/>.
23 ;; Keep this list and the one above riscv_print_operand in sync.
24 ;; The special asm out single letter directives following a '%' are:
25 ;; h -- Print the high-part relocation associated with OP, after stripping
26 ;; any outermost HIGH.
27 ;; R -- Print the low-part relocation associated with OP.
28 ;; C -- Print the integer branch condition for comparison OP.
29 ;; A -- Print the atomic operation suffix for memory model OP.
30 ;; F -- Print a FENCE if the memory model requires a release.
31 ;; z -- Print x0 if OP is zero, otherwise print OP normally.
32 ;; i -- Print i if the operand is not a register.
33 ;; S -- Print shift-index of single-bit mask OP.
34 ;; T -- Print shift-index of inverted single-bit mask OP.
35 ;; ~ -- Print w if TARGET_64BIT is true; otherwise print nothing.
37 (define_c_enum "unspec" [
38 ;; Override return address for exception handling.
41 ;; Symbolic accesses. The order of this list must match that of
42 ;; enum riscv_symbol_type in riscv-protos.h.
52 ;; High part of PC-relative address.
55 ;; Floating-point unspecs.
86 ;; the calling convention of callee
92 ;; Workaround for HFmode and BFmode without hardware extension
100 (define_c_enum "unspecv" [
101 ;; Register save and restore.
105 ;; Floating-point unspecs.
112 ;; Interrupt handler instructions.
117 ;; Blockage and synchronization.
122 ;; Stack Smash Protector
133 ;; Zihintpause unspec
137 UNSPECV_XTHEADINT_PUSH
138 UNSPECV_XTHEADINT_POP
142 [(RETURN_ADDR_REGNUM 1)
172 (include "predicates.md")
173 (include "constraints.md")
174 (include "iterators.md")
176 ;; ....................
180 ;; ....................
;; GOT access classification for an insn.  "load" forces the insn's "type"
;; attribute to "load" and its "length" to 8 bytes (see the cond arms of the
;; "type" and "length" attributes below).
;; NOTE(review): "xgot_high" presumably marks the high part of an extended
;; GOT access sequence -- confirm against the expanders in riscv.cc.
182 (define_attr "got" "unset,xgot_high,load"
183 (const_string "unset"))
185 ;; Classification of moves, extensions and truncations. Most values
186 ;; are as for "type" (see below) but there are also the following
187 ;; move-specific values:
189 ;; andi a single ANDI instruction
190 ;; shift_shift a shift left followed by a shift right
192 ;; This attribute is used to determine the instruction's length and
193 ;; scheduling type. For doubleword moves, the attribute always describes
194 ;; the split instructions; in some cases, it is more appropriate for the
195 ;; scheduling type to be "multi" instead.
196 (define_attr "move_type"
197 "unknown,load,fpload,store,fpstore,mtc,mfc,move,fmove,
198 const,logical,arith,andi,shift_shift,rdvlenb"
;; "rdvlenb" marks a move implemented as a CSR read of the vector byte
;; length register vlenb; it maps straight through to the "rdvlenb"
;; scheduling type (see the "type" attribute below).
199 (const_string "unknown"))
201 ;; Main data type used by the insn
202 (define_attr "mode" "unknown,none,QI,HI,SI,DI,TI,HF,BF,SF,DF,TF,
203 RVVMF64BI,RVVMF32BI,RVVMF16BI,RVVMF8BI,RVVMF4BI,RVVMF2BI,RVVM1BI,
204 RVVM8QI,RVVM4QI,RVVM2QI,RVVM1QI,RVVMF2QI,RVVMF4QI,RVVMF8QI,
205 RVVM8HI,RVVM4HI,RVVM2HI,RVVM1HI,RVVMF2HI,RVVMF4HI,
206 RVVM8BF,RVVM4BF,RVVM2BF,RVVM1BF,RVVMF2BF,RVVMF4BF,
207 RVVM8HF,RVVM4HF,RVVM2HF,RVVM1HF,RVVMF2HF,RVVMF4HF,
208 RVVM8SI,RVVM4SI,RVVM2SI,RVVM1SI,RVVMF2SI,
209 RVVM8SF,RVVM4SF,RVVM2SF,RVVM1SF,RVVMF2SF,
210 RVVM8DI,RVVM4DI,RVVM2DI,RVVM1DI,
211 RVVM8DF,RVVM4DF,RVVM2DF,RVVM1DF,
212 RVVM1x8QI,RVVMF2x8QI,RVVMF4x8QI,RVVMF8x8QI,
213 RVVM1x7QI,RVVMF2x7QI,RVVMF4x7QI,RVVMF8x7QI,
214 RVVM1x6QI,RVVMF2x6QI,RVVMF4x6QI,RVVMF8x6QI,
215 RVVM1x5QI,RVVMF2x5QI,RVVMF4x5QI,RVVMF8x5QI,
216 RVVM2x4QI,RVVM1x4QI,RVVMF2x4QI,RVVMF4x4QI,RVVMF8x4QI,
217 RVVM2x3QI,RVVM1x3QI,RVVMF2x3QI,RVVMF4x3QI,RVVMF8x3QI,
218 RVVM4x2QI,RVVM2x2QI,RVVM1x2QI,RVVMF2x2QI,RVVMF4x2QI,RVVMF8x2QI,
219 RVVM1x8HI,RVVMF2x8HI,RVVMF4x8HI,
220 RVVM1x7HI,RVVMF2x7HI,RVVMF4x7HI,
221 RVVM1x6HI,RVVMF2x6HI,RVVMF4x6HI,
222 RVVM1x5HI,RVVMF2x5HI,RVVMF4x5HI,
223 RVVM2x4HI,RVVM1x4HI,RVVMF2x4HI,RVVMF4x4HI,
224 RVVM2x3HI,RVVM1x3HI,RVVMF2x3HI,RVVMF4x3HI,
225 RVVM4x2HI,RVVM2x2HI,RVVM1x2HI,RVVMF2x2HI,RVVMF4x2HI,
226 RVVM1x8BF,RVVMF2x8BF,RVVMF4x8BF,RVVM1x7BF,RVVMF2x7BF,
227 RVVMF4x7BF,RVVM1x6BF,RVVMF2x6BF,RVVMF4x6BF,RVVM1x5BF,
228 RVVMF2x5BF,RVVMF4x5BF,RVVM2x4BF,RVVM1x4BF,RVVMF2x4BF,
229 RVVMF4x4BF,RVVM2x3BF,RVVM1x3BF,RVVMF2x3BF,RVVMF4x3BF,
230 RVVM4x2BF,RVVM2x2BF,RVVM1x2BF,RVVMF2x2BF,RVVMF4x2BF,
231 RVVM1x8HF,RVVMF2x8HF,RVVMF4x8HF,RVVM1x7HF,RVVMF2x7HF,
232 RVVMF4x7HF,RVVM1x6HF,RVVMF2x6HF,RVVMF4x6HF,RVVM1x5HF,
233 RVVMF2x5HF,RVVMF4x5HF,RVVM2x4HF,RVVM1x4HF,RVVMF2x4HF,
234 RVVMF4x4HF,RVVM2x3HF,RVVM1x3HF,RVVMF2x3HF,RVVMF4x3HF,
235 RVVM4x2HF,RVVM2x2HF,RVVM1x2HF,RVVMF2x2HF,RVVMF4x2HF,
236 RVVM1x8SI,RVVMF2x8SI,
237 RVVM1x7SI,RVVMF2x7SI,
238 RVVM1x6SI,RVVMF2x6SI,
239 RVVM1x5SI,RVVMF2x5SI,
240 RVVM2x4SI,RVVM1x4SI,RVVMF2x4SI,
241 RVVM2x3SI,RVVM1x3SI,RVVMF2x3SI,
242 RVVM4x2SI,RVVM2x2SI,RVVM1x2SI,RVVMF2x2SI,
243 RVVM1x8SF,RVVMF2x8SF,RVVM1x7SF,RVVMF2x7SF,
244 RVVM1x6SF,RVVMF2x6SF,RVVM1x5SF,RVVMF2x5SF,
245 RVVM2x4SF,RVVM1x4SF,RVVMF2x4SF,RVVM2x3SF,
246 RVVM1x3SF,RVVMF2x3SF,RVVM4x2SF,RVVM2x2SF,
247 RVVM1x2SF,RVVMF2x2SF,
248 RVVM1x8DI,RVVM1x7DI,RVVM1x6DI,RVVM1x5DI,
249 RVVM2x4DI,RVVM1x4DI,RVVM2x3DI,RVVM1x3DI,
250 RVVM4x2DI,RVVM2x2DI,RVVM1x2DI,RVVM1x8DF,
251 RVVM1x7DF,RVVM1x6DF,RVVM1x5DF,RVVM2x4DF,
252 RVVM1x4DF,RVVM2x3DF,RVVM1x3DF,RVVM4x2DF,
254 V1QI,V2QI,V4QI,V8QI,V16QI,V32QI,V64QI,V128QI,V256QI,V512QI,V1024QI,V2048QI,V4096QI,
255 V1HI,V2HI,V4HI,V8HI,V16HI,V32HI,V64HI,V128HI,V256HI,V512HI,V1024HI,V2048HI,
256 V1SI,V2SI,V4SI,V8SI,V16SI,V32SI,V64SI,V128SI,V256SI,V512SI,V1024SI,
257 V1DI,V2DI,V4DI,V8DI,V16DI,V32DI,V64DI,V128DI,V256DI,V512DI,
258 V1HF,V2HF,V4HF,V8HF,V16HF,V32HF,V64HF,V128HF,V256HF,V512HF,V1024HF,V2048HF,
259 V1SF,V2SF,V4SF,V8SF,V16SF,V32SF,V64SF,V128SF,V256SF,V512SF,V1024SF,
260 V1DF,V2DF,V4DF,V8DF,V16DF,V32DF,V64DF,V128DF,V256DF,V512DF,
261 V1BI,V2BI,V4BI,V8BI,V16BI,V32BI,V64BI,V128BI,V256BI,V512BI,V1024BI,V2048BI,V4096BI"
262 (const_string "unknown"))
264 ;; True if the main data type is twice the size of a word.
265 (define_attr "dword_mode" "no,yes"
266 (cond [(and (eq_attr "mode" "DI,DF")
267 (eq (symbol_ref "TARGET_64BIT") (const_int 0)))
270 (and (eq_attr "mode" "TI,TF")
271 (ne (symbol_ref "TARGET_64BIT") (const_int 0)))
272 (const_string "yes")]
273 (const_string "no")))
;; ISA extension an insn requires.  Paired with "ext_enabled" below to
;; disable alternatives when the corresponding extension (F for "f",
;; D for "d", V for "vector") is not available; "base" is always enabled.
276 (define_attr "ext" "base,f,d,vector"
277 (const_string "base"))
279 ;; True if the extension is enabled.
280 (define_attr "ext_enabled" "no,yes"
281 (cond [(eq_attr "ext" "base")
284 (and (eq_attr "ext" "f")
285 (match_test "TARGET_HARD_FLOAT"))
288 (and (eq_attr "ext" "d")
289 (match_test "TARGET_DOUBLE_FLOAT"))
292 (and (eq_attr "ext" "vector")
293 (match_test "TARGET_VECTOR"))
296 (const_string "no")))
298 ;; Classification of each insn.
299 ;; branch conditional branch
300 ;; jump unconditional direct jump
301 ;; jalr unconditional indirect jump
302 ;; ret various returns, no arguments
303 ;; call unconditional call
304 ;; load load instruction(s)
305 ;; fpload floating point load
306 ;; store store instruction(s)
307 ;; fpstore floating point store
308 ;; mtc transfer to coprocessor
309 ;; mfc transfer from coprocessor
310 ;; const load constant
311 ;; arith integer arithmetic instructions
312 ;; logical integer logical instructions
313 ;; shift integer shift instructions
314 ;; slt set less than instructions
315 ;; imul integer multiply
316 ;; idiv integer divide
317 ;; move integer register move (addi rd, rs1, 0)
318 ;; fmove floating point register move
319 ;; fadd floating point add/subtract
320 ;; fmul floating point multiply
321 ;; fmadd floating point multiply-add
322 ;; fdiv floating point divide
323 ;; fcmp floating point compare
324 ;; fcvt floating point convert
325 ;; fcvt_i2f integer to floating point convert
326 ;; fcvt_f2i floating point to integer convert
327 ;; fsqrt floating point square root
328 ;; multi multiword sequence (or user asm statements)
329 ;; auipc integer addition to PC
330 ;; sfb_alu SFB ALU instruction
332 ;; trap trap instruction
333 ;; ghost an instruction that produces no real code
334 ;; bitmanip bit manipulation instructions
335 ;; clmul clmul, clmulh, clmulr
336 ;; rotate rotation instructions
337 ;; atomic atomic instructions
338 ;; condmove conditional moves
339 ;; crypto cryptography instructions
340 ;; mvpair zc move pair instructions
341 ;; zicond zicond instructions
342 ;; Classification of RVV instructions which will be added to each RVV .md pattern and used by scheduler.
343 ;; rdvlenb vector byte length vlenb csrr read
344 ;; rdvl vector length vl csrr read
345 ;; wrvxrm vector fixed-point rounding mode write
346 ;; wrfrm vector floating-point rounding mode write
347 ;; vsetvl vector configuration-setting instructions
348 ;; 7. Vector Loads and Stores
349 ;; vlde vector unit-stride load instructions
350 ;; vste vector unit-stride store instructions
351 ;; vldm vector unit-stride mask load instructions
352 ;; vstm vector unit-stride mask store instructions
353 ;; vlds vector strided load instructions
354 ;; vsts vector strided store instructions
355 ;; vldux vector unordered indexed load instructions
356 ;; vldox vector ordered indexed load instructions
357 ;; vstux vector unordered indexed store instructions
358 ;; vstox vector ordered indexed store instructions
359 ;; vldff vector unit-stride fault-only-first load instructions
360 ;; vldr vector whole register load instructions
361 ;; vstr vector whole register store instructions
362 ;; vlsegde vector segment unit-stride load instructions
363 ;; vssegte vector segment unit-stride store instructions
364 ;; vlsegds vector segment strided load instructions
365 ;; vssegts vector segment strided store instructions
366 ;; vlsegdux vector segment unordered indexed load instructions
367 ;; vlsegdox vector segment ordered indexed load instructions
368 ;; vssegtux vector segment unordered indexed store instructions
369 ;; vssegtox vector segment ordered indexed store instructions
370 ;; vlsegdff vector segment unit-stride fault-only-first load instructions
371 ;; 11. Vector integer arithmetic instructions
372 ;; vialu vector single-width integer add and subtract and logical instructions
373 ;; viwalu vector widening integer add/subtract
374 ;; vext vector integer extension
375 ;; vicalu vector arithmetic with carry or borrow instructions
376 ;; vshift vector single-width bit shift instructions
377 ;; vnshift vector narrowing integer shift instructions
378 ;; viminmax vector integer min/max instructions
379 ;; vicmp vector integer comparison instructions
380 ;; vimul vector single-width integer multiply instructions
381 ;; vidiv vector single-width integer divide instructions
382 ;; viwmul vector widening integer multiply instructions
383 ;; vimuladd vector single-width integer multiply-add instructions
384 ;; viwmuladd vector widening integer multiply-add instructions
385 ;; vimerge vector integer merge instructions
386 ;; vimov vector integer move vector instructions
387 ;; 12. Vector fixed-point arithmetic instructions
388 ;; vsalu vector single-width saturating add and subtract and logical instructions
389 ;; vaalu vector single-width averaging add and subtract and logical instructions
390 ;; vsmul vector single-width fractional multiply with rounding and saturation instructions
391 ;; vsshift vector single-width scaling shift instructions
392 ;; vnclip vector narrowing fixed-point clip instructions
393 ;; 13. Vector floating-point instructions
394 ;; vfalu vector single-width floating-point add/subtract instructions
395 ;; vfwalu vector widening floating-point add/subtract instructions
396 ;; vfmul vector single-width floating-point multiply instructions
397 ;; vfdiv vector single-width floating-point divide instructions
398 ;; vfwmul vector widening floating-point multiply instructions
399 ;; vfmuladd vector single-width floating-point multiply-add instructions
400 ;; vfwmuladd vector widening floating-point multiply-add instructions
401 ;; vfsqrt vector floating-point square-root instructions
402 ;; vfrecp vector floating-point reciprocal square-root instructions
403 ;; vfminmax vector floating-point min/max instructions
404 ;; vfcmp vector floating-point comparison instructions
405 ;; vfsgnj vector floating-point sign-injection instructions
406 ;; vfclass vector floating-point classify instruction
407 ;; vfmerge vector floating-point merge instruction
408 ;; vfmov vector floating-point move instruction
409 ;; vfcvtitof vector single-width integer to floating-point instruction
410 ;; vfcvtftoi vector single-width floating-point to integer instruction
411 ;; vfwcvtitof vector widening integer to floating-point instruction
412 ;; vfwcvtftoi vector widening floating-point to integer instruction
413 ;; vfwcvtftof vector widening floating-point to floating-point instruction
414 ;; vfncvtitof vector narrowing integer to floating-point instruction
415 ;; vfncvtftoi vector narrowing floating-point to integer instruction
416 ;; vfncvtftof vector narrowing floating-point to floating-point instruction
417 ;; 14. Vector reduction operations
418 ;; vired vector single-width integer reduction instructions
419 ;; viwred vector widening integer reduction instructions
420 ;; vfredu vector single-width floating-point un-ordered reduction instruction
421 ;; vfredo vector single-width floating-point ordered reduction instruction
422 ;; vfwredu vector widening floating-point un-ordered reduction instruction
423 ;; vfwredo vector widening floating-point ordered reduction instruction
424 ;; 15. Vector mask instructions
425 ;; vmalu vector mask-register logical instructions
426 ;; vmpop vector mask population count
427 ;; vmffs vector find-first-set mask bit
428 ;; vmsfs vector set mask bit
429 ;; vmiota vector iota
430 ;; vmidx vector element index instruction
431 ;; 16. Vector permutation instructions
432 ;; vimovvx integer scalar move instructions
433 ;; vimovxv integer scalar move instructions
434 ;; vfmovvf floating-point scalar move instructions
435 ;; vfmovfv floating-point scalar move instructions
436 ;; vslideup vector slide instructions
437 ;; vslidedown vector slide instructions
438 ;; vislide1up vector slide instructions
439 ;; vislide1down vector slide instructions
440 ;; vfslide1up vector slide instructions
441 ;; vfslide1down vector slide instructions
442 ;; vgather vector register gather instructions
443 ;; vcompress vector compress instruction
444 ;; vmov whole vector register move
445 ;; vector unknown vector instruction
446 ;; 17. Crypto Vector instructions
447 ;; vandn crypto vector bitwise and-not instructions
448 ;; vbrev crypto vector reverse bits in elements instructions
449 ;; vbrev8 crypto vector reverse bits in bytes instructions
450 ;; vrev8 crypto vector reverse bytes instructions
451 ;; vclz crypto vector count leading Zeros instructions
452 ;; vctz crypto vector count trailing Zeros instructions
453 ;; vrol crypto vector rotate left instructions
454 ;; vror crypto vector rotate right instructions
455 ;; vwsll crypto vector widening shift left logical instructions
456 ;; vclmul crypto vector carry-less multiply - return low half instructions
457 ;; vclmulh crypto vector carry-less multiply - return high half instructions
458 ;; vghsh crypto vector add-multiply over GHASH Galois-Field instructions
459 ;; vgmul crypto vector multiply over GHASH Galois-Field instructions
460 ;; vaesef crypto vector AES final-round encryption instructions
461 ;; vaesem crypto vector AES middle-round encryption instructions
462 ;; vaesdf crypto vector AES final-round decryption instructions
463 ;; vaesdm crypto vector AES middle-round decryption instructions
464 ;; vaeskf1 crypto vector AES-128 Forward KeySchedule generation instructions
465 ;; vaeskf2 crypto vector AES-256 Forward KeySchedule generation instructions
466 ;; vaesz crypto vector AES round zero encryption/decryption instructions
467 ;; vsha2ms crypto vector SHA-2 message schedule instructions
468 ;; vsha2ch crypto vector SHA-2 two rounds of compression instructions
469 ;; vsha2cl crypto vector SHA-2 two rounds of compression instructions
470 ;; vsm4k crypto vector SM4 KeyExpansion instructions
471 ;; vsm4r crypto vector SM4 Rounds instructions
472 ;; vsm3me crypto vector SM3 Message Expansion instructions
473 ;; vsm3c crypto vector SM3 Compression instructions
474 ;; 18. Vector BF16 instructions
475 ;; vfncvtbf16 vector narrowing single floating-point to brain floating-point instruction
476 ;; vfwcvtbf16 vector widening brain floating-point to single floating-point instruction
477 ;; vfwmaccbf16 vector BF16 widening multiply-accumulate
479 "unknown,branch,jump,jalr,ret,call,load,fpload,store,fpstore,
480 mtc,mfc,const,arith,logical,shift,slt,imul,idiv,move,fmove,fadd,fmul,
481 fmadd,fdiv,fcmp,fcvt,fcvt_i2f,fcvt_f2i,fsqrt,multi,auipc,sfb_alu,nop,trap,
482 ghost,bitmanip,rotate,clmul,min,max,minu,maxu,clz,ctz,cpop,
483 atomic,condmove,crypto,mvpair,zicond,rdvlenb,rdvl,wrvxrm,wrfrm,
484 rdfrm,vsetvl,vsetvl_pre,vlde,vste,vldm,vstm,vlds,vsts,
485 vldux,vldox,vstux,vstox,vldff,vldr,vstr,
486 vlsegde,vssegte,vlsegds,vssegts,vlsegdux,vlsegdox,vssegtux,vssegtox,vlsegdff,
487 vialu,viwalu,vext,vicalu,vshift,vnshift,vicmp,viminmax,
488 vimul,vidiv,viwmul,vimuladd,viwmuladd,vimerge,vimov,
489 vsalu,vaalu,vsmul,vsshift,vnclip,
490 vfalu,vfwalu,vfmul,vfdiv,vfwmul,vfmuladd,vfwmuladd,vfsqrt,vfrecp,
491 vfcmp,vfminmax,vfsgnj,vfclass,vfmerge,vfmov,
492 vfcvtitof,vfcvtftoi,vfwcvtitof,vfwcvtftoi,
493 vfwcvtftof,vfncvtitof,vfncvtftoi,vfncvtftof,
494 vired,viwred,vfredu,vfredo,vfwredu,vfwredo,
495 vmalu,vmpop,vmffs,vmsfs,vmiota,vmidx,vimovvx,vimovxv,vfmovvf,vfmovfv,
496 vslideup,vslidedown,vislide1up,vislide1down,vfslide1up,vfslide1down,
497 vgather,vcompress,vmov,vector,vandn,vbrev,vbrev8,vrev8,vclz,vctz,vcpop,vrol,vror,vwsll,
498 vclmul,vclmulh,vghsh,vgmul,vaesef,vaesem,vaesdf,vaesdm,vaeskf1,vaeskf2,vaesz,
499 vsha2ms,vsha2ch,vsha2cl,vsm4k,vsm4r,vsm3me,vsm3c,vfncvtbf16,vfwcvtbf16,vfwmaccbf16"
500 (cond [(eq_attr "got" "load") (const_string "load")
502 ;; If a doubleword move uses these expensive instructions,
503 ;; it is usually better to schedule them in the same way
504 ;; as the singleword form, rather than as "multi".
505 (eq_attr "move_type" "load") (const_string "load")
506 (eq_attr "move_type" "fpload") (const_string "fpload")
507 (eq_attr "move_type" "store") (const_string "store")
508 (eq_attr "move_type" "fpstore") (const_string "fpstore")
509 (eq_attr "move_type" "mtc") (const_string "mtc")
510 (eq_attr "move_type" "mfc") (const_string "mfc")
512 ;; These types of move are always single insns.
513 (eq_attr "move_type" "fmove") (const_string "fmove")
514 (eq_attr "move_type" "arith") (const_string "arith")
515 (eq_attr "move_type" "logical") (const_string "logical")
516 (eq_attr "move_type" "andi") (const_string "logical")
518 ;; These types of move are always split.
519 (eq_attr "move_type" "shift_shift")
520 (const_string "multi")
522 ;; These types of move are split for doubleword modes only.
523 (and (eq_attr "move_type" "move,const")
524 (eq_attr "dword_mode" "yes"))
525 (const_string "multi")
526 (eq_attr "move_type" "move") (const_string "move")
527 (eq_attr "move_type" "const") (const_string "const")
528 (eq_attr "move_type" "rdvlenb") (const_string "rdvlenb")]
529 (const_string "unknown")))
531 ;; True if the floating-point vector operation is disabled.
532 (define_attr "fp_vector_disabled" "no,yes"
534 (and (eq_attr "type" "vfmov,vfalu,vfmul,vfdiv,
535 vfwalu,vfwmul,vfmuladd,vfwmuladd,
536 vfsqrt,vfrecp,vfminmax,vfsgnj,vfcmp,
538 vfncvtitof,vfwcvtftoi,vfcvtftoi,vfcvtitof,
539 vfredo,vfredu,vfwredo,vfwredu,
540 vfslide1up,vfslide1down")
541 (and (eq_attr "mode" "RVVM8HF,RVVM4HF,RVVM2HF,RVVM1HF,RVVMF2HF,RVVMF4HF")
542 (match_test "!TARGET_ZVFH")))
545 ;; The mode records as QI for the FP16 <=> INT8 instruction.
546 (and (eq_attr "type" "vfncvtftoi,vfwcvtitof")
547 (and (eq_attr "mode" "RVVM4QI,RVVM2QI,RVVM1QI,RVVMF2QI,RVVMF4QI,RVVMF8QI")
548 (match_test "!TARGET_ZVFH")))
551 (const_string "no")))
553 ;; This attribute marks the alternatives not matching the constraints
554 ;; described in spec as disabled.
;; "thv" / "rvv" name which spec's constraints the alternative violates
;; (T-Head vector vs. standard RVV); it is consumed by the
;; "spec_restriction_disabled" attribute below to disable the alternative
;; when the corresponding target is active.
555 (define_attr "spec_restriction" "none,thv,rvv"
556 (const_string "none"))
558 (define_attr "spec_restriction_disabled" "no,yes"
559 (cond [(eq_attr "spec_restriction" "none")
562 (and (eq_attr "spec_restriction" "thv")
563 (match_test "TARGET_XTHEADVECTOR"))
566 (and (eq_attr "spec_restriction" "rvv")
567 (match_test "TARGET_VECTOR && !TARGET_XTHEADVECTOR"))
570 (const_string "no")))
572 ;; Attribute to control enable or disable instructions.
573 (define_attr "enabled" "no,yes"
575 (eq_attr "ext_enabled" "no")
578 (eq_attr "fp_vector_disabled" "yes")
581 (eq_attr "spec_restriction_disabled" "yes")
584 (const_string "yes")))
586 ;; Length of instruction in bytes.
587 (define_attr "length" ""
589 ;; Branches further than +/- 1 MiB require three instructions.
590 ;; Branches further than +/- 4 KiB require two instructions.
591 (eq_attr "type" "branch")
592 (if_then_else (and (le (minus (match_dup 0) (pc))
594 (le (minus (pc) (match_dup 0))
597 (if_then_else (and (le (minus (match_dup 0) (pc))
599 (le (minus (pc) (match_dup 0))
600 (const_int 1048572)))
604 ;; Jumps further than +/- 1 MiB require two instructions.
605 (eq_attr "type" "jump")
606 (if_then_else (and (le (minus (match_dup 0) (pc))
608 (le (minus (pc) (match_dup 0))
609 (const_int 1048572)))
613 ;; Conservatively assume calls take two instructions (AUIPC + JALR).
614 ;; The linker will opportunistically relax the sequence to JAL.
615 (eq_attr "type" "call") (const_int 8)
617 ;; "Ghost" instructions occupy no space.
618 (eq_attr "type" "ghost") (const_int 0)
620 (eq_attr "got" "load") (const_int 8)
622 ;; SHIFT_SHIFTs are decomposed into two separate instructions.
623 (eq_attr "move_type" "shift_shift")
626 ;; Check for doubleword moves that are decomposed into two
628 (and (eq_attr "move_type" "mtc,mfc,move")
629 (eq_attr "dword_mode" "yes"))
632 ;; Doubleword CONST{,N} moves are split into two word
634 (and (eq_attr "move_type" "const")
635 (eq_attr "dword_mode" "yes"))
636 (symbol_ref "riscv_split_const_insns (operands[1]) * 4")
638 ;; Otherwise, constants, loads and stores are handled by external
640 (eq_attr "move_type" "load,fpload")
641 (symbol_ref "riscv_load_store_insns (operands[1], insn) * 4")
642 (eq_attr "move_type" "store,fpstore")
643 (symbol_ref "riscv_load_store_insns (operands[0], insn) * 4")
646 ;; Is copying of this instruction disallowed?
;; Set to "yes" on individual patterns whose insns must not be duplicated
;; by the compiler; defaults to "no".
647 (define_attr "cannot_copy" "no,yes" (const_string "no"))
649 ;; Microarchitectures we know how to tune for.
650 ;; Keep this in sync with enum riscv_microarchitecture.
652 "generic,sifive_7,sifive_p400,sifive_p600,xiangshan,generic_ooo"
653 (const (symbol_ref "((enum attr_tune) riscv_microarchitecture)")))
655 ;; Describe a user's asm statement.
;; Inline asm is conservatively classified as "multi" (a multiword
;; sequence), matching the "type" attribute's documentation above.
656 (define_asm_attributes
657 [(set_attr "type" "multi")])
659 ;; Ghost instructions produce no real code and introduce no hazards.
660 ;; They exist purely to express an effect on dataflow.
661 (define_insn_reservation "ghost" 0
662 (eq_attr "type" "ghost")
666 ;; ....................
670 ;; ....................
;; Floating-point addition over the ANYF mode iterator, emitting
;; "fadd.<fmt> rd,rs1,rs2".  Enabled for hard-float targets or when the
;; Zfinx family is available (TARGET_HARD_FLOAT || TARGET_ZFINX).
673 (define_insn "add<mode>3"
674 [(set (match_operand:ANYF 0 "register_operand" "=f")
675 (plus:ANYF (match_operand:ANYF 1 "register_operand" " f")
676 (match_operand:ANYF 2 "register_operand" " f")))]
677 "TARGET_HARD_FLOAT || TARGET_ZFINX"
678 "fadd.<fmt>\t%0,%1,%2"
679 [(set_attr "type" "fadd")
680 (set_attr "mode" "<UNITMODE>")])
682 (define_insn "*addsi3"
683 [(set (match_operand:SI 0 "register_operand" "=r,r")
684 (plus:SI (match_operand:SI 1 "register_operand" " r,r")
685 (match_operand:SI 2 "arith_operand" " r,I")))]
688 [(set_attr "type" "arith")
689 (set_attr "mode" "SI")])
691 (define_expand "addsi3"
692 [(set (match_operand:SI 0 "register_operand" "=r,r")
693 (plus:SI (match_operand:SI 1 "register_operand" " r,r")
694 (match_operand:SI 2 "arith_operand" " r,I")))]
699 rtx t = gen_reg_rtx (DImode);
700 emit_insn (gen_addsi3_extended (t, operands[1], operands[2]));
701 t = gen_lowpart (SImode, t);
702 SUBREG_PROMOTED_VAR_P (t) = 1;
703 SUBREG_PROMOTED_SET (t, SRP_SIGNED);
704 emit_move_insn (operands[0], t);
709 (define_insn "adddi3"
710 [(set (match_operand:DI 0 "register_operand" "=r,r")
711 (plus:DI (match_operand:DI 1 "register_operand" " r,r")
712 (match_operand:DI 2 "arith_operand" " r,I")))]
715 [(set_attr "type" "arith")
716 (set_attr "mode" "DI")])
718 ;; Special case of adding a reg and constant if latter is sum of two S12
719 ;; values (in range -2048 to 2047). Avoid materializing the const and fuse
720 ;; it into the add (with an additional add for 2nd value). Makes a 3 insn
721 ;; sequence into a 2 insn sequence.
723 (define_insn_and_split "*add<mode>3_const_sum_of_two_s12"
724 [(set (match_operand:P 0 "register_operand" "=r,r")
725 (plus:P (match_operand:P 1 "register_operand" " r,r")
726 (match_operand:P 2 "const_two_s12" " MiG,r")))]
727 "!riscv_reg_frame_related (operands[0])"
729 /* operand matching MiG constraint is always meant to be split. */
730 if (which_alternative == 0)
733 return "add %0,%1,%2";
737 (plus:P (match_dup 1) (match_dup 3)))
739 (plus:P (match_dup 0) (match_dup 4)))]
741 int val = INTVAL (operands[2]);
742 if (SUM_OF_TWO_S12_P (val))
744 operands[3] = GEN_INT (2047);
745 operands[4] = GEN_INT (val - 2047);
747 else if (SUM_OF_TWO_S12_N (val))
749 operands[3] = GEN_INT (-2048);
750 operands[4] = GEN_INT (val + 2048);
755 [(set_attr "type" "arith")
756 (set_attr "mode" "<P:MODE>")])
758 (define_expand "addv<mode>4"
759 [(set (match_operand:GPR 0 "register_operand" "=r,r")
760 (plus:GPR (match_operand:GPR 1 "register_operand" " r,r")
761 (match_operand:GPR 2 "arith_operand" " r,I")))
762 (label_ref (match_operand 3 "" ""))]
765 if (TARGET_64BIT && <MODE>mode == SImode)
767 rtx t3 = gen_reg_rtx (DImode);
768 rtx t4 = gen_reg_rtx (DImode);
769 rtx t5 = gen_reg_rtx (DImode);
770 rtx t6 = gen_reg_rtx (DImode);
772 riscv_emit_binary (PLUS, operands[0], operands[1], operands[2]);
773 if (GET_CODE (operands[1]) != CONST_INT)
774 emit_insn (gen_extend_insn (t4, operands[1], DImode, SImode, 0));
777 if (GET_CODE (operands[2]) != CONST_INT)
778 emit_insn (gen_extend_insn (t5, operands[2], DImode, SImode, 0));
781 emit_insn (gen_adddi3 (t3, t4, t5));
782 emit_insn (gen_extend_insn (t6, operands[0], DImode, SImode, 0));
784 riscv_expand_conditional_branch (operands[3], NE, t6, t3);
788 rtx t3 = gen_reg_rtx (<MODE>mode);
789 rtx t4 = gen_reg_rtx (<MODE>mode);
791 emit_insn (gen_add3_insn (operands[0], operands[1], operands[2]));
792 rtx cmp1 = gen_rtx_LT (<MODE>mode, operands[2], const0_rtx);
793 emit_insn (gen_cstore<mode>4 (t3, cmp1, operands[2], const0_rtx));
794 rtx cmp2 = gen_rtx_LT (<MODE>mode, operands[0], operands[1]);
796 emit_insn (gen_cstore<mode>4 (t4, cmp2, operands[0], operands[1]));
797 riscv_expand_conditional_branch (operands[3], NE, t3, t4);
802 (define_expand "uaddv<mode>4"
803 [(set (match_operand:GPR 0 "register_operand" "=r,r")
804 (plus:GPR (match_operand:GPR 1 "register_operand" " r,r")
805 (match_operand:GPR 2 "arith_operand" " r,I")))
806 (label_ref (match_operand 3 "" ""))]
809 if (TARGET_64BIT && <MODE>mode == SImode)
811 rtx t3 = gen_reg_rtx (DImode);
812 rtx t4 = gen_reg_rtx (DImode);
814 if (GET_CODE (operands[1]) != CONST_INT)
815 emit_insn (gen_extend_insn (t3, operands[1], DImode, SImode, 0));
818 riscv_emit_binary (PLUS, operands[0], operands[1], operands[2]);
819 emit_insn (gen_extend_insn (t4, operands[0], DImode, SImode, 0));
821 riscv_expand_conditional_branch (operands[3], LTU, t4, t3);
825 emit_insn (gen_add3_insn (operands[0], operands[1], operands[2]));
826 riscv_expand_conditional_branch (operands[3], LTU, operands[0],
833 (define_insn "addsi3_extended"
834 [(set (match_operand:DI 0 "register_operand" "=r,r")
836 (plus:SI (match_operand:SI 1 "register_operand" " r,r")
837 (match_operand:SI 2 "arith_operand" " r,I"))))]
840 [(set_attr "type" "arith")
841 (set_attr "mode" "SI")])
843 (define_insn "*addsi3_extended2"
844 [(set (match_operand:DI 0 "register_operand" "=r,r")
846 (match_operator:SI 3 "subreg_lowpart_operator"
847 [(plus:DI (match_operand:DI 1 "register_operand" " r,r")
848 (match_operand:DI 2 "arith_operand" " r,I"))])))]
851 [(set_attr "type" "arith")
852 (set_attr "mode" "SI")])
855 ;; ....................
859 ;; ....................
;; Floating-point subtraction over the ANYF mode iterator, emitting
;; "fsub.<fmt> rd,rs1,rs2".  Enabled for hard-float targets or when the
;; Zfinx family is available; scheduled as "fadd" like the add pattern.
862 (define_insn "sub<mode>3"
863 [(set (match_operand:ANYF 0 "register_operand" "=f")
864 (minus:ANYF (match_operand:ANYF 1 "register_operand" " f")
865 (match_operand:ANYF 2 "register_operand" " f")))]
866 "TARGET_HARD_FLOAT || TARGET_ZFINX"
867 "fsub.<fmt>\t%0,%1,%2"
868 [(set_attr "type" "fadd")
869 (set_attr "mode" "<UNITMODE>")])
871 (define_insn "subdi3"
872 [(set (match_operand:DI 0 "register_operand" "= r")
873 (minus:DI (match_operand:DI 1 "reg_or_0_operand" " rJ")
874 (match_operand:DI 2 "register_operand" " r")))]
877 [(set_attr "type" "arith")
878 (set_attr "mode" "DI")])
880 (define_insn "*subsi3"
881 [(set (match_operand:SI 0 "register_operand" "= r")
882 (minus:SI (match_operand:SI 1 "reg_or_0_operand" " rJ")
883 (match_operand:SI 2 "register_operand" " r")))]
886 [(set_attr "type" "arith")
887 (set_attr "mode" "SI")])
889 (define_expand "subsi3"
890 [(set (match_operand:SI 0 "register_operand" "= r")
891 (minus:SI (match_operand:SI 1 "reg_or_0_operand" " rJ")
892 (match_operand:SI 2 "register_operand" " r")))]
897 rtx t = gen_reg_rtx (DImode);
898 emit_insn (gen_subsi3_extended (t, operands[1], operands[2]));
899 t = gen_lowpart (SImode, t);
900 SUBREG_PROMOTED_VAR_P (t) = 1;
901 SUBREG_PROMOTED_SET (t, SRP_SIGNED);
902 emit_move_insn (operands[0], t);
;; NOTE(review): this chunk is a lossy extraction of riscv.md -- each line
;; keeps its upstream line number and some lines (braces, else-arms, insn
;; conditions) are missing.  Comments describe only the visible RTL;
;; confirm against the upstream file.

;; Signed subtraction with overflow check: compute operands[0] =
;; operands[1] - operands[2] and branch to label operands[3] on signed
;; overflow.
907 (define_expand "subv<mode>4"
908 [(set (match_operand:GPR 0 "register_operand" "= r")
909 (minus:GPR (match_operand:GPR 1 "reg_or_0_operand" " rJ")
910 (match_operand:GPR 2 "register_operand" " r")))
911 (label_ref (match_operand 3 "" ""))]
;; RV64 SImode path: redo the subtract in DImode on sign-extended copies
;; and branch if the sign-extended SImode result differs from the DImode
;; result (i.e. the 32-bit subtract wrapped).
914 if (TARGET_64BIT && <MODE>mode == SImode)
916 rtx t3 = gen_reg_rtx (DImode);
917 rtx t4 = gen_reg_rtx (DImode);
918 rtx t5 = gen_reg_rtx (DImode);
919 rtx t6 = gen_reg_rtx (DImode);
921 riscv_emit_binary (MINUS, operands[0], operands[1], operands[2]);
922 if (GET_CODE (operands[1]) != CONST_INT)
923 emit_insn (gen_extend_insn (t4, operands[1], DImode, SImode, 0));
;; NOTE(review): the CONST_INT else-arms were dropped by the extraction.
926 if (GET_CODE (operands[2]) != CONST_INT)
927 emit_insn (gen_extend_insn (t5, operands[2], DImode, SImode, 0));
930 emit_insn (gen_subdi3 (t3, t4, t5));
931 emit_insn (gen_extend_insn (t6, operands[0], DImode, SImode, 0));
933 riscv_expand_conditional_branch (operands[3], NE, t6, t3);
;; Word-mode path: overflow occurred iff (operands[2] < 0) differs from
;; (operands[1] < result), checked via two cstores and a compare-branch.
937 rtx t3 = gen_reg_rtx (<MODE>mode);
938 rtx t4 = gen_reg_rtx (<MODE>mode);
940 emit_insn (gen_sub3_insn (operands[0], operands[1], operands[2]));
942 rtx cmp1 = gen_rtx_LT (<MODE>mode, operands[2], const0_rtx);
943 emit_insn (gen_cstore<mode>4 (t3, cmp1, operands[2], const0_rtx));
945 rtx cmp2 = gen_rtx_LT (<MODE>mode, operands[1], operands[0]);
946 emit_insn (gen_cstore<mode>4 (t4, cmp2, operands[1], operands[0]));
948 riscv_expand_conditional_branch (operands[3], NE, t3, t4);
;; Unsigned subtraction with overflow (borrow) check: branch to
;; operands[3] when operands[1] < operands[2] (unsigned).
954 (define_expand "usubv<mode>4"
955 [(set (match_operand:GPR 0 "register_operand" "= r")
956 (minus:GPR (match_operand:GPR 1 "reg_or_0_operand" " rJ")
957 (match_operand:GPR 2 "register_operand" " r")))
958 (label_ref (match_operand 3 "" ""))]
;; RV64 SImode path: borrow occurred iff sign-extended minuend is
;; unsigned-less-than the sign-extended result.
961 if (TARGET_64BIT && <MODE>mode == SImode)
963 rtx t3 = gen_reg_rtx (DImode);
964 rtx t4 = gen_reg_rtx (DImode);
966 if (GET_CODE (operands[1]) != CONST_INT)
967 emit_insn (gen_extend_insn (t3, operands[1], DImode, SImode, 0));
970 riscv_emit_binary (MINUS, operands[0], operands[1], operands[2]);
971 emit_insn (gen_extend_insn (t4, operands[0], DImode, SImode, 0));
973 riscv_expand_conditional_branch (operands[3], LTU, t3, t4);
;; Word-mode path: branch if operands[1] LTU result (borrow out).
977 emit_insn (gen_sub3_insn (operands[0], operands[1], operands[2]));
978 riscv_expand_conditional_branch (operands[3], LTU, operands[1],
;; 32-bit subtract on RV64 whose result is implicitly sign-extended to
;; 64 bits (subw semantics).  NOTE(review): the insn condition and
;; output-template lines were dropped by the extraction.
986 (define_insn "subsi3_extended"
987 [(set (match_operand:DI 0 "register_operand" "= r")
989 (minus:SI (match_operand:SI 1 "reg_or_0_operand" " rJ")
990 (match_operand:SI 2 "register_operand" " r"))))]
993 [(set_attr "type" "arith")
994 (set_attr "mode" "SI")])
;; Combine variant: DImode subtract viewed through a lowpart subreg,
;; then sign-extended.
996 (define_insn "*subsi3_extended2"
997 [(set (match_operand:DI 0 "register_operand" "= r")
999 (match_operator:SI 3 "subreg_lowpart_operator"
1000 [(minus:DI (match_operand:DI 1 "reg_or_0_operand" " rJ")
1001 (match_operand:DI 2 "register_operand" " r"))])))]
1004 [(set_attr "type" "arith")
1005 (set_attr "mode" "SI")])
;; 64-bit negate.
1007 (define_insn "negdi2"
1008 [(set (match_operand:DI 0 "register_operand" "=r")
1009 (neg:DI (match_operand:DI 1 "register_operand" " r")))]
1012 [(set_attr "type" "arith")
1013 (set_attr "mode" "DI")])
;; 32-bit negate insn (kept for combine; the expander below is what the
;; middle-end uses).
1015 (define_insn "*negsi2"
1016 [(set (match_operand:SI 0 "register_operand" "=r")
1017 (neg:SI (match_operand:SI 1 "register_operand" " r")))]
1020 [(set_attr "type" "arith")
1021 (set_attr "mode" "SI")])
;; negsi2 expander: on the path shown, negate via the sign-extending
;; DImode pattern and hand back a promoted lowpart subreg so later
;; passes know the high 32 bits hold the sign extension.
1023 (define_expand "negsi2"
1024 [(set (match_operand:SI 0 "register_operand" "=r")
1025 (neg:SI (match_operand:SI 1 "register_operand" " r")))]
1030 rtx t = gen_reg_rtx (DImode);
1031 emit_insn (gen_negsi2_extended (t, operands[1]));
1032 t = gen_lowpart (SImode, t);
1033 SUBREG_PROMOTED_VAR_P (t) = 1;
1034 SUBREG_PROMOTED_SET (t, SRP_SIGNED);
1035 emit_move_insn (operands[0], t);
;; 32-bit negate with implicit sign-extension to 64 bits (negw).
1040 (define_insn "negsi2_extended"
1041 [(set (match_operand:DI 0 "register_operand" "=r")
1043 (neg:SI (match_operand:SI 1 "register_operand" " r"))))]
1046 [(set_attr "type" "arith")
1047 (set_attr "mode" "SI")])
;; Combine variant: DImode negate viewed through a lowpart subreg.
1049 (define_insn "*negsi2_extended2"
1050 [(set (match_operand:DI 0 "register_operand" "=r")
1052 (match_operator:SI 2 "subreg_lowpart_operator"
1053 [(neg:DI (match_operand:DI 1 "register_operand" " r"))])))]
1056 [(set_attr "type" "arith")
1057 (set_attr "mode" "SI")])
1060 ;; ....................
1064 ;; ....................
;; Floating-point multiply: fmul.s/.d/... depending on <fmt>.
1067 (define_insn "mul<mode>3"
1068 [(set (match_operand:ANYF 0 "register_operand" "=f")
1069 (mult:ANYF (match_operand:ANYF 1 "register_operand" " f")
1070 (match_operand:ANYF 2 "register_operand" " f")))]
1071 "TARGET_HARD_FLOAT || TARGET_ZFINX"
1072 "fmul.<fmt>\t%0,%1,%2"
1073 [(set_attr "type" "fmul")
1074 (set_attr "mode" "<UNITMODE>")])
;; 32-bit integer multiply insn (needs M or Zmmul).  NOTE(review): the
;; output-template line was dropped by the extraction.
1076 (define_insn "*mulsi3"
1077 [(set (match_operand:SI 0 "register_operand" "=r")
1078 (mult:SI (match_operand:SI 1 "register_operand" " r")
1079 (match_operand:SI 2 "register_operand" " r")))]
1080 "TARGET_ZMMUL || TARGET_MUL"
1082 [(set_attr "type" "imul")
1083 (set_attr "mode" "SI")])
;; mulsi3 expander: on the path shown, multiply via the sign-extending
;; DImode pattern and return a promoted lowpart subreg (same scheme as
;; negsi2 above).
1085 (define_expand "mulsi3"
1086 [(set (match_operand:SI 0 "register_operand" "=r")
1087 (mult:SI (match_operand:SI 1 "register_operand" " r")
1088 (match_operand:SI 2 "register_operand" " r")))]
1089 "TARGET_ZMMUL || TARGET_MUL"
1093 rtx t = gen_reg_rtx (DImode);
1094 emit_insn (gen_mulsi3_extended (t, operands[1], operands[2]));
1095 t = gen_lowpart (SImode, t);
1096 SUBREG_PROMOTED_VAR_P (t) = 1;
1097 SUBREG_PROMOTED_SET (t, SRP_SIGNED);
1098 emit_move_insn (operands[0], t);
;; 64-bit integer multiply (RV64 only).
1103 (define_insn "muldi3"
1104 [(set (match_operand:DI 0 "register_operand" "=r")
1105 (mult:DI (match_operand:DI 1 "register_operand" " r")
1106 (match_operand:DI 2 "register_operand" " r")))]
1107 "(TARGET_ZMMUL || TARGET_MUL) && TARGET_64BIT"
1109 [(set_attr "type" "imul")
1110 (set_attr "mode" "DI")])
;; Signed multiply with overflow check: operands[0] = operands[1] *
;; operands[2], branch to operands[3] on signed overflow.
1112 (define_expand "mulv<mode>4"
1113 [(set (match_operand:GPR 0 "register_operand" "=r")
1114 (mult:GPR (match_operand:GPR 1 "register_operand" " r")
1115 (match_operand:GPR 2 "register_operand" " r")))
1116 (label_ref (match_operand 3 "" ""))]
1117 "TARGET_ZMMUL || TARGET_MUL"
;; RV64 SImode path: do the full product in DImode on sign-extended
;; inputs; overflow iff sign-extending the SImode low part does not
;; reproduce the DImode product.
1119 if (TARGET_64BIT && <MODE>mode == SImode)
1121 rtx t3 = gen_reg_rtx (DImode);
1122 rtx t4 = gen_reg_rtx (DImode);
1123 rtx t5 = gen_reg_rtx (DImode);
1124 rtx t6 = gen_reg_rtx (DImode);
1126 if (GET_CODE (operands[1]) != CONST_INT)
1127 emit_insn (gen_extend_insn (t4, operands[1], DImode, SImode, 0));
;; NOTE(review): CONST_INT else-arms dropped by the extraction.
1130 if (GET_CODE (operands[2]) != CONST_INT)
1131 emit_insn (gen_extend_insn (t5, operands[2], DImode, SImode, 0));
1134 emit_insn (gen_muldi3 (t3, t4, t5));
1136 emit_move_insn (operands[0], gen_lowpart (SImode, t3));
1137 emit_insn (gen_extend_insn (t6, operands[0], DImode, SImode, 0));
1139 riscv_expand_conditional_branch (operands[3], NE, t6, t3);
;; Word-mode path: overflow iff the signed high-part (mulh) differs
;; from the low part's sign bit broadcast (arithmetic shift by
;; BITS_PER_WORD-1).
1143 rtx hp = gen_reg_rtx (<MODE>mode);
1144 rtx lp = gen_reg_rtx (<MODE>mode);
1146 emit_insn (gen_smul<mode>3_highpart (hp, operands[1], operands[2]));
1147 emit_insn (gen_mul<mode>3 (operands[0], operands[1], operands[2]));
1148 riscv_emit_binary (ASHIFTRT, lp, operands[0],
1149 GEN_INT (BITS_PER_WORD - 1));
1151 riscv_expand_conditional_branch (operands[3], NE, hp, lp);
;; Unsigned multiply with overflow check.
1157 (define_expand "umulv<mode>4"
1158 [(set (match_operand:GPR 0 "register_operand" "=r")
1159 (mult:GPR (match_operand:GPR 1 "register_operand" " r")
1160 (match_operand:GPR 2 "register_operand" " r")))
1161 (label_ref (match_operand 3 "" ""))]
1162 "TARGET_ZMMUL || TARGET_MUL"
;; RV64 SImode path: shift both 32-bit inputs into the high half, take
;; the unsigned high part of the 64x64 product (which is then the full
;; 64-bit 32x32 product), and branch if its upper 32 bits are nonzero.
1164 if (TARGET_64BIT && <MODE>mode == SImode)
1166 rtx t3 = gen_reg_rtx (DImode);
1167 rtx t4 = gen_reg_rtx (DImode);
1168 rtx t5 = gen_reg_rtx (DImode);
1169 rtx t6 = gen_reg_rtx (DImode);
1170 rtx t7 = gen_reg_rtx (DImode);
1171 rtx t8 = gen_reg_rtx (DImode);
1173 if (GET_CODE (operands[1]) != CONST_INT)
1174 emit_insn (gen_extend_insn (t3, operands[1], DImode, SImode, 0));
1177 if (GET_CODE (operands[2]) != CONST_INT)
1178 emit_insn (gen_extend_insn (t4, operands[2], DImode, SImode, 0));
1182 emit_insn (gen_ashldi3 (t5, t3, GEN_INT (32)));
1183 emit_insn (gen_ashldi3 (t6, t4, GEN_INT (32)));
1184 emit_insn (gen_umuldi3_highpart (t7, t5, t6));
1185 emit_move_insn (operands[0], gen_lowpart (SImode, t7));
1186 emit_insn (gen_lshrdi3 (t8, t7, GEN_INT (32)));
1188 riscv_expand_conditional_branch (operands[3], NE, t8, const0_rtx);
;; Word-mode path: unsigned overflow iff the unsigned high part (mulhu)
;; is nonzero.
1192 rtx hp = gen_reg_rtx (<MODE>mode);
1194 emit_insn (gen_umul<mode>3_highpart (hp, operands[1], operands[2]));
1195 emit_insn (gen_mul<mode>3 (operands[0], operands[1], operands[2]));
1197 riscv_expand_conditional_branch (operands[3], NE, hp, const0_rtx);
;; 32-bit multiply on RV64 with implicit sign extension of the result
;; (mulw semantics).  NOTE(review): output-template lines dropped by the
;; extraction.
1203 (define_insn "mulsi3_extended"
1204 [(set (match_operand:DI 0 "register_operand" "=r")
1206 (mult:SI (match_operand:SI 1 "register_operand" " r")
1207 (match_operand:SI 2 "register_operand" " r"))))]
1208 "(TARGET_ZMMUL || TARGET_MUL) && TARGET_64BIT"
1210 [(set_attr "type" "imul")
1211 (set_attr "mode" "SI")])
;; Combine variant: DImode multiply viewed through a lowpart subreg,
;; then sign-extended.
1213 (define_insn "*mulsi3_extended2"
1214 [(set (match_operand:DI 0 "register_operand" "=r")
1216 (match_operator:SI 3 "subreg_lowpart_operator"
1217 [(mult:DI (match_operand:DI 1 "register_operand" " r")
1218 (match_operand:DI 2 "register_operand" " r"))])))]
1219 "(TARGET_ZMMUL || TARGET_MUL) && TARGET_64BIT"
1221 [(set_attr "type" "imul")
1222 (set_attr "mode" "SI")])
1225 ;; ........................
1227 ;; MULTIPLICATION HIGH-PART
1229 ;; ........................
;; 64x64 -> 128-bit widening multiply (signed or unsigned via
;; any_extend): low half with mul, high half with mulh/mulhu.
1233 (define_expand "<u>mulditi3"
1234 [(set (match_operand:TI 0 "register_operand")
1235 (mult:TI (any_extend:TI (match_operand:DI 1 "register_operand"))
1236 (any_extend:TI (match_operand:DI 2 "register_operand"))))]
1237 "(TARGET_ZMMUL || TARGET_MUL) && TARGET_64BIT"
1239 rtx low = gen_reg_rtx (DImode);
1240 emit_insn (gen_muldi3 (low, operands[1], operands[2]));
1242 rtx high = gen_reg_rtx (DImode);
1243 emit_insn (gen_<su>muldi3_highpart (high, operands[1], operands[2]));
1245 emit_move_insn (gen_lowpart (DImode, operands[0]), low);
1246 emit_move_insn (gen_highpart (DImode, operands[0]), high);
;; High 64 bits of a 64x64 widening multiply (mulh / mulhu).
;; NOTE(review): truncate/extend wrapper lines and the output template
;; were dropped by the extraction.
1250 (define_insn "<su>muldi3_highpart"
1251 [(set (match_operand:DI 0 "register_operand" "=r")
1254 (mult:TI (any_extend:TI
1255 (match_operand:DI 1 "register_operand" " r"))
1257 (match_operand:DI 2 "register_operand" " r")))
1259 "(TARGET_ZMMUL || TARGET_MUL) && TARGET_64BIT"
1261 [(set_attr "type" "imul")
1262 (set_attr "mode" "DI")])
;; Mixed-sign (unsigned x signed) 64x64 -> 128-bit multiply, high part
;; via mulhsu.
1264 (define_expand "usmulditi3"
1265 [(set (match_operand:TI 0 "register_operand")
1266 (mult:TI (zero_extend:TI (match_operand:DI 1 "register_operand"))
1267 (sign_extend:TI (match_operand:DI 2 "register_operand"))))]
1268 "(TARGET_ZMMUL || TARGET_MUL) && TARGET_64BIT"
1270 rtx low = gen_reg_rtx (DImode);
1271 emit_insn (gen_muldi3 (low, operands[1], operands[2]));
1273 rtx high = gen_reg_rtx (DImode);
1274 emit_insn (gen_usmuldi3_highpart (high, operands[1], operands[2]));
1276 emit_move_insn (gen_lowpart (DImode, operands[0]), low);
1277 emit_move_insn (gen_highpart (DImode, operands[0]), high);
;; High part of the mixed-sign 64x64 multiply.
1281 (define_insn "usmuldi3_highpart"
1282 [(set (match_operand:DI 0 "register_operand" "=r")
1285 (mult:TI (zero_extend:TI
1286 (match_operand:DI 1 "register_operand" "r"))
1288 (match_operand:DI 2 "register_operand" " r")))
1290 "(TARGET_ZMMUL || TARGET_MUL) && TARGET_64BIT"
1292 [(set_attr "type" "imul")
1293 (set_attr "mode" "DI")])
;; RV32: 32x32 -> 64-bit widening multiply, assembled from the 32-bit
;; low product and the mulh/mulhu high part into the two subwords of
;; the DImode destination.
1295 (define_expand "<u>mulsidi3"
1296 [(set (match_operand:DI 0 "register_operand" "=r")
1297 (mult:DI (any_extend:DI
1298 (match_operand:SI 1 "register_operand" " r"))
1300 (match_operand:SI 2 "register_operand" " r"))))]
1301 "(TARGET_ZMMUL || TARGET_MUL) && !TARGET_64BIT"
1303 rtx temp = gen_reg_rtx (SImode);
1304 riscv_emit_binary (MULT, temp, operands[1], operands[2]);
1305 emit_insn (gen_<su>mulsi3_highpart (riscv_subword (operands[0], true),
1306 operands[1], operands[2]));
1307 emit_insn (gen_movsi (riscv_subword (operands[0], false), temp));
;; High 32 bits of a 32x32 widening multiply on RV32.
1311 (define_insn "<su>mulsi3_highpart"
1312 [(set (match_operand:SI 0 "register_operand" "=r")
1315 (mult:DI (any_extend:DI
1316 (match_operand:SI 1 "register_operand" " r"))
1318 (match_operand:SI 2 "register_operand" " r")))
1320 "(TARGET_ZMMUL || TARGET_MUL) && !TARGET_64BIT"
1322 [(set_attr "type" "imul")
1323 (set_attr "mode" "SI")])
;; RV32 mixed-sign 32x32 -> 64-bit multiply (mulhsu for the high part).
1326 (define_expand "usmulsidi3"
1327 [(set (match_operand:DI 0 "register_operand" "=r")
1328 (mult:DI (zero_extend:DI
1329 (match_operand:SI 1 "register_operand" " r"))
1331 (match_operand:SI 2 "register_operand" " r"))))]
1332 "(TARGET_ZMMUL || TARGET_MUL) && !TARGET_64BIT"
1334 rtx temp = gen_reg_rtx (SImode);
1335 riscv_emit_binary (MULT, temp, operands[1], operands[2]);
1336 emit_insn (gen_usmulsi3_highpart (riscv_subword (operands[0], true),
1337 operands[1], operands[2]));
1338 emit_insn (gen_movsi (riscv_subword (operands[0], false), temp));
;; High part of the RV32 mixed-sign widening multiply.
1342 (define_insn "usmulsi3_highpart"
1343 [(set (match_operand:SI 0 "register_operand" "=r")
1346 (mult:DI (zero_extend:DI
1347 (match_operand:SI 1 "register_operand" " r"))
1349 (match_operand:SI 2 "register_operand" " r")))
1351 "(TARGET_ZMMUL || TARGET_MUL) && !TARGET_64BIT"
1353 [(set_attr "type" "imul")
1354 (set_attr "mode" "SI")])
1357 ;; ....................
1359 ;; DIVISION and REMAINDER
1361 ;; ....................
;; 32-bit divide/remainder insn; %~ appends 'w' on RV64 (divw/remw
;; forms), %i2 selects the immediate form when applicable.
1364 (define_insn "*<optab>si3"
1365 [(set (match_operand:SI 0 "register_operand" "=r")
1366 (any_div:SI (match_operand:SI 1 "register_operand" " r")
1367 (match_operand:SI 2 "register_operand" " r")))]
1369 "<insn>%i2%~\t%0,%1,%2"
1370 [(set_attr "type" "idiv")
1371 (set_attr "mode" "SI")])
;; SImode divide expander: on the path shown, divide via the
;; sign-extending DImode pattern and return a promoted lowpart subreg.
1373 (define_expand "<optab>si3"
1374 [(set (match_operand:SI 0 "register_operand" "=r")
1375 (any_div:SI (match_operand:SI 1 "register_operand" " r")
1376 (match_operand:SI 2 "register_operand" " r")))]
1381 rtx t = gen_reg_rtx (DImode);
1382 emit_insn (gen_<optab>si3_extended (t, operands[1], operands[2]));
1383 t = gen_lowpart (SImode, t);
1384 SUBREG_PROMOTED_VAR_P (t) = 1;
1385 SUBREG_PROMOTED_SET (t, SRP_SIGNED);
1386 emit_move_insn (operands[0], t);
;; 64-bit divide/remainder (div/divu/rem/remu).
1391 (define_insn "<optab>di3"
1392 [(set (match_operand:DI 0 "register_operand" "=r")
1393 (any_div:DI (match_operand:DI 1 "register_operand" " r")
1394 (match_operand:DI 2 "register_operand" " r")))]
1395 "TARGET_DIV && TARGET_64BIT"
1396 "<insn>%i2\t%0,%1,%2"
1397 [(set_attr "type" "idiv")
1398 (set_attr "mode" "DI")])
;; divmod expander used when the tuning prefers div + mul + sub over a
;; separate rem instruction: rem = dividend - quotient * divisor.
1400 (define_expand "<u>divmod<mode>4"
1402 [(set (match_operand:GPR 0 "register_operand")
1403 (only_div:GPR (match_operand:GPR 1 "register_operand")
1404 (match_operand:GPR 2 "register_operand")))
1405 (set (match_operand:GPR 3 "register_operand")
1406 (<paired_mod>:GPR (match_dup 1) (match_dup 2)))])]
1407 "TARGET_DIV && riscv_use_divmod_expander ()"
1409 rtx tmp = gen_reg_rtx (<MODE>mode);
1410 emit_insn (gen_<u>div<GPR:mode>3 (operands[0], operands[1], operands[2]));
1411 emit_insn (gen_mul<GPR:mode>3 (tmp, operands[0], operands[2]));
1412 emit_insn (gen_sub<GPR:mode>3 (operands[3], operands[1], tmp));
;; 32-bit divide on RV64 with implicit sign extension of the result
;; (divw/remw family; the 'w' suffix is emitted explicitly here).
1416 (define_insn "<optab>si3_extended"
1417 [(set (match_operand:DI 0 "register_operand" "=r")
1419 (any_div:SI (match_operand:SI 1 "register_operand" " r")
1420 (match_operand:SI 2 "register_operand" " r"))))]
1421 "TARGET_DIV && TARGET_64BIT"
1422 "<insn>%i2w\t%0,%1,%2"
1423 [(set_attr "type" "idiv")
1424 (set_attr "mode" "DI")])
;; Floating-point divide; requires the FDIV-enabled FPU configuration.
1426 (define_insn "div<mode>3"
1427 [(set (match_operand:ANYF 0 "register_operand" "=f")
1428 (div:ANYF (match_operand:ANYF 1 "register_operand" " f")
1429 (match_operand:ANYF 2 "register_operand" " f")))]
1430 "(TARGET_HARD_FLOAT || TARGET_ZFINX) && TARGET_FDIV"
1431 "fdiv.<fmt>\t%0,%1,%2"
1432 [(set_attr "type" "fdiv")
1433 (set_attr "mode" "<UNITMODE>")])
1436 ;; ....................
1440 ;; ....................
;; Floating-point square root (fsqrt.<fmt>); needs the FDIV/FSQRT unit.
1442 (define_insn "sqrt<mode>2"
1443 [(set (match_operand:ANYF 0 "register_operand" "=f")
1444 (sqrt:ANYF (match_operand:ANYF 1 "register_operand" " f")))]
1445 "(TARGET_HARD_FLOAT || TARGET_ZFINX) && TARGET_FDIV"
1447 return "fsqrt.<fmt>\t%0,%1";
1449 [(set_attr "type" "fsqrt")
1450 (set_attr "mode" "<UNITMODE>")])
;; Fused multiply-add family.  Mapping of GCC fma forms to RISC-V:
;;   fma  (a*b)+c  -> fmadd      fms  (a*b)-c  -> fmsub
;;   fnms (-a*b)-c -> fnmadd     fnma (-a*b)+c -> fnmsub
1455 (define_insn "fma<mode>4"
1456 [(set (match_operand:ANYF 0 "register_operand" "=f")
1457 (fma:ANYF (match_operand:ANYF 1 "register_operand" " f")
1458 (match_operand:ANYF 2 "register_operand" " f")
1459 (match_operand:ANYF 3 "register_operand" " f")))]
1460 "TARGET_HARD_FLOAT || TARGET_ZFINX"
1461 "fmadd.<fmt>\t%0,%1,%2,%3"
1462 [(set_attr "type" "fmadd")
1463 (set_attr "mode" "<UNITMODE>")])
;; (a * b) - c
1466 (define_insn "fms<mode>4"
1467 [(set (match_operand:ANYF 0 "register_operand" "=f")
1468 (fma:ANYF (match_operand:ANYF 1 "register_operand" " f")
1469 (match_operand:ANYF 2 "register_operand" " f")
1470 (neg:ANYF (match_operand:ANYF 3 "register_operand" " f"))))]
1471 "TARGET_HARD_FLOAT || TARGET_ZFINX"
1472 "fmsub.<fmt>\t%0,%1,%2,%3"
1473 [(set_attr "type" "fmadd")
1474 (set_attr "mode" "<UNITMODE>")])
;; (-a * b) - c
1477 (define_insn "fnms<mode>4"
1478 [(set (match_operand:ANYF 0 "register_operand" "=f")
1480 (neg:ANYF (match_operand:ANYF 1 "register_operand" " f"))
1481 (match_operand:ANYF 2 "register_operand" " f")
1482 (neg:ANYF (match_operand:ANYF 3 "register_operand" " f"))))]
1483 "TARGET_HARD_FLOAT || TARGET_ZFINX"
1484 "fnmadd.<fmt>\t%0,%1,%2,%3"
1485 [(set_attr "type" "fmadd")
1486 (set_attr "mode" "<UNITMODE>")])
;; (-a * b) + c
1489 (define_insn "fnma<mode>4"
1490 [(set (match_operand:ANYF 0 "register_operand" "=f")
1492 (neg:ANYF (match_operand:ANYF 1 "register_operand" " f"))
1493 (match_operand:ANYF 2 "register_operand" " f")
1494 (match_operand:ANYF 3 "register_operand" " f")))]
1495 "TARGET_HARD_FLOAT || TARGET_ZFINX"
1496 "fnmsub.<fmt>\t%0,%1,%2,%3"
1497 [(set_attr "type" "fmadd")
1498 (set_attr "mode" "<UNITMODE>")])
;; The four variants below match the outer-negated forms, which are
;; only equivalent when signed zeros need not be honored.
1500 ;; -(-a * b - c), modulo signed zeros
1501 (define_insn "*fma<mode>4"
1502 [(set (match_operand:ANYF 0 "register_operand" "=f")
1505 (neg:ANYF (match_operand:ANYF 1 "register_operand" " f"))
1506 (match_operand:ANYF 2 "register_operand" " f")
1507 (neg:ANYF (match_operand:ANYF 3 "register_operand" " f")))))]
1508 "(TARGET_HARD_FLOAT || TARGET_ZFINX) && !HONOR_SIGNED_ZEROS (<MODE>mode)"
1509 "fmadd.<fmt>\t%0,%1,%2,%3"
1510 [(set_attr "type" "fmadd")
1511 (set_attr "mode" "<UNITMODE>")])
1513 ;; -(-a * b + c), modulo signed zeros
1514 (define_insn "*fms<mode>4"
1515 [(set (match_operand:ANYF 0 "register_operand" "=f")
1518 (neg:ANYF (match_operand:ANYF 1 "register_operand" " f"))
1519 (match_operand:ANYF 2 "register_operand" " f")
1520 (match_operand:ANYF 3 "register_operand" " f"))))]
1521 "(TARGET_HARD_FLOAT || TARGET_ZFINX) && !HONOR_SIGNED_ZEROS (<MODE>mode)"
1522 "fmsub.<fmt>\t%0,%1,%2,%3"
1523 [(set_attr "type" "fmadd")
1524 (set_attr "mode" "<UNITMODE>")])
1526 ;; -(a * b + c), modulo signed zeros
1527 (define_insn "*fnms<mode>4"
1528 [(set (match_operand:ANYF 0 "register_operand" "=f")
1531 (match_operand:ANYF 1 "register_operand" " f")
1532 (match_operand:ANYF 2 "register_operand" " f")
1533 (match_operand:ANYF 3 "register_operand" " f"))))]
1534 "(TARGET_HARD_FLOAT || TARGET_ZFINX) && !HONOR_SIGNED_ZEROS (<MODE>mode)"
1535 "fnmadd.<fmt>\t%0,%1,%2,%3"
1536 [(set_attr "type" "fmadd")
1537 (set_attr "mode" "<UNITMODE>")])
1539 ;; -(a * b - c), modulo signed zeros
1540 (define_insn "*fnma<mode>4"
1541 [(set (match_operand:ANYF 0 "register_operand" "=f")
1544 (match_operand:ANYF 1 "register_operand" " f")
1545 (match_operand:ANYF 2 "register_operand" " f")
1546 (neg:ANYF (match_operand:ANYF 3 "register_operand" " f")))))]
1547 "(TARGET_HARD_FLOAT || TARGET_ZFINX) && !HONOR_SIGNED_ZEROS (<MODE>mode)"
1548 "fnmsub.<fmt>\t%0,%1,%2,%3"
1549 [(set_attr "type" "fmadd")
1550 (set_attr "mode" "<UNITMODE>")])
1553 ;; ....................
1557 ;; ....................
;; Floating-point absolute value (fabs.<fmt> -- template line dropped
;; by the extraction).
1559 (define_insn "abs<mode>2"
1560 [(set (match_operand:ANYF 0 "register_operand" "=f")
1561 (abs:ANYF (match_operand:ANYF 1 "register_operand" " f")))]
1562 "TARGET_HARD_FLOAT || TARGET_ZFINX"
1564 [(set_attr "type" "fmove")
1565 (set_attr "mode" "<UNITMODE>")])
;; copysign(x, y): magnitude of operand 1, sign of operand 2 (fsgnj).
1567 (define_insn "copysign<mode>3"
1568 [(set (match_operand:ANYF 0 "register_operand" "=f")
1569 (unspec:ANYF [(match_operand:ANYF 1 "register_operand" " f")
1570 (match_operand:ANYF 2 "register_operand" " f")]
1572 "TARGET_HARD_FLOAT || TARGET_ZFINX"
1573 "fsgnj.<fmt>\t%0,%1,%2"
1574 [(set_attr "type" "fmove")
1575 (set_attr "mode" "<UNITMODE>")])
;; Floating-point negate (template line dropped by the extraction).
1577 (define_insn "neg<mode>2"
1578 [(set (match_operand:ANYF 0 "register_operand" "=f")
1579 (neg:ANYF (match_operand:ANYF 1 "register_operand" " f")))]
1580 "TARGET_HARD_FLOAT || TARGET_ZFINX"
1582 [(set_attr "type" "fmove")
1583 (set_attr "mode" "<UNITMODE>")])
1586 ;; ....................
1590 ;; ....................
;; Zfa fminm/fmaxm: min/max that propagate NaNs (unspec keeps the exact
;; IEEE-2019 minimum/maximum semantics opaque to the optimizers).
1592 (define_insn "fminm<mode>3"
1593 [(set (match_operand:ANYF 0 "register_operand" "=f")
1594 (unspec:ANYF [(use (match_operand:ANYF 1 "register_operand" " f"))
1595 (use (match_operand:ANYF 2 "register_operand" " f"))]
1597 "TARGET_HARD_FLOAT && TARGET_ZFA"
1598 "fminm.<fmt>\t%0,%1,%2"
1599 [(set_attr "type" "fmove")
1600 (set_attr "mode" "<UNITMODE>")])
1602 (define_insn "fmaxm<mode>3"
1603 [(set (match_operand:ANYF 0 "register_operand" "=f")
1604 (unspec:ANYF [(use (match_operand:ANYF 1 "register_operand" " f"))
1605 (use (match_operand:ANYF 2 "register_operand" " f"))]
1607 "TARGET_HARD_FLOAT && TARGET_ZFA"
1608 "fmaxm.<fmt>\t%0,%1,%2"
1609 [(set_attr "type" "fmove")
1610 (set_attr "mode" "<UNITMODE>")])
;; fmin/fmax optabs: only valid when signaling NaNs need not be honored
;; (base-ISA fmin/fmax quiets sNaNs), hence the !HONOR_SNANS condition.
1612 (define_insn "fmin<mode>3"
1613 [(set (match_operand:ANYF 0 "register_operand" "=f")
1614 (unspec:ANYF [(use (match_operand:ANYF 1 "register_operand" " f"))
1615 (use (match_operand:ANYF 2 "register_operand" " f"))]
1617 "(TARGET_HARD_FLOAT || TARGET_ZFINX) && !HONOR_SNANS (<MODE>mode)"
1618 "fmin.<fmt>\t%0,%1,%2"
1619 [(set_attr "type" "fmove")
1620 (set_attr "mode" "<UNITMODE>")])
1622 (define_insn "fmax<mode>3"
1623 [(set (match_operand:ANYF 0 "register_operand" "=f")
1624 (unspec:ANYF [(use (match_operand:ANYF 1 "register_operand" " f"))
1625 (use (match_operand:ANYF 2 "register_operand" " f"))]
1627 "(TARGET_HARD_FLOAT || TARGET_ZFINX) && !HONOR_SNANS (<MODE>mode)"
1628 "fmax.<fmt>\t%0,%1,%2"
1629 [(set_attr "type" "fmove")
1630 (set_attr "mode" "<UNITMODE>")])
;; smin/smax: plain RTL min/max, matched directly to fmin/fmax.
1632 (define_insn "smin<mode>3"
1633 [(set (match_operand:ANYF 0 "register_operand" "=f")
1634 (smin:ANYF (match_operand:ANYF 1 "register_operand" " f")
1635 (match_operand:ANYF 2 "register_operand" " f")))]
1636 "TARGET_HARD_FLOAT || TARGET_ZFINX"
1637 "fmin.<fmt>\t%0,%1,%2"
1638 [(set_attr "type" "fmove")
1639 (set_attr "mode" "<UNITMODE>")])
1641 (define_insn "smax<mode>3"
1642 [(set (match_operand:ANYF 0 "register_operand" "=f")
1643 (smax:ANYF (match_operand:ANYF 1 "register_operand" " f")
1644 (match_operand:ANYF 2 "register_operand" " f")))]
1645 "TARGET_HARD_FLOAT || TARGET_ZFINX"
1646 "fmax.<fmt>\t%0,%1,%2"
1647 [(set_attr "type" "fmove")
1648 (set_attr "mode" "<UNITMODE>")])
1651 ;; ....................
1655 ;; ....................
1658 ;; For RV64, we don't expose the SImode operations to the rtl expanders,
1659 ;; but SImode versions exist for combine.
;; AND expander: when the constant mask is exactly a mode mask (0xffff
;; or 0xffffffff), emit a zero-extension instead of an AND so the
;; extension patterns/Zba-Zbb alternatives can match.
1661 (define_expand "and<mode>3"
1662 [(set (match_operand:X 0 "register_operand")
1663 (and:X (match_operand:X 1 "register_operand")
1664 (match_operand:X 2 "arith_or_mode_mask_or_zbs_operand")))]
1667 /* If the second operand is a mode mask, emit an extension
1669 if (CONST_INT_P (operands[2]))
1671 enum machine_mode tmode = VOIDmode;
1672 if (UINTVAL (operands[2]) == GET_MODE_MASK (HImode))
1674 else if (UINTVAL (operands[2]) == GET_MODE_MASK (SImode))
;; NOTE(review): the tmode assignments (HImode/SImode) were dropped by
;; the extraction.
1677 if (tmode != VOIDmode)
1679 rtx tmp = gen_lowpart (tmode, operands[1]);
1680 emit_insn (gen_extend_insn (operands[0], tmp, <MODE>mode, tmode, 1));
;; AND insn: register or 12-bit immediate operand 2 (and/andi).
1686 (define_insn "*and<mode>3"
1687 [(set (match_operand:X 0 "register_operand" "=r,r")
1688 (and:X (match_operand:X 1 "register_operand" "%r,r")
1689 (match_operand:X 2 "arith_operand" " r,I")))]
1692 [(set_attr "type" "logical")
1693 (set_attr "mode" "<MODE>")])
1695 ;; When we construct constants we may want to twiddle a single bit
1696 ;; by generating an IOR. But the constant likely doesn't fit
1697 ;; arith_operand. So the generic code will reload the constant into
1698 ;; a register. Post-reload we won't have the chance to squash things
1699 ;; back into a Zbs insn.
1701 ;; So indirect through a define_expand. That allows us to have a
1702 ;; predicate that conditionally accepts single bit constants without
1703 ;; putting the details of Zbs instructions in here.
1704 (define_expand "<optab><mode>3"
1705 [(set (match_operand:X 0 "register_operand")
1706 (any_or:X (match_operand:X 1 "register_operand" "")
1707 (match_operand:X 2 "arith_or_zbs_operand" "")))]
;; IOR/XOR insn: register or 12-bit immediate (or/ori, xor/xori).
1710 (define_insn "*<optab><mode>3"
1711 [(set (match_operand:X 0 "register_operand" "=r,r")
1712 (any_or:X (match_operand:X 1 "register_operand" "%r,r")
1713 (match_operand:X 2 "arith_operand" " r,I")))]
1715 "<insn>%i2\t%0,%1,%2"
1716 [(set_attr "type" "logical")
1717 (set_attr "mode" "<MODE>")])
;; SImode bitwise ops kept for combine on RV64 (see note above about
;; not exposing SImode ops to the expanders).
1719 (define_insn "*<optab>si3_internal"
1720 [(set (match_operand:SI 0 "register_operand" "=r,r")
1721 (any_bitwise:SI (match_operand:SI 1 "register_operand" "%r,r")
1722 (match_operand:SI 2 "arith_operand" " r,I")))]
1724 "<insn>%i2\t%0,%1,%2"
1725 [(set_attr "type" "logical")
1726 (set_attr "mode" "SI")])
;; Bitwise NOT (template line dropped by the extraction; upstream uses
;; not/xori -1).
1728 (define_insn "one_cmpl<mode>2"
1729 [(set (match_operand:X 0 "register_operand" "=r")
1730 (not:X (match_operand:X 1 "register_operand" " r")))]
1733 [(set_attr "type" "logical")
1734 (set_attr "mode" "<MODE>")])
1736 (define_insn "*one_cmplsi2_internal"
1737 [(set (match_operand:SI 0 "register_operand" "=r")
1738 (not:SI (match_operand:SI 1 "register_operand" " r")))]
1741 [(set_attr "type" "logical")
1742 (set_attr "mode" "SI")])
1745 ;; ....................
1749 ;; ....................
;; DF -> SF truncation (fcvt.s.d -- template dropped by the extraction).
1751 (define_insn "truncdfsf2"
1752 [(set (match_operand:SF 0 "register_operand" "=f")
1754 (match_operand:DF 1 "register_operand" " f")))]
1755 "TARGET_DOUBLE_FLOAT || TARGET_ZDINX"
1757 [(set_attr "type" "fcvt")
1758 (set_attr "mode" "SF")])
;; SF -> HF truncation; needs the Zfhmin/Zhinxmin half-precision
;; conversion support.
1760 (define_insn "truncsfhf2"
1761 [(set (match_operand:HF 0 "register_operand" "=f")
1763 (match_operand:SF 1 "register_operand" " f")))]
1764 "TARGET_ZFHMIN || TARGET_ZHINXMIN"
1766 [(set_attr "type" "fcvt")
1767 (set_attr "mode" "HF")])
;; DF -> HF truncation; needs both half and double support.
1769 (define_insn "truncdfhf2"
1770 [(set (match_operand:HF 0 "register_operand" "=f")
1772 (match_operand:DF 1 "register_operand" " f")))]
1773 "(TARGET_ZFHMIN && TARGET_DOUBLE_FLOAT) ||
1774 (TARGET_ZHINXMIN && TARGET_ZDINX)"
1776 [(set_attr "type" "fcvt")
1777 (set_attr "mode" "HF")])
;; SF -> BF16 truncation (Zfbfmin fcvt.bf16.s).
1779 (define_insn "truncsfbf2"
1780 [(set (match_operand:BF 0 "register_operand" "=f")
1782 (match_operand:SF 1 "register_operand" " f")))]
1784 "fcvt.bf16.s\t%0,%1"
1785 [(set_attr "type" "fcvt")
1786 (set_attr "mode" "BF")])
1788 ;; The conversion of HF/DF/TF to BF needs to be done with SF if there is a
1789 ;; chance to generate at least one instruction, otherwise just using
1790 ;; libfunc __trunc[h|d|t]fbf2.
1791 (define_expand "trunc<mode>bf2"
1792 [(set (match_operand:BF 0 "register_operand" "=f")
1794 (match_operand:FBF 1 "register_operand" " f")))]
;; Route through SFmode: widen/narrow to SF, then SF -> BF.
1797 convert_move (operands[0],
1798 convert_modes (SFmode, <MODE>mode, operands[1], 0), 0);
1801 [(set_attr "type" "fcvt")
1802 (set_attr "mode" "BF")])
1805 ;; ....................
1809 ;; ....................
;; SI -> DI zero-extension expander: if the source subreg is already
;; known zero-extended (promoted unsigned), a plain DImode move of the
;; underlying register suffices.
1813 (define_expand "zero_extendsidi2"
1814 [(set (match_operand:DI 0 "register_operand")
1815 (zero_extend:DI (match_operand:SI 1 "nonimmediate_operand")))]
1818 if (SUBREG_P (operands[1]) && SUBREG_PROMOTED_VAR_P (operands[1])
1819 && SUBREG_PROMOTED_UNSIGNED_P (operands[1]))
1821 emit_insn (gen_movdi (operands[0], SUBREG_REG (operands[1])));
;; Fallback when no single zero-extend insn exists (no Zba/XTheadBb):
;; a register source splits post-reload into slli 32 / srli 32; a
;; memory source stays a zero-extending load.
1826 (define_insn_and_split "*zero_extendsidi2_internal"
1827 [(set (match_operand:DI 0 "register_operand" "=r,r")
1829 (match_operand:SI 1 "nonimmediate_operand" " r,m")))]
1830 "TARGET_64BIT && !TARGET_ZBA && !TARGET_XTHEADBB && !TARGET_XTHEADMEMIDX
1831 && !(REG_P (operands[1]) && VL_REG_P (REGNO (operands[1])))"
1835 "&& reload_completed
1836 && REG_P (operands[1])
1837 && !paradoxical_subreg_p (operands[0])"
1839 (ashift:DI (match_dup 1) (const_int 32)))
1841 (lshiftrt:DI (match_dup 0) (const_int 32)))]
1842 { operands[1] = gen_lowpart (DImode, operands[1]); }
1843 [(set_attr "move_type" "shift_shift,load")
1844 (set_attr "type" "load")
1845 (set_attr "mode" "DI")])
;; HI -> GPR zero-extension expander (body outside the visible lines,
;; if any; upstream indirects to the insn below).
1847 (define_expand "zero_extendhi<GPR:mode>2"
1848 [(set (match_operand:GPR 0 "register_operand")
1850 (match_operand:HI 1 "nonimmediate_operand")))]
;; Without Zbb/XTheadBb: register source splits into shift-left /
;; shift-right by (word size - 16); memory source is a zero-extending
;; load.
1853 (define_insn_and_split "*zero_extendhi<GPR:mode>2"
1854 [(set (match_operand:GPR 0 "register_operand" "=r,r")
1856 (match_operand:HI 1 "nonimmediate_operand" " r,m")))]
1857 "!TARGET_ZBB && !TARGET_XTHEADBB && !TARGET_XTHEADMEMIDX"
1861 "&& reload_completed
1862 && REG_P (operands[1])
1863 && !paradoxical_subreg_p (operands[0])"
1865 (ashift:GPR (match_dup 1) (match_dup 2)))
1867 (lshiftrt:GPR (match_dup 0) (match_dup 2)))]
1869 operands[1] = gen_lowpart (<GPR:MODE>mode, operands[1]);
1870 operands[2] = GEN_INT(GET_MODE_BITSIZE(<GPR:MODE>mode) - 16);
1872 [(set_attr "move_type" "shift_shift,load")
1873 (set_attr "type" "load")
1874 (set_attr "mode" "<GPR:MODE>")])
;; QI zero-extension: andi 0xff for registers, lbu for memory
;; (templates dropped by the extraction).
1876 (define_expand "zero_extendqi<SUPERQI:mode>2"
1877 [(set (match_operand:SUPERQI 0 "register_operand")
1878 (zero_extend:SUPERQI
1879 (match_operand:QI 1 "nonimmediate_operand")))]
1882 (define_insn "*zero_extendqi<SUPERQI:mode>2_internal"
1883 [(set (match_operand:SUPERQI 0 "register_operand" "=r,r")
1884 (zero_extend:SUPERQI
1885 (match_operand:QI 1 "nonimmediate_operand" " r,m")))]
1886 "!TARGET_XTHEADMEMIDX"
1890 [(set_attr "move_type" "andi,load")
1891 (set_attr "type" "arith,load")
1892 (set_attr "mode" "<SUPERQI:MODE>")])
1895 ;; ....................
1899 ;; ....................
;; SI -> DI sign-extension expander: a promoted-signed subreg is
;; already sign-extended, so a plain DImode move suffices.
1901 (define_expand "extendsidi2"
1902 [(set (match_operand:DI 0 "register_operand" "=r,r")
1904 (match_operand:SI 1 "nonimmediate_operand" " r,m")))]
1907 if (SUBREG_P (operands[1]) && SUBREG_PROMOTED_VAR_P (operands[1])
1908 && SUBREG_PROMOTED_SIGNED_P (operands[1]))
1910 emit_insn (gen_movdi (operands[0], SUBREG_REG (operands[1])));
;; Register source: sext.w-style move; memory source: lw (templates
;; dropped by the extraction).
1915 (define_insn "*extendsidi2_internal"
1916 [(set (match_operand:DI 0 "register_operand" "=r,r")
1918 (match_operand:SI 1 "nonimmediate_operand" " r,m")))]
1919 "TARGET_64BIT && !TARGET_XTHEADMEMIDX"
1923 [(set_attr "move_type" "move,load")
1924 (set_attr "type" "move,load")
1925 (set_attr "mode" "DI")])
;; QI/HI sign-extension expander; the insn below handles the
;; no-Zbb/XThead case.
1927 (define_expand "extend<SHORT:mode><SUPERQI:mode>2"
1928 [(set (match_operand:SUPERQI 0 "register_operand")
1929 (sign_extend:SUPERQI (match_operand:SHORT 1 "nonimmediate_operand")))]
;; Register source splits into shift-left / arithmetic-shift-right by
;; (32 - source width); memory source is a sign-extending lb/lh.
1932 (define_insn_and_split "*extend<SHORT:mode><SUPERQI:mode>2"
1933 [(set (match_operand:SUPERQI 0 "register_operand" "=r,r")
1934 (sign_extend:SUPERQI
1935 (match_operand:SHORT 1 "nonimmediate_operand" " r,m")))]
1936 "!TARGET_ZBB && !TARGET_XTHEADBB && !TARGET_XTHEADMEMIDX"
1939 l<SHORT:size>\t%0,%1"
1940 "&& reload_completed
1941 && REG_P (operands[1])
1942 && !paradoxical_subreg_p (operands[0])"
1943 [(set (match_dup 0) (ashift:SI (match_dup 1) (match_dup 2)))
1944 (set (match_dup 0) (ashiftrt:SI (match_dup 0) (match_dup 2)))]
1946 operands[0] = gen_lowpart (SImode, operands[0]);
1947 operands[1] = gen_lowpart (SImode, operands[1]);
1948 operands[2] = GEN_INT (GET_MODE_BITSIZE (SImode)
1949 - GET_MODE_BITSIZE (<SHORT:MODE>mode));
1951 [(set_attr "move_type" "shift_shift,load")
1952 (set_attr "type" "load")
1953 (set_attr "mode" "SI")])
;; HF -> SF extension (Zfhmin/Zhinxmin; template dropped by the
;; extraction).
1955 (define_insn "extendhfsf2"
1956 [(set (match_operand:SF 0 "register_operand" "=f")
1958 (match_operand:HF 1 "register_operand" " f")))]
1959 "TARGET_ZFHMIN || TARGET_ZHINXMIN"
1961 [(set_attr "type" "fcvt")
1962 (set_attr "mode" "SF")])
;; BF16 -> SF extension (fcvt.s.bf16).
1964 (define_insn "extendbfsf2"
1965 [(set (match_operand:SF 0 "register_operand" "=f")
1967 (match_operand:BF 1 "register_operand" " f")))]
1969 "fcvt.s.bf16\t%0,%1"
1970 [(set_attr "type" "fcvt")
1971 (set_attr "mode" "SF")])
;; SF -> DF extension (template dropped by the extraction).
1973 (define_insn "extendsfdf2"
1974 [(set (match_operand:DF 0 "register_operand" "=f")
1976 (match_operand:SF 1 "register_operand" " f")))]
1977 "TARGET_DOUBLE_FLOAT || TARGET_ZDINX"
1979 [(set_attr "type" "fcvt")
1980 (set_attr "mode" "DF")])
;; HF -> DF extension; needs both half and double support.
1982 (define_insn "extendhfdf2"
1983 [(set (match_operand:DF 0 "register_operand" "=f")
1985 (match_operand:HF 1 "register_operand" " f")))]
1986 "(TARGET_ZFHMIN && TARGET_DOUBLE_FLOAT) ||
1987 (TARGET_ZHINXMIN && TARGET_ZDINX)"
1989 [(set_attr "type" "fcvt")
1990 (set_attr "mode" "DF")])
1992 ;; 16-bit floating point moves
;; HF/BF move expander: let riscv_legitimize_move canonicalize the
;; operands; when it emits everything itself the expander is done.
1993 (define_expand "mov<mode>"
1994 [(set (match_operand:HFBF 0 "")
1995 (match_operand:HFBF 1 ""))]
1998 if (riscv_legitimize_move (<MODE>mode, operands[0], operands[1]))
;; Hard-float HF/BF move: FPR<->FPR/GPR/memory alternatives plus the
;; Zfa load-immediate (zfli) constant alternative.
2002 (define_insn "*mov<mode>_hardfloat"
2003 [(set (match_operand:HFBF 0 "nonimmediate_operand" "=f, f,f,f,m,m,*f,*r, *r,*r,*m")
2004 (match_operand:HFBF 1 "move_operand" " f,zfli,G,m,f,G,*r,*f,*G*r,*m,*r"))]
2005 "((TARGET_ZFHMIN && <MODE>mode == HFmode)
2006 || (TARGET_ZFBFMIN && <MODE>mode == BFmode))
2007 && (register_operand (operands[0], <MODE>mode)
2008 || reg_or_0_operand (operands[1], <MODE>mode))"
2009 { return riscv_output_move (operands[0], operands[1]); }
2010 [(set_attr "move_type" "fmove,fmove,mtc,fpload,fpstore,store,mtc,mfc,move,load,store")
2011 (set_attr "type" "fmove,fmove,mtc,fpload,fpstore,store,mtc,mfc,move,load,store")
2012 (set_attr "mode" "<MODE>")])
;; Soft-float HF/BF move (no Zfhmin for HF, or any BF without the
;; hardfloat pattern's conditions).
2014 (define_insn "*mov<mode>_softfloat"
2015 [(set (match_operand:HFBF 0 "nonimmediate_operand" "=f, r,r,m,*f,*r")
2016 (match_operand:HFBF 1 "move_operand" " f,Gr,m,r,*r,*f"))]
2017 "((!TARGET_ZFHMIN && <MODE>mode == HFmode) || (<MODE>mode == BFmode))
2018 && (register_operand (operands[0], <MODE>mode)
2019 || reg_or_0_operand (operands[1], <MODE>mode))"
2020 { return riscv_output_move (operands[0], operands[1]); }
2021 [(set_attr "move_type" "fmove,move,load,store,mtc,mfc")
2022 (set_attr "type" "fmove,move,load,store,mtc,mfc")
2023 (set_attr "mode" "<MODE>")])
;; NaN-boxing move of a GPR bit pattern into an FPR as HF/BF
;; (UNSPEC_FMV_FP16_X; template dropped by the extraction).
2025 (define_insn "*mov<HFBF:mode>_softfloat_boxing"
2026 [(set (match_operand:HFBF 0 "register_operand" "=f")
2027 (unspec:HFBF [(match_operand:X 1 "register_operand" " r")]
2028 UNSPEC_FMV_FP16_X))]
2031 [(set_attr "type" "fmove")
2032 (set_attr "mode" "SF")])
2035 ;; ....................
2039 ;; ....................
;; FP -> SI conversion expander (fix/fixuns via <fix_uns>): on the path
;; shown, convert through the sign-extending DImode pattern and return
;; a promoted lowpart subreg.
2041 (define_expand "<fix_uns>_trunc<ANYF:mode>si2"
2042 [(set (match_operand:SI 0 "register_operand" "=r")
2044 (match_operand:ANYF 1 "register_operand" " f")))]
2045 "TARGET_HARD_FLOAT || TARGET_ZFINX"
2049 rtx t = gen_reg_rtx (DImode);
2050 emit_insn (gen_<fix_uns>_trunc<ANYF:mode>si2_sext (t, operands[1]));
2051 t = gen_lowpart (SImode, t);
2052 SUBREG_PROMOTED_VAR_P (t) = 1;
2053 SUBREG_PROMOTED_SET (t, SRP_SIGNED);
2054 emit_move_insn (operands[0], t);
;; FP -> SI insn: fcvt.w/wu with round-toward-zero.
2059 (define_insn "*<fix_uns>_trunc<ANYF:mode>si2"
2060 [(set (match_operand:SI 0 "register_operand" "=r")
2062 (match_operand:ANYF 1 "register_operand" " f")))]
2063 "TARGET_HARD_FLOAT || TARGET_ZFINX"
2064 "fcvt.w<u>.<ANYF:fmt> %0,%1,rtz"
2065 [(set_attr "type" "fcvt_f2i")
2066 (set_attr "mode" "<ANYF:MODE>")])
;; FP -> SI with the result sign-extended to DI (fcvt.w/wu writes a
;; sign-extended value on RV64).
2068 (define_insn "<fix_uns>_trunc<ANYF:mode>si2_sext"
2069 [(set (match_operand:DI 0 "register_operand" "=r")
2070 (sign_extend:DI (fix_ops:SI
2071 (match_operand:ANYF 1 "register_operand" " f"))))]
2072 "TARGET_64BIT && (TARGET_HARD_FLOAT || TARGET_ZFINX)"
2073 "fcvt.w<u>.<ANYF:fmt> %0,%1,rtz"
2074 [(set_attr "type" "fcvt_f2i")
2075 (set_attr "mode" "<ANYF:MODE>")])
;; FP -> DI conversion (fcvt.l/lu, round-toward-zero, RV64 only).
2077 (define_insn "<fix_uns>_trunc<ANYF:mode>di2"
2078 [(set (match_operand:DI 0 "register_operand" "=r")
2080 (match_operand:ANYF 1 "register_operand" " f")))]
2081 "TARGET_64BIT && (TARGET_HARD_FLOAT || TARGET_ZFINX)"
2082 "fcvt.l<u>.<ANYF:fmt> %0,%1,rtz"
2083 [(set_attr "type" "fcvt_f2i")
2084 (set_attr "mode" "<ANYF:MODE>")])
;; Signed integer -> FP conversion (%z1 prints x0 for a zero operand).
2086 (define_insn "float<GPR:mode><ANYF:mode>2"
2087 [(set (match_operand:ANYF 0 "register_operand" "= f")
2089 (match_operand:GPR 1 "reg_or_0_operand" " rJ")))]
2090 "TARGET_HARD_FLOAT || TARGET_ZFINX"
2091 "fcvt.<ANYF:fmt>.<GPR:ifmt>\t%0,%z1"
2092 [(set_attr "type" "fcvt_i2f")
2093 (set_attr "mode" "<ANYF:MODE>")])
;; Unsigned integer -> FP conversion (fcvt.*.wu/lu).
2095 (define_insn "floatuns<GPR:mode><ANYF:mode>2"
2096 [(set (match_operand:ANYF 0 "register_operand" "= f")
2097 (unsigned_float:ANYF
2098 (match_operand:GPR 1 "reg_or_0_operand" " rJ")))]
2099 "TARGET_HARD_FLOAT || TARGET_ZFINX"
2100 "fcvt.<ANYF:fmt>.<GPR:ifmt>u\t%0,%z1"
2101 [(set_attr "type" "fcvt_i2f")
2102 (set_attr "mode" "<ANYF:MODE>")])
;; lrint: float -> SImode using the dynamic ("dyn") rounding mode from
;; the FCSR.  The RV64 expander path mirrors the fix_trunc one above:
;; convert into DImode and expose the SImode lowpart as a promoted subreg.
2104 (define_expand "lrint<ANYF:mode>si2"
2105 [(set (match_operand:SI 0 "register_operand" "=r")
2107 [(match_operand:ANYF 1 "register_operand" " f")]
2109 "TARGET_HARD_FLOAT || TARGET_ZFINX"
2113 rtx t = gen_reg_rtx (DImode);
2114 emit_insn (gen_lrint<ANYF:mode>si2_sext (t, operands[1]));
2115 t = gen_lowpart (SImode, t);
2116 SUBREG_PROMOTED_VAR_P (t) = 1;
2117 SUBREG_PROMOTED_SET (t, SRP_SIGNED);
2118 emit_move_insn (operands[0], t);
2123 (define_insn "*lrint<ANYF:mode>si2"
2124 [(set (match_operand:SI 0 "register_operand" "=r")
2126 [(match_operand:ANYF 1 "register_operand" " f")]
2128 "TARGET_HARD_FLOAT || TARGET_ZFINX"
2129 "fcvt.w.<ANYF:fmt> %0,%1,dyn"
2130 [(set_attr "type" "fcvt_f2i")
2131 (set_attr "mode" "<ANYF:MODE>")])
;; RV64: lrint with the sign-extension to DImode folded into the fcvt.w.
2133 (define_insn "lrint<ANYF:mode>si2_sext"
2134 [(set (match_operand:DI 0 "register_operand" "=r")
2135 (sign_extend:DI (unspec:SI
2136 [(match_operand:ANYF 1 "register_operand" " f")]
2138 "TARGET_64BIT && (TARGET_HARD_FLOAT || TARGET_ZFINX)"
2139 "fcvt.w.<ANYF:fmt> %0,%1,dyn"
2140 [(set_attr "type" "fcvt_f2i")
2141 (set_attr "mode" "<ANYF:MODE>")])
;; 64-bit lrint result (fcvt.l), RV64 only.
2143 (define_insn "lrint<ANYF:mode>di2"
2144 [(set (match_operand:DI 0 "register_operand" "=r")
2146 [(match_operand:ANYF 1 "register_operand" " f")]
2148 "TARGET_64BIT && (TARGET_HARD_FLOAT || TARGET_ZFINX)"
2149 "fcvt.l.<ANYF:fmt> %0,%1,dyn"
2150 [(set_attr "type" "fcvt_f2i")
2151 (set_attr "mode" "<ANYF:MODE>")])
;; l<round> family (lround/lfloor/lceil/…): identical structure to lrint
;; but with a static rounding mode <round_rm> chosen per pattern.
2153 (define_expand "l<round_pattern><ANYF:mode>si2"
2154 [(set (match_operand:SI 0 "register_operand" "=r")
2156 [(match_operand:ANYF 1 "register_operand" " f")]
2158 "TARGET_HARD_FLOAT || TARGET_ZFINX"
2162 rtx t = gen_reg_rtx (DImode);
2163 emit_insn (gen_l<round_pattern><ANYF:mode>si2_sext (t, operands[1]));
2164 t = gen_lowpart (SImode, t);
2165 SUBREG_PROMOTED_VAR_P (t) = 1;
2166 SUBREG_PROMOTED_SET (t, SRP_SIGNED);
2167 emit_move_insn (operands[0], t);
2172 (define_insn "*l<round_pattern><ANYF:mode>si2"
2173 [(set (match_operand:SI 0 "register_operand" "=r")
2175 [(match_operand:ANYF 1 "register_operand" " f")]
2177 "TARGET_HARD_FLOAT || TARGET_ZFINX"
2178 "fcvt.w.<ANYF:fmt> %0,%1,<round_rm>"
2179 [(set_attr "type" "fcvt_f2i")
2180 (set_attr "mode" "<ANYF:MODE>")])
2182 (define_insn "l<round_pattern><ANYF:mode>si2_sext"
2183 [(set (match_operand:DI 0 "register_operand" "=r")
2184 (sign_extend:DI (unspec:SI
2185 [(match_operand:ANYF 1 "register_operand" " f")]
2187 "TARGET_64BIT && (TARGET_HARD_FLOAT || TARGET_ZFINX)"
2188 "fcvt.w.<ANYF:fmt> %0,%1,<round_rm>"
2189 [(set_attr "type" "fcvt_f2i")
2190 (set_attr "mode" "<ANYF:MODE>")])
2192 (define_insn "l<round_pattern><ANYF:mode>di2"
2193 [(set (match_operand:DI 0 "register_operand" "=r")
2195 [(match_operand:ANYF 1 "register_operand" " f")]
2197 "TARGET_64BIT && (TARGET_HARD_FLOAT || TARGET_ZFINX)"
2198 "fcvt.l.<ANYF:fmt> %0,%1,<round_rm>"
2199 [(set_attr "type" "fcvt_f2i")
2200 (set_attr "mode" "<ANYF:MODE>")])
2202 ;; There are a couple non-obvious restrictions to be aware of.
2204 ;; We'll do a FP-INT conversion in the sequence. But we don't
2205 ;; have a .l (64bit) variant of those instructions for rv32.
2206 ;; To preserve proper semantics we must reject DFmode inputs
2207 ;; for rv32 unless Zfa is enabled.
2209 ;; The ANYF iterator allows HFmode. We don't have all the
2210 ;; necessary patterns defined for HFmode. So restrict HFmode
;; Expand FP rounding (round/floor/ceil/…) to an integral FP value.
;; With Zfa this is a single fround instruction; otherwise the visible
;; fallback compares |x| against a per-mode coefficient from
;; get_fp_rounding_coefficient and, on one side of that branch, round-trips
;; through an integer conversion and restores the sign with copysign.
;; (Interior lines are missing here; the exact branch layout is not fully
;; visible — confirm against the full riscv.md before relying on it.)
2212 (define_expand "<round_pattern><ANYF:mode>2"
2213 [(set (match_operand:ANYF 0 "register_operand" "=f")
2215 [(match_operand:ANYF 1 "register_operand" " f")]
2218 && (TARGET_ZFA || flag_fp_int_builtin_inexact || !flag_trapping_math)
2219 && (TARGET_ZFA || TARGET_64BIT || <ANYF:MODE>mode != DFmode)
2220 && (TARGET_ZFA || <ANYF:MODE>mode != HFmode))"
2223 emit_insn (gen_<round_pattern><ANYF:mode>_zfa2 (operands[0],
2228 rtx label = gen_label_rtx ();
2229 rtx end_label = gen_label_rtx ();
2230 rtx abs_reg = gen_reg_rtx (<ANYF:MODE>mode);
2231 rtx coeff_reg = gen_reg_rtx (<ANYF:MODE>mode);
2232 rtx tmp_reg = gen_reg_rtx (<ANYF:MODE>mode);
2234 riscv_emit_move (tmp_reg, operands[1]);
2235 riscv_emit_move (coeff_reg,
2236 riscv_vector::get_fp_rounding_coefficient (<ANYF:MODE>mode));
2237 emit_insn (gen_abs<ANYF:mode>2 (abs_reg, operands[1]));
2239 riscv_expand_conditional_branch (label, LT, abs_reg, coeff_reg);
2241 emit_jump_insn (gen_jump (end_label));
;; Per-mode integer round-trip: SFmode via SImode, DFmode via DImode.
2245 switch (<ANYF:MODE>mode)
2248 reg = gen_reg_rtx (SImode);
2249 emit_insn (gen_l<round_pattern>sfsi2 (reg, operands[1]));
2250 emit_insn (gen_floatsisf2 (abs_reg, reg));
2253 reg = gen_reg_rtx (DImode);
2254 emit_insn (gen_l<round_pattern>dfdi2 (reg, operands[1]));
2255 emit_insn (gen_floatdidf2 (abs_reg, reg));
;; Re-apply the original sign (handles -0.0 and negative inputs).
2261 emit_insn (gen_copysign<ANYF:mode>3 (tmp_reg, abs_reg, operands[1]))
2263 emit_label (end_label);
2264 riscv_emit_move (operands[0], tmp_reg);
;; Zfa: single-instruction rounding with static rounding mode.
2270 (define_insn "<round_pattern><ANYF:mode>_zfa2"
2271 [(set (match_operand:ANYF 0 "register_operand" "=f")
2273 [(match_operand:ANYF 1 "register_operand" " f")]
2275 "TARGET_HARD_FLOAT && TARGET_ZFA"
2276 "fround.<ANYF:fmt>\t%0,%1,<round_rm>"
2277 [(set_attr "type" "fcvt")
2278 (set_attr "mode" "<ANYF:MODE>")])
;; Zfa rint: froundnx signals inexact (the "nx" suffix), unlike fround.
2280 (define_insn "rint<ANYF:mode>2"
2281 [(set (match_operand:ANYF 0 "register_operand" "=f")
2283 [(match_operand:ANYF 1 "register_operand" " f")]
2285 "TARGET_HARD_FLOAT && TARGET_ZFA"
2286 "froundnx.<ANYF:fmt>\t%0,%1"
2287 [(set_attr "type" "fcvt")
2288 (set_attr "mode" "<ANYF:MODE>")])
2291 ;; ....................
2295 ;; ....................
2297 ;; Lower-level instructions for loading an address from the GOT.
2298 ;; We could use MEMs, but an unspec gives more optimization
;; Load a symbol's address from the GOT.  P iterates over the pointer
;; mode (SI on rv32, DI on rv64).
2301 (define_insn "got_load<mode>"
2302 [(set (match_operand:P 0 "register_operand" "=r")
2304 [(match_operand:P 1 "symbolic_operand" "")]
2308 [(set_attr "got" "load")
2309 (set_attr "type" "load")
2310 (set_attr "mode" "<MODE>")])
;; TLS local-exec: add thread pointer + offset with the %tprel_add
;; relocation annotation on the add.
2312 (define_insn "tls_add_tp_le<mode>"
2313 [(set (match_operand:P 0 "register_operand" "=r")
2315 [(match_operand:P 1 "register_operand" "r")
2316 (match_operand:P 2 "register_operand" "r")
2317 (match_operand:P 3 "symbolic_operand" "")]
2320 "add\t%0,%1,%2,%%tprel_add(%3)"
2321 [(set_attr "type" "arith")
2322 (set_attr "mode" "<MODE>")])
;; TLS general-dynamic: load the GOT slot used for the __tls_get_addr call.
2324 (define_insn "got_load_tls_gd<mode>"
2325 [(set (match_operand:P 0 "register_operand" "=r")
2327 [(match_operand:P 1 "symbolic_operand" "")]
2331 [(set_attr "got" "load")
2332 (set_attr "type" "load")
2333 (set_attr "mode" "<MODE>")])
;; TLS initial-exec: load the TP-relative offset from the GOT.
2335 (define_insn "got_load_tls_ie<mode>"
2336 [(set (match_operand:P 0 "register_operand" "=r")
2338 [(match_operand:P 1 "symbolic_operand" "")]
2342 [(set_attr "got" "load")
2343 (set_attr "type" "load")
2344 (set_attr "mode" "<MODE>")])
;; TLS descriptor call sequence: result lands in a0 (A0_REGNUM) and t0
;; (T0_REGNUM) is clobbered as the call scratch.  Four fixed instructions,
;; hence length 16; the .LT%= local label anchors the paired relocations.
2346 (define_insn "@tlsdesc<mode>"
2347 [(set (reg:P A0_REGNUM)
2349 [(match_operand:P 0 "symbolic_operand" "")]
2351 (clobber (reg:P T0_REGNUM))]
2354 return ".LT%=: auipc\ta0,%%tlsdesc_hi(%0)\;"
2355 "<load>\tt0,%%tlsdesc_load_lo(.LT%=)(a0)\;"
2356 "addi\ta0,a0,%%tlsdesc_add_lo(.LT%=)\;"
2357 "jalr\tt0,t0,%%tlsdesc_call(.LT%=)";
2359 [(set_attr "type" "multi")
2360 (set_attr "length" "16")
2361 (set_attr "mode" "<MODE>")])
;; PC-relative high part (%h prints the high-part relocation, see the
;; operand-letter table at the top of the file).  cannot_copy: the .LA
;; label must stay unique so the paired lo12 relocation resolves.
2363 (define_insn "auipc<mode>"
2364 [(set (match_operand:P 0 "register_operand" "=r")
2366 [(match_operand:P 1 "symbolic_operand" "")
2367 (match_operand:P 2 "const_int_operand")
2371 ".LA%2: auipc\t%0,%h1"
2372 [(set_attr "type" "auipc")
2373 (set_attr "cannot_copy" "yes")])
2375 ;; Instructions for adding the low 12 bits of an address to a register.
2376 ;; Operand 2 is the address: riscv_print_operand works out which relocation
2377 ;; should be applied.
2379 (define_insn "*low<mode>"
2380 [(set (match_operand:P 0 "register_operand" "=r")
2381 (lo_sum:P (match_operand:P 1 "register_operand" " r")
2382 (match_operand:P 2 "symbolic_operand" "")))]
2385 [(set_attr "type" "arith")
2386 (set_attr "mode" "<MODE>")])
2388 ;; Allow combine to split complex const_int load sequences, using operand 2
2389 ;; to store the intermediate results. See move_operand for details.
2391 [(set (match_operand:GPR 0 "register_operand")
2392 (match_operand:GPR 1 "splittable_const_int_operand"))
2393 (clobber (match_operand:GPR 2 "register_operand"))]
;; riscv_move_integer synthesizes the multi-insn constant load, using
;; operand 2 as the scratch.
2397 riscv_move_integer (operands[2], operands[0], INTVAL (operands[1]),
2402 ;; Likewise, for symbolic operands.
2404 [(set (match_operand:P 0 "register_operand")
2405 (match_operand:P 1))
2406 (clobber (match_operand:P 2 "register_operand"))]
;; First call (NULL) only tests splittability; the second materializes the
;; low-part expression into operands[3] for the replacement set.
2407 "riscv_split_symbol (operands[2], operands[1], MAX_MACHINE_MODE, NULL)"
2408 [(set (match_dup 0) (match_dup 3))]
2410 riscv_split_symbol (operands[2], operands[1],
2411 MAX_MACHINE_MODE, &operands[3]);
2414 ;; Pretend to have the ability to load complex const_int in order to get
2415 ;; better code generation around them.
2416 ;; But avoid constants that are special cased elsewhere.
2418 ;; Hide it from IRA register equiv recog* () to elide potential undoing of split
;; Visible condition excludes constants handled by the shifted-mask
;; splitters further down (p2m1_shift_operand / high_mask_shift_operand).
2420 (define_insn_and_split "*mvconst_internal"
2421 [(set (match_operand:GPR 0 "register_operand" "=r")
2422 (match_operand:GPR 1 "splittable_const_int_operand" "i"))]
2424 && !(p2m1_shift_operand (operands[1], <MODE>mode)
2425 || high_mask_shift_operand (operands[1], <MODE>mode))"
2430 riscv_move_integer (operands[0], operands[0], INTVAL (operands[1]),
2434 [(set_attr "type" "move")])
2436 ;; 64-bit integer moves
2438 (define_expand "movdi"
2439 [(set (match_operand:DI 0 "")
2440 (match_operand:DI 1 ""))]
2443 if (riscv_legitimize_move (DImode, operands[0], operands[1]))
;; rv32 DImode move (register pairs).  The vp constraint / rdvlenb
;; alternative appears to read the vector length register — TODO confirm
;; vp's definition in constraints.md.  The ext attribute records which
;; ISA extension each alternative needs (d = double-float regs, vector).
2447 (define_insn "*movdi_32bit"
2448 [(set (match_operand:DI 0 "nonimmediate_operand" "=r,r,r,m, *f,*f,*r,*f,*m,r")
2449 (match_operand:DI 1 "move_operand" " r,i,m,r,*J*r,*m,*f,*f,*f,vp"))]
2451 && (register_operand (operands[0], DImode)
2452 || reg_or_0_operand (operands[1], DImode))"
2453 { return riscv_output_move (operands[0], operands[1]); }
2454 [(set_attr "move_type" "move,const,load,store,mtc,fpload,mfc,fmove,fpstore,rdvlenb")
2455 (set_attr "mode" "DI")
2456 (set_attr "type" "move,move,load,store,move,fpload,move,fmove,fpstore,move")
2457 (set_attr "ext" "base,base,base,base,d,d,d,d,d,vector")])
;; rv64 DImode move; T matches constants loadable by a short sequence.
2459 (define_insn "*movdi_64bit"
2460 [(set (match_operand:DI 0 "nonimmediate_operand" "=r,r,r, m, *f,*f,*r,*f,*m,r")
2461 (match_operand:DI 1 "move_operand" " r,T,m,rJ,*r*J,*m,*f,*f,*f,vp"))]
2463 && (register_operand (operands[0], DImode)
2464 || reg_or_0_operand (operands[1], DImode))"
2465 { return riscv_output_move (operands[0], operands[1]); }
2466 [(set_attr "move_type" "move,const,load,store,mtc,fpload,mfc,fmove,fpstore,rdvlenb")
2467 (set_attr "mode" "DI")
2468 (set_attr "type" "move,move,load,store,mtc,fpload,mfc,fmove,fpstore,move")
2469 (set_attr "ext" "base,base,base,base,d,d,d,d,d,vector")])
2471 ;; 32-bit Integer moves
2473 (define_expand "mov<mode>"
2474 [(set (match_operand:MOVE32 0 "")
2475 (match_operand:MOVE32 1 ""))]
2478 if (riscv_legitimize_move (<MODE>mode, operands[0], operands[1]))
;; SImode move.  The extra condition rejects raw copies out of the VL
;; register (VL_REG_P), which must go through the vp alternative instead.
2482 (define_insn "*movsi_internal"
2483 [(set (match_operand:SI 0 "nonimmediate_operand" "=r,r,r, m, *f,*f,*r,*m,r")
2484 (match_operand:SI 1 "move_operand" " r,T,m,rJ,*r*J,*m,*f,*f,vp"))]
2485 "(register_operand (operands[0], SImode)
2486 || reg_or_0_operand (operands[1], SImode))
2487 && !(REG_P (operands[1]) && VL_REG_P (REGNO (operands[1])))"
2488 { return riscv_output_move (operands[0], operands[1]); }
2489 [(set_attr "move_type" "move,const,load,store,mtc,fpload,mfc,fpstore,rdvlenb")
2490 (set_attr "mode" "SI")
2491 (set_attr "type" "move,move,load,store,mtc,fpload,mfc,fpstore,move")
2492 (set_attr "ext" "base,base,base,base,f,f,f,f,vector")])
2494 ;; 16-bit Integer moves
2496 ;; Unlike most other insns, the move insns can't be split with
2497 ;; different predicates, because register spilling and other parts of
2498 ;; the compiler, have memoized the insn number already.
2499 ;; Unsigned loads are used because LOAD_EXTEND_OP returns ZERO_EXTEND.
2501 (define_expand "movhi"
2502 [(set (match_operand:HI 0 "")
2503 (match_operand:HI 1 ""))]
2506 if (riscv_legitimize_move (HImode, operands[0], operands[1]))
2510 (define_insn "*movhi_internal"
2511 [(set (match_operand:HI 0 "nonimmediate_operand" "=r,r,r, m, *f,*r,r")
2512 (match_operand:HI 1 "move_operand" " r,T,m,rJ,*r*J,*f,vp"))]
2513 "(register_operand (operands[0], HImode)
2514 || reg_or_0_operand (operands[1], HImode))"
2515 { return riscv_output_move (operands[0], operands[1]); }
2516 [(set_attr "move_type" "move,const,load,store,mtc,mfc,rdvlenb")
2517 (set_attr "mode" "HI")
2518 (set_attr "type" "move,move,load,store,mtc,mfc,move")
2519 (set_attr "ext" "base,base,base,base,f,f,vector")])
2521 ;; HImode constant generation; see riscv_move_integer for details.
2522 ;; si+si->hi without truncation is legal because of
2523 ;; TARGET_TRULY_NOOP_TRUNCATION.
;; %i2 prints "i" for an immediate operand; %~ appends "w" on rv64
;; (see the operand-letter table at the top of the file).
2525 (define_insn "*add<mode>hi3"
2526 [(set (match_operand:HI 0 "register_operand" "=r,r")
2527 (plus:HI (match_operand:HISI 1 "register_operand" " r,r")
2528 (match_operand:HISI 2 "arith_operand" " r,I")))]
2530 "add%i2%~\t%0,%1,%2"
2531 [(set_attr "type" "arith")
2532 (set_attr "mode" "HI")])
2534 (define_insn "*xor<mode>hi3"
2535 [(set (match_operand:HI 0 "register_operand" "=r,r")
2536 (xor:HI (match_operand:HISI 1 "register_operand" " r,r")
2537 (match_operand:HISI 2 "arith_operand" " r,I")))]
2540 [(set_attr "type" "logical")
2541 (set_attr "mode" "HI")])
2543 ;; 8-bit Integer moves
2545 (define_expand "movqi"
2546 [(set (match_operand:QI 0 "")
2547 (match_operand:QI 1 ""))]
2550 if (riscv_legitimize_move (QImode, operands[0], operands[1]))
;; QImode move; I (12-bit signed immediate) covers every QImode constant.
2554 (define_insn "*movqi_internal"
2555 [(set (match_operand:QI 0 "nonimmediate_operand" "=r,r,r, m, *f,*r,r")
2556 (match_operand:QI 1 "move_operand" " r,I,m,rJ,*r*J,*f,vp"))]
2557 "(register_operand (operands[0], QImode)
2558 || reg_or_0_operand (operands[1], QImode))"
2559 { return riscv_output_move (operands[0], operands[1]); }
2560 [(set_attr "move_type" "move,const,load,store,mtc,mfc,rdvlenb")
2561 (set_attr "mode" "QI")
2562 (set_attr "type" "move,move,load,store,mtc,mfc,move")
2563 (set_attr "ext" "base,base,base,base,f,f,vector")])
2565 ;; 32-bit floating point moves
2567 (define_expand "movsf"
2568 [(set (match_operand:SF 0 "")
2569 (match_operand:SF 1 ""))]
2572 if (riscv_legitimize_move (SFmode, operands[0], operands[1]))
;; Hard-float SF move; zfli matches constants loadable with Zfa fli.
2576 (define_insn "*movsf_hardfloat"
2577 [(set (match_operand:SF 0 "nonimmediate_operand" "=f, f,f,f,m,m,*f,*r, *r,*r,*m")
2578 (match_operand:SF 1 "move_operand" " f,zfli,G,m,f,G,*r,*f,*G*r,*m,*r"))]
2580 && (register_operand (operands[0], SFmode)
2581 || reg_or_0_operand (operands[1], SFmode))"
2582 { return riscv_output_move (operands[0], operands[1]); }
2583 [(set_attr "move_type" "fmove,fmove,mtc,fpload,fpstore,store,mtc,mfc,move,load,store")
2584 (set_attr "type" "fmove,fmove,mtc,fpload,fpstore,store,mtc,mfc,move,load,store")
2585 (set_attr "mode" "SF")])
;; Soft-float SF move: integer registers and memory only.
2587 (define_insn "*movsf_softfloat"
2588 [(set (match_operand:SF 0 "nonimmediate_operand" "= r,r,m")
2589 (match_operand:SF 1 "move_operand" " Gr,m,r"))]
2591 && (register_operand (operands[0], SFmode)
2592 || reg_or_0_operand (operands[1], SFmode))"
2593 { return riscv_output_move (operands[0], operands[1]); }
2594 [(set_attr "move_type" "move,load,store")
2595 (set_attr "type" "move,load,store")
2596 (set_attr "mode" "SF")])
2598 ;; 64-bit floating point moves
2600 (define_expand "movdf"
2601 [(set (match_operand:DF 0 "")
2602 (match_operand:DF 1 ""))]
2605 if (riscv_legitimize_move (DFmode, operands[0], operands[1]))
2610 ;; In RV32, we lack fmv.x.d and fmv.d.x. Go through memory instead.
2611 ;; (However, we can still use fcvt.d.w to zero a floating-point register.)
;; zmvf/zmvr constraints gate the FPR<->GPR alternatives — presumably on
;; Zfa availability (cf. the fmvp/fmv patterns below); confirm in
;; constraints.md.
2612 (define_insn "*movdf_hardfloat_rv32"
2613 [(set (match_operand:DF 0 "nonimmediate_operand" "=f, f,f,f,m,m,*zmvf,*zmvr, *r,*r,*th_m_noi")
2614 (match_operand:DF 1 "move_operand" " f,zfli,G,m,f,G,*zmvr,*zmvf,*r*G,*th_m_noi,*r"))]
2615 "!TARGET_64BIT && TARGET_DOUBLE_FLOAT
2616 && (register_operand (operands[0], DFmode)
2617 || reg_or_0_operand (operands[1], DFmode))"
2618 { return riscv_output_move (operands[0], operands[1]); }
2619 [(set_attr "move_type" "fmove,fmove,mtc,fpload,fpstore,store,mtc,mfc,move,load,store")
2620 (set_attr "type" "fmove,fmove,mtc,fpload,fpstore,store,mtc,mfc,move,load,store")
2621 (set_attr "mode" "DF")])
2623 (define_insn "*movdf_hardfloat_rv64"
2624 [(set (match_operand:DF 0 "nonimmediate_operand" "=f, f,f,f,m,m,*f,*r, *r,*r,*m")
2625 (match_operand:DF 1 "move_operand" " f,zfli,G,m,f,G,*r,*f,*r*G,*m,*r"))]
2626 "TARGET_64BIT && TARGET_DOUBLE_FLOAT
2627 && (register_operand (operands[0], DFmode)
2628 || reg_or_0_operand (operands[1], DFmode))"
2629 { return riscv_output_move (operands[0], operands[1]); }
2630 [(set_attr "move_type" "fmove,fmove,mtc,fpload,fpstore,store,mtc,mfc,move,load,store")
2631 (set_attr "type" "fmove,fmove,mtc,fpload,fpstore,store,mtc,mfc,move,load,store")
2632 (set_attr "mode" "DF")])
2634 (define_insn "*movdf_softfloat"
2635 [(set (match_operand:DF 0 "nonimmediate_operand" "= r,r, m")
2636 (match_operand:DF 1 "move_operand" " rG,m,rG"))]
2637 "!TARGET_DOUBLE_FLOAT
2638 && (register_operand (operands[0], DFmode)
2639 || reg_or_0_operand (operands[1], DFmode))"
2640 { return riscv_output_move (operands[0], operands[1]); }
2641 [(set_attr "move_type" "move,load,store")
2642 (set_attr "type" "fmove,fpload,fpstore")
2643 (set_attr "mode" "DF")])
;; Zfa rv32: read the low 32 bits of a DF register into a GPR (fmv.x.w).
2645 (define_insn "movsidf2_low_rv32"
2646 [(set (match_operand:SI 0 "register_operand" "= r")
2648 [(match_operand:DF 1 "register_operand" "zmvf")]
2650 "TARGET_HARD_FLOAT && !TARGET_64BIT && TARGET_ZFA"
2652 [(set_attr "move_type" "fmove")
2653 (set_attr "type" "fmove")
2654 (set_attr "mode" "DF")])
;; Zfa rv32: read the high 32 bits of a DF register (fmvh.x.d).
2657 (define_insn "movsidf2_high_rv32"
2658 [(set (match_operand:SI 0 "register_operand" "= r")
2660 [(match_operand:DF 1 "register_operand" "zmvf")]
2662 "TARGET_HARD_FLOAT && !TARGET_64BIT && TARGET_ZFA"
2664 [(set_attr "move_type" "fmove")
2665 (set_attr "type" "fmove")
2666 (set_attr "mode" "DF")])
;; Zfa rv32: build a DF register from two GPR halves with fmvp.d.x
;; (operand 2 supplies the high word — note the %2,%1 operand order).
2668 (define_insn "movdfsisi3_rv32"
2669 [(set (match_operand:DF 0 "register_operand" "= f")
2671 (match_operand:SI 2 "register_operand" "zmvr")
2673 (match_operand:SI 1 "register_operand" "zmvr")
2675 "TARGET_HARD_FLOAT && !TARGET_64BIT && TARGET_ZFA"
2676 "fmvp.d.x\t%0,%2,%1"
2677 [(set_attr "move_type" "fmove")
2678 (set_attr "type" "fmove")
2679 (set_attr "mode" "DF")])
;; Split a 64-bit move on rv32 into two word moves when
;; riscv_split_64bit_move_p says the pattern above cannot handle it.
2682 [(set (match_operand:MOVE64 0 "nonimmediate_operand")
2683 (match_operand:MOVE64 1 "move_operand"))]
2685 && riscv_split_64bit_move_p (operands[0], operands[1])"
2688 riscv_split_doubleword_move (operands[0], operands[1]);
;; memcmp expansion: try the vector routine first, then the scalar
;; block-compare; the word_mode result is narrowed to SImode via a
;; promoted subreg.  FAILing (not visible here) falls back to libc.
2692 (define_expand "cmpmemsi"
2693 [(parallel [(set (match_operand:SI 0)
2694 (compare:SI (match_operand:BLK 1)
2695 (match_operand:BLK 2)))
2696 (use (match_operand:SI 3))
2697 (use (match_operand:SI 4))])]
2700 /* If TARGET_VECTOR is false, this routine will return false and we will
2701 try scalar expansion. */
2702 if (riscv_vector::expand_vec_cmpmem (operands[0], operands[1],
2703 operands[2], operands[3]))
2706 rtx temp = gen_reg_rtx (word_mode);
2707 if (riscv_expand_block_compare (temp, operands[1], operands[2],
2712 temp = gen_lowpart (SImode, temp);
2713 SUBREG_PROMOTED_VAR_P (temp) = 1;
2714 SUBREG_PROMOTED_SET (temp, SRP_SIGNED);
2716 emit_move_insn (operands[0], temp);
;; memcpy expansion (non-overlapping); operand 2 = length, 3 = alignment.
2723 (define_expand "cpymem<mode>"
2724 [(parallel [(set (match_operand:BLK 0 "general_operand")
2725 (match_operand:BLK 1 "general_operand"))
2726 (use (match_operand:P 2 ""))
2727 (use (match_operand:SI 3 "const_int_operand"))])]
2730 if (riscv_expand_block_move (operands[0], operands[1], operands[2]))
2736 ;; Fill memory with constant byte.
2737 ;; Argument 0 is the destination
2738 ;; Argument 1 is the constant byte
2739 ;; Argument 2 is the length
2740 ;; Argument 3 is the alignment
2742 (define_expand "setmem<mode>"
2743 [(parallel [(set (match_operand:BLK 0 "memory_operand")
2744 (match_operand:QI 2 "nonmemory_operand"))
2745 (use (match_operand:P 1 ""))
2746 (use (match_operand:SI 3 "const_int_operand"))])]
2749 /* If TARGET_VECTOR is false, this routine will return false and we will
2750 try scalar expansion. */
2751 if (riscv_vector::expand_vec_setmem (operands[0], operands[1], operands[2]))
2754 /* If value to set is not zero, use the library routine. */
2755 if (operands[2] != const0_rtx)
2758 if (riscv_expand_block_clear (operands[0], operands[1]))
;; memmove expansion (possibly-overlapping copy) — vector path only here.
2764 (define_expand "movmem<mode>"
2765 [(parallel [(set (match_operand:BLK 0 "general_operand")
2766 (match_operand:BLK 1 "general_operand"))
2767 (use (match_operand:P 2 "const_int_operand"))
2768 (use (match_operand:SI 3 "const_int_operand"))])]
2771 if (riscv_vector::expand_block_move (operands[0], operands[1], operands[2],
2778 ;; Expand in-line code to clear the instruction cache between operand[0] and
;; Prefer the OS flush helper when configured; otherwise emit fence.i
;; if the Zifencei extension is available.
2780 (define_expand "clear_cache"
2781 [(match_operand 0 "pmode_register_operand")
2782 (match_operand 1 "pmode_register_operand")]
2785 #ifdef ICACHE_FLUSH_FUNC
2786 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, ICACHE_FLUSH_FUNC),
2787 LCT_NORMAL, VOIDmode, operands[0], Pmode,
2788 operands[1], Pmode, const0_rtx, Pmode);
2790 if (TARGET_ZIFENCEI)
2791 emit_insn (gen_fence_i ());
;; Memory fence (fence) and instruction fence (fence.i).
2796 (define_insn "fence"
2797 [(unspec_volatile [(const_int 0)] UNSPECV_FENCE)]
2800 [(set_attr "type" "atomic")])
2802 (define_insn "fence_i"
2803 [(unspec_volatile [(const_int 0)] UNSPECV_FENCE_I)]
2806 [(set_attr "type" "atomic")])
;; Spin-loop hint: Zihintpause "pause", else the equivalent raw encoding
;; (a HINT, so it executes as a no-op where unsupported).
2808 (define_insn "riscv_pause"
2809 [(unspec_volatile [(const_int 0)] UNSPECV_PAUSE)]
2811 "* return TARGET_ZIHINTPAUSE ? \"pause\" : \".insn\t0x0100000f\";"
2812 [(set_attr "type" "atomic")])
2815 ;; ....................
2819 ;; ....................
2821 ;; Use a QImode shift count, to avoid generating sign or zero extend
2822 ;; instructions for shift counts, and to avoid dropping subregs.
2823 ;; expand_shift_1 can do this automatically when SHIFT_COUNT_TRUNCATED is
2824 ;; defined, but use of that is discouraged.
;; SImode shift: constant counts are masked to 0..31 before printing;
;; %~ appends "w" on rv64 to select the word-width shift.
2826 (define_insn "*<optab>si3"
2827 [(set (match_operand:SI 0 "register_operand" "= r")
2829 (match_operand:SI 1 "register_operand" " r")
2830 (match_operand:QI 2 "arith_operand" " rI")))]
2833 if (GET_CODE (operands[2]) == CONST_INT)
2834 operands[2] = GEN_INT (INTVAL (operands[2])
2835 & (GET_MODE_BITSIZE (SImode) - 1));
2837 return "<insn>%i2%~\t%0,%1,%2";
2839 [(set_attr "type" "shift")
2840 (set_attr "mode" "SI")])
;; Expander: on rv64 (visible body) go through the _extend pattern so the
;; result is known sign-extended, exposed as a promoted SImode subreg.
2842 (define_expand "<optab>si3"
2843 [(set (match_operand:SI 0 "register_operand" "= r")
2844 (any_shift:SI (match_operand:SI 1 "register_operand" " r")
2845 (match_operand:QI 2 "arith_operand" " rI")))]
2850 rtx t = gen_reg_rtx (DImode);
2851 emit_insn (gen_<optab>si3_extend (t, operands[1], operands[2]));
2852 t = gen_lowpart (SImode, t);
2853 SUBREG_PROMOTED_VAR_P (t) = 1;
2854 SUBREG_PROMOTED_SET (t, SRP_SIGNED);
2855 emit_move_insn (operands[0], t);
;; DImode shift; constant counts masked to 0..63.
2860 (define_insn "<optab>di3"
2861 [(set (match_operand:DI 0 "register_operand" "= r")
2863 (match_operand:DI 1 "register_operand" " r")
2864 (match_operand:QI 2 "arith_operand" " rI")))]
2867 if (GET_CODE (operands[2]) == CONST_INT)
2868 operands[2] = GEN_INT (INTVAL (operands[2])
2869 & (GET_MODE_BITSIZE (DImode) - 1));
2871 return "<insn>%i2\t%0,%1,%2";
2873 [(set_attr "type" "shift")
2874 (set_attr "mode" "DI")])
;; Shift whose count is explicitly ANDed with bitsize-1: the hardware
;; already truncates the count, so the mask is dropped on split.
2876 (define_insn_and_split "*<optab><GPR:mode>3_mask_1"
2877 [(set (match_operand:GPR 0 "register_operand" "= r")
2879 (match_operand:GPR 1 "register_operand" " r")
2880 (match_operator 4 "subreg_lowpart_operator"
2882 (match_operand:GPR2 2 "register_operand" "r")
2883 (match_operand 3 "<GPR:shiftm1>"))])))]
2888 (any_shift:GPR (match_dup 1)
2890 "operands[2] = gen_lowpart (QImode, operands[2]);"
2891 [(set_attr "type" "shift")
2892 (set_attr "mode" "<GPR:MODE>")])
;; rv64: 32-bit shift ("<insn>w") whose DImode result is the
;; sign-extension of the SImode shift.
2894 (define_insn "<optab>si3_extend"
2895 [(set (match_operand:DI 0 "register_operand" "= r")
2897 (any_shift:SI (match_operand:SI 1 "register_operand" " r")
2898 (match_operand:QI 2 "arith_operand" " rI"))))]
2901 if (GET_CODE (operands[2]) == CONST_INT)
2902 operands[2] = GEN_INT (INTVAL (operands[2]) & 0x1f);
2904 return "<insn>%i2w\t%0,%1,%2";
2906 [(set_attr "type" "shift")
2907 (set_attr "mode" "SI")])
;; Masked-count variant of the _extend pattern; the mask is again
;; redundant with the hardware count truncation and dropped on split.
2909 (define_insn_and_split "*<optab>si3_extend_mask"
2910 [(set (match_operand:DI 0 "register_operand" "= r")
2913 (match_operand:SI 1 "register_operand" " r")
2914 (match_operator 4 "subreg_lowpart_operator"
2916 (match_operand:GPR 2 "register_operand" " r")
2917 (match_operand 3 "const_si_mask_operand"))]))))]
2923 (any_shift:SI (match_dup 1)
2925 "operands[2] = gen_lowpart (QImode, operands[2]);"
2926 [(set_attr "type" "shift")
2927 (set_attr "mode" "SI")])
2929 ;; We can reassociate the shift and bitwise operator which may allow us to
2930 ;; reduce the immediate operand of the bitwise operator into a range that
2931 ;; fits in a simm12.
2933 ;; We need to make sure that shifting does not lose any bits, particularly
2934 ;; for IOR/XOR. It probably doesn't matter for AND.
2936 ;; We also don't want to do this if the immediate already fits in a simm12
2937 ;; field, or is a single bit operand, or when we might be able to generate
2938 ;; a shift-add sequence via the splitter in bitmanip.md for masks
2939 ;; that are a run of consecutive ones.
;; Reassociate (op1 << n) BITOP mask into (op1 BITOP (mask >> n)) << n so
;; the shifted-down mask fits a simm12.  The visible condition requires:
;; the original mask does not fit simm12 but the shifted one does; the
;; mask has more than one bit set (single bits are Zbs territory); the
;; Zba shift-add splitter would not apply; and no mask bits fall below
;; the shift amount (so shifting the mask down is lossless).
2940 (define_insn_and_split "<optab>_shift_reverse<X:mode>"
2941 [(set (match_operand:X 0 "register_operand" "=r")
2942 (any_bitwise:X (ashift:X (match_operand:X 1 "register_operand" "r")
2943 (match_operand 2 "immediate_operand" "n"))
2944 (match_operand 3 "immediate_operand" "n")))]
2945 "(!SMALL_OPERAND (INTVAL (operands[3]))
2946 && SMALL_OPERAND (INTVAL (operands[3]) >> INTVAL (operands[2]))
2947 && popcount_hwi (INTVAL (operands[3])) > 1
2948 && (!(TARGET_64BIT && TARGET_ZBA)
2949 || !consecutive_bits_operand (operands[3], VOIDmode)
2950 || !imm123_operand (operands[2], VOIDmode))
2951 && (INTVAL (operands[3]) & ((1ULL << INTVAL (operands[2])) - 1)) == 0)"
2954 [(set (match_dup 0) (any_bitwise:X (match_dup 1) (match_dup 3)))
2955 (set (match_dup 0) (ashift:X (match_dup 0) (match_dup 2)))]
2957 operands[3] = GEN_INT (INTVAL (operands[3]) >> INTVAL (operands[2]));
2959 [(set_attr "type" "shift")
2960 (set_attr "mode" "<X:MODE>")])
2962 ;; Non-canonical, but can be formed by ree when combine is not successful at
2963 ;; producing one of the two canonical patterns below.
;; srliw: the word logical-right-shift result is already zero-extended to
;; DImode for any nonzero count in 1..31.
2964 (define_insn "*lshrsi3_zero_extend_1"
2965 [(set (match_operand:DI 0 "register_operand" "=r")
2967 (lshiftrt:SI (match_operand:SI 1 "register_operand" " r")
2968 (match_operand 2 "const_int_operand"))))]
2969 "TARGET_64BIT && (INTVAL (operands[2]) & 0x1f) > 0"
2971 operands[2] = GEN_INT (INTVAL (operands[2]) & 0x1f);
2973 return "srliw\t%0,%1,%2";
2975 [(set_attr "type" "shift")
2976 (set_attr "mode" "SI")])
2978 ;; Canonical form for a sign/zero-extend of a logical right shift.
2979 ;; Special case: extract MSB bits of lower 32-bit word
;; size + position == 32 means the field is the top of the low word, so a
;; single sraiw/srliw (via <extract_sidi_shift>) performs the extraction.
2980 (define_insn "*lshrsi3_extend_2"
2981 [(set (match_operand:DI 0 "register_operand" "=r")
2982 (any_extract:DI (match_operand:DI 1 "register_operand" " r")
2983 (match_operand 2 "const_int_operand")
2984 (match_operand 3 "const_int_operand")))]
2985 "(TARGET_64BIT && (INTVAL (operands[3]) > 0)
2986 && (INTVAL (operands[2]) + INTVAL (operands[3]) == 32))"
2988 return "<extract_sidi_shift>\t%0,%1,%3";
2990 [(set_attr "type" "shift")
2991 (set_attr "mode" "SI")])
2993 ;; Canonical form for a zero-extend of a logical right shift when the
2994 ;; shift count is 31.
;; (x < 0) on the low word == bit 31, i.e. srliw by 31.
2995 (define_insn "*lshrsi3_zero_extend_3"
2996 [(set (match_operand:DI 0 "register_operand" "=r")
2997 (lt:DI (match_operand:SI 1 "register_operand" " r")
3001 return "srliw\t%0,%1,31";
3003 [(set_attr "type" "shift")
3004 (set_attr "mode" "SI")])
3006 ;; Canonical form for a extend of a logical shift right (sign/zero extraction).
3007 ;; Special cases, that are ignored (handled elsewhere):
3008 ;; * Single-bit extraction (Zbs/XTheadBs)
3009 ;; * Single-bit extraction (Zicondops/XVentanaCondops)
3010 ;; * Single-bit extraction (SFB)
3011 ;; * Extraction instruction th.ext(u) (XTheadBb)
3012 ;; * lshrsi3_extend_2 (see above)
;; Generic bitfield extract: split after reload into shift-left (to drop
;; the high bits) then arithmetic/logical shift-right (to position the
;; field), using scratch register 4; shift amounts computed below.
3013 (define_insn_and_split "*<any_extract:optab><GPR:mode>3"
3014 [(set (match_operand:GPR 0 "register_operand" "=r")
3016 (match_operand:GPR 1 "register_operand" " r")
3017 (match_operand 2 "const_int_operand")
3018 (match_operand 3 "const_int_operand")))
3019 (clobber (match_scratch:GPR 4 "=&r"))]
3020 "!((TARGET_ZBS || TARGET_XTHEADBS || TARGET_ZICOND
3021 || TARGET_XVENTANACONDOPS || TARGET_SFB_ALU)
3022 && (INTVAL (operands[2]) == 1))
3025 && (INTVAL (operands[3]) > 0)
3026 && (INTVAL (operands[2]) + INTVAL (operands[3]) == 32))"
3028 "&& reload_completed"
3030 (ashift:GPR (match_dup 1) (match_dup 2)))
3032 (<extract_shift>:GPR (match_dup 4) (match_dup 3)))]
3034 int regbits = GET_MODE_BITSIZE (GET_MODE (operands[0])).to_constant ();
3035 int sizebits = INTVAL (operands[2]);
3036 int startbits = INTVAL (operands[3]);
3037 int lshamt = regbits - sizebits - startbits;
3038 int rshamt = lshamt + startbits;
3039 operands[2] = GEN_INT (lshamt);
3040 operands[3] = GEN_INT (rshamt);
3042 [(set_attr "type" "shift")
3043 (set_attr "mode" "<GPR:MODE>")])
3045 ;; Handle AND with 2^N-1 for N from 12 to XLEN. This can be split into
3046 ;; two logical shifts. Otherwise it requires 3 instructions: lui,
3047 ;; xor/addi/srli, and.
3049 ;; Generating a temporary for the shift output gives better combiner results;
3050 ;; and also fixes a problem where op0 could be a paradoxical reg and shifting
3051 ;; by amounts larger than the size of the SUBREG_REG doesn't work.
3053 [(set (match_operand:GPR 0 "register_operand")
3054 (and:GPR (match_operand:GPR 1 "register_operand")
3055 (match_operand:GPR 2 "p2m1_shift_operand")))
3056 (clobber (match_operand:GPR 3 "register_operand"))]
3059 (ashift:GPR (match_dup 1) (match_dup 2)))
3061 (lshiftrt:GPR (match_dup 3) (match_dup 2)))]
3063 /* Op2 is a VOIDmode constant, so get the mode size from op1. */
3064 operands[2] = GEN_INT (GET_MODE_BITSIZE (GET_MODE (operands[1])).to_constant ()
3065 - exact_log2 (INTVAL (operands[2]) + 1));
3068 ;; Handle AND with 0xF...F0...0 where there are 32 to 63 zeros. This can be
3069 ;; split into two shifts. Otherwise it requires 3 instructions: li, sll, and.
;; NOTE(review): "(define_split" header not visible in this extract.  Body:
;; rewrites (and:DI reg 0xF...F0...0) as shift-right then shift-left by the
;; mask's trailing-zero count (ctz), using operand 3 as the intermediate.
3071 [(set (match_operand:DI 0 "register_operand")
3072 (and:DI (match_operand:DI 1 "register_operand")
3073 (match_operand:DI 2 "high_mask_shift_operand")))
3074 (clobber (match_operand:DI 3 "register_operand"))]
3077 (lshiftrt:DI (match_dup 1) (match_dup 2)))
3079 (ashift:DI (match_dup 3) (match_dup 2)))]
3081 operands[2] = GEN_INT (ctz_hwi (INTVAL (operands[2])));
3084 ;; Handle SImode to DImode zero-extend combined with a left shift. This can
3085 ;; occur when unsigned int is used for array indexing. Split this into two
3086 ;; shifts. Otherwise we can get 3 shifts.
;; Matches (and (ashift reg N) mask) where mask >> N == 0xffffffff, i.e. a
;; shifted SI->DI zero-extension; guarded out when Zba can do it directly.
;; After reload, splits into shift-left-32 then shift-right-(32-N).
;; NOTE(review): several original lines are missing from this extract
;; (numbering gaps 3095->3097, 3097->3099, etc.) — the output template and one
;; set are not visible here.
3088 (define_insn_and_split "zero_extendsidi2_shifted"
3089 [(set (match_operand:DI 0 "register_operand" "=r")
3090 (and:DI (ashift:DI (match_operand:DI 1 "register_operand" "r")
3091 (match_operand:QI 2 "immediate_operand" "I"))
3092 (match_operand 3 "immediate_operand" "")))
3093 (clobber (match_scratch:DI 4 "=&r"))]
3094 "TARGET_64BIT && !TARGET_ZBA
3095 && ((INTVAL (operands[3]) >> INTVAL (operands[2])) == 0xffffffff)"
3097 "&& reload_completed"
3099 (ashift:DI (match_dup 1) (const_int 32)))
3101 (lshiftrt:DI (match_dup 4) (match_dup 5)))]
3102 "operands[5] = GEN_INT (32 - (INTVAL (operands [2])));"
3103 [(set_attr "type" "shift")
3104 (set_attr "mode" "DI")])
3107 ;; ....................
3109 ;; CONDITIONAL BRANCHES
3111 ;; ....................
3113 ;; Conditional branches
;; Equality branch on (reg & shifted-constant) where the constant does not fit
;; an I-type immediate: after reload, split into srl by the mask's trailing
;; zeros, an AND with the now-small mask, then the branch on the scratch.
3115 (define_insn_and_split "*branch<ANYI:mode>_shiftedarith_equals_zero"
3117 (if_then_else (match_operator 1 "equality_operator"
3118 [(and:ANYI (match_operand:ANYI 2 "register_operand" "r")
3119 (match_operand 3 "shifted_const_arith_operand" "i"))
3121 (label_ref (match_operand 0 "" ""))
3123 (clobber (match_scratch:X 4 "=&r"))]
3124 "!SMALL_OPERAND (INTVAL (operands[3]))"
3126 "&& reload_completed"
3127 [(set (match_dup 4) (lshiftrt:X (subreg:X (match_dup 2) 0) (match_dup 6)))
3128 (set (match_dup 4) (and:X (match_dup 4) (match_dup 7)))
3129 (set (pc) (if_then_else (match_op_dup 1 [(match_dup 4) (const_int 0)])
3130 (label_ref (match_dup 0)) (pc)))]
3132 HOST_WIDE_INT mask = INTVAL (operands[3]);
3133 int trailing = ctz_hwi (mask);
3135 operands[6] = GEN_INT (trailing);
3136 operands[7] = GEN_INT (mask >> trailing);
3138 [(set_attr "type" "branch")])
;; eq/ne branch of (reg & C1) against C2 where both constants are large but
;; share enough trailing zeros that shifting both right by the common count
;; (COMMON_TRAILING_ZEROS) makes them small immediates; two scratches hold
;; the masked value and the shifted comparison constant.
3140 (define_insn_and_split "*branch<ANYI:mode>_shiftedarith_<optab>_shifted"
3142 (if_then_else (any_eq
3143 (and:ANYI (match_operand:ANYI 1 "register_operand" "r")
3144 (match_operand 2 "shifted_const_arith_operand" "i"))
3145 (match_operand 3 "shifted_const_arith_operand" "i"))
3146 (label_ref (match_operand 0 "" ""))
3148 (clobber (match_scratch:X 4 "=&r"))
3149 (clobber (match_scratch:X 5 "=&r"))]
3150 "!SMALL_OPERAND (INTVAL (operands[2]))
3151 && !SMALL_OPERAND (INTVAL (operands[3]))
3152 && SMALL_AFTER_COMMON_TRAILING_SHIFT (INTVAL (operands[2]),
3153 INTVAL (operands[3]))"
3155 "&& reload_completed"
3156 [(set (match_dup 4) (lshiftrt:X (match_dup 1) (match_dup 7)))
3157 (set (match_dup 4) (and:X (match_dup 4) (match_dup 8)))
3158 (set (match_dup 5) (match_dup 9))
3159 (set (pc) (if_then_else (any_eq (match_dup 4) (match_dup 5))
3160 (label_ref (match_dup 0)) (pc)))]
3162 HOST_WIDE_INT mask1 = INTVAL (operands[2]);
3163 HOST_WIDE_INT mask2 = INTVAL (operands[3]);
3164 int trailing_shift = COMMON_TRAILING_ZEROS (mask1, mask2);
3166 operands[7] = GEN_INT (trailing_shift);
3167 operands[8] = GEN_INT (mask1 >> trailing_shift);
3168 operands[9] = GEN_INT (mask2 >> trailing_shift);
3170 [(set_attr "type" "branch")])
;; Equality branch on (reg & consecutive-bits-mask): isolate the bit-field
;; with a shift-left by the mask's leading-zero count then a logical
;; shift-right by leading+trailing, avoiding a constant load.
3172 (define_insn_and_split "*branch<ANYI:mode>_shiftedmask_equals_zero"
3174 (if_then_else (match_operator 1 "equality_operator"
3175 [(and:ANYI (match_operand:ANYI 2 "register_operand" "r")
3176 (match_operand 3 "consecutive_bits_operand" "i"))
3178 (label_ref (match_operand 0 "" ""))
3180 (clobber (match_scratch:X 4 "=&r"))]
3181 "(INTVAL (operands[3]) >= 0 || !partial_subreg_p (operands[2]))
3182 && popcount_hwi (INTVAL (operands[3])) > 1
3183 && !SMALL_OPERAND (INTVAL (operands[3]))"
3185 "&& reload_completed"
3186 [(set (match_dup 4) (ashift:X (subreg:X (match_dup 2) 0) (match_dup 6)))
3187 (set (match_dup 4) (lshiftrt:X (match_dup 4) (match_dup 7)))
3188 (set (pc) (if_then_else (match_op_dup 1 [(match_dup 4) (const_int 0)])
3189 (label_ref (match_dup 0)) (pc)))]
3191 unsigned HOST_WIDE_INT mask = INTVAL (operands[3]);
3192 int leading = clz_hwi (mask);
3193 int trailing = ctz_hwi (mask);
3195 operands[6] = GEN_INT (leading);
3196 operands[7] = GEN_INT (leading + trailing);
3198 [(set_attr "type" "branch")])
;; Conditional branch on an ordered comparison of two X-mode registers.
;; When the target is out of direct branch range (length 12), emit the
;; inverted short branch around an unconditional far jump instead.
3200 (define_insn "*branch<mode>"
3203 (match_operator 1 "ordered_comparison_operator"
3204 [(match_operand:X 2 "register_operand" "r")
3205 (match_operand:X 3 "reg_or_0_operand" "rJ")])
3206 (label_ref (match_operand 0 "" ""))
3210 if (get_attr_length (insn) == 12)
3211 return "b%N1\t%2,%z3,1f; jump\t%l0,ra; 1:";
3213 return "b%C1\t%2,%z3,%l0";
3215 [(set_attr "type" "branch")
3216 (set_attr "mode" "none")])
3218 ;; Conditional move and add patterns.
;; Conditional-move expander; available with SFB, XTheadCondMov or
;; Zicond-like extensions.  Delegates to riscv_expand_conditional_move.
;; NOTE(review): lines 3226-3227 (condition tail / DONE-FAIL logic) are
;; missing from this extract.
3220 (define_expand "mov<mode>cc"
3221 [(set (match_operand:GPR 0 "register_operand")
3222 (if_then_else:GPR (match_operand 1 "comparison_operator")
3223 (match_operand:GPR 2 "movcc_operand")
3224 (match_operand:GPR 3 "movcc_operand")))]
3225 "TARGET_SFB_ALU || TARGET_XTHEADCONDMOV || TARGET_ZICOND_LIKE
3228 if (riscv_expand_conditional_move (operands[0], operands[1],
3229 operands[2], operands[3]))
;; Conditional-add expander: materialize the comparison as 0/1 in reg0
;; (integer or FP scc), turn it into an all-zeros/all-ones mask in reg1
;; (via +(-1) or negation depending on `invert`), AND the mask with op3 and
;; add op2 — i.e. op0 = op2 + (cond ? op3 : 0) without a branch.
;; NOTE(review): some interleaved lines (e.g. 3261-3264 around the invert
;; branch) are missing from this extract.
3235 (define_expand "add<mode>cc"
3236 [(match_operand:GPR 0 "register_operand")
3237 (match_operand 1 "comparison_operator")
3238 (match_operand:GPR 2 "arith_operand")
3239 (match_operand:GPR 3 "arith_operand")]
3242 rtx cmp = operands[1];
3243 rtx cmp0 = XEXP (cmp, 0);
3244 rtx cmp1 = XEXP (cmp, 1);
3245 machine_mode mode0 = GET_MODE (cmp0);
3247 /* We only handle word mode integer compares for now.  */
3248 if (INTEGRAL_MODE_P (mode0) && mode0 != word_mode)
3251 enum rtx_code code = GET_CODE (cmp);
3252 rtx reg0 = gen_reg_rtx (<MODE>mode);
3253 rtx reg1 = gen_reg_rtx (<MODE>mode);
3254 rtx reg2 = gen_reg_rtx (<MODE>mode);
3255 bool invert = false;
3257 if (INTEGRAL_MODE_P (mode0))
3258 riscv_expand_int_scc (reg0, code, cmp0, cmp1, &invert);
3259 else if (FLOAT_MODE_P (mode0) && fp_scc_comparison (cmp, GET_MODE (cmp)))
3260 riscv_expand_float_scc (reg0, code, cmp0, cmp1, &invert);
3265 riscv_emit_binary (PLUS, reg1, reg0, constm1_rtx);
3267 riscv_emit_unary (NEG, reg1, reg0);
3268 riscv_emit_binary (AND, reg2, reg1, operands[3]);
3269 riscv_emit_binary (PLUS, operands[0], reg2, operands[2]);
3274 ;; Used to implement built-in functions.
;; condjump: built-in-function conditional jump on a precomputed condition.
;; @cbranch<mode>4: standard integer compare-and-branch expander, lowered
;; via riscv_expand_conditional_branch.
3275 (define_expand "condjump"
3277 (if_then_else (match_operand 0)
3278 (label_ref (match_operand 1))
3281 (define_expand "@cbranch<mode>4"
3283 (if_then_else (match_operator 0 "comparison_operator"
3284 [(match_operand:BR 1 "register_operand")
3285 (match_operand:BR 2 "nonmemory_operand")])
3286 (label_ref (match_operand 3 ""))
3290 riscv_expand_conditional_branch (operands[3], GET_CODE (operands[0]),
3291 operands[1], operands[2]);
;; FP compare-and-branch expander.  Non-signed-order comparisons are lowered
;; immediately by riscv_expand_conditional_branch; otherwise operand 4 is
;; allocated as the scratch GPR that will hold the fcmp result.
3295 (define_expand "@cbranch<ANYF:mode>4"
3296 [(parallel [(set (pc)
3297 (if_then_else (match_operator 0 "fp_branch_comparison"
3298 [(match_operand:ANYF 1 "register_operand")
3299 (match_operand:ANYF 2 "register_operand")])
3300 (label_ref (match_operand 3 ""))
3302 (clobber (match_operand 4 ""))])]
3303 "TARGET_HARD_FLOAT || TARGET_ZFINX"
3305 if (!signed_order_operator (operands[0], GET_MODE (operands[0])))
3307 riscv_expand_conditional_branch (operands[3], GET_CODE (operands[0]),
3308 operands[1], operands[2]);
3311 operands[4] = gen_reg_rtx (TARGET_64BIT ? DImode : SImode);
;; FP branch on a natively supported comparison: after reload, split into
;; an fcmp setting the scratch GPR (operand 4) followed by a bne-against-zero
;; branch.  The length attr selects a short or far-branch encoding based on
;; the distance to the label.
3314 (define_insn_and_split "*cbranch<ANYF:mode>4"
3316 (if_then_else (match_operator 1 "fp_native_comparison"
3317 [(match_operand:ANYF 2 "register_operand" "f")
3318 (match_operand:ANYF 3 "register_operand" "f")])
3319 (label_ref (match_operand 0 ""))
3321 (clobber (match_operand:X 4 "register_operand" "=r"))]
3322 "TARGET_HARD_FLOAT || TARGET_ZFINX"
3324 "&& reload_completed"
3326 (match_op_dup:X 1 [(match_dup 2) (match_dup 3)]))
3328 (if_then_else (ne:X (match_dup 4) (const_int 0))
3329 (label_ref (match_operand 0))
3332 [(set_attr "type" "branch")
3333 (set (attr "length")
3334 (if_then_else (and (le (minus (match_dup 0) (pc))
3336 (le (minus (pc) (match_dup 0))
3339 (if_then_else (and (le (minus (match_dup 0) (pc))
3340 (const_int 1048564))
3341 (le (minus (pc) (match_dup 0))
3342 (const_int 1048576)))
;; FP branch for the NE comparison (no native fne): after reload, compute
;; feq into the scratch GPR and branch when that result is zero, inverting
;; the sense.  Same distance-based length selection as the native variant.
3346 (define_insn_and_split "*cbranch<ANYF:mode>4"
3348 (if_then_else (match_operator 1 "ne_operator"
3349 [(match_operand:ANYF 2 "register_operand" "f")
3350 (match_operand:ANYF 3 "register_operand" "f")])
3351 (label_ref (match_operand 0 ""))
3353 (clobber (match_operand:X 4 "register_operand" "=r"))]
3354 "TARGET_HARD_FLOAT || TARGET_ZFINX"
3356 "&& reload_completed"
3358 (eq:X (match_dup 2) (match_dup 3)))
3360 (if_then_else (eq:X (match_dup 4) (const_int 0))
3361 (label_ref (match_operand 0))
3364 [(set_attr "type" "branch")
3365 (set (attr "length")
3366 (if_then_else (and (le (minus (match_dup 0) (pc))
3368 (le (minus (pc) (match_dup 0))
3371 (if_then_else (and (le (minus (match_dup 0) (pc))
3372 (const_int 1048564))
3373 (le (minus (pc) (match_dup 0))
3374 (const_int 1048576)))
;; Branch on a single extracted bit: shift the bit into the sign position
;; and branch on the sign (EQ becomes GE, NE becomes LT against zero).
;; The range variant below handles an extract whose field reaches the MSB,
;; shifting the field's low bit to bit 0 of the scratch.
3378 (define_insn_and_split "*branch_on_bit<X:mode>"
3381 (match_operator 0 "equality_operator"
3382 [(zero_extract:X (match_operand:X 2 "register_operand" "r")
3384 (match_operand 3 "branch_on_bit_operand"))
3386 (label_ref (match_operand 1))
3388 (clobber (match_scratch:X 4 "=&r"))]
3393 (ashift:X (match_dup 2) (match_dup 3)))
3396 (match_op_dup 0 [(match_dup 4) (const_int 0)])
3397 (label_ref (match_operand 1))
3400 int shift = GET_MODE_BITSIZE (<MODE>mode) - 1 - INTVAL (operands[3]);
3401 operands[3] = GEN_INT (shift);
3403 if (GET_CODE (operands[0]) == EQ)
3404 operands[0] = gen_rtx_GE (<MODE>mode, operands[4], const0_rtx);
3406 operands[0] = gen_rtx_LT (<MODE>mode, operands[4], const0_rtx);
3408 [(set_attr "type" "branch")])
3410 (define_insn_and_split "*branch_on_bit_range<X:mode>"
3413 (match_operator 0 "equality_operator"
3414 [(zero_extract:X (match_operand:X 2 "register_operand" "r")
3415 (match_operand 3 "branch_on_bit_operand")
3418 (label_ref (match_operand 1))
3420 (clobber (match_scratch:X 4 "=&r"))]
3425 (ashift:X (match_dup 2) (match_dup 3)))
3428 (match_op_dup 0 [(match_dup 4) (const_int 0)])
3429 (label_ref (match_operand 1))
3432 operands[3] = GEN_INT (GET_MODE_BITSIZE (<MODE>mode) - INTVAL (operands[3]));
3434 [(set_attr "type" "branch")])
3437 ;; ....................
3439 ;; SETTING A REGISTER FROM A COMPARISON
3441 ;; ....................
3443 ;; Destination is always set in SI mode.
;; cstore expanders — the result is always produced in SImode.
;; Integer version lowers through riscv_expand_int_scc, FP version through
;; riscv_expand_float_scc; the *cstore insn emits the native fcmp
;; instruction (feq/flt/fle family) directly.
3445 (define_expand "cstore<mode>4"
3446 [(set (match_operand:SI 0 "register_operand")
3447 (match_operator:SI 1 "ordered_comparison_operator"
3448 [(match_operand:GPR 2 "register_operand")
3449 (match_operand:GPR 3 "nonmemory_operand")]))]
3452 riscv_expand_int_scc (operands[0], GET_CODE (operands[1]), operands[2],
3457 (define_expand "cstore<mode>4"
3458 [(set (match_operand:SI 0 "register_operand")
3459 (match_operator:SI 1 "fp_scc_comparison"
3460 [(match_operand:ANYF 2 "register_operand")
3461 (match_operand:ANYF 3 "register_operand")]))]
3462 "TARGET_HARD_FLOAT || TARGET_ZFINX"
3464 riscv_expand_float_scc (operands[0], GET_CODE (operands[1]), operands[2],
3469 (define_insn "*cstore<ANYF:mode><X:mode>4"
3470 [(set (match_operand:X 0 "register_operand" "=r")
3471 (match_operator:X 1 "fp_native_comparison"
3472 [(match_operand:ANYF 2 "register_operand" " f")
3473 (match_operand:ANYF 3 "register_operand" " f")]))]
3474 "TARGET_HARD_FLOAT || TARGET_ZFINX"
3475 "f%C1.<fmt>\t%0,%2,%3"
3476 [(set_attr "type" "fcmp")
3477 (set_attr "mode" "<UNITMODE>")])
;; Quiet (non-signalling) FP comparisons.  With Zfa, a single fleq/fltq
;; instruction suffices.  Otherwise: save the FP exception flags (frflags),
;; do the ordinary (signalling) compare, restore the flags (fsflags), and —
;; if SNaNs must be honored — emit an extra snan-check unspec so the compare
;; is not optimized away.
3479 (define_expand "f<quiet_pattern>_quiet<ANYF:mode><X:mode>4"
3480 [(set (match_operand:X 0 "register_operand")
3481 (unspec:X [(match_operand:ANYF 1 "register_operand")
3482 (match_operand:ANYF 2 "register_operand")]
3484 "TARGET_HARD_FLOAT || TARGET_ZFINX"
3486 rtx op0 = operands[0];
3487 rtx op1 = operands[1];
3488 rtx op2 = operands[2];
3491 emit_insn (gen_f<quiet_pattern>_quiet<ANYF:mode><X:mode>4_zfa(op0, op1, op2));
3494 rtx tmp = gen_reg_rtx (SImode);
3495 rtx cmp = gen_rtx_<QUIET_PATTERN> (<X:MODE>mode, op1, op2);
3496 rtx frflags = gen_rtx_UNSPEC_VOLATILE (SImode, gen_rtvec (1, const0_rtx),
3498 rtx fsflags = gen_rtx_UNSPEC_VOLATILE (SImode, gen_rtvec (1, tmp),
3501 emit_insn (gen_rtx_SET (tmp, frflags));
3502 emit_insn (gen_rtx_SET (op0, cmp));
3503 emit_insn (fsflags);
3506 if (HONOR_SNANS (<ANYF:MODE>mode))
3507 emit_insn (gen_rtx_UNSPEC_VOLATILE (<ANYF:MODE>mode,
3508 gen_rtvec (2, op1, op2),
3513 (define_insn "f<quiet_pattern>_quiet<ANYF:mode><X:mode>4_zfa"
3514 [(set (match_operand:X 0 "register_operand" "=r")
3516 [(match_operand:ANYF 1 "register_operand" " f")
3517 (match_operand:ANYF 2 "register_operand" " f")]
3519 "TARGET_HARD_FLOAT && TARGET_ZFA"
3520 "f<quiet_pattern>q.<fmt>\t%0,%1,%2"
3521 [(set_attr "type" "fcmp")
3522 (set_attr "mode" "<UNITMODE>")
3523 (set (attr "length") (const_int 16))])
3525 ;; fclass instruction output bitmap
3526 ;; 0 negative infinity.
3527 ;; 1 negative normal number.
3528 ;; 2 negative subnormal number.
3531 ;; 5 positive subnormal number.
3532 ;; 6 positive normal number.
3533 ;; 7 positive infinity.
;; fclass insn plus the isfinite/isnormal/isinf optab expanders built on it.
;; FCLASS_MASK values (126/66/129) select which fclass result bits count for
;; each optab; the expand ANDs the fclass bitmap with the mask and converts
;; the nonzero test to 0/1 via cstore, sign-promoting the SImode lowpart.
3537 (define_insn "fclass<ANYF:mode><X:mode>"
3538 [(set (match_operand:X 0 "register_operand" "=r")
3539 (unspec [(match_operand:ANYF 1 "register_operand" " f")]
3542 "fclass.<fmt>\t%0,%1";
3543 [(set_attr "type" "fcmp")
3544 (set_attr "mode" "<UNITMODE>")])
3546 ;; Implements optab for isfinite, isnormal, isinf
3548 (define_int_iterator FCLASS_MASK [126 66 129])
3549 (define_int_attr fclass_optab
3554 (define_expand "<FCLASS_MASK:fclass_optab><ANYF:mode>2"
3555 [(match_operand 0 "register_operand" "=r")
3556 (match_operand:ANYF 1 "register_operand" " f")
3557 (const_int FCLASS_MASK)]
3560 if (GET_MODE (operands[0]) != SImode
3561 && GET_MODE (operands[0]) != word_mode)
3564 rtx t = gen_reg_rtx (word_mode);
3565 rtx t_op0 = gen_reg_rtx (word_mode);
3568 emit_insn (gen_fclass<ANYF:mode>di (t, operands[1]));
3570 emit_insn (gen_fclass<ANYF:mode>si (t, operands[1]));
3572 riscv_emit_binary (AND, t, t, GEN_INT (<FCLASS_MASK>));
3573 rtx cmp = gen_rtx_NE (word_mode, t, const0_rtx);
3574 emit_insn (gen_cstore<mode>4 (t_op0, cmp, t, const0_rtx));
3578 t_op0 = gen_lowpart (SImode, t_op0);
3579 SUBREG_PROMOTED_VAR_P (t_op0) = 1;
3580 SUBREG_PROMOTED_SET (t_op0, SRP_SIGNED);
3583 emit_move_insn (operands[0], t_op0);
;; Set-on-comparison insns (slt family).  sgt/sge/slt/sle come in signed and
;; unsigned flavors via the <u> iterator; sle is implemented as "slt against
;; constant+1" (the sle_operand predicate guarantees no overflow — presumed,
;; its definition is outside this extract).
3587 (define_insn "*seq_zero_<X:mode><GPR:mode>"
3588 [(set (match_operand:GPR 0 "register_operand" "=r")
3589 (eq:GPR (match_operand:X 1 "register_operand" " r")
3593 [(set_attr "type" "slt")
3594 (set_attr "mode" "<X:MODE>")])
3596 (define_insn "*sne_zero_<X:mode><GPR:mode>"
3597 [(set (match_operand:GPR 0 "register_operand" "=r")
3598 (ne:GPR (match_operand:X 1 "register_operand" " r")
3602 [(set_attr "type" "slt")
3603 (set_attr "mode" "<X:MODE>")])
3605 (define_insn "*sgt<u>_<X:mode><GPR:mode>"
3606 [(set (match_operand:GPR 0 "register_operand" "= r")
3607 (any_gt:GPR (match_operand:X 1 "register_operand" "  r")
3608 (match_operand:X 2 "reg_or_0_operand" " rJ")))]
3611 [(set_attr "type" "slt")
3612 (set_attr "mode" "<X:MODE>")])
3614 (define_insn "*sge<u>_<X:mode><GPR:mode>"
3615 [(set (match_operand:GPR 0 "register_operand" "=r")
3616 (any_ge:GPR (match_operand:X 1 "register_operand" " r")
3619 "slti<u>\t%0,zero,%1"
3620 [(set_attr "type" "slt")
3621 (set_attr "mode" "<X:MODE>")])
3623 (define_insn "@slt<u>_<X:mode><GPR:mode>3"
3624 [(set (match_operand:GPR 0 "register_operand" "= r")
3625 (any_lt:GPR (match_operand:X 1 "register_operand" "  r")
3626 (match_operand:X 2 "arith_operand" " rI")))]
3628 "slt%i2<u>\t%0,%1,%2"
3629 [(set_attr "type" "slt")
3630 (set_attr "mode" "<X:MODE>")])
3632 (define_insn "*sle<u>_<X:mode><GPR:mode>"
3633 [(set (match_operand:GPR 0 "register_operand" "=r")
3634 (any_le:GPR (match_operand:X 1 "register_operand" " r")
3635 (match_operand:X 2 "sle_operand" "")))]
3638 operands[2] = GEN_INT (INTVAL (operands[2]) + 1);
3639 return "slt%i2<u>\t%0,%1,%2";
3641 [(set_attr "type" "slt")
3642 (set_attr "mode" "<X:MODE>")])
3645 ;; ....................
3647 ;; UNCONDITIONAL BRANCHES
3649 ;; ....................
3651 ;; Unconditional branches.
;; Unconditional, indirect, and table jumps.
;; NOTE(review): the "(define_insn "jump"" header for the first pattern is not
;; visible in this extract.  Far jumps (length 8) use jump\t%l0,ra, which
;; clobbers ra and disturbs return-stack prediction (see inline comment).
;; indirect_jump/tablejump expanders pick the si/di gen function by Pmode;
;; tablejump adds the table base for PC-relative case vectors first.
3654 [(set (pc) (label_ref (match_operand 0 "" "")))]
3657 /* Hopefully this does not happen often as this is going
3658 to clobber $ra and muck up the return stack predictors.  */
3659 if (get_attr_length (insn) == 8)
3660 return "jump\t%l0,ra";
3664 [(set_attr "type" "jump")
3665 (set_attr "mode" "none")])
3667 (define_expand "indirect_jump"
3668 [(set (pc) (match_operand 0 "register_operand"))]
3671 operands[0] = force_reg (Pmode, operands[0]);
3672 if (Pmode == SImode)
3673 emit_jump_insn (gen_indirect_jumpsi (operands[0]));
3675 emit_jump_insn (gen_indirect_jumpdi (operands[0]));
3679 (define_insn "indirect_jump<mode>"
3680 [(set (pc) (match_operand:P 0 "register_operand" "l"))]
3683 [(set_attr "type" "jalr")
3684 (set_attr "mode" "none")])
3686 (define_expand "tablejump"
3687 [(set (pc) (match_operand 0 "register_operand" ""))
3688 (use (label_ref (match_operand 1 "" "")))]
3691 if (CASE_VECTOR_PC_RELATIVE)
3692 operands[0] = expand_simple_binop (Pmode, PLUS, operands[0],
3693 gen_rtx_LABEL_REF (Pmode, operands[1]),
3694 NULL_RTX, 0, OPTAB_DIRECT);
3696 if (CASE_VECTOR_PC_RELATIVE && Pmode == DImode)
3697 emit_jump_insn (gen_tablejumpdi (operands[0], operands[1]));
3699 emit_jump_insn (gen_tablejumpsi (operands[0], operands[1]));
3703 (define_insn "tablejump<mode>"
3704 [(set (pc) (match_operand:GPR 0 "register_operand" "l"))
3705 (use (label_ref (match_operand 1 "" "")))]
3708 [(set_attr "type" "jalr")
3709 (set_attr "mode" "none")])
3712 ;; ....................
3714 ;; Function prologue/epilogue
3716 ;; ....................
;; Prologue/epilogue expanders delegate to riscv_expand_prologue/epilogue;
;; "blockage" is a zero-length scheduling barrier (type ghost).
3719 (define_expand "prologue"
3723 riscv_expand_prologue ();
3727 ;; Block any insns from being moved before this point, since the
3728 ;; profiling call to mcount can use various registers that aren't
3729 ;; saved or used to pass arguments.
3731 (define_insn "blockage"
3732 [(unspec_volatile [(const_int 0)] UNSPECV_BLOCKAGE)]
3735 [(set_attr "type" "ghost")
3736 (set_attr "mode" "none")])
3738 (define_expand "epilogue"
3742 riscv_expand_epilogue (NORMAL_RETURN);
3746 (define_expand "sibcall_epilogue"
3750 riscv_expand_epilogue (SIBCALL_RETURN);
3754 ;; Trivial return. Make it look like a normal return insn as that
3755 ;; allows jump optimizations to work better.
;; Return patterns and exception-return support.  eh_return converts the
;; handler address to word_mode, stores it over the saved return address via
;; eh_set_lr_{si,di} (split later through riscv_set_return_address), then
;; jumps through eh_return_internal, which re-expands the epilogue as an
;; EXCEPTION_RETURN after epilogue_completed.
;; NOTE(review): the "(define_split" header for the eh_set_lr split (before
;; embedded line 3816) is missing from this extract.
3757 (define_expand "return"
3759 "riscv_can_use_return_insn ()"
3762 (define_insn "simple_return"
3766 return riscv_output_return ();
3768 [(set_attr "type" "jalr")
3769 (set_attr "mode" "none")])
3773 (define_insn "simple_return_internal"
3775 (use (match_operand 0 "pmode_register_operand" ""))]
3778 [(set_attr "type" "jalr")
3779 (set_attr "mode" "none")])
3781 ;; This is used in compiling the unwind routines.
3782 (define_expand "eh_return"
3783 [(use (match_operand 0 "general_operand"))]
3786 if (GET_MODE (operands[0]) != word_mode)
3787 operands[0] = convert_to_mode (word_mode, operands[0], 0);
3789 emit_insn (gen_eh_set_lr_di (operands[0]));
3791 emit_insn (gen_eh_set_lr_si (operands[0]));
3793 emit_jump_insn (gen_eh_return_internal ());
3798 ;; Clobber the return address on the stack.  We can't expand this
3799 ;; until we know where it will be put in the stack frame.
3801 (define_insn "eh_set_lr_si"
3802 [(unspec [(match_operand:SI 0 "register_operand" "r")] UNSPEC_EH_RETURN)
3803 (clobber (match_scratch:SI 1 "=&r"))]
3806 [(set_attr "type" "jump")])
3808 (define_insn "eh_set_lr_di"
3809 [(unspec [(match_operand:DI 0 "register_operand" "r")] UNSPEC_EH_RETURN)
3810 (clobber (match_scratch:DI 1 "=&r"))]
3813 [(set_attr "type" "jump")])
3816 [(unspec [(match_operand 0 "register_operand")] UNSPEC_EH_RETURN)
3817 (clobber (match_scratch 1))]
3821 riscv_set_return_address (operands[0], operands[1]);
3825 (define_insn_and_split "eh_return_internal"
3829 "epilogue_completed"
3831 "riscv_expand_epilogue (EXCEPTION_RETURN); DONE;"
3832 [(set_attr "type" "ret")])
3835 ;; ....................
3839 ;; ....................
;; Sibling (tail) call.  Operand 2 carries the callee's calling-convention id
;; wrapped in UNSPEC_CALLEE_CC; the expander legitimizes the call address
;; before emitting sibcall_internal.
3841 (define_expand "sibcall"
3842 [(parallel [(call (match_operand 0 "")
3843 (match_operand 1 ""))
3845 (match_operand 2 "const_int_operand")
3846 ] UNSPEC_CALLEE_CC))])]
3849 rtx target = riscv_legitimize_call_address (XEXP (operands[0], 0));
3850 emit_call_insn (gen_sibcall_internal (target, operands[1], operands[2]));
3854 (define_insn "sibcall_internal"
3855 [(call (mem:SI (match_operand 0 "call_insn_operand" "j,S,U"))
3856 (match_operand 1 "" ""))
3858 (match_operand 2 "const_int_operand")
3859 ] UNSPEC_CALLEE_CC))]
3860 "SIBLING_CALL_P (insn)"
3865 [(set_attr "type" "call")])
;; Value-returning sibling call; operand 3 is the callee calling-convention
;; id (UNSPEC_CALLEE_CC), mirroring the plain sibcall pattern above.
3867 (define_expand "sibcall_value"
3868 [(parallel [(set (match_operand 0 "")
3869 (call (match_operand 1 "")
3870 (match_operand 2 "")))
3872 (match_operand 3 "const_int_operand")
3873 ] UNSPEC_CALLEE_CC))])]
3876 rtx target = riscv_legitimize_call_address (XEXP (operands[1], 0));
3877 emit_call_insn (gen_sibcall_value_internal (operands[0], target, operands[2],
3882 (define_insn "sibcall_value_internal"
3883 [(set (match_operand 0 "" "")
3884 (call (mem:SI (match_operand 1 "call_insn_operand" "j,S,U"))
3885 (match_operand 2 "" "")))
3887 (match_operand 3 "const_int_operand")
3888 ] UNSPEC_CALLEE_CC))]
3889 "SIBLING_CALL_P (insn)"
3894 [(set_attr "type" "call")])
;; Normal call.  Unlike the sibcall variant, call_internal clobbers
;; RETURN_ADDR_REGNUM (ra) and allows the "l" constraint for the target.
3896 (define_expand "call"
3897 [(parallel [(call (match_operand 0 "")
3898 (match_operand 1 ""))
3900 (match_operand 2 "const_int_operand")
3901 ] UNSPEC_CALLEE_CC))])]
3904 rtx target = riscv_legitimize_call_address (XEXP (operands[0], 0));
3905 emit_call_insn (gen_call_internal (target, operands[1], operands[2]));
3909 (define_insn "call_internal"
3910 [(call (mem:SI (match_operand 0 "call_insn_operand" "l,S,U"))
3911 (match_operand 1 "" ""))
3913 (match_operand 2 "const_int_operand")
3914 ] UNSPEC_CALLEE_CC))
3915 (clobber (reg:SI RETURN_ADDR_REGNUM))]
3921 [(set_attr "type" "call")])
;; Value-returning call; clobbers ra like call_internal, with the callee
;; calling-convention id in operand 3 (UNSPEC_CALLEE_CC).
3923 (define_expand "call_value"
3924 [(parallel [(set (match_operand 0 "")
3925 (call (match_operand 1 "")
3926 (match_operand 2 "")))
3928 (match_operand 3 "const_int_operand")
3929 ] UNSPEC_CALLEE_CC))])]
3932 rtx target = riscv_legitimize_call_address (XEXP (operands[1], 0));
3933 emit_call_insn (gen_call_value_internal (operands[0], target, operands[2],
3938 (define_insn "call_value_internal"
3939 [(set (match_operand 0 "" "")
3940 (call (mem:SI (match_operand 1 "call_insn_operand" "l,S,U"))
3941 (match_operand 2 "" "")))
3943 (match_operand 3 "const_int_operand")
3944 ] UNSPEC_CALLEE_CC))
3945 (clobber (reg:SI RETURN_ADDR_REGNUM))]
3951 [(set_attr "type" "call")])
3953 ;; Call subroutine returning any type.
;; untyped_call: emit a RISCV_CC_BASE call, then copy each result location
;; listed in operand 2, fenced by a blockage.
;; NOTE(review): embedded lines 3976-3981 are missing here — the attrs at
;; 3982-3983 belong to a pattern (presumably "nop") whose header is not
;; visible in this extract; 3986-3989 is the trap pattern, header also cut.
3955 (define_expand "untyped_call"
3956 [(parallel [(call (match_operand 0 "")
3958 (match_operand 1 "")
3959 (match_operand 2 "")])]
3964 /* Untyped calls always use the RISCV_CC_BASE calling convention.  */
3965 emit_call_insn (gen_call (operands[0], const0_rtx,
3966 gen_int_mode (RISCV_CC_BASE, SImode)));
3968 for (i = 0; i < XVECLEN (operands[2], 0); i++)
3970 rtx set = XVECEXP (operands[2], 0, i);
3971 riscv_emit_move (SET_DEST (set), SET_SRC (set));
3974 emit_insn (gen_blockage ());
3982 [(set_attr "type" "nop")
3983 (set_attr "mode" "none")])
3986 [(trap_if (const_int 1) (const_int 0))]
3989 [(set_attr "type" "trap")])
3991 ;; Must use the registers that we save to prevent the rename reg optimization
3992 ;; pass from using them before the gpr_save pattern when shrink wrapping
3993 ;; occurs. See bug 95252 for instance.
;; Save/restore-libcall patterns (-msave-restore): call the out-of-line
;; __riscv_save_N / __riscv_restore_N millicode routines; the restore tail
;; also performs the function return.
3995 (define_insn "gpr_save"
3996 [(match_parallel 1 "gpr_save_operation"
3997 [(unspec_volatile [(match_operand 0 "const_int_operand")]
3998 UNSPECV_GPR_SAVE)])]
4000 "call\tt0,__riscv_save_%0"
4001 [(set_attr "type" "call")])
4003 (define_insn "gpr_restore"
4004 [(unspec_volatile [(match_operand 0 "const_int_operand")] UNSPECV_GPR_RESTORE)]
4006 "tail\t__riscv_restore_%0"
4007 [(set_attr "type" "call")])
4009 (define_insn "gpr_restore_return"
4011 (use (match_operand 0 "pmode_register_operand" ""))
4015 [(set_attr "type" "ret")])
;; FP control/status register access: read/write fcsr and fflags (output
;; templates for the CSR instructions are on lines missing from this
;; extract).  *riscv_fsnvsnan emits a discarded feq to raise the invalid
;; flag for signalling NaNs (used by the quiet-compare expander above).
4017 (define_insn "riscv_frcsr"
4018 [(set (match_operand:SI 0 "register_operand" "=r")
4019 (unspec_volatile:SI [(const_int 0)] UNSPECV_FRCSR))]
4020 "TARGET_HARD_FLOAT || TARGET_ZFINX"
4022 [(set_attr "type" "fmove")])
4024 (define_insn "riscv_fscsr"
4025 [(unspec_volatile [(match_operand:SI 0 "register_operand" "r")] UNSPECV_FSCSR)]
4026 "TARGET_HARD_FLOAT || TARGET_ZFINX"
4028 [(set_attr "type" "fmove")])
4030 (define_insn "riscv_frflags"
4031 [(set (match_operand:SI 0 "register_operand" "=r")
4032 (unspec_volatile:SI [(const_int 0)] UNSPECV_FRFLAGS))]
4033 "TARGET_HARD_FLOAT || TARGET_ZFINX"
4035 [(set_attr "type" "fmove")])
4037 (define_insn "riscv_fsflags"
4038 [(unspec_volatile [(match_operand:SI 0 "csr_operand" "rK")] UNSPECV_FSFLAGS)]
4039 "TARGET_HARD_FLOAT || TARGET_ZFINX"
4041 [(set_attr "type" "fmove")])
4043 (define_insn "*riscv_fsnvsnan<mode>2"
4044 [(unspec_volatile [(match_operand:ANYF 0 "register_operand" "f")
4045 (match_operand:ANYF 1 "register_operand" "f")]
4047 "TARGET_HARD_FLOAT || TARGET_ZFINX"
4048 "feq.<fmt>\tzero,%0,%1"
4049 [(set_attr "type" "fcmp")
4050 (set_attr "mode" "<UNITMODE>")])
;; Privileged trap-return instructions (machine/supervisor/user level);
;; output templates are on lines missing from this extract.
4052 (define_insn "riscv_mret"
4054 (unspec_volatile [(const_int 0)] UNSPECV_MRET)]
4057 [(set_attr "type" "ret")])
4059 (define_insn "riscv_sret"
4061 (unspec_volatile [(const_int 0)] UNSPECV_SRET)]
4064 [(set_attr "type" "ret")])
4066 (define_insn "riscv_uret"
4068 (unspec_volatile [(const_int 0)] UNSPECV_URET)]
4071 [(set_attr "type" "ret")])
;; Zero-length ghost insn tying two stack-related registers together through
;; a blockage-style BLK memory set, so the scheduler keeps their frame
;; accesses ordered.
4073 (define_insn "stack_tie<mode>"
4074 [(set (mem:BLK (scratch))
4075 (unspec:BLK [(match_operand:X 0 "register_operand" "r")
4076 (match_operand:X 1 "register_operand" "r")]
4078 "!rtx_equal_p (operands[0], operands[1])"
4080 [(set_attr "type" "ghost")
4081 (set_attr "length" "0")]
4084 ;; This fixes a failure with gcc.c-torture/execute/pr64242.c at -O2 for a
4085 ;; 32-bit target when using -mtune=sifive-7-series. The first sched pass
4086 ;; runs before register elimination, and we have a non-obvious dependency
4087 ;; between a use of the soft fp and a set of the hard fp. We fix this by
4088 ;; emitting a clobber using the hard fp between the two insns.
;; See the comment above: emits a hard-fp MEM clobber after the move so the
;; scheduler cannot hoist the hard-fp restore over a use of the soft fp
;; (PR-style fix for sifive-7 tuning at -O2, per the preceding comment).
4089 (define_expand "restore_stack_nonlocal"
4090 [(match_operand 0 "register_operand")
4091 (match_operand 1 "memory_operand")]
4094 emit_move_insn (operands[0], operands[1]);
4095 /* Prevent the following hard fp restore from being moved before the move
4096 insn above which uses a copy of the soft fp reg.  */
4097 emit_clobber (gen_rtx_MEM (BLKmode, hard_frame_pointer_rtx));
4101 ;; Named pattern for expanding thread pointer reference.
;; Thread-pointer read; the RHS of the set (embedded lines 4104+) is missing
;; from this extract.
4102 (define_expand "get_thread_pointer<mode>"
4103 [(set (match_operand:P 0 "register_operand" "=r")
4108 ;; Named patterns for stack smashing protection.
;; Stack-protector canary store.  With SSP_TLS the guard is rewritten to
;; guard-reg + offset; the insn loads the canary, stores it to the slot, and
;; immediately zeroes the scratch so the canary value never outlives the
;; sequence (hence the do-not-split warning below).
4110 (define_expand "stack_protect_set"
4111 [(match_operand 0 "memory_operand")
4112 (match_operand 1 "memory_operand")]
4115 machine_mode mode = GET_MODE (operands[0]);
4116 if (riscv_stack_protector_guard == SSP_TLS)
4118 rtx reg = gen_rtx_REG (Pmode, riscv_stack_protector_guard_reg);
4119 rtx offset = GEN_INT (riscv_stack_protector_guard_offset);
4120 rtx addr = gen_rtx_PLUS (Pmode, reg, offset);
4121 operands[1] = gen_rtx_MEM (Pmode, addr);
4124 emit_insn ((mode == DImode
4125 ? gen_stack_protect_set_di
4126 : gen_stack_protect_set_si) (operands[0], operands[1]));
4130 ;; DO NOT SPLIT THIS PATTERN.  It is important for security reasons that the
4131 ;; canary value does not live beyond the life of this sequence.
4132 (define_insn "stack_protect_set_<mode>"
4133 [(set (match_operand:GPR 0 "memory_operand" "=m")
4134 (unspec:GPR [(match_operand:GPR 1 "memory_operand" "m")]
4136 (set (match_scratch:GPR 2 "=&r") (const_int 0))]
4138 "<load>\t%2, %1\;<store>\t%2, %0\;li\t%2, 0"
4139 [(set_attr "type" "multi")
4140 (set_attr "length" "12")])
;; Stack-protector canary check: XOR the stored canary with the guard (the
;; insn zeroes its scratch afterwards so no canary copy survives), then
;; branch to operand 2 when the XOR result is zero (match).
4142 (define_expand "stack_protect_test"
4143 [(match_operand 0 "memory_operand")
4144 (match_operand 1 "memory_operand")
4149 machine_mode mode = GET_MODE (operands[0]);
4151 result = gen_reg_rtx(mode);
4152 if (riscv_stack_protector_guard == SSP_TLS)
4154 rtx reg = gen_rtx_REG (Pmode, riscv_stack_protector_guard_reg);
4155 rtx offset = GEN_INT (riscv_stack_protector_guard_offset);
4156 rtx addr = gen_rtx_PLUS (Pmode, reg, offset);
4157 operands[1] = gen_rtx_MEM (Pmode, addr);
4159 emit_insn ((mode == DImode
4160 ? gen_stack_protect_test_di
4161 : gen_stack_protect_test_si) (result,
4165 rtx cond = gen_rtx_EQ (VOIDmode, result, const0_rtx);
4166 emit_jump_insn (gen_cbranch4 (mode, cond, result, const0_rtx, operands[2]));
4171 (define_insn "stack_protect_test_<mode>"
4172 [(set (match_operand:GPR 0 "register_operand" "=r")
4173 (unspec:GPR [(match_operand:GPR 1 "memory_operand" "m")
4174 (match_operand:GPR 2 "memory_operand" "m")]
4176 (clobber (match_scratch:GPR 3 "=&r"))]
4178 "<load>\t%3, %1\;<load>\t%0, %2\;xor\t%0, %3, %0\;li\t%3, 0"
4179 [(set_attr "type" "multi")
4180 (set_attr "length" "12")])
;; Cache-management (clean/flush/inval/zero — presumably Zicbom/Zicboz, the
;; conditions and output templates are on missing lines; confirm against the
;; full file) and prefetch patterns.  prefetch selects prefetch.r/.w by
;; operand 1 and prepends an NTL hint (%L2) when Zihintntl is enabled,
;; which also bumps the length per the attr below.
4182 (define_insn "riscv_clean_<mode>"
4183 [(unspec_volatile:X [(match_operand:X 0 "register_operand" "r")]
4187 [(set_attr "type" "store")]
4190 (define_insn "riscv_flush_<mode>"
4191 [(unspec_volatile:X [(match_operand:X 0 "register_operand" "r")]
4195 [(set_attr "type" "store")]
4198 (define_insn "riscv_inval_<mode>"
4199 [(unspec_volatile:X [(match_operand:X 0 "register_operand" "r")]
4203 [(set_attr "type" "store")]
4206 (define_insn "riscv_zero_<mode>"
4207 [(unspec_volatile:X [(match_operand:X 0 "register_operand" "r")]
4211 [(set_attr "type" "store")]
4214 (define_insn "prefetch"
4215 [(prefetch (match_operand 0 "address_operand" "r")
4216 (match_operand 1 "imm5_operand" "i")
4217 (match_operand 2 "const_int_operand" "n"))]
4220 switch (INTVAL (operands[1]))
4222 case 0: return TARGET_ZIHINTNTL ? "%L2prefetch.r\t%a0" : "prefetch.r\t%a0";
4223 case 1: return TARGET_ZIHINTNTL ? "%L2prefetch.w\t%a0" : "prefetch.w\t%a0";
4224 default: gcc_unreachable ();
4227 [(set_attr "type" "store")
4228 (set (attr "length") (if_then_else (and (match_test "TARGET_ZIHINTNTL")
4229 (match_test "IN_RANGE (INTVAL (operands[2]), 0, 2)"))
4231 (const_string "4")))])
4233 (define_insn "riscv_prefetchi_<mode>"
4234 [(unspec_volatile:X [(match_operand:X 0 "address_operand" "r")
4235 (match_operand:X 1 "imm5_operand" "i")]
4239 [(set_attr "type" "store")])
;; Bit-field extract (extv/extzv) and HI->SI multiply-accumulate expanders.
;; Their enabling conditions and expansion bodies are largely on lines
;; missing from this extract (numbering gaps after 4245/4257/4266) — only
;; the pattern skeletons are visible here.
4241 (define_expand "extv<mode>"
4242 [(set (match_operand:GPR 0 "register_operand" "=r")
4243 (sign_extract:GPR (match_operand:GPR 1 "register_operand" "r")
4244 (match_operand 2 "const_int_operand")
4245 (match_operand 3 "const_int_operand")))]
4249 (define_expand "extzv<mode>"
4250 [(set (match_operand:GPR 0 "register_operand" "=r")
4251 (zero_extract:GPR (match_operand:GPR 1 "register_operand" "r")
4252 (match_operand 2 "const_int_operand")
4253 (match_operand 3 "const_int_operand")))]
4257 && (INTVAL (operands[2]) < 8) && (INTVAL (operands[3]) == 0))
4261 (define_expand "maddhisi4"
4262 [(set (match_operand:SI 0 "register_operand")
4264 (mult:SI (sign_extend:SI (match_operand:HI 1 "register_operand"))
4265 (sign_extend:SI (match_operand:HI 2 "register_operand")))
4266 (match_operand:SI 3 "register_operand")))]
4270 (define_expand "msubhisi4"
4271 [(set (match_operand:SI 0 "register_operand")
4273 (match_operand:SI 3 "register_operand")
4274 (mult:SI (sign_extend:SI (match_operand:HI 1 "register_operand"))
4275 (sign_extend:SI (match_operand:HI 2 "register_operand")))))]
4279 ;; String compare with length insn.
4280 ;; Argument 0 is the target (result)
4281 ;; Argument 1 is the source1
4282 ;; Argument 2 is the source2
4283 ;; Argument 3 is the length
4284 ;; Argument 4 is the alignment
;; Inline strncmp expansion (length-bounded string compare).  Computes the
;; result in word_mode via riscv_expand_strcmp, then exposes it as a
;; sign-promoted SImode lowpart before the final move.
4286 (define_expand "cmpstrnsi"
4287 [(parallel [(set (match_operand:SI 0)
4288 (compare:SI (match_operand:BLK 1)
4289 (match_operand:BLK 2)))
4290 (use (match_operand:SI 3))
4291 (use (match_operand:SI 4))])]
4292 "riscv_inline_strncmp && !optimize_size
4293 && (TARGET_ZBB || TARGET_XTHEADBB || TARGET_VECTOR)"
4295 rtx temp = gen_reg_rtx (word_mode);
4296 if (riscv_expand_strcmp (temp, operands[1], operands[2],
4297 operands[3], operands[4]))
4301 temp = gen_lowpart (SImode, temp);
4302 SUBREG_PROMOTED_VAR_P (temp) = 1;
4303 SUBREG_PROMOTED_SET (temp, SRP_SIGNED);
4305 emit_move_insn (operands[0], temp);
4312 ;; String compare insn.
4313 ;; Argument 0 is the target (result)
4314 ;; Argument 1 is the source1
4315 ;; Argument 2 is the source2
4316 ;; Argument 3 is the alignment
;; Inline expansion of strcmp.  Same expansion helper as cmpstrnsi but
;; with a NULL length (unbounded compare); gated on riscv_inline_strcmp,
;; !optimize_size, and Zbb / XTheadBb / V availability.
4318 (define_expand "cmpstrsi"
4319 [(parallel [(set (match_operand:SI 0)
4320 (compare:SI (match_operand:BLK 1)
4321 (match_operand:BLK 2)))
4322 (use (match_operand:SI 3))])]
4323 "riscv_inline_strcmp && !optimize_size
4324 && (TARGET_ZBB || TARGET_XTHEADBB || TARGET_VECTOR)"
;; As in cmpstrnsi: compute in word_mode, return an SImode lowpart subreg
;; marked as sign-promoted so the high bits are known to be sign-extended.
4326 rtx temp = gen_reg_rtx (word_mode);
4327 if (riscv_expand_strcmp (temp, operands[1], operands[2],
4328 NULL_RTX, operands[3]))
4332 temp = gen_lowpart (SImode, temp);
4333 SUBREG_PROMOTED_VAR_P (temp) = 1;
4334 SUBREG_PROMOTED_SET (temp, SRP_SIGNED);
4336 emit_move_insn (operands[0], temp);
4343 ;; Search character in string (generalization of strlen).
4344 ;; Argument 0 is the resulting offset
4345 ;; Argument 1 is the string
4346 ;; Argument 2 is the search character
4347 ;; Argument 3 is the alignment
;; Inline expansion of strlen (search for a character in a string).
;; Gated like the strcmp expanders: riscv_inline_strlen, !optimize_size,
;; and Zbb / XTheadBb / V present.
4349 (define_expand "strlen<mode>"
4350 [(set (match_operand:X 0 "register_operand")
4351 (unspec:X [(match_operand:BLK 1 "general_operand")
4352 (match_operand:SI 2 "const_int_operand")
4353 (match_operand:SI 3 "const_int_operand")]
4355 "riscv_inline_strlen && !optimize_size
4356 && (TARGET_ZBB || TARGET_XTHEADBB || TARGET_VECTOR)"
;; Only the NUL search character (i.e. real strlen) is handled inline;
;; a non-zero search char presumably FAILs back to the library -- the
;; branch body is not visible here, confirm upstream.
4358 rtx search_char = operands[2];
4360 if (search_char != const0_rtx)
4363 if (riscv_expand_strlen (operands[0], operands[1], operands[2], operands[3]))
;; Load a doubleword through a PC-relative symbolic address under the
;; large code model (CM_LARGE, RV64 only).  Length 8 -- a two-instruction
;; PC-relative sequence (presumably auipc+ld; the output template is not
;; visible here, confirm upstream).
4369 (define_insn "*large_load_address"
4370 [(set (match_operand:DI 0 "register_operand" "=r")
4371 (mem:DI (match_operand 1 "pcrel_symbol_operand" "")))]
4372 "TARGET_64BIT && riscv_cmodel == CM_LARGE"
4374 [(set_attr "type" "load")
4375 (set (attr "length") (const_int 8))])
4377 ;; The AND is redundant here. It always turns off the high 32 bits and the
4378 ;; low number of bits equal to the shift count. Those upper 32 bits will be
4379 ;; reset by the SIGN_EXTEND at the end.
4381 ;; One could argue combine should have realized this and simplified what it
4382 ;; presented to the backend. But we can obviously cope with what it gave us.
;; Matches (sext (lowpart ((x << C1) & C3) + y)) where the visible
;; condition requires C3 to be exactly the low-32-bit mask minus the C1
;; shifted-in zero bits, i.e. the AND is a no-op (see comment above).
;; After reload, split into a plain DImode shift into the scratch and a
;; sign-extending SImode add of its lowpart with operand 4.
4383 (define_insn_and_split ""
4384 [(set (match_operand:DI 0 "register_operand" "=r")
4388 (ashift:DI (match_operand:DI 1 "register_operand" "r")
4389 (match_operand 2 "const_int_operand" "n"))
4390 (match_operand 3 "const_int_operand" "n")) 0)
4391 (match_operand:SI 4 "register_operand" "r"))))
4392 (clobber (match_scratch:DI 5 "=&r"))]
4394 && (INTVAL (operands[3]) | ((1 << INTVAL (operands[2])) - 1)) == 0xffffffff"
4396 "&& reload_completed"
4397 [(set (match_dup 5) (ashift:DI (match_dup 1) (match_dup 2)))
4398 (set (match_dup 0) (sign_extend:DI (plus:SI (match_dup 6) (match_dup 4))))]
;; Operand 6 is the SImode view of the DImode scratch register.
4399 "{ operands[6] = gen_lowpart (SImode, operands[5]); }"
4400 [(set_attr "type" "arith")])
;; Unsigned saturating addition; the whole expansion is done in C by
;; riscv_expand_usadd (operand 0 = result, 1 and 2 = addends, which may
;; be immediates).
4402 (define_expand "usadd<mode>3"
4403 [(match_operand:ANYI 0 "register_operand")
4404 (match_operand:ANYI 1 "reg_or_int_operand")
4405 (match_operand:ANYI 2 "reg_or_int_operand")]
4408 riscv_expand_usadd (operands[0], operands[1], operands[2]);
;; Signed saturating addition; expansion done entirely in C by
;; riscv_expand_ssadd.  Unlike usadd, both sources must be registers.
4413 (define_expand "ssadd<mode>3"
4414 [(match_operand:ANYI 0 "register_operand")
4415 (match_operand:ANYI 1 "register_operand")
4416 (match_operand:ANYI 2 "register_operand")]
4419 riscv_expand_ssadd (operands[0], operands[1], operands[2]);
;; Unsigned saturating subtraction; expansion done entirely in C by
;; riscv_expand_ussub (sources may be immediates).
4424 (define_expand "ussub<mode>3"
4425 [(match_operand:ANYI 0 "register_operand")
4426 (match_operand:ANYI 1 "reg_or_int_operand")
4427 (match_operand:ANYI 2 "reg_or_int_operand")]
4430 riscv_expand_ussub (operands[0], operands[1], operands[2]);
;; Signed saturating subtraction; expansion done entirely in C by
;; riscv_expand_sssub.  Both sources must be registers.
4435 (define_expand "sssub<mode>3"
4436 [(match_operand:ANYI 0 "register_operand")
4437 (match_operand:ANYI 1 "register_operand")
4438 (match_operand:ANYI 2 "register_operand")]
4441 riscv_expand_sssub (operands[0], operands[1], operands[2]);
;; Unsigned saturating truncation to the half-width integer mode;
;; expansion done in C by riscv_expand_ustrunc.
4446 (define_expand "ustrunc<mode><anyi_double_truncated>2"
4447 [(match_operand:<ANYI_DOUBLE_TRUNCATED> 0 "register_operand")
4448 (match_operand:ANYI_DOUBLE_TRUNC 1 "register_operand")]
4451 riscv_expand_ustrunc (operands[0], operands[1]);
;; Signed saturating truncation to the half-width integer mode;
;; expansion done in C by riscv_expand_sstrunc.
4456 (define_expand "sstrunc<mode><anyi_double_truncated>2"
4457 [(match_operand:<ANYI_DOUBLE_TRUNCATED> 0 "register_operand")
4458 (match_operand:ANYI_DOUBLE_TRUNC 1 "register_operand")]
4461 riscv_expand_sstrunc (operands[0], operands[1]);
;; Unsigned saturating truncation to the quarter-width integer mode;
;; expansion done in C by riscv_expand_ustrunc.
4466 (define_expand "ustrunc<mode><anyi_quad_truncated>2"
4467 [(match_operand:<ANYI_QUAD_TRUNCATED> 0 "register_operand")
4468 (match_operand:ANYI_QUAD_TRUNC 1 "register_operand")]
4471 riscv_expand_ustrunc (operands[0], operands[1]);
;; Signed saturating truncation to the quarter-width integer mode;
;; expansion done in C by riscv_expand_sstrunc.
4476 (define_expand "sstrunc<mode><anyi_quad_truncated>2"
4477 [(match_operand:<ANYI_QUAD_TRUNCATED> 0 "register_operand")
4478 (match_operand:ANYI_QUAD_TRUNC 1 "register_operand")]
4481 riscv_expand_sstrunc (operands[0], operands[1]);
;; Unsigned saturating truncation to the eighth-width integer mode;
;; expansion done in C by riscv_expand_ustrunc.
4486 (define_expand "ustrunc<mode><anyi_oct_truncated>2"
4487 [(match_operand:<ANYI_OCT_TRUNCATED> 0 "register_operand")
4488 (match_operand:ANYI_OCT_TRUNC 1 "register_operand")]
4491 riscv_expand_ustrunc (operands[0], operands[1]);
;; Signed saturating truncation to the eighth-width integer mode;
;; expansion done in C by riscv_expand_sstrunc.
4496 (define_expand "sstrunc<mode><anyi_oct_truncated>2"
4497 [(match_operand:<ANYI_OCT_TRUNCATED> 0 "register_operand")
4498 (match_operand:ANYI_OCT_TRUNC 1 "register_operand")]
4501 riscv_expand_sstrunc (operands[0], operands[1]);
4506 ;; These are forms of (x << C1) + C2, potentially canonicalized from
4507 ;; ((x + C2') << C1). Depending on the cost to load C2 vs C2' we may
4508 ;; want to go ahead and recognize this form as C2 may be cheaper to
4509 ;; synthesize than C2'.
4511 ;; It might be better to refactor riscv_const_insns a bit so that we
4512 ;; can have an API that passes integer values around rather than
4513 ;; constructing a lot of garbage RTL.
4515 ;; The mvconst_internal pattern in effect requires this pattern to
4516 ;; also be a define_insn_and_split due to insn count costing when
4517 ;; splitting in combine.
;; Matches (x << C1) + C2 (see comment above).  The visible condition
;; accepts the form when C2 is no more expensive to synthesize than the
;; un-shifted constant C2' = C2 >> C1, or when C2' cannot be represented
;; at all (riscv_const_insns == 0).  After reload, split into: shift,
;; load C2 into the scratch, then add.
4518 (define_insn_and_split ""
4519 [(set (match_operand:DI 0 "register_operand" "=r")
4520 (plus:DI (ashift:DI (match_operand:DI 1 "register_operand" "r")
4521 (match_operand 2 "const_int_operand" "n"))
4522 (match_operand 3 "const_int_operand" "n")))
4523 (clobber (match_scratch:DI 4 "=&r"))]
4525 && riscv_const_insns (operands[3], false)
4526 && ((riscv_const_insns (operands[3], false)
4527 < riscv_const_insns (GEN_INT (INTVAL (operands[3]) >> INTVAL (operands[2])), false))
4528 || riscv_const_insns (GEN_INT (INTVAL (operands[3]) >> INTVAL (operands[2])), false) == 0))"
4530 "&& reload_completed"
4531 [(set (match_dup 0) (ashift:DI (match_dup 1) (match_dup 2)))
4532 (set (match_dup 4) (match_dup 3))
4533 (set (match_dup 0) (plus:DI (match_dup 0) (match_dup 4)))]
4535 [(set_attr "type" "arith")])
;; SImode variant of the pattern above: (sext ((x << C1) + C2)).
;; Same constant-cost condition on C2 versus C2' = C2 >> C1.  After
;; reload, split into a DImode shift, a constant load into the scratch,
;; and a sign-extending SImode add of the two lowparts.
4537 (define_insn_and_split ""
4538 [(set (match_operand:DI 0 "register_operand" "=r")
4539 (sign_extend:DI (plus:SI (ashift:SI
4540 (match_operand:SI 1 "register_operand" "r")
4541 (match_operand 2 "const_int_operand" "n"))
4542 (match_operand 3 "const_int_operand" "n"))))
4543 (clobber (match_scratch:DI 4 "=&r"))]
4545 && riscv_const_insns (operands[3], false)
4546 && ((riscv_const_insns (operands[3], false)
4547 < riscv_const_insns (GEN_INT (INTVAL (operands[3]) >> INTVAL (operands[2])), false))
4548 || riscv_const_insns (GEN_INT (INTVAL (operands[3]) >> INTVAL (operands[2])), false) == 0))"
4550 "&& reload_completed"
4551 [(set (match_dup 0) (ashift:DI (match_dup 1) (match_dup 2)))
4552 (set (match_dup 4) (match_dup 3))
4553 (set (match_dup 0) (sign_extend:DI (plus:SI (match_dup 5) (match_dup 6))))]
;; Operands 5/6 are the SImode views of the destination and scratch;
;; operand 1 is widened to DImode for the split shift.
4555 operands[1] = gen_lowpart (DImode, operands[1]);
4556 operands[5] = gen_lowpart (SImode, operands[0]);
4557 operands[6] = gen_lowpart (SImode, operands[4]);
4559 [(set_attr "type" "arith")])
4562 (include "bitmanip.md")
4563 (include "crypto.md")
4565 (include "sync-rvwmo.md")
4566 (include "sync-ztso.md")
4567 (include "peephole.md")
4569 (include "generic.md")
4570 (include "sifive-7.md")
4571 (include "sifive-p400.md")
4572 (include "sifive-p600.md")
4573 (include "thead.md")
4574 (include "generic-vector-ooo.md")
4575 (include "generic-ooo.md")
4576 (include "vector.md")
4577 (include "vector-crypto.md")
4578 (include "vector-bfloat16.md")
4579 (include "zicond.md")
4582 (include "corev.md")
4583 (include "xiangshan.md")