1 ;; Machine description for RISC-V for GNU compiler.
2 ;; Copyright (C) 2011-2024 Free Software Foundation, Inc.
3 ;; Contributed by Andrew Waterman (andrew@sifive.com).
4 ;; Based on MIPS target for GNU compiler.
6 ;; This file is part of GCC.
8 ;; GCC is free software; you can redistribute it and/or modify
9 ;; it under the terms of the GNU General Public License as published by
10 ;; the Free Software Foundation; either version 3, or (at your option)
13 ;; GCC is distributed in the hope that it will be useful,
14 ;; but WITHOUT ANY WARRANTY; without even the implied warranty of
15 ;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 ;; GNU General Public License for more details.
18 ;; You should have received a copy of the GNU General Public License
19 ;; along with GCC; see the file COPYING3. If not see
20 ;; <http://www.gnu.org/licenses/>.
23 ;; Keep this list and the one above riscv_print_operand in sync.
24 ;; The special asm out single letter directives following a '%' are:
25 ;; h -- Print the high-part relocation associated with OP, after stripping
26 ;; any outermost HIGH.
27 ;; R -- Print the low-part relocation associated with OP.
28 ;; C -- Print the integer branch condition for comparison OP.
29 ;; A -- Print the atomic operation suffix for memory model OP.
30 ;; F -- Print a FENCE if the memory model requires a release.
31 ;; z -- Print x0 if OP is zero, otherwise print OP normally.
32 ;; i -- Print i if the operand is not a register.
33 ;; S -- Print shift-index of single-bit mask OP.
34 ;; T -- Print shift-index of inverted single-bit mask OP.
35 ;; ~ -- Print w if TARGET_64BIT is true; otherwise not print anything.
37 (define_c_enum "unspec" [
38 ;; Override return address for exception handling.
41 ;; Symbolic accesses. The order of this list must match that of
42 ;; enum riscv_symbol_type in riscv-protos.h.
52 ;; High part of PC-relative address.
55 ;; Floating-point unspecs.
84 ;; the calling convention of callee
90 ;; Workaround for HFmode without hardware extension
;; Volatile unspecs: operations with side effects that the optimizers
;; must not move, duplicate or delete (see GCC internals, UNSPEC_VOLATILE).
98 (define_c_enum "unspecv" [
99 ;; Register save and restore.
103 ;; Floating-point unspecs.
110 ;; Interrupt handler instructions.
115 ;; Blockage and synchronization.
120 ;; Stack Smash Protector
131 ;; Zihintpause unspec
;; XTheadInt interrupt-state push/pop.
135 UNSPECV_XTHEADINT_PUSH
136 UNSPECV_XTHEADINT_POP
;; NOTE(review): fixed hard-register numbers used by the port; the opening
;; of this constants list is not visible in this chunk.
140 [(RETURN_ADDR_REGNUM 1)
170 (include "predicates.md")
171 (include "constraints.md")
172 (include "iterators.md")
174 ;; ....................
178 ;; ....................
;; GOT access classification; "load" marks an insn that loads from the
;; GOT (used below when computing the "type" and "length" attributes).
180 (define_attr "got" "unset,xgot_high,load"
181 (const_string "unset"))
183 ;; Classification of moves, extensions and truncations. Most values
184 ;; are as for "type" (see below) but there are also the following
185 ;; move-specific values:
187 ;; andi a single ANDI instruction
188 ;; shift_shift a shift left followed by a shift right
190 ;; This attribute is used to determine the instruction's length and
191 ;; scheduling type. For doubleword moves, the attribute always describes
192 ;; the split instructions; in some cases, it is more appropriate for the
193 ;; scheduling type to be "multi" instead.
194 (define_attr "move_type"
195 "unknown,load,fpload,store,fpstore,mtc,mfc,move,fmove,
196 const,logical,arith,andi,shift_shift,rdvlenb"
197 (const_string "unknown"))
199 ;; Main data type used by the insn
200 (define_attr "mode" "unknown,none,QI,HI,SI,DI,TI,HF,SF,DF,TF,
201 RVVMF64BI,RVVMF32BI,RVVMF16BI,RVVMF8BI,RVVMF4BI,RVVMF2BI,RVVM1BI,
202 RVVM8QI,RVVM4QI,RVVM2QI,RVVM1QI,RVVMF2QI,RVVMF4QI,RVVMF8QI,
203 RVVM8HI,RVVM4HI,RVVM2HI,RVVM1HI,RVVMF2HI,RVVMF4HI,
204 RVVM8HF,RVVM4HF,RVVM2HF,RVVM1HF,RVVMF2HF,RVVMF4HF,
205 RVVM8SI,RVVM4SI,RVVM2SI,RVVM1SI,RVVMF2SI,
206 RVVM8SF,RVVM4SF,RVVM2SF,RVVM1SF,RVVMF2SF,
207 RVVM8DI,RVVM4DI,RVVM2DI,RVVM1DI,
208 RVVM8DF,RVVM4DF,RVVM2DF,RVVM1DF,
209 RVVM1x8QI,RVVMF2x8QI,RVVMF4x8QI,RVVMF8x8QI,
210 RVVM1x7QI,RVVMF2x7QI,RVVMF4x7QI,RVVMF8x7QI,
211 RVVM1x6QI,RVVMF2x6QI,RVVMF4x6QI,RVVMF8x6QI,
212 RVVM1x5QI,RVVMF2x5QI,RVVMF4x5QI,RVVMF8x5QI,
213 RVVM2x4QI,RVVM1x4QI,RVVMF2x4QI,RVVMF4x4QI,RVVMF8x4QI,
214 RVVM2x3QI,RVVM1x3QI,RVVMF2x3QI,RVVMF4x3QI,RVVMF8x3QI,
215 RVVM4x2QI,RVVM2x2QI,RVVM1x2QI,RVVMF2x2QI,RVVMF4x2QI,RVVMF8x2QI,
216 RVVM1x8HI,RVVMF2x8HI,RVVMF4x8HI,
217 RVVM1x7HI,RVVMF2x7HI,RVVMF4x7HI,
218 RVVM1x6HI,RVVMF2x6HI,RVVMF4x6HI,
219 RVVM1x5HI,RVVMF2x5HI,RVVMF4x5HI,
220 RVVM2x4HI,RVVM1x4HI,RVVMF2x4HI,RVVMF4x4HI,
221 RVVM2x3HI,RVVM1x3HI,RVVMF2x3HI,RVVMF4x3HI,
222 RVVM4x2HI,RVVM2x2HI,RVVM1x2HI,RVVMF2x2HI,RVVMF4x2HI,
223 RVVM1x8HF,RVVMF2x8HF,RVVMF4x8HF,RVVM1x7HF,RVVMF2x7HF,
224 RVVMF4x7HF,RVVM1x6HF,RVVMF2x6HF,RVVMF4x6HF,RVVM1x5HF,
225 RVVMF2x5HF,RVVMF4x5HF,RVVM2x4HF,RVVM1x4HF,RVVMF2x4HF,
226 RVVMF4x4HF,RVVM2x3HF,RVVM1x3HF,RVVMF2x3HF,RVVMF4x3HF,
227 RVVM4x2HF,RVVM2x2HF,RVVM1x2HF,RVVMF2x2HF,RVVMF4x2HF,
228 RVVM1x8SI,RVVMF2x8SI,
229 RVVM1x7SI,RVVMF2x7SI,
230 RVVM1x6SI,RVVMF2x6SI,
231 RVVM1x5SI,RVVMF2x5SI,
232 RVVM2x4SI,RVVM1x4SI,RVVMF2x4SI,
233 RVVM2x3SI,RVVM1x3SI,RVVMF2x3SI,
234 RVVM4x2SI,RVVM2x2SI,RVVM1x2SI,RVVMF2x2SI,
235 RVVM1x8SF,RVVMF2x8SF,RVVM1x7SF,RVVMF2x7SF,
236 RVVM1x6SF,RVVMF2x6SF,RVVM1x5SF,RVVMF2x5SF,
237 RVVM2x4SF,RVVM1x4SF,RVVMF2x4SF,RVVM2x3SF,
238 RVVM1x3SF,RVVMF2x3SF,RVVM4x2SF,RVVM2x2SF,
239 RVVM1x2SF,RVVMF2x2SF,
240 RVVM1x8DI,RVVM1x7DI,RVVM1x6DI,RVVM1x5DI,
241 RVVM2x4DI,RVVM1x4DI,RVVM2x3DI,RVVM1x3DI,
242 RVVM4x2DI,RVVM2x2DI,RVVM1x2DI,RVVM1x8DF,
243 RVVM1x7DF,RVVM1x6DF,RVVM1x5DF,RVVM2x4DF,
244 RVVM1x4DF,RVVM2x3DF,RVVM1x3DF,RVVM4x2DF,
246 V1QI,V2QI,V4QI,V8QI,V16QI,V32QI,V64QI,V128QI,V256QI,V512QI,V1024QI,V2048QI,V4096QI,
247 V1HI,V2HI,V4HI,V8HI,V16HI,V32HI,V64HI,V128HI,V256HI,V512HI,V1024HI,V2048HI,
248 V1SI,V2SI,V4SI,V8SI,V16SI,V32SI,V64SI,V128SI,V256SI,V512SI,V1024SI,
249 V1DI,V2DI,V4DI,V8DI,V16DI,V32DI,V64DI,V128DI,V256DI,V512DI,
250 V1HF,V2HF,V4HF,V8HF,V16HF,V32HF,V64HF,V128HF,V256HF,V512HF,V1024HF,V2048HF,
251 V1SF,V2SF,V4SF,V8SF,V16SF,V32SF,V64SF,V128SF,V256SF,V512SF,V1024SF,
252 V1DF,V2DF,V4DF,V8DF,V16DF,V32DF,V64DF,V128DF,V256DF,V512DF,
253 V1BI,V2BI,V4BI,V8BI,V16BI,V32BI,V64BI,V128BI,V256BI,V512BI,V1024BI,V2048BI,V4096BI"
254 (const_string "unknown"))
256 ;; True if the main data type is twice the size of a word.
257 (define_attr "dword_mode" "no,yes"
258 (cond [(and (eq_attr "mode" "DI,DF")
259 (eq (symbol_ref "TARGET_64BIT") (const_int 0)))
262 (and (eq_attr "mode" "TI,TF")
263 (ne (symbol_ref "TARGET_64BIT") (const_int 0)))
264 (const_string "yes")]
265 (const_string "no")))
;; ISA extension an insn belongs to; tested by "ext_enabled" below.
268 (define_attr "ext" "base,f,d,vector"
269 (const_string "base"))
271 ;; True if the extension is enabled.
272 (define_attr "ext_enabled" "no,yes"
273 (cond [(eq_attr "ext" "base")
276 (and (eq_attr "ext" "f")
277 (match_test "TARGET_HARD_FLOAT"))
280 (and (eq_attr "ext" "d")
281 (match_test "TARGET_DOUBLE_FLOAT"))
284 (and (eq_attr "ext" "vector")
285 (match_test "TARGET_VECTOR"))
288 (const_string "no")))
290 ;; Classification of each insn.
291 ;; branch conditional branch
292 ;; jump unconditional direct jump
293 ;; jalr unconditional indirect jump
294 ;; ret various returns, no arguments
295 ;; call unconditional call
296 ;; load load instruction(s)
297 ;; fpload floating point load
298 ;; store store instruction(s)
299 ;; fpstore floating point store
300 ;; mtc transfer to coprocessor
301 ;; mfc transfer from coprocessor
302 ;; const load constant
303 ;; arith integer arithmetic instructions
304 ;; logical integer logical instructions
305 ;; shift integer shift instructions
306 ;; slt set less than instructions
307 ;; imul integer multiply
308 ;; idiv integer divide
309 ;; move integer register move (addi rd, rs1, 0)
310 ;; fmove floating point register move
311 ;; fadd floating point add/subtract
312 ;; fmul floating point multiply
313 ;; fmadd floating point multiply-add
314 ;; fdiv floating point divide
315 ;; fcmp floating point compare
316 ;; fcvt floating point convert
317 ;; fcvt_i2f integer to floating point convert
318 ;; fcvt_f2i floating point to integer convert
319 ;; fsqrt floating point square root
320 ;; multi multiword sequence (or user asm statements)
321 ;; auipc integer addition to PC
322 ;; sfb_alu SFB ALU instruction
324 ;; trap trap instruction
325 ;; ghost an instruction that produces no real code
326 ;; bitmanip bit manipulation instructions
327 ;; clmul clmul, clmulh, clmulr
328 ;; rotate rotation instructions
329 ;; atomic atomic instructions
330 ;; condmove conditional moves
331 ;; crypto cryptography instructions
332 ;; mvpair zc move pair instructions
333 ;; zicond zicond instructions
334 ;; Classification of RVV instructions which will be added to each RVV .md pattern and used by scheduler.
335 ;; rdvlenb vector byte length vlenb csrr read
336 ;; rdvl vector length vl csrr read
337 ;; wrvxrm vector fixed-point rounding mode write
338 ;; wrfrm vector floating-point rounding mode write
339 ;; vsetvl vector configuration-setting instructions
340 ;; 7. Vector Loads and Stores
341 ;; vlde vector unit-stride load instructions
342 ;; vste vector unit-stride store instructions
343 ;; vldm vector unit-stride mask load instructions
344 ;; vstm vector unit-stride mask store instructions
345 ;; vlds vector strided load instructions
346 ;; vsts vector strided store instructions
347 ;; vldux vector unordered indexed load instructions
348 ;; vldox vector ordered indexed load instructions
349 ;; vstux vector unordered indexed store instructions
350 ;; vstox vector ordered indexed store instructions
351 ;; vldff vector unit-stride fault-only-first load instructions
352 ;; vldr vector whole register load instructions
353 ;; vstr vector whole register store instructions
354 ;; vlsegde vector segment unit-stride load instructions
355 ;; vssegte vector segment unit-stride store instructions
356 ;; vlsegds vector segment strided load instructions
357 ;; vssegts vector segment strided store instructions
358 ;; vlsegdux vector segment unordered indexed load instructions
359 ;; vlsegdox vector segment ordered indexed load instructions
360 ;; vssegtux vector segment unordered indexed store instructions
361 ;; vssegtox vector segment ordered indexed store instructions
362 ;; vlsegdff vector segment unit-stride fault-only-first load instructions
363 ;; 11. Vector integer arithmetic instructions
364 ;; vialu vector single-width integer add and subtract and logical instructions
365 ;; viwalu vector widening integer add/subtract
366 ;; vext vector integer extension
367 ;; vicalu vector arithmetic with carry or borrow instructions
368 ;; vshift vector single-width bit shift instructions
369 ;; vnshift vector narrowing integer shift instructions
370 ;; viminmax vector integer min/max instructions
371 ;; vicmp vector integer comparison instructions
372 ;; vimul vector single-width integer multiply instructions
373 ;; vidiv vector single-width integer divide instructions
374 ;; viwmul vector widening integer multiply instructions
375 ;; vimuladd vector single-width integer multiply-add instructions
376 ;; viwmuladd vector widening integer multiply-add instructions
377 ;; vimerge vector integer merge instructions
378 ;; vimov vector integer move vector instructions
379 ;; 12. Vector fixed-point arithmetic instructions
380 ;; vsalu vector single-width saturating add and subtract and logical instructions
381 ;; vaalu vector single-width averaging add and subtract and logical instructions
382 ;; vsmul vector single-width fractional multiply with rounding and saturation instructions
383 ;; vsshift vector single-width scaling shift instructions
384 ;; vnclip vector narrowing fixed-point clip instructions
385 ;; 13. Vector floating-point instructions
386 ;; vfalu vector single-width floating-point add/subtract instructions
387 ;; vfwalu vector widening floating-point add/subtract instructions
388 ;; vfmul vector single-width floating-point multiply instructions
389 ;; vfdiv vector single-width floating-point divide instructions
390 ;; vfwmul vector widening floating-point multiply instructions
391 ;; vfmuladd vector single-width floating-point multiply-add instructions
392 ;; vfwmuladd vector widening floating-point multiply-add instructions
393 ;; vfsqrt vector floating-point square-root instructions
394 ;; vfrecp vector floating-point reciprocal square-root instructions
395 ;; vfminmax vector floating-point min/max instructions
396 ;; vfcmp vector floating-point comparison instructions
397 ;; vfsgnj vector floating-point sign-injection instructions
398 ;; vfclass vector floating-point classify instruction
399 ;; vfmerge vector floating-point merge instruction
400 ;; vfmov vector floating-point move instruction
401 ;; vfcvtitof vector single-width integer to floating-point instruction
402 ;; vfcvtftoi vector single-width floating-point to integer instruction
403 ;; vfwcvtitof vector widening integer to floating-point instruction
404 ;; vfwcvtftoi vector widening floating-point to integer instruction
405 ;; vfwcvtftof vector widening floating-point to floating-point instruction
406 ;; vfncvtitof vector narrowing integer to floating-point instruction
407 ;; vfncvtftoi vector narrowing floating-point to integer instruction
408 ;; vfncvtftof vector narrowing floating-point to floating-point instruction
409 ;; 14. Vector reduction operations
410 ;; vired vector single-width integer reduction instructions
411 ;; viwred vector widening integer reduction instructions
412 ;; vfredu vector single-width floating-point un-ordered reduction instruction
413 ;; vfredo vector single-width floating-point ordered reduction instruction
414 ;; vfwredu vector widening floating-point un-ordered reduction instruction
415 ;; vfwredo vector widening floating-point ordered reduction instruction
416 ;; 15. Vector mask instructions
417 ;; vmalu vector mask-register logical instructions
418 ;; vmpop vector mask population count
419 ;; vmffs vector find-first-set mask bit
420 ;; vmsfs vector set mask bit
421 ;; vmiota vector iota
422 ;; vmidx vector element index instruction
423 ;; 16. Vector permutation instructions
424 ;; vimovvx integer scalar move instructions
425 ;; vimovxv integer scalar move instructions
426 ;; vfmovvf floating-point scalar move instructions
427 ;; vfmovfv floating-point scalar move instructions
428 ;; vslideup vector slide instructions
429 ;; vslidedown vector slide instructions
430 ;; vislide1up vector slide instructions
431 ;; vislide1down vector slide instructions
432 ;; vfslide1up vector slide instructions
433 ;; vfslide1down vector slide instructions
434 ;; vgather vector register gather instructions
435 ;; vcompress vector compress instruction
436 ;; vmov whole vector register move
437 ;; vector unknown vector instruction
438 ;; 17. Crypto Vector instructions
439 ;; vandn crypto vector bitwise and-not instructions
440 ;; vbrev crypto vector reverse bits in elements instructions
441 ;; vbrev8 crypto vector reverse bits in bytes instructions
442 ;; vrev8 crypto vector reverse bytes instructions
443 ;; vclz crypto vector count leading zeros instructions
444 ;; vctz crypto vector count trailing zeros instructions
445 ;; vrol crypto vector rotate left instructions
446 ;; vror crypto vector rotate right instructions
447 ;; vwsll crypto vector widening shift left logical instructions
448 ;; vclmul crypto vector carry-less multiply - return low half instructions
449 ;; vclmulh crypto vector carry-less multiply - return high half instructions
450 ;; vghsh crypto vector add-multiply over GHASH Galois-Field instructions
451 ;; vgmul crypto vector multiply over GHASH Galois-Field instructions
452 ;; vaesef crypto vector AES final-round encryption instructions
453 ;; vaesem crypto vector AES middle-round encryption instructions
454 ;; vaesdf crypto vector AES final-round decryption instructions
455 ;; vaesdm crypto vector AES middle-round decryption instructions
456 ;; vaeskf1 crypto vector AES-128 Forward KeySchedule generation instructions
457 ;; vaeskf2 crypto vector AES-256 Forward KeySchedule generation instructions
458 ;; vaesz crypto vector AES round zero encryption/decryption instructions
459 ;; vsha2ms crypto vector SHA-2 message schedule instructions
460 ;; vsha2ch crypto vector SHA-2 two rounds of compression instructions
461 ;; vsha2cl crypto vector SHA-2 two rounds of compression instructions
462 ;; vsm4k crypto vector SM4 KeyExpansion instructions
463 ;; vsm4r crypto vector SM4 Rounds instructions
464 ;; vsm3me crypto vector SM3 Message Expansion instructions
465 ;; vsm3c crypto vector SM3 Compression instructions
;; NOTE(review): value list and default computation for the "type"
;; attribute; the (define_attr "type" opener is not visible in this chunk.
467 "unknown,branch,jump,jalr,ret,call,load,fpload,store,fpstore,
468 mtc,mfc,const,arith,logical,shift,slt,imul,idiv,move,fmove,fadd,fmul,
469 fmadd,fdiv,fcmp,fcvt,fcvt_i2f,fcvt_f2i,fsqrt,multi,auipc,sfb_alu,nop,trap,
470 ghost,bitmanip,rotate,clmul,min,max,minu,maxu,clz,ctz,cpop,
471 atomic,condmove,crypto,mvpair,zicond,rdvlenb,rdvl,wrvxrm,wrfrm,
472 rdfrm,vsetvl,vsetvl_pre,vlde,vste,vldm,vstm,vlds,vsts,
473 vldux,vldox,vstux,vstox,vldff,vldr,vstr,
474 vlsegde,vssegte,vlsegds,vssegts,vlsegdux,vlsegdox,vssegtux,vssegtox,vlsegdff,
475 vialu,viwalu,vext,vicalu,vshift,vnshift,vicmp,viminmax,
476 vimul,vidiv,viwmul,vimuladd,viwmuladd,vimerge,vimov,
477 vsalu,vaalu,vsmul,vsshift,vnclip,
478 vfalu,vfwalu,vfmul,vfdiv,vfwmul,vfmuladd,vfwmuladd,vfsqrt,vfrecp,
479 vfcmp,vfminmax,vfsgnj,vfclass,vfmerge,vfmov,
480 vfcvtitof,vfcvtftoi,vfwcvtitof,vfwcvtftoi,
481 vfwcvtftof,vfncvtitof,vfncvtftoi,vfncvtftof,
482 vired,viwred,vfredu,vfredo,vfwredu,vfwredo,
483 vmalu,vmpop,vmffs,vmsfs,vmiota,vmidx,vimovvx,vimovxv,vfmovvf,vfmovfv,
484 vslideup,vslidedown,vislide1up,vislide1down,vfslide1up,vfslide1down,
485 vgather,vcompress,vmov,vector,vandn,vbrev,vbrev8,vrev8,vclz,vctz,vcpop,vrol,vror,vwsll,
486 vclmul,vclmulh,vghsh,vgmul,vaesef,vaesem,vaesdf,vaesdm,vaeskf1,vaeskf2,vaesz,
487 vsha2ms,vsha2ch,vsha2cl,vsm4k,vsm4r,vsm3me,vsm3c"
;; Default: derive "type" from the "got" and "move_type" attributes.
488 (cond [(eq_attr "got" "load") (const_string "load")
490 ;; If a doubleword move uses these expensive instructions,
491 ;; it is usually better to schedule them in the same way
492 ;; as the singleword form, rather than as "multi".
493 (eq_attr "move_type" "load") (const_string "load")
494 (eq_attr "move_type" "fpload") (const_string "fpload")
495 (eq_attr "move_type" "store") (const_string "store")
496 (eq_attr "move_type" "fpstore") (const_string "fpstore")
497 (eq_attr "move_type" "mtc") (const_string "mtc")
498 (eq_attr "move_type" "mfc") (const_string "mfc")
500 ;; These types of move are always single insns.
501 (eq_attr "move_type" "fmove") (const_string "fmove")
502 (eq_attr "move_type" "arith") (const_string "arith")
503 (eq_attr "move_type" "logical") (const_string "logical")
504 (eq_attr "move_type" "andi") (const_string "logical")
506 ;; These types of move are always split.
507 (eq_attr "move_type" "shift_shift")
508 (const_string "multi")
510 ;; These types of move are split for doubleword modes only.
511 (and (eq_attr "move_type" "move,const")
512 (eq_attr "dword_mode" "yes"))
513 (const_string "multi")
514 (eq_attr "move_type" "move") (const_string "move")
515 (eq_attr "move_type" "const") (const_string "const")
516 (eq_attr "move_type" "rdvlenb") (const_string "rdvlenb")]
517 (const_string "unknown")))
519 ;; True if the floating-point vector operation is disabled
519 ;; (FP16 vector ops require the Zvfh extension).
520 (define_attr "fp_vector_disabled" "no,yes"
522 (and (eq_attr "type" "vfmov,vfalu,vfmul,vfdiv,
523 vfwalu,vfwmul,vfmuladd,vfwmuladd,
524 vfsqrt,vfrecp,vfminmax,vfsgnj,vfcmp,
526 vfncvtitof,vfwcvtftoi,vfcvtftoi,vfcvtitof,
527 vfredo,vfredu,vfwredo,vfwredu,
528 vfslide1up,vfslide1down")
529 (and (eq_attr "mode" "RVVM8HF,RVVM4HF,RVVM2HF,RVVM1HF,RVVMF2HF,RVVMF4HF")
530 (match_test "!TARGET_ZVFH")))
533 ;; The mode is recorded as QI for the FP16 <=> INT8 conversion instructions.
534 (and (eq_attr "type" "vfncvtftoi,vfwcvtitof")
535 (and (eq_attr "mode" "RVVM4QI,RVVM2QI,RVVM1QI,RVVMF2QI,RVVMF4QI,RVVMF8QI")
536 (match_test "!TARGET_ZVFH")))
539 (const_string "no")))
541 ;; Widening instructions have group-overlap constraints. Those are only
542 ;; valid for certain register-group sizes. This attribute marks the
543 ;; alternatives not matching the required register-group size as disabled.
544 (define_attr "group_overlap" "none,W21,W42,W84,W43,W86,W87"
545 (const_string "none"))
;; "yes" when the alternative's overlap class matches the destination's
;; actual register-group size (LMUL alignment of operand 0).
547 (define_attr "group_overlap_valid" "no,yes"
548 (cond [(eq_attr "group_overlap" "none")
551 (and (eq_attr "group_overlap" "W21")
552 (match_test "riscv_get_v_regno_alignment (GET_MODE (operands[0])) != 2"))
555 (and (eq_attr "group_overlap" "W42,W43")
556 (match_test "riscv_get_v_regno_alignment (GET_MODE (operands[0])) != 4"))
559 (and (eq_attr "group_overlap" "W84,W86,W87")
560 (match_test "riscv_get_v_regno_alignment (GET_MODE (operands[0])) != 8"))
563 (const_string "yes")))
565 ;; This attribute marks the alternatives not matching the constraints
566 ;; described in spec as disabled.
567 (define_attr "spec_restriction" "none,thv,rvv"
568 (const_string "none"))
;; "yes" when the alternative is restricted to the other vector spec
;; (thv = disabled under XTheadVector, rvv = disabled under standard RVV).
570 (define_attr "spec_restriction_disabled" "no,yes"
571 (cond [(eq_attr "spec_restriction" "none")
574 (and (eq_attr "spec_restriction" "thv")
575 (match_test "TARGET_XTHEADVECTOR"))
578 (and (eq_attr "spec_restriction" "rvv")
579 (match_test "TARGET_VECTOR && !TARGET_XTHEADVECTOR"))
582 (const_string "no")))
584 ;; Attribute to control enable or disable instructions.
;; An alternative is enabled only when all four gating attributes allow it.
585 (define_attr "enabled" "no,yes"
587 (eq_attr "ext_enabled" "no")
590 (eq_attr "fp_vector_disabled" "yes")
593 (eq_attr "group_overlap_valid" "no")
596 (eq_attr "spec_restriction_disabled" "yes")
599 (const_string "yes")))
601 ;; Length of instruction in bytes.
;; NOTE(review): several of the constant arms of this cond are not
;; visible in this chunk.
602 (define_attr "length" ""
604 ;; Branches further than +/- 1 MiB require three instructions.
605 ;; Branches further than +/- 4 KiB require two instructions.
606 (eq_attr "type" "branch")
607 (if_then_else (and (le (minus (match_dup 0) (pc))
609 (le (minus (pc) (match_dup 0))
612 (if_then_else (and (le (minus (match_dup 0) (pc))
614 (le (minus (pc) (match_dup 0))
615 (const_int 1048572)))
619 ;; Jumps further than +/- 1 MiB require two instructions.
620 (eq_attr "type" "jump")
621 (if_then_else (and (le (minus (match_dup 0) (pc))
623 (le (minus (pc) (match_dup 0))
624 (const_int 1048572)))
628 ;; Conservatively assume calls take two instructions (AUIPC + JALR).
629 ;; The linker will opportunistically relax the sequence to JAL.
630 (eq_attr "type" "call") (const_int 8)
632 ;; "Ghost" instructions occupy no space.
633 (eq_attr "type" "ghost") (const_int 0)
635 (eq_attr "got" "load") (const_int 8)
637 ;; SHIFT_SHIFTs are decomposed into two separate instructions.
638 (eq_attr "move_type" "shift_shift")
641 ;; Check for doubleword moves that are decomposed into two
643 (and (eq_attr "move_type" "mtc,mfc,move")
644 (eq_attr "dword_mode" "yes"))
647 ;; Doubleword CONST{,N} moves are split into two word
649 (and (eq_attr "move_type" "const")
650 (eq_attr "dword_mode" "yes"))
651 (symbol_ref "riscv_split_const_insns (operands[1]) * 4")
653 ;; Otherwise, constants, loads and stores are handled by external
655 (eq_attr "move_type" "load,fpload")
656 (symbol_ref "riscv_load_store_insns (operands[1], insn) * 4")
657 (eq_attr "move_type" "store,fpstore")
658 (symbol_ref "riscv_load_store_insns (operands[0], insn) * 4")
661 ;; Is copying of this instruction disallowed?
662 (define_attr "cannot_copy" "no,yes" (const_string "no"))
664 ;; Microarchitectures we know how to tune for.
665 ;; Keep this in sync with enum riscv_microarchitecture.
;; NOTE(review): the (define_attr "tune" opener is not visible in this chunk.
667 "generic,sifive_7,sifive_p400,sifive_p600,xiangshan,generic_ooo"
668 (const (symbol_ref "((enum attr_tune) riscv_microarchitecture)")))
670 ;; Describe a user's asm statement.
671 (define_asm_attributes
672 [(set_attr "type" "multi")])
674 ;; Ghost instructions produce no real code and introduce no hazards.
675 ;; They exist purely to express an effect on dataflow.
676 (define_insn_reservation "ghost" 0
677 (eq_attr "type" "ghost")
681 ;; ....................
685 ;; ....................
;; Floating-point addition (fadd.s/fadd.d/...), for hard-float or Zfinx.
688 (define_insn "add<mode>3"
689 [(set (match_operand:ANYF 0 "register_operand" "=f")
690 (plus:ANYF (match_operand:ANYF 1 "register_operand" " f")
691 (match_operand:ANYF 2 "register_operand" " f")))]
692 "TARGET_HARD_FLOAT || TARGET_ZFINX"
693 "fadd.<fmt>\t%0,%1,%2"
694 [(set_attr "type" "fadd")
695 (set_attr "mode" "<UNITMODE>")])
;; 32-bit integer addition; operand 2 is a register or "I" immediate.
697 (define_insn "*addsi3"
698 [(set (match_operand:SI 0 "register_operand" "=r,r")
699 (plus:SI (match_operand:SI 1 "register_operand" " r,r")
700 (match_operand:SI 2 "arith_operand" " r,I")))]
703 [(set_attr "type" "arith")
704 (set_attr "mode" "SI")])
;; SImode add expander: on RV64 route through addsi3_extended so the
;; result lives sign-extended in a DImode register (promoted subreg).
706 (define_expand "addsi3"
707 [(set (match_operand:SI 0 "register_operand" "=r,r")
708 (plus:SI (match_operand:SI 1 "register_operand" " r,r")
709 (match_operand:SI 2 "arith_operand" " r,I")))]
714 rtx t = gen_reg_rtx (DImode);
715 emit_insn (gen_addsi3_extended (t, operands[1], operands[2]));
716 t = gen_lowpart (SImode, t);
717 SUBREG_PROMOTED_VAR_P (t) = 1;
718 SUBREG_PROMOTED_SET (t, SRP_SIGNED);
719 emit_move_insn (operands[0], t);
;; 64-bit integer addition.
724 (define_insn "adddi3"
725 [(set (match_operand:DI 0 "register_operand" "=r,r")
726 (plus:DI (match_operand:DI 1 "register_operand" " r,r")
727 (match_operand:DI 2 "arith_operand" " r,I")))]
730 [(set_attr "type" "arith")
731 (set_attr "mode" "DI")])
;; Signed addition with overflow check: compute the sum and branch to
;; label operand 3 when signed overflow occurred.
733 (define_expand "addv<mode>4"
734 [(set (match_operand:GPR 0 "register_operand" "=r,r")
735 (plus:GPR (match_operand:GPR 1 "register_operand" " r,r")
736 (match_operand:GPR 2 "arith_operand" " r,I")))
737 (label_ref (match_operand 3 "" ""))]
740 if (TARGET_64BIT && <MODE>mode == SImode)
742 rtx t3 = gen_reg_rtx (DImode);
743 rtx t4 = gen_reg_rtx (DImode);
744 rtx t5 = gen_reg_rtx (DImode);
745 rtx t6 = gen_reg_rtx (DImode);
;; Compare the SImode sum against the full 64-bit sum of the
;; sign-extended operands; a mismatch means overflow.
747 riscv_emit_binary (PLUS, operands[0], operands[1], operands[2]);
748 if (GET_CODE (operands[1]) != CONST_INT)
749 emit_insn (gen_extend_insn (t4, operands[1], DImode, SImode, 0));
752 if (GET_CODE (operands[2]) != CONST_INT)
753 emit_insn (gen_extend_insn (t5, operands[2], DImode, SImode, 0));
756 emit_insn (gen_adddi3 (t3, t4, t5));
757 emit_insn (gen_extend_insn (t6, operands[0], DImode, SImode, 0));
759 riscv_expand_conditional_branch (operands[3], NE, t6, t3);
763 rtx t3 = gen_reg_rtx (<MODE>mode);
764 rtx t4 = gen_reg_rtx (<MODE>mode);
;; Word-sized case: overflow iff (op2 < 0) != (result < op1).
766 emit_insn (gen_add3_insn (operands[0], operands[1], operands[2]));
767 rtx cmp1 = gen_rtx_LT (<MODE>mode, operands[2], const0_rtx);
768 emit_insn (gen_cstore<mode>4 (t3, cmp1, operands[2], const0_rtx));
769 rtx cmp2 = gen_rtx_LT (<MODE>mode, operands[0], operands[1]);
771 emit_insn (gen_cstore<mode>4 (t4, cmp2, operands[0], operands[1]));
772 riscv_expand_conditional_branch (operands[3], NE, t3, t4);
;; Unsigned addition with overflow (carry) check: branch to label
;; operand 3 when the sum wraps (result < operand, LTU).
777 (define_expand "uaddv<mode>4"
778 [(set (match_operand:GPR 0 "register_operand" "=r,r")
779 (plus:GPR (match_operand:GPR 1 "register_operand" " r,r")
780 (match_operand:GPR 2 "arith_operand" " r,I")))
781 (label_ref (match_operand 3 "" ""))]
784 if (TARGET_64BIT && <MODE>mode == SImode)
786 rtx t3 = gen_reg_rtx (DImode);
787 rtx t4 = gen_reg_rtx (DImode);
789 if (GET_CODE (operands[1]) != CONST_INT)
790 emit_insn (gen_extend_insn (t3, operands[1], DImode, SImode, 0));
793 riscv_emit_binary (PLUS, operands[0], operands[1], operands[2]);
794 emit_insn (gen_extend_insn (t4, operands[0], DImode, SImode, 0));
796 riscv_expand_conditional_branch (operands[3], LTU, t4, t3);
800 emit_insn (gen_add3_insn (operands[0], operands[1], operands[2]));
801 riscv_expand_conditional_branch (operands[3], LTU, operands[0],
;; 32-bit addition with the result extended to a DImode register
;; (presumably sign_extend -- the wrapper rtx is not visible in this chunk).
808 (define_insn "addsi3_extended"
809 [(set (match_operand:DI 0 "register_operand" "=r,r")
811 (plus:SI (match_operand:SI 1 "register_operand" " r,r")
812 (match_operand:SI 2 "arith_operand" " r,I"))))]
815 [(set_attr "type" "arith")
816 (set_attr "mode" "SI")])
;; Match a lowpart subreg of a DImode sum, extended to DImode.
818 (define_insn "*addsi3_extended2"
819 [(set (match_operand:DI 0 "register_operand" "=r,r")
821 (match_operator:SI 3 "subreg_lowpart_operator"
822 [(plus:DI (match_operand:DI 1 "register_operand" " r,r")
823 (match_operand:DI 2 "arith_operand" " r,I"))])))]
826 [(set_attr "type" "arith")
827 (set_attr "mode" "SI")])
830 ;; ....................
834 ;; ....................
;; Floating-point subtraction (fsub.s/fsub.d/...), for hard-float or Zfinx.
837 (define_insn "sub<mode>3"
838 [(set (match_operand:ANYF 0 "register_operand" "=f")
839 (minus:ANYF (match_operand:ANYF 1 "register_operand" " f")
840 (match_operand:ANYF 2 "register_operand" " f")))]
841 "TARGET_HARD_FLOAT || TARGET_ZFINX"
842 "fsub.<fmt>\t%0,%1,%2"
843 [(set_attr "type" "fadd")
844 (set_attr "mode" "<UNITMODE>")])
;; 64-bit subtraction; operand 1 may be the zero register ("J").
846 (define_insn "subdi3"
847 [(set (match_operand:DI 0 "register_operand" "= r")
848 (minus:DI (match_operand:DI 1 "reg_or_0_operand" " rJ")
849 (match_operand:DI 2 "register_operand" " r")))]
852 [(set_attr "type" "arith")
853 (set_attr "mode" "DI")])
;; 32-bit subtraction.
855 (define_insn "*subsi3"
856 [(set (match_operand:SI 0 "register_operand" "= r")
857 (minus:SI (match_operand:SI 1 "reg_or_0_operand" " rJ")
858 (match_operand:SI 2 "register_operand" " r")))]
861 [(set_attr "type" "arith")
862 (set_attr "mode" "SI")])
;; SImode subtract expander: on RV64 route through subsi3_extended so
;; the result lives sign-extended in a DImode register (promoted subreg).
864 (define_expand "subsi3"
865 [(set (match_operand:SI 0 "register_operand" "= r")
866 (minus:SI (match_operand:SI 1 "reg_or_0_operand" " rJ")
867 (match_operand:SI 2 "register_operand" " r")))]
872 rtx t = gen_reg_rtx (DImode);
873 emit_insn (gen_subsi3_extended (t, operands[1], operands[2]));
874 t = gen_lowpart (SImode, t);
875 SUBREG_PROMOTED_VAR_P (t) = 1;
876 SUBREG_PROMOTED_SET (t, SRP_SIGNED);
877 emit_move_insn (operands[0], t);
;; Signed subtraction with overflow check: branch to label operand 3
;; when signed overflow occurred.
882 (define_expand "subv<mode>4"
883 [(set (match_operand:GPR 0 "register_operand" "= r")
884 (minus:GPR (match_operand:GPR 1 "reg_or_0_operand" " rJ")
885 (match_operand:GPR 2 "register_operand" " r")))
886 (label_ref (match_operand 3 "" ""))]
889 if (TARGET_64BIT && <MODE>mode == SImode)
891 rtx t3 = gen_reg_rtx (DImode);
892 rtx t4 = gen_reg_rtx (DImode);
893 rtx t5 = gen_reg_rtx (DImode);
894 rtx t6 = gen_reg_rtx (DImode);
;; Compare the SImode difference against the full 64-bit difference
;; of the sign-extended operands; a mismatch means overflow.
896 riscv_emit_binary (MINUS, operands[0], operands[1], operands[2]);
897 if (GET_CODE (operands[1]) != CONST_INT)
898 emit_insn (gen_extend_insn (t4, operands[1], DImode, SImode, 0));
901 if (GET_CODE (operands[2]) != CONST_INT)
902 emit_insn (gen_extend_insn (t5, operands[2], DImode, SImode, 0));
905 emit_insn (gen_subdi3 (t3, t4, t5));
906 emit_insn (gen_extend_insn (t6, operands[0], DImode, SImode, 0));
908 riscv_expand_conditional_branch (operands[3], NE, t6, t3);
912 rtx t3 = gen_reg_rtx (<MODE>mode);
913 rtx t4 = gen_reg_rtx (<MODE>mode);
;; Word-sized case: overflow iff (op2 < 0) != (op1 < result).
915 emit_insn (gen_sub3_insn (operands[0], operands[1], operands[2]));
917 rtx cmp1 = gen_rtx_LT (<MODE>mode, operands[2], const0_rtx);
918 emit_insn (gen_cstore<mode>4 (t3, cmp1, operands[2], const0_rtx));
920 rtx cmp2 = gen_rtx_LT (<MODE>mode, operands[1], operands[0]);
921 emit_insn (gen_cstore<mode>4 (t4, cmp2, operands[1], operands[0]));
923 riscv_expand_conditional_branch (operands[3], NE, t3, t4);
;; Unsigned subtraction with borrow check: branch to label operand 3
;; when the subtraction underflows (op1 < op2, LTU).
929 (define_expand "usubv<mode>4"
930 [(set (match_operand:GPR 0 "register_operand" "= r")
931 (minus:GPR (match_operand:GPR 1 "reg_or_0_operand" " rJ")
932 (match_operand:GPR 2 "register_operand" " r")))
933 (label_ref (match_operand 3 "" ""))]
936 if (TARGET_64BIT && <MODE>mode == SImode)
938 rtx t3 = gen_reg_rtx (DImode);
939 rtx t4 = gen_reg_rtx (DImode);
941 if (GET_CODE (operands[1]) != CONST_INT)
942 emit_insn (gen_extend_insn (t3, operands[1], DImode, SImode, 0));
945 riscv_emit_binary (MINUS, operands[0], operands[1], operands[2]);
946 emit_insn (gen_extend_insn (t4, operands[0], DImode, SImode, 0));
948 riscv_expand_conditional_branch (operands[3], LTU, t3, t4);
952 emit_insn (gen_sub3_insn (operands[0], operands[1], operands[2]));
953 riscv_expand_conditional_branch (operands[3], LTU, operands[1],
;; 32-bit subtraction with the result extended to a DImode register
;; (presumably sign_extend -- the wrapper rtx is not visible in this chunk).
961 (define_insn "subsi3_extended"
962 [(set (match_operand:DI 0 "register_operand" "= r")
964 (minus:SI (match_operand:SI 1 "reg_or_0_operand" " rJ")
965 (match_operand:SI 2 "register_operand" " r"))))]
968 [(set_attr "type" "arith")
969 (set_attr "mode" "SI")])
;; Match a lowpart subreg of a DImode difference, extended to DImode.
971 (define_insn "*subsi3_extended2"
972 [(set (match_operand:DI 0 "register_operand" "= r")
974 (match_operator:SI 3 "subreg_lowpart_operator"
975 [(minus:DI (match_operand:DI 1 "reg_or_0_operand" " rJ")
976 (match_operand:DI 2 "register_operand" " r"))])))]
979 [(set_attr "type" "arith")
980 (set_attr "mode" "SI")])
;; 64-bit negation.
982 (define_insn "negdi2"
983 [(set (match_operand:DI 0 "register_operand" "=r")
984 (neg:DI (match_operand:DI 1 "register_operand" " r")))]
987 [(set_attr "type" "arith")
988 (set_attr "mode" "DI")])
;; 32-bit negation.
990 (define_insn "*negsi2"
991 [(set (match_operand:SI 0 "register_operand" "=r")
992 (neg:SI (match_operand:SI 1 "register_operand" " r")))]
995 [(set_attr "type" "arith")
996 (set_attr "mode" "SI")])
;; SImode negate expander: on RV64 route through negsi2_extended so the
;; result lives sign-extended in a DImode register (promoted subreg).
998 (define_expand "negsi2"
999 [(set (match_operand:SI 0 "register_operand" "=r")
1000 (neg:SI (match_operand:SI 1 "register_operand" " r")))]
1005 rtx t = gen_reg_rtx (DImode);
1006 emit_insn (gen_negsi2_extended (t, operands[1]));
1007 t = gen_lowpart (SImode, t);
1008 SUBREG_PROMOTED_VAR_P (t) = 1;
1009 SUBREG_PROMOTED_SET (t, SRP_SIGNED);
1010 emit_move_insn (operands[0], t);
;; 32-bit negation with the result extended to a DImode register.
1015 (define_insn "negsi2_extended"
1016 [(set (match_operand:DI 0 "register_operand" "=r")
1018 (neg:SI (match_operand:SI 1 "register_operand" " r"))))]
1021 [(set_attr "type" "arith")
1022 (set_attr "mode" "SI")])
;; Match a lowpart subreg of a DImode negation, extended to DImode.
1024 (define_insn "*negsi2_extended2"
1025 [(set (match_operand:DI 0 "register_operand" "=r")
1027 (match_operator:SI 2 "subreg_lowpart_operator"
1028 [(neg:DI (match_operand:DI 1 "register_operand" " r"))])))]
1031 [(set_attr "type" "arith")
1032 (set_attr "mode" "SI")])
1035 ;; ....................
1039 ;; ....................
;; mul<mode>3 -- floating-point multiply (fmul.s/fmul.d/...), available
;; with F/D-style hard float or the Zfinx in-X-register variants.
1042 (define_insn "mul<mode>3"
1043 [(set (match_operand:ANYF 0 "register_operand" "=f")
1044 (mult:ANYF (match_operand:ANYF 1 "register_operand" " f")
1045 (match_operand:ANYF 2 "register_operand" " f")))]
1046 "TARGET_HARD_FLOAT || TARGET_ZFINX"
1047 "fmul.<fmt>\t%0,%1,%2"
1048 [(set_attr "type" "fmul")
1049 (set_attr "mode" "<UNITMODE>")])
;; *mulsi3 -- 32-bit integer multiply insn (combine-only; the named
;; mulsi3 entry point is the define_expand below).  Requires the M
;; extension or Zmmul.
1051 (define_insn "*mulsi3"
1052 [(set (match_operand:SI 0 "register_operand" "=r")
1053 (mult:SI (match_operand:SI 1 "register_operand" " r")
1054 (match_operand:SI 2 "register_operand" " r")))]
1055 "TARGET_ZMMUL || TARGET_MUL"
1057 [(set_attr "type" "imul")
1058 (set_attr "mode" "SI")])
;; mulsi3 expander -- on the path visible here, multiplies into a DImode
;; temporary via mulsi3_extended and exposes the low SImode part as a
;; sign-promoted subreg.  Some guard lines are missing from this extract.
1060 (define_expand "mulsi3"
1061 [(set (match_operand:SI 0 "register_operand" "=r")
1062 (mult:SI (match_operand:SI 1 "register_operand" " r")
1063 (match_operand:SI 2 "register_operand" " r")))]
1064 "TARGET_ZMMUL || TARGET_MUL"
1068 rtx t = gen_reg_rtx (DImode);
1069 emit_insn (gen_mulsi3_extended (t, operands[1], operands[2]));
1070 t = gen_lowpart (SImode, t);
1071 SUBREG_PROMOTED_VAR_P (t) = 1;
1072 SUBREG_PROMOTED_SET (t, SRP_SIGNED);
1073 emit_move_insn (operands[0], t);
;; muldi3 -- 64-bit integer multiply (RV64 only).
1078 (define_insn "muldi3"
1079 [(set (match_operand:DI 0 "register_operand" "=r")
1080 (mult:DI (match_operand:DI 1 "register_operand" " r")
1081 (match_operand:DI 2 "register_operand" " r")))]
1082 "(TARGET_ZMMUL || TARGET_MUL) && TARGET_64BIT"
1084 [(set_attr "type" "imul")
1085 (set_attr "mode" "DI")])
;; mulv<mode>4 -- signed multiply with overflow check, branching to
;; operands[3] on overflow.  RV64/SImode: do the multiply in DImode on
;; sign-extended inputs; overflow iff sign-extending the truncated
;; 32-bit result (t6) differs from the full product (t3).  Native width:
;; compute the high part; overflow iff it differs from the sign
;; replication of the low result (arithmetic shift by BITS_PER_WORD-1).
1087 (define_expand "mulv<mode>4"
1088 [(set (match_operand:GPR 0 "register_operand" "=r")
1089 (mult:GPR (match_operand:GPR 1 "register_operand" " r")
1090 (match_operand:GPR 2 "register_operand" " r")))
1091 (label_ref (match_operand 3 "" ""))]
1092 "TARGET_ZMMUL || TARGET_MUL"
1094 if (TARGET_64BIT && <MODE>mode == SImode)
1096 rtx t3 = gen_reg_rtx (DImode);
1097 rtx t4 = gen_reg_rtx (DImode);
1098 rtx t5 = gen_reg_rtx (DImode);
1099 rtx t6 = gen_reg_rtx (DImode);
1101 if (GET_CODE (operands[1]) != CONST_INT)
1102 emit_insn (gen_extend_insn (t4, operands[1], DImode, SImode, 0));
1105 if (GET_CODE (operands[2]) != CONST_INT)
1106 emit_insn (gen_extend_insn (t5, operands[2], DImode, SImode, 0));
1109 emit_insn (gen_muldi3 (t3, t4, t5));
1111 emit_move_insn (operands[0], gen_lowpart (SImode, t3));
1112 emit_insn (gen_extend_insn (t6, operands[0], DImode, SImode, 0));
1114 riscv_expand_conditional_branch (operands[3], NE, t6, t3);
1118 rtx hp = gen_reg_rtx (<MODE>mode);
1119 rtx lp = gen_reg_rtx (<MODE>mode);
1121 emit_insn (gen_smul<mode>3_highpart (hp, operands[1], operands[2]));
1122 emit_insn (gen_mul<mode>3 (operands[0], operands[1], operands[2]));
1123 riscv_emit_binary (ASHIFTRT, lp, operands[0],
1124 GEN_INT (BITS_PER_WORD - 1));
1126 riscv_expand_conditional_branch (operands[3], NE, hp, lp);
;; umulv<mode>4 -- unsigned multiply with overflow check.  RV64/SImode:
;; shift both sign-extended operands left by 32 so umuldi3_highpart
;; yields the full 64-bit product of the 32-bit values; the low 32 bits
;; of that product are the result, and overflow iff the upper 32 bits
;; (t8, extracted with a logical shift right) are nonzero.  Native
;; width: overflow iff the unsigned high part is nonzero.
1132 (define_expand "umulv<mode>4"
1133 [(set (match_operand:GPR 0 "register_operand" "=r")
1134 (mult:GPR (match_operand:GPR 1 "register_operand" " r")
1135 (match_operand:GPR 2 "register_operand" " r")))
1136 (label_ref (match_operand 3 "" ""))]
1137 "TARGET_ZMMUL || TARGET_MUL"
1139 if (TARGET_64BIT && <MODE>mode == SImode)
1141 rtx t3 = gen_reg_rtx (DImode);
1142 rtx t4 = gen_reg_rtx (DImode);
1143 rtx t5 = gen_reg_rtx (DImode);
1144 rtx t6 = gen_reg_rtx (DImode);
1145 rtx t7 = gen_reg_rtx (DImode);
1146 rtx t8 = gen_reg_rtx (DImode);
1148 if (GET_CODE (operands[1]) != CONST_INT)
1149 emit_insn (gen_extend_insn (t3, operands[1], DImode, SImode, 0));
1152 if (GET_CODE (operands[2]) != CONST_INT)
1153 emit_insn (gen_extend_insn (t4, operands[2], DImode, SImode, 0));
1157 emit_insn (gen_ashldi3 (t5, t3, GEN_INT (32)));
1158 emit_insn (gen_ashldi3 (t6, t4, GEN_INT (32)));
1159 emit_insn (gen_umuldi3_highpart (t7, t5, t6));
1160 emit_move_insn (operands[0], gen_lowpart (SImode, t7));
1161 emit_insn (gen_lshrdi3 (t8, t7, GEN_INT (32)));
1163 riscv_expand_conditional_branch (operands[3], NE, t8, const0_rtx);
1167 rtx hp = gen_reg_rtx (<MODE>mode);
1169 emit_insn (gen_umul<mode>3_highpart (hp, operands[1], operands[2]));
1170 emit_insn (gen_mul<mode>3 (operands[0], operands[1], operands[2]));
1172 riscv_expand_conditional_branch (operands[3], NE, hp, const0_rtx);
;; mulsi3_extended -- RV64 32-bit multiply whose result is kept
;; sign-extended in a DImode register (sign_extend wrapper line dropped
;; from this extract).
1178 (define_insn "mulsi3_extended"
1179 [(set (match_operand:DI 0 "register_operand" "=r")
1181 (mult:SI (match_operand:SI 1 "register_operand" " r")
1182 (match_operand:SI 2 "register_operand" " r"))))]
1183 "(TARGET_ZMMUL || TARGET_MUL) && TARGET_64BIT"
1185 [(set_attr "type" "imul")
1186 (set_attr "mode" "SI")])
;; *mulsi3_extended2 -- combine pattern: sign-extend of the low SImode
;; part of a DImode multiply.
1188 (define_insn "*mulsi3_extended2"
1189 [(set (match_operand:DI 0 "register_operand" "=r")
1191 (match_operator:SI 3 "subreg_lowpart_operator"
1192 [(mult:DI (match_operand:DI 1 "register_operand" " r")
1193 (match_operand:DI 2 "register_operand" " r"))])))]
1194 "(TARGET_ZMMUL || TARGET_MUL) && TARGET_64BIT"
1196 [(set_attr "type" "imul")
1197 (set_attr "mode" "SI")])
1200 ;; ........................
1202 ;; MULTIPLICATION HIGH-PART
1204 ;; ........................
;; <u>mulditi3 -- full 64x64->128-bit multiply (signed or unsigned via
;; any_extend): low half with muldi3, high half with the matching
;; <su>muldi3_highpart, assembled into the TImode destination.
1208 (define_expand "<u>mulditi3"
1209 [(set (match_operand:TI 0 "register_operand")
1210 (mult:TI (any_extend:TI (match_operand:DI 1 "register_operand"))
1211 (any_extend:TI (match_operand:DI 2 "register_operand"))))]
1212 "(TARGET_ZMMUL || TARGET_MUL) && TARGET_64BIT"
1214 rtx low = gen_reg_rtx (DImode);
1215 emit_insn (gen_muldi3 (low, operands[1], operands[2]));
1217 rtx high = gen_reg_rtx (DImode);
1218 emit_insn (gen_<su>muldi3_highpart (high, operands[1], operands[2]));
1220 emit_move_insn (gen_lowpart (DImode, operands[0]), low);
1221 emit_move_insn (gen_highpart (DImode, operands[0]), high);
;; <su>muldi3_highpart -- high 64 bits of a 64x64 multiply
;; (mulh/mulhu; the assembler-template line is missing from this
;; extract).
1225 (define_insn "<su>muldi3_highpart"
1226 [(set (match_operand:DI 0 "register_operand" "=r")
1229 (mult:TI (any_extend:TI
1230 (match_operand:DI 1 "register_operand" " r"))
1232 (match_operand:DI 2 "register_operand" " r")))
1234 "(TARGET_ZMMUL || TARGET_MUL) && TARGET_64BIT"
1236 [(set_attr "type" "imul")
1237 (set_attr "mode" "DI")])
;; usmulditi3 -- 128-bit product of an unsigned by a signed 64-bit
;; value; same low/high assembly as <u>mulditi3 but using the
;; mixed-signedness high-part insn.
1239 (define_expand "usmulditi3"
1240 [(set (match_operand:TI 0 "register_operand")
1241 (mult:TI (zero_extend:TI (match_operand:DI 1 "register_operand"))
1242 (sign_extend:TI (match_operand:DI 2 "register_operand"))))]
1243 "(TARGET_ZMMUL || TARGET_MUL) && TARGET_64BIT"
1245 rtx low = gen_reg_rtx (DImode);
1246 emit_insn (gen_muldi3 (low, operands[1], operands[2]));
1248 rtx high = gen_reg_rtx (DImode);
1249 emit_insn (gen_usmuldi3_highpart (high, operands[1], operands[2]));
1251 emit_move_insn (gen_lowpart (DImode, operands[0]), low);
1252 emit_move_insn (gen_highpart (DImode, operands[0]), high);
;; usmuldi3_highpart -- high part of unsigned x signed 64-bit multiply.
;; NOTE(review): operand 1 is zero-extended and operand 2 presumably
;; sign-extended, but the sign_extend line was dropped from this
;; extract -- confirm against the original file.
1256 (define_insn "usmuldi3_highpart"
1257 [(set (match_operand:DI 0 "register_operand" "=r")
1260 (mult:TI (zero_extend:TI
1261 (match_operand:DI 1 "register_operand" "r"))
1263 (match_operand:DI 2 "register_operand" " r")))
1265 "(TARGET_ZMMUL || TARGET_MUL) && TARGET_64BIT"
1267 [(set_attr "type" "imul")
1268 (set_attr "mode" "DI")])
;; <u>mulsidi3 -- RV32 full 32x32->64-bit multiply: low word with a
;; plain MULT, high word with <su>mulsi3_highpart, written into the two
;; subwords of the DImode destination.
1270 (define_expand "<u>mulsidi3"
1271 [(set (match_operand:DI 0 "register_operand" "=r")
1272 (mult:DI (any_extend:DI
1273 (match_operand:SI 1 "register_operand" " r"))
1275 (match_operand:SI 2 "register_operand" " r"))))]
1276 "(TARGET_ZMMUL || TARGET_MUL) && !TARGET_64BIT"
1278 rtx temp = gen_reg_rtx (SImode);
1279 riscv_emit_binary (MULT, temp, operands[1], operands[2]);
1280 emit_insn (gen_<su>mulsi3_highpart (riscv_subword (operands[0], true),
1281 operands[1], operands[2]));
1282 emit_insn (gen_movsi (riscv_subword (operands[0], false), temp));
;; <su>mulsi3_highpart -- RV32 high 32 bits of a 32x32 multiply.
1286 (define_insn "<su>mulsi3_highpart"
1287 [(set (match_operand:SI 0 "register_operand" "=r")
1290 (mult:DI (any_extend:DI
1291 (match_operand:SI 1 "register_operand" " r"))
1293 (match_operand:SI 2 "register_operand" " r")))
1295 "(TARGET_ZMMUL || TARGET_MUL) && !TARGET_64BIT"
1297 [(set_attr "type" "imul")
1298 (set_attr "mode" "SI")])
;; usmulsidi3 -- RV32 unsigned x signed 32x32->64 multiply, same
;; structure as <u>mulsidi3.
1301 (define_expand "usmulsidi3"
1302 [(set (match_operand:DI 0 "register_operand" "=r")
1303 (mult:DI (zero_extend:DI
1304 (match_operand:SI 1 "register_operand" " r"))
1306 (match_operand:SI 2 "register_operand" " r"))))]
1307 "(TARGET_ZMMUL || TARGET_MUL) && !TARGET_64BIT"
1309 rtx temp = gen_reg_rtx (SImode);
1310 riscv_emit_binary (MULT, temp, operands[1], operands[2]);
1311 emit_insn (gen_usmulsi3_highpart (riscv_subword (operands[0], true),
1312 operands[1], operands[2]));
1313 emit_insn (gen_movsi (riscv_subword (operands[0], false), temp));
;; usmulsi3_highpart -- RV32 high part of unsigned x signed multiply.
1317 (define_insn "usmulsi3_highpart"
1318 [(set (match_operand:SI 0 "register_operand" "=r")
1321 (mult:DI (zero_extend:DI
1322 (match_operand:SI 1 "register_operand" " r"))
1324 (match_operand:SI 2 "register_operand" " r")))
1326 "(TARGET_ZMMUL || TARGET_MUL) && !TARGET_64BIT"
1328 [(set_attr "type" "imul")
1329 (set_attr "mode" "SI")])
1332 ;; ....................
1334 ;; DIVISION and REMAINDER
1336 ;; ....................
;; *<optab>si3 -- 32-bit divide/remainder insn (div/divu/rem/remu; the
;; %~ modifier appends "w" on RV64).  Its condition line is missing from
;; this extract.
1339 (define_insn "*<optab>si3"
1340 [(set (match_operand:SI 0 "register_operand" "=r")
1341 (any_div:SI (match_operand:SI 1 "register_operand" " r")
1342 (match_operand:SI 2 "register_operand" " r")))]
1344 "<insn>%i2%~\t%0,%1,%2"
1345 [(set_attr "type" "idiv")
1346 (set_attr "mode" "SI")])
;; <optab>si3 expander -- on the path visible here, performs the 32-bit
;; division via <optab>si3_extended into a DImode temporary and exposes
;; the low part as a sign-promoted subreg.
1348 (define_expand "<optab>si3"
1349 [(set (match_operand:SI 0 "register_operand" "=r")
1350 (any_div:SI (match_operand:SI 1 "register_operand" " r")
1351 (match_operand:SI 2 "register_operand" " r")))]
1356 rtx t = gen_reg_rtx (DImode);
1357 emit_insn (gen_<optab>si3_extended (t, operands[1], operands[2]));
1358 t = gen_lowpart (SImode, t);
1359 SUBREG_PROMOTED_VAR_P (t) = 1;
1360 SUBREG_PROMOTED_SET (t, SRP_SIGNED);
1361 emit_move_insn (operands[0], t);
;; <optab>di3 -- 64-bit divide/remainder (RV64, M extension).
1366 (define_insn "<optab>di3"
1367 [(set (match_operand:DI 0 "register_operand" "=r")
1368 (any_div:DI (match_operand:DI 1 "register_operand" " r")
1369 (match_operand:DI 2 "register_operand" " r")))]
1370 "TARGET_DIV && TARGET_64BIT"
1371 "<insn>%i2\t%0,%1,%2"
1372 [(set_attr "type" "idiv")
1373 (set_attr "mode" "DI")])
;; <u>divmod<mode>4 -- combined quotient+remainder expander, used when
;; the tuning hook riscv_use_divmod_expander() says so: quotient via
;; div, then remainder reconstructed as n - q*d (mul + sub), which can
;; beat issuing a second division.
1375 (define_expand "<u>divmod<mode>4"
1377 [(set (match_operand:GPR 0 "register_operand")
1378 (only_div:GPR (match_operand:GPR 1 "register_operand")
1379 (match_operand:GPR 2 "register_operand")))
1380 (set (match_operand:GPR 3 "register_operand")
1381 (<paired_mod>:GPR (match_dup 1) (match_dup 2)))])]
1382 "TARGET_DIV && riscv_use_divmod_expander ()"
1384 rtx tmp = gen_reg_rtx (<MODE>mode);
1385 emit_insn (gen_<u>div<GPR:mode>3 (operands[0], operands[1], operands[2]));
1386 emit_insn (gen_mul<GPR:mode>3 (tmp, operands[0], operands[2]));
1387 emit_insn (gen_sub<GPR:mode>3 (operands[3], operands[1], tmp));
;; <optab>si3_extended -- RV64 word division (div*w/rem*w) whose SImode
;; result is kept sign-extended in a DImode register.
1391 (define_insn "<optab>si3_extended"
1392 [(set (match_operand:DI 0 "register_operand" "=r")
1394 (any_div:SI (match_operand:SI 1 "register_operand" " r")
1395 (match_operand:SI 2 "register_operand" " r"))))]
1396 "TARGET_DIV && TARGET_64BIT"
1397 "<insn>%i2w\t%0,%1,%2"
1398 [(set_attr "type" "idiv")
1399 (set_attr "mode" "DI")])
;; div<mode>3 -- floating-point divide; additionally gated on
;; TARGET_FDIV (fdiv/fsqrt may be disabled by tuning).
1401 (define_insn "div<mode>3"
1402 [(set (match_operand:ANYF 0 "register_operand" "=f")
1403 (div:ANYF (match_operand:ANYF 1 "register_operand" " f")
1404 (match_operand:ANYF 2 "register_operand" " f")))]
1405 "(TARGET_HARD_FLOAT || TARGET_ZFINX) && TARGET_FDIV"
1406 "fdiv.<fmt>\t%0,%1,%2"
1407 [(set_attr "type" "fdiv")
1408 (set_attr "mode" "<UNITMODE>")])
1411 ;; ....................
1415 ;; ....................
;; sqrt<mode>2 -- floating-point square root; gated on TARGET_FDIV like
;; fdiv, since fsqrt shares that enablement.
1417 (define_insn "sqrt<mode>2"
1418 [(set (match_operand:ANYF 0 "register_operand" "=f")
1419 (sqrt:ANYF (match_operand:ANYF 1 "register_operand" " f")))]
1420 "(TARGET_HARD_FLOAT || TARGET_ZFINX) && TARGET_FDIV"
1422 return "fsqrt.<fmt>\t%0,%1";
1424 [(set_attr "type" "fsqrt")
1425 (set_attr "mode" "<UNITMODE>")])
1427 ;; Floating point multiply accumulate instructions.
;; fma<mode>4 -- fused multiply-add: op0 = op1 * op2 + op3.
1430 (define_insn "fma<mode>4"
1431 [(set (match_operand:ANYF 0 "register_operand" "=f")
1432 (fma:ANYF (match_operand:ANYF 1 "register_operand" " f")
1433 (match_operand:ANYF 2 "register_operand" " f")
1434 (match_operand:ANYF 3 "register_operand" " f")))]
1435 "TARGET_HARD_FLOAT || TARGET_ZFINX"
1436 "fmadd.<fmt>\t%0,%1,%2,%3"
1437 [(set_attr "type" "fmadd")
1438 (set_attr "mode" "<UNITMODE>")])
;; fms<mode>4 -- fused multiply-subtract: op0 = op1 * op2 - op3.
1441 (define_insn "fms<mode>4"
1442 [(set (match_operand:ANYF 0 "register_operand" "=f")
1443 (fma:ANYF (match_operand:ANYF 1 "register_operand" " f")
1444 (match_operand:ANYF 2 "register_operand" " f")
1445 (neg:ANYF (match_operand:ANYF 3 "register_operand" " f"))))]
1446 "TARGET_HARD_FLOAT || TARGET_ZFINX"
1447 "fmsub.<fmt>\t%0,%1,%2,%3"
1448 [(set_attr "type" "fmadd")
1449 (set_attr "mode" "<UNITMODE>")])
;; fnms<mode>4 -- op0 = -op1 * op2 - op3 (fnmadd negates the sum).
1452 (define_insn "fnms<mode>4"
1453 [(set (match_operand:ANYF 0 "register_operand" "=f")
1455 (neg:ANYF (match_operand:ANYF 1 "register_operand" " f"))
1456 (match_operand:ANYF 2 "register_operand" " f")
1457 (neg:ANYF (match_operand:ANYF 3 "register_operand" " f"))))]
1458 "TARGET_HARD_FLOAT || TARGET_ZFINX"
1459 "fnmadd.<fmt>\t%0,%1,%2,%3"
1460 [(set_attr "type" "fmadd")
1461 (set_attr "mode" "<UNITMODE>")])
;; fnma<mode>4 -- op0 = -op1 * op2 + op3 (fnmsub).
1464 (define_insn "fnma<mode>4"
1465 [(set (match_operand:ANYF 0 "register_operand" "=f")
1467 (neg:ANYF (match_operand:ANYF 1 "register_operand" " f"))
1468 (match_operand:ANYF 2 "register_operand" " f")
1469 (match_operand:ANYF 3 "register_operand" " f")))]
1470 "TARGET_HARD_FLOAT || TARGET_ZFINX"
1471 "fnmsub.<fmt>\t%0,%1,%2,%3"
1472 [(set_attr "type" "fmadd")
1473 (set_attr "mode" "<UNITMODE>")])
;; The four "*"-prefixed variants below match combine-built
;; double-negated forms; they are valid only when signed zeros need not
;; be honored, since folding the negations can flip the sign of a zero.
1475 ;; -(-a * b - c), modulo signed zeros
1476 (define_insn "*fma<mode>4"
1477 [(set (match_operand:ANYF 0 "register_operand" "=f")
1480 (neg:ANYF (match_operand:ANYF 1 "register_operand" " f"))
1481 (match_operand:ANYF 2 "register_operand" " f")
1482 (neg:ANYF (match_operand:ANYF 3 "register_operand" " f")))))]
1483 "(TARGET_HARD_FLOAT || TARGET_ZFINX) && !HONOR_SIGNED_ZEROS (<MODE>mode)"
1484 "fmadd.<fmt>\t%0,%1,%2,%3"
1485 [(set_attr "type" "fmadd")
1486 (set_attr "mode" "<UNITMODE>")])
1488 ;; -(-a * b + c), modulo signed zeros
1489 (define_insn "*fms<mode>4"
1490 [(set (match_operand:ANYF 0 "register_operand" "=f")
1493 (neg:ANYF (match_operand:ANYF 1 "register_operand" " f"))
1494 (match_operand:ANYF 2 "register_operand" " f")
1495 (match_operand:ANYF 3 "register_operand" " f"))))]
1496 "(TARGET_HARD_FLOAT || TARGET_ZFINX) && !HONOR_SIGNED_ZEROS (<MODE>mode)"
1497 "fmsub.<fmt>\t%0,%1,%2,%3"
1498 [(set_attr "type" "fmadd")
1499 (set_attr "mode" "<UNITMODE>")])
1501 ;; -(a * b + c), modulo signed zeros
1502 (define_insn "*fnms<mode>4"
1503 [(set (match_operand:ANYF 0 "register_operand" "=f")
1506 (match_operand:ANYF 1 "register_operand" " f")
1507 (match_operand:ANYF 2 "register_operand" " f")
1508 (match_operand:ANYF 3 "register_operand" " f"))))]
1509 "(TARGET_HARD_FLOAT || TARGET_ZFINX) && !HONOR_SIGNED_ZEROS (<MODE>mode)"
1510 "fnmadd.<fmt>\t%0,%1,%2,%3"
1511 [(set_attr "type" "fmadd")
1512 (set_attr "mode" "<UNITMODE>")])
1514 ;; -(a * b - c), modulo signed zeros
1515 (define_insn "*fnma<mode>4"
1516 [(set (match_operand:ANYF 0 "register_operand" "=f")
1519 (match_operand:ANYF 1 "register_operand" " f")
1520 (match_operand:ANYF 2 "register_operand" " f")
1521 (neg:ANYF (match_operand:ANYF 3 "register_operand" " f")))))]
1522 "(TARGET_HARD_FLOAT || TARGET_ZFINX) && !HONOR_SIGNED_ZEROS (<MODE>mode)"
1523 "fnmsub.<fmt>\t%0,%1,%2,%3"
1524 [(set_attr "type" "fmadd")
1525 (set_attr "mode" "<UNITMODE>")])
1528 ;; ....................
1532 ;; ....................
;; abs<mode>2 -- floating-point absolute value (assembler-template line
;; missing from this extract; presumably fabs.<fmt> / fsgnjx).
1534 (define_insn "abs<mode>2"
1535 [(set (match_operand:ANYF 0 "register_operand" "=f")
1536 (abs:ANYF (match_operand:ANYF 1 "register_operand" " f")))]
1537 "TARGET_HARD_FLOAT || TARGET_ZFINX"
1539 [(set_attr "type" "fmove")
1540 (set_attr "mode" "<UNITMODE>")])
;; copysign<mode>3 -- op0 gets op1's magnitude with op2's sign, via the
;; sign-injection instruction fsgnj.  The unspec tag line is missing
;; from this extract.
1542 (define_insn "copysign<mode>3"
1543 [(set (match_operand:ANYF 0 "register_operand" "=f")
1544 (unspec:ANYF [(match_operand:ANYF 1 "register_operand" " f")
1545 (match_operand:ANYF 2 "register_operand" " f")]
1547 "TARGET_HARD_FLOAT || TARGET_ZFINX"
1548 "fsgnj.<fmt>\t%0,%1,%2"
1549 [(set_attr "type" "fmove")
1550 (set_attr "mode" "<UNITMODE>")])
;; neg<mode>2 -- floating-point negation (template line missing;
;; presumably fneg.<fmt> / fsgnjn).
1552 (define_insn "neg<mode>2"
1553 [(set (match_operand:ANYF 0 "register_operand" "=f")
1554 (neg:ANYF (match_operand:ANYF 1 "register_operand" " f")))]
1555 "TARGET_HARD_FLOAT || TARGET_ZFINX"
1557 [(set_attr "type" "fmove")
1558 (set_attr "mode" "<UNITMODE>")])
1561 ;; ....................
1565 ;; ....................
;; fminm<mode>3 / fmaxm<mode>3 -- Zfa min/max variants (fminm/fmaxm);
;; modelled as unspecs with (use ...) wrappers.  The unspec tag lines
;; are missing from this extract.
1567 (define_insn "fminm<mode>3"
1568 [(set (match_operand:ANYF 0 "register_operand" "=f")
1569 (unspec:ANYF [(use (match_operand:ANYF 1 "register_operand" " f"))
1570 (use (match_operand:ANYF 2 "register_operand" " f"))]
1572 "TARGET_HARD_FLOAT && TARGET_ZFA"
1573 "fminm.<fmt>\t%0,%1,%2"
1574 [(set_attr "type" "fmove")
1575 (set_attr "mode" "<UNITMODE>")])
1577 (define_insn "fmaxm<mode>3"
1578 [(set (match_operand:ANYF 0 "register_operand" "=f")
1579 (unspec:ANYF [(use (match_operand:ANYF 1 "register_operand" " f"))
1580 (use (match_operand:ANYF 2 "register_operand" " f"))]
1582 "TARGET_HARD_FLOAT && TARGET_ZFA"
1583 "fmaxm.<fmt>\t%0,%1,%2"
1584 [(set_attr "type" "fmove")
1585 (set_attr "mode" "<UNITMODE>")])
;; fmin<mode>3 / fmax<mode>3 -- base fmin/fmax, only when signaling
;; NaNs need not be honored (RISC-V fmin/fmax quiet sNaNs rather than
;; propagating the exception semantics the optab would require).
1587 (define_insn "fmin<mode>3"
1588 [(set (match_operand:ANYF 0 "register_operand" "=f")
1589 (unspec:ANYF [(use (match_operand:ANYF 1 "register_operand" " f"))
1590 (use (match_operand:ANYF 2 "register_operand" " f"))]
1592 "(TARGET_HARD_FLOAT || TARGET_ZFINX) && !HONOR_SNANS (<MODE>mode)"
1593 "fmin.<fmt>\t%0,%1,%2"
1594 [(set_attr "type" "fmove")
1595 (set_attr "mode" "<UNITMODE>")])
1597 (define_insn "fmax<mode>3"
1598 [(set (match_operand:ANYF 0 "register_operand" "=f")
1599 (unspec:ANYF [(use (match_operand:ANYF 1 "register_operand" " f"))
1600 (use (match_operand:ANYF 2 "register_operand" " f"))]
1602 "(TARGET_HARD_FLOAT || TARGET_ZFINX) && !HONOR_SNANS (<MODE>mode)"
1603 "fmax.<fmt>\t%0,%1,%2"
1604 [(set_attr "type" "fmove")
1605 (set_attr "mode" "<UNITMODE>")])
;; smin<mode>3 / smax<mode>3 -- RTL smin/smax mapped directly to
;; fmin/fmax (no NaN-semantics guard needed for these RTL codes).
1607 (define_insn "smin<mode>3"
1608 [(set (match_operand:ANYF 0 "register_operand" "=f")
1609 (smin:ANYF (match_operand:ANYF 1 "register_operand" " f")
1610 (match_operand:ANYF 2 "register_operand" " f")))]
1611 "TARGET_HARD_FLOAT || TARGET_ZFINX"
1612 "fmin.<fmt>\t%0,%1,%2"
1613 [(set_attr "type" "fmove")
1614 (set_attr "mode" "<UNITMODE>")])
1616 (define_insn "smax<mode>3"
1617 [(set (match_operand:ANYF 0 "register_operand" "=f")
1618 (smax:ANYF (match_operand:ANYF 1 "register_operand" " f")
1619 (match_operand:ANYF 2 "register_operand" " f")))]
1620 "TARGET_HARD_FLOAT || TARGET_ZFINX"
1621 "fmax.<fmt>\t%0,%1,%2"
1622 [(set_attr "type" "fmove")
1623 (set_attr "mode" "<UNITMODE>")])
1626 ;; ....................
1630 ;; ....................
1633 ;; For RV64, we don't expose the SImode operations to the rtl expanders,
1634 ;; but SImode versions exist for combine.
;; and<mode>3 expander -- when the mask operand is exactly a narrower
;; mode's full mask (0xffff for HImode, 0xffffffff for SImode), emit a
;; zero-extension from that mode instead of an AND; otherwise fall
;; through (the fallthrough/closing lines are missing from this
;; extract).
1636 (define_expand "and<mode>3"
1637 [(set (match_operand:X 0 "register_operand")
1638 (and:X (match_operand:X 1 "register_operand")
1639 (match_operand:X 2 "arith_operand_or_mode_mask")))]
1642 /* If the second operand is a mode mask, emit an extension
1644 if (CONST_INT_P (operands[2]))
1646 enum machine_mode tmode = VOIDmode;
1647 if (UINTVAL (operands[2]) == GET_MODE_MASK (HImode))
1649 else if (UINTVAL (operands[2]) == GET_MODE_MASK (SImode))
1652 if (tmode != VOIDmode)
1654 rtx tmp = gen_lowpart (tmode, operands[1]);
1655 emit_insn (gen_extend_insn (operands[0], tmp, <MODE>mode, tmode, 1));
;; *and<mode>3 -- AND insn, register or 12-bit immediate ("I") second
;; operand; "%" marks operands 1/2 as commutative.
1661 (define_insn "*and<mode>3"
1662 [(set (match_operand:X 0 "register_operand" "=r,r")
1663 (and:X (match_operand:X 1 "register_operand" "%r,r")
1664 (match_operand:X 2 "arith_operand" " r,I")))]
1667 [(set_attr "type" "logical")
1668 (set_attr "mode" "<MODE>")])
;; <optab><mode>3 -- OR/XOR (any_or) in the word mode, reg or
;; immediate form via %i2.
1670 (define_insn "<optab><mode>3"
1671 [(set (match_operand:X 0 "register_operand" "=r,r")
1672 (any_or:X (match_operand:X 1 "register_operand" "%r,r")
1673 (match_operand:X 2 "arith_operand" " r,I")))]
1675 "<insn>%i2\t%0,%1,%2"
1676 [(set_attr "type" "logical")
1677 (set_attr "mode" "<MODE>")])
;; *<optab>si3_internal -- SImode bitwise ops for RV64 combine use.
1679 (define_insn "*<optab>si3_internal"
1680 [(set (match_operand:SI 0 "register_operand" "=r,r")
1681 (any_bitwise:SI (match_operand:SI 1 "register_operand" "%r,r")
1682 (match_operand:SI 2 "arith_operand" " r,I")))]
1684 "<insn>%i2\t%0,%1,%2"
1685 [(set_attr "type" "logical")
1686 (set_attr "mode" "SI")])
;; one_cmpl<mode>2 -- bitwise NOT (template line missing; presumably
;; "not\t%0,%1").
1688 (define_insn "one_cmpl<mode>2"
1689 [(set (match_operand:X 0 "register_operand" "=r")
1690 (not:X (match_operand:X 1 "register_operand" " r")))]
1693 [(set_attr "type" "logical")
1694 (set_attr "mode" "<MODE>")])
;; *one_cmplsi2_internal -- SImode NOT for RV64 combine use.
1696 (define_insn "*one_cmplsi2_internal"
1697 [(set (match_operand:SI 0 "register_operand" "=r")
1698 (not:SI (match_operand:SI 1 "register_operand" " r")))]
1701 [(set_attr "type" "logical")
1702 (set_attr "mode" "SI")])
1705 ;; ....................
1709 ;; ....................
;; truncdfsf2 -- narrow DFmode to SFmode (fcvt.s.d; the float_truncate
;; and template lines are missing from this extract).
1711 (define_insn "truncdfsf2"
1712 [(set (match_operand:SF 0 "register_operand" "=f")
1714 (match_operand:DF 1 "register_operand" " f")))]
1715 "TARGET_DOUBLE_FLOAT || TARGET_ZDINX"
1717 [(set_attr "type" "fcvt")
1718 (set_attr "mode" "SF")])
;; truncsfhf2 -- narrow SFmode to HFmode; requires the Zfhmin/Zhinxmin
;; half-precision conversion subsets.
1720 (define_insn "truncsfhf2"
1721 [(set (match_operand:HF 0 "register_operand" "=f")
1723 (match_operand:SF 1 "register_operand" " f")))]
1724 "TARGET_ZFHMIN || TARGET_ZHINXMIN"
1726 [(set_attr "type" "fcvt")
1727 (set_attr "mode" "HF")])
;; truncdfhf2 -- narrow DFmode to HFmode; needs both the half and
;; double float subsets (register or in-X-register flavors).
1729 (define_insn "truncdfhf2"
1730 [(set (match_operand:HF 0 "register_operand" "=f")
1732 (match_operand:DF 1 "register_operand" " f")))]
1733 "(TARGET_ZFHMIN && TARGET_DOUBLE_FLOAT) ||
1734 (TARGET_ZHINXMIN && TARGET_ZDINX)"
1736 [(set_attr "type" "fcvt")
1737 (set_attr "mode" "HF")])
1740 ;; ....................
1744 ;; ....................
;; zero_extendsidi2 expander -- RV64 SI->DI zero extension entry point
;; (its condition/body lines are missing from this extract).
1748 (define_expand "zero_extendsidi2"
1749 [(set (match_operand:DI 0 "register_operand")
1750 (zero_extend:DI (match_operand:SI 1 "nonimmediate_operand")))]
;; *zero_extendsidi2_internal -- base-ISA SI->DI zero extension: from a
;; register it splits after reload into slli 32 / srli 32; from memory
;; it is a load.  Excluded when Zba/XTheadBb/XTheadMemIdx provide a
;; direct instruction, and for vector-length registers.
1753 (define_insn_and_split "*zero_extendsidi2_internal"
1754 [(set (match_operand:DI 0 "register_operand" "=r,r")
1756 (match_operand:SI 1 "nonimmediate_operand" " r,m")))]
1757 "TARGET_64BIT && !TARGET_ZBA && !TARGET_XTHEADBB && !TARGET_XTHEADMEMIDX
1758 && !(REG_P (operands[1]) && VL_REG_P (REGNO (operands[1])))"
1762 "&& reload_completed
1763 && REG_P (operands[1])
1764 && !paradoxical_subreg_p (operands[0])"
1766 (ashift:DI (match_dup 1) (const_int 32)))
1768 (lshiftrt:DI (match_dup 0) (const_int 32)))]
1769 { operands[1] = gen_lowpart (DImode, operands[1]); }
1770 [(set_attr "move_type" "shift_shift,load")
1771 (set_attr "type" "load")
1772 (set_attr "mode" "DI")])
;; zero_extendhi<GPR:mode>2 expander -- HI->word zero extension.
1774 (define_expand "zero_extendhi<GPR:mode>2"
1775 [(set (match_operand:GPR 0 "register_operand")
1777 (match_operand:HI 1 "nonimmediate_operand")))]
;; *zero_extendhi<GPR:mode>2 -- base-ISA HI zero extension: from a
;; register it splits into a shift-left/shift-right pair by
;; (wordsize - 16); from memory it is an lhu-style load.
1780 (define_insn_and_split "*zero_extendhi<GPR:mode>2"
1781 [(set (match_operand:GPR 0 "register_operand" "=r,r")
1783 (match_operand:HI 1 "nonimmediate_operand" " r,m")))]
1784 "!TARGET_ZBB && !TARGET_XTHEADBB && !TARGET_XTHEADMEMIDX"
1788 "&& reload_completed
1789 && REG_P (operands[1])
1790 && !paradoxical_subreg_p (operands[0])"
1792 (ashift:GPR (match_dup 1) (match_dup 2)))
1794 (lshiftrt:GPR (match_dup 0) (match_dup 2)))]
1796 operands[1] = gen_lowpart (<GPR:MODE>mode, operands[1]);
1797 operands[2] = GEN_INT(GET_MODE_BITSIZE(<GPR:MODE>mode) - 16);
1799 [(set_attr "move_type" "shift_shift,load")
1800 (set_attr "type" "load")
1801 (set_attr "mode" "<GPR:MODE>")])
;; zero_extendqi<SUPERQI:mode>2 expander plus its insn -- QI zero
;; extension is always a single andi or lbu, so no split is needed.
1803 (define_expand "zero_extendqi<SUPERQI:mode>2"
1804 [(set (match_operand:SUPERQI 0 "register_operand")
1805 (zero_extend:SUPERQI
1806 (match_operand:QI 1 "nonimmediate_operand")))]
1809 (define_insn "*zero_extendqi<SUPERQI:mode>2_internal"
1810 [(set (match_operand:SUPERQI 0 "register_operand" "=r,r")
1811 (zero_extend:SUPERQI
1812 (match_operand:QI 1 "nonimmediate_operand" " r,m")))]
1813 "!TARGET_XTHEADMEMIDX"
1817 [(set_attr "move_type" "andi,load")
1818 (set_attr "type" "arith,load")
1819 (set_attr "mode" "<SUPERQI:MODE>")])
1822 ;; ....................
1826 ;; ....................
;; extendsidi2 expander -- RV64 SI->DI sign extension.
1828 (define_expand "extendsidi2"
1829 [(set (match_operand:DI 0 "register_operand" "=r,r")
1831 (match_operand:SI 1 "nonimmediate_operand" " r,m")))]
;; *extendsidi2_internal -- register source is a sign-extending move
;; (sext.w); memory source is an lw (which sign-extends on RV64).
1834 (define_insn "*extendsidi2_internal"
1835 [(set (match_operand:DI 0 "register_operand" "=r,r")
1837 (match_operand:SI 1 "nonimmediate_operand" " r,m")))]
1838 "TARGET_64BIT && !TARGET_XTHEADMEMIDX"
1842 [(set_attr "move_type" "move,load")
1843 (set_attr "type" "move,load")
1844 (set_attr "mode" "DI")])
;; extend<SHORT:mode><SUPERQI:mode>2 expander -- QI/HI sign extension.
1846 (define_expand "extend<SHORT:mode><SUPERQI:mode>2"
1847 [(set (match_operand:SUPERQI 0 "register_operand")
1848 (sign_extend:SUPERQI (match_operand:SHORT 1 "nonimmediate_operand")))]
;; *extend<SHORT:mode><SUPERQI:mode>2 -- base-ISA QI/HI sign extension:
;; from a register it splits into an ashift/ashiftrt pair by
;; (32 - source width); from memory it is an lb/lh load.
1851 (define_insn_and_split "*extend<SHORT:mode><SUPERQI:mode>2"
1852 [(set (match_operand:SUPERQI 0 "register_operand" "=r,r")
1853 (sign_extend:SUPERQI
1854 (match_operand:SHORT 1 "nonimmediate_operand" " r,m")))]
1855 "!TARGET_ZBB && !TARGET_XTHEADBB && !TARGET_XTHEADMEMIDX"
1858 l<SHORT:size>\t%0,%1"
1859 "&& reload_completed
1860 && REG_P (operands[1])
1861 && !paradoxical_subreg_p (operands[0])"
1862 [(set (match_dup 0) (ashift:SI (match_dup 1) (match_dup 2)))
1863 (set (match_dup 0) (ashiftrt:SI (match_dup 0) (match_dup 2)))]
1865 operands[0] = gen_lowpart (SImode, operands[0]);
1866 operands[1] = gen_lowpart (SImode, operands[1]);
1867 operands[2] = GEN_INT (GET_MODE_BITSIZE (SImode)
1868 - GET_MODE_BITSIZE (<SHORT:MODE>mode));
1870 [(set_attr "move_type" "shift_shift,load")
1871 (set_attr "type" "load")
1872 (set_attr "mode" "SI")])
;; extendhfsf2 -- widen HFmode to SFmode (Zfhmin/Zhinxmin).
1874 (define_insn "extendhfsf2"
1875 [(set (match_operand:SF 0 "register_operand" "=f")
1877 (match_operand:HF 1 "register_operand" " f")))]
1878 "TARGET_ZFHMIN || TARGET_ZHINXMIN"
1880 [(set_attr "type" "fcvt")
1881 (set_attr "mode" "SF")])
;; extendsfdf2 -- widen SFmode to DFmode.
1883 (define_insn "extendsfdf2"
1884 [(set (match_operand:DF 0 "register_operand" "=f")
1886 (match_operand:SF 1 "register_operand" " f")))]
1887 "TARGET_DOUBLE_FLOAT || TARGET_ZDINX"
1889 [(set_attr "type" "fcvt")
1890 (set_attr "mode" "DF")])
;; extendhfdf2 -- widen HFmode to DFmode; needs both half and double
;; float subsets.
1892 (define_insn "extendhfdf2"
1893 [(set (match_operand:DF 0 "register_operand" "=f")
1895 (match_operand:HF 1 "register_operand" " f")))]
1896 "(TARGET_ZFHMIN && TARGET_DOUBLE_FLOAT) ||
1897 (TARGET_ZHINXMIN && TARGET_ZDINX)"
1899 [(set_attr "type" "fcvt")
1900 (set_attr "mode" "DF")])
1902 ;; 16-bit floating point moves
;; movhf expander -- defers to riscv_legitimize_move, which returns
;; true when it has emitted the move itself (remaining lines of the
;; body are missing from this extract).
1903 (define_expand "movhf"
1904 [(set (match_operand:HF 0 "")
1905 (match_operand:HF 1 ""))]
1908 if (riscv_legitimize_move (HFmode, operands[0], operands[1]))
;; *movhf_hardfloat -- HFmode moves when half-precision FP registers
;; are available; alternatives cover FPR<->FPR, Zfa load-immediate
;; (zfli), zeroing, FP load/store, GPR<->FPR transfers, and GPR
;; move/load/store.  Output is produced by riscv_output_move.
1912 (define_insn "*movhf_hardfloat"
1913 [(set (match_operand:HF 0 "nonimmediate_operand" "=f, f,f,f,m,m,*f,*r, *r,*r,*m")
1914 (match_operand:HF 1 "move_operand" " f,zfli,G,m,f,G,*r,*f,*G*r,*m,*r"))]
1916 && (register_operand (operands[0], HFmode)
1917 || reg_or_0_operand (operands[1], HFmode))"
1918 { return riscv_output_move (operands[0], operands[1]); }
1919 [(set_attr "move_type" "fmove,fmove,mtc,fpload,fpstore,store,mtc,mfc,move,load,store")
1920 (set_attr "type" "fmove,fmove,mtc,fpload,fpstore,store,mtc,mfc,move,load,store")
1921 (set_attr "mode" "HF")])
;; *movhf_softfloat -- HFmode moves without half-precision hardware
;; support; values live in GPRs (its condition line is partially
;; missing from this extract).
1923 (define_insn "*movhf_softfloat"
1924 [(set (match_operand:HF 0 "nonimmediate_operand" "=f, r,r,m,*f,*r")
1925 (match_operand:HF 1 "move_operand" " f,Gr,m,r,*r,*f"))]
1927 && (register_operand (operands[0], HFmode)
1928 || reg_or_0_operand (operands[1], HFmode))"
1929 { return riscv_output_move (operands[0], operands[1]); }
1930 [(set_attr "move_type" "fmove,move,load,store,mtc,mfc")
1931 (set_attr "type" "fmove,move,load,store,mtc,mfc")
1932 (set_attr "mode" "HF")])
;; *movhf_softfloat_boxing -- box an X-register HF value into an FP
;; register via fmv (UNSPEC_FMV_SFP16_X); works around HFmode without
;; full hardware support (see the head comment of this file).
1934 (define_insn "*movhf_softfloat_boxing"
1935 [(set (match_operand:HF 0 "register_operand" "=f")
1936 (unspec:HF [(match_operand:X 1 "register_operand" " r")] UNSPEC_FMV_SFP16_X))]
1939 [(set_attr "type" "fmove")
1940 (set_attr "mode" "SF")])
1943 ;; ....................
1947 ;; ....................
;; fix_trunc -- FP to signed integer, truncating (round-towards-zero
;; via the explicit rtz rounding-mode operand).
1949 (define_insn "fix_trunc<ANYF:mode><GPR:mode>2"
1950 [(set (match_operand:GPR 0 "register_operand" "=r")
1952 (match_operand:ANYF 1 "register_operand" " f")))]
1953 "TARGET_HARD_FLOAT || TARGET_ZFINX"
1954 "fcvt.<GPR:ifmt>.<ANYF:fmt> %0,%1,rtz"
1955 [(set_attr "type" "fcvt_f2i")
1956 (set_attr "mode" "<ANYF:MODE>")])
;; fixuns_trunc -- FP to unsigned integer, truncating.
1958 (define_insn "fixuns_trunc<ANYF:mode><GPR:mode>2"
1959 [(set (match_operand:GPR 0 "register_operand" "=r")
1961 (match_operand:ANYF 1 "register_operand" " f")))]
1962 "TARGET_HARD_FLOAT || TARGET_ZFINX"
1963 "fcvt.<GPR:ifmt>u.<ANYF:fmt> %0,%1,rtz"
1964 [(set_attr "type" "fcvt_f2i")
1965 (set_attr "mode" "<ANYF:MODE>")])
;; float -- signed integer to FP; %z1 substitutes x0 for a zero input.
1967 (define_insn "float<GPR:mode><ANYF:mode>2"
1968 [(set (match_operand:ANYF 0 "register_operand" "= f")
1970 (match_operand:GPR 1 "reg_or_0_operand" " rJ")))]
1971 "TARGET_HARD_FLOAT || TARGET_ZFINX"
1972 "fcvt.<ANYF:fmt>.<GPR:ifmt>\t%0,%z1"
1973 [(set_attr "type" "fcvt_i2f")
1974 (set_attr "mode" "<ANYF:MODE>")])
;; floatuns -- unsigned integer to FP.
1976 (define_insn "floatuns<GPR:mode><ANYF:mode>2"
1977 [(set (match_operand:ANYF 0 "register_operand" "= f")
1978 (unsigned_float:ANYF
1979 (match_operand:GPR 1 "reg_or_0_operand" " rJ")))]
1980 "TARGET_HARD_FLOAT || TARGET_ZFINX"
1981 "fcvt.<ANYF:fmt>.<GPR:ifmt>u\t%0,%z1"
1982 [(set_attr "type" "fcvt_i2f")
1983 (set_attr "mode" "<ANYF:MODE>")])
;; l<rint_pattern> -- lrint/lround-style FP to integer with a
;; per-pattern static rounding mode (<rint_rm>).
1985 (define_insn "l<rint_pattern><ANYF:mode><GPR:mode>2"
1986 [(set (match_operand:GPR 0 "register_operand" "=r")
1988 [(match_operand:ANYF 1 "register_operand" " f")]
1990 "TARGET_HARD_FLOAT || TARGET_ZFINX"
1991 "fcvt.<GPR:ifmt>.<ANYF:fmt> %0,%1,<rint_rm>"
1992 [(set_attr "type" "fcvt_f2i")
1993 (set_attr "mode" "<ANYF:MODE>")])
;; <round_pattern> -- Zfa fround: round to integral value in FP format
;; with the pattern's rounding mode.
1995 (define_insn "<round_pattern><ANYF:mode>2"
1996 [(set (match_operand:ANYF 0 "register_operand" "=f")
1998 [(match_operand:ANYF 1 "register_operand" " f")]
2000 "TARGET_HARD_FLOAT && TARGET_ZFA"
2001 "fround.<ANYF:fmt>\t%0,%1,<round_rm>"
2002 [(set_attr "type" "fcvt")
2003 (set_attr "mode" "<ANYF:MODE>")])
;; rint -- Zfa froundnx: round to integral, raising inexact as rint
;; requires.
2005 (define_insn "rint<ANYF:mode>2"
2006 [(set (match_operand:ANYF 0 "register_operand" "=f")
2008 [(match_operand:ANYF 1 "register_operand" " f")]
2010 "TARGET_HARD_FLOAT && TARGET_ZFA"
2011 "froundnx.<ANYF:fmt>\t%0,%1"
2012 [(set_attr "type" "fcvt")
2013 (set_attr "mode" "<ANYF:MODE>")])
2016 ;; ....................
2020 ;; ....................
2022 ;; Lower-level instructions for loading an address from the GOT.
2023 ;; We could use MEMs, but an unspec gives more optimization
;; Load the address of a symbol from the GOT (mode P = Pmode).
2026 (define_insn "got_load<mode>"
2027 [(set (match_operand:P 0 "register_operand" "=r")
2029 [(match_operand:P 1 "symbolic_operand" "")]
2033 [(set_attr "got" "load")
2034 (set_attr "type" "load")
2035 (set_attr "mode" "<MODE>")])
;; TLS local-exec: add the thread pointer with the %tprel_add relocation
;; so the linker can relax the sequence.
2037 (define_insn "tls_add_tp_le<mode>"
2038 [(set (match_operand:P 0 "register_operand" "=r")
2040 [(match_operand:P 1 "register_operand" "r")
2041 (match_operand:P 2 "register_operand" "r")
2042 (match_operand:P 3 "symbolic_operand" "")]
2045 "add\t%0,%1,%2,%%tprel_add(%3)"
2046 [(set_attr "type" "arith")
2047 (set_attr "mode" "<MODE>")])
;; TLS general-dynamic: load the argument for the __tls_get_addr call.
2049 (define_insn "got_load_tls_gd<mode>"
2050 [(set (match_operand:P 0 "register_operand" "=r")
2052 [(match_operand:P 1 "symbolic_operand" "")]
2056 [(set_attr "got" "load")
2057 (set_attr "type" "load")
2058 (set_attr "mode" "<MODE>")])
;; TLS initial-exec: load the TP offset of the symbol from the GOT.
2060 (define_insn "got_load_tls_ie<mode>"
2061 [(set (match_operand:P 0 "register_operand" "=r")
2063 [(match_operand:P 1 "symbolic_operand" "")]
2067 [(set_attr "got" "load")
2068 (set_attr "type" "load")
2069 (set_attr "mode" "<MODE>")])
;; TLS descriptor (TLSDESC) resolution: a fixed 4-instruction sequence
;; anchored by the .LT%1 label; result arrives in a0, t0 is clobbered
;; as the call scratch register (hence the explicit clobber).
2071 (define_insn "@tlsdesc<mode>"
2072 [(set (reg:P A0_REGNUM)
2074 [(match_operand:P 0 "symbolic_operand" "")
2075 (match_operand:P 1 "const_int_operand")]
2077 (clobber (reg:P T0_REGNUM))]
2080 return ".LT%1: auipc\ta0,%%tlsdesc_hi(%0)\;"
2081 "<load>\tt0,%%tlsdesc_load_lo(.LT%1)(a0)\;"
2082 "addi\ta0,a0,%%tlsdesc_add_lo(.LT%1)\;"
2083 "jalr\tt0,t0,%%tlsdesc_call(.LT%1)";
2085 [(set_attr "type" "multi")
2086 (set_attr "length" "16")
2087 (set_attr "mode" "<MODE>")])
;; PC-relative high part.  cannot_copy: the .LA%2 label is referenced by
;; a matching low-part relocation, so the insn must not be duplicated.
2089 (define_insn "auipc<mode>"
2090 [(set (match_operand:P 0 "register_operand" "=r")
2092 [(match_operand:P 1 "symbolic_operand" "")
2093 (match_operand:P 2 "const_int_operand")
2097 ".LA%2: auipc\t%0,%h1"
2098 [(set_attr "type" "auipc")
2099 (set_attr "cannot_copy" "yes")])
2101 ;; Instructions for adding the low 12 bits of an address to a register.
2102 ;; Operand 2 is the address: riscv_print_operand works out which relocation
2103 ;; should be applied.
2105 (define_insn "*low<mode>"
2106 [(set (match_operand:P 0 "register_operand" "=r")
2107 (lo_sum:P (match_operand:P 1 "register_operand" " r")
2108 (match_operand:P 2 "symbolic_operand" "")))]
2111 [(set_attr "type" "arith")
2112 (set_attr "mode" "<MODE>")])
2114 ;; Allow combine to split complex const_int load sequences, using operand 2
2115 ;; to store the intermediate results. See move_operand for details.
;; (Split pattern; its opening define_split line is not visible here.)
2117 [(set (match_operand:GPR 0 "register_operand")
2118 (match_operand:GPR 1 "splittable_const_int_operand"))
2119 (clobber (match_operand:GPR 2 "register_operand"))]
2123 riscv_move_integer (operands[2], operands[0], INTVAL (operands[1]),
2128 ;; Likewise, for symbolic operands.
;; riscv_split_symbol both validates the split (condition) and produces
;; the replacement expression in operands[3].
2130 [(set (match_operand:P 0 "register_operand")
2131 (match_operand:P 1))
2132 (clobber (match_operand:P 2 "register_operand"))]
2133 "riscv_split_symbol (operands[2], operands[1], MAX_MACHINE_MODE, NULL)"
2134 [(set (match_dup 0) (match_dup 3))]
2136 riscv_split_symbol (operands[2], operands[1],
2137 MAX_MACHINE_MODE, &operands[3]);
2140 ;; Pretend to have the ability to load complex const_int in order to get
2141 ;; better code generation around them.
2142 ;; But avoid constants that are special cased elsewhere.
2144 ;; Hide it from IRA register equiv recog* () to elide potential undoing of split
;; Excluded constants (2^n-1 and high-mask shifts) are handled by the
;; dedicated AND splitters later in this file.
2146 (define_insn_and_split "*mvconst_internal"
2147 [(set (match_operand:GPR 0 "register_operand" "=r")
2148 (match_operand:GPR 1 "splittable_const_int_operand" "i"))]
2150 && !(p2m1_shift_operand (operands[1], <MODE>mode)
2151 || high_mask_shift_operand (operands[1], <MODE>mode))"
2156 riscv_move_integer (operands[0], operands[0], INTVAL (operands[1]),
2160 [(set_attr "type" "move")])
2162 ;; 64-bit integer moves
;; All DImode moves funnel through riscv_legitimize_move, which fixes up
;; illegitimate constants/addresses before matching the insns below.
2164 (define_expand "movdi"
2165 [(set (match_operand:DI 0 "")
2166 (match_operand:DI 1 ""))]
2169 if (riscv_legitimize_move (DImode, operands[0], operands[1]))
;; DImode move on RV32: doubleword in GPR pairs; FP alternatives need D,
;; the vp alternative reads the vector length (rdvlenb).
2173 (define_insn "*movdi_32bit"
2174 [(set (match_operand:DI 0 "nonimmediate_operand" "=r,r,r,m, *f,*f,*r,*f,*m,r")
2175 (match_operand:DI 1 "move_operand" " r,i,m,r,*J*r,*m,*f,*f,*f,vp"))]
2177 && (register_operand (operands[0], DImode)
2178 || reg_or_0_operand (operands[1], DImode))"
2179 { return riscv_output_move (operands[0], operands[1]); }
2180 [(set_attr "move_type" "move,const,load,store,mtc,fpload,mfc,fmove,fpstore,rdvlenb")
2181 (set_attr "mode" "DI")
2182 (set_attr "type" "move,move,load,store,move,fpload,move,fmove,fpstore,move")
2183 (set_attr "ext" "base,base,base,base,d,d,d,d,d,vector")])
;; DImode move on RV64; assembly is produced by riscv_output_move.
2185 (define_insn "*movdi_64bit"
2186 [(set (match_operand:DI 0 "nonimmediate_operand" "=r,r,r, m, *f,*f,*r,*f,*m,r")
2187 (match_operand:DI 1 "move_operand" " r,T,m,rJ,*r*J,*m,*f,*f,*f,vp"))]
2189 && (register_operand (operands[0], DImode)
2190 || reg_or_0_operand (operands[1], DImode))"
2191 { return riscv_output_move (operands[0], operands[1]); }
2192 [(set_attr "move_type" "move,const,load,store,mtc,fpload,mfc,fmove,fpstore,rdvlenb")
2193 (set_attr "mode" "DI")
2194 (set_attr "type" "move,move,load,store,mtc,fpload,mfc,fmove,fpstore,move")
2195 (set_attr "ext" "base,base,base,base,d,d,d,d,d,vector")])
2197 ;; 32-bit Integer moves
2199 (define_expand "mov<mode>"
2200 [(set (match_operand:MOVE32 0 "")
2201 (match_operand:MOVE32 1 ""))]
2204 if (riscv_legitimize_move (<MODE>mode, operands[0], operands[1]))
;; SImode move.  The extra condition rejects a plain copy out of the VL
;; register; VL reads must go through the dedicated vp alternative.
2208 (define_insn "*movsi_internal"
2209 [(set (match_operand:SI 0 "nonimmediate_operand" "=r,r,r, m, *f,*f,*r,*m,r")
2210 (match_operand:SI 1 "move_operand" " r,T,m,rJ,*r*J,*m,*f,*f,vp"))]
2211 "(register_operand (operands[0], SImode)
2212 || reg_or_0_operand (operands[1], SImode))
2213 && !(REG_P (operands[1]) && VL_REG_P (REGNO (operands[1])))"
2214 { return riscv_output_move (operands[0], operands[1]); }
2215 [(set_attr "move_type" "move,const,load,store,mtc,fpload,mfc,fpstore,rdvlenb")
2216 (set_attr "mode" "SI")
2217 (set_attr "type" "move,move,load,store,mtc,fpload,mfc,fpstore,move")
2218 (set_attr "ext" "base,base,base,base,f,f,f,f,vector")])
2220 ;; 16-bit Integer moves
2222 ;; Unlike most other insns, the move insns can't be split with
2223 ;; different predicates, because register spilling and other parts of
2224 ;; the compiler, have memoized the insn number already.
2225 ;; Unsigned loads are used because LOAD_EXTEND_OP returns ZERO_EXTEND.
2227 (define_expand "movhi"
2228 [(set (match_operand:HI 0 "")
2229 (match_operand:HI 1 ""))]
2232 if (riscv_legitimize_move (HImode, operands[0], operands[1]))
;; HImode move; no FP load/store alternatives, only GPR<->FP transfers.
2236 (define_insn "*movhi_internal"
2237 [(set (match_operand:HI 0 "nonimmediate_operand" "=r,r,r, m, *f,*r,r")
2238 (match_operand:HI 1 "move_operand" " r,T,m,rJ,*r*J,*f,vp"))]
2239 "(register_operand (operands[0], HImode)
2240 || reg_or_0_operand (operands[1], HImode))"
2241 { return riscv_output_move (operands[0], operands[1]); }
2242 [(set_attr "move_type" "move,const,load,store,mtc,mfc,rdvlenb")
2243 (set_attr "mode" "HI")
2244 (set_attr "type" "move,move,load,store,mtc,mfc,move")
2245 (set_attr "ext" "base,base,base,base,f,f,vector")])
2247 ;; HImode constant generation; see riscv_move_integer for details.
2248 ;; si+si->hi without truncation is legal because of
2249 ;; TARGET_TRULY_NOOP_TRUNCATION.
;; %i2 emits "i" for an immediate operand, %~ appends "w" on RV64.
2251 (define_insn "*add<mode>hi3"
2252 [(set (match_operand:HI 0 "register_operand" "=r,r")
2253 (plus:HI (match_operand:HISI 1 "register_operand" " r,r")
2254 (match_operand:HISI 2 "arith_operand" " r,I")))]
2256 "add%i2%~\t%0,%1,%2"
2257 [(set_attr "type" "arith")
2258 (set_attr "mode" "HI")])
;; Same trick for XOR of wider sources producing an HImode result.
2260 (define_insn "*xor<mode>hi3"
2261 [(set (match_operand:HI 0 "register_operand" "=r,r")
2262 (xor:HI (match_operand:HISI 1 "register_operand" " r,r")
2263 (match_operand:HISI 2 "arith_operand" " r,I")))]
2266 [(set_attr "type" "logical")
2267 (set_attr "mode" "HI")])
2269 ;; 8-bit Integer moves
2271 (define_expand "movqi"
2272 [(set (match_operand:QI 0 "")
2273 (match_operand:QI 1 ""))]
2276 if (riscv_legitimize_move (QImode, operands[0], operands[1]))
;; QImode move; constants use "I" (12-bit signed immediates) since any
;; QImode value fits.
2280 (define_insn "*movqi_internal"
2281 [(set (match_operand:QI 0 "nonimmediate_operand" "=r,r,r, m, *f,*r,r")
2282 (match_operand:QI 1 "move_operand" " r,I,m,rJ,*r*J,*f,vp"))]
2283 "(register_operand (operands[0], QImode)
2284 || reg_or_0_operand (operands[1], QImode))"
2285 { return riscv_output_move (operands[0], operands[1]); }
2286 [(set_attr "move_type" "move,const,load,store,mtc,mfc,rdvlenb")
2287 (set_attr "mode" "QI")
2288 (set_attr "type" "move,move,load,store,mtc,mfc,move")
2289 (set_attr "ext" "base,base,base,base,f,f,vector")])
2291 ;; 32-bit floating point moves
2293 (define_expand "movsf"
2294 [(set (match_operand:SF 0 "")
2295 (match_operand:SF 1 ""))]
2298 if (riscv_legitimize_move (SFmode, operands[0], operands[1]))
;; SFmode move with FP registers available.  "zfli" matches constants
;; loadable by the Zfa fli instruction; "G" is FP zero.
2302 (define_insn "*movsf_hardfloat"
2303 [(set (match_operand:SF 0 "nonimmediate_operand" "=f, f,f,f,m,m,*f,*r, *r,*r,*m")
2304 (match_operand:SF 1 "move_operand" " f,zfli,G,m,f,G,*r,*f,*G*r,*m,*r"))]
2306 && (register_operand (operands[0], SFmode)
2307 || reg_or_0_operand (operands[1], SFmode))"
2308 { return riscv_output_move (operands[0], operands[1]); }
2309 [(set_attr "move_type" "fmove,fmove,mtc,fpload,fpstore,store,mtc,mfc,move,load,store")
2310 (set_attr "type" "fmove,fmove,mtc,fpload,fpstore,store,mtc,mfc,move,load,store")
2311 (set_attr "mode" "SF")])
;; SFmode move without FP registers: plain GPR moves/loads/stores.
2313 (define_insn "*movsf_softfloat"
2314 [(set (match_operand:SF 0 "nonimmediate_operand" "= r,r,m")
2315 (match_operand:SF 1 "move_operand" " Gr,m,r"))]
2317 && (register_operand (operands[0], SFmode)
2318 || reg_or_0_operand (operands[1], SFmode))"
2319 { return riscv_output_move (operands[0], operands[1]); }
2320 [(set_attr "move_type" "move,load,store")
2321 (set_attr "type" "move,load,store")
2322 (set_attr "mode" "SF")])
2324 ;; 64-bit floating point moves
2326 (define_expand "movdf"
2327 [(set (match_operand:DF 0 "")
2328 (match_operand:DF 1 ""))]
2331 if (riscv_legitimize_move (DFmode, operands[0], operands[1]))
2336 ;; In RV32, we lack fmv.x.d and fmv.d.x. Go through memory instead.
2337 ;; (However, we can still use fcvt.d.w to zero a floating-point register.)
;; zmvf/zmvr restrict the GPR<->FPR transfer alternatives to targets
;; where such a DF transfer is representable on RV32 (Zfa fmvp/fmvh).
2338 (define_insn "*movdf_hardfloat_rv32"
2339 [(set (match_operand:DF 0 "nonimmediate_operand" "=f, f,f,f,m,m,*zmvf,*zmvr, *r,*r,*m")
2340 (match_operand:DF 1 "move_operand" " f,zfli,G,m,f,G,*zmvr,*zmvf,*r*G,*m,*r"))]
2341 "!TARGET_64BIT && TARGET_DOUBLE_FLOAT
2342 && (register_operand (operands[0], DFmode)
2343 || reg_or_0_operand (operands[1], DFmode))"
2344 { return riscv_output_move (operands[0], operands[1]); }
2345 [(set_attr "move_type" "fmove,fmove,mtc,fpload,fpstore,store,mtc,mfc,move,load,store")
2346 (set_attr "type" "fmove,fmove,mtc,fpload,fpstore,store,mtc,mfc,move,load,store")
2347 (set_attr "mode" "DF")])
;; On RV64 a DF value fits in one GPR, so plain *f/*r transfers work.
2349 (define_insn "*movdf_hardfloat_rv64"
2350 [(set (match_operand:DF 0 "nonimmediate_operand" "=f, f,f,f,m,m,*f,*r, *r,*r,*m")
2351 (match_operand:DF 1 "move_operand" " f,zfli,G,m,f,G,*r,*f,*r*G,*m,*r"))]
2352 "TARGET_64BIT && TARGET_DOUBLE_FLOAT
2353 && (register_operand (operands[0], DFmode)
2354 || reg_or_0_operand (operands[1], DFmode))"
2355 { return riscv_output_move (operands[0], operands[1]); }
2356 [(set_attr "move_type" "fmove,fmove,mtc,fpload,fpstore,store,mtc,mfc,move,load,store")
2357 (set_attr "type" "fmove,fmove,mtc,fpload,fpstore,store,mtc,mfc,move,load,store")
2358 (set_attr "mode" "DF")])
2360 (define_insn "*movdf_softfloat"
2361 [(set (match_operand:DF 0 "nonimmediate_operand" "= r,r, m")
2362 (match_operand:DF 1 "move_operand" " rG,m,rG"))]
2363 "!TARGET_DOUBLE_FLOAT
2364 && (register_operand (operands[0], DFmode)
2365 || reg_or_0_operand (operands[1], DFmode))"
2366 { return riscv_output_move (operands[0], operands[1]); }
2367 [(set_attr "move_type" "move,load,store")
2368 (set_attr "type" "fmove,fpload,fpstore")
2369 (set_attr "mode" "DF")])
;; Zfa on RV32: extract the low 32 bits of a DF register into a GPR.
2371 (define_insn "movsidf2_low_rv32"
2372 [(set (match_operand:SI 0 "register_operand" "= r")
2374 (match_operand:DF 1 "register_operand" "zmvf")))]
2375 "TARGET_HARD_FLOAT && !TARGET_64BIT && TARGET_ZFA"
2377 [(set_attr "move_type" "fmove")
2378 (set_attr "type" "fmove")
2379 (set_attr "mode" "DF")])
;; Zfa on RV32: extract the high 32 bits of a DF register into a GPR.
2382 (define_insn "movsidf2_high_rv32"
2383 [(set (match_operand:SI 0 "register_operand" "= r")
2386 (match_operand:DF 1 "register_operand" "zmvf")
2388 "TARGET_HARD_FLOAT && !TARGET_64BIT && TARGET_ZFA"
2390 [(set_attr "move_type" "fmove")
2391 (set_attr "type" "fmove")
2392 (set_attr "mode" "DF")])
;; Zfa on RV32: build a DF register from two GPR halves with fmvp.d.x
;; (operand 2 = high half comes first in the assembly operand order).
2394 (define_insn "movdfsisi3_rv32"
2395 [(set (match_operand:DF 0 "register_operand" "= f")
2397 (match_operand:SI 2 "register_operand" "zmvr")
2399 (match_operand:SI 1 "register_operand" "zmvr")
2401 "TARGET_HARD_FLOAT && !TARGET_64BIT && TARGET_ZFA"
2402 "fmvp.d.x\t%0,%2,%1"
2403 [(set_attr "move_type" "fmove")
2404 (set_attr "type" "fmove")
2405 (set_attr "mode" "DF")])
;; Split 64-bit moves that must be done as two word-sized moves
;; (split pattern; its opening define_split line is not visible here).
2408 [(set (match_operand:MOVE64 0 "nonimmediate_operand")
2409 (match_operand:MOVE64 1 "move_operand"))]
2411 && riscv_split_64bit_move_p (operands[0], operands[1])"
2414 riscv_split_doubleword_move (operands[0], operands[1]);
;; Block copy: operand 2 is the length, operand 3 the alignment;
;; riscv_expand_block_move emits inline code when profitable.
2418 (define_expand "cpymem<mode>"
2419 [(parallel [(set (match_operand:BLK 0 "general_operand")
2420 (match_operand:BLK 1 "general_operand"))
2421 (use (match_operand:P 2 ""))
2422 (use (match_operand:SI 3 "const_int_operand"))])]
2425 if (riscv_expand_block_move (operands[0], operands[1], operands[2]))
2431 ;; Expand in-line code to clear the instruction cache between operand[0] and
;; Prefer the OS cache-flush helper when one is configured; otherwise a
;; local fence.i suffices for the hart executing it.
2433 (define_expand "clear_cache"
2434 [(match_operand 0 "pmode_register_operand")
2435 (match_operand 1 "pmode_register_operand")]
2438 #ifdef ICACHE_FLUSH_FUNC
2439 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, ICACHE_FLUSH_FUNC),
2440 LCT_NORMAL, VOIDmode, operands[0], Pmode,
2441 operands[1], Pmode, const0_rtx, Pmode);
2443 if (TARGET_ZIFENCEI)
2444 emit_insn (gen_fence_i ());
;; Memory fence (volatile unspec so it is never removed or reordered).
2449 (define_insn "fence"
2450 [(unspec_volatile [(const_int 0)] UNSPECV_FENCE)]
2453 [(set_attr "type" "atomic")])
;; Instruction-stream fence (fence.i, Zifencei).
2455 (define_insn "fence_i"
2456 [(unspec_volatile [(const_int 0)] UNSPECV_FENCE_I)]
2459 [(set_attr "type" "atomic")])
;; Spin-loop hint: real "pause" with Zihintpause, else its raw encoding
;; (a HINT, so it executes as a no-op on cores without the extension).
2461 (define_insn "riscv_pause"
2462 [(unspec_volatile [(const_int 0)] UNSPECV_PAUSE)]
2464 "* return TARGET_ZIHINTPAUSE ? \"pause\" : \".insn\t0x0100000f\";"
2465 [(set_attr "type" "atomic")])
2468 ;; ....................
2472 ;; ....................
2474 ;; Use a QImode shift count, to avoid generating sign or zero extend
2475 ;; instructions for shift counts, and to avoid dropping subregs.
2476 ;; expand_shift_1 can do this automatically when SHIFT_COUNT_TRUNCATED is
2477 ;; defined, but use of that is discouraged.
;; SImode shift insn.  Constant counts are masked to the mode width;
;; %~ appends "w" on RV64 so the 32-bit form is used there.
2479 (define_insn "*<optab>si3"
2480 [(set (match_operand:SI 0 "register_operand" "= r")
2482 (match_operand:SI 1 "register_operand" " r")
2483 (match_operand:QI 2 "arith_operand" " rI")))]
2486 if (GET_CODE (operands[2]) == CONST_INT)
2487 operands[2] = GEN_INT (INTVAL (operands[2])
2488 & (GET_MODE_BITSIZE (SImode) - 1));
2490 return "<insn>%i2%~\t%0,%1,%2";
2492 [(set_attr "type" "shift")
2493 (set_attr "mode" "SI")])
;; SImode shift expander: on RV64 go through the sign-extending *w form
;; and mark the DI result as a promoted (sign-extended) SImode subreg.
2495 (define_expand "<optab>si3"
2496 [(set (match_operand:SI 0 "register_operand" "= r")
2497 (any_shift:SI (match_operand:SI 1 "register_operand" " r")
2498 (match_operand:QI 2 "arith_operand" " rI")))]
2503 rtx t = gen_reg_rtx (DImode);
2504 emit_insn (gen_<optab>si3_extend (t, operands[1], operands[2]));
2505 t = gen_lowpart (SImode, t);
2506 SUBREG_PROMOTED_VAR_P (t) = 1;
2507 SUBREG_PROMOTED_SET (t, SRP_SIGNED);
2508 emit_move_insn (operands[0], t);
;; DImode shift; constant counts are masked to 63.
2513 (define_insn "<optab>di3"
2514 [(set (match_operand:DI 0 "register_operand" "= r")
2516 (match_operand:DI 1 "register_operand" " r")
2517 (match_operand:QI 2 "arith_operand" " rI")))]
2520 if (GET_CODE (operands[2]) == CONST_INT)
2521 operands[2] = GEN_INT (INTVAL (operands[2])
2522 & (GET_MODE_BITSIZE (DImode) - 1));
2524 return "<insn>%i2\t%0,%1,%2";
2526 [(set_attr "type" "shift")
2527 (set_attr "mode" "DI")])
;; Drop a redundant AND-mask applied to the shift count (the hardware
;; already truncates the count); split to the plain shift pattern.
2529 (define_insn_and_split "*<optab><GPR:mode>3_mask_1"
2530 [(set (match_operand:GPR 0 "register_operand" "= r")
2532 (match_operand:GPR 1 "register_operand" " r")
2533 (match_operator 4 "subreg_lowpart_operator"
2535 (match_operand:GPR2 2 "register_operand" "r")
2536 (match_operand 3 "<GPR:shiftm1>"))])))]
2541 (any_shift:GPR (match_dup 1)
2543 "operands[2] = gen_lowpart (QImode, operands[2]);"
2544 [(set_attr "type" "shift")
2545 (set_attr "mode" "<GPR:MODE>")])
;; SImode shift whose DImode result is the sign-extension (the *w insn
;; forms on RV64); constant counts masked to 31.
2547 (define_insn "<optab>si3_extend"
2548 [(set (match_operand:DI 0 "register_operand" "= r")
2550 (any_shift:SI (match_operand:SI 1 "register_operand" " r")
2551 (match_operand:QI 2 "arith_operand" " rI"))))]
2554 if (GET_CODE (operands[2]) == CONST_INT)
2555 operands[2] = GEN_INT (INTVAL (operands[2]) & 0x1f);
2557 return "<insn>%i2w\t%0,%1,%2";
2559 [(set_attr "type" "shift")
2560 (set_attr "mode" "SI")])
;; Same count-mask elimination for the sign-extending SImode shifts.
2562 (define_insn_and_split "*<optab>si3_extend_mask"
2563 [(set (match_operand:DI 0 "register_operand" "= r")
2566 (match_operand:SI 1 "register_operand" " r")
2567 (match_operator 4 "subreg_lowpart_operator"
2569 (match_operand:GPR 2 "register_operand" " r")
2570 (match_operand 3 "const_si_mask_operand"))]))))]
2576 (any_shift:SI (match_dup 1)
2578 "operands[2] = gen_lowpart (QImode, operands[2]);"
2579 [(set_attr "type" "shift")
2580 (set_attr "mode" "SI")])
2582 ;; Non-canonical, but can be formed by ree when combine is not successful at
2583 ;; producing one of the two canonical patterns below.
;; srliw both shifts and zero-extends, so the explicit extension is free.
2584 (define_insn "*lshrsi3_zero_extend_1"
2585 [(set (match_operand:DI 0 "register_operand" "=r")
2587 (lshiftrt:SI (match_operand:SI 1 "register_operand" " r")
2588 (match_operand 2 "const_int_operand"))))]
2589 "TARGET_64BIT && (INTVAL (operands[2]) & 0x1f) > 0"
2591 operands[2] = GEN_INT (INTVAL (operands[2]) & 0x1f);
2593 return "srliw\t%0,%1,%2";
2595 [(set_attr "type" "shift")
2596 (set_attr "mode" "SI")])
2598 ;; Canonical form for a zero-extend of a logical right shift.
;; zero_extract of bits [3, 3+2) ending at bit 32 == srliw by operand 3.
2599 (define_insn "*lshrsi3_zero_extend_2"
2600 [(set (match_operand:DI 0 "register_operand" "=r")
2601 (zero_extract:DI (match_operand:DI 1 "register_operand" " r")
2602 (match_operand 2 "const_int_operand")
2603 (match_operand 3 "const_int_operand")))]
2604 "(TARGET_64BIT && (INTVAL (operands[3]) > 0)
2605 && (INTVAL (operands[2]) + INTVAL (operands[3]) == 32))"
2607 return "srliw\t%0,%1,%3";
2609 [(set_attr "type" "shift")
2610 (set_attr "mode" "SI")])
2612 ;; Canonical form for a zero-extend of a logical right shift when the
2613 ;; shift count is 31.
;; (x < 0) on SImode is exactly the sign bit, i.e. srliw by 31.
2614 (define_insn "*lshrsi3_zero_extend_3"
2615 [(set (match_operand:DI 0 "register_operand" "=r")
2616 (lt:DI (match_operand:SI 1 "register_operand" " r")
2620 return "srliw\t%0,%1,31";
2622 [(set_attr "type" "shift")
2623 (set_attr "mode" "SI")])
2625 ;; Handle AND with 2^N-1 for N from 12 to XLEN. This can be split into
2626 ;; two logical shifts. Otherwise it requires 3 instructions: lui,
2627 ;; xor/addi/srli, and.
2629 ;; Generating a temporary for the shift output gives better combiner results;
2630 ;; and also fixes a problem where op0 could be a paradoxical reg and shifting
2631 ;; by amounts larger than the size of the SUBREG_REG doesn't work.
;; (Split pattern; its opening define_split line is not visible here.)
;; x & (2^N-1)  ==>  (x << (XLEN-N)) >> (XLEN-N), via scratch operand 3.
2633 [(set (match_operand:GPR 0 "register_operand")
2634 (and:GPR (match_operand:GPR 1 "register_operand")
2635 (match_operand:GPR 2 "p2m1_shift_operand")))
2636 (clobber (match_operand:GPR 3 "register_operand"))]
2639 (ashift:GPR (match_dup 1) (match_dup 2)))
2641 (lshiftrt:GPR (match_dup 3) (match_dup 2)))]
2643 /* Op2 is a VOIDmode constant, so get the mode size from op1. */
2644 operands[2] = GEN_INT (GET_MODE_BITSIZE (GET_MODE (operands[1])).to_constant ()
2645 - exact_log2 (INTVAL (operands[2]) + 1))
2648 ;; Handle AND with 0xF...F0...0 where there are 32 to 63 zeros. This can be
2649 ;; split into two shifts. Otherwise it requires 3 instructions: li, sll, and.
;; (Split pattern; opening line not visible.)  Shift right then left by
;; the mask's trailing-zero count to clear the low bits.
2651 [(set (match_operand:DI 0 "register_operand")
2652 (and:DI (match_operand:DI 1 "register_operand")
2653 (match_operand:DI 2 "high_mask_shift_operand")))
2654 (clobber (match_operand:DI 3 "register_operand"))]
2657 (lshiftrt:DI (match_dup 1) (match_dup 2)))
2659 (ashift:DI (match_dup 3) (match_dup 2)))]
2661 operands[2] = GEN_INT (ctz_hwi (INTVAL (operands[2])));
2664 ;; Handle SImode to DImode zero-extend combined with a left shift. This can
2665 ;; occur when unsigned int is used for array indexing. Split this into two
2666 ;; shifts. Otherwise we can get 3 shifts.
;; Disabled with Zba, which has dedicated shNadd.uw/slli.uw forms.
2668 (define_insn_and_split "zero_extendsidi2_shifted"
2669 [(set (match_operand:DI 0 "register_operand" "=r")
2670 (and:DI (ashift:DI (match_operand:DI 1 "register_operand" "r")
2671 (match_operand:QI 2 "immediate_operand" "I"))
2672 (match_operand 3 "immediate_operand" "")))
2673 (clobber (match_scratch:DI 4 "=&r"))]
2674 "TARGET_64BIT && !TARGET_ZBA
2675 && ((INTVAL (operands[3]) >> INTVAL (operands[2])) == 0xffffffff)"
2677 "&& reload_completed"
2679 (ashift:DI (match_dup 1) (const_int 32)))
2681 (lshiftrt:DI (match_dup 4) (match_dup 5)))]
2682 "operands[5] = GEN_INT (32 - (INTVAL (operands [2])));"
2683 [(set_attr "type" "shift")
2684 (set_attr "mode" "DI")])
2687 ;; ....................
2689 ;; CONDITIONAL BRANCHES
2691 ;; ....................
2693 ;; Conditional branches
;; Branch on (x & shifted-mask) ==/!= 0 where the mask does not fit an
;; immediate: shift the mask's trailing zeros away first, then AND with
;; the now-small constant, instead of materializing the wide mask.
2695 (define_insn_and_split "*branch<ANYI:mode>_shiftedarith_equals_zero"
2697 (if_then_else (match_operator 1 "equality_operator"
2698 [(and:ANYI (match_operand:ANYI 2 "register_operand" "r")
2699 (match_operand 3 "shifted_const_arith_operand" "i"))
2701 (label_ref (match_operand 0 "" ""))
2703 (clobber (match_scratch:X 4 "=&r"))]
2704 "!SMALL_OPERAND (INTVAL (operands[3]))"
2706 "&& reload_completed"
2707 [(set (match_dup 4) (lshiftrt:X (subreg:X (match_dup 2) 0) (match_dup 6)))
2708 (set (match_dup 4) (and:X (match_dup 4) (match_dup 7)))
2709 (set (pc) (if_then_else (match_op_dup 1 [(match_dup 4) (const_int 0)])
2710 (label_ref (match_dup 0)) (pc)))]
2712 HOST_WIDE_INT mask = INTVAL (operands[3]);
2713 int trailing = ctz_hwi (mask);
2715 operands[6] = GEN_INT (trailing);
2716 operands[7] = GEN_INT (mask >> trailing);
2718 [(set_attr "type" "branch")])
;; Branch on (x & consecutive-bit-mask) ==/!= 0: isolate the field with
;; a left shift (dropping the leading bits) then a right shift.
2720 (define_insn_and_split "*branch<ANYI:mode>_shiftedmask_equals_zero"
2722 (if_then_else (match_operator 1 "equality_operator"
2723 [(and:ANYI (match_operand:ANYI 2 "register_operand" "r")
2724 (match_operand 3 "consecutive_bits_operand" "i"))
2726 (label_ref (match_operand 0 "" ""))
2728 (clobber (match_scratch:X 4 "=&r"))]
2729 "(INTVAL (operands[3]) >= 0 || !partial_subreg_p (operands[2]))
2730 && popcount_hwi (INTVAL (operands[3])) > 1
2731 && !SMALL_OPERAND (INTVAL (operands[3]))"
2733 "&& reload_completed"
2734 [(set (match_dup 4) (ashift:X (subreg:X (match_dup 2) 0) (match_dup 6)))
2735 (set (match_dup 4) (lshiftrt:X (match_dup 4) (match_dup 7)))
2736 (set (pc) (if_then_else (match_op_dup 1 [(match_dup 4) (const_int 0)])
2737 (label_ref (match_dup 0)) (pc)))]
2739 unsigned HOST_WIDE_INT mask = INTVAL (operands[3]);
2740 int leading = clz_hwi (mask);
2741 int trailing = ctz_hwi (mask);
2743 operands[6] = GEN_INT (leading);
2744 operands[7] = GEN_INT (leading + trailing);
2746 [(set_attr "type" "branch")])
;; Plain compare-and-branch.  For an out-of-range target (length 12)
;; emit the inverted branch around an unconditional far jump.
2748 (define_insn "*branch<mode>"
2751 (match_operator 1 "ordered_comparison_operator"
2752 [(match_operand:X 2 "register_operand" "r")
2753 (match_operand:X 3 "reg_or_0_operand" "rJ")])
2754 (label_ref (match_operand 0 "" ""))
2758 if (get_attr_length (insn) == 12)
2759 return "b%N1\t%2,%z3,1f; jump\t%l0,ra; 1:";
2761 return "b%C1\t%2,%z3,%l0";
2763 [(set_attr "type" "branch")
2764 (set_attr "mode" "none")])
2766 ;; Conditional move and add patterns.
;; Conditional move, available with short-forward-branch ALUs, XThead
;; conditional moves, or Zicond-like extensions.
2768 (define_expand "mov<mode>cc"
2769 [(set (match_operand:GPR 0 "register_operand")
2770 (if_then_else:GPR (match_operand 1 "comparison_operator")
2771 (match_operand:GPR 2 "movcc_operand")
2772 (match_operand:GPR 3 "movcc_operand")))]
2773 "TARGET_SFB_ALU || TARGET_XTHEADCONDMOV || TARGET_ZICOND_LIKE
2776 if (riscv_expand_conditional_move (operands[0], operands[1],
2777 operands[2], operands[3]))
;; op0 = (cmp ? op2+op3 : op2), built branchlessly: materialize the
;; comparison as 0/1, turn it into an all-zeros/all-ones mask (via -1
;; add or negate, depending on "invert"), AND with op3, then add op2.
2783 (define_expand "add<mode>cc"
2784 [(match_operand:GPR 0 "register_operand")
2785 (match_operand 1 "comparison_operator")
2786 (match_operand:GPR 2 "arith_operand")
2787 (match_operand:GPR 3 "arith_operand")]
2790 rtx cmp = operands[1];
2791 rtx cmp0 = XEXP (cmp, 0);
2792 rtx cmp1 = XEXP (cmp, 1);
2793 machine_mode mode0 = GET_MODE (cmp0);
2795 /* We only handle word mode integer compares for now. */
2796 if (INTEGRAL_MODE_P (mode0) && mode0 != word_mode)
2799 enum rtx_code code = GET_CODE (cmp);
2800 rtx reg0 = gen_reg_rtx (<MODE>mode);
2801 rtx reg1 = gen_reg_rtx (<MODE>mode);
2802 rtx reg2 = gen_reg_rtx (<MODE>mode);
2803 bool invert = false;
2805 if (INTEGRAL_MODE_P (mode0))
2806 riscv_expand_int_scc (reg0, code, cmp0, cmp1, &invert);
2807 else if (FLOAT_MODE_P (mode0) && fp_scc_comparison (cmp, GET_MODE (cmp)))
2808 riscv_expand_float_scc (reg0, code, cmp0, cmp1, &invert);
2813 riscv_emit_binary (PLUS, reg1, reg0, constm1_rtx);
2815 riscv_emit_unary (NEG, reg1, reg0);
2816 riscv_emit_binary (AND, reg2, reg1, operands[3]);
2817 riscv_emit_binary (PLUS, operands[0], reg2, operands[2]);
2822 ;; Used to implement built-in functions.
2823 (define_expand "condjump"
2825 (if_then_else (match_operand 0)
2826 (label_ref (match_operand 1))
;; Integer compare-and-branch expander; all codegen is delegated to
;; riscv_expand_conditional_branch.
2829 (define_expand "@cbranch<mode>4"
2831 (if_then_else (match_operator 0 "comparison_operator"
2832 [(match_operand:BR 1 "register_operand")
2833 (match_operand:BR 2 "nonmemory_operand")])
2834 (label_ref (match_operand 3 ""))
2838 riscv_expand_conditional_branch (operands[3], GET_CODE (operands[0]),
2839 operands[1], operands[2]);
;; FP compare-and-branch.  Signed-order comparisons get expanded right
;; away; the rest keep a scratch (operand 4) for the split below.
2843 (define_expand "@cbranch<ANYF:mode>4"
2844 [(parallel [(set (pc)
2845 (if_then_else (match_operator 0 "fp_branch_comparison"
2846 [(match_operand:ANYF 1 "register_operand")
2847 (match_operand:ANYF 2 "register_operand")])
2848 (label_ref (match_operand 3 ""))
2850 (clobber (match_operand 4 ""))])]
2851 "TARGET_HARD_FLOAT || TARGET_ZFINX"
2853 if (!signed_order_operator (operands[0], GET_MODE (operands[0])))
2855 riscv_expand_conditional_branch (operands[3], GET_CODE (operands[0]),
2856 operands[1], operands[2]);
2859 operands[4] = gen_reg_rtx (TARGET_64BIT ? DImode : SImode);
;; Split a native FP branch into "set scratch from FP compare" plus an
;; integer bne-against-zero; the length attr models near/far targets.
2862 (define_insn_and_split "*cbranch<ANYF:mode>4"
2864 (if_then_else (match_operator 1 "fp_native_comparison"
2865 [(match_operand:ANYF 2 "register_operand" "f")
2866 (match_operand:ANYF 3 "register_operand" "f")])
2867 (label_ref (match_operand 0 ""))
2869 (clobber (match_operand:X 4 "register_operand" "=r"))]
2870 "TARGET_HARD_FLOAT || TARGET_ZFINX"
2872 "&& reload_completed"
2874 (match_op_dup:X 1 [(match_dup 2) (match_dup 3)]))
2876 (if_then_else (ne:X (match_dup 4) (const_int 0))
2877 (label_ref (match_operand 0))
2880 [(set_attr "type" "branch")
2881 (set (attr "length")
2882 (if_then_else (and (le (minus (match_dup 0) (pc))
2884 (le (minus (pc) (match_dup 0))
2887 (if_then_else (and (le (minus (match_dup 0) (pc))
2888 (const_int 1048564))
2889 (le (minus (pc) (match_dup 0))
2890 (const_int 1048576)))
;; FP != branch: there is no native fne, so compare with feq and branch
;; if the (integer) result is zero.
2894 (define_insn_and_split "*cbranch<ANYF:mode>4"
2896 (if_then_else (match_operator 1 "ne_operator"
2897 [(match_operand:ANYF 2 "register_operand" "f")
2898 (match_operand:ANYF 3 "register_operand" "f")])
2899 (label_ref (match_operand 0 ""))
2901 (clobber (match_operand:X 4 "register_operand" "=r"))]
2902 "TARGET_HARD_FLOAT || TARGET_ZFINX"
2904 "&& reload_completed"
2906 (eq:X (match_dup 2) (match_dup 3)))
2908 (if_then_else (eq:X (match_dup 4) (const_int 0))
2909 (label_ref (match_operand 0))
2912 [(set_attr "type" "branch")
2913 (set (attr "length")
2914 (if_then_else (and (le (minus (match_dup 0) (pc))
2916 (le (minus (pc) (match_dup 0))
2919 (if_then_else (and (le (minus (match_dup 0) (pc))
2920 (const_int 1048564))
2921 (le (minus (pc) (match_dup 0))
2922 (const_int 1048576)))
;; Branch on a single bit: shift the bit into the sign position, then
;; use bge/blt against zero (the comparison code is rewritten below).
2926 (define_insn_and_split "*branch_on_bit<X:mode>"
2929 (match_operator 0 "equality_operator"
2930 [(zero_extract:X (match_operand:X 2 "register_operand" "r")
2932 (match_operand 3 "branch_on_bit_operand"))
2934 (label_ref (match_operand 1))
2936 (clobber (match_scratch:X 4 "=&r"))]
2941 (ashift:X (match_dup 2) (match_dup 3)))
2944 (match_op_dup 0 [(match_dup 4) (const_int 0)])
2945 (label_ref (match_operand 1))
2948 int shift = GET_MODE_BITSIZE (<MODE>mode) - 1 - INTVAL (operands[3]);
2949 operands[3] = GEN_INT (shift);
2951 if (GET_CODE (operands[0]) == EQ)
2952 operands[0] = gen_rtx_GE (<MODE>mode, operands[4], const0_rtx);
2954 operands[0] = gen_rtx_LT (<MODE>mode, operands[4], const0_rtx);
2956 [(set_attr "type" "branch")])
;; Branch on a bit range starting at bit 0: shift the unwanted high
;; bits out and compare the remainder against zero.
2958 (define_insn_and_split "*branch_on_bit_range<X:mode>"
2961 (match_operator 0 "equality_operator"
2962 [(zero_extract:X (match_operand:X 2 "register_operand" "r")
2963 (match_operand 3 "branch_on_bit_operand")
2966 (label_ref (match_operand 1))
2968 (clobber (match_scratch:X 4 "=&r"))]
2973 (ashift:X (match_dup 2) (match_dup 3)))
2976 (match_op_dup 0 [(match_dup 4) (const_int 0)])
2977 (label_ref (match_operand 1))
2980 operands[3] = GEN_INT (GET_MODE_BITSIZE (<MODE>mode) - INTVAL (operands[3]));
2982 [(set_attr "type" "branch")])
2985 ;; ....................
2987 ;; SETTING A REGISTER FROM A COMPARISON
2989 ;; ....................
2991 ;; Destination is always set in SI mode.
;; Integer set-on-comparison; delegated to riscv_expand_int_scc.
2993 (define_expand "cstore<mode>4"
2994 [(set (match_operand:SI 0 "register_operand")
2995 (match_operator:SI 1 "ordered_comparison_operator"
2996 [(match_operand:GPR 2 "register_operand")
2997 (match_operand:GPR 3 "nonmemory_operand")]))]
3000 riscv_expand_int_scc (operands[0], GET_CODE (operands[1]), operands[2],
;; FP set-on-comparison; delegated to riscv_expand_float_scc.
3005 (define_expand "cstore<mode>4"
3006 [(set (match_operand:SI 0 "register_operand")
3007 (match_operator:SI 1 "fp_scc_comparison"
3008 [(match_operand:ANYF 2 "register_operand")
3009 (match_operand:ANYF 3 "register_operand")]))]
3010 "TARGET_HARD_FLOAT || TARGET_ZFINX"
3012 riscv_expand_float_scc (operands[0], GET_CODE (operands[1]), operands[2],
;; Native FP comparison producing 0/1 (feq/flt/fle via %C1).
3017 (define_insn "*cstore<ANYF:mode><X:mode>4"
3018 [(set (match_operand:X 0 "register_operand" "=r")
3019 (match_operator:X 1 "fp_native_comparison"
3020 [(match_operand:ANYF 2 "register_operand" " f")
3021 (match_operand:ANYF 3 "register_operand" " f")]))]
3022 "TARGET_HARD_FLOAT || TARGET_ZFINX"
3023 "f%C1.<fmt>\t%0,%2,%3"
3024 [(set_attr "type" "fcmp")
3025 (set_attr "mode" "<UNITMODE>")])
;; Quiet (non-signaling) FP comparison.  With Zfa use the dedicated
;; quiet instruction; otherwise save the FP flags, do the normal
;; (signaling) compare, restore the flags, and optionally emit a
;; snan-consuming op so HONOR_SNANS semantics are preserved.
3027 (define_expand "f<quiet_pattern>_quiet<ANYF:mode><X:mode>4"
3028 [(set (match_operand:X 0 "register_operand")
3029 (unspec:X [(match_operand:ANYF 1 "register_operand")
3030 (match_operand:ANYF 2 "register_operand")]
3032 "TARGET_HARD_FLOAT || TARGET_ZFINX"
3034 rtx op0 = operands[0];
3035 rtx op1 = operands[1];
3036 rtx op2 = operands[2];
3039 emit_insn (gen_f<quiet_pattern>_quiet<ANYF:mode><X:mode>4_zfa(op0, op1, op2));
3042 rtx tmp = gen_reg_rtx (SImode);
3043 rtx cmp = gen_rtx_<QUIET_PATTERN> (<X:MODE>mode, op1, op2);
3044 rtx frflags = gen_rtx_UNSPEC_VOLATILE (SImode, gen_rtvec (1, const0_rtx),
3046 rtx fsflags = gen_rtx_UNSPEC_VOLATILE (SImode, gen_rtvec (1, tmp),
3049 emit_insn (gen_rtx_SET (tmp, frflags));
3050 emit_insn (gen_rtx_SET (op0, cmp));
3051 emit_insn (fsflags);
3054 if (HONOR_SNANS (<ANYF:MODE>mode))
3055 emit_insn (gen_rtx_UNSPEC_VOLATILE (<ANYF:MODE>mode,
3056 gen_rtvec (2, op1, op2),
;; Zfa quiet compare instruction (fltq/fleq family).
3061 (define_insn "f<quiet_pattern>_quiet<ANYF:mode><X:mode>4_zfa"
3062 [(set (match_operand:X 0 "register_operand" "=r")
3064 [(match_operand:ANYF 1 "register_operand" " f")
3065 (match_operand:ANYF 2 "register_operand" " f")]
3067 "TARGET_HARD_FLOAT && TARGET_ZFA"
3068 "f<quiet_pattern>q.<fmt>\t%0,%1,%2"
3069 [(set_attr "type" "fcmp")
3070 (set_attr "mode" "<UNITMODE>")
3071 (set (attr "length") (const_int 16))])
;; x == 0  ==>  sltiu x, 1 (set if unsigned less than 1).
3073 (define_insn "*seq_zero_<X:mode><GPR:mode>"
3074 [(set (match_operand:GPR 0 "register_operand" "=r")
3075 (eq:GPR (match_operand:X 1 "register_operand" " r")
3079 [(set_attr "type" "slt")
3080 (set_attr "mode" "<X:MODE>")])
;; x != 0  ==>  sltu zero, x (set if zero unsigned-less-than x).
3082 (define_insn "*sne_zero_<X:mode><GPR:mode>"
3083 [(set (match_operand:GPR 0 "register_operand" "=r")
3084 (ne:GPR (match_operand:X 1 "register_operand" " r")
3088 [(set_attr "type" "slt")
3089 (set_attr "mode" "<X:MODE>")])
;; x > y  ==>  slt with swapped operands.
3091 (define_insn "*sgt<u>_<X:mode><GPR:mode>"
3092 [(set (match_operand:GPR 0 "register_operand" "= r")
3093 (any_gt:GPR (match_operand:X 1 "register_operand" " r")
3094 (match_operand:X 2 "reg_or_0_operand" " rJ")))]
3097 [(set_attr "type" "slt")
3098 (set_attr "mode" "<X:MODE>")])
;; x >= 0 (or unsigned >=)  ==>  slti(u) against zero, operands swapped.
3100 (define_insn "*sge<u>_<X:mode><GPR:mode>"
3101 [(set (match_operand:GPR 0 "register_operand" "=r")
3102 (any_ge:GPR (match_operand:X 1 "register_operand" " r")
3105 "slti<u>\t%0,zero,%1"
3106 [(set_attr "type" "slt")
3107 (set_attr "mode" "<X:MODE>")])
;; The native set-less-than instruction (register or immediate form).
3109 (define_insn "@slt<u>_<X:mode><GPR:mode>3"
3110 [(set (match_operand:GPR 0 "register_operand" "= r")
3111 (any_lt:GPR (match_operand:X 1 "register_operand" " r")
3112 (match_operand:X 2 "arith_operand" " rI")))]
3114 "slt%i2<u>\t%0,%1,%2"
3115 [(set_attr "type" "slt")
3116 (set_attr "mode" "<X:MODE>")])
;; x <= C  ==>  x < C+1; sle_operand guarantees C+1 still fits.
3118 (define_insn "*sle<u>_<X:mode><GPR:mode>"
3119 [(set (match_operand:GPR 0 "register_operand" "=r")
3120 (any_le:GPR (match_operand:X 1 "register_operand" " r")
3121 (match_operand:X 2 "sle_operand" "")))]
3124 operands[2] = GEN_INT (INTVAL (operands[2]) + 1);
3125 return "slt%i2<u>\t%0,%1,%2";
3127 [(set_attr "type" "slt")
3128 (set_attr "mode" "<X:MODE>")])
3131 ;; ....................
3133 ;; UNCONDITIONAL BRANCHES
3135 ;; ....................
3137 ;; Unconditional branches.
;; Unconditional direct jump to a label.  When the target is out of direct
;; range (attribute length == 8) a far-jump sequence is emitted instead,
;; which clobbers ra (see the comment in the C fragment below).
3140 [(set (pc) (label_ref (match_operand 0 "" "")))]
3143 /* Hopefully this does not happen often as this is going
3144 to clobber $ra and muck up the return stack predictors. */
3145 if (get_attr_length (insn) == 8)
3146 return "jump\t%l0,ra";
3150 [(set_attr "type" "jump")
3151 (set_attr "mode" "none")])
;; Indirect jump: force the target into a Pmode register, then emit the
;; SImode or DImode register-indirect jump insn depending on Pmode.
3153 (define_expand "indirect_jump"
3154 [(set (pc) (match_operand 0 "register_operand"))]
3157 operands[0] = force_reg (Pmode, operands[0]);
3158 if (Pmode == SImode)
3159 emit_jump_insn (gen_indirect_jumpsi (operands[0]))
3161 emit_jump_insn (gen_indirect_jumpdi (operands[0]));
3165 (define_insn "indirect_jump<mode>"
3166 [(set (pc) (match_operand:P 0 "register_operand" "l"))]
3169 [(set_attr "type" "jalr")
3170 (set_attr "mode" "none")])
;; Jump-table dispatch.  With PC-relative case vectors the table entry in
;; operand 0 is an offset, so the address of the table label (operand 1)
;; is added in before jumping.
3172 (define_expand "tablejump"
3173 [(set (pc) (match_operand 0 "register_operand" ""))
3174 (use (label_ref (match_operand 1 "" "")))]
3177 if (CASE_VECTOR_PC_RELATIVE)
3178 operands[0] = expand_simple_binop (Pmode, PLUS, operands[0],
3179 gen_rtx_LABEL_REF (Pmode, operands[1]),
3180 NULL_RTX, 0, OPTAB_DIRECT);
3182 if (CASE_VECTOR_PC_RELATIVE && Pmode == DImode)
3183 emit_jump_insn (gen_tablejumpdi (operands[0], operands[1]));
3185 emit_jump_insn (gen_tablejumpsi (operands[0], operands[1]));
;; Register-indirect dispatch jump; the (use ...) records the case-vector
;; label the jump depends on.
3189 (define_insn "tablejump<mode>"
3190 [(set (pc) (match_operand:GPR 0 "register_operand" "l"))
3191 (use (label_ref (match_operand 1 "" "")))]
3194 [(set_attr "type" "jalr")
3195 (set_attr "mode" "none")])
3198 ;; ....................
3200 ;; Function prologue/epilogue
3202 ;; ....................
;; Function prologue, expanded entirely in C by riscv_expand_prologue.
3205 (define_expand "prologue"
3209 riscv_expand_prologue ();
3213 ;; Block any insns from being moved before this point, since the
3214 ;; profiling call to mcount can use various registers that aren't
3215 ;; saved or used to pass arguments.
3217 (define_insn "blockage"
3218 [(unspec_volatile [(const_int 0)] UNSPECV_BLOCKAGE)]
3221 [(set_attr "type" "ghost")
3222 (set_attr "mode" "none")])
;; Normal function epilogue, expanded in C.
3224 (define_expand "epilogue"
3228 riscv_expand_epilogue (NORMAL_RETURN);
;; Epilogue variant used before a sibling (tail) call.
3232 (define_expand "sibcall_epilogue"
3236 riscv_expand_epilogue (SIBCALL_RETURN);
3240 ;; Trivial return. Make it look like a normal return insn as that
3241 ;; allows jump optimizations to work better.
3243 (define_expand "return"
3245 "riscv_can_use_return_insn ()"
;; Plain return; the exact assembly comes from riscv_output_return ().
3248 (define_insn "simple_return"
3252 return riscv_output_return ();
3254 [(set_attr "type" "jalr")
3255 (set_attr "mode" "none")])
;; Return with an explicit (use ...) of the Pmode register holding the
;; return address, keeping that register live up to the return.
3259 (define_insn "simple_return_internal"
3261 (use (match_operand 0 "pmode_register_operand" ""))]
3264 [(set_attr "type" "jalr")
3265 (set_attr "mode" "none")])
3267 ;; This is used in compiling the unwind routines.
;; Install operand 0 (widened to word_mode if needed) as the return
;; address for exception handling, then emit the EH epilogue jump.
3268 (define_expand "eh_return"
3269 [(use (match_operand 0 "general_operand"))]
3272 if (GET_MODE (operands[0]) != word_mode)
3273 operands[0] = convert_to_mode (word_mode, operands[0], 0);
3275 emit_insn (gen_eh_set_lr_di (operands[0]));
3277 emit_insn (gen_eh_set_lr_si (operands[0]));
3279 emit_jump_insn (gen_eh_return_internal ());
3284 ;; Clobber the return address on the stack. We can't expand this
3285 ;; until we know where it will be put in the stack frame.
;; 32-bit variant; the scratch register is for address computation.
3287 (define_insn "eh_set_lr_si"
3288 [(unspec [(match_operand:SI 0 "register_operand" "r")] UNSPEC_EH_RETURN)
3289 (clobber (match_scratch:SI 1 "=&r"))]
3292 [(set_attr "type" "jump")])
;; 64-bit variant of the above.
3294 (define_insn "eh_set_lr_di"
3295 [(unspec [(match_operand:DI 0 "register_operand" "r")] UNSPEC_EH_RETURN)
3296 (clobber (match_scratch:DI 1 "=&r"))]
3299 [(set_attr "type" "jump")])
;; Split: once the frame layout is known, store the new return address
;; via riscv_set_return_address using the scratch in operand 1.
3302 [(unspec [(match_operand 0 "register_operand")] UNSPEC_EH_RETURN)
3303 (clobber (match_scratch 1))]
3307 riscv_set_return_address (operands[0], operands[1]);
;; After the epilogue is laid out, this becomes the exception-return
;; epilogue (riscv_expand_epilogue with EXCEPTION_RETURN).
3311 (define_insn_and_split "eh_return_internal"
3315 "epilogue_completed"
3317 "riscv_expand_epilogue (EXCEPTION_RETURN); DONE;"
3318 [(set_attr "type" "ret")])
3321 ;; ....................
3325 ;; ....................
;; Sibling (tail) calls.  The extra const_int operand wrapped in
;; UNSPEC_CALLEE_CC carries the callee's calling-convention id
;; (operand 2 here, operand 3 in the _value variants).
3327 (define_expand "sibcall"
3328 [(parallel [(call (match_operand 0 "")
3329 (match_operand 1 ""))
3331 (match_operand 2 "const_int_operand")
3332 ] UNSPEC_CALLEE_CC))])]
3335 rtx target = riscv_legitimize_call_address (XEXP (operands[0], 0));
3336 emit_call_insn (gen_sibcall_internal (target, operands[1], operands[2]));
;; Matched only for genuine sibling calls (SIBLING_CALL_P).
3340 (define_insn "sibcall_internal"
3341 [(call (mem:SI (match_operand 0 "call_insn_operand" "j,S,U"))
3342 (match_operand 1 "" ""))
3344 (match_operand 2 "const_int_operand")
3345 ] UNSPEC_CALLEE_CC))]
3346 "SIBLING_CALL_P (insn)"
3351 [(set_attr "type" "call")])
;; Sibling call that returns a value in operand 0.
3353 (define_expand "sibcall_value"
3354 [(parallel [(set (match_operand 0 "")
3355 (call (match_operand 1 "")
3356 (match_operand 2 "")))
3358 (match_operand 3 "const_int_operand")
3359 ] UNSPEC_CALLEE_CC))])]
3362 rtx target = riscv_legitimize_call_address (XEXP (operands[1], 0));
3363 emit_call_insn (gen_sibcall_value_internal (operands[0], target, operands[2],
3368 (define_insn "sibcall_value_internal"
3369 [(set (match_operand 0 "" "")
3370 (call (mem:SI (match_operand 1 "call_insn_operand" "j,S,U"))
3371 (match_operand 2 "" "")))
3373 (match_operand 3 "const_int_operand")
3374 ] UNSPEC_CALLEE_CC))]
3375 "SIBLING_CALL_P (insn)"
3380 [(set_attr "type" "call")])
;; Normal calls.  As with sibcalls, the callee calling-convention id is
;; carried as a const_int inside UNSPEC_CALLEE_CC (operand 2, or
;; operand 3 for call_value).
3382 (define_expand "call"
3383 [(parallel [(call (match_operand 0 "")
3384 (match_operand 1 ""))
3386 (match_operand 2 "const_int_operand")
3387 ] UNSPEC_CALLEE_CC))])]
3390 rtx target = riscv_legitimize_call_address (XEXP (operands[0], 0));
3391 emit_call_insn (gen_call_internal (target, operands[1], operands[2]));
;; A call clobbers the return-address register (ra).
3395 (define_insn "call_internal"
3396 [(call (mem:SI (match_operand 0 "call_insn_operand" "l,S,U"))
3397 (match_operand 1 "" ""))
3399 (match_operand 2 "const_int_operand")
3400 ] UNSPEC_CALLEE_CC))
3401 (clobber (reg:SI RETURN_ADDR_REGNUM))]
3407 [(set_attr "type" "call")])
;; Call returning a value in operand 0.
3409 (define_expand "call_value"
3410 [(parallel [(set (match_operand 0 "")
3411 (call (match_operand 1 "")
3412 (match_operand 2 "")))
3414 (match_operand 3 "const_int_operand")
3415 ] UNSPEC_CALLEE_CC))])]
3418 rtx target = riscv_legitimize_call_address (XEXP (operands[1], 0));
3419 emit_call_insn (gen_call_value_internal (operands[0], target, operands[2],
3424 (define_insn "call_value_internal"
3425 [(set (match_operand 0 "" "")
3426 (call (mem:SI (match_operand 1 "call_insn_operand" "l,S,U"))
3427 (match_operand 2 "" "")))
3429 (match_operand 3 "const_int_operand")
3430 ] UNSPEC_CALLEE_CC))
3431 (clobber (reg:SI RETURN_ADDR_REGNUM))]
3437 [(set_attr "type" "call")])
3439 ;; Call subroutine returning any type.
;; Emits a RISCV_CC_BASE call to operand 0, then copies each possible
;; result register listed in operand 2 to its destination, followed by a
;; blockage so the copies are not moved away from the call.
3441 (define_expand "untyped_call"
3442 [(parallel [(call (match_operand 0 "")
3444 (match_operand 1 "")
3445 (match_operand 2 "")])]
3450 /* Untyped calls always use the RISCV_CC_BASE calling convention. */
3451 emit_call_insn (gen_call (operands[0], const0_rtx,
3452 gen_int_mode (RISCV_CC_BASE, SImode)));
3454 for (i = 0; i < XVECLEN (operands[2], 0); i++)
3456 rtx set = XVECEXP (operands[2], 0, i);
3457 riscv_emit_move (SET_DEST (set), SET_SRC (set));
3460 emit_insn (gen_blockage ());
;; Attributes of the no-op pattern.
3468 [(set_attr "type" "nop")
3469 (set_attr "mode" "none")])
;; Unconditional trap.
3472 [(trap_if (const_int 1) (const_int 0))]
3475 [(set_attr "type" "trap")])
3477 ;; Must use the registers that we save to prevent the rename reg optimization
3478 ;; pass from using them before the gpr_save pattern when shrink wrapping
3479 ;; occurs. See bug 95252 for instance.
;; Out-of-line register save: calls the __riscv_save_N routine with t0 as
;; the link register; operand 0 selects which routine (how many GPRs).
3481 (define_insn "gpr_save"
3482 [(match_parallel 1 "gpr_save_operation"
3483 [(unspec_volatile [(match_operand 0 "const_int_operand")]
3484 UNSPECV_GPR_SAVE)])]
3486 "call\tt0,__riscv_save_%0"
3487 [(set_attr "type" "call")])
;; Out-of-line register restore: tail-calls __riscv_restore_N.
3489 (define_insn "gpr_restore"
3490 [(unspec_volatile [(match_operand 0 "const_int_operand")] UNSPECV_GPR_RESTORE)]
3492 "tail\t__riscv_restore_%0"
3493 [(set_attr "type" "call")])
;; Return used after gpr_restore; (use ...) keeps the return-address
;; register live.
3495 (define_insn "gpr_restore_return"
3497 (use (match_operand 0 "pmode_register_operand" ""))
3501 [(set_attr "type" "ret")])
;; Read the floating-point control/status register (fcsr) into operand 0.
3503 (define_insn "riscv_frcsr"
3504 [(set (match_operand:SI 0 "register_operand" "=r")
3505 (unspec_volatile:SI [(const_int 0)] UNSPECV_FRCSR))]
3506 "TARGET_HARD_FLOAT || TARGET_ZFINX"
3508 [(set_attr "type" "fmove")])
;; Write fcsr from operand 0.
3510 (define_insn "riscv_fscsr"
3511 [(unspec_volatile [(match_operand:SI 0 "register_operand" "r")] UNSPECV_FSCSR)]
3512 "TARGET_HARD_FLOAT || TARGET_ZFINX"
3514 [(set_attr "type" "fmove")])
;; Read the accrued FP exception flags (fflags) into operand 0.
3516 (define_insn "riscv_frflags"
3517 [(set (match_operand:SI 0 "register_operand" "=r")
3518 (unspec_volatile:SI [(const_int 0)] UNSPECV_FRFLAGS))]
3519 "TARGET_HARD_FLOAT || TARGET_ZFINX"
3521 [(set_attr "type" "fmove")])
;; Write fflags; csr_operand (constraint K) also allows a small immediate.
3523 (define_insn "riscv_fsflags"
3524 [(unspec_volatile [(match_operand:SI 0 "csr_operand" "rK")] UNSPECV_FSFLAGS)]
3525 "TARGET_HARD_FLOAT || TARGET_ZFINX"
3527 [(set_attr "type" "fmove")])
;; Comparison performed purely for its side effect: feq.<fmt> against x0
;; raises the invalid-operation flag when an operand is a signaling NaN
;; (per the RISC-V F extension); the 0/1 result is discarded.
3529 (define_insn "*riscv_fsnvsnan<mode>2"
3530 [(unspec_volatile [(match_operand:ANYF 0 "register_operand" "f")
3531 (match_operand:ANYF 1 "register_operand" "f")]
3533 "TARGET_HARD_FLOAT || TARGET_ZFINX"
3534 "feq.<fmt>\tzero,%0,%1"
3535 [(set_attr "type" "fcmp")
3536 (set_attr "mode" "<UNITMODE>")])
;; Return from a machine-mode trap handler (mret).
3538 (define_insn "riscv_mret"
3540 (unspec_volatile [(const_int 0)] UNSPECV_MRET)]
3543 [(set_attr "type" "ret")])
;; Return from a supervisor-mode trap handler (sret).
3545 (define_insn "riscv_sret"
3547 (unspec_volatile [(const_int 0)] UNSPECV_SRET)]
3550 [(set_attr "type" "ret")])
;; Return from a user-mode trap handler (uret).
3552 (define_insn "riscv_uret"
3554 (unspec_volatile [(const_int 0)] UNSPECV_URET)]
3557 [(set_attr "type" "ret")])
;; Zero-length "ghost" insn: the set of a scratch BLK memory from an
;; unspec of the two registers creates a scheduling dependency between
;; them without emitting any code (type "ghost", length 0).
3559 (define_insn "stack_tie<mode>"
3560 [(set (mem:BLK (scratch))
3561 (unspec:BLK [(match_operand:X 0 "register_operand" "r")
3562 (match_operand:X 1 "register_operand" "r")]
3566 [(set_attr "type" "ghost")
3567 (set_attr "length" "0")]
3570 ;; This fixes a failure with gcc.c-torture/execute/pr64242.c at -O2 for a
3571 ;; 32-bit target when using -mtune=sifive-7-series. The first sched pass
3572 ;; runs before register elimination, and we have a non-obvious dependency
3573 ;; between a use of the soft fp and a set of the hard fp. We fix this by
3574 ;; emitting a clobber using the hard fp between the two insns.
3575 (define_expand "restore_stack_nonlocal"
3576 [(match_operand 0 "register_operand")
3577 (match_operand 1 "memory_operand")]
3580 emit_move_insn (operands[0], operands[1]);
3581 /* Prevent the following hard fp restore from being moved before the move
3582 insn above which uses a copy of the soft fp reg. */
3583 emit_clobber (gen_rtx_MEM (BLKmode, hard_frame_pointer_rtx));
3587 ;; Named pattern for expanding thread pointer reference.
;; Copies the thread-pointer register (tp) into operand 0.
3588 (define_expand "get_thread_pointer<mode>"
3589 [(set (match_operand:P 0 "register_operand" "=r")
3594 ;; Named patterns for stack smashing protection.
;; Copy the guard value into the canary slot (operand 0).  With
;; -mstack-protector-guard=tls the guard is instead read from
;; guard-register + guard-offset, overriding operand 1.
3596 (define_expand "stack_protect_set"
3597 [(match_operand 0 "memory_operand")
3598 (match_operand 1 "memory_operand")]
3601 machine_mode mode = GET_MODE (operands[0]);
3602 if (riscv_stack_protector_guard == SSP_TLS)
3604 rtx reg = gen_rtx_REG (Pmode, riscv_stack_protector_guard_reg);
3605 rtx offset = GEN_INT (riscv_stack_protector_guard_offset);
3606 rtx addr = gen_rtx_PLUS (Pmode, reg, offset);
3607 operands[1] = gen_rtx_MEM (Pmode, addr);
3610 emit_insn ((mode == DImode
3611 ? gen_stack_protect_set_di
3612 : gen_stack_protect_set_si) (operands[0], operands[1]));
3616 ;; DO NOT SPLIT THIS PATTERN. It is important for security reasons that the
3617 ;; canary value does not live beyond the life of this sequence.
;; Load guard -> store to canary slot -> immediately clear the scratch
;; (li %2, 0) so the canary value does not linger in a register.
3618 (define_insn "stack_protect_set_<mode>"
3619 [(set (match_operand:GPR 0 "memory_operand" "=m")
3620 (unspec:GPR [(match_operand:GPR 1 "memory_operand" "m")]
3622 (set (match_scratch:GPR 2 "=&r") (const_int 0))]
3624 "<load>\t%2, %1\;<store>\t%2, %0\;li\t%2, 0"
3625 [(set_attr "type" "multi")
3626 (set_attr "length" "12")])
;; Compare the canary slot against the guard and branch to the label in
;; operand 2 when they match (result == 0).  Same TLS-guard override as
;; in stack_protect_set above.
3628 (define_expand "stack_protect_test"
3629 [(match_operand 0 "memory_operand")
3630 (match_operand 1 "memory_operand")
3635 machine_mode mode = GET_MODE (operands[0]);
3637 result = gen_reg_rtx(mode);
3638 if (riscv_stack_protector_guard == SSP_TLS)
3640 rtx reg = gen_rtx_REG (Pmode, riscv_stack_protector_guard_reg);
3641 rtx offset = GEN_INT (riscv_stack_protector_guard_offset);
3642 rtx addr = gen_rtx_PLUS (Pmode, reg, offset);
3643 operands[1] = gen_rtx_MEM (Pmode, addr);
3645 emit_insn ((mode == DImode
3646 ? gen_stack_protect_test_di
3647 : gen_stack_protect_test_si) (result,
3651 rtx cond = gen_rtx_EQ (VOIDmode, result, const0_rtx);
3652 emit_jump_insn (gen_cbranch4 (mode, cond, result, const0_rtx, operands[2]));
;; result = canary XOR guard (zero means intact); the scratch holding the
;; guard copy is cleared afterwards for the same security reason as above.
3657 (define_insn "stack_protect_test_<mode>"
3658 [(set (match_operand:GPR 0 "register_operand" "=r")
3659 (unspec:GPR [(match_operand:GPR 1 "memory_operand" "m")
3660 (match_operand:GPR 2 "memory_operand" "m")]
3662 (clobber (match_scratch:GPR 3 "=&r"))]
3664 "<load>\t%3, %1\;<load>\t%0, %2\;xor\t%0, %3, %0\;li\t%3, 0"
3665 [(set_attr "type" "multi")
3666 (set_attr "length" "12")])
;; Cache-block maintenance operations on the block containing the address
;; in operand 0 (clean / flush / invalidate / zero — presumably the
;; Zicbom/Zicboz cbo.* instructions; confirm against the elided templates).
3668 (define_insn "riscv_clean_<mode>"
3669 [(unspec_volatile:X [(match_operand:X 0 "register_operand" "r")]
3673 [(set_attr "type" "store")]
3676 (define_insn "riscv_flush_<mode>"
3677 [(unspec_volatile:X [(match_operand:X 0 "register_operand" "r")]
3681 [(set_attr "type" "store")]
3684 (define_insn "riscv_inval_<mode>"
3685 [(unspec_volatile:X [(match_operand:X 0 "register_operand" "r")]
3689 [(set_attr "type" "store")]
3692 (define_insn "riscv_zero_<mode>"
3693 [(unspec_volatile:X [(match_operand:X 0 "register_operand" "r")]
3697 [(set_attr "type" "store")]
;; Data prefetch: operand 1 selects read (0 -> prefetch.r) or write
;; (1 -> prefetch.w) intent; any other value is rejected.
3700 (define_insn "prefetch"
3701 [(prefetch (match_operand 0 "address_operand" "r")
3702 (match_operand 1 "imm5_operand" "i")
3703 (match_operand 2 "const_int_operand" "n"))]
3706 switch (INTVAL (operands[1]))
3708 case 0: return "prefetch.r\t%a0";
3709 case 1: return "prefetch.w\t%a0";
3710 default: gcc_unreachable ();
3713 [(set_attr "type" "store")])
;; Instruction prefetch for the address in operand 0.
3715 (define_insn "riscv_prefetchi_<mode>"
3716 [(unspec_volatile:X [(match_operand:X 0 "address_operand" "r")
3717 (match_operand:X 1 "imm5_operand" "i")]
3721 [(set_attr "type" "store")])
;; Sign-extract a bitfield from operand 1 (operand 2 = width,
;; operand 3 = bit position, per sign_extract RTL semantics).
3723 (define_expand "extv<mode>"
3724 [(set (match_operand:GPR 0 "register_operand" "=r")
3725 (sign_extract:GPR (match_operand:GPR 1 "register_operand" "r")
3726 (match_operand 2 "const_int_operand")
3727 (match_operand 3 "const_int_operand")))]
;; Zero-extract a bitfield; the visible part of the condition restricts
;; this to fields narrower than 8 bits starting at bit 0.
3731 (define_expand "extzv<mode>"
3732 [(set (match_operand:GPR 0 "register_operand" "=r")
3733 (zero_extract:GPR (match_operand:GPR 1 "register_operand" "r")
3734 (match_operand 2 "const_int_operand")
3735 (match_operand 3 "const_int_operand")))]
3739 && (INTVAL (operands[2]) < 8) && (INTVAL (operands[3]) == 0))
;; Widening multiply-accumulate: operand 0 = (HI)op1 * (HI)op2 + op3,
;; with both HImode inputs sign-extended to SImode.
3743 (define_expand "maddhisi4"
3744 [(set (match_operand:SI 0 "register_operand")
3746 (mult:SI (sign_extend:SI (match_operand:HI 1 "register_operand"))
3747 (sign_extend:SI (match_operand:HI 2 "register_operand")))
3748 (match_operand:SI 3 "register_operand")))]
;; Widening multiply-subtract: operand 0 = op3 - (HI)op1 * (HI)op2.
3752 (define_expand "msubhisi4"
3753 [(set (match_operand:SI 0 "register_operand")
3755 (match_operand:SI 3 "register_operand")
3756 (mult:SI (sign_extend:SI (match_operand:HI 1 "register_operand"))
3757 (sign_extend:SI (match_operand:HI 2 "register_operand")))))]
3761 ;; String compare with length insn.
3762 ;; Argument 0 is the target (result)
3763 ;; Argument 1 is the source1
3764 ;; Argument 2 is the source2
3765 ;; Argument 3 is the length
3766 ;; Argument 4 is the alignment
;; Inline strncmp expansion; only attempted when enabled, not optimizing
;; for size, and one of Zbb / XTheadBb / V is available.
3768 (define_expand "cmpstrnsi"
3769 [(parallel [(set (match_operand:SI 0)
3770 (compare:SI (match_operand:BLK 1)
3771 (match_operand:BLK 2)))
3772 (use (match_operand:SI 3))
3773 (use (match_operand:SI 4))])]
3774 "riscv_inline_strncmp && !optimize_size
3775 && (TARGET_ZBB || TARGET_XTHEADBB || TARGET_VECTOR)"
3777 if (riscv_expand_strcmp (operands[0], operands[1], operands[2],
3778 operands[3], operands[4]))
3784 ;; String compare insn.
3785 ;; Argument 0 is the target (result)
3786 ;; Argument 1 is the source1
3787 ;; Argument 2 is the source2
3788 ;; Argument 3 is the alignment
;; Inline strcmp: same expansion helper as cmpstrnsi, with no length
;; bound (NULL_RTX).
3790 (define_expand "cmpstrsi"
3791 [(parallel [(set (match_operand:SI 0)
3792 (compare:SI (match_operand:BLK 1)
3793 (match_operand:BLK 2)))
3794 (use (match_operand:SI 3))])]
3795 "riscv_inline_strcmp && !optimize_size
3796 && (TARGET_ZBB || TARGET_XTHEADBB || TARGET_VECTOR)"
3798 if (riscv_expand_strcmp (operands[0], operands[1], operands[2],
3799 NULL_RTX, operands[3]))
3805 ;; Search character in string (generalization of strlen).
3806 ;; Argument 0 is the resulting offset
3807 ;; Argument 1 is the string
3808 ;; Argument 2 is the search character
3809 ;; Argument 3 is the alignment
;; Inline strlen expansion; the search character is checked against zero
;; before expanding (non-NUL searches take the elided path).
3811 (define_expand "strlen<mode>"
3812 [(set (match_operand:X 0 "register_operand")
3813 (unspec:X [(match_operand:BLK 1 "general_operand")
3814 (match_operand:SI 2 "const_int_operand")
3815 (match_operand:SI 3 "const_int_operand")]
3817 "riscv_inline_strlen && !optimize_size
3818 && (TARGET_ZBB || TARGET_XTHEADBB || TARGET_VECTOR)"
3820 rtx search_char = operands[2];
3822 if (search_char != const0_rtx)
3825 if (riscv_expand_strlen (operands[0], operands[1], operands[2], operands[3]))
;; -mcmodel=large: load a 64-bit value through a PC-relative symbolic
;; address; emitted as a two-instruction sequence (length 8).
3831 (define_insn "*large_load_address"
3832 [(set (match_operand:DI 0 "register_operand" "=r")
3833 (mem:DI (match_operand 1 "pcrel_symbol_operand" "")))]
3834 "TARGET_64BIT && riscv_cmodel == CM_LARGE"
3836 [(set_attr "type" "load")
3837 (set (attr "length") (const_int 8))])
3839 (include "bitmanip.md")
3840 (include "crypto.md")
3842 (include "sync-rvwmo.md")
3843 (include "sync-ztso.md")
3844 (include "peephole.md")
3846 (include "generic.md")
3847 (include "sifive-7.md")
3848 (include "sifive-p400.md")
3849 (include "sifive-p600.md")
3850 (include "thead.md")
3851 (include "generic-vector-ooo.md")
3852 (include "generic-ooo.md")
3853 (include "vector.md")
3854 (include "vector-crypto.md")
3855 (include "zicond.md")
3858 (include "corev.md")
3859 (include "xiangshan.md")