1 ;; Machine description for RISC-V for GNU compiler.
2 ;; Copyright (C) 2011-2024 Free Software Foundation, Inc.
3 ;; Contributed by Andrew Waterman (andrew@sifive.com).
4 ;; Based on MIPS target for GNU compiler.
6 ;; This file is part of GCC.
8 ;; GCC is free software; you can redistribute it and/or modify
9 ;; it under the terms of the GNU General Public License as published by
10 ;; the Free Software Foundation; either version 3, or (at your option)
13 ;; GCC is distributed in the hope that it will be useful,
14 ;; but WITHOUT ANY WARRANTY; without even the implied warranty of
15 ;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 ;; GNU General Public License for more details.
18 ;; You should have received a copy of the GNU General Public License
19 ;; along with GCC; see the file COPYING3. If not see
20 ;; <http://www.gnu.org/licenses/>.
23 ;; Keep this list and the one above riscv_print_operand in sync.
24 ;; The special asm out single letter directives following a '%' are:
25 ;; h -- Print the high-part relocation associated with OP, after stripping
26 ;; any outermost HIGH.
27 ;; R -- Print the low-part relocation associated with OP.
28 ;; C -- Print the integer branch condition for comparison OP.
29 ;; A -- Print the atomic operation suffix for memory model OP.
30 ;; F -- Print a FENCE if the memory model requires a release.
31 ;; z -- Print x0 if OP is zero, otherwise print OP normally.
32 ;; i -- Print i if the operand is not a register.
33 ;; S -- Print shift-index of single-bit mask OP.
34 ;; T -- Print shift-index of inverted single-bit mask OP.
35 ;; ~ -- Print w if TARGET_64BIT is true; otherwise not print anything.
37 (define_c_enum "unspec" [
38 ;; Override return address for exception handling.
41 ;; Symbolic accesses. The order of this list must match that of
42 ;; enum riscv_symbol_type in riscv-protos.h.
52 ;; High part of PC-relative address.
55 ;; Floating-point unspecs.
84 ;; the calling convention of callee
90 ;; Workaround for HFmode without hardware extension
94 (define_c_enum "unspecv" [
95 ;; Register save and restore.
99 ;; Floating-point unspecs.
106 ;; Interrupt handler instructions.
111 ;; Blockage and synchronization.
116 ;; Stack Smash Protector
127 ;; Zihintpause unspec
135 UNSPECV_XTHEADINT_PUSH
136 UNSPECV_XTHEADINT_POP
140 [(RETURN_ADDR_REGNUM 1)
170 (include "predicates.md")
171 (include "constraints.md")
172 (include "iterators.md")
174 ;; ....................
178 ;; ....................
;; Records whether this insn is part of a GOT access sequence: "load"
;; marks a GOT load, "xgot_high" the high-part insn of an extended-GOT
;; sequence; the default "unset" means no GOT access.  Consumed by the
;; "type" and "length" attributes below.
(define_attr "got" "unset,xgot_high,load"
  (const_string "unset"))
183 ;; Classification of moves, extensions and truncations. Most values
184 ;; are as for "type" (see below) but there are also the following
185 ;; move-specific values:
187 ;; andi a single ANDI instruction
188 ;; shift_shift a shift left followed by a shift right
190 ;; This attribute is used to determine the instruction's length and
191 ;; scheduling type. For doubleword moves, the attribute always describes
192 ;; the split instructions; in some cases, it is more appropriate for the
193 ;; scheduling type to be "multi" instead.
;; Per-insn classification of a move/extension/truncation; the
;; move-specific values are described in the comment above.  The "type"
;; and "length" attributes derive their values from this one.  Defaults
;; to "unknown" for non-move insns.
(define_attr "move_type"
  "unknown,load,fpload,store,fpstore,mtc,mfc,move,fmove,
   const,logical,arith,andi,shift_shift,rdvlenb"
  (const_string "unknown"))
199 ;; Main data type used by the insn
200 (define_attr "mode" "unknown,none,QI,HI,SI,DI,TI,HF,SF,DF,TF,
201 RVVMF64BI,RVVMF32BI,RVVMF16BI,RVVMF8BI,RVVMF4BI,RVVMF2BI,RVVM1BI,
202 RVVM8QI,RVVM4QI,RVVM2QI,RVVM1QI,RVVMF2QI,RVVMF4QI,RVVMF8QI,
203 RVVM8HI,RVVM4HI,RVVM2HI,RVVM1HI,RVVMF2HI,RVVMF4HI,
204 RVVM8HF,RVVM4HF,RVVM2HF,RVVM1HF,RVVMF2HF,RVVMF4HF,
205 RVVM8SI,RVVM4SI,RVVM2SI,RVVM1SI,RVVMF2SI,
206 RVVM8SF,RVVM4SF,RVVM2SF,RVVM1SF,RVVMF2SF,
207 RVVM8DI,RVVM4DI,RVVM2DI,RVVM1DI,
208 RVVM8DF,RVVM4DF,RVVM2DF,RVVM1DF,
209 RVVM1x8QI,RVVMF2x8QI,RVVMF4x8QI,RVVMF8x8QI,
210 RVVM1x7QI,RVVMF2x7QI,RVVMF4x7QI,RVVMF8x7QI,
211 RVVM1x6QI,RVVMF2x6QI,RVVMF4x6QI,RVVMF8x6QI,
212 RVVM1x5QI,RVVMF2x5QI,RVVMF4x5QI,RVVMF8x5QI,
213 RVVM2x4QI,RVVM1x4QI,RVVMF2x4QI,RVVMF4x4QI,RVVMF8x4QI,
214 RVVM2x3QI,RVVM1x3QI,RVVMF2x3QI,RVVMF4x3QI,RVVMF8x3QI,
215 RVVM4x2QI,RVVM2x2QI,RVVM1x2QI,RVVMF2x2QI,RVVMF4x2QI,RVVMF8x2QI,
216 RVVM1x8HI,RVVMF2x8HI,RVVMF4x8HI,
217 RVVM1x7HI,RVVMF2x7HI,RVVMF4x7HI,
218 RVVM1x6HI,RVVMF2x6HI,RVVMF4x6HI,
219 RVVM1x5HI,RVVMF2x5HI,RVVMF4x5HI,
220 RVVM2x4HI,RVVM1x4HI,RVVMF2x4HI,RVVMF4x4HI,
221 RVVM2x3HI,RVVM1x3HI,RVVMF2x3HI,RVVMF4x3HI,
222 RVVM4x2HI,RVVM2x2HI,RVVM1x2HI,RVVMF2x2HI,RVVMF4x2HI,
223 RVVM1x8HF,RVVMF2x8HF,RVVMF4x8HF,RVVM1x7HF,RVVMF2x7HF,
224 RVVMF4x7HF,RVVM1x6HF,RVVMF2x6HF,RVVMF4x6HF,RVVM1x5HF,
225 RVVMF2x5HF,RVVMF4x5HF,RVVM2x4HF,RVVM1x4HF,RVVMF2x4HF,
226 RVVMF4x4HF,RVVM2x3HF,RVVM1x3HF,RVVMF2x3HF,RVVMF4x3HF,
227 RVVM4x2HF,RVVM2x2HF,RVVM1x2HF,RVVMF2x2HF,RVVMF4x2HF,
228 RVVM1x8SI,RVVMF2x8SI,
229 RVVM1x7SI,RVVMF2x7SI,
230 RVVM1x6SI,RVVMF2x6SI,
231 RVVM1x5SI,RVVMF2x5SI,
232 RVVM2x4SI,RVVM1x4SI,RVVMF2x4SI,
233 RVVM2x3SI,RVVM1x3SI,RVVMF2x3SI,
234 RVVM4x2SI,RVVM2x2SI,RVVM1x2SI,RVVMF2x2SI,
235 RVVM1x8SF,RVVMF2x8SF,RVVM1x7SF,RVVMF2x7SF,
236 RVVM1x6SF,RVVMF2x6SF,RVVM1x5SF,RVVMF2x5SF,
237 RVVM2x4SF,RVVM1x4SF,RVVMF2x4SF,RVVM2x3SF,
238 RVVM1x3SF,RVVMF2x3SF,RVVM4x2SF,RVVM2x2SF,
239 RVVM1x2SF,RVVMF2x2SF,
240 RVVM1x8DI,RVVM1x7DI,RVVM1x6DI,RVVM1x5DI,
241 RVVM2x4DI,RVVM1x4DI,RVVM2x3DI,RVVM1x3DI,
242 RVVM4x2DI,RVVM2x2DI,RVVM1x2DI,RVVM1x8DF,
243 RVVM1x7DF,RVVM1x6DF,RVVM1x5DF,RVVM2x4DF,
244 RVVM1x4DF,RVVM2x3DF,RVVM1x3DF,RVVM4x2DF,
246 V1QI,V2QI,V4QI,V8QI,V16QI,V32QI,V64QI,V128QI,V256QI,V512QI,V1024QI,V2048QI,V4096QI,
247 V1HI,V2HI,V4HI,V8HI,V16HI,V32HI,V64HI,V128HI,V256HI,V512HI,V1024HI,V2048HI,
248 V1SI,V2SI,V4SI,V8SI,V16SI,V32SI,V64SI,V128SI,V256SI,V512SI,V1024SI,
249 V1DI,V2DI,V4DI,V8DI,V16DI,V32DI,V64DI,V128DI,V256DI,V512DI,
250 V1HF,V2HF,V4HF,V8HF,V16HF,V32HF,V64HF,V128HF,V256HF,V512HF,V1024HF,V2048HF,
251 V1SF,V2SF,V4SF,V8SF,V16SF,V32SF,V64SF,V128SF,V256SF,V512SF,V1024SF,
252 V1DF,V2DF,V4DF,V8DF,V16DF,V32DF,V64DF,V128DF,V256DF,V512DF,
253 V1BI,V2BI,V4BI,V8BI,V16BI,V32BI,V64BI,V128BI,V256BI,V512BI,V1024BI,V2048BI,V4096BI"
254 (const_string "unknown"))
256 ;; True if the main data type is twice the size of a word.
257 (define_attr "dword_mode" "no,yes"
258 (cond [(and (eq_attr "mode" "DI,DF")
259 (eq (symbol_ref "TARGET_64BIT") (const_int 0)))
262 (and (eq_attr "mode" "TI,TF")
263 (ne (symbol_ref "TARGET_64BIT") (const_int 0)))
264 (const_string "yes")]
265 (const_string "no")))
;; ISA extension an insn (or alternative) depends on: the base ISA,
;; the F or D floating-point extensions, or the vector extension.
;; Paired with "ext_enabled" to disable unsupported alternatives.
(define_attr "ext" "base,f,d,vector"
  (const_string "base"))
271 ;; True if the extension is enabled.
272 (define_attr "ext_enabled" "no,yes"
273 (cond [(eq_attr "ext" "base")
276 (and (eq_attr "ext" "f")
277 (match_test "TARGET_HARD_FLOAT"))
280 (and (eq_attr "ext" "d")
281 (match_test "TARGET_DOUBLE_FLOAT"))
284 (and (eq_attr "ext" "vector")
285 (match_test "TARGET_VECTOR"))
288 (const_string "no")))
290 ;; Classification of each insn.
291 ;; branch conditional branch
292 ;; jump unconditional direct jump
293 ;; jalr unconditional indirect jump
294 ;; ret various returns, no arguments
295 ;; call unconditional call
296 ;; load load instruction(s)
297 ;; fpload floating point load
298 ;; store store instruction(s)
299 ;; fpstore floating point store
300 ;; mtc transfer to coprocessor
301 ;; mfc transfer from coprocessor
302 ;; const load constant
303 ;; arith integer arithmetic instructions
304 ;; logical integer logical instructions
305 ;; shift integer shift instructions
306 ;; slt set less than instructions
307 ;; imul integer multiply
308 ;; idiv integer divide
309 ;; move integer register move (addi rd, rs1, 0)
310 ;; fmove floating point register move
311 ;; fadd floating point add/subtract
312 ;; fmul floating point multiply
313 ;; fmadd floating point multiply-add
314 ;; fdiv floating point divide
315 ;; fcmp floating point compare
316 ;; fcvt floating point convert
317 ;; fsqrt floating point square root
318 ;; multi multiword sequence (or user asm statements)
319 ;; auipc integer addition to PC
320 ;; sfb_alu SFB ALU instruction
322 ;; trap trap instruction
323 ;; ghost an instruction that produces no real code
324 ;; bitmanip bit manipulation instructions
325 ;; clmul clmul, clmulh, clmulr
326 ;; rotate rotation instructions
327 ;; atomic atomic instructions
328 ;; condmove conditional moves
329 ;; cbo cache block instructions
330 ;; crypto cryptography instructions
331 ;; pushpop zc push and pop instructions
332 ;; mvpair zc move pair instructions
333 ;; zicond zicond instructions
334 ;; Classification of RVV instructions which will be added to each RVV .md pattern and used by scheduler.
335 ;; rdvlenb vector byte length vlenb csrr read
336 ;; rdvl vector length vl csrr read
337 ;; wrvxrm vector fixed-point rounding mode write
338 ;; wrfrm vector floating-point rounding mode write
;; vsetvl       vector configuration-setting instructions
340 ;; 7. Vector Loads and Stores
341 ;; vlde vector unit-stride load instructions
342 ;; vste vector unit-stride store instructions
343 ;; vldm vector unit-stride mask load instructions
344 ;; vstm vector unit-stride mask store instructions
345 ;; vlds vector strided load instructions
346 ;; vsts vector strided store instructions
347 ;; vldux vector unordered indexed load instructions
348 ;; vldox vector ordered indexed load instructions
349 ;; vstux vector unordered indexed store instructions
350 ;; vstox vector ordered indexed store instructions
351 ;; vldff vector unit-stride fault-only-first load instructions
352 ;; vldr vector whole register load instructions
353 ;; vstr vector whole register store instructions
354 ;; vlsegde vector segment unit-stride load instructions
355 ;; vssegte vector segment unit-stride store instructions
356 ;; vlsegds vector segment strided load instructions
357 ;; vssegts vector segment strided store instructions
358 ;; vlsegdux vector segment unordered indexed load instructions
359 ;; vlsegdox vector segment ordered indexed load instructions
360 ;; vssegtux vector segment unordered indexed store instructions
361 ;; vssegtox vector segment ordered indexed store instructions
362 ;; vlsegdff vector segment unit-stride fault-only-first load instructions
363 ;; 11. Vector integer arithmetic instructions
;; vialu        vector single-width integer add and subtract and logical instructions
365 ;; viwalu vector widening integer add/subtract
366 ;; vext vector integer extension
367 ;; vicalu vector arithmetic with carry or borrow instructions
368 ;; vshift vector single-width bit shift instructions
369 ;; vnshift vector narrowing integer shift instructions
370 ;; viminmax vector integer min/max instructions
371 ;; vicmp vector integer comparison instructions
372 ;; vimul vector single-width integer multiply instructions
373 ;; vidiv vector single-width integer divide instructions
374 ;; viwmul vector widening integer multiply instructions
375 ;; vimuladd vector single-width integer multiply-add instructions
376 ;; viwmuladd vector widening integer multiply-add instructions
377 ;; vimerge vector integer merge instructions
378 ;; vimov vector integer move vector instructions
379 ;; 12. Vector fixed-point arithmetic instructions
380 ;; vsalu vector single-width saturating add and subtract and logical instructions
381 ;; vaalu vector single-width averaging add and subtract and logical instructions
382 ;; vsmul vector single-width fractional multiply with rounding and saturation instructions
383 ;; vsshift vector single-width scaling shift instructions
384 ;; vnclip vector narrowing fixed-point clip instructions
385 ;; 13. Vector floating-point instructions
386 ;; vfalu vector single-width floating-point add/subtract instructions
387 ;; vfwalu vector widening floating-point add/subtract instructions
388 ;; vfmul vector single-width floating-point multiply instructions
389 ;; vfdiv vector single-width floating-point divide instructions
390 ;; vfwmul vector widening floating-point multiply instructions
391 ;; vfmuladd vector single-width floating-point multiply-add instructions
392 ;; vfwmuladd vector widening floating-point multiply-add instructions
393 ;; vfsqrt vector floating-point square-root instructions
394 ;; vfrecp vector floating-point reciprocal square-root instructions
395 ;; vfminmax vector floating-point min/max instructions
396 ;; vfcmp vector floating-point comparison instructions
397 ;; vfsgnj vector floating-point sign-injection instructions
398 ;; vfclass vector floating-point classify instruction
399 ;; vfmerge vector floating-point merge instruction
400 ;; vfmov vector floating-point move instruction
401 ;; vfcvtitof vector single-width integer to floating-point instruction
402 ;; vfcvtftoi vector single-width floating-point to integer instruction
403 ;; vfwcvtitof vector widening integer to floating-point instruction
404 ;; vfwcvtftoi vector widening floating-point to integer instruction
405 ;; vfwcvtftof vector widening floating-point to floating-point instruction
406 ;; vfncvtitof vector narrowing integer to floating-point instruction
407 ;; vfncvtftoi vector narrowing floating-point to integer instruction
408 ;; vfncvtftof vector narrowing floating-point to floating-point instruction
409 ;; 14. Vector reduction operations
410 ;; vired vector single-width integer reduction instructions
411 ;; viwred vector widening integer reduction instructions
412 ;; vfredu vector single-width floating-point un-ordered reduction instruction
413 ;; vfredo vector single-width floating-point ordered reduction instruction
414 ;; vfwredu vector widening floating-point un-ordered reduction instruction
415 ;; vfwredo vector widening floating-point ordered reduction instruction
416 ;; 15. Vector mask instructions
417 ;; vmalu vector mask-register logical instructions
418 ;; vmpop vector mask population count
419 ;; vmffs vector find-first-set mask bit
420 ;; vmsfs vector set mask bit
421 ;; vmiota vector iota
422 ;; vmidx vector element index instruction
423 ;; 16. Vector permutation instructions
424 ;; vimovvx integer scalar move instructions
425 ;; vimovxv integer scalar move instructions
426 ;; vfmovvf floating-point scalar move instructions
427 ;; vfmovfv floating-point scalar move instructions
428 ;; vslideup vector slide instructions
429 ;; vslidedown vector slide instructions
430 ;; vislide1up vector slide instructions
431 ;; vislide1down vector slide instructions
432 ;; vfslide1up vector slide instructions
433 ;; vfslide1down vector slide instructions
434 ;; vgather vector register gather instructions
435 ;; vcompress vector compress instruction
436 ;; vmov whole vector register move
437 ;; vector unknown vector instruction
438 ;; 17. Crypto Vector instructions
439 ;; vandn crypto vector bitwise and-not instructions
440 ;; vbrev crypto vector reverse bits in elements instructions
441 ;; vbrev8 crypto vector reverse bits in bytes instructions
442 ;; vrev8 crypto vector reverse bytes instructions
;; vclz         crypto vector count leading zeros instructions
;; vctz         crypto vector count trailing zeros instructions
445 ;; vrol crypto vector rotate left instructions
446 ;; vror crypto vector rotate right instructions
447 ;; vwsll crypto vector widening shift left logical instructions
448 ;; vclmul crypto vector carry-less multiply - return low half instructions
449 ;; vclmulh crypto vector carry-less multiply - return high half instructions
450 ;; vghsh crypto vector add-multiply over GHASH Galois-Field instructions
;; vgmul        crypto vector multiply over GHASH Galois-Field instructions
452 ;; vaesef crypto vector AES final-round encryption instructions
453 ;; vaesem crypto vector AES middle-round encryption instructions
454 ;; vaesdf crypto vector AES final-round decryption instructions
455 ;; vaesdm crypto vector AES middle-round decryption instructions
456 ;; vaeskf1 crypto vector AES-128 Forward KeySchedule generation instructions
457 ;; vaeskf2 crypto vector AES-256 Forward KeySchedule generation instructions
458 ;; vaesz crypto vector AES round zero encryption/decryption instructions
459 ;; vsha2ms crypto vector SHA-2 message schedule instructions
460 ;; vsha2ch crypto vector SHA-2 two rounds of compression instructions
461 ;; vsha2cl crypto vector SHA-2 two rounds of compression instructions
462 ;; vsm4k crypto vector SM4 KeyExpansion instructions
463 ;; vsm4r crypto vector SM4 Rounds instructions
464 ;; vsm3me crypto vector SM3 Message Expansion instructions
465 ;; vsm3c crypto vector SM3 Compression instructions
467 "unknown,branch,jump,jalr,ret,call,load,fpload,store,fpstore,
468 mtc,mfc,const,arith,logical,shift,slt,imul,idiv,move,fmove,fadd,fmul,
469 fmadd,fdiv,fcmp,fcvt,fsqrt,multi,auipc,sfb_alu,nop,trap,ghost,bitmanip,
470 rotate,clmul,min,max,minu,maxu,clz,ctz,cpop,
471 atomic,condmove,cbo,crypto,pushpop,mvpair,zicond,rdvlenb,rdvl,wrvxrm,wrfrm,
472 rdfrm,vsetvl,vsetvl_pre,vlde,vste,vldm,vstm,vlds,vsts,
473 vldux,vldox,vstux,vstox,vldff,vldr,vstr,
474 vlsegde,vssegte,vlsegds,vssegts,vlsegdux,vlsegdox,vssegtux,vssegtox,vlsegdff,
475 vialu,viwalu,vext,vicalu,vshift,vnshift,vicmp,viminmax,
476 vimul,vidiv,viwmul,vimuladd,viwmuladd,vimerge,vimov,
477 vsalu,vaalu,vsmul,vsshift,vnclip,
478 vfalu,vfwalu,vfmul,vfdiv,vfwmul,vfmuladd,vfwmuladd,vfsqrt,vfrecp,
479 vfcmp,vfminmax,vfsgnj,vfclass,vfmerge,vfmov,
480 vfcvtitof,vfcvtftoi,vfwcvtitof,vfwcvtftoi,
481 vfwcvtftof,vfncvtitof,vfncvtftoi,vfncvtftof,
482 vired,viwred,vfredu,vfredo,vfwredu,vfwredo,
483 vmalu,vmpop,vmffs,vmsfs,vmiota,vmidx,vimovvx,vimovxv,vfmovvf,vfmovfv,
484 vslideup,vslidedown,vislide1up,vislide1down,vfslide1up,vfslide1down,
485 vgather,vcompress,vmov,vector,vandn,vbrev,vbrev8,vrev8,vclz,vctz,vcpop,vrol,vror,vwsll,
486 vclmul,vclmulh,vghsh,vgmul,vaesef,vaesem,vaesdf,vaesdm,vaeskf1,vaeskf2,vaesz,
487 vsha2ms,vsha2ch,vsha2cl,vsm4k,vsm4r,vsm3me,vsm3c"
488 (cond [(eq_attr "got" "load") (const_string "load")
490 ;; If a doubleword move uses these expensive instructions,
491 ;; it is usually better to schedule them in the same way
492 ;; as the singleword form, rather than as "multi".
493 (eq_attr "move_type" "load") (const_string "load")
494 (eq_attr "move_type" "fpload") (const_string "fpload")
495 (eq_attr "move_type" "store") (const_string "store")
496 (eq_attr "move_type" "fpstore") (const_string "fpstore")
497 (eq_attr "move_type" "mtc") (const_string "mtc")
498 (eq_attr "move_type" "mfc") (const_string "mfc")
500 ;; These types of move are always single insns.
501 (eq_attr "move_type" "fmove") (const_string "fmove")
502 (eq_attr "move_type" "arith") (const_string "arith")
503 (eq_attr "move_type" "logical") (const_string "logical")
504 (eq_attr "move_type" "andi") (const_string "logical")
506 ;; These types of move are always split.
507 (eq_attr "move_type" "shift_shift")
508 (const_string "multi")
510 ;; These types of move are split for doubleword modes only.
511 (and (eq_attr "move_type" "move,const")
512 (eq_attr "dword_mode" "yes"))
513 (const_string "multi")
514 (eq_attr "move_type" "move") (const_string "move")
515 (eq_attr "move_type" "const") (const_string "const")
516 (eq_attr "move_type" "rdvlenb") (const_string "rdvlenb")]
517 (const_string "unknown")))
519 ;; True if the float point vector is disabled.
520 (define_attr "fp_vector_disabled" "no,yes"
522 (and (eq_attr "type" "vfmov,vfalu,vfmul,vfdiv,
523 vfwalu,vfwmul,vfmuladd,vfwmuladd,
524 vfsqrt,vfrecp,vfminmax,vfsgnj,vfcmp,
526 vfncvtitof,vfwcvtftoi,vfcvtftoi,vfcvtitof,
527 vfredo,vfredu,vfwredo,vfwredu,
528 vfslide1up,vfslide1down")
529 (and (eq_attr "mode" "RVVM8HF,RVVM4HF,RVVM2HF,RVVM1HF,RVVMF2HF,RVVMF4HF")
530 (match_test "!TARGET_ZVFH")))
533 ;; The mode records as QI for the FP16 <=> INT8 instruction.
534 (and (eq_attr "type" "vfncvtftoi,vfwcvtitof")
535 (and (eq_attr "mode" "RVVM4QI,RVVM2QI,RVVM1QI,RVVMF2QI,RVVMF4QI,RVVMF8QI")
536 (match_test "!TARGET_ZVFH")))
539 (const_string "no")))
;; Widening instructions have group-overlap constraints.  Those are only
;; valid for certain register-group sizes.  This attribute marks the
;; alternatives not matching the required register-group size as disabled.
;; "none" (the default) means the alternative carries no overlap
;; constraint; the WNM values name dest/source register-group shapes and
;; are interpreted by "group_overlap_valid".
(define_attr "group_overlap" "none,W21,W42,W84,W43,W86,W87,W0"
  (const_string "none"))
547 (define_attr "group_overlap_valid" "no,yes"
548 (cond [(eq_attr "group_overlap" "none")
551 (and (eq_attr "group_overlap" "W21")
552 (match_test "riscv_get_v_regno_alignment (GET_MODE (operands[0])) != 2"))
555 (and (eq_attr "group_overlap" "W42")
556 (match_test "riscv_get_v_regno_alignment (GET_MODE (operands[0])) != 4"))
559 (and (eq_attr "group_overlap" "W84")
560 (match_test "riscv_get_v_regno_alignment (GET_MODE (operands[0])) != 8"))
563 ;; According to RVV ISA:
564 ;; The destination EEW is greater than the source EEW, the source EMUL is at least 1,
565 ;; and the overlap is in the highest-numbered part of the destination register group
566 ;; (e.g., when LMUL=8, vzext.vf4 v0, v6 is legal, but a source of v0, v2, or v4 is not).
567 ;; So the source operand should have LMUL >= 1.
568 (and (eq_attr "group_overlap" "W43")
569 (match_test "riscv_get_v_regno_alignment (GET_MODE (operands[0])) != 4
570 && riscv_get_v_regno_alignment (GET_MODE (operands[3])) >= 1"))
573 (and (eq_attr "group_overlap" "W86,W87")
574 (match_test "riscv_get_v_regno_alignment (GET_MODE (operands[0])) != 8
575 && riscv_get_v_regno_alignment (GET_MODE (operands[3])) >= 1"))
578 ;; W21 supports highest-number overlap for source LMUL = 1.
579 ;; For 'wv' variant, we can also allow wide source operand overlaps dest operand.
580 (and (eq_attr "group_overlap" "W0")
581 (match_test "riscv_get_v_regno_alignment (GET_MODE (operands[0])) > 1"))
584 (const_string "yes")))
586 ;; Attribute to control enable or disable instructions.
587 (define_attr "enabled" "no,yes"
589 (eq_attr "ext_enabled" "no")
592 (eq_attr "fp_vector_disabled" "yes")
595 (eq_attr "group_overlap_valid" "no")
598 (const_string "yes")))
600 ;; Length of instruction in bytes.
601 (define_attr "length" ""
603 ;; Branches further than +/- 1 MiB require three instructions.
604 ;; Branches further than +/- 4 KiB require two instructions.
605 (eq_attr "type" "branch")
606 (if_then_else (and (le (minus (match_dup 0) (pc))
608 (le (minus (pc) (match_dup 0))
611 (if_then_else (and (le (minus (match_dup 0) (pc))
613 (le (minus (pc) (match_dup 0))
614 (const_int 1048572)))
618 ;; Jumps further than +/- 1 MiB require two instructions.
619 (eq_attr "type" "jump")
620 (if_then_else (and (le (minus (match_dup 0) (pc))
622 (le (minus (pc) (match_dup 0))
623 (const_int 1048572)))
627 ;; Conservatively assume calls take two instructions (AUIPC + JALR).
628 ;; The linker will opportunistically relax the sequence to JAL.
629 (eq_attr "type" "call") (const_int 8)
631 ;; "Ghost" instructions occupy no space.
632 (eq_attr "type" "ghost") (const_int 0)
634 (eq_attr "got" "load") (const_int 8)
636 ;; SHIFT_SHIFTs are decomposed into two separate instructions.
637 (eq_attr "move_type" "shift_shift")
640 ;; Check for doubleword moves that are decomposed into two
642 (and (eq_attr "move_type" "mtc,mfc,move")
643 (eq_attr "dword_mode" "yes"))
646 ;; Doubleword CONST{,N} moves are split into two word
648 (and (eq_attr "move_type" "const")
649 (eq_attr "dword_mode" "yes"))
650 (symbol_ref "riscv_split_const_insns (operands[1]) * 4")
652 ;; Otherwise, constants, loads and stores are handled by external
654 (eq_attr "move_type" "load,fpload")
655 (symbol_ref "riscv_load_store_insns (operands[1], insn) * 4")
656 (eq_attr "move_type" "store,fpstore")
657 (symbol_ref "riscv_load_store_insns (operands[0], insn) * 4")
;; Is copying of this instruction disallowed?
;; Defaults to "no"; patterns that must not be duplicated override it
;; with (set_attr "cannot_copy" "yes").
(define_attr "cannot_copy" "no,yes" (const_string "no"))
663 ;; Microarchitectures we know how to tune for.
664 ;; Keep this in sync with enum riscv_microarchitecture.
666 "generic,sifive_7,generic_ooo"
667 (const (symbol_ref "((enum attr_tune) riscv_microarchitecture)")))
;; Describe a user's asm statement.
;; Inline asm is classified as "multi" (a multiword sequence, per the
;; "type" attribute comment above), so length/scheduling treat it
;; conservatively.
(define_asm_attributes
  [(set_attr "type" "multi")])
673 ;; Ghost instructions produce no real code and introduce no hazards.
674 ;; They exist purely to express an effect on dataflow.
675 (define_insn_reservation "ghost" 0
676 (eq_attr "type" "ghost")
680 ;; ....................
684 ;; ....................
;; Scalar floating-point addition.  ANYF iterates over the enabled FP
;; modes; <fmt> and <UNITMODE> are mode attributes supplied by the
;; included iterator definitions.  Available with hard float or the
;; Zfinx family of extensions.
(define_insn "add<mode>3"
  [(set (match_operand:ANYF            0 "register_operand" "=f")
	(plus:ANYF (match_operand:ANYF 1 "register_operand" " f")
		   (match_operand:ANYF 2 "register_operand" " f")))]
  "TARGET_HARD_FLOAT || TARGET_ZFINX"
  "fadd.<fmt>\t%0,%1,%2"
  [(set_attr "type" "fadd")
   (set_attr "mode" "<UNITMODE>")])
696 (define_insn "*addsi3"
697 [(set (match_operand:SI 0 "register_operand" "=r,r")
698 (plus:SI (match_operand:SI 1 "register_operand" " r,r")
699 (match_operand:SI 2 "arith_operand" " r,I")))]
702 [(set_attr "type" "arith")
703 (set_attr "mode" "SI")])
705 (define_expand "addsi3"
706 [(set (match_operand:SI 0 "register_operand" "=r,r")
707 (plus:SI (match_operand:SI 1 "register_operand" " r,r")
708 (match_operand:SI 2 "arith_operand" " r,I")))]
713 rtx t = gen_reg_rtx (DImode);
714 emit_insn (gen_addsi3_extended (t, operands[1], operands[2]));
715 t = gen_lowpart (SImode, t);
716 SUBREG_PROMOTED_VAR_P (t) = 1;
717 SUBREG_PROMOTED_SET (t, SRP_SIGNED);
718 emit_move_insn (operands[0], t);
723 (define_insn "adddi3"
724 [(set (match_operand:DI 0 "register_operand" "=r,r")
725 (plus:DI (match_operand:DI 1 "register_operand" " r,r")
726 (match_operand:DI 2 "arith_operand" " r,I")))]
729 [(set_attr "type" "arith")
730 (set_attr "mode" "DI")])
732 (define_expand "addv<mode>4"
733 [(set (match_operand:GPR 0 "register_operand" "=r,r")
734 (plus:GPR (match_operand:GPR 1 "register_operand" " r,r")
735 (match_operand:GPR 2 "arith_operand" " r,I")))
736 (label_ref (match_operand 3 "" ""))]
739 if (TARGET_64BIT && <MODE>mode == SImode)
741 rtx t3 = gen_reg_rtx (DImode);
742 rtx t4 = gen_reg_rtx (DImode);
743 rtx t5 = gen_reg_rtx (DImode);
744 rtx t6 = gen_reg_rtx (DImode);
746 riscv_emit_binary (PLUS, operands[0], operands[1], operands[2]);
747 if (GET_CODE (operands[1]) != CONST_INT)
748 emit_insn (gen_extend_insn (t4, operands[1], DImode, SImode, 0));
751 if (GET_CODE (operands[2]) != CONST_INT)
752 emit_insn (gen_extend_insn (t5, operands[2], DImode, SImode, 0));
755 emit_insn (gen_adddi3 (t3, t4, t5));
756 emit_insn (gen_extend_insn (t6, operands[0], DImode, SImode, 0));
758 riscv_expand_conditional_branch (operands[3], NE, t6, t3);
762 rtx t3 = gen_reg_rtx (<MODE>mode);
763 rtx t4 = gen_reg_rtx (<MODE>mode);
765 emit_insn (gen_add3_insn (operands[0], operands[1], operands[2]));
766 rtx cmp1 = gen_rtx_LT (<MODE>mode, operands[2], const0_rtx);
767 emit_insn (gen_cstore<mode>4 (t3, cmp1, operands[2], const0_rtx));
768 rtx cmp2 = gen_rtx_LT (<MODE>mode, operands[0], operands[1]);
770 emit_insn (gen_cstore<mode>4 (t4, cmp2, operands[0], operands[1]));
771 riscv_expand_conditional_branch (operands[3], NE, t3, t4);
776 (define_expand "uaddv<mode>4"
777 [(set (match_operand:GPR 0 "register_operand" "=r,r")
778 (plus:GPR (match_operand:GPR 1 "register_operand" " r,r")
779 (match_operand:GPR 2 "arith_operand" " r,I")))
780 (label_ref (match_operand 3 "" ""))]
783 if (TARGET_64BIT && <MODE>mode == SImode)
785 rtx t3 = gen_reg_rtx (DImode);
786 rtx t4 = gen_reg_rtx (DImode);
788 if (GET_CODE (operands[1]) != CONST_INT)
789 emit_insn (gen_extend_insn (t3, operands[1], DImode, SImode, 0));
792 riscv_emit_binary (PLUS, operands[0], operands[1], operands[2]);
793 emit_insn (gen_extend_insn (t4, operands[0], DImode, SImode, 0));
795 riscv_expand_conditional_branch (operands[3], LTU, t4, t3);
799 emit_insn (gen_add3_insn (operands[0], operands[1], operands[2]));
800 riscv_expand_conditional_branch (operands[3], LTU, operands[0],
807 (define_insn "addsi3_extended"
808 [(set (match_operand:DI 0 "register_operand" "=r,r")
810 (plus:SI (match_operand:SI 1 "register_operand" " r,r")
811 (match_operand:SI 2 "arith_operand" " r,I"))))]
814 [(set_attr "type" "arith")
815 (set_attr "mode" "SI")])
817 (define_insn "*addsi3_extended2"
818 [(set (match_operand:DI 0 "register_operand" "=r,r")
820 (match_operator:SI 3 "subreg_lowpart_operator"
821 [(plus:DI (match_operand:DI 1 "register_operand" " r,r")
822 (match_operand:DI 2 "arith_operand" " r,I"))])))]
825 [(set_attr "type" "arith")
826 (set_attr "mode" "SI")])
829 ;; ....................
833 ;; ....................
;; Scalar floating-point subtraction.  Mirrors "add<mode>3" above;
;; note the scheduling type is "fadd" — FP add and subtract share a
;; classification.
(define_insn "sub<mode>3"
  [(set (match_operand:ANYF             0 "register_operand" "=f")
	(minus:ANYF (match_operand:ANYF 1 "register_operand" " f")
		    (match_operand:ANYF 2 "register_operand" " f")))]
  "TARGET_HARD_FLOAT || TARGET_ZFINX"
  "fsub.<fmt>\t%0,%1,%2"
  [(set_attr "type" "fadd")
   (set_attr "mode" "<UNITMODE>")])
845 (define_insn "subdi3"
846 [(set (match_operand:DI 0 "register_operand" "= r")
847 (minus:DI (match_operand:DI 1 "reg_or_0_operand" " rJ")
848 (match_operand:DI 2 "register_operand" " r")))]
851 [(set_attr "type" "arith")
852 (set_attr "mode" "DI")])
854 (define_insn "*subsi3"
855 [(set (match_operand:SI 0 "register_operand" "= r")
856 (minus:SI (match_operand:SI 1 "reg_or_0_operand" " rJ")
857 (match_operand:SI 2 "register_operand" " r")))]
860 [(set_attr "type" "arith")
861 (set_attr "mode" "SI")])
863 (define_expand "subsi3"
864 [(set (match_operand:SI 0 "register_operand" "= r")
865 (minus:SI (match_operand:SI 1 "reg_or_0_operand" " rJ")
866 (match_operand:SI 2 "register_operand" " r")))]
871 rtx t = gen_reg_rtx (DImode);
872 emit_insn (gen_subsi3_extended (t, operands[1], operands[2]));
873 t = gen_lowpart (SImode, t);
874 SUBREG_PROMOTED_VAR_P (t) = 1;
875 SUBREG_PROMOTED_SET (t, SRP_SIGNED);
876 emit_move_insn (operands[0], t);
881 (define_expand "subv<mode>4"
882 [(set (match_operand:GPR 0 "register_operand" "= r")
883 (minus:GPR (match_operand:GPR 1 "reg_or_0_operand" " rJ")
884 (match_operand:GPR 2 "register_operand" " r")))
885 (label_ref (match_operand 3 "" ""))]
888 if (TARGET_64BIT && <MODE>mode == SImode)
890 rtx t3 = gen_reg_rtx (DImode);
891 rtx t4 = gen_reg_rtx (DImode);
892 rtx t5 = gen_reg_rtx (DImode);
893 rtx t6 = gen_reg_rtx (DImode);
895 riscv_emit_binary (MINUS, operands[0], operands[1], operands[2]);
896 if (GET_CODE (operands[1]) != CONST_INT)
897 emit_insn (gen_extend_insn (t4, operands[1], DImode, SImode, 0));
900 if (GET_CODE (operands[2]) != CONST_INT)
901 emit_insn (gen_extend_insn (t5, operands[2], DImode, SImode, 0));
904 emit_insn (gen_subdi3 (t3, t4, t5));
905 emit_insn (gen_extend_insn (t6, operands[0], DImode, SImode, 0));
907 riscv_expand_conditional_branch (operands[3], NE, t6, t3);
911 rtx t3 = gen_reg_rtx (<MODE>mode);
912 rtx t4 = gen_reg_rtx (<MODE>mode);
914 emit_insn (gen_sub3_insn (operands[0], operands[1], operands[2]));
916 rtx cmp1 = gen_rtx_LT (<MODE>mode, operands[2], const0_rtx);
917 emit_insn (gen_cstore<mode>4 (t3, cmp1, operands[2], const0_rtx));
919 rtx cmp2 = gen_rtx_LT (<MODE>mode, operands[1], operands[0]);
920 emit_insn (gen_cstore<mode>4 (t4, cmp2, operands[1], operands[0]));
922 riscv_expand_conditional_branch (operands[3], NE, t3, t4);
928 (define_expand "usubv<mode>4"
929 [(set (match_operand:GPR 0 "register_operand" "= r")
930 (minus:GPR (match_operand:GPR 1 "reg_or_0_operand" " rJ")
931 (match_operand:GPR 2 "register_operand" " r")))
932 (label_ref (match_operand 3 "" ""))]
935 if (TARGET_64BIT && <MODE>mode == SImode)
937 rtx t3 = gen_reg_rtx (DImode);
938 rtx t4 = gen_reg_rtx (DImode);
940 if (GET_CODE (operands[1]) != CONST_INT)
941 emit_insn (gen_extend_insn (t3, operands[1], DImode, SImode, 0));
944 riscv_emit_binary (MINUS, operands[0], operands[1], operands[2]);
945 emit_insn (gen_extend_insn (t4, operands[0], DImode, SImode, 0));
947 riscv_expand_conditional_branch (operands[3], LTU, t3, t4);
951 emit_insn (gen_sub3_insn (operands[0], operands[1], operands[2]));
952 riscv_expand_conditional_branch (operands[3], LTU, operands[1],
960 (define_insn "subsi3_extended"
961 [(set (match_operand:DI 0 "register_operand" "= r")
963 (minus:SI (match_operand:SI 1 "reg_or_0_operand" " rJ")
964 (match_operand:SI 2 "register_operand" " r"))))]
967 [(set_attr "type" "arith")
968 (set_attr "mode" "SI")])
970 (define_insn "*subsi3_extended2"
971 [(set (match_operand:DI 0 "register_operand" "= r")
973 (match_operator:SI 3 "subreg_lowpart_operator"
974 [(minus:DI (match_operand:DI 1 "reg_or_0_operand" " rJ")
975 (match_operand:DI 2 "register_operand" " r"))])))]
978 [(set_attr "type" "arith")
979 (set_attr "mode" "SI")])
981 (define_insn "negdi2"
982 [(set (match_operand:DI 0 "register_operand" "=r")
983 (neg:DI (match_operand:DI 1 "register_operand" " r")))]
986 [(set_attr "type" "arith")
987 (set_attr "mode" "DI")])
989 (define_insn "*negsi2"
990 [(set (match_operand:SI 0 "register_operand" "=r")
991 (neg:SI (match_operand:SI 1 "register_operand" " r")))]
994 [(set_attr "type" "arith")
995 (set_attr "mode" "SI")])
997 (define_expand "negsi2"
998 [(set (match_operand:SI 0 "register_operand" "=r")
999 (neg:SI (match_operand:SI 1 "register_operand" " r")))]
1004 rtx t = gen_reg_rtx (DImode);
1005 emit_insn (gen_negsi2_extended (t, operands[1]));
1006 t = gen_lowpart (SImode, t);
1007 SUBREG_PROMOTED_VAR_P (t) = 1;
1008 SUBREG_PROMOTED_SET (t, SRP_SIGNED);
1009 emit_move_insn (operands[0], t);
1014 (define_insn "negsi2_extended"
1015 [(set (match_operand:DI 0 "register_operand" "=r")
1017 (neg:SI (match_operand:SI 1 "register_operand" " r"))))]
1020 [(set_attr "type" "arith")
1021 (set_attr "mode" "SI")])
1023 (define_insn "*negsi2_extended2"
1024 [(set (match_operand:DI 0 "register_operand" "=r")
1026 (match_operator:SI 2 "subreg_lowpart_operator"
1027 [(neg:DI (match_operand:DI 1 "register_operand" " r"))])))]
1030 [(set_attr "type" "arith")
1031 (set_attr "mode" "SI")])
1034 ;; ....................
1038 ;; ....................
;; Floating-point multiply: emits fmul.<fmt> for each mode in the ANYF
;; iterator (FP register file with TARGET_HARD_FLOAT, or X-register FP
;; with TARGET_ZFINX — same mnemonic either way).
1041 (define_insn "mul<mode>3"
1042 [(set (match_operand:ANYF 0 "register_operand" "=f")
1043 (mult:ANYF (match_operand:ANYF 1 "register_operand" " f")
1044 (match_operand:ANYF 2 "register_operand" " f")))]
1045 "TARGET_HARD_FLOAT || TARGET_ZFINX"
1046 "fmul.<fmt>\t%0,%1,%2"
1047 [(set_attr "type" "fmul")
1048 (set_attr "mode" "<UNITMODE>")])
1050 (define_insn "*mulsi3"
1051 [(set (match_operand:SI 0 "register_operand" "=r")
1052 (mult:SI (match_operand:SI 1 "register_operand" " r")
1053 (match_operand:SI 2 "register_operand" " r")))]
1054 "TARGET_ZMMUL || TARGET_MUL"
1056 [(set_attr "type" "imul")
1057 (set_attr "mode" "SI")])
1059 (define_expand "mulsi3"
1060 [(set (match_operand:SI 0 "register_operand" "=r")
1061 (mult:SI (match_operand:SI 1 "register_operand" " r")
1062 (match_operand:SI 2 "register_operand" " r")))]
1063 "TARGET_ZMMUL || TARGET_MUL"
1067 rtx t = gen_reg_rtx (DImode);
1068 emit_insn (gen_mulsi3_extended (t, operands[1], operands[2]));
1069 t = gen_lowpart (SImode, t);
1070 SUBREG_PROMOTED_VAR_P (t) = 1;
1071 SUBREG_PROMOTED_SET (t, SRP_SIGNED);
1072 emit_move_insn (operands[0], t);
1077 (define_insn "muldi3"
1078 [(set (match_operand:DI 0 "register_operand" "=r")
1079 (mult:DI (match_operand:DI 1 "register_operand" " r")
1080 (match_operand:DI 2 "register_operand" " r")))]
1081 "(TARGET_ZMMUL || TARGET_MUL) && TARGET_64BIT"
1083 [(set_attr "type" "imul")
1084 (set_attr "mode" "DI")])
1086 (define_expand "mulv<mode>4"
1087 [(set (match_operand:GPR 0 "register_operand" "=r")
1088 (mult:GPR (match_operand:GPR 1 "register_operand" " r")
1089 (match_operand:GPR 2 "register_operand" " r")))
1090 (label_ref (match_operand 3 "" ""))]
1091 "TARGET_ZMMUL || TARGET_MUL"
1093 if (TARGET_64BIT && <MODE>mode == SImode)
1095 rtx t3 = gen_reg_rtx (DImode);
1096 rtx t4 = gen_reg_rtx (DImode);
1097 rtx t5 = gen_reg_rtx (DImode);
1098 rtx t6 = gen_reg_rtx (DImode);
1100 if (GET_CODE (operands[1]) != CONST_INT)
1101 emit_insn (gen_extend_insn (t4, operands[1], DImode, SImode, 0));
1104 if (GET_CODE (operands[2]) != CONST_INT)
1105 emit_insn (gen_extend_insn (t5, operands[2], DImode, SImode, 0));
1108 emit_insn (gen_muldi3 (t3, t4, t5));
1110 emit_move_insn (operands[0], gen_lowpart (SImode, t3));
1111 emit_insn (gen_extend_insn (t6, operands[0], DImode, SImode, 0));
1113 riscv_expand_conditional_branch (operands[3], NE, t6, t3);
1117 rtx hp = gen_reg_rtx (<MODE>mode);
1118 rtx lp = gen_reg_rtx (<MODE>mode);
1120 emit_insn (gen_smul<mode>3_highpart (hp, operands[1], operands[2]));
1121 emit_insn (gen_mul<mode>3 (operands[0], operands[1], operands[2]));
1122 riscv_emit_binary (ASHIFTRT, lp, operands[0],
1123 GEN_INT (BITS_PER_WORD - 1));
1125 riscv_expand_conditional_branch (operands[3], NE, hp, lp);
1131 (define_expand "umulv<mode>4"
1132 [(set (match_operand:GPR 0 "register_operand" "=r")
1133 (mult:GPR (match_operand:GPR 1 "register_operand" " r")
1134 (match_operand:GPR 2 "register_operand" " r")))
1135 (label_ref (match_operand 3 "" ""))]
1136 "TARGET_ZMMUL || TARGET_MUL"
1138 if (TARGET_64BIT && <MODE>mode == SImode)
1140 rtx t3 = gen_reg_rtx (DImode);
1141 rtx t4 = gen_reg_rtx (DImode);
1142 rtx t5 = gen_reg_rtx (DImode);
1143 rtx t6 = gen_reg_rtx (DImode);
1144 rtx t7 = gen_reg_rtx (DImode);
1145 rtx t8 = gen_reg_rtx (DImode);
1147 if (GET_CODE (operands[1]) != CONST_INT)
1148 emit_insn (gen_extend_insn (t3, operands[1], DImode, SImode, 0));
1151 if (GET_CODE (operands[2]) != CONST_INT)
1152 emit_insn (gen_extend_insn (t4, operands[2], DImode, SImode, 0));
1156 emit_insn (gen_ashldi3 (t5, t3, GEN_INT (32)));
1157 emit_insn (gen_ashldi3 (t6, t4, GEN_INT (32)));
1158 emit_insn (gen_umuldi3_highpart (t7, t5, t6));
1159 emit_move_insn (operands[0], gen_lowpart (SImode, t7));
1160 emit_insn (gen_lshrdi3 (t8, t7, GEN_INT (32)));
1162 riscv_expand_conditional_branch (operands[3], NE, t8, const0_rtx);
1166 rtx hp = gen_reg_rtx (<MODE>mode);
1168 emit_insn (gen_umul<mode>3_highpart (hp, operands[1], operands[2]));
1169 emit_insn (gen_mul<mode>3 (operands[0], operands[1], operands[2]));
1171 riscv_expand_conditional_branch (operands[3], NE, hp, const0_rtx);
1177 (define_insn "mulsi3_extended"
1178 [(set (match_operand:DI 0 "register_operand" "=r")
1180 (mult:SI (match_operand:SI 1 "register_operand" " r")
1181 (match_operand:SI 2 "register_operand" " r"))))]
1182 "(TARGET_ZMMUL || TARGET_MUL) && TARGET_64BIT"
1184 [(set_attr "type" "imul")
1185 (set_attr "mode" "SI")])
1187 (define_insn "*mulsi3_extended2"
1188 [(set (match_operand:DI 0 "register_operand" "=r")
1190 (match_operator:SI 3 "subreg_lowpart_operator"
1191 [(mult:DI (match_operand:DI 1 "register_operand" " r")
1192 (match_operand:DI 2 "register_operand" " r"))])))]
1193 "(TARGET_ZMMUL || TARGET_MUL) && TARGET_64BIT"
1195 [(set_attr "type" "imul")
1196 (set_attr "mode" "SI")])
1199 ;; ........................
1201 ;; MULTIPLICATION HIGH-PART
1203 ;; ........................
1207 (define_expand "<u>mulditi3"
1208 [(set (match_operand:TI 0 "register_operand")
1209 (mult:TI (any_extend:TI (match_operand:DI 1 "register_operand"))
1210 (any_extend:TI (match_operand:DI 2 "register_operand"))))]
1211 "(TARGET_ZMMUL || TARGET_MUL) && TARGET_64BIT"
1213 rtx low = gen_reg_rtx (DImode);
1214 emit_insn (gen_muldi3 (low, operands[1], operands[2]));
1216 rtx high = gen_reg_rtx (DImode);
1217 emit_insn (gen_<su>muldi3_highpart (high, operands[1], operands[2]));
1219 emit_move_insn (gen_lowpart (DImode, operands[0]), low);
1220 emit_move_insn (gen_highpart (DImode, operands[0]), high);
1224 (define_insn "<su>muldi3_highpart"
1225 [(set (match_operand:DI 0 "register_operand" "=r")
1228 (mult:TI (any_extend:TI
1229 (match_operand:DI 1 "register_operand" " r"))
1231 (match_operand:DI 2 "register_operand" " r")))
1233 "(TARGET_ZMMUL || TARGET_MUL) && TARGET_64BIT"
1235 [(set_attr "type" "imul")
1236 (set_attr "mode" "DI")])
1238 (define_expand "usmulditi3"
1239 [(set (match_operand:TI 0 "register_operand")
1240 (mult:TI (zero_extend:TI (match_operand:DI 1 "register_operand"))
1241 (sign_extend:TI (match_operand:DI 2 "register_operand"))))]
1242 "(TARGET_ZMMUL || TARGET_MUL) && TARGET_64BIT"
1244 rtx low = gen_reg_rtx (DImode);
1245 emit_insn (gen_muldi3 (low, operands[1], operands[2]));
1247 rtx high = gen_reg_rtx (DImode);
1248 emit_insn (gen_usmuldi3_highpart (high, operands[1], operands[2]));
1250 emit_move_insn (gen_lowpart (DImode, operands[0]), low);
1251 emit_move_insn (gen_highpart (DImode, operands[0]), high);
1255 (define_insn "usmuldi3_highpart"
1256 [(set (match_operand:DI 0 "register_operand" "=r")
1259 (mult:TI (zero_extend:TI
1260 (match_operand:DI 1 "register_operand" "r"))
1262 (match_operand:DI 2 "register_operand" " r")))
1264 "(TARGET_ZMMUL || TARGET_MUL) && TARGET_64BIT"
1266 [(set_attr "type" "imul")
1267 (set_attr "mode" "DI")])
1269 (define_expand "<u>mulsidi3"
1270 [(set (match_operand:DI 0 "register_operand" "=r")
1271 (mult:DI (any_extend:DI
1272 (match_operand:SI 1 "register_operand" " r"))
1274 (match_operand:SI 2 "register_operand" " r"))))]
1275 "(TARGET_ZMMUL || TARGET_MUL) && !TARGET_64BIT"
1277 rtx temp = gen_reg_rtx (SImode);
1278 riscv_emit_binary (MULT, temp, operands[1], operands[2]);
1279 emit_insn (gen_<su>mulsi3_highpart (riscv_subword (operands[0], true),
1280 operands[1], operands[2]));
1281 emit_insn (gen_movsi (riscv_subword (operands[0], false), temp));
1285 (define_insn "<su>mulsi3_highpart"
1286 [(set (match_operand:SI 0 "register_operand" "=r")
1289 (mult:DI (any_extend:DI
1290 (match_operand:SI 1 "register_operand" " r"))
1292 (match_operand:SI 2 "register_operand" " r")))
1294 "(TARGET_ZMMUL || TARGET_MUL) && !TARGET_64BIT"
1296 [(set_attr "type" "imul")
1297 (set_attr "mode" "SI")])
1300 (define_expand "usmulsidi3"
1301 [(set (match_operand:DI 0 "register_operand" "=r")
1302 (mult:DI (zero_extend:DI
1303 (match_operand:SI 1 "register_operand" " r"))
1305 (match_operand:SI 2 "register_operand" " r"))))]
1306 "(TARGET_ZMMUL || TARGET_MUL) && !TARGET_64BIT"
1308 rtx temp = gen_reg_rtx (SImode);
1309 riscv_emit_binary (MULT, temp, operands[1], operands[2]);
1310 emit_insn (gen_usmulsi3_highpart (riscv_subword (operands[0], true),
1311 operands[1], operands[2]));
1312 emit_insn (gen_movsi (riscv_subword (operands[0], false), temp));
1316 (define_insn "usmulsi3_highpart"
1317 [(set (match_operand:SI 0 "register_operand" "=r")
1320 (mult:DI (zero_extend:DI
1321 (match_operand:SI 1 "register_operand" " r"))
1323 (match_operand:SI 2 "register_operand" " r")))
1325 "(TARGET_ZMMUL || TARGET_MUL) && !TARGET_64BIT"
1327 [(set_attr "type" "imul")
1328 (set_attr "mode" "SI")])
1331 ;; ....................
1333 ;; DIVISION and REMAINDER
1335 ;; ....................
1338 (define_insn "*<optab>si3"
1339 [(set (match_operand:SI 0 "register_operand" "=r")
1340 (any_div:SI (match_operand:SI 1 "register_operand" " r")
1341 (match_operand:SI 2 "register_operand" " r")))]
1343 "<insn>%i2%~\t%0,%1,%2"
1344 [(set_attr "type" "idiv")
1345 (set_attr "mode" "SI")])
1347 (define_expand "<optab>si3"
1348 [(set (match_operand:SI 0 "register_operand" "=r")
1349 (any_div:SI (match_operand:SI 1 "register_operand" " r")
1350 (match_operand:SI 2 "register_operand" " r")))]
1355 rtx t = gen_reg_rtx (DImode);
1356 emit_insn (gen_<optab>si3_extended (t, operands[1], operands[2]));
1357 t = gen_lowpart (SImode, t);
1358 SUBREG_PROMOTED_VAR_P (t) = 1;
1359 SUBREG_PROMOTED_SET (t, SRP_SIGNED);
1360 emit_move_insn (operands[0], t);
;; 64-bit integer division/remainder (any_div iterator).  RV64-only; the
;; SImode variants use the *w forms elsewhere.  %i2 prints an `i' suffix
;; when operand 2 is an immediate (see the directive list at file top),
;; though the "r" constraint here keeps operand 2 in a register.
1365 (define_insn "<optab>di3"
1366 [(set (match_operand:DI 0 "register_operand" "=r")
1367 (any_div:DI (match_operand:DI 1 "register_operand" " r")
1368 (match_operand:DI 2 "register_operand" " r")))]
1369 "TARGET_DIV && TARGET_64BIT"
1370 "<insn>%i2\t%0,%1,%2"
1371 [(set_attr "type" "idiv")
1372 (set_attr "mode" "DI")])
1374 (define_expand "<u>divmod<mode>4"
1376 [(set (match_operand:GPR 0 "register_operand")
1377 (only_div:GPR (match_operand:GPR 1 "register_operand")
1378 (match_operand:GPR 2 "register_operand")))
1379 (set (match_operand:GPR 3 "register_operand")
1380 (<paired_mod>:GPR (match_dup 1) (match_dup 2)))])]
1381 "TARGET_DIV && riscv_use_divmod_expander ()"
1383 rtx tmp = gen_reg_rtx (<MODE>mode);
1384 emit_insn (gen_<u>div<GPR:mode>3 (operands[0], operands[1], operands[2]));
1385 emit_insn (gen_mul<GPR:mode>3 (tmp, operands[0], operands[2]));
1386 emit_insn (gen_sub<GPR:mode>3 (operands[3], operands[1], tmp));
1390 (define_insn "<optab>si3_extended"
1391 [(set (match_operand:DI 0 "register_operand" "=r")
1393 (any_div:SI (match_operand:SI 1 "register_operand" " r")
1394 (match_operand:SI 2 "register_operand" " r"))))]
1395 "TARGET_DIV && TARGET_64BIT"
1396 "<insn>%i2w\t%0,%1,%2"
1397 [(set_attr "type" "idiv")
1398 (set_attr "mode" "DI")])
;; Floating-point divide: fdiv.<fmt>.  Additionally gated on TARGET_FDIV
;; since FP divide (like sqrt) can be disabled independently of the rest
;; of the FP extension.
1400 (define_insn "div<mode>3"
1401 [(set (match_operand:ANYF 0 "register_operand" "=f")
1402 (div:ANYF (match_operand:ANYF 1 "register_operand" " f")
1403 (match_operand:ANYF 2 "register_operand" " f")))]
1404 "(TARGET_HARD_FLOAT || TARGET_ZFINX) && TARGET_FDIV"
1405 "fdiv.<fmt>\t%0,%1,%2"
1406 [(set_attr "type" "fdiv")
1407 (set_attr "mode" "<UNITMODE>")])
1410 ;; ....................
1414 ;; ....................
1416 (define_insn "sqrt<mode>2"
1417 [(set (match_operand:ANYF 0 "register_operand" "=f")
1418 (sqrt:ANYF (match_operand:ANYF 1 "register_operand" " f")))]
1419 "(TARGET_HARD_FLOAT || TARGET_ZFINX) && TARGET_FDIV"
1421 return "fsqrt.<fmt>\t%0,%1";
1423 [(set_attr "type" "fsqrt")
1424 (set_attr "mode" "<UNITMODE>")])
1426 ;; Floating point multiply accumulate instructions.
;; Fused multiply-add: op0 = op1 * op2 + op3, single rounding, via
;; fmadd.<fmt>.
1429 (define_insn "fma<mode>4"
1430 [(set (match_operand:ANYF 0 "register_operand" "=f")
1431 (fma:ANYF (match_operand:ANYF 1 "register_operand" " f")
1432 (match_operand:ANYF 2 "register_operand" " f")
1433 (match_operand:ANYF 3 "register_operand" " f")))]
1434 "TARGET_HARD_FLOAT || TARGET_ZFINX"
1435 "fmadd.<fmt>\t%0,%1,%2,%3"
1436 [(set_attr "type" "fmadd")
1437 (set_attr "mode" "<UNITMODE>")])
;; Fused multiply-subtract: op0 = op1 * op2 - op3 (the RTL negates
;; operand 3 inside the fma), via fmsub.<fmt>.
1440 (define_insn "fms<mode>4"
1441 [(set (match_operand:ANYF 0 "register_operand" "=f")
1442 (fma:ANYF (match_operand:ANYF 1 "register_operand" " f")
1443 (match_operand:ANYF 2 "register_operand" " f")
1444 (neg:ANYF (match_operand:ANYF 3 "register_operand" " f"))))]
1445 "TARGET_HARD_FLOAT || TARGET_ZFINX"
1446 "fmsub.<fmt>\t%0,%1,%2,%3"
1447 [(set_attr "type" "fmadd")
1448 (set_attr "mode" "<UNITMODE>")])
1451 (define_insn "fnms<mode>4"
1452 [(set (match_operand:ANYF 0 "register_operand" "=f")
1454 (neg:ANYF (match_operand:ANYF 1 "register_operand" " f"))
1455 (match_operand:ANYF 2 "register_operand" " f")
1456 (neg:ANYF (match_operand:ANYF 3 "register_operand" " f"))))]
1457 "TARGET_HARD_FLOAT || TARGET_ZFINX"
1458 "fnmadd.<fmt>\t%0,%1,%2,%3"
1459 [(set_attr "type" "fmadd")
1460 (set_attr "mode" "<UNITMODE>")])
1463 (define_insn "fnma<mode>4"
1464 [(set (match_operand:ANYF 0 "register_operand" "=f")
1466 (neg:ANYF (match_operand:ANYF 1 "register_operand" " f"))
1467 (match_operand:ANYF 2 "register_operand" " f")
1468 (match_operand:ANYF 3 "register_operand" " f")))]
1469 "TARGET_HARD_FLOAT || TARGET_ZFINX"
1470 "fnmsub.<fmt>\t%0,%1,%2,%3"
1471 [(set_attr "type" "fmadd")
1472 (set_attr "mode" "<UNITMODE>")])
1474 ;; -(-a * b - c), modulo signed zeros
1475 (define_insn "*fma<mode>4"
1476 [(set (match_operand:ANYF 0 "register_operand" "=f")
1479 (neg:ANYF (match_operand:ANYF 1 "register_operand" " f"))
1480 (match_operand:ANYF 2 "register_operand" " f")
1481 (neg:ANYF (match_operand:ANYF 3 "register_operand" " f")))))]
1482 "(TARGET_HARD_FLOAT || TARGET_ZFINX) && !HONOR_SIGNED_ZEROS (<MODE>mode)"
1483 "fmadd.<fmt>\t%0,%1,%2,%3"
1484 [(set_attr "type" "fmadd")
1485 (set_attr "mode" "<UNITMODE>")])
1487 ;; -(-a * b + c), modulo signed zeros
1488 (define_insn "*fms<mode>4"
1489 [(set (match_operand:ANYF 0 "register_operand" "=f")
1492 (neg:ANYF (match_operand:ANYF 1 "register_operand" " f"))
1493 (match_operand:ANYF 2 "register_operand" " f")
1494 (match_operand:ANYF 3 "register_operand" " f"))))]
1495 "(TARGET_HARD_FLOAT || TARGET_ZFINX) && !HONOR_SIGNED_ZEROS (<MODE>mode)"
1496 "fmsub.<fmt>\t%0,%1,%2,%3"
1497 [(set_attr "type" "fmadd")
1498 (set_attr "mode" "<UNITMODE>")])
1500 ;; -(a * b + c), modulo signed zeros
1501 (define_insn "*fnms<mode>4"
1502 [(set (match_operand:ANYF 0 "register_operand" "=f")
1505 (match_operand:ANYF 1 "register_operand" " f")
1506 (match_operand:ANYF 2 "register_operand" " f")
1507 (match_operand:ANYF 3 "register_operand" " f"))))]
1508 "(TARGET_HARD_FLOAT || TARGET_ZFINX) && !HONOR_SIGNED_ZEROS (<MODE>mode)"
1509 "fnmadd.<fmt>\t%0,%1,%2,%3"
1510 [(set_attr "type" "fmadd")
1511 (set_attr "mode" "<UNITMODE>")])
1513 ;; -(a * b - c), modulo signed zeros
1514 (define_insn "*fnma<mode>4"
1515 [(set (match_operand:ANYF 0 "register_operand" "=f")
1518 (match_operand:ANYF 1 "register_operand" " f")
1519 (match_operand:ANYF 2 "register_operand" " f")
1520 (neg:ANYF (match_operand:ANYF 3 "register_operand" " f")))))]
1521 "(TARGET_HARD_FLOAT || TARGET_ZFINX) && !HONOR_SIGNED_ZEROS (<MODE>mode)"
1522 "fnmsub.<fmt>\t%0,%1,%2,%3"
1523 [(set_attr "type" "fmadd")
1524 (set_attr "mode" "<UNITMODE>")])
1527 ;; ....................
1531 ;; ....................
1533 (define_insn "abs<mode>2"
1534 [(set (match_operand:ANYF 0 "register_operand" "=f")
1535 (abs:ANYF (match_operand:ANYF 1 "register_operand" " f")))]
1536 "TARGET_HARD_FLOAT || TARGET_ZFINX"
1538 [(set_attr "type" "fmove")
1539 (set_attr "mode" "<UNITMODE>")])
1541 (define_insn "copysign<mode>3"
1542 [(set (match_operand:ANYF 0 "register_operand" "=f")
1543 (unspec:ANYF [(match_operand:ANYF 1 "register_operand" " f")
1544 (match_operand:ANYF 2 "register_operand" " f")]
1546 "TARGET_HARD_FLOAT || TARGET_ZFINX"
1547 "fsgnj.<fmt>\t%0,%1,%2"
1548 [(set_attr "type" "fmove")
1549 (set_attr "mode" "<UNITMODE>")])
1551 (define_insn "neg<mode>2"
1552 [(set (match_operand:ANYF 0 "register_operand" "=f")
1553 (neg:ANYF (match_operand:ANYF 1 "register_operand" " f")))]
1554 "TARGET_HARD_FLOAT || TARGET_ZFINX"
1556 [(set_attr "type" "fmove")
1557 (set_attr "mode" "<UNITMODE>")])
1560 ;; ....................
1564 ;; ....................
1566 (define_insn "fminm<mode>3"
1567 [(set (match_operand:ANYF 0 "register_operand" "=f")
1568 (unspec:ANYF [(use (match_operand:ANYF 1 "register_operand" " f"))
1569 (use (match_operand:ANYF 2 "register_operand" " f"))]
1571 "TARGET_HARD_FLOAT && TARGET_ZFA"
1572 "fminm.<fmt>\t%0,%1,%2"
1573 [(set_attr "type" "fmove")
1574 (set_attr "mode" "<UNITMODE>")])
1576 (define_insn "fmaxm<mode>3"
1577 [(set (match_operand:ANYF 0 "register_operand" "=f")
1578 (unspec:ANYF [(use (match_operand:ANYF 1 "register_operand" " f"))
1579 (use (match_operand:ANYF 2 "register_operand" " f"))]
1581 "TARGET_HARD_FLOAT && TARGET_ZFA"
1582 "fmaxm.<fmt>\t%0,%1,%2"
1583 [(set_attr "type" "fmove")
1584 (set_attr "mode" "<UNITMODE>")])
1586 (define_insn "fmin<mode>3"
1587 [(set (match_operand:ANYF 0 "register_operand" "=f")
1588 (unspec:ANYF [(use (match_operand:ANYF 1 "register_operand" " f"))
1589 (use (match_operand:ANYF 2 "register_operand" " f"))]
1591 "(TARGET_HARD_FLOAT || TARGET_ZFINX) && !HONOR_SNANS (<MODE>mode)"
1592 "fmin.<fmt>\t%0,%1,%2"
1593 [(set_attr "type" "fmove")
1594 (set_attr "mode" "<UNITMODE>")])
1596 (define_insn "fmax<mode>3"
1597 [(set (match_operand:ANYF 0 "register_operand" "=f")
1598 (unspec:ANYF [(use (match_operand:ANYF 1 "register_operand" " f"))
1599 (use (match_operand:ANYF 2 "register_operand" " f"))]
1601 "(TARGET_HARD_FLOAT || TARGET_ZFINX) && !HONOR_SNANS (<MODE>mode)"
1602 "fmax.<fmt>\t%0,%1,%2"
1603 [(set_attr "type" "fmove")
1604 (set_attr "mode" "<UNITMODE>")])
;; Signed FP minimum via fmin.<fmt>.  Unlike the unspec-based fmin<mode>3
;; pattern above, this one is not conditioned on !HONOR_SNANS: RTL
;; smin leaves NaN behavior unspecified, so the direct mapping is valid
;; unconditionally.
1606 (define_insn "smin<mode>3"
1607 [(set (match_operand:ANYF 0 "register_operand" "=f")
1608 (smin:ANYF (match_operand:ANYF 1 "register_operand" " f")
1609 (match_operand:ANYF 2 "register_operand" " f")))]
1610 "TARGET_HARD_FLOAT || TARGET_ZFINX"
1611 "fmin.<fmt>\t%0,%1,%2"
1612 [(set_attr "type" "fmove")
1613 (set_attr "mode" "<UNITMODE>")])
;; Signed FP maximum via fmax.<fmt>.  As with smin<mode>3, RTL smax
;; leaves NaN behavior unspecified, so no !HONOR_SNANS condition is
;; needed here (contrast fmax<mode>3 above).
1615 (define_insn "smax<mode>3"
1616 [(set (match_operand:ANYF 0 "register_operand" "=f")
1617 (smax:ANYF (match_operand:ANYF 1 "register_operand" " f")
1618 (match_operand:ANYF 2 "register_operand" " f")))]
1619 "TARGET_HARD_FLOAT || TARGET_ZFINX"
1620 "fmax.<fmt>\t%0,%1,%2"
1621 [(set_attr "type" "fmove")
1622 (set_attr "mode" "<UNITMODE>")])
1625 ;; ....................
1629 ;; ....................
1632 ;; For RV64, we don't expose the SImode operations to the rtl expanders,
1633 ;; but SImode versions exist for combine.
1635 (define_expand "and<mode>3"
1636 [(set (match_operand:X 0 "register_operand")
1637 (and:X (match_operand:X 1 "register_operand")
1638 (match_operand:X 2 "arith_operand_or_mode_mask")))]
1641 /* If the second operand is a mode mask, emit an extension
1643 if (CONST_INT_P (operands[2]))
1645 enum machine_mode tmode = VOIDmode;
1646 if (UINTVAL (operands[2]) == GET_MODE_MASK (HImode))
1648 else if (UINTVAL (operands[2]) == GET_MODE_MASK (SImode))
1651 if (tmode != VOIDmode)
1653 rtx tmp = gen_lowpart (tmode, operands[1]);
1654 emit_insn (gen_extend_insn (operands[0], tmp, <MODE>mode, tmode, 1));
1660 (define_insn "*and<mode>3"
1661 [(set (match_operand:X 0 "register_operand" "=r,r")
1662 (and:X (match_operand:X 1 "register_operand" "%r,r")
1663 (match_operand:X 2 "arith_operand" " r,I")))]
1666 [(set_attr "type" "logical")
1667 (set_attr "mode" "<MODE>")])
1669 (define_insn "<optab><mode>3"
1670 [(set (match_operand:X 0 "register_operand" "=r,r")
1671 (any_or:X (match_operand:X 1 "register_operand" "%r,r")
1672 (match_operand:X 2 "arith_operand" " r,I")))]
1674 "<insn>%i2\t%0,%1,%2"
1675 [(set_attr "type" "logical")
1676 (set_attr "mode" "<MODE>")])
1678 (define_insn "*<optab>si3_internal"
1679 [(set (match_operand:SI 0 "register_operand" "=r,r")
1680 (any_bitwise:SI (match_operand:SI 1 "register_operand" "%r,r")
1681 (match_operand:SI 2 "arith_operand" " r,I")))]
1683 "<insn>%i2\t%0,%1,%2"
1684 [(set_attr "type" "logical")
1685 (set_attr "mode" "SI")])
1687 (define_insn "one_cmpl<mode>2"
1688 [(set (match_operand:X 0 "register_operand" "=r")
1689 (not:X (match_operand:X 1 "register_operand" " r")))]
1692 [(set_attr "type" "logical")
1693 (set_attr "mode" "<MODE>")])
1695 (define_insn "*one_cmplsi2_internal"
1696 [(set (match_operand:SI 0 "register_operand" "=r")
1697 (not:SI (match_operand:SI 1 "register_operand" " r")))]
1700 [(set_attr "type" "logical")
1701 (set_attr "mode" "SI")])
1704 ;; ....................
1708 ;; ....................
1710 (define_insn "truncdfsf2"
1711 [(set (match_operand:SF 0 "register_operand" "=f")
1713 (match_operand:DF 1 "register_operand" " f")))]
1714 "TARGET_DOUBLE_FLOAT || TARGET_ZDINX"
1716 [(set_attr "type" "fcvt")
1717 (set_attr "mode" "SF")])
1719 (define_insn "truncsfhf2"
1720 [(set (match_operand:HF 0 "register_operand" "=f")
1722 (match_operand:SF 1 "register_operand" " f")))]
1723 "TARGET_ZFHMIN || TARGET_ZHINXMIN"
1725 [(set_attr "type" "fcvt")
1726 (set_attr "mode" "HF")])
1728 (define_insn "truncdfhf2"
1729 [(set (match_operand:HF 0 "register_operand" "=f")
1731 (match_operand:DF 1 "register_operand" " f")))]
1732 "(TARGET_ZFHMIN && TARGET_DOUBLE_FLOAT) ||
1733 (TARGET_ZHINXMIN && TARGET_ZDINX)"
1735 [(set_attr "type" "fcvt")
1736 (set_attr "mode" "HF")])
1739 ;; ....................
1743 ;; ....................
1747 (define_expand "zero_extendsidi2"
1748 [(set (match_operand:DI 0 "register_operand")
1749 (zero_extend:DI (match_operand:SI 1 "nonimmediate_operand")))]
1752 (define_insn_and_split "*zero_extendsidi2_internal"
1753 [(set (match_operand:DI 0 "register_operand" "=r,r")
1755 (match_operand:SI 1 "nonimmediate_operand" " r,m")))]
1756 "TARGET_64BIT && !TARGET_ZBA && !TARGET_XTHEADBB && !TARGET_XTHEADMEMIDX
1757 && !(register_operand (operands[1], SImode)
1758 && reg_or_subregno (operands[1]) == VL_REGNUM)"
1762 "&& reload_completed
1763 && REG_P (operands[1])
1764 && !paradoxical_subreg_p (operands[0])"
1766 (ashift:DI (match_dup 1) (const_int 32)))
1768 (lshiftrt:DI (match_dup 0) (const_int 32)))]
1769 { operands[1] = gen_lowpart (DImode, operands[1]); }
1770 [(set_attr "move_type" "shift_shift,load")
1771 (set_attr "type" "load")
1772 (set_attr "mode" "DI")])
1774 (define_expand "zero_extendhi<GPR:mode>2"
1775 [(set (match_operand:GPR 0 "register_operand")
1777 (match_operand:HI 1 "nonimmediate_operand")))]
1780 (define_insn_and_split "*zero_extendhi<GPR:mode>2"
1781 [(set (match_operand:GPR 0 "register_operand" "=r,r")
1783 (match_operand:HI 1 "nonimmediate_operand" " r,m")))]
1784 "!TARGET_ZBB && !TARGET_XTHEADBB && !TARGET_XTHEADMEMIDX"
1788 "&& reload_completed
1789 && REG_P (operands[1])
1790 && !paradoxical_subreg_p (operands[0])"
1792 (ashift:GPR (match_dup 1) (match_dup 2)))
1794 (lshiftrt:GPR (match_dup 0) (match_dup 2)))]
1796 operands[1] = gen_lowpart (<GPR:MODE>mode, operands[1]);
1797 operands[2] = GEN_INT(GET_MODE_BITSIZE(<GPR:MODE>mode) - 16);
1799 [(set_attr "move_type" "shift_shift,load")
1800 (set_attr "type" "load")
1801 (set_attr "mode" "<GPR:MODE>")])
1803 (define_expand "zero_extendqi<SUPERQI:mode>2"
1804 [(set (match_operand:SUPERQI 0 "register_operand")
1805 (zero_extend:SUPERQI
1806 (match_operand:QI 1 "nonimmediate_operand")))]
1809 (define_insn "*zero_extendqi<SUPERQI:mode>2_internal"
1810 [(set (match_operand:SUPERQI 0 "register_operand" "=r,r")
1811 (zero_extend:SUPERQI
1812 (match_operand:QI 1 "nonimmediate_operand" " r,m")))]
1813 "!TARGET_XTHEADMEMIDX"
1817 [(set_attr "move_type" "andi,load")
1818 (set_attr "type" "multi")
1819 (set_attr "mode" "<SUPERQI:MODE>")])
1822 ;; ....................
1826 ;; ....................
1828 (define_expand "extendsidi2"
1829 [(set (match_operand:DI 0 "register_operand" "=r,r")
1831 (match_operand:SI 1 "nonimmediate_operand" " r,m")))]
1834 (define_insn "*extendsidi2_internal"
1835 [(set (match_operand:DI 0 "register_operand" "=r,r")
1837 (match_operand:SI 1 "nonimmediate_operand" " r,m")))]
1838 "TARGET_64BIT && !TARGET_XTHEADMEMIDX"
1842 [(set_attr "move_type" "move,load")
1843 (set_attr "type" "multi")
1844 (set_attr "mode" "DI")])
1846 (define_expand "extend<SHORT:mode><SUPERQI:mode>2"
1847 [(set (match_operand:SUPERQI 0 "register_operand")
1848 (sign_extend:SUPERQI (match_operand:SHORT 1 "nonimmediate_operand")))]
1851 (define_insn_and_split "*extend<SHORT:mode><SUPERQI:mode>2"
1852 [(set (match_operand:SUPERQI 0 "register_operand" "=r,r")
1853 (sign_extend:SUPERQI
1854 (match_operand:SHORT 1 "nonimmediate_operand" " r,m")))]
1855 "!TARGET_ZBB && !TARGET_XTHEADBB && !TARGET_XTHEADMEMIDX"
1858 l<SHORT:size>\t%0,%1"
1859 "&& reload_completed
1860 && REG_P (operands[1])
1861 && !paradoxical_subreg_p (operands[0])"
1862 [(set (match_dup 0) (ashift:SI (match_dup 1) (match_dup 2)))
1863 (set (match_dup 0) (ashiftrt:SI (match_dup 0) (match_dup 2)))]
1865 operands[0] = gen_lowpart (SImode, operands[0]);
1866 operands[1] = gen_lowpart (SImode, operands[1]);
1867 operands[2] = GEN_INT (GET_MODE_BITSIZE (SImode)
1868 - GET_MODE_BITSIZE (<SHORT:MODE>mode));
1870 [(set_attr "move_type" "shift_shift,load")
1871 (set_attr "type" "load")
1872 (set_attr "mode" "SI")])
1874 (define_insn "extendhfsf2"
1875 [(set (match_operand:SF 0 "register_operand" "=f")
1877 (match_operand:HF 1 "register_operand" " f")))]
1878 "TARGET_ZFHMIN || TARGET_ZHINXMIN"
1880 [(set_attr "type" "fcvt")
1881 (set_attr "mode" "SF")])
1883 (define_insn "extendsfdf2"
1884 [(set (match_operand:DF 0 "register_operand" "=f")
1886 (match_operand:SF 1 "register_operand" " f")))]
1887 "TARGET_DOUBLE_FLOAT || TARGET_ZDINX"
1889 [(set_attr "type" "fcvt")
1890 (set_attr "mode" "DF")])
1892 (define_insn "extendhfdf2"
1893 [(set (match_operand:DF 0 "register_operand" "=f")
1895 (match_operand:HF 1 "register_operand" " f")))]
1896 "(TARGET_ZFHMIN && TARGET_DOUBLE_FLOAT) ||
1897 (TARGET_ZHINXMIN && TARGET_ZDINX)"
1899 [(set_attr "type" "fcvt")
1900 (set_attr "mode" "DF")])
1902 ;; 16-bit floating point moves
1903 (define_expand "movhf"
1904 [(set (match_operand:HF 0 "")
1905 (match_operand:HF 1 ""))]
1908 if (riscv_legitimize_move (HFmode, operands[0], operands[1]))
1912 (define_insn "*movhf_hardfloat"
1913 [(set (match_operand:HF 0 "nonimmediate_operand" "=f, f,f,f,m,m,*f,*r, *r,*r,*m")
1914 (match_operand:HF 1 "move_operand" " f,zfli,G,m,f,G,*r,*f,*G*r,*m,*r"))]
1916 && (register_operand (operands[0], HFmode)
1917 || reg_or_0_operand (operands[1], HFmode))"
1918 { return riscv_output_move (operands[0], operands[1]); }
1919 [(set_attr "move_type" "fmove,fmove,mtc,fpload,fpstore,store,mtc,mfc,move,load,store")
1920 (set_attr "type" "fmove")
1921 (set_attr "mode" "HF")])
1923 (define_insn "*movhf_softfloat"
1924 [(set (match_operand:HF 0 "nonimmediate_operand" "=f, r,r,m,*f,*r")
1925 (match_operand:HF 1 "move_operand" " f,Gr,m,r,*r,*f"))]
1927 && (register_operand (operands[0], HFmode)
1928 || reg_or_0_operand (operands[1], HFmode))"
1929 { return riscv_output_move (operands[0], operands[1]); }
1930 [(set_attr "move_type" "fmove,move,load,store,mtc,mfc")
1931 (set_attr "type" "fmove")
1932 (set_attr "mode" "HF")])
1934 (define_insn "*movhf_softfloat_boxing"
1935 [(set (match_operand:HF 0 "register_operand" "=f")
1936 (unspec:HF [(match_operand:X 1 "register_operand" " r")] UNSPEC_FMV_SFP16_X))]
1939 [(set_attr "type" "fmove")
1940 (set_attr "mode" "SF")])
1943 ;; ....................
1947 ;; ....................
1949 (define_insn "fix_trunc<ANYF:mode><GPR:mode>2"
1950 [(set (match_operand:GPR 0 "register_operand" "=r")
1952 (match_operand:ANYF 1 "register_operand" " f")))]
1953 "TARGET_HARD_FLOAT || TARGET_ZFINX"
1954 "fcvt.<GPR:ifmt>.<ANYF:fmt> %0,%1,rtz"
1955 [(set_attr "type" "fcvt")
1956 (set_attr "mode" "<ANYF:MODE>")])
1958 (define_insn "fixuns_trunc<ANYF:mode><GPR:mode>2"
1959 [(set (match_operand:GPR 0 "register_operand" "=r")
1961 (match_operand:ANYF 1 "register_operand" " f")))]
1962 "TARGET_HARD_FLOAT || TARGET_ZFINX"
1963 "fcvt.<GPR:ifmt>u.<ANYF:fmt> %0,%1,rtz"
1964 [(set_attr "type" "fcvt")
1965 (set_attr "mode" "<ANYF:MODE>")])
1967 (define_insn "float<GPR:mode><ANYF:mode>2"
1968 [(set (match_operand:ANYF 0 "register_operand" "= f")
1970 (match_operand:GPR 1 "reg_or_0_operand" " rJ")))]
1971 "TARGET_HARD_FLOAT || TARGET_ZFINX"
1972 "fcvt.<ANYF:fmt>.<GPR:ifmt>\t%0,%z1"
1973 [(set_attr "type" "fcvt")
1974 (set_attr "mode" "<ANYF:MODE>")])
;; Unsigned integer -> FP conversion: fcvt.<fmt>.<ifmt>u.  The "rJ"
;; constraint plus the %z1 directive let a literal zero operand print as
;; x0 instead of forcing it into a register (see the directive list at
;; file top).
1976 (define_insn "floatuns<GPR:mode><ANYF:mode>2"
1977 [(set (match_operand:ANYF 0 "register_operand" "= f")
1978 (unsigned_float:ANYF
1979 (match_operand:GPR 1 "reg_or_0_operand" " rJ")))]
1980 "TARGET_HARD_FLOAT || TARGET_ZFINX"
1981 "fcvt.<ANYF:fmt>.<GPR:ifmt>u\t%0,%z1"
1982 [(set_attr "type" "fcvt")
1983 (set_attr "mode" "<ANYF:MODE>")])
1985 (define_insn "l<rint_pattern><ANYF:mode><GPR:mode>2"
1986 [(set (match_operand:GPR 0 "register_operand" "=r")
1988 [(match_operand:ANYF 1 "register_operand" " f")]
1990 "TARGET_HARD_FLOAT || TARGET_ZFINX"
1991 "fcvt.<GPR:ifmt>.<ANYF:fmt> %0,%1,<rint_rm>"
1992 [(set_attr "type" "fcvt")
1993 (set_attr "mode" "<ANYF:MODE>")])
1995 (define_insn "<round_pattern><ANYF:mode>2"
1996 [(set (match_operand:ANYF 0 "register_operand" "=f")
1998 [(match_operand:ANYF 1 "register_operand" " f")]
2000 "TARGET_HARD_FLOAT && TARGET_ZFA"
2001 "fround.<ANYF:fmt>\t%0,%1,<round_rm>"
2002 [(set_attr "type" "fcvt")
2003 (set_attr "mode" "<ANYF:MODE>")])
2005 (define_insn "rint<ANYF:mode>2"
2006 [(set (match_operand:ANYF 0 "register_operand" "=f")
2008 [(match_operand:ANYF 1 "register_operand" " f")]
2010 "TARGET_HARD_FLOAT && TARGET_ZFA"
2011 "froundnx.<ANYF:fmt>\t%0,%1"
2012 [(set_attr "type" "fcvt")
2013 (set_attr "mode" "<ANYF:MODE>")])
2016 ;; ....................
2020 ;; ....................
2022 ;; Lower-level instructions for loading an address from the GOT.
2023 ;; We could use MEMs, but an unspec gives more optimization
;; Load a symbol's address from the GOT.  P iterates over pointer-sized
;; modes (SI on rv32, DI on rv64).
;; NOTE(review): the unspec name and output template lines (orig
;; 2028/2030-2032) are elided in this listing.
2026 (define_insn "got_load<mode>"
2027 [(set (match_operand:P 0 "register_operand" "=r")
2029 [(match_operand:P 1 "symbolic_operand" "")]
2033 [(set_attr "got" "load")
2034 (set_attr "type" "load")
2035 (set_attr "mode" "<MODE>")])
;; TLS local-exec: add the thread pointer and the %tprel_add relocation
;; for symbol operand 3 (the relocation lets the linker relax this).
2037 (define_insn "tls_add_tp_le<mode>"
2038 [(set (match_operand:P 0 "register_operand" "=r")
2040 [(match_operand:P 1 "register_operand" "r")
2041 (match_operand:P 2 "register_operand" "r")
2042 (match_operand:P 3 "symbolic_operand" "")]
2045 "add\t%0,%1,%2,%%tprel_add(%3)"
2046 [(set_attr "type" "arith")
2047 (set_attr "mode" "<MODE>")])
;; TLS global-dynamic GOT load.
;; NOTE(review): unspec/template lines elided in this listing.
2049 (define_insn "got_load_tls_gd<mode>"
2050 [(set (match_operand:P 0 "register_operand" "=r")
2052 [(match_operand:P 1 "symbolic_operand" "")]
2056 [(set_attr "got" "load")
2057 (set_attr "type" "load")
2058 (set_attr "mode" "<MODE>")])
;; TLS initial-exec GOT load.
;; NOTE(review): unspec/template lines elided in this listing.
2060 (define_insn "got_load_tls_ie<mode>"
2061 [(set (match_operand:P 0 "register_operand" "=r")
2063 [(match_operand:P 1 "symbolic_operand" "")]
2067 [(set_attr "got" "load")
2068 (set_attr "type" "load")
2069 (set_attr "mode" "<MODE>")])
;; PC-relative high part: emits a local label .LA%2 so the matching
;; lo_sum can reference it; %h1 prints the high-part relocation.
;; cannot_copy: duplicating this insn would duplicate the label.
2071 (define_insn "auipc<mode>"
2072 [(set (match_operand:P 0 "register_operand" "=r")
2074 [(match_operand:P 1 "symbolic_operand" "")
2075 (match_operand:P 2 "const_int_operand")
2079 ".LA%2: auipc\t%0,%h1"
2080 [(set_attr "type" "auipc")
2081 (set_attr "cannot_copy" "yes")])
2083 ;; Instructions for adding the low 12 bits of an address to a register.
2084 ;; Operand 2 is the address: riscv_print_operand works out which relocation
2085 ;; should be applied.
;; NOTE(review): the output template line (orig 2091-2092) is elided here.
2087 (define_insn "*low<mode>"
2088 [(set (match_operand:P 0 "register_operand" "=r")
2089 (lo_sum:P (match_operand:P 1 "register_operand" " r")
2090 (match_operand:P 2 "symbolic_operand" "")))]
2093 [(set_attr "type" "arith")
2094 (set_attr "mode" "<MODE>")])
2096 ;; Allow combine to split complex const_int load sequences, using operand 2
2097 ;; to store the intermediate results. See move_operand for details.
;; NOTE(review): the "(define_split" header line (orig 2098) and the split
;; condition/result lines are elided in this listing; riscv_move_integer
;; materializes the constant using operand 2 as scratch.
2099 [(set (match_operand:GPR 0 "register_operand")
2100 (match_operand:GPR 1 "splittable_const_int_operand"))
2101 (clobber (match_operand:GPR 2 "register_operand"))]
2105 riscv_move_integer (operands[2], operands[0], INTVAL (operands[1]),
2110 ;; Likewise, for symbolic operands.
;; NOTE(review): "(define_split" header (orig 2111) elided here.
;; riscv_split_symbol first acts as the predicate (NULL out-arg), then is
;; called again to produce the replacement in operands[3].
2112 [(set (match_operand:P 0 "register_operand")
2113 (match_operand:P 1))
2114 (clobber (match_operand:P 2 "register_operand"))]
2115 "riscv_split_symbol (operands[2], operands[1], MAX_MACHINE_MODE, NULL)"
2116 [(set (match_dup 0) (match_dup 3))]
2118 riscv_split_symbol (operands[2], operands[1],
2119 MAX_MACHINE_MODE, &operands[3]);
2122 ;; Pretend to have the ability to load complex const_int in order to get
2123 ;; better code generation around them.
2124 ;; But avoid constants that are special cased elsewhere.
2126 ;; Hide it from IRA register equiv recog* () to elide potential undoing of split
;; Excludes constants handled by the p2m1/high-mask shift splitters below,
;; so those cheaper two-shift forms are not shadowed.
;; NOTE(review): condition prefix (orig 2131) and split lines (2134-2137,
;; 2139-2141) are elided in this listing.
2128 (define_insn_and_split "*mvconst_internal"
2129 [(set (match_operand:GPR 0 "register_operand" "=r")
2130 (match_operand:GPR 1 "splittable_const_int_operand" "i"))]
2132 && !(p2m1_shift_operand (operands[1], <MODE>mode)
2133 || high_mask_shift_operand (operands[1], <MODE>mode))"
2138 riscv_move_integer (operands[0], operands[0], INTVAL (operands[1]),
2142 [(set_attr "type" "move")])
2144 ;; 64-bit integer moves
;; Expander: let riscv_legitimize_move handle any hard cases (it returns
;; true when it emitted the move itself).
2146 (define_expand "movdi"
2147 [(set (match_operand:DI 0 "")
2148 (match_operand:DI 1 ""))]
2151 if (riscv_legitimize_move (DImode, operands[0], operands[1]))
;; DImode move on rv32: pairs of GPRs, or FPRs when the D extension is
;; present (the "ext" attribute gates the *f alternatives on "d").
;; "vp" / rdvlenb reads the vector VLENB CSR.
2155 (define_insn "*movdi_32bit"
2156 [(set (match_operand:DI 0 "nonimmediate_operand" "=r,r,r,m, *f,*f,*r,*f,*m,r")
2157 (match_operand:DI 1 "move_operand" " r,i,m,r,*J*r,*m,*f,*f,*f,vp"))]
2159 && (register_operand (operands[0], DImode)
2160 || reg_or_0_operand (operands[1], DImode))"
2161 { return riscv_output_move (operands[0], operands[1]); }
2162 [(set_attr "move_type" "move,const,load,store,mtc,fpload,mfc,fmove,fpstore,rdvlenb")
2163 (set_attr "mode" "DI")
2164 (set_attr "type" "move")
2165 (set_attr "ext" "base,base,base,base,d,d,d,d,d,vector")])
;; DImode move on rv64.  Constraint "T" accepts constants that
;; riscv_output_move can emit directly.
2167 (define_insn "*movdi_64bit"
2168 [(set (match_operand:DI 0 "nonimmediate_operand" "=r,r,r, m, *f,*f,*r,*f,*m,r")
2169 (match_operand:DI 1 "move_operand" " r,T,m,rJ,*r*J,*m,*f,*f,*f,vp"))]
2171 && (register_operand (operands[0], DImode)
2172 || reg_or_0_operand (operands[1], DImode))"
2173 { return riscv_output_move (operands[0], operands[1]); }
2174 [(set_attr "move_type" "move,const,load,store,mtc,fpload,mfc,fmove,fpstore,rdvlenb")
2175 (set_attr "mode" "DI")
2176 (set_attr "type" "move")
2177 (set_attr "ext" "base,base,base,base,d,d,d,d,d,vector")])
2179 ;; 32-bit Integer moves
2181 (define_expand "mov<mode>"
2182 [(set (match_operand:MOVE32 0 "")
2183 (match_operand:MOVE32 1 ""))]
2186 if (riscv_legitimize_move (<MODE>mode, operands[0], operands[1]))
;; The extra condition rejects a plain register copy from VL_REGNUM:
;; reading VL must go through the dedicated vector patterns.
2190 (define_insn "*movsi_internal"
2191 [(set (match_operand:SI 0 "nonimmediate_operand" "=r,r,r, m, *f,*f,*r,*m,r")
2192 (match_operand:SI 1 "move_operand" " r,T,m,rJ,*r*J,*m,*f,*f,vp"))]
2193 "(register_operand (operands[0], SImode)
2194 || reg_or_0_operand (operands[1], SImode))
2195 && !(register_operand (operands[1], SImode)
2196 && reg_or_subregno (operands[1]) == VL_REGNUM)"
2197 { return riscv_output_move (operands[0], operands[1]); }
2198 [(set_attr "move_type" "move,const,load,store,mtc,fpload,mfc,fpstore,rdvlenb")
2199 (set_attr "mode" "SI")
2200 (set_attr "type" "move")
2201 (set_attr "ext" "base,base,base,base,f,f,f,f,vector")])
2203 ;; 16-bit Integer moves
2205 ;; Unlike most other insns, the move insns can't be split with
2206 ;; different predicates, because register spilling and other parts of
2207 ;; the compiler, have memoized the insn number already.
2208 ;; Unsigned loads are used because LOAD_EXTEND_OP returns ZERO_EXTEND.
2210 (define_expand "movhi"
2211 [(set (match_operand:HI 0 "")
2212 (match_operand:HI 1 ""))]
2215 if (riscv_legitimize_move (HImode, operands[0], operands[1]))
;; HImode moves never touch FP loads/stores; only GPR<->FPR transfers
;; (mtc/mfc) need the "f" extension.
2219 (define_insn "*movhi_internal"
2220 [(set (match_operand:HI 0 "nonimmediate_operand" "=r,r,r, m, *f,*r,r")
2221 (match_operand:HI 1 "move_operand" " r,T,m,rJ,*r*J,*f,vp"))]
2222 "(register_operand (operands[0], HImode)
2223 || reg_or_0_operand (operands[1], HImode))"
2224 { return riscv_output_move (operands[0], operands[1]); }
2225 [(set_attr "move_type" "move,const,load,store,mtc,mfc,rdvlenb")
2226 (set_attr "mode" "HI")
2227 (set_attr "type" "move")
2228 (set_attr "ext" "base,base,base,base,f,f,vector")])
2230 ;; HImode constant generation; see riscv_move_integer for details.
2231 ;; si+si->hi without truncation is legal because of
2232 ;; TARGET_TRULY_NOOP_TRUNCATION.
;; %i2 prints "i" for an immediate operand; %~ appends "w" on rv64.
2234 (define_insn "*add<mode>hi3"
2235 [(set (match_operand:HI 0 "register_operand" "=r,r")
2236 (plus:HI (match_operand:HISI 1 "register_operand" " r,r")
2237 (match_operand:HISI 2 "arith_operand" " r,I")))]
2239 "add%i2%~\t%0,%1,%2"
2240 [(set_attr "type" "arith")
2241 (set_attr "mode" "HI")])
;; NOTE(review): the output template line (orig 2247-2248) is elided in
;; this listing.
2243 (define_insn "*xor<mode>hi3"
2244 [(set (match_operand:HI 0 "register_operand" "=r,r")
2245 (xor:HI (match_operand:HISI 1 "register_operand" " r,r")
2246 (match_operand:HISI 2 "arith_operand" " r,I")))]
2249 [(set_attr "type" "logical")
2250 (set_attr "mode" "HI")])
2252 ;; 8-bit Integer moves
2254 (define_expand "movqi"
2255 [(set (match_operand:QI 0 "")
2256 (match_operand:QI 1 ""))]
2259 if (riscv_legitimize_move (QImode, operands[0], operands[1]))
;; QImode uses constraint "I" (12-bit immediates) rather than "T": any
;; QImode constant already fits one addi.
2263 (define_insn "*movqi_internal"
2264 [(set (match_operand:QI 0 "nonimmediate_operand" "=r,r,r, m, *f,*r,r")
2265 (match_operand:QI 1 "move_operand" " r,I,m,rJ,*r*J,*f,vp"))]
2266 "(register_operand (operands[0], QImode)
2267 || reg_or_0_operand (operands[1], QImode))"
2268 { return riscv_output_move (operands[0], operands[1]); }
2269 [(set_attr "move_type" "move,const,load,store,mtc,mfc,rdvlenb")
2270 (set_attr "mode" "QI")
2271 (set_attr "type" "move")
2272 (set_attr "ext" "base,base,base,base,f,f,vector")])
2274 ;; 32-bit floating point moves
2276 (define_expand "movsf"
2277 [(set (match_operand:SF 0 "")
2278 (match_operand:SF 1 ""))]
2281 if (riscv_legitimize_move (SFmode, operands[0], operands[1]))
;; "zfli" accepts constants loadable with the Zfa fli instruction;
;; "G" is floating-point zero (materialized via x0).
2285 (define_insn "*movsf_hardfloat"
2286 [(set (match_operand:SF 0 "nonimmediate_operand" "=f, f,f,f,m,m,*f,*r, *r,*r,*m")
2287 (match_operand:SF 1 "move_operand" " f,zfli,G,m,f,G,*r,*f,*G*r,*m,*r"))]
2289 && (register_operand (operands[0], SFmode)
2290 || reg_or_0_operand (operands[1], SFmode))"
2291 { return riscv_output_move (operands[0], operands[1]); }
2292 [(set_attr "move_type" "fmove,fmove,mtc,fpload,fpstore,store,mtc,mfc,move,load,store")
2293 (set_attr "type" "fmove")
2294 (set_attr "mode" "SF")])
;; Soft-float fallback: SFmode lives entirely in GPRs/memory.
2296 (define_insn "*movsf_softfloat"
2297 [(set (match_operand:SF 0 "nonimmediate_operand" "= r,r,m")
2298 (match_operand:SF 1 "move_operand" " Gr,m,r"))]
2300 && (register_operand (operands[0], SFmode)
2301 || reg_or_0_operand (operands[1], SFmode))"
2302 { return riscv_output_move (operands[0], operands[1]); }
2303 [(set_attr "move_type" "move,load,store")
2304 (set_attr "type" "fmove")
2305 (set_attr "mode" "SF")])
2307 ;; 64-bit floating point moves
2309 (define_expand "movdf"
2310 [(set (match_operand:DF 0 "")
2311 (match_operand:DF 1 ""))]
2314 if (riscv_legitimize_move (DFmode, operands[0], operands[1]))
2319 ;; In RV32, we lack fmv.x.d and fmv.d.x. Go through memory instead.
2320 ;; (However, we can still use fcvt.d.w to zero a floating-point register.)
;; "zmvf"/"zmvr" constrain the FPR<->GPR-pair transfer alternatives
;; (usable with the Zfa fmvh/fmvp patterns further below).
2321 (define_insn "*movdf_hardfloat_rv32"
2322 [(set (match_operand:DF 0 "nonimmediate_operand" "=f, f,f,f,m,m,*zmvf,*zmvr, *r,*r,*m")
2323 (match_operand:DF 1 "move_operand" " f,zfli,G,m,f,G,*zmvr,*zmvf,*r*G,*m,*r"))]
2324 "!TARGET_64BIT && TARGET_DOUBLE_FLOAT
2325 && (register_operand (operands[0], DFmode)
2326 || reg_or_0_operand (operands[1], DFmode))"
2327 { return riscv_output_move (operands[0], operands[1]); }
2328 [(set_attr "move_type" "fmove,fmove,mtc,fpload,fpstore,store,mtc,mfc,move,load,store")
2329 (set_attr "type" "fmove")
2330 (set_attr "mode" "DF")])
2332 (define_insn "*movdf_hardfloat_rv64"
2333 [(set (match_operand:DF 0 "nonimmediate_operand" "=f, f,f,f,m,m,*f,*r, *r,*r,*m")
2334 (match_operand:DF 1 "move_operand" " f,zfli,G,m,f,G,*r,*f,*r*G,*m,*r"))]
2335 "TARGET_64BIT && TARGET_DOUBLE_FLOAT
2336 && (register_operand (operands[0], DFmode)
2337 || reg_or_0_operand (operands[1], DFmode))"
2338 { return riscv_output_move (operands[0], operands[1]); }
2339 [(set_attr "move_type" "fmove,fmove,mtc,fpload,fpstore,store,mtc,mfc,move,load,store")
2340 (set_attr "type" "fmove")
2341 (set_attr "mode" "DF")])
2343 (define_insn "*movdf_softfloat"
2344 [(set (match_operand:DF 0 "nonimmediate_operand" "= r,r, m")
2345 (match_operand:DF 1 "move_operand" " rG,m,rG"))]
2346 "!TARGET_DOUBLE_FLOAT
2347 && (register_operand (operands[0], DFmode)
2348 || reg_or_0_operand (operands[1], DFmode))"
2349 { return riscv_output_move (operands[0], operands[1]); }
2350 [(set_attr "move_type" "move,load,store")
2351 (set_attr "type" "fmove")
2352 (set_attr "mode" "DF")])
;; Zfa on rv32: extract the low 32 bits of a DF register into a GPR.
;; NOTE(review): the RTL wrapper (orig 2356) and output template (2359)
;; are elided in this listing.
2354 (define_insn "movsidf2_low_rv32"
2355 [(set (match_operand:SI 0 "register_operand" "= r")
2357 (match_operand:DF 1 "register_operand" "zmvf")))]
2358 "TARGET_HARD_FLOAT && !TARGET_64BIT && TARGET_ZFA"
2360 [(set_attr "move_type" "fmove")
2361 (set_attr "type" "fmove")
2362 (set_attr "mode" "DF")])
;; Zfa on rv32: extract the high 32 bits (fmvh.x.d).
;; NOTE(review): interior RTL and output-template lines elided here.
2365 (define_insn "movsidf2_high_rv32"
2366 [(set (match_operand:SI 0 "register_operand" "= r")
2369 (match_operand:DF 1 "register_operand" "zmvf")
2371 "TARGET_HARD_FLOAT && !TARGET_64BIT && TARGET_ZFA"
2373 [(set_attr "move_type" "fmove")
2374 (set_attr "type" "fmove")
2375 (set_attr "mode" "DF")])
;; Zfa on rv32: build a DF register from two GPRs with fmvp.d.x
;; (operand 2 = low half, operand 1 = high half, per the template order).
2377 (define_insn "movdfsisi3_rv32"
2378 [(set (match_operand:DF 0 "register_operand" "= f")
2380 (match_operand:SI 2 "register_operand" "zmvr")
2382 (match_operand:SI 1 "register_operand" "zmvr")
2384 "TARGET_HARD_FLOAT && !TARGET_64BIT && TARGET_ZFA"
2385 "fmvp.d.x\t%0,%2,%1"
2386 [(set_attr "move_type" "fmove")
2387 (set_attr "type" "fmove")
2388 (set_attr "mode" "DF")])
;; Split doubleword (64-bit on rv32) moves into word-sized pieces.
;; NOTE(review): "(define_split" header (orig 2390) elided here.
2391 [(set (match_operand:MOVE64 0 "nonimmediate_operand")
2392 (match_operand:MOVE64 1 "move_operand"))]
2394 && riscv_split_64bit_move_p (operands[0], operands[1])"
2397 riscv_split_doubleword_move (operands[0], operands[1]);
;; Inline block copy; operand 2 = byte count, operand 3 = alignment.
;; riscv_expand_block_move returns true when it emitted the copy.
2401 (define_expand "cpymem<mode>"
2402 [(parallel [(set (match_operand:BLK 0 "general_operand")
2403 (match_operand:BLK 1 "general_operand"))
2404 (use (match_operand:P 2 ""))
2405 (use (match_operand:SI 3 "const_int_operand"))])]
2408 if (riscv_expand_block_move (operands[0], operands[1], operands[2]))
2414 ;; Expand in-line code to clear the instruction cache between operand[0] and
;; Either call the platform's cache-flush function (when the target
;; defines ICACHE_FLUSH_FUNC) or emit fence.i directly.
2416 (define_expand "clear_cache"
2417 [(match_operand 0 "pmode_register_operand")
2418 (match_operand 1 "pmode_register_operand")]
2421 #ifdef ICACHE_FLUSH_FUNC
2422 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, ICACHE_FLUSH_FUNC),
2423 LCT_NORMAL, VOIDmode, operands[0], Pmode,
2424 operands[1], Pmode, const0_rtx, Pmode);
2426 if (TARGET_ZIFENCEI)
2427 emit_insn (gen_fence_i ());
;; Full memory fence.  NOTE(review): output template (orig 2434-2435)
;; elided in this listing.
2432 (define_insn "fence"
2433 [(unspec_volatile [(const_int 0)] UNSPECV_FENCE)]
2436 [(set_attr "type" "atomic")])
;; Instruction-stream fence (fence.i).  NOTE(review): output template
;; elided in this listing.
2438 (define_insn "fence_i"
2439 [(unspec_volatile [(const_int 0)] UNSPECV_FENCE_I)]
2442 [(set_attr "type" "atomic")])
;; Zihintpause "pause" hint; falls back to the raw encoding 0x0100000f
;; (which is a fence variant, harmless on cores without the extension).
2444 (define_insn "riscv_pause"
2445 [(unspec_volatile [(const_int 0)] UNSPECV_PAUSE)]
2447 "* return TARGET_ZIHINTPAUSE ? \"pause\" : \".insn\t0x0100000f\";"
2448 [(set_attr "type" "atomic")])
2451 ;; ....................
2455 ;; ....................
2457 ;; Use a QImode shift count, to avoid generating sign or zero extend
2458 ;; instructions for shift counts, and to avoid dropping subregs.
2459 ;; expand_shift_1 can do this automatically when SHIFT_COUNT_TRUNCATED is
2460 ;; defined, but use of that is discouraged.
;; SImode shift: constant counts are masked to 0..31 before printing;
;; %~ appends "w" on rv64 so the result is properly sign-extended.
2462 (define_insn "*<optab>si3"
2463 [(set (match_operand:SI 0 "register_operand" "= r")
2465 (match_operand:SI 1 "register_operand" " r")
2466 (match_operand:QI 2 "arith_operand" " rI")))]
2469 if (GET_CODE (operands[2]) == CONST_INT)
2470 operands[2] = GEN_INT (INTVAL (operands[2])
2471 & (GET_MODE_BITSIZE (SImode) - 1));
2473 return "<insn>%i2%~\t%0,%1,%2";
2475 [(set_attr "type" "shift")
2476 (set_attr "mode" "SI")])
;; On rv64 route SImode shifts through the _extend pattern and mark the
;; lowpart subreg as sign-extended (SRP_SIGNED) so redundant extensions
;; can be removed.
2478 (define_expand "<optab>si3"
2479 [(set (match_operand:SI 0 "register_operand" "= r")
2480 (any_shift:SI (match_operand:SI 1 "register_operand" " r")
2481 (match_operand:QI 2 "arith_operand" " rI")))]
2486 rtx t = gen_reg_rtx (DImode);
2487 emit_insn (gen_<optab>si3_extend (t, operands[1], operands[2]));
2488 t = gen_lowpart (SImode, t);
2489 SUBREG_PROMOTED_VAR_P (t) = 1;
2490 SUBREG_PROMOTED_SET (t, SRP_SIGNED);
2491 emit_move_insn (operands[0], t);
;; DImode shift: constant counts masked to 0..63.
2496 (define_insn "<optab>di3"
2497 [(set (match_operand:DI 0 "register_operand" "= r")
2499 (match_operand:DI 1 "register_operand" " r")
2500 (match_operand:QI 2 "arith_operand" " rI")))]
2503 if (GET_CODE (operands[2]) == CONST_INT)
2504 operands[2] = GEN_INT (INTVAL (operands[2])
2505 & (GET_MODE_BITSIZE (DImode) - 1));
2507 return "<insn>%i2\t%0,%1,%2";
2509 [(set_attr "type" "shift")
2510 (set_attr "mode" "DI")])
;; Fold an explicit (count & (bitsize-1)) mask into the shift, since the
;; hardware masks the count anyway.  The count is narrowed to QImode.
2512 (define_insn_and_split "*<optab><GPR:mode>3_mask_1"
2513 [(set (match_operand:GPR 0 "register_operand" "= r")
2515 (match_operand:GPR 1 "register_operand" " r")
2516 (match_operator 4 "subreg_lowpart_operator"
2518 (match_operand:GPR2 2 "register_operand" "r")
2519 (match_operand 3 "<GPR:shiftm1>"))])))]
2524 (any_shift:GPR (match_dup 1)
2526 "operands[2] = gen_lowpart (QImode, operands[2]);"
2527 [(set_attr "type" "shift")
2528 (set_attr "mode" "<GPR:MODE>")])
;; SImode shift whose DImode result is the sign-extension (the "w" form:
;; sllw/srlw/sraw).
2530 (define_insn "<optab>si3_extend"
2531 [(set (match_operand:DI 0 "register_operand" "= r")
2533 (any_shift:SI (match_operand:SI 1 "register_operand" " r")
2534 (match_operand:QI 2 "arith_operand" " rI"))))]
2537 if (GET_CODE (operands[2]) == CONST_INT)
2538 operands[2] = GEN_INT (INTVAL (operands[2]) & 0x1f)
;; NOTE(review): the line above ends without ";" here because this
;; listing elides the surrounding lines — see the full file.
2540 return "<insn>%i2w\t%0,%1,%2";
2542 [(set_attr "type" "shift")
2543 (set_attr "mode" "SI")])
;; Masked-count variant of the _extend pattern.
2545 (define_insn_and_split "*<optab>si3_extend_mask"
2546 [(set (match_operand:DI 0 "register_operand" "= r")
2549 (match_operand:SI 1 "register_operand" " r")
2550 (match_operator 4 "subreg_lowpart_operator"
2552 (match_operand:GPR 2 "register_operand" " r")
2553 (match_operand 3 "const_si_mask_operand"))]))))]
2559 (any_shift:SI (match_dup 1)
2561 "operands[2] = gen_lowpart (QImode, operands[2]);"
2562 [(set_attr "type" "shift")
2563 (set_attr "mode" "SI")])
2565 ;; Non-canonical, but can be formed by ree when combine is not successful at
2566 ;; producing one of the two canonical patterns below.
;; zero_extend of an SImode logical right shift => srliw (the "w" form
;; sign-extends, but with a nonzero shift the sign bit is clear, so the
;; result equals the zero-extension).
2567 (define_insn "*lshrsi3_zero_extend_1"
2568 [(set (match_operand:DI 0 "register_operand" "=r")
2570 (lshiftrt:SI (match_operand:SI 1 "register_operand" " r")
2571 (match_operand 2 "const_int_operand"))))]
2572 "TARGET_64BIT && (INTVAL (operands[2]) & 0x1f) > 0"
2574 operands[2] = GEN_INT (INTVAL (operands[2]) & 0x1f);
2576 return "srliw\t%0,%1,%2";
2578 [(set_attr "type" "shift")
2579 (set_attr "mode" "SI")])
2581 ;; Canonical form for a zero-extend of a logical right shift.
;; zero_extract with pos+len == 32 is exactly "low word >> pos".
2582 (define_insn "*lshrsi3_zero_extend_2"
2583 [(set (match_operand:DI 0 "register_operand" "=r")
2584 (zero_extract:DI (match_operand:DI 1 "register_operand" " r")
2585 (match_operand 2 "const_int_operand")
2586 (match_operand 3 "const_int_operand")))]
2587 "(TARGET_64BIT && (INTVAL (operands[3]) > 0)
2588 && (INTVAL (operands[2]) + INTVAL (operands[3]) == 32))"
2590 return "srliw\t%0,%1,%3";
2592 [(set_attr "type" "shift")
2593 (set_attr "mode" "SI")])
2595 ;; Canonical form for a zero-extend of a logical right shift when the
2596 ;; shift count is 31.
;; (x >> 31) of a 32-bit value is its sign bit, i.e. (lt x 0) as 0/1.
;; NOTE(review): the comparison's second operand line (orig 2600-2602)
;; is elided in this listing.
2597 (define_insn "*lshrsi3_zero_extend_3"
2598 [(set (match_operand:DI 0 "register_operand" "=r")
2599 (lt:DI (match_operand:SI 1 "register_operand" " r")
2603 return "srliw\t%0,%1,31";
2605 [(set_attr "type" "shift")
2606 (set_attr "mode" "SI")])
2608 ;; Handle AND with 2^N-1 for N from 12 to XLEN. This can be split into
2609 ;; two logical shifts. Otherwise it requires 3 instructions: lui,
2610 ;; xor/addi/srli, and.
2612 ;; Generating a temporary for the shift output gives better combiner results;
2613 ;; and also fixes a problem where op0 could be a paradoxical reg and shifting
2614 ;; by amounts larger than the size of the SUBREG_REG doesn't work.
;; NOTE(review): "(define_split" header (orig 2615) elided in this
;; listing.  x & (2^N - 1)  =>  (x << (XLEN-N)) >> (XLEN-N).
2616 [(set (match_operand:GPR 0 "register_operand")
2617 (and:GPR (match_operand:GPR 1 "register_operand")
2618 (match_operand:GPR 2 "p2m1_shift_operand")))
2619 (clobber (match_operand:GPR 3 "register_operand"))]
2622 (ashift:GPR (match_dup 1) (match_dup 2)))
2624 (lshiftrt:GPR (match_dup 3) (match_dup 2)))]
2626 /* Op2 is a VOIDmode constant, so get the mode size from op1. */
2627 operands[2] = GEN_INT (GET_MODE_BITSIZE (GET_MODE (operands[1])).to_constant ()
2628 - exact_log2 (INTVAL (operands[2]) + 1));
2631 ;; Handle AND with 0xF...F0...0 where there are 32 to 63 zeros. This can be
2632 ;; split into two shifts. Otherwise it requires 3 instructions: li, sll, and.
;; NOTE(review): "(define_split" header (orig 2633) elided here.
;; x & high_mask  =>  (x >> ctz(mask)) << ctz(mask).
2634 [(set (match_operand:DI 0 "register_operand")
2635 (and:DI (match_operand:DI 1 "register_operand")
2636 (match_operand:DI 2 "high_mask_shift_operand")))
2637 (clobber (match_operand:DI 3 "register_operand"))]
2640 (lshiftrt:DI (match_dup 1) (match_dup 2)))
2642 (ashift:DI (match_dup 3) (match_dup 2)))]
2644 operands[2] = GEN_INT (ctz_hwi (INTVAL (operands[2])));
2647 ;; Handle SImode to DImode zero-extend combined with a left shift. This can
2648 ;; occur when unsigned int is used for array indexing. Split this into two
2649 ;; shifts. Otherwise we can get 3 shifts.
;; Disabled when Zba is available (slli.uw covers this in one insn).
2651 (define_insn_and_split "zero_extendsidi2_shifted"
2652 [(set (match_operand:DI 0 "register_operand" "=r")
2653 (and:DI (ashift:DI (match_operand:DI 1 "register_operand" "r")
2654 (match_operand:QI 2 "immediate_operand" "I"))
2655 (match_operand 3 "immediate_operand" "")))
2656 (clobber (match_scratch:DI 4 "=&r"))]
2657 "TARGET_64BIT && !TARGET_ZBA
2658 && ((INTVAL (operands[3]) >> INTVAL (operands[2])) == 0xffffffff)"
2660 "&& reload_completed"
2662 (ashift:DI (match_dup 1) (const_int 32)))
2664 (lshiftrt:DI (match_dup 4) (match_dup 5)))]
2665 "operands[5] = GEN_INT (32 - (INTVAL (operands [2])));"
2666 [(set_attr "type" "shift")
2667 (set_attr "mode" "DI")])
2670 ;; ....................
2672 ;; CONDITIONAL BRANCHES
2674 ;; ....................
2676 ;; Conditional branches
;; Branch on ((x & shifted_mask) ==/!= 0) where the mask does not fit in
;; 12 bits: shift the mask's trailing zeros away first, then AND with
;; the now-small constant, then branch on the scratch.
2678 (define_insn_and_split "*branch<ANYI:mode>_shiftedarith_equals_zero"
2680 (if_then_else (match_operator 1 "equality_operator"
2681 [(and:ANYI (match_operand:ANYI 2 "register_operand" "r")
2682 (match_operand 3 "shifted_const_arith_operand" "i"))
2684 (label_ref (match_operand 0 "" ""))
2686 (clobber (match_scratch:X 4 "=&r"))]
2687 "!SMALL_OPERAND (INTVAL (operands[3]))"
2689 "&& reload_completed"
2690 [(set (match_dup 4) (lshiftrt:X (subreg:X (match_dup 2) 0) (match_dup 6)))
2691 (set (match_dup 4) (and:X (match_dup 4) (match_dup 7)))
2692 (set (pc) (if_then_else (match_op_dup 1 [(match_dup 4) (const_int 0)])
2693 (label_ref (match_dup 0)) (pc)))]
2695 HOST_WIDE_INT mask = INTVAL (operands[3]);
2696 int trailing = ctz_hwi (mask);
2698 operands[6] = GEN_INT (trailing);
2699 operands[7] = GEN_INT (mask >> trailing);
2701 [(set_attr "type" "branch")])
;; Branch on ((x & consecutive_bits_mask) ==/!= 0): isolate the run of
;; bits with a left shift (drop leading bits) then a right shift.
2703 (define_insn_and_split "*branch<ANYI:mode>_shiftedmask_equals_zero"
2705 (if_then_else (match_operator 1 "equality_operator"
2706 [(and:ANYI (match_operand:ANYI 2 "register_operand" "r")
2707 (match_operand 3 "consecutive_bits_operand" "i"))
2709 (label_ref (match_operand 0 "" ""))
2711 (clobber (match_scratch:X 4 "=&r"))]
2712 "(INTVAL (operands[3]) >= 0 || !partial_subreg_p (operands[2]))
2713 && popcount_hwi (INTVAL (operands[3])) > 1
2714 && !SMALL_OPERAND (INTVAL (operands[3]))"
2716 "&& reload_completed"
2717 [(set (match_dup 4) (ashift:X (subreg:X (match_dup 2) 0) (match_dup 6)))
2718 (set (match_dup 4) (lshiftrt:X (match_dup 4) (match_dup 7)))
2719 (set (pc) (if_then_else (match_op_dup 1 [(match_dup 4) (const_int 0)])
2720 (label_ref (match_dup 0)) (pc)))]
2722 unsigned HOST_WIDE_INT mask = INTVAL (operands[3]);
2723 int leading = clz_hwi (mask);
2724 int trailing = ctz_hwi (mask);
2726 operands[6] = GEN_INT (leading);
2727 operands[7] = GEN_INT (leading + trailing);
2729 [(set_attr "type" "branch")])
;; Basic compare-and-branch.  When the target is out of b<cond> range
;; (length 12), invert the condition (%N1) and branch around a "jump".
2731 (define_insn "*branch<mode>"
2734 (match_operator 1 "ordered_comparison_operator"
2735 [(match_operand:X 2 "register_operand" "r")
2736 (match_operand:X 3 "reg_or_0_operand" "rJ")])
2737 (label_ref (match_operand 0 "" ""))
2741 if (get_attr_length (insn) == 12)
2742 return "b%N1\t%2,%z3,1f; jump\t%l0,ra; 1:";
2744 return "b%C1\t%2,%z3,%l0";
2746 [(set_attr "type" "branch")
2747 (set_attr "mode" "none")])
2749 ;; Conditional move and add patterns.
;; mov<mode>cc: only enabled when some conditional-move facility exists
;; (SFB ALU, XTheadCondMov, or Zicond-like); riscv_expand_conditional_move
;; returns true when it emitted the move.
2751 (define_expand "mov<mode>cc"
2752 [(set (match_operand:GPR 0 "register_operand")
2753 (if_then_else:GPR (match_operand 1 "comparison_operator")
2754 (match_operand:GPR 2 "movcc_operand")
2755 (match_operand:GPR 3 "movcc_operand")))]
2756 "TARGET_SFB_ALU || TARGET_XTHEADCONDMOV || TARGET_ZICOND_LIKE
2759 if (riscv_expand_conditional_move (operands[0], operands[1],
2760 operands[2], operands[3]))
;; add<mode>cc: branchless conditional add via an scc result turned into
;; an all-ones/all-zero mask (reg0-1 or -reg0), then AND + ADD.
2766 (define_expand "add<mode>cc"
2767 [(match_operand:GPR 0 "register_operand")
2768 (match_operand 1 "comparison_operator")
2769 (match_operand:GPR 2 "arith_operand")
2770 (match_operand:GPR 3 "arith_operand")]
2773 rtx cmp = operands[1];
2774 rtx cmp0 = XEXP (cmp, 0);
2775 rtx cmp1 = XEXP (cmp, 1);
2776 machine_mode mode0 = GET_MODE (cmp0);
2778 /* We only handle word mode integer compares for now. */
2779 if (INTEGRAL_MODE_P (mode0) && mode0 != word_mode)
2782 enum rtx_code code = GET_CODE (cmp);
2783 rtx reg0 = gen_reg_rtx (<MODE>mode);
2784 rtx reg1 = gen_reg_rtx (<MODE>mode);
2785 rtx reg2 = gen_reg_rtx (<MODE>mode);
2786 bool invert = false;
2788 if (INTEGRAL_MODE_P (mode0))
2789 riscv_expand_int_scc (reg0, code, cmp0, cmp1, &invert);
2790 else if (FLOAT_MODE_P (mode0) && fp_scc_comparison (cmp, GET_MODE (cmp)))
2791 riscv_expand_float_scc (reg0, code, cmp0, cmp1, &invert);
;; invert: mask = scc-1 (all-ones when scc==0); otherwise mask = -scc.
2796 riscv_emit_binary (PLUS, reg1, reg0, constm1_rtx);
2798 riscv_emit_unary (NEG, reg1, reg0);
2799 riscv_emit_binary (AND, reg2, reg1, operands[3]);
2800 riscv_emit_binary (PLUS, operands[0], reg2, operands[2]);
2805 ;; Used to implement built-in functions.
2806 (define_expand "condjump"
2808 (if_then_else (match_operand 0)
2809 (label_ref (match_operand 1))
;; Integer compare-and-branch expander: defer to
;; riscv_expand_conditional_branch.
2812 (define_expand "@cbranch<mode>4"
2814 (if_then_else (match_operator 0 "comparison_operator"
2815 [(match_operand:BR 1 "register_operand")
2816 (match_operand:BR 2 "nonmemory_operand")])
2817 (label_ref (match_operand 3 ""))
2821 riscv_expand_conditional_branch (operands[3], GET_CODE (operands[0]),
2822 operands[1], operands[2]);
;; FP compare-and-branch.  Signed-order comparisons expand directly;
;; others keep the parallel form with a scratch X-mode register
;; (operand 4) used later to hold the fcmp result.
2826 (define_expand "@cbranch<ANYF:mode>4"
2827 [(parallel [(set (pc)
2828 (if_then_else (match_operator 0 "fp_branch_comparison"
2829 [(match_operand:ANYF 1 "register_operand")
2830 (match_operand:ANYF 2 "register_operand")])
2831 (label_ref (match_operand 3 ""))
2833 (clobber (match_operand 4 ""))])]
2834 "TARGET_HARD_FLOAT || TARGET_ZFINX"
2836 if (!signed_order_operator (operands[0], GET_MODE (operands[0])))
2838 riscv_expand_conditional_branch (operands[3], GET_CODE (operands[0]),
2839 operands[1], operands[2]);
2842 operands[4] = gen_reg_rtx (TARGET_64BIT ? DImode : SImode);
;; After reload, split FP branch into fcmp-into-scratch + bnez.
;; The length attr picks 4/8/12 bytes depending on branch distance.
2845 (define_insn_and_split "*cbranch<ANYF:mode>4"
2847 (if_then_else (match_operator 1 "fp_native_comparison"
2848 [(match_operand:ANYF 2 "register_operand" "f")
2849 (match_operand:ANYF 3 "register_operand" "f")])
2850 (label_ref (match_operand 0 ""))
2852 (clobber (match_operand:X 4 "register_operand" "=r"))]
2853 "TARGET_HARD_FLOAT || TARGET_ZFINX"
2855 "&& reload_completed"
2857 (match_op_dup:X 1 [(match_dup 2) (match_dup 3)]))
2859 (if_then_else (ne:X (match_dup 4) (const_int 0))
2860 (label_ref (match_operand 0))
2863 [(set_attr "type" "branch")
2864 (set (attr "length")
2865 (if_then_else (and (le (minus (match_dup 0) (pc))
2867 (le (minus (pc) (match_dup 0))
2870 (if_then_else (and (le (minus (match_dup 0) (pc))
2871 (const_int 1048564))
2872 (le (minus (pc) (match_dup 0))
2873 (const_int 1048576)))
;; "!=" branch: there is no native fne, so compute feq and branch on
;; the result being zero.
2877 (define_insn_and_split "*cbranch<ANYF:mode>4"
2879 (if_then_else (match_operator 1 "ne_operator"
2880 [(match_operand:ANYF 2 "register_operand" "f")
2881 (match_operand:ANYF 3 "register_operand" "f")])
2882 (label_ref (match_operand 0 ""))
2884 (clobber (match_operand:X 4 "register_operand" "=r"))]
2885 "TARGET_HARD_FLOAT || TARGET_ZFINX"
2887 "&& reload_completed"
2889 (eq:X (match_dup 2) (match_dup 3)))
2891 (if_then_else (eq:X (match_dup 4) (const_int 0))
2892 (label_ref (match_operand 0))
2895 [(set_attr "type" "branch")
2896 (set (attr "length")
2897 (if_then_else (and (le (minus (match_dup 0) (pc))
2899 (le (minus (pc) (match_dup 0))
2902 (if_then_else (and (le (minus (match_dup 0) (pc))
2903 (const_int 1048564))
2904 (le (minus (pc) (match_dup 0))
2905 (const_int 1048576)))
;; Branch on a single bit of a register: shift the bit into the sign
;; position, then branch on the sign (bge/blt against zero).
2909 (define_insn_and_split "*branch_on_bit<X:mode>"
2912 (match_operator 0 "equality_operator"
2913 [(zero_extract:X (match_operand:X 2 "register_operand" "r")
2915 (match_operand 3 "branch_on_bit_operand"))
2917 (label_ref (match_operand 1))
2919 (clobber (match_scratch:X 4 "=&r"))]
2924 (ashift:X (match_dup 2) (match_dup 3)))
2927 (match_op_dup 0 [(match_dup 4) (const_int 0)])
2928 (label_ref (match_operand 1))
2931 int shift = GET_MODE_BITSIZE (<MODE>mode) - 1 - INTVAL (operands[3]);
2932 operands[3] = GEN_INT (shift);
;; EQ on a zero bit becomes "sign >= 0"; NE becomes "sign < 0".
2934 if (GET_CODE (operands[0]) == EQ)
2935 operands[0] = gen_rtx_GE (<MODE>mode, operands[4], const0_rtx);
2937 operands[0] = gen_rtx_LT (<MODE>mode, operands[4], const0_rtx);
2939 [(set_attr "type" "branch")])
;; Branch on a low-order bit field [0, len): left-shift away the high
;; bits so an equality test against zero checks only the field.
2941 (define_insn_and_split "*branch_on_bit_range<X:mode>"
2944 (match_operator 0 "equality_operator"
2945 [(zero_extract:X (match_operand:X 2 "register_operand" "r")
2946 (match_operand 3 "branch_on_bit_operand")
2949 (label_ref (match_operand 1))
2951 (clobber (match_scratch:X 4 "=&r"))]
2956 (ashift:X (match_dup 2) (match_dup 3)))
2959 (match_op_dup 0 [(match_dup 4) (const_int 0)])
2960 (label_ref (match_operand 1))
2963 operands[3] = GEN_INT (GET_MODE_BITSIZE (<MODE>mode) - INTVAL (operands[3]));
2965 [(set_attr "type" "branch")])
2968 ;; ....................
2970 ;; SETTING A REGISTER FROM A COMPARISON
2972 ;; ....................
2974 ;; Destination is always set in SI mode.
2976 (define_expand "cstore<mode>4"
2977 [(set (match_operand:SI 0 "register_operand")
2978 (match_operator:SI 1 "ordered_comparison_operator"
2979 [(match_operand:GPR 2 "register_operand")
2980 (match_operand:GPR 3 "nonmemory_operand")]))]
2983 riscv_expand_int_scc (operands[0], GET_CODE (operands[1]), operands[2],
2988 (define_expand "cstore<mode>4"
2989 [(set (match_operand:SI 0 "register_operand")
2990 (match_operator:SI 1 "fp_scc_comparison"
2991 [(match_operand:ANYF 2 "register_operand")
2992 (match_operand:ANYF 3 "register_operand")]))]
2993 "TARGET_HARD_FLOAT || TARGET_ZFINX"
2995 riscv_expand_float_scc (operands[0], GET_CODE (operands[1]), operands[2],
;; Native (signaling) FP compare into a GPR: feq/flt/fle via %C1.
3000 (define_insn "*cstore<ANYF:mode><X:mode>4"
3001 [(set (match_operand:X 0 "register_operand" "=r")
3002 (match_operator:X 1 "fp_native_comparison"
3003 [(match_operand:ANYF 2 "register_operand" " f")
3004 (match_operand:ANYF 3 "register_operand" " f")]))]
3005 "TARGET_HARD_FLOAT || TARGET_ZFINX"
3006 "f%C1.<fmt>\t%0,%2,%3"
3007 [(set_attr "type" "fcmp")
3008 (set_attr "mode" "<UNITMODE>")])
;; Quiet (non-exception-raising) FP compare.  With Zfa use the fltq/fleq
;; instructions; otherwise save FFLAGS, do the native compare, restore
;; FFLAGS, and optionally re-signal for SNaNs (HONOR_SNANS).
3010 (define_expand "f<quiet_pattern>_quiet<ANYF:mode><X:mode>4"
3011 [(set (match_operand:X 0 "register_operand")
3012 (unspec:X [(match_operand:ANYF 1 "register_operand")
3013 (match_operand:ANYF 2 "register_operand")]
3015 "TARGET_HARD_FLOAT || TARGET_ZFINX"
3017 rtx op0 = operands[0];
3018 rtx op1 = operands[1];
3019 rtx op2 = operands[2];
3022 emit_insn (gen_f<quiet_pattern>_quiet<ANYF:mode><X:mode>4_zfa(op0, op1, op2));
3025 rtx tmp = gen_reg_rtx (SImode);
3026 rtx cmp = gen_rtx_<QUIET_PATTERN> (<X:MODE>mode, op1, op2);
;; frflags/fsflags bracket the compare so its FP exception flags are
;; discarded (that is what makes the compare "quiet").
3027 rtx frflags = gen_rtx_UNSPEC_VOLATILE (SImode, gen_rtvec (1, const0_rtx),
3029 rtx fsflags = gen_rtx_UNSPEC_VOLATILE (SImode, gen_rtvec (1, tmp),
3032 emit_insn (gen_rtx_SET (tmp, frflags));
3033 emit_insn (gen_rtx_SET (op0, cmp));
3034 emit_insn (fsflags);
3037 if (HONOR_SNANS (<ANYF:MODE>mode))
3038 emit_insn (gen_rtx_UNSPEC_VOLATILE (<ANYF:MODE>mode,
3039 gen_rtvec (2, op1, op2),
;; Zfa quiet compare, single instruction (f<op>q.<fmt>).
3044 (define_insn "f<quiet_pattern>_quiet<ANYF:mode><X:mode>4_zfa"
3045 [(set (match_operand:X 0 "register_operand" "=r")
3047 [(match_operand:ANYF 1 "register_operand" " f")
3048 (match_operand:ANYF 2 "register_operand" " f")]
3050 "TARGET_HARD_FLOAT && TARGET_ZFA"
3051 "f<quiet_pattern>q.<fmt>\t%0,%1,%2"
3052 [(set_attr "type" "fcmp")
3053 (set_attr "mode" "<UNITMODE>")
3054 (set (attr "length") (const_int 16))])
;; (x == 0) as a value: seqz.  NOTE(review): the zero operand and output
;; template lines (orig 3059-3061) are elided in this listing.
3056 (define_insn "*seq_zero_<X:mode><GPR:mode>"
3057 [(set (match_operand:GPR 0 "register_operand" "=r")
3058 (eq:GPR (match_operand:X 1 "register_operand" " r")
3062 [(set_attr "type" "slt")
3063 (set_attr "mode" "<X:MODE>")])
;; (x != 0) as a value: snez.  NOTE(review): template lines elided here.
3065 (define_insn "*sne_zero_<X:mode><GPR:mode>"
3066 [(set (match_operand:GPR 0 "register_operand" "=r")
3067 (ne:GPR (match_operand:X 1 "register_operand" " r")
3071 [(set_attr "type" "slt")
3072 (set_attr "mode" "<X:MODE>")])
;; x > y via slt/sltu with operands swapped.  NOTE(review): output
;; template (orig 3078-3079) elided in this listing.
3074 (define_insn "*sgt<u>_<X:mode><GPR:mode>"
3075 [(set (match_operand:GPR 0 "register_operand" "= r")
3076 (any_gt:GPR (match_operand:X 1 "register_operand" " r")
3077 (match_operand:X 2 "reg_or_0_operand" " rJ")))]
3080 [(set_attr "type" "slt")
3081 (set_attr "mode" "<X:MODE>")])
;; x >= 0 (and friends): computed as "0 < x" with slt operands reversed.
3083 (define_insn "*sge<u>_<X:mode><GPR:mode>"
3084 [(set (match_operand:GPR 0 "register_operand" "=r")
3085 (any_ge:GPR (match_operand:X 1 "register_operand" " r")
3088 "slt%i2<u>\t%0,zero,%1"
3089 [(set_attr "type" "slt")
3090 (set_attr "mode" "<X:MODE>")])
;; The basic slt/sltu/slti/sltiu pattern.
3092 (define_insn "@slt<u>_<X:mode><GPR:mode>3"
3093 [(set (match_operand:GPR 0 "register_operand" "= r")
3094 (any_lt:GPR (match_operand:X 1 "register_operand" " r")
3095 (match_operand:X 2 "arith_operand" " rI")))]
3097 "slt%i2<u>\t%0,%1,%2"
3098 [(set_attr "type" "slt")
3099 (set_attr "mode" "<X:MODE>")])
;; x <= C becomes x < C+1 (sle_operand guarantees C+1 still fits).
3101 (define_insn "*sle<u>_<X:mode><GPR:mode>"
3102 [(set (match_operand:GPR 0 "register_operand" "=r")
3103 (any_le:GPR (match_operand:X 1 "register_operand" " r")
3104 (match_operand:X 2 "sle_operand" "")))]
3107 operands[2] = GEN_INT (INTVAL (operands[2]) + 1);
3108 return "slt%i2<u>\t%0,%1,%2";
3110 [(set_attr "type" "slt")
3111 (set_attr "mode" "<X:MODE>")])
3114 ;; ....................
3116 ;; UNCONDITIONAL BRANCHES
3118 ;; ....................
3120 ;; Unconditional branches.
;; NOTE(review): the "(define_insn "jump"" header line (orig 3122) is
;; elided in this listing.  Far targets (length 8) use "jump", which
;; clobbers ra via the linker-relaxed call sequence.
3123 [(set (pc) (label_ref (match_operand 0 "" "")))]
3126 /* Hopefully this does not happen often as this is going
3127 to clobber $ra and muck up the return stack predictors. */
3128 if (get_attr_length (insn) == 8)
3129 return "jump\t%l0,ra";
3133 [(set_attr "type" "jump")
3134 (set_attr "mode" "none")])
;; Dispatch to the Pmode-specific indirect-jump insn.
3136 (define_expand "indirect_jump"
3137 [(set (pc) (match_operand 0 "register_operand"))]
3140 operands[0] = force_reg (Pmode, operands[0]);
3141 if (Pmode == SImode)
3142 emit_jump_insn (gen_indirect_jumpsi (operands[0]));
3144 emit_jump_insn (gen_indirect_jumpdi (operands[0]));
;; Constraint "l" restricts the target register (avoids ra/t0 hints).
;; NOTE(review): output template (orig 3150-3151) elided here.
3148 (define_insn "indirect_jump<mode>"
3149 [(set (pc) (match_operand:P 0 "register_operand" "l"))]
3152 [(set_attr "type" "jalr")
3153 (set_attr "mode" "none")])
;; For PC-relative case vectors, add the table label's address to the
;; offset loaded from the table before jumping.
3155 (define_expand "tablejump"
3156 [(set (pc) (match_operand 0 "register_operand" ""))
3157 (use (label_ref (match_operand 1 "" "")))]
3160 if (CASE_VECTOR_PC_RELATIVE)
3161 operands[0] = expand_simple_binop (Pmode, PLUS, operands[0],
3162 gen_rtx_LABEL_REF (Pmode, operands[1]),
3163 NULL_RTX, 0, OPTAB_DIRECT);
3165 if (CASE_VECTOR_PC_RELATIVE && Pmode == DImode)
3166 emit_jump_insn (gen_tablejumpdi (operands[0], operands[1]));
3168 emit_jump_insn (gen_tablejumpsi (operands[0], operands[1]));
;; NOTE(review): output template (orig 3175-3176) elided in this listing.
3172 (define_insn "tablejump<mode>"
3173 [(set (pc) (match_operand:GPR 0 "register_operand" "l"))
3174 (use (label_ref (match_operand 1 "" "")))]
3177 [(set_attr "type" "jalr")
3178 (set_attr "mode" "none")])
3181 ;; ....................
3183 ;; Function prologue/epilogue
3185 ;; ....................
;; Standard named pattern: emit the function prologue.  All the real
;; work is done in C (riscv_expand_prologue).
3188 (define_expand "prologue"
3192 riscv_expand_prologue ();

3196 ;; Block any insns from being moved before this point, since the
3197 ;; profiling call to mcount can use various registers that aren't
3198 ;; saved or used to pass arguments.
;; Emits no code ("ghost") — exists purely as a scheduling barrier.
3200 (define_insn "blockage"
3201 [(unspec_volatile [(const_int 0)] UNSPECV_BLOCKAGE)]
3204 [(set_attr "type" "ghost")
3205 (set_attr "mode" "none")])

;; Standard named pattern: emit a normal function epilogue.
3207 (define_expand "epilogue"
3211 riscv_expand_epilogue (NORMAL_RETURN);

;; Epilogue variant used before a sibling (tail) call: restores state
;; but does not emit the final return jump.
3215 (define_expand "sibcall_epilogue"
3219 riscv_expand_epilogue (SIBCALL_RETURN);
3223 ;; Trivial return. Make it look like a normal return insn as that
3224 ;; allows jump optimizations to work better.
;; Standard "return" pattern, enabled only when the function needs no
;; epilogue work (riscv_can_use_return_insn).
3226 (define_expand "return"
3228 "riscv_can_use_return_insn ()"

;; Bare return; the assembly is produced by riscv_output_return ().
3231 (define_insn "simple_return"
3235 return riscv_output_return ();
3237 [(set_attr "type" "jalr")
3238 (set_attr "mode" "none")])

;; Return through an explicit register; the (use ...) keeps the return
;; address live up to the return.
3242 (define_insn "simple_return_internal"
3244 (use (match_operand 0 "pmode_register_operand" ""))]
3247 [(set_attr "type" "jalr")
3248 (set_attr "mode" "none")])
3250 ;; This is used in compiling the unwind routines.
;; Standard eh_return pattern: install a new return address (operand 0,
;; widened to word_mode if needed) and return via the
;; exception-return epilogue.
3251 (define_expand "eh_return"
3252 [(use (match_operand 0 "general_operand"))]
3255 if (GET_MODE (operands[0]) != word_mode)
3256 operands[0] = convert_to_mode (word_mode, operands[0], 0);
3258 emit_insn (gen_eh_set_lr_di (operands[0]));
3260 emit_insn (gen_eh_set_lr_si (operands[0]));
3262 emit_jump_insn (gen_eh_return_internal ());

3267 ;; Clobber the return address on the stack. We can't expand this
3268 ;; until we know where it will be put in the stack frame.
;; 32-bit variant of the deferred return-address store (split later,
;; once frame layout is known).
3270 (define_insn "eh_set_lr_si"
3271 [(unspec [(match_operand:SI 0 "register_operand" "r")] UNSPEC_EH_RETURN)
3272 (clobber (match_scratch:SI 1 "=&r"))]
3275 [(set_attr "type" "jump")])

;; 64-bit variant of the above.
3277 (define_insn "eh_set_lr_di"
3278 [(unspec [(match_operand:DI 0 "register_operand" "r")] UNSPEC_EH_RETURN)
3279 (clobber (match_scratch:DI 1 "=&r"))]
3282 [(set_attr "type" "jump")])

;; Split for the eh_set_lr insns: once the frame is laid out, store the
;; new return address via riscv_set_return_address.
3285 [(unspec [(match_operand 0 "register_operand")] UNSPEC_EH_RETURN)
3286 (clobber (match_scratch 1))]
3290 riscv_set_return_address (operands[0], operands[1]);

;; After the epilogue is laid out, expands into the exception-return
;; epilogue sequence.
3294 (define_insn_and_split "eh_return_internal"
3298 "epilogue_completed"
3300 "riscv_expand_epilogue (EXCEPTION_RETURN); DONE;"
3301 [(set_attr "type" "ret")])
3304 ;; ....................
3308 ;; ....................
;; Sibling (tail) calls.  Each pattern carries the callee's calling
;; convention as a const_int wrapped in UNSPEC_CALLEE_CC (see the
;; "calling convention of callee" unspec declared at the top of the
;; file).

;; Tail call with no return value: legitimize the target address, then
;; emit sibcall_internal with the callee-cc operand.
3310 (define_expand "sibcall"
3311 [(parallel [(call (match_operand 0 "")
3312 (match_operand 1 ""))
3314 (match_operand 2 "const_int_operand")
3315 ] UNSPEC_CALLEE_CC))])]
3318 rtx target = riscv_legitimize_call_address (XEXP (operands[0], 0));
3319 emit_call_insn (gen_sibcall_internal (target, operands[1], operands[2]));

;; The tail-call insn itself; only valid on insns marked as sibling
;; calls.  Alternatives: register ("j"), symbol ("S"), and "U"
;; (presumably PLT-style) call targets.
3323 (define_insn "sibcall_internal"
3324 [(call (mem:SI (match_operand 0 "call_insn_operand" "j,S,U"))
3325 (match_operand 1 "" ""))
3327 (match_operand 2 "const_int_operand")
3328 ] UNSPEC_CALLEE_CC))]
3329 "SIBLING_CALL_P (insn)"
3334 [(set_attr "type" "call")])

;; Tail call that produces a value (operand 0 receives the result).
3336 (define_expand "sibcall_value"
3337 [(parallel [(set (match_operand 0 "")
3338 (call (match_operand 1 "")
3339 (match_operand 2 "")))
3341 (match_operand 3 "const_int_operand")
3342 ] UNSPEC_CALLEE_CC))])]
3345 rtx target = riscv_legitimize_call_address (XEXP (operands[1], 0));
3346 emit_call_insn (gen_sibcall_value_internal (operands[0], target, operands[2],

;; Value-returning tail-call insn; mirrors sibcall_internal.
3351 (define_insn "sibcall_value_internal"
3352 [(set (match_operand 0 "" "")
3353 (call (mem:SI (match_operand 1 "call_insn_operand" "j,S,U"))
3354 (match_operand 2 "" "")))
3356 (match_operand 3 "const_int_operand")
3357 ] UNSPEC_CALLEE_CC))]
3358 "SIBLING_CALL_P (insn)"
3363 [(set_attr "type" "call")])
;; Ordinary (non-tail) calls.  Same UNSPEC_CALLEE_CC scheme as the
;; sibcall patterns above, but these clobber the return-address
;; register.

;; Call with no return value.
3365 (define_expand "call"
3366 [(parallel [(call (match_operand 0 "")
3367 (match_operand 1 ""))
3369 (match_operand 2 "const_int_operand")
3370 ] UNSPEC_CALLEE_CC))])]
3373 rtx target = riscv_legitimize_call_address (XEXP (operands[0], 0));
3374 emit_call_insn (gen_call_internal (target, operands[1], operands[2]));

;; The call insn; clobbers RETURN_ADDR_REGNUM (ra).  Target constraint
;; "l" here vs "j" in sibcall_internal — the non-tail call may use the
;; link-register class.
3378 (define_insn "call_internal"
3379 [(call (mem:SI (match_operand 0 "call_insn_operand" "l,S,U"))
3380 (match_operand 1 "" ""))
3382 (match_operand 2 "const_int_operand")
3383 ] UNSPEC_CALLEE_CC))
3384 (clobber (reg:SI RETURN_ADDR_REGNUM))]
3390 [(set_attr "type" "call")])

;; Call producing a value (operand 0 receives the result).
3392 (define_expand "call_value"
3393 [(parallel [(set (match_operand 0 "")
3394 (call (match_operand 1 "")
3395 (match_operand 2 "")))
3397 (match_operand 3 "const_int_operand")
3398 ] UNSPEC_CALLEE_CC))])]
3401 rtx target = riscv_legitimize_call_address (XEXP (operands[1], 0));
3402 emit_call_insn (gen_call_value_internal (operands[0], target, operands[2],

;; Value-returning call insn; mirrors call_internal.
3407 (define_insn "call_value_internal"
3408 [(set (match_operand 0 "" "")
3409 (call (mem:SI (match_operand 1 "call_insn_operand" "l,S,U"))
3410 (match_operand 2 "" "")))
3412 (match_operand 3 "const_int_operand")
3413 ] UNSPEC_CALLEE_CC))
3414 (clobber (reg:SI RETURN_ADDR_REGNUM))]
3420 [(set_attr "type" "call")])
3422 ;; Call subroutine returning any type.
;; Standard untyped_call pattern (used by __builtin_apply): emit the
;; call with the base calling convention, copy each possible result
;; register described by operand 2 to its destination, and fence the
;; whole sequence with a blockage so the copies are not scheduled away.
3424 (define_expand "untyped_call"
3425 [(parallel [(call (match_operand 0 "")
3427 (match_operand 1 "")
3428 (match_operand 2 "")])]
3433 /* Untyped calls always use the RISCV_CC_BASE calling convention. */
3434 emit_call_insn (gen_call (operands[0], const0_rtx,
3435 gen_int_mode (RISCV_CC_BASE, SImode)));
3437 for (i = 0; i < XVECLEN (operands[2], 0); i++)
3439 rtx set = XVECEXP (operands[2], 0, i);
3440 riscv_emit_move (SET_DEST (set), SET_SRC (set));
3443 emit_insn (gen_blockage ());

;; Attribute tail of a no-op insn (head elided in this view).
3451 [(set_attr "type" "nop")
3452 (set_attr "mode" "none")])

;; Unconditional trap (standard "trap" pattern: trap_if with a
;; constant-true condition).
3455 [(trap_if (const_int 1) (const_int 0))]
3458 [(set_attr "type" "trap")])
3460 ;; Must use the registers that we save to prevent the rename reg optimization
3461 ;; pass from using them before the gpr_save pattern when shrink wrapping
3462 ;; occurs. See bug 95252 for instance.
;; -msave-restore prologue: call the out-of-line __riscv_save_N routine
;; via t0.  Operand 0 selects which save routine; the match_parallel
;; lists the saved registers (see comment above).
3464 (define_insn "gpr_save"
3465 [(match_parallel 1 "gpr_save_operation"
3466 [(unspec_volatile [(match_operand 0 "const_int_operand")]
3467 UNSPECV_GPR_SAVE)])]
3469 "call\tt0,__riscv_save_%0"
3470 [(set_attr "type" "call")])

;; -msave-restore epilogue: tail-call the out-of-line restore routine,
;; which restores the registers and returns to the caller itself.
3472 (define_insn "gpr_restore"
3473 [(unspec_volatile [(match_operand 0 "const_int_operand")] UNSPECV_GPR_RESTORE)]
3475 "tail\t__riscv_restore_%0"
3476 [(set_attr "type" "call")])

;; Return following a gpr_restore; (use ...) keeps the return-address
;; register live until the return.
3478 (define_insn "gpr_restore_return"
3480 (use (match_operand 0 "pmode_register_operand" ""))
3484 [(set_attr "type" "ret")])
;; Floating-point CSR access builtins.  All require hard float (F/D) or
;; its Zfinx integer-register counterpart.  unspec_volatile prevents
;; CSE/scheduling across the CSR access.

;; Read the full fcsr register into a GPR.
3486 (define_insn "riscv_frcsr"
3487 [(set (match_operand:SI 0 "register_operand" "=r")
3488 (unspec_volatile:SI [(const_int 0)] UNSPECV_FRCSR))]
3489 "TARGET_HARD_FLOAT || TARGET_ZFINX"
3491 [(set_attr "type" "fmove")])

;; Write a GPR value into fcsr.
3493 (define_insn "riscv_fscsr"
3494 [(unspec_volatile [(match_operand:SI 0 "register_operand" "r")] UNSPECV_FSCSR)]
3495 "TARGET_HARD_FLOAT || TARGET_ZFINX"
3497 [(set_attr "type" "fmove")])

;; Read the FP exception flags (fflags) into a GPR.
3499 (define_insn "riscv_frflags"
3500 [(set (match_operand:SI 0 "register_operand" "=r")
3501 (unspec_volatile:SI [(const_int 0)] UNSPECV_FRFLAGS))]
3502 "TARGET_HARD_FLOAT || TARGET_ZFINX"
3504 [(set_attr "type" "fmove")])

;; Write the FP exception flags; "rK" also allows a CSR-immediate.
3506 (define_insn "riscv_fsflags"
3507 [(unspec_volatile [(match_operand:SI 0 "csr_operand" "rK")] UNSPECV_FSFLAGS)]
3508 "TARGET_HARD_FLOAT || TARGET_ZFINX"
3510 [(set_attr "type" "fmove")])

;; Signal invalid-operation on a signaling NaN: feq with the result
;; discarded into zero is used purely for its FP-flag side effect.
3512 (define_insn "*riscv_fsnvsnan<mode>2"
3513 [(unspec_volatile [(match_operand:ANYF 0 "register_operand" "f")
3514 (match_operand:ANYF 1 "register_operand" "f")]
3516 "TARGET_HARD_FLOAT || TARGET_ZFINX"
3517 "feq.<fmt>\tzero,%0,%1"
3518 [(set_attr "type" "fcmp")
3519 (set_attr "mode" "<UNITMODE>")])

;; Privileged trap-return instructions (machine / supervisor / user
;; mode), exposed as builtins for interrupt handlers.
3521 (define_insn "riscv_mret"
3523 (unspec_volatile [(const_int 0)] UNSPECV_MRET)]
3526 [(set_attr "type" "ret")])

3528 (define_insn "riscv_sret"
3530 (unspec_volatile [(const_int 0)] UNSPECV_SRET)]
3533 [(set_attr "type" "ret")])

3535 (define_insn "riscv_uret"
3537 (unspec_volatile [(const_int 0)] UNSPECV_URET)]
3540 [(set_attr "type" "ret")])
;; Zero-length "ghost" insn that creates an artificial dependency
;; between two registers through a BLK-mode memory wildcard, preventing
;; the scheduler from reordering stack accesses across it.
3542 (define_insn "stack_tie<mode>"
3543 [(set (mem:BLK (scratch))
3544 (unspec:BLK [(match_operand:X 0 "register_operand" "r")
3545 (match_operand:X 1 "register_operand" "r")]
3549 [(set_attr "type" "ghost")
3550 (set_attr "length" "0")]

3553 ;; This fixes a failure with gcc.c-torture/execute/pr64242.c at -O2 for a
3554 ;; 32-bit target when using -mtune=sifive-7-series. The first sched pass
3555 ;; runs before register elimination, and we have a non-obvious dependency
3556 ;; between a use of the soft fp and a set of the hard fp. We fix this by
3557 ;; emitting a clobber using the hard fp between the two insns.
;; Restore the stack pointer for a nonlocal goto: plain move plus a
;; hard-frame-pointer clobber acting as a scheduling barrier (see
;; comment above).
3558 (define_expand "restore_stack_nonlocal"
3559 [(match_operand 0 "register_operand")
3560 (match_operand 1 "memory_operand")]
3563 emit_move_insn (operands[0], operands[1]);
3564 /* Prevent the following hard fp restore from being moved before the move
3565 insn above which uses a copy of the soft fp reg. */
3566 emit_clobber (gen_rtx_MEM (BLKmode, hard_frame_pointer_rtx));
3570 ;; Named pattern for expanding thread pointer reference.
;; Copies the thread-pointer register into operand 0 (P = pointer-sized
;; mode); the source side of the set is elided in this view.
3571 (define_expand "get_thread_pointer<mode>"
3572 [(set (match_operand:P 0 "register_operand" "=r")
3577 ;; Named patterns for stack smashing protection.
;; Copy the canary from the guard location (operand 1) to the stack
;; slot (operand 0).  With -mstack-protector-guard=tls the guard
;; address is rebuilt here as guard_reg + guard_offset.
3579 (define_expand "stack_protect_set"
3580 [(match_operand 0 "memory_operand")
3581 (match_operand 1 "memory_operand")]
3584 machine_mode mode = GET_MODE (operands[0]);
3585 if (riscv_stack_protector_guard == SSP_TLS)
3587 rtx reg = gen_rtx_REG (Pmode, riscv_stack_protector_guard_reg);
3588 rtx offset = GEN_INT (riscv_stack_protector_guard_offset);
3589 rtx addr = gen_rtx_PLUS (Pmode, reg, offset);
3590 operands[1] = gen_rtx_MEM (Pmode, addr);
3593 emit_insn ((mode == DImode
3594 ? gen_stack_protect_set_di
3595 : gen_stack_protect_set_si) (operands[0], operands[1]));

3599 ;; DO NOT SPLIT THIS PATTERN. It is important for security reasons that the
3600 ;; canary value does not live beyond the life of this sequence.
;; load canary -> store to stack slot -> zero the scratch so the canary
;; never survives in a register (see the warning above).
3601 (define_insn "stack_protect_set_<mode>"
3602 [(set (match_operand:GPR 0 "memory_operand" "=m")
3603 (unspec:GPR [(match_operand:GPR 1 "memory_operand" "m")]
3605 (set (match_scratch:GPR 2 "=&r") (const_int 0))]
3607 "<load>\t%2, %1\;<store>\t%2, %0\;li\t%2, 0"
3608 [(set_attr "type" "multi")
3609 (set_attr "length" "12")])

;; Compare the stack canary against the guard and branch to operand 2
;; when they match (XOR result == 0).  Same SSP_TLS guard-address
;; handling as stack_protect_set.
3611 (define_expand "stack_protect_test"
3612 [(match_operand 0 "memory_operand")
3613 (match_operand 1 "memory_operand")
3618 machine_mode mode = GET_MODE (operands[0]);
3620 result = gen_reg_rtx(mode);
3621 if (riscv_stack_protector_guard == SSP_TLS)
3623 rtx reg = gen_rtx_REG (Pmode, riscv_stack_protector_guard_reg);
3624 rtx offset = GEN_INT (riscv_stack_protector_guard_offset);
3625 rtx addr = gen_rtx_PLUS (Pmode, reg, offset);
3626 operands[1] = gen_rtx_MEM (Pmode, addr);
3628 emit_insn ((mode == DImode
3629 ? gen_stack_protect_test_di
3630 : gen_stack_protect_test_si) (result,
3634 rtx cond = gen_rtx_EQ (VOIDmode, result, const0_rtx);
3635 emit_jump_insn (gen_cbranch4 (mode, cond, result, const0_rtx, operands[2]));

;; XOR the two canary copies into operand 0 (zero == match), then zero
;; the scratch that held the guard value.
3640 (define_insn "stack_protect_test_<mode>"
3641 [(set (match_operand:GPR 0 "register_operand" "=r")
3642 (unspec:GPR [(match_operand:GPR 1 "memory_operand" "m")
3643 (match_operand:GPR 2 "memory_operand" "m")]
3645 (clobber (match_scratch:GPR 3 "=&r"))]
3647 "<load>\t%3, %1\;<load>\t%0, %2\;xor\t%0, %3, %0\;li\t%3, 0"
3648 [(set_attr "type" "multi")
3649 (set_attr "length" "12")])
;; Cache-management-operation builtins ("cbo" type).  Operand 0 is the
;; address the operation applies to; unspec_volatile keeps them ordered.
;; Presumably these map to the Zicbom/Zicboz cbo.* instructions — the
;; output templates are elided in this view.

;; Write back (clean) a cache block.
3651 (define_insn "riscv_clean_<mode>"
3652 [(unspec_volatile:X [(match_operand:X 0 "register_operand" "r")]
3656 [(set_attr "type" "cbo")]

;; Write back and invalidate (flush) a cache block.
3659 (define_insn "riscv_flush_<mode>"
3660 [(unspec_volatile:X [(match_operand:X 0 "register_operand" "r")]
3664 [(set_attr "type" "cbo")]

;; Invalidate a cache block without writeback.
3667 (define_insn "riscv_inval_<mode>"
3668 [(unspec_volatile:X [(match_operand:X 0 "register_operand" "r")]
3672 [(set_attr "type" "cbo")]

;; Zero a cache block.
3675 (define_insn "riscv_zero_<mode>"
3676 [(unspec_volatile:X [(match_operand:X 0 "register_operand" "r")]
3680 [(set_attr "type" "cbo")]

;; Standard "prefetch" pattern mapped to Zicbop: operand 1 selects
;; read (0) vs write (1) prefetch; operand 2 (locality) is accepted but
;; not encoded.
3683 (define_insn "prefetch"
3684 [(prefetch (match_operand 0 "address_operand" "r")
3685 (match_operand 1 "imm5_operand" "i")
3686 (match_operand 2 "const_int_operand" "n"))]
3689 switch (INTVAL (operands[1]))
3691 case 0: return "prefetch.r\t%a0";
3692 case 1: return "prefetch.w\t%a0";
3693 default: gcc_unreachable ();
3696 [(set_attr "type" "cbo")])

;; Instruction prefetch builtin (prefetch.i-style); operand 1 is an
;; immediate offset.
3698 (define_insn "riscv_prefetchi_<mode>"
3699 [(unspec_volatile:X [(match_operand:X 0 "address_operand" "r")
3700 (match_operand:X 1 "imm5_operand" "i")]
3704 [(set_attr "type" "cbo")])
;; Standard bit-field extract expanders.  Operand 2 is the field width,
;; operand 3 the starting bit position.

;; Signed bit-field extraction.
3706 (define_expand "extv<mode>"
3707 [(set (match_operand:GPR 0 "register_operand" "=r")
3708 (sign_extract:GPR (match_operand:GPR 1 "register_operand" "r")
3709 (match_operand 2 "const_int_operand")
3710 (match_operand 3 "const_int_operand")))]

;; Unsigned bit-field extraction; the visible condition fragment
;; special-cases sub-byte fields starting at bit 0 (width < 8,
;; pos == 0).
3714 (define_expand "extzv<mode>"
3715 [(set (match_operand:GPR 0 "register_operand" "=r")
3716 (zero_extract:GPR (match_operand:GPR 1 "register_operand" "r")
3717 (match_operand 2 "const_int_operand")
3718 (match_operand 3 "const_int_operand")))]
3722 && (INTVAL (operands[2]) < 8) && (INTVAL (operands[3]) == 0))

;; Widening HImode multiply-accumulate: dest = (HI)op1 * (HI)op2 + op3.
3726 (define_expand "maddhisi4"
3727 [(set (match_operand:SI 0 "register_operand")
3729 (mult:SI (sign_extend:SI (match_operand:HI 1 "register_operand"))
3730 (sign_extend:SI (match_operand:HI 2 "register_operand")))
3731 (match_operand:SI 3 "register_operand")))]

;; Widening HImode multiply-subtract: dest = op3 - (HI)op1 * (HI)op2.
3735 (define_expand "msubhisi4"
3736 [(set (match_operand:SI 0 "register_operand")
3738 (match_operand:SI 3 "register_operand")
3739 (mult:SI (sign_extend:SI (match_operand:HI 1 "register_operand"))
3740 (sign_extend:SI (match_operand:HI 2 "register_operand")))))]
3744 ;; String compare with length insn.
3745 ;; Argument 0 is the target (result)
3746 ;; Argument 1 is the source1
3747 ;; Argument 2 is the source2
3748 ;; Argument 3 is the length
3749 ;; Argument 4 is the alignment
;; Inline strncmp expansion; enabled only when inlining is requested,
;; not optimizing for size, and a usable bit-manipulation or vector
;; extension is available.  Falls back (FAIL, elided) to the library
;; call when riscv_expand_strcmp declines.
3751 (define_expand "cmpstrnsi"
3752 [(parallel [(set (match_operand:SI 0)
3753 (compare:SI (match_operand:BLK 1)
3754 (match_operand:BLK 2)))
3755 (use (match_operand:SI 3))
3756 (use (match_operand:SI 4))])]
3757 "riscv_inline_strncmp && !optimize_size
3758 && (TARGET_ZBB || TARGET_XTHEADBB || TARGET_VECTOR)"
3760 if (riscv_expand_strcmp (operands[0], operands[1], operands[2],
3761 operands[3], operands[4]))

3767 ;; String compare insn.
3768 ;; Argument 0 is the target (result)
3769 ;; Argument 1 is the source1
3770 ;; Argument 2 is the source2
3771 ;; Argument 3 is the alignment
;; Inline strcmp expansion: same helper as cmpstrnsi, with NULL_RTX for
;; the (absent) length operand.
3773 (define_expand "cmpstrsi"
3774 [(parallel [(set (match_operand:SI 0)
3775 (compare:SI (match_operand:BLK 1)
3776 (match_operand:BLK 2)))
3777 (use (match_operand:SI 3))])]
3778 "riscv_inline_strcmp && !optimize_size
3779 && (TARGET_ZBB || TARGET_XTHEADBB || TARGET_VECTOR)"
3781 if (riscv_expand_strcmp (operands[0], operands[1], operands[2],
3782 NULL_RTX, operands[3]))

3788 ;; Search character in string (generalization of strlen).
3789 ;; Argument 0 is the resulting offset
3790 ;; Argument 1 is the string
3791 ;; Argument 2 is the search character
3792 ;; Argument 3 is the alignment
;; Inline strlen expansion; only the NUL search character (const0_rtx)
;; is handled inline — other characters take the elided fallback path.
3794 (define_expand "strlen<mode>"
3795 [(set (match_operand:X 0 "register_operand")
3796 (unspec:X [(match_operand:BLK 1 "general_operand")
3797 (match_operand:SI 2 "const_int_operand")
3798 (match_operand:SI 3 "const_int_operand")]
3800 "riscv_inline_strlen && !optimize_size
3801 && (TARGET_ZBB || TARGET_XTHEADBB || TARGET_VECTOR)"
3803 rtx search_char = operands[2];
3805 if (search_char != const0_rtx)
3808 if (riscv_expand_strlen (operands[0], operands[1], operands[2], operands[3]))
;; Load a value through a PC-relative symbol address under the large
;; code model (RV64 only).  Length 8: presumably an auipc + load pair —
;; the output template is elided in this view.
3814 (define_insn "*large_load_address"
3815 [(set (match_operand:DI 0 "register_operand" "=r")
3816 (mem:DI (match_operand 1 "pcrel_symbol_operand" "")))]
3817 "TARGET_64BIT && riscv_cmodel == CM_LARGE"
3819 [(set_attr "type" "load")
3820 (set (attr "length") (const_int 8))])
3822 (include "bitmanip.md")
3823 (include "crypto.md")
3825 (include "sync-rvwmo.md")
3826 (include "sync-ztso.md")
3827 (include "peephole.md")
3829 (include "generic.md")
3830 (include "sifive-7.md")
3831 (include "thead.md")
3832 (include "generic-ooo.md")
3833 (include "vector.md")
3834 (include "vector-crypto.md")
3835 (include "zicond.md")
3838 (include "corev.md")