/* Medium-level subroutines: convert bit-field store and extract
   and shifts, multiplies and divides to rtl instructions.
   Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001 Free Software Foundation, Inc.

This file is part of GNU CC.

GNU CC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.

GNU CC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GNU CC; see the file COPYING.  If not, write to
the Free Software Foundation, 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA.  */
#include "insn-flags.h"
#include "insn-codes.h"
#include "insn-config.h"
static void store_fixed_bit_field	PARAMS ((rtx, unsigned HOST_WIDE_INT,
						 unsigned HOST_WIDE_INT,
						 unsigned HOST_WIDE_INT, rtx,
						 unsigned int));
static void store_split_bit_field	PARAMS ((rtx, unsigned HOST_WIDE_INT,
						 unsigned HOST_WIDE_INT, rtx,
						 unsigned int));
static rtx extract_fixed_bit_field	PARAMS ((enum machine_mode, rtx,
						 unsigned HOST_WIDE_INT,
						 unsigned HOST_WIDE_INT,
						 unsigned HOST_WIDE_INT,
						 rtx, int, unsigned int));
static rtx mask_rtx			PARAMS ((enum machine_mode, int,
						 int, int));
static rtx lshift_value			PARAMS ((enum machine_mode, rtx,
						 int, int));
static rtx extract_split_bit_field	PARAMS ((rtx, unsigned HOST_WIDE_INT,
						 unsigned HOST_WIDE_INT, int,
						 unsigned int));
static void do_cmp_and_jump		PARAMS ((rtx, rtx, enum rtx_code,
						 enum machine_mode, rtx));
/* Non-zero means divides or modulus operations are relatively cheap for
   powers of two, so don't use branches; emit the operation instead.
   Usually, this will mean that the MD file will emit non-branch
   conditional insns.  */

static int sdiv_pow2_cheap, smod_pow2_cheap;

#ifndef SLOW_UNALIGNED_ACCESS
#define SLOW_UNALIGNED_ACCESS(MODE, ALIGN) STRICT_ALIGNMENT
#endif
/* For compilers that support multiple targets with different word sizes,
   MAX_BITS_PER_WORD contains the biggest value of BITS_PER_WORD.  An example
   is the H8/300(H) compiler.  */

#ifndef MAX_BITS_PER_WORD
#define MAX_BITS_PER_WORD BITS_PER_WORD
#endif
/* Cost of various pieces of RTL.  Note that some of these are indexed by
   shift count and some by mode.  */
static int add_cost, negate_cost, zero_cost;
static int shift_cost[MAX_BITS_PER_WORD];
static int shiftadd_cost[MAX_BITS_PER_WORD];
static int shiftsub_cost[MAX_BITS_PER_WORD];
static int mul_cost[NUM_MACHINE_MODES];
static int div_cost[NUM_MACHINE_MODES];
static int mul_widen_cost[NUM_MACHINE_MODES];
static int mul_highpart_cost[NUM_MACHINE_MODES];
void
init_expmed ()
{
  /* This is "some random pseudo register" for purposes of calling recog
     to see what insns exist.  */
  rtx reg = gen_rtx_REG (word_mode, 10000);
  rtx shift_insn, shiftadd_insn, shiftsub_insn;
  int dummy;
  int m;
  enum machine_mode mode, wider_mode;

  start_sequence ();

  reg = gen_rtx_REG (word_mode, 10000);

  zero_cost = rtx_cost (const0_rtx, 0);
  add_cost = rtx_cost (gen_rtx_PLUS (word_mode, reg, reg), SET);

  shift_insn = emit_insn (gen_rtx_SET (VOIDmode, reg,
				       gen_rtx_ASHIFT (word_mode, reg,
						       const0_rtx)));

  shiftadd_insn
    = emit_insn (gen_rtx_SET (VOIDmode, reg,
			      gen_rtx_PLUS (word_mode,
					    gen_rtx_MULT (word_mode,
							  reg, const0_rtx),
					    reg)));

  shiftsub_insn
    = emit_insn (gen_rtx_SET (VOIDmode, reg,
			      gen_rtx_MINUS (word_mode,
					     gen_rtx_MULT (word_mode,
							   reg, const0_rtx),
					     reg)));

  init_recog ();

  shift_cost[0] = 0;
  shiftadd_cost[0] = shiftsub_cost[0] = add_cost;

  for (m = 1; m < MAX_BITS_PER_WORD; m++)
    {
      shift_cost[m] = shiftadd_cost[m] = shiftsub_cost[m] = 32000;

      XEXP (SET_SRC (PATTERN (shift_insn)), 1) = GEN_INT (m);
      if (recog (PATTERN (shift_insn), shift_insn, &dummy) >= 0)
	shift_cost[m] = rtx_cost (SET_SRC (PATTERN (shift_insn)), SET);

      XEXP (XEXP (SET_SRC (PATTERN (shiftadd_insn)), 0), 1)
	= GEN_INT ((HOST_WIDE_INT) 1 << m);
      if (recog (PATTERN (shiftadd_insn), shiftadd_insn, &dummy) >= 0)
	shiftadd_cost[m] = rtx_cost (SET_SRC (PATTERN (shiftadd_insn)), SET);

      XEXP (XEXP (SET_SRC (PATTERN (shiftsub_insn)), 0), 1)
	= GEN_INT ((HOST_WIDE_INT) 1 << m);
      if (recog (PATTERN (shiftsub_insn), shiftsub_insn, &dummy) >= 0)
	shiftsub_cost[m] = rtx_cost (SET_SRC (PATTERN (shiftsub_insn)), SET);
    }

  negate_cost = rtx_cost (gen_rtx_NEG (word_mode, reg), SET);

  sdiv_pow2_cheap
    = (rtx_cost (gen_rtx_DIV (word_mode, reg, GEN_INT (32)), SET)
       <= 2 * add_cost);
  smod_pow2_cheap
    = (rtx_cost (gen_rtx_MOD (word_mode, reg, GEN_INT (32)), SET)
       <= 2 * add_cost);

  for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT);
       mode != VOIDmode;
       mode = GET_MODE_WIDER_MODE (mode))
    {
      reg = gen_rtx_REG (mode, 10000);
      div_cost[(int) mode] = rtx_cost (gen_rtx_UDIV (mode, reg, reg), SET);
      mul_cost[(int) mode] = rtx_cost (gen_rtx_MULT (mode, reg, reg), SET);
      wider_mode = GET_MODE_WIDER_MODE (mode);
      if (wider_mode != VOIDmode)
	{
	  mul_widen_cost[(int) wider_mode]
	    = rtx_cost (gen_rtx_MULT (wider_mode,
				      gen_rtx_ZERO_EXTEND (wider_mode, reg),
				      gen_rtx_ZERO_EXTEND (wider_mode, reg)),
			SET);
	  mul_highpart_cost[(int) mode]
	    = rtx_cost (gen_rtx_TRUNCATE
			(mode,
			 gen_rtx_LSHIFTRT
			 (wider_mode,
			  gen_rtx_MULT (wider_mode,
					gen_rtx_ZERO_EXTEND (wider_mode, reg),
					gen_rtx_ZERO_EXTEND (wider_mode, reg)),
			  GEN_INT (GET_MODE_BITSIZE (mode)))),
			SET);
	}
    }

  end_sequence ();
}
/* Return an rtx representing minus the value of X.
   MODE is the intended mode of the result,
   useful if X is a CONST_INT.  */

rtx
negate_rtx (mode, x)
     enum machine_mode mode;
     rtx x;
{
  rtx result = simplify_unary_operation (NEG, mode, x, mode);

  if (result == 0)
    result = expand_unop (mode, neg_optab, x, NULL_RTX, 0);

  return result;
}
/* Generate code to store value from rtx VALUE
   into a bit-field within structure STR_RTX
   containing BITSIZE bits starting at bit BITNUM.
   FIELDMODE is the machine-mode of the FIELD_DECL node for this field.
   ALIGN is the alignment that STR_RTX is known to have.
   TOTAL_SIZE is the size of the structure in bytes, or -1 if varying.  */

/* ??? Note that there are two different ideas here for how
   to determine the size to count bits within, for a register.
   One is BITS_PER_WORD, and the other is the size of operand 3
   of the insv pattern.

   If operand 3 of the insv pattern is VOIDmode, then we will use BITS_PER_WORD
   else, we use the mode of operand 3.  */
rtx
store_bit_field (str_rtx, bitsize, bitnum, fieldmode, value, align, total_size)
     rtx str_rtx;
     unsigned HOST_WIDE_INT bitsize;
     unsigned HOST_WIDE_INT bitnum;
     enum machine_mode fieldmode;
     rtx value;
     unsigned int align;
     HOST_WIDE_INT total_size;
{
  unsigned int unit
    = (GET_CODE (str_rtx) == MEM) ? BITS_PER_UNIT : BITS_PER_WORD;
  unsigned HOST_WIDE_INT offset = bitnum / unit;
  unsigned HOST_WIDE_INT bitpos = bitnum % unit;
  register rtx op0 = str_rtx;
#ifdef HAVE_insv
  unsigned HOST_WIDE_INT insv_bitsize;
  enum machine_mode op_mode;

  op_mode = insn_data[(int) CODE_FOR_insv].operand[3].mode;
  if (op_mode == VOIDmode)
    op_mode = word_mode;
  insv_bitsize = GET_MODE_BITSIZE (op_mode);
#endif
  /* It is wrong to have align==0, since every object is aligned at
     least at a bit boundary.  This usually means a bug elsewhere.  */
  if (align == 0)
    abort ();

  /* Discount the part of the structure before the desired byte.
     We need to know how many bytes are safe to reference after it.  */
  if (total_size >= 0)
    total_size -= (bitpos / BIGGEST_ALIGNMENT
		   * (BIGGEST_ALIGNMENT / BITS_PER_UNIT));

  while (GET_CODE (op0) == SUBREG)
    {
      /* The following line once was done only if WORDS_BIG_ENDIAN,
	 but I think that is a mistake.  WORDS_BIG_ENDIAN is
	 meaningful at a much higher level; when structures are copied
	 between memory and regs, the higher-numbered regs
	 always get higher addresses.  */
      offset += SUBREG_WORD (op0);
      /* We used to adjust BITPOS here, but now we do the whole adjustment
	 right after the loop.  */
      op0 = SUBREG_REG (op0);
    }

  /* If OP0 is a register, BITPOS must count within a word.
     But as we have it, it counts within whatever size OP0 now has.
     On a bigendian machine, these are not the same, so convert.  */
  if (BYTES_BIG_ENDIAN
      && GET_CODE (op0) != MEM
      && unit > GET_MODE_BITSIZE (GET_MODE (op0)))
    bitpos += unit - GET_MODE_BITSIZE (GET_MODE (op0));

  value = protect_from_queue (value, 0);

  if (flag_force_mem)
    value = force_not_mem (value);
  /* If the target is a register, overwriting the entire object, or storing
     a full-word or multi-word field can be done with just a SUBREG.

     If the target is memory, storing any naturally aligned field can be
     done with a simple store.  For targets that support fast unaligned
     memory, any naturally sized, unit aligned field can be done directly.  */

  if (bitsize == GET_MODE_BITSIZE (fieldmode)
      && (GET_CODE (op0) != MEM
	  ? (GET_MODE_SIZE (fieldmode) >= UNITS_PER_WORD
	     || GET_MODE_SIZE (GET_MODE (op0)) == GET_MODE_SIZE (fieldmode))
	  : (! SLOW_UNALIGNED_ACCESS (fieldmode, align)
	     || (offset * BITS_PER_UNIT % bitsize == 0
		 && align % GET_MODE_BITSIZE (fieldmode) == 0)))
      && (BYTES_BIG_ENDIAN ? bitpos + bitsize == unit : bitpos == 0))
    {
      if (GET_MODE (op0) != fieldmode)
	{
	  if (GET_CODE (op0) == SUBREG)
	    {
	      if (GET_MODE (SUBREG_REG (op0)) == fieldmode
		  || GET_MODE_CLASS (fieldmode) == MODE_INT
		  || GET_MODE_CLASS (fieldmode) == MODE_PARTIAL_INT)
		op0 = SUBREG_REG (op0);
	      else
		/* Else we've got some float mode source being extracted into
		   a different float mode destination -- this combination of
		   subregs results in Severe Tire Damage.  */
		abort ();
	    }
	  if (GET_CODE (op0) == REG)
	    op0 = gen_rtx_SUBREG (fieldmode, op0, offset);
	  else
	    op0 = change_address (op0, fieldmode,
				  plus_constant (XEXP (op0, 0), offset));
	}
      emit_move_insn (op0, value);
      return value;
    }
  /* Make sure we are playing with integral modes.  Pun with subregs
     if we aren't.  This must come after the entire register case above,
     since that case is valid for any mode.  The following cases are only
     valid for integral modes.  */
  {
    enum machine_mode imode = int_mode_for_mode (GET_MODE (op0));
    if (imode != GET_MODE (op0))
      {
	if (GET_CODE (op0) == MEM)
	  op0 = change_address (op0, imode, NULL_RTX);
	else if (imode != BLKmode)
	  op0 = gen_lowpart (imode, op0);
	else
	  abort ();
      }
  }
  /* Storing an lsb-aligned field in a register
     can be done with a movestrict instruction.  */

  if (GET_CODE (op0) != MEM
      && (BYTES_BIG_ENDIAN ? bitpos + bitsize == unit : bitpos == 0)
      && bitsize == GET_MODE_BITSIZE (fieldmode)
      && (movstrict_optab->handlers[(int) fieldmode].insn_code
	  != CODE_FOR_nothing))
    {
      int icode = movstrict_optab->handlers[(int) fieldmode].insn_code;

      /* Get appropriate low part of the value being stored.  */
      if (GET_CODE (value) == CONST_INT || GET_CODE (value) == REG)
	value = gen_lowpart (fieldmode, value);
      else if (!(GET_CODE (value) == SYMBOL_REF
		 || GET_CODE (value) == LABEL_REF
		 || GET_CODE (value) == CONST))
	value = convert_to_mode (fieldmode, value, 0);

      if (! (*insn_data[icode].operand[1].predicate) (value, fieldmode))
	value = copy_to_mode_reg (fieldmode, value);

      if (GET_CODE (op0) == SUBREG)
	{
	  if (GET_MODE (SUBREG_REG (op0)) == fieldmode
	      || GET_MODE_CLASS (fieldmode) == MODE_INT
	      || GET_MODE_CLASS (fieldmode) == MODE_PARTIAL_INT)
	    op0 = SUBREG_REG (op0);
	  else
	    /* Else we've got some float mode source being extracted into
	       a different float mode destination -- this combination of
	       subregs results in Severe Tire Damage.  */
	    abort ();
	}

      emit_insn (GEN_FCN (icode)
		 (gen_rtx_SUBREG (fieldmode, op0, offset), value));

      return value;
    }
  /* Handle fields bigger than a word.  */

  if (bitsize > BITS_PER_WORD)
    {
      /* Here we transfer the words of the field
	 in the order least significant first.
	 This is because the most significant word is the one which may
	 be less than full.
	 However, only do that if the value is not BLKmode.  */

      unsigned int backwards = WORDS_BIG_ENDIAN && fieldmode != BLKmode;
      unsigned int nwords = (bitsize + (BITS_PER_WORD - 1)) / BITS_PER_WORD;
      unsigned int i;

      /* This is the mode we must force value to, so that there will be enough
	 subwords to extract.  Note that fieldmode will often (always?) be
	 VOIDmode, because that is what store_field uses to indicate that this
	 is a bit field, but passing VOIDmode to operand_subword_force will
	 result in an abort.  */
      fieldmode = smallest_mode_for_size (nwords * BITS_PER_WORD, MODE_INT);

      for (i = 0; i < nwords; i++)
	{
	  /* If I is 0, use the low-order word in both field and target;
	     if I is 1, use the next to lowest word; and so on.  */
	  unsigned int wordnum = (backwards ? nwords - i - 1 : i);
	  unsigned int bit_offset = (backwards
				     ? MAX ((int) bitsize - ((int) i + 1)
					    * BITS_PER_WORD, 0)
				     : (int) i * BITS_PER_WORD);

	  store_bit_field (op0, MIN (BITS_PER_WORD,
				     bitsize - i * BITS_PER_WORD),
			   bitnum + bit_offset, word_mode,
			   operand_subword_force (value, wordnum,
						  (GET_MODE (value) == VOIDmode
						   ? fieldmode
						   : GET_MODE (value))),
			   align, total_size);
	}
      return value;
    }
  /* From here on we can assume that the field to be stored in is
     a full-word (whatever type that is), since it is shorter than a word.  */

  /* OFFSET is the number of words or bytes (UNIT says which)
     from STR_RTX to the first word or byte containing part of the field.  */

  if (GET_CODE (op0) != MEM)
    {
      if (offset != 0
	  || GET_MODE_SIZE (GET_MODE (op0)) > UNITS_PER_WORD)
	{
	  if (GET_CODE (op0) != REG)
	    {
	      /* Since this is a destination (lvalue), we can't copy it to a
		 pseudo.  We can trivially remove a SUBREG that does not
		 change the size of the operand.  Such a SUBREG may have been
		 added above.  Otherwise, abort.  */
	      if (GET_CODE (op0) == SUBREG
		  && (GET_MODE_SIZE (GET_MODE (op0))
		      == GET_MODE_SIZE (GET_MODE (SUBREG_REG (op0)))))
		op0 = SUBREG_REG (op0);
	      else
		abort ();
	    }
	  op0 = gen_rtx_SUBREG (mode_for_size (BITS_PER_WORD, MODE_INT, 0),
				op0, offset);
	  offset = 0;
	}
    }
  else
    op0 = protect_from_queue (op0, 1);
  /* If VALUE is a floating-point mode, access it as an integer of the
     corresponding size.  This can occur on a machine with 64 bit registers
     that uses SFmode for float.  This can also occur for unaligned float
     structure fields.  */
  if (GET_MODE_CLASS (GET_MODE (value)) == MODE_FLOAT)
    {
      if (GET_CODE (value) != REG)
	value = copy_to_reg (value);
      value = gen_rtx_SUBREG (word_mode, value, 0);
    }
  /* Now OFFSET is nonzero only if OP0 is memory
     and is therefore always measured in bytes.  */

#ifdef HAVE_insv
  if (HAVE_insv
      && GET_MODE (value) != BLKmode
      && !(bitsize == 1 && GET_CODE (value) == CONST_INT)
      /* Ensure insv's size is wide enough for this field.  */
      && (insv_bitsize >= bitsize)
      && ! ((GET_CODE (op0) == REG || GET_CODE (op0) == SUBREG)
	    && (bitsize + bitpos > insv_bitsize)))
    {
      int xbitpos = bitpos;
      rtx value1;
      rtx xop0 = op0;
      rtx last = get_last_insn ();
      rtx pat;
      enum machine_mode maxmode;
      int save_volatile_ok = volatile_ok;

      maxmode = insn_data[(int) CODE_FOR_insv].operand[3].mode;
      if (maxmode == VOIDmode)
	maxmode = word_mode;

      volatile_ok = 1;
      /* If this machine's insv can only insert into a register, copy OP0
	 into a register and save it back later.  */
      /* This used to check flag_force_mem, but that was a serious
	 de-optimization now that flag_force_mem is enabled by -O2.  */
      if (GET_CODE (op0) == MEM
	  && ! ((*insn_data[(int) CODE_FOR_insv].operand[0].predicate)
		(op0, VOIDmode)))
	{
	  rtx tempreg;
	  enum machine_mode bestmode;

	  /* Get the mode to use for inserting into this field.  If OP0 is
	     BLKmode, get the smallest mode consistent with the alignment.  If
	     OP0 is a non-BLKmode object that is no wider than MAXMODE, use its
	     mode.  Otherwise, use the smallest mode containing the field.  */

	  if (GET_MODE (op0) == BLKmode
	      || GET_MODE_SIZE (GET_MODE (op0)) > GET_MODE_SIZE (maxmode))
	    bestmode
	      = get_best_mode (bitsize, bitnum, align, maxmode,
			       MEM_VOLATILE_P (op0));
	  else
	    bestmode = GET_MODE (op0);

	  if (bestmode == VOIDmode
	      || (SLOW_UNALIGNED_ACCESS (bestmode, align)
		  && GET_MODE_BITSIZE (bestmode) > align))
	    goto insv_loses;

	  /* Adjust address to point to the containing unit of that mode.  */
	  unit = GET_MODE_BITSIZE (bestmode);
	  /* Compute offset as multiple of this unit, counting in bytes.  */
	  offset = (bitnum / unit) * GET_MODE_SIZE (bestmode);
	  bitpos = bitnum % unit;
	  op0 = change_address (op0, bestmode,
				plus_constant (XEXP (op0, 0), offset));

	  /* Fetch that unit, store the bitfield in it, then store
	     the unit.  */
	  tempreg = copy_to_reg (op0);
	  store_bit_field (tempreg, bitsize, bitpos, fieldmode, value,
			   align, total_size);
	  emit_move_insn (op0, tempreg);
	  return value;
	}
      volatile_ok = save_volatile_ok;
      /* Add OFFSET into OP0's address.  */
      if (GET_CODE (xop0) == MEM)
	xop0 = change_address (xop0, byte_mode,
			       plus_constant (XEXP (xop0, 0), offset));

      /* If xop0 is a register, we need it in MAXMODE
	 to make it acceptable to the format of insv.  */
      if (GET_CODE (xop0) == SUBREG)
	/* We can't just change the mode, because this might clobber op0,
	   and we will need the original value of op0 if insv fails.  */
	xop0 = gen_rtx_SUBREG (maxmode, SUBREG_REG (xop0), SUBREG_WORD (xop0));
      if (GET_CODE (xop0) == REG && GET_MODE (xop0) != maxmode)
	xop0 = gen_rtx_SUBREG (maxmode, xop0, 0);

      /* On big-endian machines, we count bits from the most significant.
	 If the bit field insn does not, we must invert.  */

      if (BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
	xbitpos = unit - bitsize - xbitpos;

      /* We have been counting XBITPOS within UNIT.
	 Count instead within the size of the register.  */
      if (BITS_BIG_ENDIAN && GET_CODE (xop0) != MEM)
	xbitpos += GET_MODE_BITSIZE (maxmode) - unit;

      unit = GET_MODE_BITSIZE (maxmode);
      /* Convert VALUE to maxmode (which insv insn wants) in VALUE1.  */

      value1 = value;
      if (GET_MODE (value) != maxmode)
	{
	  if (GET_MODE_BITSIZE (GET_MODE (value)) >= bitsize)
	    {
	      /* Optimization: Don't bother really extending VALUE
		 if it has all the bits we will actually use.  However,
		 if we must narrow it, be sure we do it correctly.  */

	      if (GET_MODE_SIZE (GET_MODE (value)) < GET_MODE_SIZE (maxmode))
		{
		  /* Avoid making subreg of a subreg, or of a mem.  */
		  if (GET_CODE (value1) != REG)
		    value1 = copy_to_reg (value1);
		  value1 = gen_rtx_SUBREG (maxmode, value1, 0);
		}
	      else
		value1 = gen_lowpart (maxmode, value1);
	    }
	  else if (!CONSTANT_P (value))
	    /* Parse phase is supposed to make VALUE's data type
	       match that of the component reference, which is a type
	       at least as wide as the field; so VALUE should have
	       a mode that corresponds to that type.  */
	    abort ();
	}

      /* If this machine's insv insists on a register,
	 get VALUE1 into a register.  */
      if (! ((*insn_data[(int) CODE_FOR_insv].operand[3].predicate)
	     (value1, maxmode)))
	value1 = force_reg (maxmode, value1);

      pat = gen_insv (xop0, GEN_INT (bitsize), GEN_INT (xbitpos), value1);
      if (pat)
	emit_insn (pat);
      else
	{
	  delete_insns_since (last);
	  store_fixed_bit_field (op0, offset, bitsize, bitpos, value, align);
	}
    }
  else
    insv_loses:
#endif
    /* Insv is not available; store using shifts and boolean ops.  */
    store_fixed_bit_field (op0, offset, bitsize, bitpos, value, align);

  return value;
}
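
/* Illustrative sketch (not part of GNU CC; compiled out): the BITNUM
   decomposition used by store_bit_field above, written with plain
   integers.  A bit number is split into a unit-sized OFFSET and a
   BITPOS within that unit, where the unit is a byte for memory and a
   word for registers.  The function and constants here are invented
   for illustration only.  */
#if 0
#include <assert.h>

static void
bitnum_decompose_demo (void)
{
  unsigned int unit = 8;		/* BITS_PER_UNIT, for a MEM */
  unsigned int bitnum = 21;		/* field starts at bit 21 */
  unsigned int offset = bitnum / unit;	/* 2: third byte */
  unsigned int bitpos = bitnum % unit;	/* 5: bit 5 within that byte */

  assert (offset * unit + bitpos == bitnum);
}
#endif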
/* Use shifts and boolean operations to store VALUE
   into a bit field of width BITSIZE
   in a memory location specified by OP0 except offset by OFFSET bytes.
   (OFFSET must be 0 if OP0 is a register.)
   The field starts at position BITPOS within the byte.
   (If OP0 is a register, it may be a full word or a narrower mode,
   but BITPOS still counts within a full word,
   which is significant on bigendian machines.)
   STRUCT_ALIGN is the alignment the structure is known to have.

   Note that protect_from_queue has already been done on OP0 and VALUE.  */

static void
store_fixed_bit_field (op0, offset, bitsize, bitpos, value, struct_align)
     register rtx op0;
     unsigned HOST_WIDE_INT offset, bitsize, bitpos;
     register rtx value;
     unsigned int struct_align;
{
  register enum machine_mode mode;
  unsigned int total_bits = BITS_PER_WORD;
  rtx subtarget, temp;
  int all_zero = 0;
  int all_one = 0;

  if (! SLOW_UNALIGNED_ACCESS (word_mode, struct_align))
    struct_align = BIGGEST_ALIGNMENT;

  /* There is a case not handled here:
     a structure with a known alignment of just a halfword
     and a field split across two aligned halfwords within the structure.
     Or likewise a structure with a known alignment of just a byte
     and a field split across two bytes.
     Such cases are not supposed to be able to occur.  */

  if (GET_CODE (op0) == REG || GET_CODE (op0) == SUBREG)
    {
      /* Special treatment for a bit field split across two registers.  */
      if (bitsize + bitpos > BITS_PER_WORD)
	{
	  store_split_bit_field (op0, bitsize, bitpos,
				 value, BITS_PER_WORD);
	  return;
	}
    }
  else
    {
      /* Get the proper mode to use for this field.  We want a mode that
	 includes the entire field.  If such a mode would be larger than
	 a word, we won't be doing the extraction the normal way.  */

      mode = get_best_mode (bitsize, bitpos + offset * BITS_PER_UNIT,
			    struct_align, word_mode,
			    GET_CODE (op0) == MEM && MEM_VOLATILE_P (op0));

      if (mode == VOIDmode)
	{
	  /* The only way this should occur is if the field spans word
	     boundaries.  */
	  store_split_bit_field (op0,
				 bitsize, bitpos + offset * BITS_PER_UNIT,
				 value, struct_align);
	  return;
	}

      total_bits = GET_MODE_BITSIZE (mode);

      /* Make sure bitpos is valid for the chosen mode.  Adjust BITPOS to
	 be in the range 0 to total_bits-1, and put any excess bytes in
	 OFFSET.  */
      if (bitpos >= total_bits)
	{
	  offset += (bitpos / total_bits) * (total_bits / BITS_PER_UNIT);
	  bitpos -= ((bitpos / total_bits) * (total_bits / BITS_PER_UNIT)
		     * BITS_PER_UNIT);
	}

      /* Get ref to an aligned byte, halfword, or word containing the field.
	 Adjust BITPOS to be position within a word,
	 and OFFSET to be the offset of that word.
	 Then alter OP0 to refer to that word.  */
      bitpos += (offset % (total_bits / BITS_PER_UNIT)) * BITS_PER_UNIT;
      offset -= (offset % (total_bits / BITS_PER_UNIT));
      op0 = change_address (op0, mode,
			    plus_constant (XEXP (op0, 0), offset));
    }

  mode = GET_MODE (op0);
  /* Now MODE is either some integral mode for a MEM as OP0,
     or is a full-word for a REG as OP0.  TOTAL_BITS corresponds.
     The bit field is contained entirely within OP0.
     BITPOS is the starting bit number within OP0.
     (OP0's mode may actually be narrower than MODE.)  */

  if (BYTES_BIG_ENDIAN)
    /* BITPOS is the distance between our msb
       and that of the containing datum.
       Convert it to the distance from the lsb.  */
    bitpos = total_bits - bitsize - bitpos;

  /* Now BITPOS is always the distance between our lsb
     and that of OP0.  */

  /* Shift VALUE left by BITPOS bits.  If VALUE is not constant,
     we must first convert its mode to MODE.  */

  if (GET_CODE (value) == CONST_INT)
    {
      register HOST_WIDE_INT v = INTVAL (value);

      if (bitsize < HOST_BITS_PER_WIDE_INT)
	v &= ((HOST_WIDE_INT) 1 << bitsize) - 1;

      if (v == 0)
	all_zero = 1;
      else if ((bitsize < HOST_BITS_PER_WIDE_INT
		&& v == ((HOST_WIDE_INT) 1 << bitsize) - 1)
	       || (bitsize == HOST_BITS_PER_WIDE_INT && v == -1))
	all_one = 1;

      value = lshift_value (mode, value, bitpos, bitsize);
    }
  else
    {
      int must_and = (GET_MODE_BITSIZE (GET_MODE (value)) != bitsize
		      && bitpos + bitsize != GET_MODE_BITSIZE (mode));

      if (GET_MODE (value) != mode)
	{
	  if ((GET_CODE (value) == REG || GET_CODE (value) == SUBREG)
	      && GET_MODE_SIZE (mode) < GET_MODE_SIZE (GET_MODE (value)))
	    value = gen_lowpart (mode, value);
	  else
	    value = convert_to_mode (mode, value, 1);
	}

      if (must_and)
	value = expand_binop (mode, and_optab, value,
			      mask_rtx (mode, 0, bitsize, 0),
			      NULL_RTX, 1, OPTAB_LIB_WIDEN);
      if (bitpos > 0)
	value = expand_shift (LSHIFT_EXPR, mode, value,
			      build_int_2 (bitpos, 0), NULL_RTX, 1);
    }
  /* Now clear the chosen bits in OP0,
     except that if VALUE is -1 we need not bother.  */

  subtarget = (GET_CODE (op0) == REG || ! flag_force_mem) ? op0 : 0;

  if (! all_one)
    {
      temp = expand_binop (mode, and_optab, op0,
			   mask_rtx (mode, bitpos, bitsize, 1),
			   subtarget, 1, OPTAB_LIB_WIDEN);
      subtarget = temp;
    }
  else
    temp = op0;

  /* Now logical-or VALUE into OP0, unless it is zero.  */

  if (! all_zero)
    {
      temp = expand_binop (mode, ior_optab, temp, value,
			   subtarget, 1, OPTAB_LIB_WIDEN);
      subtarget = temp;
    }

  if (op0 != temp)
    emit_move_insn (op0, temp);
}
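
/* Illustrative sketch (not part of GNU CC; compiled out): the
   clear-then-or sequence above, written with plain integers.  To store
   V into a BITSIZE-wide field at BITPOS of word X, mask out the old
   bits and OR in the shifted new value.  The helper names are invented
   for illustration only.  */
#if 0
#include <assert.h>
#include <stdint.h>

static uint32_t
store_field_demo (uint32_t x, unsigned int bitpos, unsigned int bitsize,
		  uint32_t v)
{
  /* BITSIZE ones, shifted up to BITPOS; handles bitsize == 32 via
     unsigned wraparound of (0 - 1).  */
  uint32_t mask = ((bitsize < 32 ? ((uint32_t) 1 << bitsize) : 0) - 1)
		  << bitpos;

  return (x & ~mask) | ((v << bitpos) & mask);
}

static void
store_field_demo_check (void)
{
  /* Store 0x5 into bits 4..7 of 0xffffffff.  */
  assert (store_field_demo (0xffffffff, 4, 4, 0x5) == 0xffffff5f);
}
#endif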
/* Store a bit field that is split across multiple accessible memory objects.

   OP0 is the REG, SUBREG or MEM rtx for the first of the objects.
   BITSIZE is the field width; BITPOS the position of its first bit
   (within the word).
   VALUE is the value to store.
   ALIGN is the known alignment of OP0.
   This is also the size of the memory objects to be used.

   This does not yet handle fields wider than BITS_PER_WORD.  */

static void
store_split_bit_field (op0, bitsize, bitpos, value, align)
     rtx op0;
     unsigned HOST_WIDE_INT bitsize, bitpos;
     rtx value;
     unsigned int align;
{
  unsigned int unit;
  unsigned int bitsdone = 0;

  /* Make sure UNIT isn't larger than BITS_PER_WORD, we can only handle that
     much at a time.  */
  if (GET_CODE (op0) == REG || GET_CODE (op0) == SUBREG)
    unit = BITS_PER_WORD;
  else
    unit = MIN (align, BITS_PER_WORD);

  /* If VALUE is a constant other than a CONST_INT, get it into a register in
     WORD_MODE.  If we can do this using gen_lowpart_common, do so.  Note
     that VALUE might be a floating-point constant.  */
  if (CONSTANT_P (value) && GET_CODE (value) != CONST_INT)
    {
      rtx word = gen_lowpart_common (word_mode, value);

      if (word && (value != word))
	value = word;
      else
	value = gen_lowpart_common (word_mode,
				    force_reg (GET_MODE (value) != VOIDmode
					       ? GET_MODE (value)
					       : word_mode, value));
    }
  else if (GET_CODE (value) == ADDRESSOF)
    value = copy_to_reg (value);
  while (bitsdone < bitsize)
    {
      unsigned HOST_WIDE_INT thissize;
      rtx part, word;
      unsigned HOST_WIDE_INT thispos;
      unsigned HOST_WIDE_INT offset;

      offset = (bitpos + bitsdone) / unit;
      thispos = (bitpos + bitsdone) % unit;

      /* THISSIZE must not overrun a word boundary.  Otherwise,
	 store_fixed_bit_field will call us again, and we will mutually
	 recurse forever.  */
      thissize = MIN (bitsize - bitsdone, BITS_PER_WORD);
      thissize = MIN (thissize, unit - thispos);

      if (BYTES_BIG_ENDIAN)
	{
	  int total_bits;

	  /* We must do an endian conversion exactly the same way as it is
	     done in extract_bit_field, so that the two calls to
	     extract_fixed_bit_field will have comparable arguments.  */
	  if (GET_CODE (value) != MEM || GET_MODE (value) == BLKmode)
	    total_bits = BITS_PER_WORD;
	  else
	    total_bits = GET_MODE_BITSIZE (GET_MODE (value));

	  /* Fetch successively less significant portions.  */
	  if (GET_CODE (value) == CONST_INT)
	    part = GEN_INT (((unsigned HOST_WIDE_INT) (INTVAL (value))
			     >> (bitsize - bitsdone - thissize))
			    & (((HOST_WIDE_INT) 1 << thissize) - 1));
	  else
	    /* The args are chosen so that the last part includes the
	       lsb.  Give extract_bit_field the value it needs (with
	       endianness compensation) to fetch the piece we want.

	       ??? We have no idea what the alignment of VALUE is, so
	       we have to use a guess.  */
	    part
	      = extract_fixed_bit_field
		(word_mode, value, 0, thissize,
		 total_bits - bitsize + bitsdone, NULL_RTX, 1,
		 GET_MODE (value) == VOIDmode
		 ? BITS_PER_WORD
		 : (GET_MODE (value) == BLKmode
		    ? 1 : GET_MODE_ALIGNMENT (GET_MODE (value))));
	}
      else
	{
	  /* Fetch successively more significant portions.  */
	  if (GET_CODE (value) == CONST_INT)
	    part = GEN_INT (((unsigned HOST_WIDE_INT) (INTVAL (value))
			     >> bitsdone)
			    & (((HOST_WIDE_INT) 1 << thissize) - 1));
	  else
	    part
	      = extract_fixed_bit_field
		(word_mode, value, 0, thissize, bitsdone, NULL_RTX, 1,
		 GET_MODE (value) == VOIDmode
		 ? BITS_PER_WORD
		 : (GET_MODE (value) == BLKmode
		    ? 1 : GET_MODE_ALIGNMENT (GET_MODE (value))));
	}
      /* If OP0 is a register, then handle OFFSET here.

	 When handling multiword bitfields, extract_bit_field may pass
	 down a word_mode SUBREG of a larger REG for a bitfield that actually
	 crosses a word boundary.  Thus, for a SUBREG, we must find
	 the current word starting from the base register.  */
      if (GET_CODE (op0) == SUBREG)
	{
	  word = operand_subword_force (SUBREG_REG (op0),
					SUBREG_WORD (op0) + offset,
					GET_MODE (SUBREG_REG (op0)));
	  offset = 0;
	}
      else if (GET_CODE (op0) == REG)
	{
	  word = operand_subword_force (op0, offset, GET_MODE (op0));
	  offset = 0;
	}
      else
	word = op0;

      /* OFFSET is in UNITs, and UNIT is in bits.
	 store_fixed_bit_field wants offset in bytes.  */
      store_fixed_bit_field (word, offset * unit / BITS_PER_UNIT,
			     thissize, thispos, part, align);
      bitsdone += thissize;
    }
}
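
/* Illustrative sketch (not part of GNU CC; compiled out): storing a
   field that straddles two words, as the loop above does.  Each pass
   stores MIN (bitsize - bitsdone, unit - thispos) bits, so a 10-bit
   field at bit 28 of a 32-bit word pair is stored as 4 bits, then 6.
   Little-endian ordering and the helper name are assumptions of this
   sketch.  */
#if 0
#include <assert.h>
#include <stdint.h>

static void
split_store_demo (uint32_t w[2], unsigned int bitpos, unsigned int bitsize,
		  uint32_t v)
{
  unsigned int unit = 32, bitsdone = 0;

  while (bitsdone < bitsize)
    {
      unsigned int offset = (bitpos + bitsdone) / unit;
      unsigned int thispos = (bitpos + bitsdone) % unit;
      unsigned int thissize = bitsize - bitsdone;
      uint32_t part, mask;

      if (thissize > unit - thispos)
	thissize = unit - thispos;	/* do not overrun the word */

      /* Fetch successively more significant portions.  */
      part = (v >> bitsdone) & (((uint32_t) 1 << thissize) - 1);
      mask = (((uint32_t) 1 << thissize) - 1) << thispos;
      w[offset] = (w[offset] & ~mask) | (part << thispos);
      bitsdone += thissize;
    }
}

static void
split_store_demo_check (void)
{
  uint32_t w[2] = { 0, 0 };

  split_store_demo (w, 28, 10, 0x3ff);
  assert (w[0] == 0xf0000000 && w[1] == 0x3f);
}
#endif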
/* Generate code to extract a byte-field from STR_RTX
   containing BITSIZE bits, starting at BITNUM,
   and put it in TARGET if possible (if TARGET is nonzero).
   Regardless of TARGET, we return the rtx for where the value is placed.
   It may be a QUEUED.

   STR_RTX is the structure containing the byte (a REG or MEM).
   UNSIGNEDP is nonzero if this is an unsigned bit field.
   MODE is the natural mode of the field value once extracted.
   TMODE is the mode the caller would like the value to have;
   but the value may be returned with type MODE instead.

   ALIGN is the alignment that STR_RTX is known to have.
   TOTAL_SIZE is the size in bytes of the containing structure,
   or -1 if varying.

   If a TARGET is specified and we can store in it at no extra cost,
   we do so, and return TARGET.
   Otherwise, we return a REG of mode TMODE or MODE, with TMODE preferred
   if they are equally easy.  */
rtx
extract_bit_field (str_rtx, bitsize, bitnum, unsignedp,
		   target, mode, tmode, align, total_size)
     rtx str_rtx;
     unsigned HOST_WIDE_INT bitsize;
     unsigned HOST_WIDE_INT bitnum;
     int unsignedp;
     rtx target;
     enum machine_mode mode, tmode;
     unsigned int align;
     HOST_WIDE_INT total_size;
{
  unsigned int unit
    = (GET_CODE (str_rtx) == MEM) ? BITS_PER_UNIT : BITS_PER_WORD;
  unsigned HOST_WIDE_INT offset = bitnum / unit;
  unsigned HOST_WIDE_INT bitpos = bitnum % unit;
  register rtx op0 = str_rtx;
  rtx spec_target = target;
  rtx spec_target_subreg = 0;
  enum machine_mode int_mode;
#ifdef HAVE_extv
  unsigned HOST_WIDE_INT extv_bitsize;
  enum machine_mode extv_mode;
#endif
#ifdef HAVE_extzv
  unsigned HOST_WIDE_INT extzv_bitsize;
  enum machine_mode extzv_mode;
#endif

#ifdef HAVE_extv
  extv_mode = insn_data[(int) CODE_FOR_extv].operand[0].mode;
  if (extv_mode == VOIDmode)
    extv_mode = word_mode;
  extv_bitsize = GET_MODE_BITSIZE (extv_mode);
#endif

#ifdef HAVE_extzv
  extzv_mode = insn_data[(int) CODE_FOR_extzv].operand[0].mode;
  if (extzv_mode == VOIDmode)
    extzv_mode = word_mode;
  extzv_bitsize = GET_MODE_BITSIZE (extzv_mode);
#endif
  /* Discount the part of the structure before the desired byte.
     We need to know how many bytes are safe to reference after it.  */
  if (total_size >= 0)
    total_size -= (bitpos / BIGGEST_ALIGNMENT
		   * (BIGGEST_ALIGNMENT / BITS_PER_UNIT));

  if (tmode == VOIDmode)
    tmode = mode;
  while (GET_CODE (op0) == SUBREG)
    {
      int outer_size = GET_MODE_BITSIZE (GET_MODE (op0));
      int inner_size = GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (op0)));

      offset += SUBREG_WORD (op0);

      inner_size = MIN (inner_size, BITS_PER_WORD);

      if (BYTES_BIG_ENDIAN && (outer_size < inner_size))
	{
	  bitpos += inner_size - outer_size;
	  if (bitpos > unit)
	    {
	      offset += (bitpos / unit);
	      bitpos %= unit;
	    }
	}

      op0 = SUBREG_REG (op0);
    }
  /* Make sure we are playing with integral modes.  Pun with subregs
     if we aren't.  */
  {
    enum machine_mode imode = int_mode_for_mode (GET_MODE (op0));
    if (imode != GET_MODE (op0))
      {
	if (GET_CODE (op0) == MEM)
	  op0 = change_address (op0, imode, NULL_RTX);
	else if (imode != BLKmode)
	  op0 = gen_lowpart (imode, op0);
	else
	  abort ();
      }
  }
  /* ??? We currently assume TARGET is at least as big as BITSIZE.
     If that's wrong, the solution is to test for it and set TARGET to 0
     if needed.  */

  /* If OP0 is a register, BITPOS must count within a word.
     But as we have it, it counts within whatever size OP0 now has.
     On a bigendian machine, these are not the same, so convert.  */
  if (BYTES_BIG_ENDIAN
      && GET_CODE (op0) != MEM
      && unit > GET_MODE_BITSIZE (GET_MODE (op0)))
    bitpos += unit - GET_MODE_BITSIZE (GET_MODE (op0));
  /* Extracting a full-word or multi-word value
     from a structure in a register or aligned memory.
     This can be done with just SUBREG.
     So too extracting a subword value in
     the least significant part of the register.  */

  if (((GET_CODE (op0) != MEM
	&& TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
				  GET_MODE_BITSIZE (GET_MODE (op0))))
       || (GET_CODE (op0) == MEM
	   && (! SLOW_UNALIGNED_ACCESS (mode, align)
	       || (offset * BITS_PER_UNIT % bitsize == 0
		   && align % bitsize == 0))))
      && ((bitsize >= BITS_PER_WORD && bitsize == GET_MODE_BITSIZE (mode)
	   && bitpos % BITS_PER_WORD == 0)
	  || (mode_for_size (bitsize, GET_MODE_CLASS (tmode), 0) != BLKmode
	      /* ??? The big endian test here is wrong.  This is correct
		 if the value is in a register, and if mode_for_size is not
		 the same mode as op0.  This causes us to get unnecessarily
		 inefficient code from the Thumb port when -mbig-endian.  */
	      && (BYTES_BIG_ENDIAN
		  ? bitpos + bitsize == BITS_PER_WORD
		  : bitpos == 0))))
    {
      enum machine_mode mode1
	= (VECTOR_MODE_P (tmode) ? mode
	   : mode_for_size (bitsize, GET_MODE_CLASS (tmode), 0));

      if (mode1 != GET_MODE (op0))
	{
	  if (GET_CODE (op0) == SUBREG)
	    {
	      if (GET_MODE (SUBREG_REG (op0)) == mode1
		  || GET_MODE_CLASS (mode1) == MODE_INT
		  || GET_MODE_CLASS (mode1) == MODE_PARTIAL_INT)
		op0 = SUBREG_REG (op0);
	      else
		/* Else we've got some float mode source being extracted into
		   a different float mode destination -- this combination of
		   subregs results in Severe Tire Damage.  */
		abort ();
	    }

	  if (GET_CODE (op0) == REG)
	    op0 = gen_rtx_SUBREG (mode1, op0, offset);
	  else
	    op0 = change_address (op0, mode1,
				  plus_constant (XEXP (op0, 0), offset));
	}
      if (mode1 != mode)
	return convert_to_mode (tmode, op0, unsignedp);
      return op0;
    }
  /* Handle fields bigger than a word.  */

  if (bitsize > BITS_PER_WORD)
    {
      /* Here we transfer the words of the field
	 in the order least significant first.
	 This is because the most significant word is the one which may
	 be less than full.  */

      unsigned int nwords = (bitsize + (BITS_PER_WORD - 1)) / BITS_PER_WORD;
      unsigned int i;

      if (target == 0 || GET_CODE (target) != REG)
	target = gen_reg_rtx (mode);

      /* Indicate for flow that the entire target reg is being set.  */
      emit_insn (gen_rtx_CLOBBER (VOIDmode, target));

      for (i = 0; i < nwords; i++)
	{
	  /* If I is 0, use the low-order word in both field and target;
	     if I is 1, use the next to lowest word; and so on.  */
	  /* Word number in TARGET to use.  */
	  unsigned int wordnum
	    = (WORDS_BIG_ENDIAN
	       ? GET_MODE_SIZE (GET_MODE (target)) / UNITS_PER_WORD - i - 1
	       : i);
	  /* Offset from start of field in OP0.  */
	  unsigned int bit_offset = (WORDS_BIG_ENDIAN
				     ? MAX (0, ((int) bitsize - ((int) i + 1)
						* (int) BITS_PER_WORD))
				     : (int) i * BITS_PER_WORD);
	  rtx target_part = operand_subword (target, wordnum, 1, VOIDmode);
	  rtx result_part
	    = extract_bit_field (op0, MIN (BITS_PER_WORD,
					   bitsize - i * BITS_PER_WORD),
				 bitnum + bit_offset, 1, target_part, mode,
				 word_mode, align, total_size);

	  if (target_part == 0)
	    abort ();

	  if (result_part != target_part)
	    emit_move_insn (target_part, result_part);
	}

      if (unsignedp)
	{
	  /* Unless we've filled TARGET, the upper regs in a multi-reg value
	     need to be zero'd out.  */
	  if (GET_MODE_SIZE (GET_MODE (target)) > nwords * UNITS_PER_WORD)
	    {
	      unsigned int i, total_words;

	      total_words = GET_MODE_SIZE (GET_MODE (target)) / UNITS_PER_WORD;
	      for (i = nwords; i < total_words; i++)
		{
		  int wordnum = WORDS_BIG_ENDIAN ? total_words - i - 1 : i;
		  rtx target_part = operand_subword (target, wordnum, 1,
						     VOIDmode);

		  emit_move_insn (target_part, const0_rtx);
		}
	    }
	  return target;
	}

      /* Signed bit field: sign-extend with two arithmetic shifts.  */
      target = expand_shift (LSHIFT_EXPR, mode, target,
			     build_int_2 (GET_MODE_BITSIZE (mode) - bitsize, 0),
			     NULL_RTX, 0);
      return expand_shift (RSHIFT_EXPR, mode, target,
			   build_int_2 (GET_MODE_BITSIZE (mode) - bitsize, 0),
			   NULL_RTX, 0);
    }
  /* From here on we know the desired field is smaller than a word.  */

  /* Check if there is a correspondingly-sized integer field, so we can
     safely extract it as one size of integer, if necessary; then
     truncate or extend to the size that is wanted; then use SUBREGs or
     convert_to_mode to get one of the modes we really wanted.  */

  int_mode = int_mode_for_mode (tmode);
  if (int_mode == BLKmode)
    int_mode = int_mode_for_mode (mode);
  if (int_mode == BLKmode)
    abort ();	/* Should probably push op0 out to memory and then
		   do a load.  */

  /* OFFSET is the number of words or bytes (UNIT says which)
     from STR_RTX to the first word or byte containing part of the field.  */

  if (GET_CODE (op0) != MEM)
    {
      if (offset != 0
	  || GET_MODE_SIZE (GET_MODE (op0)) > UNITS_PER_WORD)
	{
	  if (GET_CODE (op0) != REG)
	    op0 = copy_to_reg (op0);
	  op0 = gen_rtx_SUBREG (mode_for_size (BITS_PER_WORD, MODE_INT, 0),
				op0, offset);
	}
      offset = 0;
    }
  else
    op0 = protect_from_queue (str_rtx, 1);
  /* Now OFFSET is nonzero only for memory operands.  */

  if (unsignedp)
    {
#ifdef HAVE_extzv
      if (HAVE_extzv
	  && (extzv_bitsize >= bitsize)
	  && ! ((GET_CODE (op0) == REG || GET_CODE (op0) == SUBREG)
		&& (bitsize + bitpos > extzv_bitsize)))
	{
	  unsigned HOST_WIDE_INT xbitpos = bitpos, xoffset = offset;
	  rtx bitsize_rtx, bitpos_rtx;
	  rtx last = get_last_insn ();
	  rtx xop0 = op0;
	  rtx xtarget = target;
	  rtx xspec_target = spec_target;
	  rtx xspec_target_subreg = spec_target_subreg;
	  rtx pat;
	  enum machine_mode maxmode;

	  maxmode = insn_data[(int) CODE_FOR_extzv].operand[0].mode;
	  if (maxmode == VOIDmode)
	    maxmode = word_mode;

	  if (GET_CODE (xop0) == MEM)
	    {
	      int save_volatile_ok = volatile_ok;
	      volatile_ok = 1;

	      /* Is the memory operand acceptable?  */
	      if (! ((*insn_data[(int) CODE_FOR_extzv].operand[1].predicate)
		     (xop0, GET_MODE (xop0))))
		{
		  /* No, load into a reg and extract from there.  */
		  enum machine_mode bestmode;

		  /* Get the mode to use for inserting into this field.  If
		     OP0 is BLKmode, get the smallest mode consistent with the
		     alignment.  If OP0 is a non-BLKmode object that is no
		     wider than MAXMODE, use its mode.  Otherwise, use the
		     smallest mode containing the field.  */

		  if (GET_MODE (xop0) == BLKmode
		      || (GET_MODE_SIZE (GET_MODE (op0))
			  > GET_MODE_SIZE (maxmode)))
		    bestmode = get_best_mode (bitsize, bitnum, align, maxmode,
					      MEM_VOLATILE_P (xop0));
		  else
		    bestmode = GET_MODE (xop0);

		  if (bestmode == VOIDmode
		      || (SLOW_UNALIGNED_ACCESS (bestmode, align)
			  && GET_MODE_BITSIZE (bestmode) > align))
		    goto extzv_loses;

		  /* Compute offset as multiple of this unit,
		     counting in bytes.  */
		  unit = GET_MODE_BITSIZE (bestmode);
		  xoffset = (bitnum / unit) * GET_MODE_SIZE (bestmode);
		  xbitpos = bitnum % unit;
		  xop0 = change_address (xop0, bestmode,
					 plus_constant (XEXP (xop0, 0),
							xoffset));
		  /* Fetch it to a register in that size.  */
		  xop0 = force_reg (bestmode, xop0);

		  /* XBITPOS counts within UNIT, which is what is expected.  */
		}
	      else
		/* Get ref to first byte containing part of the field.  */
		xop0 = change_address (xop0, byte_mode,
				       plus_constant (XEXP (xop0, 0), xoffset));

	      volatile_ok = save_volatile_ok;
	    }

	  /* If op0 is a register, we need it in MAXMODE (which is usually
	     SImode) to make it acceptable to the format of extzv.  */
	  if (GET_CODE (xop0) == SUBREG && GET_MODE (xop0) != maxmode)
	    goto extzv_loses;
	  if (GET_CODE (xop0) == REG && GET_MODE (xop0) != maxmode)
	    xop0 = gen_rtx_SUBREG (maxmode, xop0, 0);

	  /* On big-endian machines, we count bits from the most significant.
	     If the bit field insn does not, we must invert.  */
	  if (BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
	    xbitpos = unit - bitsize - xbitpos;

	  /* Now convert from counting within UNIT to counting in MAXMODE.  */
	  if (BITS_BIG_ENDIAN && GET_CODE (xop0) != MEM)
	    xbitpos += GET_MODE_BITSIZE (maxmode) - unit;

	  unit = GET_MODE_BITSIZE (maxmode);

	  if (xtarget == 0
	      || (flag_force_mem && GET_CODE (xtarget) == MEM))
	    xtarget = xspec_target = gen_reg_rtx (tmode);

	  if (GET_MODE (xtarget) != maxmode)
	    {
	      if (GET_CODE (xtarget) == REG)
		{
		  int wider = (GET_MODE_SIZE (maxmode)
			       > GET_MODE_SIZE (GET_MODE (xtarget)));
		  xtarget = gen_lowpart (maxmode, xtarget);
		  if (wider)
		    xspec_target_subreg = xtarget;
		}
	      else
		xtarget = gen_reg_rtx (maxmode);
	    }

	  /* If this machine's extzv insists on a register target,
	     make sure we have one.  */
	  if (! ((*insn_data[(int) CODE_FOR_extzv].operand[0].predicate)
		 (xtarget, maxmode)))
	    xtarget = gen_reg_rtx (maxmode);

	  bitsize_rtx = GEN_INT (bitsize);
	  bitpos_rtx = GEN_INT (xbitpos);

	  pat = gen_extzv (protect_from_queue (xtarget, 1),
			   xop0, bitsize_rtx, bitpos_rtx);
	  if (pat)
	    {
	      emit_insn (pat);
	      target = xtarget;
	      spec_target = xspec_target;
	      spec_target_subreg = xspec_target_subreg;
	    }
	  else
	    {
	      delete_insns_since (last);
	      target = extract_fixed_bit_field (int_mode, op0, offset, bitsize,
						bitpos, target, 1, align);
	    }
	}
      else
	extzv_loses:
#endif
	target = extract_fixed_bit_field (int_mode, op0, offset, bitsize,
					  bitpos, target, 1, align);
    }
  else
    {
#ifdef HAVE_extv
      if (HAVE_extv
	  && (extv_bitsize >= bitsize)
	  && ! ((GET_CODE (op0) == REG || GET_CODE (op0) == SUBREG)
		&& (bitsize + bitpos > extv_bitsize)))
	{
	  int xbitpos = bitpos, xoffset = offset;
	  rtx bitsize_rtx, bitpos_rtx;
	  rtx last = get_last_insn ();
	  rtx xop0 = op0, xtarget = target;
	  rtx xspec_target = spec_target;
	  rtx xspec_target_subreg = spec_target_subreg;
	  rtx pat;
	  enum machine_mode maxmode;

	  maxmode = insn_data[(int) CODE_FOR_extv].operand[0].mode;
	  if (maxmode == VOIDmode)
	    maxmode = word_mode;

	  if (GET_CODE (xop0) == MEM)
	    {
	      /* Is the memory operand acceptable?  */
	      if (! ((*insn_data[(int) CODE_FOR_extv].operand[1].predicate)
		     (xop0, GET_MODE (xop0))))
		{
		  /* No, load into a reg and extract from there.  */
		  enum machine_mode bestmode;

		  /* Get the mode to use for inserting into this field.  If
		     OP0 is BLKmode, get the smallest mode consistent with the
		     alignment.  If OP0 is a non-BLKmode object that is no
		     wider than MAXMODE, use its mode.  Otherwise, use the
		     smallest mode containing the field.  */

		  if (GET_MODE (xop0) == BLKmode
		      || (GET_MODE_SIZE (GET_MODE (op0))
			  > GET_MODE_SIZE (maxmode)))
		    bestmode = get_best_mode (bitsize, bitnum, align, maxmode,
					      MEM_VOLATILE_P (xop0));
		  else
		    bestmode = GET_MODE (xop0);

		  if (bestmode == VOIDmode
		      || (SLOW_UNALIGNED_ACCESS (bestmode, align)
			  && GET_MODE_BITSIZE (bestmode) > align))
		    goto extv_loses;

		  /* Compute offset as multiple of this unit,
		     counting in bytes.  */
		  unit = GET_MODE_BITSIZE (bestmode);
		  xoffset = (bitnum / unit) * GET_MODE_SIZE (bestmode);
		  xbitpos = bitnum % unit;
		  xop0 = change_address (xop0, bestmode,
					 plus_constant (XEXP (xop0, 0),
							xoffset));
		  /* Fetch it to a register in that size.  */
		  xop0 = force_reg (bestmode, xop0);

		  /* XBITPOS counts within UNIT, which is what is expected.  */
		}
	      else
		/* Get ref to first byte containing part of the field.  */
		xop0 = change_address (xop0, byte_mode,
				       plus_constant (XEXP (xop0, 0), xoffset));
	    }

	  /* If op0 is a register, we need it in MAXMODE (which is usually
	     SImode) to make it acceptable to the format of extv.  */
	  if (GET_CODE (xop0) == SUBREG && GET_MODE (xop0) != maxmode)
	    goto extv_loses;
	  if (GET_CODE (xop0) == REG && GET_MODE (xop0) != maxmode)
	    xop0 = gen_rtx_SUBREG (maxmode, xop0, 0);

	  /* On big-endian machines, we count bits from the most significant.
	     If the bit field insn does not, we must invert.  */
	  if (BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
	    xbitpos = unit - bitsize - xbitpos;

	  /* XBITPOS counts within a size of UNIT.
	     Adjust to count within a size of MAXMODE.  */
	  if (BITS_BIG_ENDIAN && GET_CODE (xop0) != MEM)
	    xbitpos += (GET_MODE_BITSIZE (maxmode) - unit);

	  unit = GET_MODE_BITSIZE (maxmode);

	  if (xtarget == 0
	      || (flag_force_mem && GET_CODE (xtarget) == MEM))
	    xtarget = xspec_target = gen_reg_rtx (tmode);

	  if (GET_MODE (xtarget) != maxmode)
	    {
	      if (GET_CODE (xtarget) == REG)
		{
		  int wider = (GET_MODE_SIZE (maxmode)
			       > GET_MODE_SIZE (GET_MODE (xtarget)));
		  xtarget = gen_lowpart (maxmode, xtarget);
		  if (wider)
		    xspec_target_subreg = xtarget;
		}
	      else
		xtarget = gen_reg_rtx (maxmode);
	    }

	  /* If this machine's extv insists on a register target,
	     make sure we have one.  */
	  if (! ((*insn_data[(int) CODE_FOR_extv].operand[0].predicate)
		 (xtarget, maxmode)))
	    xtarget = gen_reg_rtx (maxmode);

	  bitsize_rtx = GEN_INT (bitsize);
	  bitpos_rtx = GEN_INT (xbitpos);

	  pat = gen_extv (protect_from_queue (xtarget, 1),
			  xop0, bitsize_rtx, bitpos_rtx);
	  if (pat)
	    {
	      emit_insn (pat);
	      target = xtarget;
	      spec_target = xspec_target;
	      spec_target_subreg = xspec_target_subreg;
	    }
	  else
	    {
	      delete_insns_since (last);
	      target = extract_fixed_bit_field (int_mode, op0, offset, bitsize,
						bitpos, target, 0, align);
	    }
	}
      else
	extv_loses:
#endif
	target = extract_fixed_bit_field (int_mode, op0, offset, bitsize,
					  bitpos, target, 0, align);
    }
  if (target == spec_target)
    return target;
  if (target == spec_target_subreg)
    return spec_target;
  if (GET_MODE (target) != tmode && GET_MODE (target) != mode)
    {
      /* If the target mode is floating-point, first convert to the
	 integer mode of that size and then access it as a floating-point
	 value via a SUBREG.  */
      if (GET_MODE_CLASS (tmode) == MODE_FLOAT)
	{
	  target = convert_to_mode (mode_for_size (GET_MODE_BITSIZE (tmode),
						   MODE_INT, 0),
				    target, unsignedp);
	  if (GET_CODE (target) != REG)
	    target = copy_to_reg (target);
	  return gen_rtx_SUBREG (tmode, target, 0);
	}
      else
	return convert_to_mode (tmode, target, unsignedp);
    }
  return target;
}
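
/* Illustrative sketch (not part of GNU CC; compiled out): the bit
   position inversion used when BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN
   above.  Counting a BITSIZE-bit field from the most significant end
   at position P is the same as counting from the least significant end
   at UNIT - BITSIZE - P.  The function name is invented for
   illustration only.  */
#if 0
#include <assert.h>

static void
endian_flip_demo (void)
{
  unsigned int unit = 32, bitsize = 8, xbitpos = 4;

  /* An 8-bit field 4 bits below the msb starts 20 bits above the lsb.  */
  assert (unit - bitsize - xbitpos == 20);
  /* The flip is an involution: applying it twice gives back 4.  */
  assert (unit - bitsize - (unit - bitsize - xbitpos) == 4);
}
#endif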
/* Extract a bit field using shifts and boolean operations.
   Returns an rtx to represent the value.
   OP0 addresses a register (word) or memory (byte).
   BITPOS says which bit within the word or byte the bit field starts in.
   OFFSET says how many bytes farther the bit field starts;
   it is 0 if OP0 is a register.
   BITSIZE says how many bits long the bit field is.
   (If OP0 is a register, it may be narrower than a full word,
   but BITPOS still counts within a full word,
   which is significant on bigendian machines.)

   UNSIGNEDP is nonzero for an unsigned bit field (don't sign-extend value).
   If TARGET is nonzero, attempts to store the value there
   and return TARGET, but this is not guaranteed.
   If TARGET is not used, create a pseudo-reg of mode TMODE for the value.

   ALIGN is the alignment that STR_RTX is known to have.  */

static rtx
extract_fixed_bit_field (tmode, op0, offset, bitsize, bitpos,
			 target, unsignedp, align)
     enum machine_mode tmode;
     register rtx op0, target;
     unsigned HOST_WIDE_INT offset, bitsize, bitpos;
     int unsignedp;
     unsigned int align;
{
  unsigned int total_bits = BITS_PER_WORD;
  enum machine_mode mode;

  if (GET_CODE (op0) == SUBREG || GET_CODE (op0) == REG)
    {
      /* Special treatment for a bit field split across two registers.  */
      if (bitsize + bitpos > BITS_PER_WORD)
	return extract_split_bit_field (op0, bitsize, bitpos,
					unsignedp, align);
    }
  else
    {
      /* Get the proper mode to use for this field.  We want a mode that
	 includes the entire field.  If such a mode would be larger than
	 a word, we won't be doing the extraction the normal way.  */

      mode = get_best_mode (bitsize, bitpos + offset * BITS_PER_UNIT, align,
			    word_mode,
			    GET_CODE (op0) == MEM && MEM_VOLATILE_P (op0));

      if (mode == VOIDmode)
	/* The only way this should occur is if the field spans word
	   boundaries.  */
	return extract_split_bit_field (op0, bitsize,
					bitpos + offset * BITS_PER_UNIT,
					unsignedp, align);

      total_bits = GET_MODE_BITSIZE (mode);

      /* Make sure bitpos is valid for the chosen mode.  Adjust BITPOS to
	 be in the range 0 to total_bits-1, and put any excess bytes in
	 OFFSET.  */
      if (bitpos >= total_bits)
	{
	  offset += (bitpos / total_bits) * (total_bits / BITS_PER_UNIT);
	  bitpos -= ((bitpos / total_bits) * (total_bits / BITS_PER_UNIT)
		     * BITS_PER_UNIT);
	}

      /* Get ref to an aligned byte, halfword, or word containing the field.
	 Adjust BITPOS to be position within a word,
	 and OFFSET to be the offset of that word.
	 Then alter OP0 to refer to that word.  */
      bitpos += (offset % (total_bits / BITS_PER_UNIT)) * BITS_PER_UNIT;
      offset -= (offset % (total_bits / BITS_PER_UNIT));
      op0 = change_address (op0, mode,
			    plus_constant (XEXP (op0, 0), offset));
    }

  mode = GET_MODE (op0);
  if (BYTES_BIG_ENDIAN)
    {
      /* BITPOS is the distance between our msb and that of OP0.
	 Convert it to the distance from the lsb.  */

      bitpos = total_bits - bitsize - bitpos;
    }

  /* Now BITPOS is always the distance between the field's lsb and that of OP0.
     We have reduced the big-endian case to the little-endian case.  */

  if (unsignedp)
    {
      if (bitpos)
	{
	  /* If the field does not already start at the lsb,
	     shift it so it does.  */
	  tree amount = build_int_2 (bitpos, 0);
	  /* Maybe propagate the target for the shift.  */
	  /* But not if we will return it--could confuse integrate.c.  */
	  rtx subtarget = (target != 0 && GET_CODE (target) == REG
			   && !REG_FUNCTION_VALUE_P (target)
			   ? target : 0);
	  if (tmode != mode) subtarget = 0;
	  op0 = expand_shift (RSHIFT_EXPR, mode, op0, amount, subtarget, 1);
	}
      /* Convert the value to the desired mode.  */
      if (mode != tmode)
	op0 = convert_to_mode (tmode, op0, 1);

      /* Unless the msb of the field used to be the msb when we shifted,
	 mask out the upper bits.  */

      if (GET_MODE_BITSIZE (mode) != bitpos + bitsize
#ifdef SLOW_ZERO_EXTEND
	  /* Always generate an `and' if
	     we just zero-extended op0 and SLOW_ZERO_EXTEND, since it
	     will combine fruitfully with the zero-extend.  */
	  || tmode != mode
#endif
	  )
	return expand_binop (GET_MODE (op0), and_optab, op0,
			     mask_rtx (GET_MODE (op0), 0, bitsize, 0),
			     target, 1, OPTAB_LIB_WIDEN);

      return op0;
    }
  /* To extract a signed bit-field, first shift its msb to the msb of the word,
     then arithmetic-shift its lsb to the lsb of the word.  */
  op0 = force_reg (mode, op0);
  if (mode != tmode)
    target = 0;

  /* Find the narrowest integer mode that contains the field.  */

  for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != VOIDmode;
       mode = GET_MODE_WIDER_MODE (mode))
    if (GET_MODE_BITSIZE (mode) >= bitsize + bitpos)
      {
	op0 = convert_to_mode (mode, op0, 0);
	break;
      }

  if (GET_MODE_BITSIZE (mode) != (bitsize + bitpos))
    {
      tree amount
	= build_int_2 (GET_MODE_BITSIZE (mode) - (bitsize + bitpos), 0);
      /* Maybe propagate the target for the shift.  */
      /* But not if we will return the result--could confuse integrate.c.  */
      rtx subtarget = (target != 0 && GET_CODE (target) == REG
		       && ! REG_FUNCTION_VALUE_P (target)
		       ? target : 0);

      op0 = expand_shift (LSHIFT_EXPR, mode, op0, amount, subtarget, 1);
    }

  return expand_shift (RSHIFT_EXPR, mode, op0,
		       build_int_2 (GET_MODE_BITSIZE (mode) - bitsize, 0),
		       target, 0);
}
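
/* Illustrative sketch (not part of GNU CC; compiled out): the two-shift
   sign extraction above, with plain integers.  Shifting the field's msb
   up to the word's msb and then arithmetic-shifting back down both
   positions the field at the lsb and sign-extends it.  Arithmetic right
   shift of a negative int is implementation-defined in ISO C, but
   behaves as assumed here on the targets GNU CC supports; the function
   names are invented for illustration only.  */
#if 0
#include <assert.h>
#include <stdint.h>

static int32_t
extract_signed_demo (int32_t x, unsigned int bitpos, unsigned int bitsize)
{
  return (int32_t) ((uint32_t) x << (32 - bitsize - bitpos))
	 >> (32 - bitsize);
}

static void
extract_signed_demo_check (void)
{
  /* Bits 4..7 of 0xf0 are 0b1111, i.e. -1 as a signed 4-bit field.  */
  assert (extract_signed_demo (0xf0, 4, 4) == -1);
  /* Bits 4..7 of 0x70 are 0b0111 = 7.  */
  assert (extract_signed_demo (0x70, 4, 4) == 7);
}
#endif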
/* Return a constant integer (CONST_INT or CONST_DOUBLE) mask value
   of mode MODE with BITSIZE ones followed by BITPOS zeros, or the
   complement of that if COMPLEMENT.  The mask is truncated if
   necessary to the width of mode MODE.  The mask is zero-extended if
   BITSIZE+BITPOS is too small for MODE.  */

static rtx
mask_rtx (mode, bitpos, bitsize, complement)
     enum machine_mode mode;
     int bitpos, bitsize, complement;
{
  HOST_WIDE_INT masklow, maskhigh;

  if (bitpos < HOST_BITS_PER_WIDE_INT)
    masklow = (HOST_WIDE_INT) -1 << bitpos;
  else
    masklow = 0;

  if (bitpos + bitsize < HOST_BITS_PER_WIDE_INT)
    masklow &= ((unsigned HOST_WIDE_INT) -1
		>> (HOST_BITS_PER_WIDE_INT - bitpos - bitsize));

  if (bitpos <= HOST_BITS_PER_WIDE_INT)
    maskhigh = -1;
  else
    maskhigh = (HOST_WIDE_INT) -1 << (bitpos - HOST_BITS_PER_WIDE_INT);

  if (bitpos + bitsize > HOST_BITS_PER_WIDE_INT)
    maskhigh &= ((unsigned HOST_WIDE_INT) -1
		 >> (2 * HOST_BITS_PER_WIDE_INT - bitpos - bitsize));
  else
    maskhigh = 0;

  if (complement)
    {
      maskhigh = ~maskhigh;
      masklow = ~masklow;
    }

  return immed_double_const (masklow, maskhigh, mode);
}
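
/* Illustrative sketch (not part of GNU CC; compiled out): the mask
   shape mask_rtx builds, assuming the whole mask fits in one 64-bit
   host word: BITSIZE ones followed by BITPOS zeros.  The helper name
   is invented for illustration only.  */
#if 0
#include <assert.h>
#include <stdint.h>

static uint64_t
mask_demo (unsigned int bitpos, unsigned int bitsize, int complement)
{
  uint64_t mask = (uint64_t) -1 << bitpos;	/* ones from BITPOS up */

  if (bitpos + bitsize < 64)			/* chop above the field */
    mask &= (uint64_t) -1 >> (64 - bitpos - bitsize);
  return complement ? ~mask : mask;
}

static void
mask_demo_check (void)
{
  assert (mask_demo (4, 8, 0) == 0xff0);	/* 8 ones, 4 zeros */
  assert (mask_demo (4, 8, 1) == ~(uint64_t) 0xff0);
}
#endif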
/* Return a constant integer (CONST_INT or CONST_DOUBLE) rtx with the value
   VALUE truncated to BITSIZE bits and then shifted left BITPOS bits.  */

static rtx
lshift_value (mode, value, bitpos, bitsize)
     enum machine_mode mode;
     rtx value;
     int bitpos, bitsize;
{
  unsigned HOST_WIDE_INT v = INTVAL (value);
  HOST_WIDE_INT low, high;

  if (bitsize < HOST_BITS_PER_WIDE_INT)
    v &= ~((HOST_WIDE_INT) -1 << bitsize);

  if (bitpos < HOST_BITS_PER_WIDE_INT)
    {
      low = v << bitpos;
      high = (bitpos > 0 ? (v >> (HOST_BITS_PER_WIDE_INT - bitpos)) : 0);
    }
  else
    {
      low = 0;
      high = v << (bitpos - HOST_BITS_PER_WIDE_INT);
    }

  return immed_double_const (low, high, mode);
}
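
/* Illustrative sketch (not part of GNU CC; compiled out): the
   double-word left shift above, using two 32-bit halves in place of
   the two HOST_WIDE_INT halves.  Bits shifted out of the low half must
   be carried into the high half.  The function name is invented for
   illustration only.  */
#if 0
#include <assert.h>
#include <stdint.h>

static void
lshift_value_demo (uint32_t v, unsigned int bitpos,
		   uint32_t *low, uint32_t *high)
{
  if (bitpos < 32)
    {
      *low = v << bitpos;
      *high = bitpos > 0 ? v >> (32 - bitpos) : 0;
    }
  else
    {
      *low = 0;
      *high = v << (bitpos - 32);
    }
}

static void
lshift_value_demo_check (void)
{
  uint32_t lo, hi;

  /* 0xff shifted left 28: 4 bits stay low, 4 bits carry high.  */
  lshift_value_demo (0xff, 28, &lo, &hi);
  assert (lo == 0xf0000000 && hi == 0xf);
}
#endif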
/* Extract a bit field that is split across two words
   and return an RTX for the result.

   OP0 is the REG, SUBREG or MEM rtx for the first of the two words.
   BITSIZE is the field width; BITPOS, position of its first bit, in the word.
   UNSIGNEDP is 1 if should zero-extend the contents; else sign-extend.

   ALIGN is the known alignment of OP0.  This is also the size of the
   memory objects to be used.  */

static rtx
extract_split_bit_field (op0, bitsize, bitpos, unsignedp, align)
     rtx op0;
     unsigned HOST_WIDE_INT bitsize, bitpos;
     int unsignedp;
     unsigned int align;
{
  unsigned int unit;
  unsigned int bitsdone = 0;
  rtx result = NULL_RTX;
  int first = 1;

  /* Make sure UNIT isn't larger than BITS_PER_WORD, we can only handle that
     much at a time.  */
  if (GET_CODE (op0) == REG || GET_CODE (op0) == SUBREG)
    unit = BITS_PER_WORD;
  else
    unit = MIN (align, BITS_PER_WORD);
  while (bitsdone < bitsize)
    {
      unsigned HOST_WIDE_INT thissize;
      rtx part, word;
      unsigned HOST_WIDE_INT thispos;
      unsigned HOST_WIDE_INT offset;

      offset = (bitpos + bitsdone) / unit;
      thispos = (bitpos + bitsdone) % unit;

      /* THISSIZE must not overrun a word boundary.  Otherwise,
	 extract_fixed_bit_field will call us again, and we will mutually
	 recurse forever.  */
      thissize = MIN (bitsize - bitsdone, BITS_PER_WORD);
      thissize = MIN (thissize, unit - thispos);

      /* If OP0 is a register, then handle OFFSET here.

	 When handling multiword bitfields, extract_bit_field may pass
	 down a word_mode SUBREG of a larger REG for a bitfield that actually
	 crosses a word boundary.  Thus, for a SUBREG, we must find
	 the current word starting from the base register.  */
      if (GET_CODE (op0) == SUBREG)
	{
	  word = operand_subword_force (SUBREG_REG (op0),
					SUBREG_WORD (op0) + offset,
					GET_MODE (SUBREG_REG (op0)));
	  offset = 0;
	}
      else if (GET_CODE (op0) == REG)
	{
	  word = operand_subword_force (op0, offset, GET_MODE (op0));
	  offset = 0;
	}
      else
	word = op0;
      /* Extract the parts in bit-counting order,
	 whose meaning is determined by BYTES_PER_UNIT.
	 OFFSET is in UNITs, and UNIT is in bits.
	 extract_fixed_bit_field wants offset in bytes.  */
      part = extract_fixed_bit_field (word_mode, word,
				      offset * unit / BITS_PER_UNIT,
				      thissize, thispos, 0, 1, align);
      bitsdone += thissize;

      /* Shift this part into place for the result.  */
      if (BYTES_BIG_ENDIAN)
	{
	  if (bitsize != bitsdone)
	    part = expand_shift (LSHIFT_EXPR, word_mode, part,
				 build_int_2 (bitsize - bitsdone, 0), 0, 1);
	}
      else
	{
	  if (bitsdone != thissize)
	    part = expand_shift (LSHIFT_EXPR, word_mode, part,
				 build_int_2 (bitsdone - thissize, 0), 0, 1);
	}

      if (first)
	result = part;
      else
	/* Combine the parts with bitwise or.  This works
	   because we extracted each part as an unsigned bit field.  */
	result = expand_binop (word_mode, ior_optab, part, result, NULL_RTX, 1,
			       OPTAB_LIB_WIDEN);

      first = 0;
    }

  if (unsignedp)
    /* Unsigned bit field: we are done.  */
    return result;
  /* Signed bit field: sign-extend with two arithmetic shifts.  */
  result = expand_shift (LSHIFT_EXPR, word_mode, result,
			 build_int_2 (BITS_PER_WORD - bitsize, 0),
			 NULL_RTX, 0);
  return expand_shift (RSHIFT_EXPR, word_mode, result,
		       build_int_2 (BITS_PER_WORD - bitsize, 0), NULL_RTX, 0);
}
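
/* Illustrative sketch (not part of GNU CC; compiled out): extracting a
   field that straddles two 32-bit words, little-endian style, OR-ing
   the unsigned pieces into place as the loop above does.  The function
   name is invented for illustration only.  */
#if 0
#include <assert.h>
#include <stdint.h>

static uint32_t
split_extract_demo (const uint32_t w[2], unsigned int bitpos,
		    unsigned int bitsize)
{
  unsigned int unit = 32, bitsdone = 0;
  uint32_t result = 0;

  while (bitsdone < bitsize)
    {
      unsigned int offset = (bitpos + bitsdone) / unit;
      unsigned int thispos = (bitpos + bitsdone) % unit;
      unsigned int thissize = bitsize - bitsdone;
      uint32_t part;

      if (thissize > unit - thispos)
	thissize = unit - thispos;	/* do not overrun the word */

      part = (w[offset] >> thispos) & (((uint32_t) 1 << thissize) - 1);
      /* Shift this part into place and combine with bitwise or.  */
      result |= part << bitsdone;
      bitsdone += thissize;
    }
  return result;
}

static void
split_extract_demo_check (void)
{
  const uint32_t w[2] = { 0xf0000000, 0x0000003f };

  /* The 10-bit field at bit 28 spans both words: 0x3f:0xf = 0x3ff.  */
  assert (split_extract_demo (w, 28, 10) == 0x3ff);
}
#endif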
/* Add INC into TARGET.  */

void
expand_inc (target, inc)
     rtx target, inc;
{
  rtx value = expand_binop (GET_MODE (target), add_optab,
			    target, inc,
			    target, 0, OPTAB_LIB_WIDEN);
  if (value != target)
    emit_move_insn (target, value);
}

/* Subtract DEC from TARGET.  */

void
expand_dec (target, dec)
     rtx target, dec;
{
  rtx value = expand_binop (GET_MODE (target), sub_optab,
			    target, dec,
			    target, 0, OPTAB_LIB_WIDEN);
  if (value != target)
    emit_move_insn (target, value);
}
/* Output a shift instruction for expression code CODE,
   with SHIFTED being the rtx for the value to shift,
   and AMOUNT the tree for the amount to shift by.
   Store the result in the rtx TARGET, if that is convenient.
   If UNSIGNEDP is nonzero, do a logical shift; otherwise, arithmetic.
   Return the rtx for where the value is.  */

rtx
expand_shift (code, mode, shifted, amount, target, unsignedp)
     enum tree_code code;
     register enum machine_mode mode;
     rtx shifted;
     tree amount;
     register rtx target;
     int unsignedp;
{
  register rtx op1, temp = 0;
  register int left = (code == LSHIFT_EXPR || code == LROTATE_EXPR);
  register int rotate = (code == LROTATE_EXPR || code == RROTATE_EXPR);
  int try;

  /* Previously detected shift-counts computed by NEGATE_EXPR
     and shifted in the other direction; but that does not work
     on all machines.  */

  op1 = expand_expr (amount, NULL_RTX, VOIDmode, 0);

#ifdef SHIFT_COUNT_TRUNCATED
  if (SHIFT_COUNT_TRUNCATED)
    {
      if (GET_CODE (op1) == CONST_INT
	  && ((unsigned HOST_WIDE_INT) INTVAL (op1) >=
	      (unsigned HOST_WIDE_INT) GET_MODE_BITSIZE (mode)))
	op1 = GEN_INT ((unsigned HOST_WIDE_INT) INTVAL (op1)
		       % GET_MODE_BITSIZE (mode));
      else if (GET_CODE (op1) == SUBREG
	       && SUBREG_WORD (op1) == 0)
	op1 = SUBREG_REG (op1);
    }
#endif

  if (op1 == const0_rtx)
    return shifted;
1932 for (try = 0; temp
== 0 && try < 3; try++)
1934 enum optab_methods methods
;
1937 methods
= OPTAB_DIRECT
;
1939 methods
= OPTAB_WIDEN
;
1941 methods
= OPTAB_LIB_WIDEN
;
1945 /* Widening does not work for rotation. */
1946 if (methods
== OPTAB_WIDEN
)
1948 else if (methods
== OPTAB_LIB_WIDEN
)
1950 /* If we have been unable to open-code this by a rotation,
1951 do it as the IOR of two shifts. I.e., to rotate A
1952 by N bits, compute (A << N) | ((unsigned) A >> (C - N))
1953 where C is the bitsize of A.
1955 It is theoretically possible that the target machine might
1956 not be able to perform either shift and hence we would
1957 be making two libcalls rather than just the one for the
1958 shift (similarly if IOR could not be done). We will allow
1959 this extremely unlikely lossage to avoid complicating the
1962 rtx subtarget
= target
== shifted
? 0 : target
;
1964 tree type
= TREE_TYPE (amount
);
1965 tree new_amount
= make_tree (type
, op1
);
1967 = fold (build (MINUS_EXPR
, type
,
1969 build_int_2 (GET_MODE_BITSIZE (mode
),
1973 shifted
= force_reg (mode
, shifted
);
1975 temp
= expand_shift (left
? LSHIFT_EXPR
: RSHIFT_EXPR
,
1976 mode
, shifted
, new_amount
, subtarget
, 1);
1977 temp1
= expand_shift (left
? RSHIFT_EXPR
: LSHIFT_EXPR
,
1978 mode
, shifted
, other_amount
, 0, 1);
1979 return expand_binop (mode
, ior_optab
, temp
, temp1
, target
,
1980 unsignedp
, methods
);
1983 temp
= expand_binop (mode
,
1984 left
? rotl_optab
: rotr_optab
,
1985 shifted
, op1
, target
, unsignedp
, methods
);
1987 /* If we don't have the rotate, but we are rotating by a constant
1988 that is in range, try a rotate in the opposite direction. */
1990 if (temp
== 0 && GET_CODE (op1
) == CONST_INT
1991 && INTVAL (op1
) > 0 && INTVAL (op1
) < GET_MODE_BITSIZE (mode
))
1992 temp
= expand_binop (mode
,
1993 left
? rotr_optab
: rotl_optab
,
1995 GEN_INT (GET_MODE_BITSIZE (mode
)
1997 target
, unsignedp
, methods
);
2000 temp
= expand_binop (mode
,
2001 left
? ashl_optab
: lshr_optab
,
2002 shifted
, op1
, target
, unsignedp
, methods
);
2004 /* Do arithmetic shifts.
2005 Also, if we are going to widen the operand, we can just as well
2006 use an arithmetic right-shift instead of a logical one. */
2007 if (temp
== 0 && ! rotate
2008 && (! unsignedp
|| (! left
&& methods
== OPTAB_WIDEN
)))
2010 enum optab_methods methods1
= methods
;
2012 /* If trying to widen a log shift to an arithmetic shift,
2013 don't accept an arithmetic shift of the same size. */
2015 methods1
= OPTAB_MUST_WIDEN
;
2017 /* Arithmetic shift */
2019 temp
= expand_binop (mode
,
2020 left
? ashl_optab
: ashr_optab
,
2021 shifted
, op1
, target
, unsignedp
, methods1
);
2024 /* We used to try extzv here for logical right shifts, but that was
2025 only useful for one machine, the VAX, and caused poor code
2026 generation there for lshrdi3, so the code was deleted and a
2027 define_expand for lshrsi3 was added to vax.md. */
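/* Note on the rotate handling above: a rotate by a constant N can always
   be rewritten as a rotate by GET_MODE_BITSIZE (mode) - N in the opposite
   direction, e.g. an SImode rotate right by 3 becomes a rotate left by 29.
   Only a variable rotate count needs the two-shift IOR fallback.  */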
enum alg_code { alg_zero, alg_m, alg_shift,
                alg_add_t_m2, alg_sub_t_m2,
                alg_add_factor, alg_sub_factor,
                alg_add_t2_m, alg_sub_t2_m,
                alg_add, alg_subtract, alg_factor, alg_shiftop };

/* This structure records a sequence of operations.
   `ops' is the number of operations recorded.
   `cost' is their total cost.
   The operations are stored in `op' and the corresponding
   logarithms of the integer coefficients in `log'.

   These are the operations:
   alg_zero             total := 0;
   alg_m                total := multiplicand;
   alg_shift            total := total * coeff;
   alg_add_t_m2         total := total + multiplicand * coeff;
   alg_sub_t_m2         total := total - multiplicand * coeff;
   alg_add_factor       total := total * coeff + total;
   alg_sub_factor       total := total * coeff - total;
   alg_add_t2_m         total := total * coeff + multiplicand;
   alg_sub_t2_m         total := total * coeff - multiplicand;

   The first operand must be either alg_zero or alg_m.  */

struct algorithm
{
  short cost;
  short ops;
  /* The size of the OP and LOG fields are not directly related to the
     word size, but the worst-case algorithms will be if we have few
     consecutive ones or zeros, i.e., a multiplicand like 10101010101...
     In that case we will generate shift-by-2, add, shift-by-2, add,...,
     in total wordsize operations.  */
  enum alg_code op[MAX_BITS_PER_WORD];
  char log[MAX_BITS_PER_WORD];
};
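/* As an illustration, multiplying by 53 can be recorded as

     op[0] = alg_m;                      total = x
     op[1] = alg_add_t2_m;  log[1] = 1;  total = total*2 + x   =  3*x
     op[2] = alg_add_t2_m;  log[2] = 2;  total = total*4 + x   = 13*x
     op[3] = alg_add_t2_m;  log[3] = 2;  total = total*4 + x   = 53*x

   i.e. three shift-and-add operations replace the multiply; each
   coefficient above is the power of two 2**log[i].  */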
static void synth_mult                          PARAMS ((struct algorithm *,
                                                         unsigned HOST_WIDE_INT,
                                                         int));
static unsigned HOST_WIDE_INT choose_multiplier PARAMS ((unsigned HOST_WIDE_INT,
                                                         int, int,
                                                         unsigned HOST_WIDE_INT *,
                                                         int *, int *));
static unsigned HOST_WIDE_INT invert_mod2n      PARAMS ((unsigned HOST_WIDE_INT,
                                                         int));
/* Compute and return the best algorithm for multiplying by T.
   The algorithm must cost less than COST_LIMIT.
   If retval.cost >= COST_LIMIT, no algorithm was found and all
   other fields of the returned struct are undefined.  */

static void
synth_mult (alg_out, t, cost_limit)
     struct algorithm *alg_out;
     unsigned HOST_WIDE_INT t;
     int cost_limit;
{
  int m;
  struct algorithm *alg_in, *best_alg;
  int cost;
  unsigned HOST_WIDE_INT q;

  /* Indicate that no algorithm is yet found.  If no algorithm
     is found, this value will be returned and indicate failure.  */
  alg_out->cost = cost_limit;

  if (cost_limit <= 0)
    return;

  /* t == 1 can be done in zero cost.  */
  if (t == 1)
    {
      alg_out->ops = 1;
      alg_out->cost = 0;
      alg_out->op[0] = alg_m;
      return;
    }

  /* t == 0 sometimes has a cost.  If it does and it exceeds our limit,
     fail now.  */
  if (t == 0)
    {
      if (zero_cost >= cost_limit)
        return;
      else
        {
          alg_out->ops = 1;
          alg_out->cost = zero_cost;
          alg_out->op[0] = alg_zero;
          return;
        }
    }

  /* We'll be needing a couple extra algorithm structures now.  */

  alg_in = (struct algorithm *)alloca (sizeof (struct algorithm));
  best_alg = (struct algorithm *)alloca (sizeof (struct algorithm));

  /* If we have a group of zero bits at the low-order part of T, try
     multiplying by the remaining bits and then doing a shift.  */

  if ((t & 1) == 0)
    {
      m = floor_log2 (t & -t);  /* m = number of low zero bits */
      if (m < BITS_PER_WORD)
        {
          q = t >> m;
          cost = shift_cost[m];
          synth_mult (alg_in, q, cost_limit - cost);

          cost += alg_in->cost;
          if (cost < cost_limit)
            {
              struct algorithm *x;
              x = alg_in, alg_in = best_alg, best_alg = x;
              best_alg->log[best_alg->ops] = m;
              best_alg->op[best_alg->ops] = alg_shift;
              cost_limit = cost;
            }
        }
    }

  /* If we have an odd number, add or subtract one.  */
  if ((t & 1) != 0)
    {
      unsigned HOST_WIDE_INT w;

      for (w = 1; (w & t) != 0; w <<= 1)
        ;
      /* If T was -1, then W will be zero after the loop.  This is another
         case where T ends with ...111.  Handling this with (T + 1) and
         subtract 1 produces slightly better code and results in algorithm
         selection much faster than treating it like the ...0111 case
         below.  */
      if (w == 0
          || (w > 2
              /* Reject the case where t is 3.
                 Thus we prefer addition in that case.  */
              && t != 3))
        {
          /* T ends with ...111.  Multiply by (T + 1) and subtract 1.  */

          cost = add_cost;
          synth_mult (alg_in, t + 1, cost_limit - cost);

          cost += alg_in->cost;
          if (cost < cost_limit)
            {
              struct algorithm *x;
              x = alg_in, alg_in = best_alg, best_alg = x;
              best_alg->log[best_alg->ops] = 0;
              best_alg->op[best_alg->ops] = alg_sub_t_m2;
              cost_limit = cost;
            }
        }
      else
        {
          /* T ends with ...01 or ...011.  Multiply by (T - 1) and add 1.  */

          cost = add_cost;
          synth_mult (alg_in, t - 1, cost_limit - cost);

          cost += alg_in->cost;
          if (cost < cost_limit)
            {
              struct algorithm *x;
              x = alg_in, alg_in = best_alg, best_alg = x;
              best_alg->log[best_alg->ops] = 0;
              best_alg->op[best_alg->ops] = alg_add_t_m2;
              cost_limit = cost;
            }
        }
    }

  /* Look for factors of t of the form
     t = q(2**m +- 1), 2 <= m <= floor(log2(t - 1)).
     If we find such a factor, we can multiply by t using an algorithm that
     multiplies by q, shift the result by m and add/subtract it to itself.

     We search for large factors first and loop down, even if large factors
     are less probable than small; if we find a large factor we will find a
     good sequence quickly, and therefore be able to prune (by decreasing
     COST_LIMIT) the search.  */

  for (m = floor_log2 (t - 1); m >= 2; m--)
    {
      unsigned HOST_WIDE_INT d;

      d = ((unsigned HOST_WIDE_INT) 1 << m) + 1;
      if (t % d == 0 && t > d && m < BITS_PER_WORD)
        {
          cost = MIN (shiftadd_cost[m], add_cost + shift_cost[m]);
          synth_mult (alg_in, t / d, cost_limit - cost);

          cost += alg_in->cost;
          if (cost < cost_limit)
            {
              struct algorithm *x;
              x = alg_in, alg_in = best_alg, best_alg = x;
              best_alg->log[best_alg->ops] = m;
              best_alg->op[best_alg->ops] = alg_add_factor;
              cost_limit = cost;
            }
          /* Other factors will have been taken care of in the recursion.  */
          break;
        }

      d = ((unsigned HOST_WIDE_INT) 1 << m) - 1;
      if (t % d == 0 && t > d && m < BITS_PER_WORD)
        {
          cost = MIN (shiftsub_cost[m], add_cost + shift_cost[m]);
          synth_mult (alg_in, t / d, cost_limit - cost);

          cost += alg_in->cost;
          if (cost < cost_limit)
            {
              struct algorithm *x;
              x = alg_in, alg_in = best_alg, best_alg = x;
              best_alg->log[best_alg->ops] = m;
              best_alg->op[best_alg->ops] = alg_sub_factor;
              cost_limit = cost;
            }
          break;
        }
    }

  /* Try shift-and-add (load effective address) instructions,
     i.e. do a*3, a*5, a*9.  */
  if ((t & 1) != 0)
    {
      q = t - 1;
      q = q & -q;
      m = exact_log2 (q);
      if (m >= 0 && m < BITS_PER_WORD)
        {
          cost = shiftadd_cost[m];
          synth_mult (alg_in, (t - 1) >> m, cost_limit - cost);

          cost += alg_in->cost;
          if (cost < cost_limit)
            {
              struct algorithm *x;
              x = alg_in, alg_in = best_alg, best_alg = x;
              best_alg->log[best_alg->ops] = m;
              best_alg->op[best_alg->ops] = alg_add_t2_m;
              cost_limit = cost;
            }
        }

      q = t + 1;
      q = q & -q;
      m = exact_log2 (q);
      if (m >= 0 && m < BITS_PER_WORD)
        {
          cost = shiftsub_cost[m];
          synth_mult (alg_in, (t + 1) >> m, cost_limit - cost);

          cost += alg_in->cost;
          if (cost < cost_limit)
            {
              struct algorithm *x;
              x = alg_in, alg_in = best_alg, best_alg = x;
              best_alg->log[best_alg->ops] = m;
              best_alg->op[best_alg->ops] = alg_sub_t2_m;
              cost_limit = cost;
            }
        }
    }

  /* If cost_limit has not decreased since we stored it in alg_out->cost,
     we have not found any algorithm.  */
  if (cost_limit == alg_out->cost)
    return;

  /* If we are getting a too long sequence for `struct algorithm'
     to record, make this search fail.  */
  if (best_alg->ops == MAX_BITS_PER_WORD)
    return;

  /* Copy the algorithm from temporary space to the space at alg_out.
     We avoid using structure assignment because the majority of
     best_alg is normally undefined, and this is a critical function.  */
  alg_out->ops = best_alg->ops + 1;
  alg_out->cost = cost_limit;
  memcpy (alg_out->op, best_alg->op,
          alg_out->ops * sizeof *alg_out->op);
  memcpy (alg_out->log, best_alg->log,
          alg_out->ops * sizeof *alg_out->log);
}
/* Perform a multiplication and return an rtx for the result.
   MODE is mode of value; OP0 and OP1 are what to multiply (rtx's);
   TARGET is a suggestion for where to store the result (an rtx).

   We check specially for a constant integer as OP1.
   If you want this check for OP0 as well, then before calling
   you should swap the two operands if OP0 would be constant.  */

rtx
expand_mult (mode, op0, op1, target, unsignedp)
     enum machine_mode mode;
     register rtx op0, op1, target;
     int unsignedp;
{
  rtx const_op1 = op1;

  /* synth_mult does an `unsigned int' multiply.  As long as the mode is
     less than or equal in size to `unsigned int' this doesn't matter.
     If the mode is larger than `unsigned int', then synth_mult works only
     if the constant value exactly fits in an `unsigned int' without any
     truncation.  This means that multiplying by negative values does
     not work; results are off by 2^32 on a 32 bit machine.  */

  /* If we are multiplying in DImode, it may still be a win
     to try to work with shifts and adds.  */
  if (GET_CODE (op1) == CONST_DOUBLE
      && GET_MODE_CLASS (GET_MODE (op1)) == MODE_INT
      && HOST_BITS_PER_INT >= BITS_PER_WORD
      && CONST_DOUBLE_HIGH (op1) == 0)
    const_op1 = GEN_INT (CONST_DOUBLE_LOW (op1));
  else if (HOST_BITS_PER_INT < GET_MODE_BITSIZE (mode)
           && GET_CODE (op1) == CONST_INT
           && INTVAL (op1) < 0)
    const_op1 = 0;

  /* We used to test optimize here, on the grounds that it's better to
     produce a smaller program when -O is not used.
     But this causes such a terrible slowdown sometimes
     that it seems better to use synth_mult always.  */

  if (const_op1 && GET_CODE (const_op1) == CONST_INT
      && (unsignedp || ! flag_trapv))
    {
      struct algorithm alg;
      struct algorithm alg2;
      HOST_WIDE_INT val = INTVAL (op1);
      HOST_WIDE_INT val_so_far;
      rtx insn;
      int mult_cost;
      enum {basic_variant, negate_variant, add_variant} variant = basic_variant;

      /* Try to do the computation three ways: multiply by the negative of OP1
         and then negate, do the multiplication directly, or do multiplication
         by OP1 - 1.  */

      mult_cost = rtx_cost (gen_rtx_MULT (mode, op0, op1), SET);
      mult_cost = MIN (12 * add_cost, mult_cost);

      synth_mult (&alg, val, mult_cost);

      /* This works only if the inverted value actually fits in an
         `unsigned int' */
      if (HOST_BITS_PER_INT >= GET_MODE_BITSIZE (mode))
        {
          synth_mult (&alg2, - val,
                      (alg.cost < mult_cost ? alg.cost : mult_cost) - negate_cost);
          if (alg2.cost + negate_cost < alg.cost)
            alg = alg2, variant = negate_variant;
        }

      /* This proves very useful for division-by-constant.  */
      synth_mult (&alg2, val - 1,
                  (alg.cost < mult_cost ? alg.cost : mult_cost) - add_cost);
      if (alg2.cost + add_cost < alg.cost)
        alg = alg2, variant = add_variant;

      if (alg.cost < mult_cost)
        {
          /* We found something cheaper than a multiply insn.  */
          int opno;
          rtx accum, tem;
          enum machine_mode nmode;

          op0 = protect_from_queue (op0, 0);

          /* Avoid referencing memory over and over.
             For speed, but also for correctness when mem is volatile.  */
          if (GET_CODE (op0) == MEM)
            op0 = force_reg (mode, op0);

          /* ACCUM starts out either as OP0 or as a zero, depending on
             the first operation.  */

          if (alg.op[0] == alg_zero)
            {
              accum = copy_to_mode_reg (mode, const0_rtx);
              val_so_far = 0;
            }
          else if (alg.op[0] == alg_m)
            {
              accum = copy_to_mode_reg (mode, op0);
              val_so_far = 1;
            }
          else
            abort ();

          for (opno = 1; opno < alg.ops; opno++)
            {
              int log = alg.log[opno];
              int preserve = preserve_subexpressions_p ();
              rtx shift_subtarget = preserve ? 0 : accum;
              rtx add_target
                = (opno == alg.ops - 1 && target != 0 && variant != add_variant
                   && ! preserve)
                  ? target : 0;
              rtx accum_target = preserve ? 0 : accum;

              switch (alg.op[opno])
                {
                case alg_shift:
                  accum = expand_shift (LSHIFT_EXPR, mode, accum,
                                        build_int_2 (log, 0), NULL_RTX, 0);
                  val_so_far <<= log;
                  break;

                case alg_add_t_m2:
                  tem = expand_shift (LSHIFT_EXPR, mode, op0,
                                      build_int_2 (log, 0), NULL_RTX, 0);
                  accum = force_operand (gen_rtx_PLUS (mode, accum, tem),
                                         add_target
                                         ? add_target : accum_target);
                  val_so_far += (HOST_WIDE_INT) 1 << log;
                  break;

                case alg_sub_t_m2:
                  tem = expand_shift (LSHIFT_EXPR, mode, op0,
                                      build_int_2 (log, 0), NULL_RTX, 0);
                  accum = force_operand (gen_rtx_MINUS (mode, accum, tem),
                                         add_target
                                         ? add_target : accum_target);
                  val_so_far -= (HOST_WIDE_INT) 1 << log;
                  break;

                case alg_add_t2_m:
                  accum = expand_shift (LSHIFT_EXPR, mode, accum,
                                        build_int_2 (log, 0), shift_subtarget,
                                        0);
                  accum = force_operand (gen_rtx_PLUS (mode, accum, op0),
                                         add_target
                                         ? add_target : accum_target);
                  val_so_far = (val_so_far << log) + 1;
                  break;

                case alg_sub_t2_m:
                  accum = expand_shift (LSHIFT_EXPR, mode, accum,
                                        build_int_2 (log, 0), shift_subtarget,
                                        0);
                  accum = force_operand (gen_rtx_MINUS (mode, accum, op0),
                                         add_target
                                         ? add_target : accum_target);
                  val_so_far = (val_so_far << log) - 1;
                  break;

                case alg_add_factor:
                  tem = expand_shift (LSHIFT_EXPR, mode, accum,
                                      build_int_2 (log, 0), NULL_RTX, 0);
                  accum = force_operand (gen_rtx_PLUS (mode, accum, tem),
                                         add_target
                                         ? add_target : accum_target);
                  val_so_far += val_so_far << log;
                  break;

                case alg_sub_factor:
                  tem = expand_shift (LSHIFT_EXPR, mode, accum,
                                      build_int_2 (log, 0), NULL_RTX, 0);
                  accum = force_operand (gen_rtx_MINUS (mode, tem, accum),
                                         (add_target ? add_target
                                          : preserve ? 0 : tem));
                  val_so_far = (val_so_far << log) - val_so_far;
                  break;

                default:
                  abort ();
                }

              /* Write a REG_EQUAL note on the last insn so that we can cse
                 multiplication sequences.  Note that if ACCUM is a SUBREG,
                 we've set the inner register and must properly indicate
                 that.  */

              tem = op0, nmode = mode;
              if (GET_CODE (accum) == SUBREG)
                {
                  nmode = GET_MODE (SUBREG_REG (accum));
                  tem = gen_lowpart (nmode, op0);
                }

              insn = get_last_insn ();
              set_unique_reg_note (insn,
                                   REG_EQUAL,
                                   gen_rtx_MULT (nmode, tem,
                                                 GEN_INT (val_so_far)));
            }

          if (variant == negate_variant)
            {
              val_so_far = - val_so_far;
              accum = expand_unop (mode, neg_optab, accum, target, 0);
            }
          else if (variant == add_variant)
            {
              val_so_far = val_so_far + 1;
              accum = force_operand (gen_rtx_PLUS (mode, accum, op0), target);
            }

          if (val != val_so_far)
            abort ();

          return accum;
        }
    }

  /* This used to use umul_optab if unsigned, but for non-widening multiply
     there is no difference between signed and unsigned.  */
  op0 = expand_binop (mode,
                      ! unsignedp
                      && flag_trapv && (GET_MODE_CLASS(mode) == MODE_INT)
                      ? smulv_optab : smul_optab,
                      op0, op1, target, unsignedp, OPTAB_LIB_WIDEN);
  if (op0 == 0)
    abort ();
  return op0;
}
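/* For example, VAL == 7 is typically synthesized via alg_sub_t_m2 with
   log == 3, i.e. accum = (x << 3) - x, and VAL_SO_FAR ends up as 7,
   passing the consistency check above.  A negative constant such as -7
   would normally go through negate_variant: synthesize 7*x and negate
   the accumulated result at the end.  */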
/* Return the smallest n such that 2**n >= X.  */

int
ceil_log2 (x)
     unsigned HOST_WIDE_INT x;
{
  return floor_log2 (x - 1) + 1;
}
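/* E.g. ceil_log2 (5) == floor_log2 (4) + 1 == 3 and
   ceil_log2 (8) == floor_log2 (7) + 1 == 3; exact powers of two are
   their own ceiling.  */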
/* Choose a minimal N + 1 bit approximation to 1/D that can be used to
   replace division by D, and put the least significant N bits of the result
   in *MULTIPLIER_PTR and return the most significant bit.

   The width of operations is N (should be <= HOST_BITS_PER_WIDE_INT), the
   needed precision is in PRECISION (should be <= N).

   PRECISION should be as small as possible so this function can choose the
   multiplier more freely.

   The rounded-up logarithm of D is placed in *lgup_ptr.  A shift count that
   is to be used for a final right shift is placed in *POST_SHIFT_PTR.

   Using this function, x/D will be equal to (x * m) >> (*POST_SHIFT_PTR),
   where m is the full HOST_BITS_PER_WIDE_INT + 1 bit multiplier.  */

static
unsigned HOST_WIDE_INT
choose_multiplier (d, n, precision, multiplier_ptr, post_shift_ptr, lgup_ptr)
     unsigned HOST_WIDE_INT d;
     int n;
     int precision;
     unsigned HOST_WIDE_INT *multiplier_ptr;
     int *post_shift_ptr;
     int *lgup_ptr;
{
  HOST_WIDE_INT mhigh_hi, mlow_hi;
  unsigned HOST_WIDE_INT mhigh_lo, mlow_lo;
  int lgup, post_shift;
  int pow, pow2;
  unsigned HOST_WIDE_INT nl, dummy1;
  HOST_WIDE_INT nh, dummy2;

  /* lgup = ceil(log2(divisor)); */
  lgup = ceil_log2 (d);

  if (lgup > n)
    abort ();

  pow = n + lgup;
  pow2 = n + lgup - precision;

  if (pow == 2 * HOST_BITS_PER_WIDE_INT)
    {
      /* We could handle this with some effort, but this case is much better
         handled directly with a scc insn, so rely on caller using that.  */
      abort ();
    }

  /* mlow = 2^(N + lgup)/d */
  if (pow >= HOST_BITS_PER_WIDE_INT)
    {
      nh = (HOST_WIDE_INT) 1 << (pow - HOST_BITS_PER_WIDE_INT);
      nl = 0;
    }
  else
    {
      nh = 0;
      nl = (unsigned HOST_WIDE_INT) 1 << pow;
    }
  div_and_round_double (TRUNC_DIV_EXPR, 1, nl, nh, d, (HOST_WIDE_INT) 0,
                        &mlow_lo, &mlow_hi, &dummy1, &dummy2);

  /* mhigh = (2^(N + lgup) + 2^(N + lgup - precision))/d */
  if (pow2 >= HOST_BITS_PER_WIDE_INT)
    nh |= (HOST_WIDE_INT) 1 << (pow2 - HOST_BITS_PER_WIDE_INT);
  else
    nl |= (unsigned HOST_WIDE_INT) 1 << pow2;
  div_and_round_double (TRUNC_DIV_EXPR, 1, nl, nh, d, (HOST_WIDE_INT) 0,
                        &mhigh_lo, &mhigh_hi, &dummy1, &dummy2);

  if (mhigh_hi && nh - d >= d)
    abort ();
  if (mhigh_hi > 1 || mlow_hi > 1)
    abort ();
  /* Assert that mlow < mhigh.  */
  if (! (mlow_hi < mhigh_hi || (mlow_hi == mhigh_hi && mlow_lo < mhigh_lo)))
    abort ();

  /* If precision == N, then mlow, mhigh exceed 2^N
     (but they do not exceed 2^(N+1)).  */

  /* Reduce to lowest terms.  */
  for (post_shift = lgup; post_shift > 0; post_shift--)
    {
      unsigned HOST_WIDE_INT ml_lo
        = (mlow_hi << (HOST_BITS_PER_WIDE_INT - 1)) | (mlow_lo >> 1);
      unsigned HOST_WIDE_INT mh_lo
        = (mhigh_hi << (HOST_BITS_PER_WIDE_INT - 1)) | (mhigh_lo >> 1);
      if (ml_lo >= mh_lo)
        break;

      mlow_hi = 0;
      mlow_lo = ml_lo;
      mhigh_hi = 0;
      mhigh_lo = mh_lo;
    }

  *post_shift_ptr = post_shift;
  *lgup_ptr = lgup;
  if (n < HOST_BITS_PER_WIDE_INT)
    {
      unsigned HOST_WIDE_INT mask = ((unsigned HOST_WIDE_INT) 1 << n) - 1;
      *multiplier_ptr = mhigh_lo & mask;
      return mhigh_lo >= mask;
    }
  else
    {
      *multiplier_ptr = mhigh_lo;
      return mhigh_hi;
    }
}
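/* Worked example for N == 32, D == 7: lgup == 3 and
   mhigh == (2^35 + 2^3)/7 == 0x124924925, a 33-bit value, so
   *MULTIPLIER_PTR receives the low 32 bits 0x24924925 and the function
   returns 1 (the 33rd bit).  The caller must compensate for that extra
   bit; expand_divmod below does so with the shift/add fixup
       t = high word of (x * 0x24924925);
       q = (((x - t) >> 1) + t) >> 2;      post_shift == 3
   which computes x / 7 for any unsigned 32-bit x.  */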
/* Compute the inverse of X mod 2**n, i.e., find Y such that X * Y is
   congruent to 1 (mod 2**N).  */

static unsigned HOST_WIDE_INT
invert_mod2n (x, n)
     unsigned HOST_WIDE_INT x;
     int n;
{
  /* Solve x*y == 1 (mod 2^n), where x is odd.  Return y.  */

  /* The algorithm notes that the choice y = x satisfies
     x*y == 1 mod 2^3, since x is assumed odd.
     Each iteration doubles the number of bits of significance in y.  */

  unsigned HOST_WIDE_INT mask;
  unsigned HOST_WIDE_INT y = x;
  int nbit = 3;

  mask = (n == HOST_BITS_PER_WIDE_INT
          ? ~(unsigned HOST_WIDE_INT) 0
          : ((unsigned HOST_WIDE_INT) 1 << n) - 1);

  while (nbit < n)
    {
      y = y * (2 - x*y) & mask;         /* Modulo 2^N */
      nbit *= 2;
    }
  return y;
}
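/* Example, x == 3, n == 8: y starts as 3 (3*3 == 9 == 1 mod 2^3).  The
   first Newton step gives y == 235 (3*235 == 705 == 1 mod 2^6), the
   second gives y == 171, and 3*171 == 513 == 2*256 + 1, so 171 is the
   inverse of 3 mod 2^8.  */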
/* Emit code to adjust ADJ_OPERAND after multiplication of wrong signedness
   flavor of OP0 and OP1.  ADJ_OPERAND is already the high half of the
   product OP0 x OP1.  If UNSIGNEDP is nonzero, adjust the signed product
   to become unsigned, if UNSIGNEDP is zero, adjust the unsigned product to
   become signed.

   The result is put in TARGET if that is convenient.

   MODE is the mode of operation.  */

rtx
expand_mult_highpart_adjust (mode, adj_operand, op0, op1, target, unsignedp)
     enum machine_mode mode;
     register rtx adj_operand, op0, op1, target;
     int unsignedp;
{
  rtx tem;
  enum rtx_code adj_code = unsignedp ? PLUS : MINUS;

  tem = expand_shift (RSHIFT_EXPR, mode, op0,
                      build_int_2 (GET_MODE_BITSIZE (mode) - 1, 0),
                      NULL_RTX, 0);
  tem = expand_and (tem, op1, NULL_RTX);
  adj_operand
    = force_operand (gen_rtx_fmt_ee (adj_code, mode, adj_operand, tem),
                     adj_operand);

  tem = expand_shift (RSHIFT_EXPR, mode, op1,
                      build_int_2 (GET_MODE_BITSIZE (mode) - 1, 0),
                      NULL_RTX, 0);
  tem = expand_and (tem, op0, NULL_RTX);
  target = force_operand (gen_rtx_fmt_ee (adj_code, mode, adj_operand, tem),
                          target);

  return target;
}
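/* The identity behind the code above: the arithmetic right shift by
   GET_MODE_BITSIZE (mode) - 1 yields -1 for a negative operand and 0
   otherwise, so ANDing it with the other operand and adding gives
       uhigh (x, y) == shigh (x, y) + (x < 0 ? y : 0) + (y < 0 ? x : 0)
   while the MINUS direction applies the same correction in reverse.  */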
/* Emit code to multiply OP0 and CNST1, putting the high half of the result
   in TARGET if that is convenient, and return where the result is.  If the
   operation can not be performed, 0 is returned.

   MODE is the mode of operation and result.

   UNSIGNEDP nonzero means unsigned multiply.

   MAX_COST is the total allowed cost for the expanded RTL.  */

rtx
expand_mult_highpart (mode, op0, cnst1, target, unsignedp, max_cost)
     enum machine_mode mode;
     register rtx op0, target;
     unsigned HOST_WIDE_INT cnst1;
     int unsignedp;
     int max_cost;
{
  enum machine_mode wider_mode = GET_MODE_WIDER_MODE (mode);
  optab mul_highpart_optab;
  optab moptab;
  rtx tem;
  int size = GET_MODE_BITSIZE (mode);
  rtx op1, wide_op1;

  /* We can't support modes wider than HOST_BITS_PER_WIDE_INT.  */
  if (size > HOST_BITS_PER_WIDE_INT)
    abort ();

  op1 = GEN_INT (cnst1);

  if (GET_MODE_BITSIZE (wider_mode) <= HOST_BITS_PER_INT)
    wide_op1 = op1;
  else
    wide_op1
      = immed_double_const (cnst1,
                            (unsignedp
                             ? (HOST_WIDE_INT) 0
                             : -(cnst1 >> (HOST_BITS_PER_WIDE_INT - 1))),
                            wider_mode);

  /* expand_mult handles constant multiplication of word_mode
     or narrower.  It does a poor job for large modes.  */
  if (size < BITS_PER_WORD
      && mul_cost[(int) wider_mode] + shift_cost[size-1] < max_cost)
    {
      /* We have to do this, since expand_binop doesn't do conversion for
         multiply.  Maybe change expand_binop to handle widening multiply?  */
      op0 = convert_to_mode (wider_mode, op0, unsignedp);

      /* We know that this can't have signed overflow, so pretend this is
         an unsigned multiply.  */
      tem = expand_mult (wider_mode, op0, wide_op1, NULL_RTX, 0);
      tem = expand_shift (RSHIFT_EXPR, wider_mode, tem,
                          build_int_2 (size, 0), NULL_RTX, 1);
      return convert_modes (mode, wider_mode, tem, unsignedp);
    }

  if (target == 0)
    target = gen_reg_rtx (mode);

  /* Firstly, try using a multiplication insn that only generates the needed
     high part of the product, and in the sign flavor of unsignedp.  */
  if (mul_highpart_cost[(int) mode] < max_cost)
    {
      mul_highpart_optab = unsignedp ? umul_highpart_optab : smul_highpart_optab;
      target = expand_binop (mode, mul_highpart_optab,
                             op0, op1, target, unsignedp, OPTAB_DIRECT);
      if (target)
        return target;
    }

  /* Secondly, same as above, but use sign flavor opposite of unsignedp.
     Need to adjust the result after the multiplication.  */
  if (size - 1 < BITS_PER_WORD
      && (mul_highpart_cost[(int) mode] + 2 * shift_cost[size-1] + 4 * add_cost
          < max_cost))
    {
      mul_highpart_optab = unsignedp ? smul_highpart_optab : umul_highpart_optab;
      target = expand_binop (mode, mul_highpart_optab,
                             op0, op1, target, unsignedp, OPTAB_DIRECT);
      if (target)
        /* We used the wrong signedness.  Adjust the result.  */
        return expand_mult_highpart_adjust (mode, target, op0,
                                            op1, target, unsignedp);
    }

  /* Try widening multiplication.  */
  moptab = unsignedp ? umul_widen_optab : smul_widen_optab;
  if (moptab->handlers[(int) wider_mode].insn_code != CODE_FOR_nothing
      && mul_widen_cost[(int) wider_mode] < max_cost)
    {
      op1 = force_reg (mode, op1);
      goto try;
    }

  /* Try widening the mode and perform a non-widening multiplication.  */
  moptab = smul_optab;
  if (smul_optab->handlers[(int) wider_mode].insn_code != CODE_FOR_nothing
      && size - 1 < BITS_PER_WORD
      && mul_cost[(int) wider_mode] + shift_cost[size-1] < max_cost)
    {
      op1 = wide_op1;
      goto try;
    }

  /* Try widening multiplication of opposite signedness, and adjust.  */
  moptab = unsignedp ? smul_widen_optab : umul_widen_optab;
  if (moptab->handlers[(int) wider_mode].insn_code != CODE_FOR_nothing
      && size - 1 < BITS_PER_WORD
      && (mul_widen_cost[(int) wider_mode]
          + 2 * shift_cost[size-1] + 4 * add_cost < max_cost))
    {
      rtx regop1 = force_reg (mode, op1);
      tem = expand_binop (wider_mode, moptab, op0, regop1,
                          NULL_RTX, ! unsignedp, OPTAB_WIDEN);
      if (tem != 0)
        {
          /* Extract the high half of the just generated product.  */
          tem = expand_shift (RSHIFT_EXPR, wider_mode, tem,
                              build_int_2 (size, 0), NULL_RTX, 1);
          tem = convert_modes (mode, wider_mode, tem, unsignedp);
          /* We used the wrong signedness.  Adjust the result.  */
          return expand_mult_highpart_adjust (mode, tem, op0, op1,
                                              target, unsignedp);
        }
    }

  return 0;

 try:
  /* Pass NULL_RTX as target since TARGET has wrong mode.  */
  tem = expand_binop (wider_mode, moptab, op0, op1,
                      NULL_RTX, unsignedp, OPTAB_WIDEN);
  if (tem == 0)
    return 0;

  /* Extract the high half of the just generated product.  */
  if (mode == word_mode)
    {
      return gen_highpart (mode, tem);
    }
  else
    {
      tem = expand_shift (RSHIFT_EXPR, wider_mode, tem,
                          build_int_2 (size, 0), NULL_RTX, 1);
      return convert_modes (mode, wider_mode, tem, unsignedp);
    }
}
/* Emit the code to divide OP0 by OP1, putting the result in TARGET
   if that is convenient, and returning where the result is.
   You may request either the quotient or the remainder as the result;
   specify REM_FLAG nonzero to get the remainder.

   CODE is the expression code for which kind of division this is;
   it controls how rounding is done.  MODE is the machine mode to use.
   UNSIGNEDP nonzero means do unsigned division.  */

/* ??? For CEIL_MOD_EXPR, can compute incorrect remainder with ANDI
   and then correct it by or'ing in missing high bits
   if result of ANDI is nonzero.
   For ROUND_MOD_EXPR, can use ANDI and then sign-extend the result.
   This could optimize to a bfexts instruction.
   But C doesn't use these operations, so their optimizations are
   never studied.  */
/* ??? For modulo, we don't actually need the highpart of the first product,
   the low part will do nicely.  And for small divisors, the second multiply
   can also be a low-part only multiply or even be completely left out.
   E.g. to calculate the remainder of a division by 3 with a 32 bit
   multiply, multiply with 0x55555556 and extract the upper two bits;
   the result is exact for inputs up to 0x1fffffff.
   The input range can be reduced by using cross-sum rules.
   For odd divisors >= 3, the following table gives right shift counts
   so that if a number is shifted by an integer multiple of the given
   amount, the remainder stays the same:
   2, 4, 3, 6, 10, 12, 4, 8, 18, 6, 11, 20, 18, 0, 5, 10, 12, 0, 12, 20,
   14, 12, 23, 21, 8, 0, 20, 18, 0, 0, 6, 12, 0, 22, 0, 18, 20, 30, 0, 0,
   0, 8, 0, 11, 12, 10, 36, 0, 30, 0, 0, 12, 0, 0, 0, 0, 44, 12, 24, 0,
   20, 0, 7, 14, 0, 18, 36, 0, 0, 46, 60, 0, 42, 0, 15, 24, 20, 0, 0, 33,
   0, 20, 0, 0, 18, 0, 60, 0, 0, 0, 0, 0, 40, 18, 0, 0, 12

   Cross-sum rules for even numbers can be derived by leaving as many bits
   to the right alone as the divisor has zeros to the right.
   E.g. if x is an unsigned 32 bit number:
   (x mod 12) == (((x & 1023) + ((x >> 8) & ~3)) * 0x15555558 >> 2 * 3) >> 28
   */

#define EXACT_POWER_OF_2_OR_ZERO_P(x) (((x) & ((x) - 1)) == 0)
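/* The macro works because x & (x - 1) clears the lowest set bit of x,
   so the result is zero exactly when x has at most one bit set:
   e.g. 8 & 7 == 0, while 12 & 11 == 8.  */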
rtx
expand_divmod (rem_flag, code, mode, op0, op1, target, unsignedp)
     int rem_flag;
     enum tree_code code;
     enum machine_mode mode;
     register rtx op0, op1, target;
     int unsignedp;
{
  enum machine_mode compute_mode;
  register rtx tquotient;
  rtx quotient = 0, remainder = 0;
  rtx last;
  int size;
  rtx insn, set;
  optab optab1, optab2;
  int op1_is_constant, op1_is_pow2;
  int max_cost, extra_cost;
  static HOST_WIDE_INT last_div_const = 0;

  op1_is_constant = GET_CODE (op1) == CONST_INT;
  op1_is_pow2 = (op1_is_constant
                 && ((EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1))
                      || (! unsignedp
                          && EXACT_POWER_OF_2_OR_ZERO_P (-INTVAL (op1))))));

  /*
     This is the structure of expand_divmod:

     First comes code to fix up the operands so we can perform the operations
     correctly and efficiently.

     Second comes a switch statement with code specific for each rounding mode.
     For some special operands this code emits all RTL for the desired
     operation, for other cases, it generates only a quotient and stores it in
     QUOTIENT.  The case for trunc division/remainder might leave quotient = 0,
     to indicate that it has not done anything.

     Last comes code that finishes the operation.  If QUOTIENT is set and
     REM_FLAG is set, the remainder is computed as OP0 - QUOTIENT * OP1.  If
     QUOTIENT is not set, it is computed using trunc rounding.

     We try to generate special code for division and remainder when OP1 is a
     constant.  If |OP1| = 2**n we can use shifts and some other fast
     operations.  For other values of OP1, we compute a carefully selected
     fixed-point approximation m = 1/OP1, and generate code that multiplies OP0
     by m.

     In all cases but EXACT_DIV_EXPR, this multiplication requires the upper
     half of the product.  Different strategies for generating the product are
     implemented in expand_mult_highpart.

     If what we actually want is the remainder, we generate that by another
     by-constant multiplication and a subtraction.  */

  /* We shouldn't be called with OP1 == const1_rtx, but some of the
     code below will malfunction if we are, so check here and handle
     the special case if so.  */
  if (op1 == const1_rtx)
    return rem_flag ? const0_rtx : op0;

  /* When dividing by -1, we could get an overflow.
     negv_optab can handle overflows.  */
  if (! unsignedp && op1 == constm1_rtx)
    {
      if (rem_flag)
        return const0_rtx;
      return expand_unop (mode, flag_trapv && GET_MODE_CLASS(mode) == MODE_INT
                          ? negv_optab : neg_optab, op0, target, 0);
    }

  if (target
      /* Don't use the function value register as a target
         since we have to read it as well as write it,
         and function-inlining gets confused by this.  */
      && ((REG_P (target) && REG_FUNCTION_VALUE_P (target))
          /* Don't clobber an operand while doing a multi-step calculation.  */
          || ((rem_flag || op1_is_constant)
              && (reg_mentioned_p (target, op0)
                  || (GET_CODE (op0) == MEM && GET_CODE (target) == MEM)))
          || reg_mentioned_p (target, op1)
          || (GET_CODE (op1) == MEM && GET_CODE (target) == MEM)))
    target = 0;

  /* Get the mode in which to perform this computation.  Normally it will
     be MODE, but sometimes we can't do the desired operation in MODE.
     If so, pick a wider mode in which we can do the operation.  Convert
     to that mode at the start to avoid repeated conversions.

     First see what operations we need.  These depend on the expression
     we are evaluating.  (We assume that divxx3 insns exist under the
     same conditions that modxx3 insns and that these insns don't normally
     fail.  If these assumptions are not correct, we may generate less
     efficient code in some cases.)

     Then see if we find a mode in which we can open-code that operation
     (either a division, modulus, or shift).  Finally, check for the smallest
     mode for which we can do the operation with a library call.  */

  /* We might want to refine this now that we have division-by-constant
     optimization.  Since expand_mult_highpart tries so many variants, it is
     not straightforward to generalize this.  Maybe we should make an array
     of possible modes in init_expmed?  Save this for GCC 2.7.  */

  optab1 = (op1_is_pow2 ? (unsignedp ? lshr_optab : ashr_optab)
            : (unsignedp ? udiv_optab : sdiv_optab));
  optab2 = (op1_is_pow2 ? optab1 : (unsignedp ? udivmod_optab : sdivmod_optab));

  for (compute_mode = mode; compute_mode != VOIDmode;
       compute_mode = GET_MODE_WIDER_MODE (compute_mode))
    if (optab1->handlers[(int) compute_mode].insn_code != CODE_FOR_nothing
        || optab2->handlers[(int) compute_mode].insn_code != CODE_FOR_nothing)
      break;

  if (compute_mode == VOIDmode)
    for (compute_mode = mode; compute_mode != VOIDmode;
         compute_mode = GET_MODE_WIDER_MODE (compute_mode))
      if (optab1->handlers[(int) compute_mode].libfunc
          || optab2->handlers[(int) compute_mode].libfunc)
        break;

  /* If we still couldn't find a mode, use MODE, but we'll probably abort
     in expand_binop.  */
  if (compute_mode == VOIDmode)
    compute_mode = mode;

  if (target && GET_MODE (target) == compute_mode)
    tquotient = target;
  else
    tquotient = gen_reg_rtx (compute_mode);

  size = GET_MODE_BITSIZE (compute_mode);
#if 0
  /* It should be possible to restrict the precision to GET_MODE_BITSIZE
     (mode), and thereby get better code when OP1 is a constant.  Do that
     later.  It will require going over all usages of SIZE below.  */
  size = GET_MODE_BITSIZE (mode);
#endif

  /* Only deduct something for a REM if the last divide done was
     for a different constant.  Then set the constant of the last
     divide.  */
  max_cost = div_cost[(int) compute_mode]
    - (rem_flag && ! (last_div_const != 0 && op1_is_constant
                      && INTVAL (op1) == last_div_const)
       ? mul_cost[(int) compute_mode] + add_cost : 0);

  last_div_const = ! rem_flag && op1_is_constant ? INTVAL (op1) : 0;

  /* Now convert to the best mode to use.  */
  if (compute_mode != mode)
    {
      op0 = convert_modes (compute_mode, mode, op0, unsignedp);
      op1 = convert_modes (compute_mode, mode, op1, unsignedp);

      /* convert_modes may have placed op1 into a register, so we
         must recompute the following.  */
      op1_is_constant = GET_CODE (op1) == CONST_INT;
      op1_is_pow2 = (op1_is_constant
                     && ((EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1))
                          || (! unsignedp
                              && EXACT_POWER_OF_2_OR_ZERO_P (-INTVAL (op1)))))) ;
    }

  /* If one of the operands is a volatile MEM, copy it into a register.  */

  if (GET_CODE (op0) == MEM && MEM_VOLATILE_P (op0))
    op0 = force_reg (compute_mode, op0);
  if (GET_CODE (op1) == MEM && MEM_VOLATILE_P (op1))
    op1 = force_reg (compute_mode, op1);

  /* If we need the remainder or if OP1 is constant, we need to
     put OP0 in a register in case it has any queued subexpressions.  */
  if (rem_flag || op1_is_constant)
    op0 = force_reg (compute_mode, op0);

  last = get_last_insn ();

  /* Promote floor rounding to trunc rounding for unsigned operations.  */
  if (unsignedp)
    {
      if (code == FLOOR_DIV_EXPR)
        code = TRUNC_DIV_EXPR;
      if (code == FLOOR_MOD_EXPR)
        code = TRUNC_MOD_EXPR;
      if (code == EXACT_DIV_EXPR && op1_is_pow2)
        code = TRUNC_DIV_EXPR;
    }
  if (op1 != const0_rtx)
    switch (code)
      {
      case TRUNC_MOD_EXPR:
      case TRUNC_DIV_EXPR:
        if (op1_is_constant)
          {
            if (unsignedp)
              {
                unsigned HOST_WIDE_INT mh, ml;
                int pre_shift, post_shift;
                int dummy;
                unsigned HOST_WIDE_INT d = INTVAL (op1);

                if (EXACT_POWER_OF_2_OR_ZERO_P (d))
                  {
                    pre_shift = floor_log2 (d);
                    if (rem_flag)
                      {
                        remainder
                          = expand_binop (compute_mode, and_optab, op0,
                                          GEN_INT (((HOST_WIDE_INT) 1 << pre_shift) - 1),
                                          remainder, 1, OPTAB_LIB_WIDEN);
                        if (remainder)
                          return gen_lowpart (mode, remainder);
                      }
                    quotient = expand_shift (RSHIFT_EXPR, compute_mode, op0,
                                             build_int_2 (pre_shift, 0),
                                             tquotient, 1);
                  }
                else if (size <= HOST_BITS_PER_WIDE_INT)
                  {
                    if (d >= ((unsigned HOST_WIDE_INT) 1 << (size - 1)))
                      {
                        /* Most significant bit of divisor is set; emit an scc
                           insn.  */
                        quotient = emit_store_flag (tquotient, GEU, op0, op1,
                                                    compute_mode, 1, 1);
                        if (quotient == 0)
                          goto fail1;
                      }
                    else
                      {
                        /* Find a suitable multiplier and right shift count
                           instead of multiplying with D.  */

                        mh = choose_multiplier (d, size, size,
                                                &ml, &post_shift, &dummy);

                        /* If the suggested multiplier is more than SIZE bits,
                           we can do better for even divisors, using an
                           initial right shift.  */
                        if (mh != 0 && (d & 1) == 0)
                          {
                            pre_shift = floor_log2 (d & -d);
                            mh = choose_multiplier (d >> pre_shift, size,
                                                    size - pre_shift,
                                                    &ml, &post_shift, &dummy);
                            if (mh)
                              abort ();
                          }
                        else
                          pre_shift = 0;

                        if (mh != 0)
                          {
                            rtx t1, t2, t3, t4;

                            if (post_shift - 1 >= BITS_PER_WORD)
                              goto fail1;

                            extra_cost = (shift_cost[post_shift - 1]
                                          + shift_cost[1] + 2 * add_cost);
                            t1 = expand_mult_highpart (compute_mode, op0, ml,
                                                       NULL_RTX, 1,
                                                       max_cost - extra_cost);
                            if (t1 == 0)
                              goto fail1;
                            t2 = force_operand (gen_rtx_MINUS (compute_mode,
                                                               op0, t1),
                                                NULL_RTX);
                            t3 = expand_shift (RSHIFT_EXPR, compute_mode, t2,
                                               build_int_2 (1, 0), NULL_RTX, 1);
                            t4 = force_operand (gen_rtx_PLUS (compute_mode,
                                                              t1, t3),
                                                NULL_RTX);
                            quotient
                              = expand_shift (RSHIFT_EXPR, compute_mode, t4,
                                              build_int_2 (post_shift - 1, 0),
                                              tquotient, 1);
                          }
                        else
                          {
                            rtx t1, t2;

                            if (pre_shift >= BITS_PER_WORD
                                || post_shift >= BITS_PER_WORD)
                              goto fail1;

                            t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
                                               build_int_2 (pre_shift, 0),
                                               NULL_RTX, 1);
                            extra_cost = (shift_cost[pre_shift]
                                          + shift_cost[post_shift]);
                            t2 = expand_mult_highpart (compute_mode, t1, ml,
                                                       NULL_RTX, 1,
                                                       max_cost - extra_cost);
                            if (t2 == 0)
                              goto fail1;
                            quotient
                              = expand_shift (RSHIFT_EXPR, compute_mode, t2,
                                              build_int_2 (post_shift, 0),
                                              tquotient, 1);
                          }
                      }
                  }
                else            /* Too wide mode to use tricky code */
                  break;

                insn = get_last_insn ();
                if (insn != last
                    && (set = single_set (insn)) != 0
                    && SET_DEST (set) == quotient)
                  set_unique_reg_note (insn,
                                       REG_EQUAL,
                                       gen_rtx_UDIV (compute_mode, op0, op1));
              }
            else                /* TRUNC_DIV, signed */
              {
                unsigned HOST_WIDE_INT ml;
                int lgup, post_shift;
                HOST_WIDE_INT d = INTVAL (op1);
                unsigned HOST_WIDE_INT abs_d = d >= 0 ? d : -d;

                /* n rem d = n rem -d */
                if (rem_flag && d < 0)
                  {
                    d = abs_d;
                    op1 = GEN_INT (abs_d);
                  }

                if (d == 1)
                  quotient = op0;
                else if (d == -1)
                  quotient = expand_unop (compute_mode, neg_optab, op0,
                                          tquotient, 0);
                else if (abs_d == (unsigned HOST_WIDE_INT) 1 << (size - 1))
                  {
                    /* This case is not handled correctly below.  */
                    quotient = emit_store_flag (tquotient, EQ, op0, op1,
                                                compute_mode, 1, 1);
                    if (quotient == 0)
                      goto fail1;
                  }
                else if (EXACT_POWER_OF_2_OR_ZERO_P (d)
                         && (rem_flag ? smod_pow2_cheap : sdiv_pow2_cheap))
                  ;
                else if (EXACT_POWER_OF_2_OR_ZERO_P (abs_d))
                  {
                    lgup = floor_log2 (abs_d);
                    if (BRANCH_COST < 1 || (abs_d != 2 && BRANCH_COST < 3))
                      {
                        rtx label = gen_label_rtx ();
                        rtx t1;

                        t1 = copy_to_mode_reg (compute_mode, op0);
                        do_cmp_and_jump (t1, const0_rtx, GE,
                                         compute_mode, label);
                        expand_inc (t1, GEN_INT (abs_d - 1));
                        emit_label (label);
                        quotient = expand_shift (RSHIFT_EXPR, compute_mode, t1,
                                                 build_int_2 (lgup, 0),
                                                 tquotient, 0);
                      }
                    else
                      {
                        rtx t1, t2, t3;
                        t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
                                           build_int_2 (size - 1, 0),
                                           NULL_RTX, 0);
                        t2 = expand_shift (RSHIFT_EXPR, compute_mode, t1,
                                           build_int_2 (size - lgup, 0),
                                           NULL_RTX, 1);
                        t3 = force_operand (gen_rtx_PLUS (compute_mode,
                                                          op0, t2),
                                            NULL_RTX);
                        quotient = expand_shift (RSHIFT_EXPR, compute_mode, t3,
                                                 build_int_2 (lgup, 0),
                                                 tquotient, 0);
                      }

                    /* We have computed OP0 / abs(OP1).  If OP1 is negative,
                       negate the quotient.  */
                    if (d < 0)
                      {
                        insn = get_last_insn ();
                        if (insn != last
                            && (set = single_set (insn)) != 0
                            && SET_DEST (set) == quotient
                            && abs_d < ((unsigned HOST_WIDE_INT) 1
                                        << (HOST_BITS_PER_WIDE_INT - 1)))
                          set_unique_reg_note (insn,
                                               REG_EQUAL,
                                               gen_rtx_DIV (compute_mode,
                                                            op0,
                                                            GEN_INT (abs_d)));

                        quotient = expand_unop (compute_mode, neg_optab,
                                                quotient, quotient, 0);
                      }
                  }
                else if (size <= HOST_BITS_PER_WIDE_INT)
                  {
                    choose_multiplier (abs_d, size, size - 1,
                                       &ml, &post_shift, &lgup);
                    if (ml < (unsigned HOST_WIDE_INT) 1 << (size - 1))
                      {
                        rtx t1, t2, t3;

                        if (post_shift >= BITS_PER_WORD
                            || size - 1 >= BITS_PER_WORD)
                          goto fail1;

                        extra_cost = (shift_cost[post_shift]
                                      + shift_cost[size - 1] + add_cost);
                        t1 = expand_mult_highpart (compute_mode, op0, ml,
                                                   NULL_RTX, 0,
                                                   max_cost - extra_cost);
                        if (t1 == 0)
                          goto fail1;
                        t2 = expand_shift (RSHIFT_EXPR, compute_mode, t1,
                                           build_int_2 (post_shift, 0), NULL_RTX, 0);
                        t3 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
                                           build_int_2 (size - 1, 0), NULL_RTX, 0);
                        if (d < 0)
                          quotient
                            = force_operand (gen_rtx_MINUS (compute_mode,
                                                            t3, t2),
                                             tquotient);
                        else
                          quotient
                            = force_operand (gen_rtx_MINUS (compute_mode,
                                                            t2, t3),
                                             tquotient);
                      }
                    else
                      {
                        rtx t1, t2, t3, t4;

                        if (post_shift >= BITS_PER_WORD
                            || size - 1 >= BITS_PER_WORD)
                          goto fail1;

                        ml |= (~(unsigned HOST_WIDE_INT) 0) << (size - 1);
                        extra_cost = (shift_cost[post_shift]
                                      + shift_cost[size - 1] + 2 * add_cost);
                        t1 = expand_mult_highpart (compute_mode, op0, ml,
                                                   NULL_RTX, 0,
                                                   max_cost - extra_cost);
                        if (t1 == 0)
                          goto fail1;
                        t2 = force_operand (gen_rtx_PLUS (compute_mode,
                                                          t1, op0),
                                            NULL_RTX);
                        t3 = expand_shift (RSHIFT_EXPR, compute_mode, t2,
                                           build_int_2 (post_shift, 0),
                                           NULL_RTX, 0);
                        t4 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
                                           build_int_2 (size - 1, 0),
                                           NULL_RTX, 0);
                        if (d < 0)
                          quotient
                            = force_operand (gen_rtx_MINUS (compute_mode,
                                                            t4, t3),
                                             tquotient);
                        else
                          quotient
                            = force_operand (gen_rtx_MINUS (compute_mode,
                                                            t3, t4),
                                             tquotient);
                      }
                  }
                else            /* Too wide mode to use tricky code */
                  break;

                insn = get_last_insn ();
                if (insn != last
                    && (set = single_set (insn)) != 0
                    && SET_DEST (set) == quotient)
                  set_unique_reg_note (insn,
                                       REG_EQUAL,
                                       gen_rtx_DIV (compute_mode, op0, op1));
              }
            break;
          }
      fail1:
        delete_insns_since (last);
        break;
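        /* Example of the branch-less signed power-of-2 path above, for
           x / 4 in a 32-bit mode (lgup == 2):
               t1 = x >> 31;              arithmetic: -1 if x < 0, else 0
               t2 = (unsigned) t1 >> 30;  3 if x < 0, else 0
               t3 = x + t2;
               q  = t3 >> 2;              arithmetic shift
           e.g. x == -7 gives t3 == -4 and q == -1, the trunc quotient.  */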
      case FLOOR_DIV_EXPR:
      case FLOOR_MOD_EXPR:
        /* We will come here only for signed operations.  */
        if (op1_is_constant && HOST_BITS_PER_WIDE_INT >= size)
          {
            unsigned HOST_WIDE_INT mh, ml;
            int pre_shift, lgup, post_shift;
            HOST_WIDE_INT d = INTVAL (op1);

            if (d > 0)
              {
                /* We could just as easily deal with negative constants here,
                   but it does not seem worth the trouble for GCC 2.6.  */
                if (EXACT_POWER_OF_2_OR_ZERO_P (d))
                  {
                    pre_shift = floor_log2 (d);
                    if (rem_flag)
                      {
                        remainder = expand_binop (compute_mode, and_optab, op0,
                                                  GEN_INT (((HOST_WIDE_INT) 1 << pre_shift) - 1),
                                                  remainder, 0, OPTAB_LIB_WIDEN);
                        if (remainder)
                          return gen_lowpart (mode, remainder);
                      }
                    quotient = expand_shift (RSHIFT_EXPR, compute_mode, op0,
                                             build_int_2 (pre_shift, 0),
                                             tquotient, 0);
                  }
                else
                  {
                    rtx t1, t2, t3, t4;

                    mh = choose_multiplier (d, size, size - 1,
                                            &ml, &post_shift, &lgup);
                    if (mh)
                      abort ();

                    if (post_shift < BITS_PER_WORD
                        && size - 1 < BITS_PER_WORD)
                      {
                        t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
                                           build_int_2 (size - 1, 0),
                                           NULL_RTX, 0);
                        t2 = expand_binop (compute_mode, xor_optab, op0, t1,
                                           NULL_RTX, 0, OPTAB_WIDEN);
                        extra_cost = (shift_cost[post_shift]
                                      + shift_cost[size - 1] + 2 * add_cost);
                        t3 = expand_mult_highpart (compute_mode, t2, ml,
                                                   NULL_RTX, 1,
                                                   max_cost - extra_cost);
                        if (t3 != 0)
                          {
                            t4 = expand_shift (RSHIFT_EXPR, compute_mode, t3,
                                               build_int_2 (post_shift, 0),
                                               NULL_RTX, 1);
                            quotient = expand_binop (compute_mode, xor_optab,
                                                     t4, t1, tquotient, 0,
                                                     OPTAB_WIDEN);
                          }
                      }
                  }
              }
            else
              {
                rtx nsign, t1, t2, t3, t4;
                t1 = force_operand (gen_rtx_PLUS (compute_mode,
                                                  op0, constm1_rtx), NULL_RTX);
                t2 = expand_binop (compute_mode, ior_optab, op0, t1, NULL_RTX,
                                   0, OPTAB_WIDEN);
                nsign = expand_shift (RSHIFT_EXPR, compute_mode, t2,
                                      build_int_2 (size - 1, 0), NULL_RTX, 0);
                t3 = force_operand (gen_rtx_MINUS (compute_mode, t1, nsign),
                                    NULL_RTX);
                t4 = expand_divmod (0, TRUNC_DIV_EXPR, compute_mode, t3, op1,
                                    NULL_RTX, 0);
                if (t4)
                  {
                    rtx t5;
                    t5 = expand_unop (compute_mode, one_cmpl_optab, nsign,
                                      NULL_RTX, 0);
                    quotient = force_operand (gen_rtx_PLUS (compute_mode,
                                                            t4, t5),
                                              tquotient);
                  }
              }
          }

        if (quotient != 0)
          break;
        delete_insns_since (last);

        /* Try using an instruction that produces both the quotient and
           remainder, using truncation.  We can easily compensate the quotient
           or remainder to get floor rounding, once we have the remainder.
           Notice that we compute also the final remainder value here,
           and return the result right away.  */
        if (target == 0 || GET_MODE (target) != compute_mode)
          target = gen_reg_rtx (compute_mode);

        if (rem_flag)
          {
            remainder
              = GET_CODE (target) == REG ? target : gen_reg_rtx (compute_mode);
            quotient = gen_reg_rtx (compute_mode);
          }
        else
          {
            quotient
              = GET_CODE (target) == REG ? target : gen_reg_rtx (compute_mode);
            remainder = gen_reg_rtx (compute_mode);
          }

        if (expand_twoval_binop (sdivmod_optab, op0, op1,
                                 quotient, remainder, 0))
          {
            /* This could be computed with a branch-less sequence.
               Save that for later.  */
            rtx tem;
            rtx label = gen_label_rtx ();
            do_cmp_and_jump (remainder, const0_rtx, EQ, compute_mode, label);
            tem = expand_binop (compute_mode, xor_optab, op0, op1,
                                NULL_RTX, 0, OPTAB_WIDEN);
            do_cmp_and_jump (tem, const0_rtx, GE, compute_mode, label);
            expand_dec (quotient, const1_rtx);
            expand_inc (remainder, op1);
            emit_label (label);
            return gen_lowpart (mode, rem_flag ? remainder : quotient);
          }

        /* No luck with division elimination or divmod.  Have to do it
           by conditionally adjusting op0 *and* the result.  */
        {
          rtx label1, label2, label3, label4, label5;
          rtx adjusted_op0;
          rtx tem;

          quotient = gen_reg_rtx (compute_mode);
          adjusted_op0 = copy_to_mode_reg (compute_mode, op0);
          label1 = gen_label_rtx ();
          label2 = gen_label_rtx ();
          label3 = gen_label_rtx ();
          label4 = gen_label_rtx ();
          label5 = gen_label_rtx ();
          do_cmp_and_jump (op1, const0_rtx, LT, compute_mode, label2);
          do_cmp_and_jump (adjusted_op0, const0_rtx, LT, compute_mode, label1);
          tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
                              quotient, 0, OPTAB_LIB_WIDEN);
          if (tem != quotient)
            emit_move_insn (quotient, tem);
          emit_jump_insn (gen_jump (label5));
          emit_barrier ();
          emit_label (label1);
          expand_inc (adjusted_op0, const1_rtx);
          emit_jump_insn (gen_jump (label4));
          emit_barrier ();
          emit_label (label2);
          do_cmp_and_jump (adjusted_op0, const0_rtx, GT, compute_mode, label3);
          tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
                              quotient, 0, OPTAB_LIB_WIDEN);
          if (tem != quotient)
            emit_move_insn (quotient, tem);
          emit_jump_insn (gen_jump (label5));
          emit_barrier ();
          emit_label (label3);
          expand_dec (adjusted_op0, const1_rtx);
          emit_label (label4);
          tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
                              quotient, 0, OPTAB_LIB_WIDEN);
          if (tem != quotient)
            emit_move_insn (quotient, tem);
          expand_dec (quotient, const1_rtx);
          emit_label (label5);
        }
        break;

      case CEIL_DIV_EXPR:
      case CEIL_MOD_EXPR:
        if (unsignedp)
          {
            if (op1_is_constant && EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1)))
              {
                rtx t1, t2, t3;
                unsigned HOST_WIDE_INT d = INTVAL (op1);
                t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
                                   build_int_2 (floor_log2 (d), 0),
                                   tquotient, 1);
                t2 = expand_binop (compute_mode, and_optab, op0,
                                   GEN_INT (d - 1),
                                   NULL_RTX, 1, OPTAB_LIB_WIDEN);
                t3 = gen_reg_rtx (compute_mode);
                t3 = emit_store_flag (t3, NE, t2, const0_rtx,
                                      compute_mode, 1, 1);
                if (t3 == 0)
                  {
                    rtx lab;
                    lab = gen_label_rtx ();
                    do_cmp_and_jump (t2, const0_rtx, EQ, compute_mode, lab);
                    expand_inc (t1, const1_rtx);
                    emit_label (lab);
                    quotient = t1;
                  }
                else
                  quotient = force_operand (gen_rtx_PLUS (compute_mode,
                                                          t1, t3),
                                            tquotient);
                break;
              }

            /* Try using an instruction that produces both the quotient and
               remainder, using truncation.  We can easily compensate the
               quotient or remainder to get ceiling rounding, once we have the
               remainder.  Notice that we compute also the final remainder
               value here, and return the result right away.  */
            if (target == 0 || GET_MODE (target) != compute_mode)
              target = gen_reg_rtx (compute_mode);

            if (rem_flag)
              {
                remainder = (GET_CODE (target) == REG
                             ? target : gen_reg_rtx (compute_mode));
                quotient = gen_reg_rtx (compute_mode);
              }
            else
              {
                quotient = (GET_CODE (target) == REG
                            ? target : gen_reg_rtx (compute_mode));
                remainder = gen_reg_rtx (compute_mode);
              }

            if (expand_twoval_binop (udivmod_optab, op0, op1, quotient,
                                     remainder, 1))
              {
                /* This could be computed with a branch-less sequence.
                   Save that for later.  */
                rtx label = gen_label_rtx ();
                do_cmp_and_jump (remainder, const0_rtx, EQ,
                                 compute_mode, label);
                expand_inc (quotient, const1_rtx);
                expand_dec (remainder, op1);
                emit_label (label);
                return gen_lowpart (mode, rem_flag ? remainder : quotient);
              }

            /* No luck with division elimination or divmod.  Have to do it
               by conditionally adjusting op0 *and* the result.  */
            {
              rtx label1, label2;
              rtx adjusted_op0, tem;

              quotient = gen_reg_rtx (compute_mode);
              adjusted_op0 = copy_to_mode_reg (compute_mode, op0);
              label1 = gen_label_rtx ();
              label2 = gen_label_rtx ();
              do_cmp_and_jump (adjusted_op0, const0_rtx, NE,
                               compute_mode, label1);
              emit_move_insn (quotient, const0_rtx);
              emit_jump_insn (gen_jump (label2));
              emit_barrier ();
              emit_label (label1);
              expand_dec (adjusted_op0, const1_rtx);
              tem = expand_binop (compute_mode, udiv_optab, adjusted_op0, op1,
                                  quotient, 1, OPTAB_LIB_WIDEN);
              if (tem != quotient)
                emit_move_insn (quotient, tem);
              expand_inc (quotient, const1_rtx);
              emit_label (label2);
            }
          }
        else /* signed */
          {
            if (op1_is_constant && EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1))
                && INTVAL (op1) >= 0)
              {
                /* This is extremely similar to the code for the unsigned case
                   above.  For 2.7 we should merge these variants, but for
                   2.6.1 I don't want to touch the code for unsigned since that
                   gets used in C.  The signed case will only be used by other
                   languages (Ada).  */

                rtx t1, t2, t3;
                unsigned HOST_WIDE_INT d = INTVAL (op1);
                t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
                                   build_int_2 (floor_log2 (d), 0),
                                   NULL_RTX, 0);
                t2 = expand_binop (compute_mode, and_optab, op0,
                                   GEN_INT (d - 1),
                                   NULL_RTX, 1, OPTAB_LIB_WIDEN);
                t3 = gen_reg_rtx (compute_mode);
                t3 = emit_store_flag (t3, NE, t2, const0_rtx,
                                      compute_mode, 1, 1);
                if (t3 == 0)
                  {
                    rtx lab;
                    lab = gen_label_rtx ();
                    do_cmp_and_jump (t2, const0_rtx, EQ, compute_mode, lab);
                    expand_inc (t1, const1_rtx);
                    emit_label (lab);
                    quotient = t1;
                  }
                else
                  quotient = force_operand (gen_rtx_PLUS (compute_mode,
                                                          t1, t3),
                                            tquotient);
                break;
              }

            /* Try using an instruction that produces both the quotient and
               remainder, using truncation.  We can easily compensate the
               quotient or remainder to get ceiling rounding, once we have the
               remainder.  Notice that we compute also the final remainder
               value here, and return the result right away.  */
            if (target == 0 || GET_MODE (target) != compute_mode)
              target = gen_reg_rtx (compute_mode);

            if (rem_flag)
              {
                remainder = (GET_CODE (target) == REG
                             ? target : gen_reg_rtx (compute_mode));
                quotient = gen_reg_rtx (compute_mode);
              }
            else
              {
                quotient = (GET_CODE (target) == REG
                            ? target : gen_reg_rtx (compute_mode));
                remainder = gen_reg_rtx (compute_mode);
              }

            if (expand_twoval_binop (sdivmod_optab, op0, op1, quotient,
                                     remainder, 0))
              {
                /* This could be computed with a branch-less sequence.
                   Save that for later.  */
                rtx tem;
                rtx label = gen_label_rtx ();
                do_cmp_and_jump (remainder, const0_rtx, EQ,
                                 compute_mode, label);
                tem = expand_binop (compute_mode, xor_optab, op0, op1,
                                    NULL_RTX, 0, OPTAB_WIDEN);
                do_cmp_and_jump (tem, const0_rtx, LT, compute_mode, label);
                expand_inc (quotient, const1_rtx);
                expand_dec (remainder, op1);
                emit_label (label);
                return gen_lowpart (mode, rem_flag ? remainder : quotient);
              }

            /* No luck with division elimination or divmod.  Have to do it
               by conditionally adjusting op0 *and* the result.  */
            {
              rtx label1, label2, label3, label4, label5;
              rtx adjusted_op0;
              rtx tem;

              quotient = gen_reg_rtx (compute_mode);
              adjusted_op0 = copy_to_mode_reg (compute_mode, op0);
              label1 = gen_label_rtx ();
              label2 = gen_label_rtx ();
              label3 = gen_label_rtx ();
              label4 = gen_label_rtx ();
              label5 = gen_label_rtx ();
              do_cmp_and_jump (op1, const0_rtx, LT, compute_mode, label2);
              do_cmp_and_jump (adjusted_op0, const0_rtx, GT,
                               compute_mode, label1);
              tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
                                  quotient, 0, OPTAB_LIB_WIDEN);
              if (tem != quotient)
                emit_move_insn (quotient, tem);
              emit_jump_insn (gen_jump (label5));
              emit_barrier ();
              emit_label (label1);
              expand_dec (adjusted_op0, const1_rtx);
              emit_jump_insn (gen_jump (label4));
              emit_barrier ();
              emit_label (label2);
              do_cmp_and_jump (adjusted_op0, const0_rtx, LT,
                               compute_mode, label3);
              tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
                                  quotient, 0, OPTAB_LIB_WIDEN);
              if (tem != quotient)
                emit_move_insn (quotient, tem);
              emit_jump_insn (gen_jump (label5));
              emit_barrier ();
              emit_label (label3);
              expand_inc (adjusted_op0, const1_rtx);
              emit_label (label4);
              tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
                                  quotient, 0, OPTAB_LIB_WIDEN);
              if (tem != quotient)
                emit_move_insn (quotient, tem);
              expand_inc (quotient, const1_rtx);
              emit_label (label5);
            }
          }
        break;
      case EXACT_DIV_EXPR:
        if (op1_is_constant && HOST_BITS_PER_WIDE_INT >= size)
          {
            HOST_WIDE_INT d = INTVAL (op1);
            unsigned HOST_WIDE_INT ml;
            int pre_shift;
            rtx t1;

            pre_shift = floor_log2 (d & -d);
            ml = invert_mod2n (d >> pre_shift, size);
            t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
                               build_int_2 (pre_shift, 0), NULL_RTX, unsignedp);
            quotient = expand_mult (compute_mode, t1, GEN_INT (ml), NULL_RTX,
                                    0);

            insn = get_last_insn ();
            set_unique_reg_note (insn,
                                 REG_EQUAL,
                                 gen_rtx_fmt_ee (unsignedp ? UDIV : DIV,
                                                 compute_mode,
                                                 op0, op1));
          }
        break;
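        /* Example for the EXACT_DIV_EXPR code above: an exact division
           by 3 in a 32-bit mode multiplies by invert_mod2n (3, 32)
           == 0xaaaaaaab instead; e.g. 9 * 0xaaaaaaab == 0x600000003
           == 3 (mod 2^32), the exact quotient.  */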
3841 case ROUND_DIV_EXPR
:
3842 case ROUND_MOD_EXPR
:
3847 label
= gen_label_rtx ();
3848 quotient
= gen_reg_rtx (compute_mode
);
3849 remainder
= gen_reg_rtx (compute_mode
);
3850 if (expand_twoval_binop (udivmod_optab
, op0
, op1
, quotient
, remainder
, 1) == 0)
3853 quotient
= expand_binop (compute_mode
, udiv_optab
, op0
, op1
,
3854 quotient
, 1, OPTAB_LIB_WIDEN
);
3855 tem
= expand_mult (compute_mode
, quotient
, op1
, NULL_RTX
, 1);
3856 remainder
= expand_binop (compute_mode
, sub_optab
, op0
, tem
,
3857 remainder
, 1, OPTAB_LIB_WIDEN
);
3859 tem
= plus_constant (op1
, -1);
3860 tem
= expand_shift (RSHIFT_EXPR
, compute_mode
, tem
,
3861 build_int_2 (1, 0), NULL_RTX
, 1);
3862 do_cmp_and_jump (remainder
, tem
, LEU
, compute_mode
, label
);
3863 expand_inc (quotient
, const1_rtx
);
3864 expand_dec (remainder
, op1
);
	    emit_label (label);
	  }
	else
	  {
	    rtx abs_rem, abs_op1, tem, mask;
	    rtx label;

	    label = gen_label_rtx ();
	    quotient = gen_reg_rtx (compute_mode);
	    remainder = gen_reg_rtx (compute_mode);
	    if (expand_twoval_binop (sdivmod_optab, op0, op1, quotient,
				     remainder, 0) == 0)
	      {
		quotient = expand_binop (compute_mode, sdiv_optab, op0, op1,
					 quotient, 0, OPTAB_LIB_WIDEN);
		tem = expand_mult (compute_mode, quotient, op1, NULL_RTX, 0);
		remainder = expand_binop (compute_mode, sub_optab, op0, tem,
					  remainder, 0, OPTAB_LIB_WIDEN);
	      }
	    abs_rem = expand_abs (compute_mode, remainder, NULL_RTX, 1, 0);
	    abs_op1 = expand_abs (compute_mode, op1, NULL_RTX, 1, 0);
	    tem = expand_shift (LSHIFT_EXPR, compute_mode, abs_rem,
				build_int_2 (1, 0), NULL_RTX, 1);
	    do_cmp_and_jump (tem, abs_op1, LTU, compute_mode, label);
	    tem = expand_binop (compute_mode, xor_optab, op0, op1,
				NULL_RTX, 0, OPTAB_WIDEN);
	    mask = expand_shift (RSHIFT_EXPR, compute_mode, tem,
				 build_int_2 (size - 1, 0), NULL_RTX, 0);
	    tem = expand_binop (compute_mode, xor_optab, mask, const1_rtx,
				NULL_RTX, 0, OPTAB_WIDEN);
	    tem = expand_binop (compute_mode, sub_optab, tem, mask,
				NULL_RTX, 0, OPTAB_WIDEN);
	    expand_inc (quotient, tem);
	    tem = expand_binop (compute_mode, xor_optab, mask, op1,
				NULL_RTX, 0, OPTAB_WIDEN);
	    tem = expand_binop (compute_mode, sub_optab, tem, mask,
				NULL_RTX, 0, OPTAB_WIDEN);
	    expand_dec (remainder, tem);
	    emit_label (label);
	  }

	return gen_lowpart (mode, rem_flag ? remainder : quotient);
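	/* EXAMPLE (editorial sketch, not part of GCC): in the signed path
	   above, MASK = (OP0 ^ OP1) >> (SIZE - 1) is 0 when the operands
	   have equal signs and -1 when they differ, and (V ^ MASK) - MASK
	   then negates V exactly when MASK is -1 -- a branch-free
	   conditional negation used to adjust quotient and remainder.
	   Standalone illustration (arithmetic right shift of a negative
	   value is assumed, as GCC provides); `cond_negate' is a
	   hypothetical name.  */
#if 0
#include <assert.h>
#include <stdint.h>

static int32_t
cond_negate (int32_t v, int32_t a, int32_t b)
{
  int32_t mask = (a ^ b) >> 31;	/* 0 if signs agree, -1 if not.  */
  return (v ^ mask) - mask;	/* v, or ~v + 1 == -v.  */
}

int
main (void)
{
  assert (cond_negate (5, 7, 9) == 5);		/* Same signs: unchanged.  */
  assert (cond_negate (5, -7, 9) == -5);	/* Opposite signs: negated.  */
  return 0;
}
#endif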
  if (target && GET_MODE (target) != compute_mode)
      /* Try to produce the remainder without producing the quotient.
	 If we seem to have a divmod pattern that does not require widening,
	 don't try widening here.  We should really have a WIDEN argument
	 to expand_twoval_binop, since what we'd really like to do here is
	 1) try a mod insn in compute_mode
	 2) try a divmod insn in compute_mode
	 3) try a div insn in compute_mode and multiply-subtract to get
	    remainder
	 4) try the same things with widening allowed.  */
      remainder
	= sign_expand_binop (compute_mode, umod_optab, smod_optab,
			     op0, op1, target,
			     unsignedp,
			     ((optab2->handlers[(int) compute_mode].insn_code
			       != CODE_FOR_nothing)
			      ? OPTAB_DIRECT : OPTAB_WIDEN));
      if (remainder == 0)
	{
	  /* No luck there.  Can we do remainder and divide at once
	     without a library call?  */
	  remainder = gen_reg_rtx (compute_mode);
	  if (! expand_twoval_binop ((unsignedp
				      ? udivmod_optab : sdivmod_optab),
				     op0, op1,
				     NULL_RTX, remainder, unsignedp))
	    remainder = 0;
	}

      if (remainder)
	return gen_lowpart (mode, remainder);
  /* Produce the quotient.  Try a quotient insn, but not a library call.
     If we have a divmod in this mode, use it in preference to widening
     the div (for this test we assume it will not fail).  Note that optab2
     is set to the one of the two optabs that the call below will use.  */
  quotient
    = sign_expand_binop (compute_mode, udiv_optab, sdiv_optab,
			 op0, op1, rem_flag ? NULL_RTX : target,
			 unsignedp,
			 ((optab2->handlers[(int) compute_mode].insn_code
			   != CODE_FOR_nothing)
			  ? OPTAB_DIRECT : OPTAB_WIDEN));
  if (quotient == 0)
    {
      /* No luck there.  Try a quotient-and-remainder insn,
	 keeping the quotient alone.  */
      quotient = gen_reg_rtx (compute_mode);
      if (! expand_twoval_binop (unsignedp ? udivmod_optab : sdivmod_optab,
				 op0, op1,
				 quotient, NULL_RTX, unsignedp))
	{
	  /* Still no luck.  If we are not computing the remainder,
	     use a library call for the quotient.  */
	  quotient = sign_expand_binop (compute_mode,
					udiv_optab, sdiv_optab,
					op0, op1, target,
					unsignedp, OPTAB_LIB_WIDEN);
	}
    }
      if (target && GET_MODE (target) != compute_mode)
	target = 0;

      if (quotient == 0)
	/* No divide instruction either.  Use library for remainder.  */
	remainder = sign_expand_binop (compute_mode, umod_optab, smod_optab,
				       op0, op1, target,
				       unsignedp, OPTAB_LIB_WIDEN);
      else
	{
	  /* We divided.  Now finish doing X - Y * (X / Y).  */
	  remainder = expand_mult (compute_mode, quotient, op1,
				   NULL_RTX, unsignedp);
	  remainder = expand_binop (compute_mode, sub_optab, op0,
				    remainder, target, unsignedp,
				    OPTAB_LIB_WIDEN);
	}

  return gen_lowpart (mode, rem_flag ? remainder : quotient);
}
/* Return a tree node with data type TYPE, describing the value of X.
   Usually this is an RTL_EXPR, if there is no obvious better choice.
   X may be an expression; however, we only support those expressions
   generated by loop.c.  */
  switch (GET_CODE (x))
    {
    case CONST_INT:
      t = build_int_2 (INTVAL (x),
		       (TREE_UNSIGNED (type)
			&& (GET_MODE_BITSIZE (TYPE_MODE (type))
			    < HOST_BITS_PER_WIDE_INT))
		       || INTVAL (x) >= 0 ? 0 : -1);
      TREE_TYPE (t) = type;
      return t;

    case CONST_DOUBLE:
      if (GET_MODE (x) == VOIDmode)
	{
	  t = build_int_2 (CONST_DOUBLE_LOW (x), CONST_DOUBLE_HIGH (x));
	  TREE_TYPE (t) = type;
	}
      else
	{
	  REAL_VALUE_TYPE d;

	  REAL_VALUE_FROM_CONST_DOUBLE (d, x);
	  t = build_real (type, d);
	}
      return t;

    case PLUS:
      return fold (build (PLUS_EXPR, type, make_tree (type, XEXP (x, 0)),
			  make_tree (type, XEXP (x, 1))));

    case MINUS:
      return fold (build (MINUS_EXPR, type, make_tree (type, XEXP (x, 0)),
			  make_tree (type, XEXP (x, 1))));

    case NEG:
      return fold (build1 (NEGATE_EXPR, type, make_tree (type, XEXP (x, 0))));

    case MULT:
      return fold (build (MULT_EXPR, type, make_tree (type, XEXP (x, 0)),
			  make_tree (type, XEXP (x, 1))));

    case ASHIFT:
      return fold (build (LSHIFT_EXPR, type, make_tree (type, XEXP (x, 0)),
			  make_tree (type, XEXP (x, 1))));

    case LSHIFTRT:
      return fold (convert (type,
			    build (RSHIFT_EXPR, unsigned_type (type),
				   make_tree (unsigned_type (type),
					      XEXP (x, 0)),
				   make_tree (type, XEXP (x, 1)))));

    case ASHIFTRT:
      return fold (convert (type,
			    build (RSHIFT_EXPR, signed_type (type),
				   make_tree (signed_type (type), XEXP (x, 0)),
				   make_tree (type, XEXP (x, 1)))));

    case DIV:
      if (TREE_CODE (type) != REAL_TYPE)
	t = signed_type (type);
      else
	t = type;

      return fold (convert (type,
			    build (TRUNC_DIV_EXPR, t,
				   make_tree (t, XEXP (x, 0)),
				   make_tree (t, XEXP (x, 1)))));

    case UDIV:
      t = unsigned_type (type);
      return fold (convert (type,
			    build (TRUNC_DIV_EXPR, t,
				   make_tree (t, XEXP (x, 0)),
				   make_tree (t, XEXP (x, 1)))));

    default:
      t = make_node (RTL_EXPR);
      TREE_TYPE (t) = type;

#ifdef POINTERS_EXTEND_UNSIGNED
      /* If TYPE is a POINTER_TYPE, X might be Pmode with TYPE_MODE being
	 ptr_mode.  So convert.  */
      if (POINTER_TYPE_P (type) && GET_MODE (x) != TYPE_MODE (type))
	x = convert_memory_address (TYPE_MODE (type), x);
#endif

      RTL_EXPR_RTL (t) = x;
      /* There are no insns to be output
	 when this rtl_expr is used.  */
      RTL_EXPR_SEQUENCE (t) = 0;
      return t;
    }
}
/* Return an rtx representing the value of X * MULT + ADD.
   TARGET is a suggestion for where to store the result (an rtx).
   MODE is the machine mode for the computation.
   X and MULT must have mode MODE.  ADD may have a different mode;
   if its mode is VOIDmode, it is treated as having mode MODE.
   UNSIGNEDP is non-zero to do unsigned multiplication.
   This may emit insns.  */
rtx
expand_mult_add (x, target, mult, add, mode, unsignedp)
     rtx x, target, mult, add;
     enum machine_mode mode;
     int unsignedp;
{
  tree type = type_for_mode (mode, unsignedp);
  tree add_type = (GET_MODE (add) == VOIDmode
		   ? type : type_for_mode (GET_MODE (add), unsignedp));
  tree result = fold (build (PLUS_EXPR, type,
			     fold (build (MULT_EXPR, type,
					  make_tree (type, x),
					  make_tree (type, mult))),
			     make_tree (add_type, add)));

  return expand_expr (result, target, VOIDmode, 0);
}
/* Compute the logical-and of OP0 and OP1, storing it in TARGET
   and returning TARGET.

   If TARGET is 0, a pseudo-register or constant is returned.  */
rtx
expand_and (op0, op1, target)
     rtx op0, op1, target;
{
  enum machine_mode mode = VOIDmode;
  rtx tem;

  if (GET_MODE (op0) != VOIDmode)
    mode = GET_MODE (op0);
  else if (GET_MODE (op1) != VOIDmode)
    mode = GET_MODE (op1);

  if (mode != VOIDmode)
    tem = expand_binop (mode, and_optab, op0, op1, target, 0, OPTAB_LIB_WIDEN);
  else if (GET_CODE (op0) == CONST_INT && GET_CODE (op1) == CONST_INT)
    tem = GEN_INT (INTVAL (op0) & INTVAL (op1));
  else
    abort ();

  if (target == 0)
    target = tem;
  else if (tem != target)
    emit_move_insn (target, tem);
  return target;
}
/* Emit a store-flags instruction for comparison CODE on OP0 and OP1
   and storing in TARGET.  Normally return TARGET.
   Return 0 if that cannot be done.

   MODE is the mode to use for OP0 and OP1 should they be CONST_INTs.  If
   it is VOIDmode, they cannot both be CONST_INT.

   UNSIGNEDP is for the case where we have to widen the operands
   to perform the operation.  It says to use zero-extension.

   NORMALIZEP is 1 if we should convert the result to be either zero
   or one.  NORMALIZEP is -1 if we should convert the result to be
   either zero or -1.  If NORMALIZEP is zero, the result will be left
   "raw" out of the scc insn.  */
rtx
emit_store_flag (target, code, op0, op1, mode, unsignedp, normalizep)
     rtx target;
     enum rtx_code code;
     rtx op0, op1;
     enum machine_mode mode;
     int unsignedp;
     int normalizep;
{
  rtx subtarget;
  enum insn_code icode;
  enum machine_mode compare_mode;
  enum machine_mode target_mode = GET_MODE (target);
  rtx tem;
  rtx last = get_last_insn ();
  rtx pattern, comparison;
  if (unsignedp)
    code = unsigned_condition (code);
  /* If one operand is constant, make it the second one.  Only do this
     if the other operand is not constant as well.  */

  if ((CONSTANT_P (op0) && ! CONSTANT_P (op1))
      || (GET_CODE (op0) == CONST_INT && GET_CODE (op1) != CONST_INT))
    {
      tem = op0;
      op0 = op1;
      op1 = tem;
      code = swap_condition (code);
    }

  if (mode == VOIDmode)
    mode = GET_MODE (op0);
  /* For some comparisons with 1 and -1, we can convert this to
     comparisons with zero.  This will often produce more opportunities for
     store-flag insns.  */

  switch (code)
    {
    case LT:
      if (op1 == const1_rtx)
	op1 = const0_rtx, code = LE;
      break;
    case LE:
      if (op1 == constm1_rtx)
	op1 = const0_rtx, code = LT;
      break;
    case GE:
      if (op1 == const1_rtx)
	op1 = const0_rtx, code = GT;
      break;
    case GT:
      if (op1 == constm1_rtx)
	op1 = const0_rtx, code = GE;
      break;
    case GEU:
      if (op1 == const1_rtx)
	op1 = const0_rtx, code = NE;
      break;
    case LTU:
      if (op1 == const1_rtx)
	op1 = const0_rtx, code = EQ;
      break;
    default:
      break;
    }
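  /* EXAMPLE (editorial sketch, not part of GCC): the rewrites above rest
     on integer identities such as X < 1 <=> X <= 0 and X > -1 <=> X >= 0
     for signed values and, for unsigned values, X >= 1 <=> X != 0 and
     X < 1 <=> X == 0 (only zero lies below one).  A standalone
     spot-check of the signed forms:  */
#if 0
#include <assert.h>

int
main (void)
{
  int x;

  for (x = -3; x <= 3; x++)
    {
      assert ((x < 1) == (x <= 0));	/* LT  1  ->  LE 0 */
      assert ((x <= -1) == (x < 0));	/* LE -1  ->  LT 0 */
      assert ((x >= 1) == (x > 0));	/* GE  1  ->  GT 0 */
      assert ((x > -1) == (x >= 0));	/* GT -1  ->  GE 0 */
    }
  return 0;
}
#endif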
  /* If we are comparing a double-word integer with zero, we can convert
     the comparison into one involving a single word.  */
  if (GET_MODE_BITSIZE (mode) == BITS_PER_WORD * 2
      && GET_MODE_CLASS (mode) == MODE_INT
      && op1 == const0_rtx)
    {
      if (code == EQ || code == NE)
	{
	  /* Do a logical OR of the two words and compare the result.  */
	  rtx op0h = gen_highpart (word_mode, op0);
	  rtx op0l = gen_lowpart (word_mode, op0);
	  rtx op0both = expand_binop (word_mode, ior_optab, op0h, op0l,
				      NULL_RTX, unsignedp, OPTAB_DIRECT);
	  if (op0both != 0)
	    return emit_store_flag (target, code, op0both, op1, word_mode,
				    unsignedp, normalizep);
	}
      else if (code == LT || code == GE)
	/* If testing the sign bit, can just test on high word.  */
	return emit_store_flag (target, code, gen_highpart (word_mode, op0),
				op1, word_mode, unsignedp, normalizep);
    }
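  /* EXAMPLE (editorial sketch, not part of GCC): a double-word value is
     zero iff the OR of its two words is zero, and its sign lives entirely
     in the high word, which is all the two reductions above rely on.
     Standalone check using a 64-bit value split into 32-bit halves:  */
#if 0
#include <assert.h>
#include <stdint.h>

int
main (void)
{
  int64_t x = -42;
  uint32_t low = (uint32_t) x;
  int32_t high = (int32_t) (x >> 32);

  assert ((x == 0) == ((low | (uint32_t) high) == 0));	/* EQ/NE via IOR.  */
  assert ((x < 0) == (high < 0));			/* LT/GE via high word.  */
  return 0;
}
#endif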
  /* From now on, we won't change CODE, so set ICODE now.  */
  icode = setcc_gen_code[(int) code];
  /* If this is A < 0 or A >= 0, we can do this by taking the one's
     complement of A (for GE) and shifting the sign bit to the low bit.  */
  if (op1 == const0_rtx && (code == LT || code == GE)
      && GET_MODE_CLASS (mode) == MODE_INT
      && (normalizep || STORE_FLAG_VALUE == 1
	  || (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
	      && ((STORE_FLAG_VALUE & GET_MODE_MASK (mode))
		  == (HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1)))))
    {
      subtarget = target;

      /* If the result is to be wider than OP0, it is best to convert it
	 first.  If it is to be narrower, it is *incorrect* to convert it
	 first.  */
      if (GET_MODE_SIZE (target_mode) > GET_MODE_SIZE (mode))
	{
	  op0 = protect_from_queue (op0, 0);
	  op0 = convert_modes (target_mode, mode, op0, 0);
	  mode = target_mode;
	}

      if (target_mode != mode)
	subtarget = 0;

      if (code == GE)
	op0 = expand_unop (mode, one_cmpl_optab, op0,
			   ((STORE_FLAG_VALUE == 1 || normalizep)
			    ? 0 : subtarget), 0);

      if (STORE_FLAG_VALUE == 1 || normalizep)
	/* If we are supposed to produce a 0/1 value, we want to do
	   a logical shift from the sign bit to the low-order bit; for
	   a -1/0 value, we do an arithmetic shift.  */
	op0 = expand_shift (RSHIFT_EXPR, mode, op0,
			    size_int (GET_MODE_BITSIZE (mode) - 1),
			    subtarget, normalizep != -1);

      if (mode != target_mode)
	op0 = convert_modes (target_mode, mode, op0, 0);
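      /* EXAMPLE (editorial sketch, not part of GCC): with OP1 == 0,
	 A < 0 is simply A's sign bit moved into bit 0 by a logical right
	 shift, and A >= 0 is the same shift applied to ~A; an arithmetic
	 shift instead produces the -1/0 form.  Standalone check for 32
	 bits (arithmetic shift of a negative value assumed, as on GCC):  */
#if 0
#include <assert.h>
#include <stdint.h>

int
main (void)
{
  int32_t a = -7;

  assert (((uint32_t) a >> 31) == (a < 0));	/* 0/1 form of A < 0.  */
  assert (((uint32_t) ~a >> 31) == (a >= 0));	/* 0/1 form of A >= 0.  */
  assert ((a >> 31) == (a < 0 ? -1 : 0));	/* -1/0 form.  */
  return 0;
}
#endif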
  if (icode != CODE_FOR_nothing)
    {
      insn_operand_predicate_fn pred;

      /* We think we may be able to do this with a scc insn.  Emit the
	 comparison and then the scc insn.

	 compare_from_rtx may call emit_queue, which would be deleted below
	 if the scc insn fails.  So call it ourselves before setting LAST.
	 Likewise for do_pending_stack_adjust.  */

      emit_queue ();
      do_pending_stack_adjust ();
      last = get_last_insn ();

      comparison
	= compare_from_rtx (op0, op1, code, unsignedp, mode, NULL_RTX, 0);
      if (GET_CODE (comparison) == CONST_INT)
	return (comparison == const0_rtx ? const0_rtx
		: normalizep == 1 ? const1_rtx
		: normalizep == -1 ? constm1_rtx
		: const_true_rtx);

      /* If the code of COMPARISON doesn't match CODE, something is
	 wrong; we can no longer be sure that we have the operation.
	 We could handle this case, but it should not happen.  */

      if (GET_CODE (comparison) != code)
	abort ();

      /* Get a reference to the target in the proper mode for this insn.  */
      compare_mode = insn_data[(int) icode].operand[0].mode;
      subtarget = target;
      pred = insn_data[(int) icode].operand[0].predicate;
      if (preserve_subexpressions_p ()
	  || ! (*pred) (subtarget, compare_mode))
	subtarget = gen_reg_rtx (compare_mode);

      pattern = GEN_FCN (icode) (subtarget);
      if (pattern)
	{
	  emit_insn (pattern);

	  /* If we are converting to a wider mode, first convert to
	     TARGET_MODE, then normalize.  This produces better combining
	     opportunities on machines that have a SIGN_EXTRACT when we are
	     testing a single bit.  This mostly benefits the 68k.

	     If STORE_FLAG_VALUE does not have the sign bit set when
	     interpreted in COMPARE_MODE, we can do this conversion as
	     unsigned, which is usually more efficient.  */
	  if (GET_MODE_SIZE (target_mode) > GET_MODE_SIZE (compare_mode))
	    {
	      convert_move (target, subtarget,
			    (GET_MODE_BITSIZE (compare_mode)
			     <= HOST_BITS_PER_WIDE_INT)
			    && 0 == (STORE_FLAG_VALUE
				     & ((HOST_WIDE_INT) 1
					<< (GET_MODE_BITSIZE (compare_mode) - 1))));
	      op0 = target;
	      compare_mode = target_mode;
	    }
	  /* If we want to keep subexpressions around, don't reuse our
	     last target.  */
	  if (preserve_subexpressions_p ())
	    subtarget = 0;

	  /* Now normalize to the proper value in COMPARE_MODE.  Sometimes
	     we don't have to do anything.  */
	  if (normalizep == 0 || normalizep == STORE_FLAG_VALUE)
	    ;

	  /* STORE_FLAG_VALUE might be the most negative number, so write
	     the comparison this way to avoid a compiler-time warning.  */
	  else if (- normalizep == STORE_FLAG_VALUE)
	    op0 = expand_unop (compare_mode, neg_optab, op0, subtarget, 0);

	  /* We don't want to use STORE_FLAG_VALUE < 0 below since this
	     makes it hard to use a value of just the sign bit due to
	     ANSI integer constant typing rules.  */
	  else if (GET_MODE_BITSIZE (compare_mode) <= HOST_BITS_PER_WIDE_INT
		   && (STORE_FLAG_VALUE
		       & ((HOST_WIDE_INT) 1
			  << (GET_MODE_BITSIZE (compare_mode) - 1))))
	    op0 = expand_shift (RSHIFT_EXPR, compare_mode, op0,
				size_int (GET_MODE_BITSIZE (compare_mode) - 1),
				subtarget, normalizep == 1);
	  else if (STORE_FLAG_VALUE & 1)
	    {
	      op0 = expand_and (op0, const1_rtx, subtarget);
	      if (normalizep == -1)
		op0 = expand_unop (compare_mode, neg_optab, op0, op0, 0);
	    }
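	  /* EXAMPLE (editorial sketch, not part of GCC): the branches
	     above normalize a raw flag value with at most a negation, a
	     sign-bit shift, or an AND with 1 plus a negation.  A
	     standalone model for a machine whose raw "true" value has
	     only the sign bit set:  */
#if 0
#include <assert.h>
#include <stdint.h>

int
main (void)
{
  int32_t raw = (int32_t) 0x80000000;	/* Raw flag when the test holds.  */

  assert (((uint32_t) raw >> 31) == 1);	/* Logical shift -> 0/1.  */
  assert ((raw >> 31) == -1);		/* Arithmetic shift -> 0/-1.  */

  raw = 5;				/* Any raw value with bit 0 set.  */
  assert ((raw & 1) == 1);		/* AND with 1 -> 0/1.  */
  assert (-(raw & 1) == -1);		/* ... then negate -> 0/-1.  */
  return 0;
}
#endif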
	  /* If we were converting to a smaller mode, do the
	     conversion now.  */
	  if (target_mode != compare_mode)
	    {
	      convert_move (target, op0, 0);
	      return target;
	    }
	  else
	    return op0;
	}

      delete_insns_since (last);
  /* If expensive optimizations, use different pseudo registers for each
     insn, instead of reusing the same pseudo.  This leads to better CSE,
     but slows down the compiler, since there are more pseudos.  */
  subtarget = (!flag_expensive_optimizations
	       && (target_mode == mode)) ? target : NULL_RTX;
  /* If we reached here, we can't do this with a scc insn.  However, there
     are some comparisons that can be done directly.  For example, if
     this is an equality comparison of integers, we can try to exclusive-or
     (or subtract) the two operands and use a recursive call to try the
     comparison with zero.  Don't do any of these cases if branches are
     very cheap.  */

  if (BRANCH_COST > 0
      && GET_MODE_CLASS (mode) == MODE_INT && (code == EQ || code == NE)
      && op1 != const0_rtx)
    {
      tem = expand_binop (mode, xor_optab, op0, op1, subtarget, 1,
			  OPTAB_WIDEN);

      if (tem == 0)
	tem = expand_binop (mode, sub_optab, op0, op1, subtarget, 1,
			    OPTAB_WIDEN);
      if (tem != 0)
	tem = emit_store_flag (target, code, tem, const0_rtx,
			       mode, unsignedp, normalizep);
      if (tem == 0)
	delete_insns_since (last);
      return tem;
    }
  /* Some other cases we can do are EQ, NE, LE, and GT comparisons with
     the constant zero.  Reject all other comparisons at this point.  Only
     do LE and GT if branches are expensive since they are expensive on
     2-operand machines.  */

  if (BRANCH_COST == 0
      || GET_MODE_CLASS (mode) != MODE_INT || op1 != const0_rtx
      || (code != EQ && code != NE
	  && (BRANCH_COST <= 1 || (code != LE && code != GT))))
    return 0;
  /* See what we need to return.  We can only return a 1, -1, or the
     sign bit.  */

  if (normalizep == 0)
    {
      if (STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
	normalizep = STORE_FLAG_VALUE;

      else if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
	       && ((STORE_FLAG_VALUE & GET_MODE_MASK (mode))
		   == (unsigned HOST_WIDE_INT) 1
		      << (GET_MODE_BITSIZE (mode) - 1)))
	;
      else
	return 0;
    }
  /* Try to put the result of the comparison in the sign bit.  Assume we can't
     do the necessary operation below.  */

  tem = 0;

  /* To see if A <= 0, compute (A | (A - 1)).  A <= 0 iff that result has
     the sign bit set.  */
  if (code == LE)
    {
      /* This is destructive, so SUBTARGET can't be OP0.  */
      if (rtx_equal_p (subtarget, op0))
	subtarget = 0;

      tem = expand_binop (mode, sub_optab, op0, const1_rtx, subtarget, 0,
			  OPTAB_WIDEN);
      if (tem)
	tem = expand_binop (mode, ior_optab, op0, tem, subtarget, 0,
			    OPTAB_WIDEN);
    }
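  /* EXAMPLE (editorial sketch, not part of GCC): A | (A - 1) has the
     sign bit set exactly when A <= 0: for A == 0 the subtraction wraps
     to -1, for A < 0 the sign bit is already set, and for A > 0 neither
     term is negative.  Standalone check (avoiding INT_MIN, where A - 1
     would overflow):  */
#if 0
#include <assert.h>
#include <stdint.h>

int
main (void)
{
  int32_t a;

  for (a = -3; a <= 3; a++)
    {
      int32_t t = a | (a - 1);
      assert (((uint32_t) t >> 31) == (a <= 0));
    }
  return 0;
}
#endif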
  /* To see if A > 0, compute (((signed) A) >> BITS) - A, where BITS is the
     number of bits in the mode of OP0, minus one.  */

  if (code == GT)
    {
      if (rtx_equal_p (subtarget, op0))
	subtarget = 0;

      tem = expand_shift (RSHIFT_EXPR, mode, op0,
			  size_int (GET_MODE_BITSIZE (mode) - 1),
			  NULL_RTX, 0);
      tem = expand_binop (mode, sub_optab, tem, op0, subtarget, 0,
			  OPTAB_WIDEN);
    }
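  /* EXAMPLE (editorial sketch, not part of GCC): (A >> (BITS - 1)) - A
     has the sign bit set exactly when A > 0: the arithmetic shift gives
     0 or -1, so the result is -A for A >= 0 and -(A + 1) for A < 0, and
     only A > 0 makes it negative.  Standalone check (arithmetic shift
     assumed):  */
#if 0
#include <assert.h>
#include <stdint.h>

int
main (void)
{
  int32_t a;

  for (a = -3; a <= 3; a++)
    {
      int32_t t = (a >> 31) - a;
      assert (((uint32_t) t >> 31) == (a > 0));
    }
  return 0;
}
#endif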
  if (code == EQ || code == NE)
    {
      /* For EQ or NE, one way to do the comparison is to apply an operation
	 that converts the operand into a positive number if it is non-zero
	 or zero if it was originally zero.  Then, for EQ, we subtract 1 and
	 for NE we negate.  This puts the result in the sign bit.  Then we
	 normalize with a shift, if needed.

	 Two operations that can do the above actions are ABS and FFS, so try
	 them.  If that doesn't work, and MODE is smaller than a full word,
	 we can use zero-extension to the wider mode (an unsigned conversion)
	 as the operation.  */

      /* Note that ABS doesn't yield a positive number for INT_MIN, but
	 that is compensated by the subsequent overflow when subtracting
	 one / negating.  */

      if (abs_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
	tem = expand_unop (mode, abs_optab, op0, subtarget, 1);
      else if (ffs_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
	tem = expand_unop (mode, ffs_optab, op0, subtarget, 1);
      else if (GET_MODE_SIZE (mode) < UNITS_PER_WORD)
	{
	  op0 = protect_from_queue (op0, 0);
	  tem = convert_modes (word_mode, mode, op0, 1);
	  mode = word_mode;
	}

      if (tem != 0)
	{
	  if (code == EQ)
	    tem = expand_binop (mode, sub_optab, tem, const1_rtx, subtarget,
				0, OPTAB_WIDEN);
	  else
	    tem = expand_unop (mode, neg_optab, tem, subtarget, 0);
	}
      /* If we couldn't do it that way, for NE we can "or" the two's complement
	 of the value with itself.  For EQ, we take the one's complement of
	 that "or", which is an extra insn, so we only handle EQ if branches
	 are expensive.  */

      if (tem == 0 && (code == NE || BRANCH_COST > 1))
	{
	  if (rtx_equal_p (subtarget, op0))
	    subtarget = 0;

	  tem = expand_unop (mode, neg_optab, op0, subtarget, 0);
	  tem = expand_binop (mode, ior_optab, tem, op0, subtarget, 0,
			      OPTAB_WIDEN);

	  if (tem && code == EQ)
	    tem = expand_unop (mode, one_cmpl_optab, tem, subtarget, 0);
	}
    }
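  /* EXAMPLE (editorial sketch, not part of GCC): both reductions above
     leave the EQ/NE answer in the sign bit: -X | X is negative exactly
     when X != 0, and abs(X) - 1 is negative exactly when X == 0 (the
     two INT_MIN overflows cancel, as the comment above notes).
     Standalone check on a small range:  */
#if 0
#include <assert.h>
#include <stdint.h>
#include <stdlib.h>

int
main (void)
{
  int32_t x;

  for (x = -3; x <= 3; x++)
    {
      int32_t ne = -x | x;		/* Sign bit == (x != 0).  */
      int32_t eq = abs (x) - 1;		/* Sign bit == (x == 0).  */

      assert (((uint32_t) ne >> 31) == (x != 0));
      assert (((uint32_t) eq >> 31) == (x == 0));
    }
  return 0;
}
#endif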
  if (tem && normalizep)
    tem = expand_shift (RSHIFT_EXPR, mode, tem,
			size_int (GET_MODE_BITSIZE (mode) - 1),
			subtarget, normalizep == 1);

  if (tem)
    {
      if (GET_MODE (tem) != target_mode)
	{
	  convert_move (target, tem, 0);
	  tem = target;
	}
      else if (!subtarget)
	{
	  emit_move_insn (target, tem);
	  tem = target;
	}
    }

  if (tem == 0)
    delete_insns_since (last);

  return tem;
}
/* Like emit_store_flag, but always succeeds.  */

rtx
emit_store_flag_force (target, code, op0, op1, mode, unsignedp, normalizep)
     rtx target;
     enum rtx_code code;
     rtx op0, op1;
     enum machine_mode mode;
     int unsignedp;
     int normalizep;
{
  rtx tem, label;

  /* First see if emit_store_flag can do the job.  */
  tem = emit_store_flag (target, code, op0, op1, mode, unsignedp, normalizep);
  if (tem != 0)
    return tem;

  if (normalizep == 0)
    normalizep = 1;

  /* If this failed, we have to do this with set/compare/jump/set code.  */

  if (GET_CODE (target) != REG
      || reg_mentioned_p (target, op0) || reg_mentioned_p (target, op1))
    target = gen_reg_rtx (GET_MODE (target));

  emit_move_insn (target, const1_rtx);
  label = gen_label_rtx ();
  do_compare_rtx_and_jump (op0, op1, code, unsignedp, mode, NULL_RTX, 0,
			   label);

  emit_move_insn (target, const0_rtx);
  emit_label (label);

  return target;
}
/* Perform possibly multi-word comparison and conditional jump to LABEL
   if ARG1 OP ARG2 is true.  ARG1 and ARG2 are of mode MODE.

   The algorithm is based on the code in expr.c:do_jump.

   Note that this does not perform a general comparison.  Only variants
   generated within expmed.c are correctly handled; others abort (but could
   be handled if needed).  */
static void
do_cmp_and_jump (arg1, arg2, op, mode, label)
     rtx arg1, arg2, label;
     enum rtx_code op;
     enum machine_mode mode;
{
  /* If this mode is an integer too wide to compare properly,
     compare word by word.  Rely on cse to optimize constant cases.  */

  if (GET_MODE_CLASS (mode) == MODE_INT
      && ! can_compare_p (op, mode, ccp_jump))
    {
      rtx label2 = gen_label_rtx ();

      switch (op)
	{
	case LTU:
	  do_jump_by_parts_greater_rtx (mode, 1, arg2, arg1, label2, label);
	  break;

	case LEU:
	  do_jump_by_parts_greater_rtx (mode, 1, arg1, arg2, label, label2);
	  break;

	case LT:
	  do_jump_by_parts_greater_rtx (mode, 0, arg2, arg1, label2, label);
	  break;

	case GT:
	  do_jump_by_parts_greater_rtx (mode, 0, arg1, arg2, label2, label);
	  break;

	case GE:
	  do_jump_by_parts_greater_rtx (mode, 0, arg2, arg1, label, label2);
	  break;

	case EQ:
	  /* do_jump_by_parts_equality_rtx compares with zero.  Luckily
	     those are the only equality operations we do.  */
	  if (arg2 != const0_rtx || mode != GET_MODE (arg1))
	    abort ();
	  do_jump_by_parts_equality_rtx (arg1, label2, label);
	  break;

	case NE:
	  if (arg2 != const0_rtx || mode != GET_MODE (arg1))
	    abort ();
	  do_jump_by_parts_equality_rtx (arg1, label, label2);
	  break;

	default:
	  abort ();
	}

      emit_label (label2);
    }
  else
    emit_cmp_and_jump_insns (arg1, arg2, op, NULL_RTX, mode, 0, 0, label);
}