/* Medium-level subroutines: convert bit-field store and extract
   and shifts, multiplies and divides to rtl instructions.
   Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 59 Temple Place - Suite 330, Boston, MA
02111-1307, USA.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "toplev.h"
#include "rtl.h"
#include "tree.h"
#include "tm_p.h"
#include "flags.h"
#include "insn-config.h"
#include "expr.h"
#include "optabs.h"
#include "real.h"
#include "recog.h"
#include "langhooks.h"
static void store_fixed_bit_field (rtx, unsigned HOST_WIDE_INT,
                                   unsigned HOST_WIDE_INT,
                                   unsigned HOST_WIDE_INT, rtx);
static void store_split_bit_field (rtx, unsigned HOST_WIDE_INT,
                                   unsigned HOST_WIDE_INT, rtx);
static rtx extract_fixed_bit_field (enum machine_mode, rtx,
                                    unsigned HOST_WIDE_INT,
                                    unsigned HOST_WIDE_INT,
                                    unsigned HOST_WIDE_INT, rtx, int);
static rtx mask_rtx (enum machine_mode, int, int, int);
static rtx lshift_value (enum machine_mode, rtx, int, int);
static rtx extract_split_bit_field (rtx, unsigned HOST_WIDE_INT,
                                    unsigned HOST_WIDE_INT, int);
static void do_cmp_and_jump (rtx, rtx, enum rtx_code, enum machine_mode, rtx);
static rtx expand_smod_pow2 (enum machine_mode, rtx, HOST_WIDE_INT);
static rtx expand_sdiv_pow2 (enum machine_mode, rtx, HOST_WIDE_INT);
/* Nonzero means divides or modulus operations are relatively cheap for
   powers of two, so don't use branches; emit the operation instead.
   Usually, this will mean that the MD file will emit non-branch
   insns.  */

static bool sdiv_pow2_cheap[NUM_MACHINE_MODES];
static bool smod_pow2_cheap[NUM_MACHINE_MODES];
#ifndef SLOW_UNALIGNED_ACCESS
#define SLOW_UNALIGNED_ACCESS(MODE, ALIGN) STRICT_ALIGNMENT
#endif
/* For compilers that support multiple targets with different word sizes,
   MAX_BITS_PER_WORD contains the biggest value of BITS_PER_WORD.  An example
   is the H8/300(H) compiler.  */

#ifndef MAX_BITS_PER_WORD
#define MAX_BITS_PER_WORD BITS_PER_WORD
#endif
/* Reduce conditional compilation elsewhere.  */
#ifndef HAVE_insv
#define HAVE_insv	0
#define CODE_FOR_insv	CODE_FOR_nothing
#define gen_insv(a,b,c,d) NULL_RTX
#endif
#ifndef HAVE_extv
#define HAVE_extv	0
#define CODE_FOR_extv	CODE_FOR_nothing
#define gen_extv(a,b,c,d) NULL_RTX
#endif
#ifndef HAVE_extzv
#define HAVE_extzv	0
#define CODE_FOR_extzv	CODE_FOR_nothing
#define gen_extzv(a,b,c,d) NULL_RTX
#endif
/* Cost of various pieces of RTL.  Note that some of these are indexed by
   shift count and some by mode.  */
static int zero_cost;
static int add_cost[NUM_MACHINE_MODES];
static int neg_cost[NUM_MACHINE_MODES];
static int shift_cost[NUM_MACHINE_MODES][MAX_BITS_PER_WORD];
static int shiftadd_cost[NUM_MACHINE_MODES][MAX_BITS_PER_WORD];
static int shiftsub_cost[NUM_MACHINE_MODES][MAX_BITS_PER_WORD];
static int mul_cost[NUM_MACHINE_MODES];
static int div_cost[NUM_MACHINE_MODES];
static int mul_widen_cost[NUM_MACHINE_MODES];
static int mul_highpart_cost[NUM_MACHINE_MODES];
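
/* Illustrative note (not part of the original source): synthesis code
   elsewhere in this file consults these cached costs to choose between
   strategies.  For example, rewriting a multiply by 9 as x + (x << 3)
   in SImode is profitable roughly when

     shiftadd_cost[SImode][3] < mul_cost[SImode]

   and because init_expmed fills the tables once at startup, each such
   test is just an array load at expansion time.  */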
void
init_expmed (void)
{
  struct
  {
    struct rtx_def reg;        rtunion reg_fld[2];
    struct rtx_def plus;       rtunion plus_fld1;
    struct rtx_def neg;
    struct rtx_def udiv;       rtunion udiv_fld1;
    struct rtx_def mult;       rtunion mult_fld1;
    struct rtx_def div;        rtunion div_fld1;
    struct rtx_def mod;        rtunion mod_fld1;
    struct rtx_def zext;
    struct rtx_def wide_mult;  rtunion wide_mult_fld1;
    struct rtx_def wide_lshr;  rtunion wide_lshr_fld1;
    struct rtx_def wide_trunc;
    struct rtx_def shift;      rtunion shift_fld1;
    struct rtx_def shift_mult; rtunion shift_mult_fld1;
    struct rtx_def shift_add;  rtunion shift_add_fld1;
    struct rtx_def shift_sub;  rtunion shift_sub_fld1;
  } all;

  rtx pow2[MAX_BITS_PER_WORD];
  rtx cint[MAX_BITS_PER_WORD];
  int m, n;
  enum machine_mode mode, wider_mode;

  zero_cost = rtx_cost (const0_rtx, 0);

  for (m = 1; m < MAX_BITS_PER_WORD; m++)
    {
      pow2[m] = GEN_INT ((HOST_WIDE_INT) 1 << m);
      cint[m] = GEN_INT (m);
    }

  memset (&all, 0, sizeof all);
  PUT_CODE (&all.reg, REG);
  REGNO (&all.reg) = 10000;

  PUT_CODE (&all.plus, PLUS);
  XEXP (&all.plus, 0) = &all.reg;
  XEXP (&all.plus, 1) = &all.reg;

  PUT_CODE (&all.neg, NEG);
  XEXP (&all.neg, 0) = &all.reg;

  PUT_CODE (&all.udiv, UDIV);
  XEXP (&all.udiv, 0) = &all.reg;
  XEXP (&all.udiv, 1) = &all.reg;

  PUT_CODE (&all.mult, MULT);
  XEXP (&all.mult, 0) = &all.reg;
  XEXP (&all.mult, 1) = &all.reg;

  PUT_CODE (&all.div, DIV);
  XEXP (&all.div, 0) = &all.reg;
  XEXP (&all.div, 1) = 32 < MAX_BITS_PER_WORD ? cint[32] : GEN_INT (32);

  PUT_CODE (&all.mod, MOD);
  XEXP (&all.mod, 0) = &all.reg;
  XEXP (&all.mod, 1) = XEXP (&all.div, 1);

  PUT_CODE (&all.zext, ZERO_EXTEND);
  XEXP (&all.zext, 0) = &all.reg;

  PUT_CODE (&all.wide_mult, MULT);
  XEXP (&all.wide_mult, 0) = &all.zext;
  XEXP (&all.wide_mult, 1) = &all.zext;

  PUT_CODE (&all.wide_lshr, LSHIFTRT);
  XEXP (&all.wide_lshr, 0) = &all.wide_mult;

  PUT_CODE (&all.wide_trunc, TRUNCATE);
  XEXP (&all.wide_trunc, 0) = &all.wide_lshr;

  PUT_CODE (&all.shift, ASHIFT);
  XEXP (&all.shift, 0) = &all.reg;

  PUT_CODE (&all.shift_mult, MULT);
  XEXP (&all.shift_mult, 0) = &all.reg;

  PUT_CODE (&all.shift_add, PLUS);
  XEXP (&all.shift_add, 0) = &all.shift_mult;
  XEXP (&all.shift_add, 1) = &all.reg;

  PUT_CODE (&all.shift_sub, MINUS);
  XEXP (&all.shift_sub, 0) = &all.shift_mult;
  XEXP (&all.shift_sub, 1) = &all.reg;
  for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT);
       mode != VOIDmode;
       mode = GET_MODE_WIDER_MODE (mode))
    {
      PUT_MODE (&all.reg, mode);
      PUT_MODE (&all.plus, mode);
      PUT_MODE (&all.neg, mode);
      PUT_MODE (&all.udiv, mode);
      PUT_MODE (&all.mult, mode);
      PUT_MODE (&all.div, mode);
      PUT_MODE (&all.mod, mode);
      PUT_MODE (&all.wide_trunc, mode);
      PUT_MODE (&all.shift, mode);
      PUT_MODE (&all.shift_mult, mode);
      PUT_MODE (&all.shift_add, mode);
      PUT_MODE (&all.shift_sub, mode);
      add_cost[mode] = rtx_cost (&all.plus, SET);
      neg_cost[mode] = rtx_cost (&all.neg, SET);
      div_cost[mode] = rtx_cost (&all.udiv, SET);
      mul_cost[mode] = rtx_cost (&all.mult, SET);

      sdiv_pow2_cheap[mode] = (rtx_cost (&all.div, SET) <= 2 * add_cost[mode]);
      smod_pow2_cheap[mode] = (rtx_cost (&all.mod, SET) <= 4 * add_cost[mode]);

      wider_mode = GET_MODE_WIDER_MODE (mode);
      if (wider_mode != VOIDmode)
        {
          PUT_MODE (&all.zext, wider_mode);
          PUT_MODE (&all.wide_mult, wider_mode);
          PUT_MODE (&all.wide_lshr, wider_mode);
          XEXP (&all.wide_lshr, 1) = GEN_INT (GET_MODE_BITSIZE (mode));

          mul_widen_cost[wider_mode] = rtx_cost (&all.wide_mult, SET);
          mul_highpart_cost[mode] = rtx_cost (&all.wide_trunc, SET);
        }

      shift_cost[mode][0] = 0;
      shiftadd_cost[mode][0] = shiftsub_cost[mode][0] = add_cost[mode];

      n = MIN (MAX_BITS_PER_WORD, GET_MODE_BITSIZE (mode));
      for (m = 1; m < n; m++)
        {
          XEXP (&all.shift, 1) = cint[m];
          XEXP (&all.shift_mult, 1) = pow2[m];

          shift_cost[mode][m] = rtx_cost (&all.shift, SET);
          shiftadd_cost[mode][m] = rtx_cost (&all.shift_add, SET);
          shiftsub_cost[mode][m] = rtx_cost (&all.shift_sub, SET);
        }
    }
}
/* Return an rtx representing minus the value of X.
   MODE is the intended mode of the result,
   useful if X is a CONST_INT.  */

rtx
negate_rtx (enum machine_mode mode, rtx x)
{
  rtx result = simplify_unary_operation (NEG, mode, x, mode);

  if (result == 0)
    result = expand_unop (mode, neg_optab, x, NULL_RTX, 0);

  return result;
}
/* Report on the availability of insv/extv/extzv and the desired mode
   of each of their operands.  Returns MAX_MACHINE_MODE if HAVE_foo
   is false; else the mode of the specified operand.  If OPNO is -1,
   all the caller cares about is whether the insn is available.  */

enum machine_mode
mode_for_extraction (enum extraction_pattern pattern, int opno)
{
  const struct insn_data *data;

  switch (pattern)
    {
    case EP_insv:
      if (HAVE_insv)
        {
          data = &insn_data[CODE_FOR_insv];
          break;
        }
      return MAX_MACHINE_MODE;

    case EP_extv:
      if (HAVE_extv)
        {
          data = &insn_data[CODE_FOR_extv];
          break;
        }
      return MAX_MACHINE_MODE;

    case EP_extzv:
      if (HAVE_extzv)
        {
          data = &insn_data[CODE_FOR_extzv];
          break;
        }
      return MAX_MACHINE_MODE;

    default:
      gcc_unreachable ();
    }

  if (opno == -1)
    return VOIDmode;

  /* Everyone who uses this function used to follow it with
     if (result == VOIDmode) result = word_mode; */
  if (data->operand[opno].mode == VOIDmode)
    return word_mode;
  return data->operand[opno].mode;
}
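
/* Illustrative sketch (an assumption, not original code): a typical
   caller asks for the mode of one operand and treats MAX_MACHINE_MODE
   as "no such pattern on this target":

     enum machine_mode op_mode = mode_for_extraction (EP_insv, 3);
     if (op_mode == MAX_MACHINE_MODE)
       ;  /* fall back to the shift-and-mask path  */

   store_bit_field below consults the result in the same spirit.  */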
/* Generate code to store value from rtx VALUE
   into a bit-field within structure STR_RTX
   containing BITSIZE bits starting at bit BITNUM.
   FIELDMODE is the machine-mode of the FIELD_DECL node for this field.
   ALIGN is the alignment that STR_RTX is known to have.
   TOTAL_SIZE is the size of the structure in bytes, or -1 if varying.  */

/* ??? Note that there are two different ideas here for how
   to determine the size to count bits within, for a register.
   One is BITS_PER_WORD, and the other is the size of operand 3
   of the insv pattern.

   If operand 3 of the insv pattern is VOIDmode, then we will use BITS_PER_WORD
   else, we use the mode of operand 3.  */
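
/* Illustrative example (an assumption, not original code): a C-level
   bit-field assignment such as

     struct { unsigned a : 3; unsigned b : 7; } s;
     s.b = 5;

   reaches this routine roughly as
   store_bit_field (s_rtx, 7, 3, field_mode, const5_rtx), i.e. BITSIZE
   and BITNUM describe the field and VALUE the right-hand side; the
   code below then picks an insv insn, a plain move, or the
   shift-and-mask fallback.  */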
void
store_bit_field (rtx str_rtx, unsigned HOST_WIDE_INT bitsize,
                 unsigned HOST_WIDE_INT bitnum, enum machine_mode fieldmode,
                 rtx value)
{
  unsigned int unit
    = (MEM_P (str_rtx)) ? BITS_PER_UNIT : BITS_PER_WORD;
  unsigned HOST_WIDE_INT offset = bitnum / unit;
  unsigned HOST_WIDE_INT bitpos = bitnum % unit;
  rtx op0 = str_rtx;
  int byte_offset;
  rtx orig_value;

  enum machine_mode op_mode = mode_for_extraction (EP_insv, 3);
  while (GET_CODE (op0) == SUBREG)
    {
      /* The following line once was done only if WORDS_BIG_ENDIAN,
         but I think that is a mistake.  WORDS_BIG_ENDIAN is
         meaningful at a much higher level; when structures are copied
         between memory and regs, the higher-numbered regs
         always get higher addresses.  */
      offset += (SUBREG_BYTE (op0) / UNITS_PER_WORD);
      /* We used to adjust BITPOS here, but now we do the whole adjustment
         right after the loop.  */
      op0 = SUBREG_REG (op0);
    }
  /* Use vec_set patterns for inserting parts of vectors whenever
     available.  */
  if (VECTOR_MODE_P (GET_MODE (op0))
      && !MEM_P (op0)
      && (vec_set_optab->handlers[GET_MODE (op0)].insn_code
          != CODE_FOR_nothing)
      && fieldmode == GET_MODE_INNER (GET_MODE (op0))
      && bitsize == GET_MODE_BITSIZE (GET_MODE_INNER (GET_MODE (op0)))
      && !(bitnum % GET_MODE_BITSIZE (GET_MODE_INNER (GET_MODE (op0)))))
    {
      enum machine_mode outermode = GET_MODE (op0);
      enum machine_mode innermode = GET_MODE_INNER (outermode);
      int icode = (int) vec_set_optab->handlers[outermode].insn_code;
      int pos = bitnum / GET_MODE_BITSIZE (innermode);
      rtx rtxpos = GEN_INT (pos);
      rtx src = value;
      rtx dest = op0;
      rtx pat, seq;
      enum machine_mode mode0 = insn_data[icode].operand[0].mode;
      enum machine_mode mode1 = insn_data[icode].operand[1].mode;
      enum machine_mode mode2 = insn_data[icode].operand[2].mode;

      start_sequence ();

      if (! (*insn_data[icode].operand[1].predicate) (src, mode1))
        src = copy_to_mode_reg (mode1, src);

      if (! (*insn_data[icode].operand[2].predicate) (rtxpos, mode2))
        rtxpos = copy_to_mode_reg (mode1, rtxpos);

      /* We could handle this, but we should always be called with a pseudo
         for our targets and all insns should take them as outputs.  */
      gcc_assert ((*insn_data[icode].operand[0].predicate) (dest, mode0)
                  && (*insn_data[icode].operand[1].predicate) (src, mode1)
                  && (*insn_data[icode].operand[2].predicate) (rtxpos, mode2));
      pat = GEN_FCN (icode) (dest, src, rtxpos);
      seq = get_insns ();
      end_sequence ();
      if (pat)
        {
          emit_insn (seq);
          emit_insn (pat);
          return;
        }
    }
  if (flag_force_mem)
    {
      int old_generating_concat_p = generating_concat_p;
      generating_concat_p = 0;
      value = force_not_mem (value);
      generating_concat_p = old_generating_concat_p;
    }
  /* If the target is a register, overwriting the entire object, or storing
     a full-word or multi-word field can be done with just a SUBREG.

     If the target is memory, storing any naturally aligned field can be
     done with a simple store.  For targets that support fast unaligned
     memory, any naturally sized, unit aligned field can be done directly.  */

  byte_offset = (bitnum % BITS_PER_WORD) / BITS_PER_UNIT
                + (offset * UNITS_PER_WORD);

  if (bitpos == 0
      && bitsize == GET_MODE_BITSIZE (fieldmode)
      && (!MEM_P (op0)
          ? ((GET_MODE_SIZE (fieldmode) >= UNITS_PER_WORD
              || GET_MODE_SIZE (GET_MODE (op0)) == GET_MODE_SIZE (fieldmode))
             && byte_offset % GET_MODE_SIZE (fieldmode) == 0)
          : (! SLOW_UNALIGNED_ACCESS (fieldmode, MEM_ALIGN (op0))
             || (offset * BITS_PER_UNIT % bitsize == 0
                 && MEM_ALIGN (op0) % GET_MODE_BITSIZE (fieldmode) == 0))))
    {
      if (GET_MODE (op0) != fieldmode)
        {
          if (GET_CODE (op0) == SUBREG)
            {
              /* Else we've got some float mode source being extracted
                 into a different float mode destination -- this
                 combination of subregs results in Severe Tire
                 Damage.  */
              gcc_assert (GET_MODE (SUBREG_REG (op0)) == fieldmode
                          || GET_MODE_CLASS (fieldmode) == MODE_INT
                          || GET_MODE_CLASS (fieldmode) == MODE_PARTIAL_INT);
              op0 = SUBREG_REG (op0);
            }
          if (REG_P (op0))
            op0 = gen_rtx_SUBREG (fieldmode, op0, byte_offset);
          else
            op0 = adjust_address (op0, fieldmode, offset);
        }
      emit_move_insn (op0, value);
      return;
    }
  /* Make sure we are playing with integral modes.  Pun with subregs
     if we aren't.  This must come after the entire register case above,
     since that case is valid for any mode.  The following cases are only
     valid for integral modes.  */
  {
    enum machine_mode imode = int_mode_for_mode (GET_MODE (op0));
    if (imode != GET_MODE (op0))
      {
        if (MEM_P (op0))
          op0 = adjust_address (op0, imode, 0);
        else
          {
            gcc_assert (imode != BLKmode);
            op0 = gen_lowpart (imode, op0);
          }
      }
  }

  /* We may be accessing data outside the field, which means
     we can alias adjacent data.  */
  if (MEM_P (op0))
    {
      op0 = shallow_copy_rtx (op0);
      set_mem_alias_set (op0, 0);
      set_mem_expr (op0, 0);
    }
  /* If OP0 is a register, BITPOS must count within a word.
     But as we have it, it counts within whatever size OP0 now has.
     On a bigendian machine, these are not the same, so convert.  */
  if (BYTES_BIG_ENDIAN
      && !MEM_P (op0)
      && unit > GET_MODE_BITSIZE (GET_MODE (op0)))
    bitpos += unit - GET_MODE_BITSIZE (GET_MODE (op0));

  /* Storing an lsb-aligned field in a register
     can be done with a movestrict instruction.  */

  if (!MEM_P (op0)
      && (BYTES_BIG_ENDIAN ? bitpos + bitsize == unit : bitpos == 0)
      && bitsize == GET_MODE_BITSIZE (fieldmode)
      && (movstrict_optab->handlers[fieldmode].insn_code
          != CODE_FOR_nothing))
    {
      int icode = movstrict_optab->handlers[fieldmode].insn_code;

      /* Get appropriate low part of the value being stored.  */
      if (GET_CODE (value) == CONST_INT || REG_P (value))
        value = gen_lowpart (fieldmode, value);
      else if (!(GET_CODE (value) == SYMBOL_REF
                 || GET_CODE (value) == LABEL_REF
                 || GET_CODE (value) == CONST))
        value = convert_to_mode (fieldmode, value, 0);

      if (! (*insn_data[icode].operand[1].predicate) (value, fieldmode))
        value = copy_to_mode_reg (fieldmode, value);

      if (GET_CODE (op0) == SUBREG)
        {
          /* Else we've got some float mode source being extracted into
             a different float mode destination -- this combination of
             subregs results in Severe Tire Damage.  */
          gcc_assert (GET_MODE (SUBREG_REG (op0)) == fieldmode
                      || GET_MODE_CLASS (fieldmode) == MODE_INT
                      || GET_MODE_CLASS (fieldmode) == MODE_PARTIAL_INT);
          op0 = SUBREG_REG (op0);
        }

      emit_insn (GEN_FCN (icode)
                 (gen_rtx_SUBREG (fieldmode, op0,
                                  (bitnum % BITS_PER_WORD) / BITS_PER_UNIT
                                  + (offset * UNITS_PER_WORD)),
                  value));

      return;
    }
  /* Handle fields bigger than a word.  */

  if (bitsize > BITS_PER_WORD)
    {
      /* Here we transfer the words of the field
         in the order least significant first.
         This is because the most significant word is the one which may
         be less than full.
         However, only do that if the value is not BLKmode.  */

      unsigned int backwards = WORDS_BIG_ENDIAN && fieldmode != BLKmode;
      unsigned int nwords = (bitsize + (BITS_PER_WORD - 1)) / BITS_PER_WORD;
      unsigned int i;

      /* This is the mode we must force value to, so that there will be enough
         subwords to extract.  Note that fieldmode will often (always?) be
         VOIDmode, because that is what store_field uses to indicate that this
         is a bit field, but passing VOIDmode to operand_subword_force will
         result in an abort.  */
      fieldmode = GET_MODE (value);
      if (fieldmode == VOIDmode)
        fieldmode = smallest_mode_for_size (nwords * BITS_PER_WORD, MODE_INT);

      for (i = 0; i < nwords; i++)
        {
          /* If I is 0, use the low-order word in both field and target;
             if I is 1, use the next to lowest word; and so on.  */
          unsigned int wordnum = (backwards ? nwords - i - 1 : i);
          unsigned int bit_offset = (backwards
                                     ? MAX ((int) bitsize - ((int) i + 1)
                                            * BITS_PER_WORD,
                                            0)
                                     : (int) i * BITS_PER_WORD);

          store_bit_field (op0, MIN (BITS_PER_WORD,
                                     bitsize - i * BITS_PER_WORD),
                           bitnum + bit_offset, word_mode,
                           operand_subword_force (value, wordnum, fieldmode));
        }
      return;
    }
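
  /* Illustrative note (an assumption, not original code): storing a
     40-bit field on a 32-bit target makes two recursive calls,

       store_bit_field (op0, 32, bitnum,      word_mode, <word 0>);
       store_bit_field (op0,  8, bitnum + 32, word_mode, <word 1>);

     least significant word first, with the order and offsets reversed
     when BACKWARDS is set.  */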
  /* From here on we can assume that the field to be stored in is
     a full-word (whatever type that is), since it is shorter than a word.  */

  /* OFFSET is the number of words or bytes (UNIT says which)
     from STR_RTX to the first word or byte containing part of the field.  */

  if (!MEM_P (op0))
    {
      if (offset != 0
          || GET_MODE_SIZE (GET_MODE (op0)) > UNITS_PER_WORD)
        {
          if (!REG_P (op0))
            {
              /* Since this is a destination (lvalue), we can't copy it to a
                 pseudo.  We can trivially remove a SUBREG that does not
                 change the size of the operand.  Such a SUBREG may have been
                 added above.  Otherwise, abort.  */
              gcc_assert (GET_CODE (op0) == SUBREG
                          && (GET_MODE_SIZE (GET_MODE (op0))
                              == GET_MODE_SIZE (GET_MODE (SUBREG_REG (op0)))));
              op0 = SUBREG_REG (op0);
            }
          op0 = gen_rtx_SUBREG (mode_for_size (BITS_PER_WORD, MODE_INT, 0),
                                op0, (offset * UNITS_PER_WORD));
        }
      offset = 0;
    }
  /* If VALUE is a floating-point mode, access it as an integer of the
     corresponding size.  This can occur on a machine with 64 bit registers
     that uses SFmode for float.  This can also occur for unaligned float
     structure fields.  */
  orig_value = value;
  if (GET_MODE_CLASS (GET_MODE (value)) != MODE_INT
      && GET_MODE_CLASS (GET_MODE (value)) != MODE_PARTIAL_INT)
    value = gen_lowpart ((GET_MODE (value) == VOIDmode
                          ? word_mode : int_mode_for_mode (GET_MODE (value))),
                         value);

  /* Now OFFSET is nonzero only if OP0 is memory
     and is therefore always measured in bytes.  */

  if (HAVE_insv
      && GET_MODE (value) != BLKmode
      && !(bitsize == 1 && GET_CODE (value) == CONST_INT)
      /* Ensure insv's size is wide enough for this field.  */
      && (GET_MODE_BITSIZE (op_mode) >= bitsize)
      && ! ((REG_P (op0) || GET_CODE (op0) == SUBREG)
            && (bitsize + bitpos > GET_MODE_BITSIZE (op_mode))))
    {
      int xbitpos = bitpos;
      rtx value1;
      rtx xop0 = op0;
      rtx last = get_last_insn ();
      rtx pat;
      enum machine_mode maxmode = mode_for_extraction (EP_insv, 3);
      int save_volatile_ok = volatile_ok;

      volatile_ok = 1;

      /* If this machine's insv can only insert into a register, copy OP0
         into a register and save it back later.  */
      /* This used to check flag_force_mem, but that was a serious
         de-optimization now that flag_force_mem is enabled by -O2.  */
      if (MEM_P (op0)
          && ! ((*insn_data[(int) CODE_FOR_insv].operand[0].predicate)
                (op0, VOIDmode)))
        {
          rtx tempreg;
          enum machine_mode bestmode;

          /* Get the mode to use for inserting into this field.  If OP0 is
             BLKmode, get the smallest mode consistent with the alignment.  If
             OP0 is a non-BLKmode object that is no wider than MAXMODE, use its
             mode.  Otherwise, use the smallest mode containing the field.  */

          if (GET_MODE (op0) == BLKmode
              || GET_MODE_SIZE (GET_MODE (op0)) > GET_MODE_SIZE (maxmode))
            bestmode
              = get_best_mode (bitsize, bitnum, MEM_ALIGN (op0), maxmode,
                               MEM_VOLATILE_P (op0));
          else
            bestmode = GET_MODE (op0);

          if (bestmode == VOIDmode
              || (SLOW_UNALIGNED_ACCESS (bestmode, MEM_ALIGN (op0))
                  && GET_MODE_BITSIZE (bestmode) > MEM_ALIGN (op0)))
            goto insv_loses;

          /* Adjust address to point to the containing unit of that mode.
             Compute offset as multiple of this unit, counting in bytes.  */
          unit = GET_MODE_BITSIZE (bestmode);
          offset = (bitnum / unit) * GET_MODE_SIZE (bestmode);
          bitpos = bitnum % unit;
          op0 = adjust_address (op0, bestmode, offset);

          /* Fetch that unit, store the bitfield in it, then store
             the unit.  */
          tempreg = copy_to_reg (op0);
          store_bit_field (tempreg, bitsize, bitpos, fieldmode, orig_value);
          emit_move_insn (op0, tempreg);
          return;
        }
      volatile_ok = save_volatile_ok;
      /* Add OFFSET into OP0's address.  */
      if (MEM_P (xop0))
        xop0 = adjust_address (xop0, byte_mode, offset);

      /* If xop0 is a register, we need it in MAXMODE
         to make it acceptable to the format of insv.  */
      if (GET_CODE (xop0) == SUBREG)
        /* We can't just change the mode, because this might clobber op0,
           and we will need the original value of op0 if insv fails.  */
        xop0 = gen_rtx_SUBREG (maxmode, SUBREG_REG (xop0), SUBREG_BYTE (xop0));
      if (REG_P (xop0) && GET_MODE (xop0) != maxmode)
        xop0 = gen_rtx_SUBREG (maxmode, xop0, 0);

      /* On big-endian machines, we count bits from the most significant.
         If the bit field insn does not, we must invert.  */

      if (BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
        xbitpos = unit - bitsize - xbitpos;

      /* We have been counting XBITPOS within UNIT.
         Count instead within the size of the register.  */
      if (BITS_BIG_ENDIAN && !MEM_P (xop0))
        xbitpos += GET_MODE_BITSIZE (maxmode) - unit;

      unit = GET_MODE_BITSIZE (maxmode);
      /* Convert VALUE to maxmode (which insv insn wants) in VALUE1.  */
      value1 = value;
      if (GET_MODE (value) != maxmode)
        {
          if (GET_MODE_BITSIZE (GET_MODE (value)) >= bitsize)
            {
              /* Optimization: Don't bother really extending VALUE
                 if it has all the bits we will actually use.  However,
                 if we must narrow it, be sure we do it correctly.  */

              if (GET_MODE_SIZE (GET_MODE (value)) < GET_MODE_SIZE (maxmode))
                {
                  rtx tmp;

                  tmp = simplify_subreg (maxmode, value1, GET_MODE (value), 0);
                  if (! tmp)
                    tmp = simplify_gen_subreg (maxmode,
                                               force_reg (GET_MODE (value),
                                                          value1),
                                               GET_MODE (value), 0);
                  value1 = tmp;
                }
              else
                value1 = gen_lowpart (maxmode, value1);
            }
          else if (GET_CODE (value) == CONST_INT)
            value1 = gen_int_mode (INTVAL (value), maxmode);
          else
            /* Parse phase is supposed to make VALUE's data type
               match that of the component reference, which is a type
               at least as wide as the field; so VALUE should have
               a mode that corresponds to that type.  */
            gcc_assert (CONSTANT_P (value));
        }

      /* If this machine's insv insists on a register,
         get VALUE1 into a register.  */
      if (! ((*insn_data[(int) CODE_FOR_insv].operand[3].predicate)
             (value1, maxmode)))
        value1 = force_reg (maxmode, value1);

      pat = gen_insv (xop0, GEN_INT (bitsize), GEN_INT (xbitpos), value1);
      if (pat)
        emit_insn (pat);
      else
        {
          delete_insns_since (last);
          store_fixed_bit_field (op0, offset, bitsize, bitpos, value);
        }
    }
  else
    insv_loses:
    /* Insv is not available; store using shifts and boolean ops.  */
    store_fixed_bit_field (op0, offset, bitsize, bitpos, value);
}
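
/* Illustrative sketch (an assumption, not original code): on the
   containing word, the fallback just selected is equivalent to

     mask = (((HOST_WIDE_INT) 1 << bitsize) - 1) << bitpos;
     word = (word & ~mask) | ((value << bitpos) & mask);

   store_fixed_bit_field below emits exactly this AND/IOR pair, and
   skips the AND when VALUE is all ones and the IOR when it is zero.  */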
/* Use shifts and boolean operations to store VALUE
   into a bit field of width BITSIZE
   in a memory location specified by OP0 except offset by OFFSET bytes.
     (OFFSET must be 0 if OP0 is a register.)
   The field starts at position BITPOS within the byte.
     (If OP0 is a register, it may be a full word or a narrower mode,
      but BITPOS still counts within a full word,
      which is significant on bigendian machines.)  */

static void
store_fixed_bit_field (rtx op0, unsigned HOST_WIDE_INT offset,
                       unsigned HOST_WIDE_INT bitsize,
                       unsigned HOST_WIDE_INT bitpos, rtx value)
{
  enum machine_mode mode;
  unsigned int total_bits = BITS_PER_WORD;
  rtx subtarget, temp;
  int all_zero = 0;
  int all_one = 0;
  /* There is a case not handled here:
     a structure with a known alignment of just a halfword
     and a field split across two aligned halfwords within the structure.
     Or likewise a structure with a known alignment of just a byte
     and a field split across two bytes.
     Such cases are not supposed to be able to occur.  */

  if (REG_P (op0) || GET_CODE (op0) == SUBREG)
    {
      gcc_assert (!offset);
      /* Special treatment for a bit field split across two registers.  */
      if (bitsize + bitpos > BITS_PER_WORD)
        {
          store_split_bit_field (op0, bitsize, bitpos, value);
          return;
        }
    }
  else
    {
      /* Get the proper mode to use for this field.  We want a mode that
         includes the entire field.  If such a mode would be larger than
         a word, we won't be doing the extraction the normal way.
         We don't want a mode bigger than the destination.  */

      mode = GET_MODE (op0);
      if (GET_MODE_BITSIZE (mode) == 0
          || GET_MODE_BITSIZE (mode) > GET_MODE_BITSIZE (word_mode))
        mode = word_mode;
      mode = get_best_mode (bitsize, bitpos + offset * BITS_PER_UNIT,
                            MEM_ALIGN (op0), mode, MEM_VOLATILE_P (op0));

      if (mode == VOIDmode)
        {
          /* The only way this should occur is if the field spans word
             boundaries.  */
          store_split_bit_field (op0, bitsize, bitpos + offset * BITS_PER_UNIT,
                                 value);
          return;
        }

      total_bits = GET_MODE_BITSIZE (mode);

      /* Make sure bitpos is valid for the chosen mode.  Adjust BITPOS to
         be in the range 0 to total_bits-1, and put any excess bytes in
         OFFSET.  */
      if (bitpos >= total_bits)
        {
          offset += (bitpos / total_bits) * (total_bits / BITS_PER_UNIT);
          bitpos -= ((bitpos / total_bits) * (total_bits / BITS_PER_UNIT)
                     * BITS_PER_UNIT);
        }

      /* Get ref to an aligned byte, halfword, or word containing the field.
         Adjust BITPOS to be position within a word,
         and OFFSET to be the offset of that word.
         Then alter OP0 to refer to that word.  */
      bitpos += (offset % (total_bits / BITS_PER_UNIT)) * BITS_PER_UNIT;
      offset -= (offset % (total_bits / BITS_PER_UNIT));
      op0 = adjust_address (op0, mode, offset);
    }

  mode = GET_MODE (op0);
  /* Now MODE is either some integral mode for a MEM as OP0,
     or is a full-word for a REG as OP0.  TOTAL_BITS corresponds.
     The bit field is contained entirely within OP0.
     BITPOS is the starting bit number within OP0.
     (OP0's mode may actually be narrower than MODE.)  */

  if (BYTES_BIG_ENDIAN)
    /* BITPOS is the distance between our msb
       and that of the containing datum.
       Convert it to the distance from the lsb.  */
    bitpos = total_bits - bitsize - bitpos;

  /* Now BITPOS is always the distance between our lsb
     and that of OP0.  */

  /* Shift VALUE left by BITPOS bits.  If VALUE is not constant,
     we must first convert its mode to MODE.  */

  if (GET_CODE (value) == CONST_INT)
    {
      HOST_WIDE_INT v = INTVAL (value);

      if (bitsize < HOST_BITS_PER_WIDE_INT)
        v &= ((HOST_WIDE_INT) 1 << bitsize) - 1;

      if (v == 0)
        all_zero = 1;
      else if ((bitsize < HOST_BITS_PER_WIDE_INT
                && v == ((HOST_WIDE_INT) 1 << bitsize) - 1)
               || (bitsize == HOST_BITS_PER_WIDE_INT && v == -1))
        all_one = 1;

      value = lshift_value (mode, value, bitpos, bitsize);
    }
  else
    {
      int must_and = (GET_MODE_BITSIZE (GET_MODE (value)) != bitsize
                      && bitpos + bitsize != GET_MODE_BITSIZE (mode));

      if (GET_MODE (value) != mode)
        {
          if ((REG_P (value) || GET_CODE (value) == SUBREG)
              && GET_MODE_SIZE (mode) < GET_MODE_SIZE (GET_MODE (value)))
            value = gen_lowpart (mode, value);
          else
            value = convert_to_mode (mode, value, 1);
        }

      if (must_and)
        value = expand_binop (mode, and_optab, value,
                              mask_rtx (mode, 0, bitsize, 0),
                              NULL_RTX, 1, OPTAB_LIB_WIDEN);
      if (bitpos > 0)
        value = expand_shift (LSHIFT_EXPR, mode, value,
                              build_int_cst (NULL_TREE, bitpos), NULL_RTX, 1);
    }
  /* Now clear the chosen bits in OP0,
     except that if VALUE is -1 we need not bother.  */

  subtarget = (REG_P (op0) || ! flag_force_mem) ? op0 : 0;

  if (! all_one)
    {
      temp = expand_binop (mode, and_optab, op0,
                           mask_rtx (mode, bitpos, bitsize, 1),
                           subtarget, 1, OPTAB_LIB_WIDEN);
      subtarget = temp;
    }
  else
    temp = op0;

  /* Now logical-or VALUE into OP0, unless it is zero.  */

  if (! all_zero)
    temp = expand_binop (mode, ior_optab, temp, value,
                         subtarget, 1, OPTAB_LIB_WIDEN);

  if (op0 != temp)
    emit_move_insn (op0, temp);
}
/* Store a bit field that is split across multiple accessible memory objects.

   OP0 is the REG, SUBREG or MEM rtx for the first of the objects.
   BITSIZE is the field width; BITPOS the position of its first bit
   within the word.
   VALUE is the value to store.

   This does not yet handle fields wider than BITS_PER_WORD.  */

static void
store_split_bit_field (rtx op0, unsigned HOST_WIDE_INT bitsize,
                       unsigned HOST_WIDE_INT bitpos, rtx value)
{
  unsigned int unit;
  unsigned int bitsdone = 0;
  /* Make sure UNIT isn't larger than BITS_PER_WORD, we can only handle that
     much at a time.  */
  if (REG_P (op0) || GET_CODE (op0) == SUBREG)
    unit = BITS_PER_WORD;
  else
    unit = MIN (MEM_ALIGN (op0), BITS_PER_WORD);

  /* If VALUE is a constant other than a CONST_INT, get it into a register in
     WORD_MODE.  If we can do this using gen_lowpart_common, do so.  Note
     that VALUE might be a floating-point constant.  */
  if (CONSTANT_P (value) && GET_CODE (value) != CONST_INT)
    {
      rtx word = gen_lowpart_common (word_mode, value);

      if (word && (value != word))
        value = word;
      else
        value = gen_lowpart_common (word_mode,
                                    force_reg (GET_MODE (value) != VOIDmode
                                               ? GET_MODE (value)
                                               : word_mode, value));
    }
  while (bitsdone < bitsize)
    {
      unsigned HOST_WIDE_INT thissize;
      rtx part, word;
      unsigned HOST_WIDE_INT thispos;
      unsigned HOST_WIDE_INT offset;

      offset = (bitpos + bitsdone) / unit;
      thispos = (bitpos + bitsdone) % unit;

      /* THISSIZE must not overrun a word boundary.  Otherwise,
         store_fixed_bit_field will call us again, and we will mutually
         recurse forever.  */
      thissize = MIN (bitsize - bitsdone, BITS_PER_WORD);
      thissize = MIN (thissize, unit - thispos);
      if (BYTES_BIG_ENDIAN)
        {
          int total_bits;

          /* We must do an endian conversion exactly the same way as it is
             done in extract_bit_field, so that the two calls to
             extract_fixed_bit_field will have comparable arguments.  */
          if (!MEM_P (value) || GET_MODE (value) == BLKmode)
            total_bits = BITS_PER_WORD;
          else
            total_bits = GET_MODE_BITSIZE (GET_MODE (value));

          /* Fetch successively less significant portions.  */
          if (GET_CODE (value) == CONST_INT)
            part = GEN_INT (((unsigned HOST_WIDE_INT) (INTVAL (value))
                             >> (bitsize - bitsdone - thissize))
                            & (((HOST_WIDE_INT) 1 << thissize) - 1));
          else
            /* The args are chosen so that the last part includes the
               lsb.  Give extract_bit_field the value it needs (with
               endianness compensation) to fetch the piece we want.  */
            part = extract_fixed_bit_field (word_mode, value, 0, thissize,
                                            total_bits - bitsize + bitsdone,
                                            NULL_RTX, 1);
        }
      else
        {
          /* Fetch successively more significant portions.  */
          if (GET_CODE (value) == CONST_INT)
            part = GEN_INT (((unsigned HOST_WIDE_INT) (INTVAL (value))
                             >> bitsdone)
                            & (((HOST_WIDE_INT) 1 << thissize) - 1));
          else
            part = extract_fixed_bit_field (word_mode, value, 0, thissize,
                                            bitsdone, NULL_RTX, 1);
        }
      /* If OP0 is a register, then handle OFFSET here.

         When handling multiword bitfields, extract_bit_field may pass
         down a word_mode SUBREG of a larger REG for a bitfield that actually
         crosses a word boundary.  Thus, for a SUBREG, we must find
         the current word starting from the base register.  */
      if (GET_CODE (op0) == SUBREG)
        {
          int word_offset = (SUBREG_BYTE (op0) / UNITS_PER_WORD) + offset;
          word = operand_subword_force (SUBREG_REG (op0), word_offset,
                                        GET_MODE (SUBREG_REG (op0)));
          offset = 0;
        }
      else if (REG_P (op0))
        {
          word = operand_subword_force (op0, offset, GET_MODE (op0));
          offset = 0;
        }
      else
        word = op0;

      /* OFFSET is in UNITs, and UNIT is in bits.
         store_fixed_bit_field wants offset in bytes.  */
      store_fixed_bit_field (word, offset * unit / BITS_PER_UNIT, thissize,
                             thispos, part);
      bitsdone += thissize;
    }
}
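
/* Illustrative walk-through (an assumption, not original code): a
   10-bit field at bit position 28 of a 32-bit-aligned MEM is handled
   in two passes,

     pass 1: offset 0, thispos 28, thissize 4   (bits 28..31)
     pass 2: offset 1, thispos 0,  thissize 6   (bits 32..37)

   so that each piece handed to store_fixed_bit_field lies entirely
   within one aligned unit.  */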
/* Generate code to extract a byte-field from STR_RTX
   containing BITSIZE bits, starting at BITNUM,
   and put it in TARGET if possible (if TARGET is nonzero).
   Regardless of TARGET, we return the rtx for where the value is placed.

   STR_RTX is the structure containing the byte (a REG or MEM).
   UNSIGNEDP is nonzero if this is an unsigned bit field.
   MODE is the natural mode of the field value once extracted.
   TMODE is the mode the caller would like the value to have;
   but the value may be returned with type MODE instead.

   TOTAL_SIZE is the size in bytes of the containing structure,
   or -1 if varying.

   If a TARGET is specified and we can store in it at no extra cost,
   we do so, and return TARGET.
   Otherwise, we return a REG of mode TMODE or MODE, with TMODE preferred
   if they are equally easy.  */
rtx
extract_bit_field (rtx str_rtx, unsigned HOST_WIDE_INT bitsize,
                   unsigned HOST_WIDE_INT bitnum, int unsignedp, rtx target,
                   enum machine_mode mode, enum machine_mode tmode)
{
  unsigned int unit
    = (MEM_P (str_rtx)) ? BITS_PER_UNIT : BITS_PER_WORD;
  unsigned HOST_WIDE_INT offset = bitnum / unit;
  unsigned HOST_WIDE_INT bitpos = bitnum % unit;
  rtx op0 = str_rtx;
  rtx spec_target = target;
  rtx spec_target_subreg = 0;
  enum machine_mode int_mode;
  enum machine_mode extv_mode = mode_for_extraction (EP_extv, 0);
  enum machine_mode extzv_mode = mode_for_extraction (EP_extzv, 0);
  enum machine_mode mode1;
  int byte_offset;

  if (tmode == VOIDmode)
    tmode = mode;
  while (GET_CODE (op0) == SUBREG)
    {
      bitpos += SUBREG_BYTE (op0) * BITS_PER_UNIT;
      if (bitpos > unit)
        {
          offset += (bitpos / unit);
          bitpos %= unit;
        }
      op0 = SUBREG_REG (op0);
    }

  if (REG_P (op0)
      && mode == GET_MODE (op0)
      && bitnum == 0
      && bitsize == GET_MODE_BITSIZE (GET_MODE (op0)))
    {
      /* We're trying to extract a full register from itself.  */
      return op0;
    }
  /* Use vec_extract patterns for extracting parts of vectors whenever
     available.  */
  if (VECTOR_MODE_P (GET_MODE (op0))
      && !MEM_P (op0)
      && (vec_extract_optab->handlers[GET_MODE (op0)].insn_code
          != CODE_FOR_nothing)
      && ((bitnum + bitsize - 1) / GET_MODE_BITSIZE (GET_MODE_INNER (GET_MODE (op0)))
          == bitnum / GET_MODE_BITSIZE (GET_MODE_INNER (GET_MODE (op0)))))
    {
      enum machine_mode outermode = GET_MODE (op0);
      enum machine_mode innermode = GET_MODE_INNER (outermode);
      int icode = (int) vec_extract_optab->handlers[outermode].insn_code;
      unsigned HOST_WIDE_INT pos = bitnum / GET_MODE_BITSIZE (innermode);
      rtx rtxpos = GEN_INT (pos);
      rtx src = op0;
      rtx dest = NULL, pat, seq;
      enum machine_mode mode0 = insn_data[icode].operand[0].mode;
      enum machine_mode mode1 = insn_data[icode].operand[1].mode;
      enum machine_mode mode2 = insn_data[icode].operand[2].mode;

      if (innermode == tmode || innermode == mode)
        dest = target;

      if (dest == NULL)
        dest = gen_reg_rtx (innermode);

      start_sequence ();

      if (! (*insn_data[icode].operand[0].predicate) (dest, mode0))
        dest = copy_to_mode_reg (mode0, dest);

      if (! (*insn_data[icode].operand[1].predicate) (src, mode1))
        src = copy_to_mode_reg (mode1, src);

      if (! (*insn_data[icode].operand[2].predicate) (rtxpos, mode2))
        rtxpos = copy_to_mode_reg (mode1, rtxpos);

      /* We could handle this, but we should always be called with a pseudo
         for our targets and all insns should take them as outputs.  */
      gcc_assert ((*insn_data[icode].operand[0].predicate) (dest, mode0)
                  && (*insn_data[icode].operand[1].predicate) (src, mode1)
                  && (*insn_data[icode].operand[2].predicate) (rtxpos, mode2));

      pat = GEN_FCN (icode) (dest, src, rtxpos);
      seq = get_insns ();
      end_sequence ();
      if (pat)
        {
          emit_insn (seq);
          emit_insn (pat);
          return dest;
        }
    }
  /* Make sure we are playing with integral modes.  Pun with subregs
     if we aren't.  */
  {
    enum machine_mode imode = int_mode_for_mode (GET_MODE (op0));
    if (imode != GET_MODE (op0))
      {
        if (MEM_P (op0))
          op0 = adjust_address (op0, imode, 0);
        else
          {
            gcc_assert (imode != BLKmode);
            op0 = gen_lowpart (imode, op0);
          }
      }
  }

  /* We may be accessing data outside the field, which means
     we can alias adjacent data.  */
  if (MEM_P (op0))
    {
      op0 = shallow_copy_rtx (op0);
      set_mem_alias_set (op0, 0);
      set_mem_expr (op0, 0);
    }
  /* Extraction of a full-word or multi-word value from a structure
     in a register or aligned memory can be done with just a SUBREG.
     A subword value in the least significant part of a register
     can also be extracted with a SUBREG.  For this, we need the
     byte offset of the value in op0.  */

  byte_offset = bitpos / BITS_PER_UNIT + offset * UNITS_PER_WORD;

  /* If OP0 is a register, BITPOS must count within a word.
     But as we have it, it counts within whatever size OP0 now has.
     On a bigendian machine, these are not the same, so convert.  */
  if (BYTES_BIG_ENDIAN
      && !MEM_P (op0)
      && unit > GET_MODE_BITSIZE (GET_MODE (op0)))
    bitpos += unit - GET_MODE_BITSIZE (GET_MODE (op0));

  /* ??? We currently assume TARGET is at least as big as BITSIZE.
     If that's wrong, the solution is to test for it and set TARGET to 0
     if needed.  */

  /* Only scalar integer modes can be converted via subregs.  There is an
     additional problem for FP modes here in that they can have a precision
     which is different from the size.  mode_for_size uses precision, but
     we want a mode based on the size, so we must avoid calling it for FP
     modes.  */
  mode1 = (SCALAR_INT_MODE_P (tmode)
           ? mode_for_size (bitsize, GET_MODE_CLASS (tmode), 0)
           : mode);

  if (((bitsize >= BITS_PER_WORD && bitsize == GET_MODE_BITSIZE (mode)
        && bitpos % BITS_PER_WORD == 0)
       || (mode1 != BLKmode
           /* ??? The big endian test here is wrong.  This is correct
              if the value is in a register, and if mode_for_size is not
              the same mode as op0.  This causes us to get unnecessarily
              inefficient code from the Thumb port when -mbig-endian.  */
           && (BYTES_BIG_ENDIAN
               ? bitpos + bitsize == BITS_PER_WORD
               : bitpos == 0)))
      && ((!MEM_P (op0)
           && TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
                                     GET_MODE_BITSIZE (GET_MODE (op0)))
           && GET_MODE_SIZE (mode1) != 0
           && byte_offset % GET_MODE_SIZE (mode1) == 0)
          || (MEM_P (op0)
              && (! SLOW_UNALIGNED_ACCESS (mode, MEM_ALIGN (op0))
                  || (offset * BITS_PER_UNIT % bitsize == 0
                      && MEM_ALIGN (op0) % bitsize == 0)))))
    {
      if (mode1 != GET_MODE (op0))
        {
          if (MEM_P (op0))
            op0 = adjust_address (op0, mode1, offset);
          else
            {
              rtx sub = simplify_gen_subreg (mode1, op0, GET_MODE (op0),
                                             byte_offset);
              if (sub == NULL)
                goto no_subreg_mode_swap;
              op0 = sub;
            }
        }
      if (mode1 != mode)
        return convert_to_mode (tmode, op0, unsignedp);
      return op0;
    }
 no_subreg_mode_swap:
  /* Handle fields bigger than a word.  */

  if (bitsize > BITS_PER_WORD)
    {
      /* Here we transfer the words of the field
         in the order least significant first.
         This is because the most significant word is the one which may
         be less than full.  */

      unsigned int nwords = (bitsize + (BITS_PER_WORD - 1)) / BITS_PER_WORD;
      unsigned int i;

      if (target == 0 || !REG_P (target))
        target = gen_reg_rtx (mode);

      /* Indicate for flow that the entire target reg is being set.  */
      emit_insn (gen_rtx_CLOBBER (VOIDmode, target));

      for (i = 0; i < nwords; i++)
        {
          /* If I is 0, use the low-order word in both field and target;
             if I is 1, use the next to lowest word; and so on.  */
          /* Word number in TARGET to use.  */
          unsigned int wordnum
            = (WORDS_BIG_ENDIAN
               ? GET_MODE_SIZE (GET_MODE (target)) / UNITS_PER_WORD - i - 1
               : i);
          /* Offset from start of field in OP0.  */
          unsigned int bit_offset = (WORDS_BIG_ENDIAN
                                     ? MAX (0, ((int) bitsize - ((int) i + 1)
                                                * (int) BITS_PER_WORD))
                                     : (int) i * BITS_PER_WORD);
          rtx target_part = operand_subword (target, wordnum, 1, VOIDmode);
          rtx result_part
            = extract_bit_field (op0, MIN (BITS_PER_WORD,
                                           bitsize - i * BITS_PER_WORD),
                                 bitnum + bit_offset, 1, target_part, mode,
                                 word_mode);

          gcc_assert (target_part);

          if (result_part != target_part)
            emit_move_insn (target_part, result_part);
        }

      if (unsignedp)
        {
          /* Unless we've filled TARGET, the upper regs in a multi-reg value
             need to be zero'd out.  */
          if (GET_MODE_SIZE (GET_MODE (target)) > nwords * UNITS_PER_WORD)
            {
              unsigned int i, total_words;

              total_words = GET_MODE_SIZE (GET_MODE (target)) / UNITS_PER_WORD;
              for (i = nwords; i < total_words; i++)
                emit_move_insn
                  (operand_subword (target,
                                    WORDS_BIG_ENDIAN ? total_words - i - 1 : i,
                                    1, VOIDmode),
                   const0_rtx);
            }
          return target;
        }

      /* Signed bit field: sign-extend with two arithmetic shifts.  */
      target = expand_shift (LSHIFT_EXPR, mode, target,
                             build_int_cst (NULL_TREE,
                                            GET_MODE_BITSIZE (mode) - bitsize),
                             NULL_RTX, 0);
      return expand_shift (RSHIFT_EXPR, mode, target,
                           build_int_cst (NULL_TREE,
                                          GET_MODE_BITSIZE (mode) - bitsize),
                           NULL_RTX, 0);
    }
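
  /* Illustrative note (an assumption, not original code): the pair of
     shifts above is the classic sign-extension idiom; in C terms,

       x = (x << (width - bitsize)) >> (width - bitsize);

     where the left shift parks the field's msb in the register's msb
     and the arithmetic right shift drags copies of it back down.  */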
  /* From here on we know the desired field is smaller than a word.  */

  /* Check if there is a correspondingly-sized integer field, so we can
     safely extract it as one size of integer, if necessary; then
     truncate or extend to the size that is wanted; then use SUBREGs or
     convert_to_mode to get one of the modes we really wanted.  */

  int_mode = int_mode_for_mode (tmode);
  if (int_mode == BLKmode)
    int_mode = int_mode_for_mode (mode);
  /* Should probably push op0 out to memory and then do a load.  */
  gcc_assert (int_mode != BLKmode);
  /* OFFSET is the number of words or bytes (UNIT says which)
     from STR_RTX to the first word or byte containing part of the field.  */
  if (!MEM_P (op0))
    {
      if (offset != 0
          || GET_MODE_SIZE (GET_MODE (op0)) > UNITS_PER_WORD)
        {
          if (!REG_P (op0))
            op0 = copy_to_reg (op0);
          op0 = gen_rtx_SUBREG (mode_for_size (BITS_PER_WORD, MODE_INT, 0),
                                op0, (offset * UNITS_PER_WORD));
        }
      offset = 0;
    }
  /* Now OFFSET is nonzero only for memory operands.  */

  if (unsignedp)
    {
      if (HAVE_extzv
          && (GET_MODE_BITSIZE (extzv_mode) >= bitsize)
          && ! ((REG_P (op0) || GET_CODE (op0) == SUBREG)
                && (bitsize + bitpos > GET_MODE_BITSIZE (extzv_mode))))
        {
          unsigned HOST_WIDE_INT xbitpos = bitpos, xoffset = offset;
          rtx bitsize_rtx, bitpos_rtx;
          rtx last = get_last_insn ();
          rtx xop0 = op0;
          rtx xtarget = target;
          rtx xspec_target = spec_target;
          rtx xspec_target_subreg = spec_target_subreg;
          rtx pat;
          enum machine_mode maxmode = mode_for_extraction (EP_extzv, 0);

          if (MEM_P (xop0))
            {
              int save_volatile_ok = volatile_ok;
              volatile_ok = 1;

              /* Is the memory operand acceptable?  */
              if (! ((*insn_data[(int) CODE_FOR_extzv].operand[1].predicate)
                     (xop0, GET_MODE (xop0))))
                {
                  /* No, load into a reg and extract from there.  */
                  enum machine_mode bestmode;
                  /* Get the mode to use for inserting into this field.  If
                     OP0 is BLKmode, get the smallest mode consistent with the
                     alignment.  If OP0 is a non-BLKmode object that is no
                     wider than MAXMODE, use its mode.  Otherwise, use the
                     smallest mode containing the field.  */

                  if (GET_MODE (xop0) == BLKmode
                      || (GET_MODE_SIZE (GET_MODE (op0))
                          > GET_MODE_SIZE (maxmode)))
                    bestmode = get_best_mode (bitsize, bitnum,
                                              MEM_ALIGN (xop0), maxmode,
                                              MEM_VOLATILE_P (xop0));
                  else
                    bestmode = GET_MODE (xop0);

                  if (bestmode == VOIDmode
                      || (SLOW_UNALIGNED_ACCESS (bestmode, MEM_ALIGN (xop0))
                          && GET_MODE_BITSIZE (bestmode) > MEM_ALIGN (xop0)))
                    goto extzv_loses;

                  /* Compute offset as multiple of this unit,
                     counting in bytes.  */
                  unit = GET_MODE_BITSIZE (bestmode);
                  xoffset = (bitnum / unit) * GET_MODE_SIZE (bestmode);
                  xbitpos = bitnum % unit;
                  xop0 = adjust_address (xop0, bestmode, xoffset);

                  /* Fetch it to a register in that size.  */
                  xop0 = force_reg (bestmode, xop0);

                  /* XBITPOS counts within UNIT, which is what is expected.  */
                }
              else
                /* Get ref to first byte containing part of the field.  */
                xop0 = adjust_address (xop0, byte_mode, xoffset);

              volatile_ok = save_volatile_ok;
            }
          /* If op0 is a register, we need it in MAXMODE (which is usually
             SImode) to make it acceptable to the format of extzv.  */
          if (GET_CODE (xop0) == SUBREG && GET_MODE (xop0) != maxmode)
            goto extzv_loses;
          if (REG_P (xop0) && GET_MODE (xop0) != maxmode)
            xop0 = gen_rtx_SUBREG (maxmode, xop0, 0);

          /* On big-endian machines, we count bits from the most significant.
             If the bit field insn does not, we must invert.  */
          if (BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
            xbitpos = unit - bitsize - xbitpos;

          /* Now convert from counting within UNIT to counting in MAXMODE.  */
          if (BITS_BIG_ENDIAN && !MEM_P (xop0))
            xbitpos += GET_MODE_BITSIZE (maxmode) - unit;

          unit = GET_MODE_BITSIZE (maxmode);
          if (xtarget == 0
              || (flag_force_mem && MEM_P (xtarget)))
            xtarget = xspec_target = gen_reg_rtx (tmode);

          if (GET_MODE (xtarget) != maxmode)
            {
              if (REG_P (xtarget))
                {
                  int wider = (GET_MODE_SIZE (maxmode)
                               > GET_MODE_SIZE (GET_MODE (xtarget)));
                  xtarget = gen_lowpart (maxmode, xtarget);
                  if (wider)
                    xspec_target_subreg = xtarget;
                }
              else
                xtarget = gen_reg_rtx (maxmode);
            }

          /* If this machine's extzv insists on a register target,
             make sure we have one.  */
          if (! ((*insn_data[(int) CODE_FOR_extzv].operand[0].predicate)
                 (xtarget, maxmode)))
            xtarget = gen_reg_rtx (maxmode);

          bitsize_rtx = GEN_INT (bitsize);
          bitpos_rtx = GEN_INT (xbitpos);

          pat = gen_extzv (xtarget, xop0, bitsize_rtx, bitpos_rtx);
          if (pat)
            {
              emit_insn (pat);
              target = xtarget;
              spec_target = xspec_target;
              spec_target_subreg = xspec_target_subreg;
            }
          else
            {
              delete_insns_since (last);
              target = extract_fixed_bit_field (int_mode, op0, offset, bitsize,
                                                bitpos, target, 1);
            }
        }
      else
      extzv_loses:
        target = extract_fixed_bit_field (int_mode, op0, offset, bitsize,
                                          bitpos, target, 1);
    }
  else
    {
      if (HAVE_extv
          && (GET_MODE_BITSIZE (extv_mode) >= bitsize)
          && ! ((REG_P (op0) || GET_CODE (op0) == SUBREG)
                && (bitsize + bitpos > GET_MODE_BITSIZE (extv_mode))))
        {
          int xbitpos = bitpos, xoffset = offset;
          rtx bitsize_rtx, bitpos_rtx;
          rtx last = get_last_insn ();
          rtx xop0 = op0, xtarget = target;
          rtx xspec_target = spec_target;
          rtx xspec_target_subreg = spec_target_subreg;
          rtx pat;
          enum machine_mode maxmode = mode_for_extraction (EP_extv, 0);
          if (MEM_P (xop0))
            {
              /* Is the memory operand acceptable?  */
              if (! ((*insn_data[(int) CODE_FOR_extv].operand[1].predicate)
                     (xop0, GET_MODE (xop0))))
                {
                  /* No, load into a reg and extract from there.  */
                  enum machine_mode bestmode;

                  /* Get the mode to use for inserting into this field.  If
                     OP0 is BLKmode, get the smallest mode consistent with the
                     alignment.  If OP0 is a non-BLKmode object that is no
                     wider than MAXMODE, use its mode.  Otherwise, use the
                     smallest mode containing the field.  */

                  if (GET_MODE (xop0) == BLKmode
                      || (GET_MODE_SIZE (GET_MODE (op0))
                          > GET_MODE_SIZE (maxmode)))
                    bestmode = get_best_mode (bitsize, bitnum,
                                              MEM_ALIGN (xop0), maxmode,
                                              MEM_VOLATILE_P (xop0));
                  else
                    bestmode = GET_MODE (xop0);

                  if (bestmode == VOIDmode
                      || (SLOW_UNALIGNED_ACCESS (bestmode, MEM_ALIGN (xop0))
                          && GET_MODE_BITSIZE (bestmode) > MEM_ALIGN (xop0)))
                    goto extv_loses;

                  /* Compute offset as multiple of this unit,
                     counting in bytes.  */
                  unit = GET_MODE_BITSIZE (bestmode);
                  xoffset = (bitnum / unit) * GET_MODE_SIZE (bestmode);
                  xbitpos = bitnum % unit;
                  xop0 = adjust_address (xop0, bestmode, xoffset);

                  /* Fetch it to a register in that size.  */
                  xop0 = force_reg (bestmode, xop0);

                  /* XBITPOS counts within UNIT, which is what is expected.  */
                }
              else
                /* Get ref to first byte containing part of the field.  */
                xop0 = adjust_address (xop0, byte_mode, xoffset);
            }
          /* If op0 is a register, we need it in MAXMODE (which is usually
             SImode) to make it acceptable to the format of extv.  */
          if (GET_CODE (xop0) == SUBREG && GET_MODE (xop0) != maxmode)
            goto extv_loses;
          if (REG_P (xop0) && GET_MODE (xop0) != maxmode)
            xop0 = gen_rtx_SUBREG (maxmode, xop0, 0);

          /* On big-endian machines, we count bits from the most significant.
             If the bit field insn does not, we must invert.  */
          if (BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
            xbitpos = unit - bitsize - xbitpos;

          /* XBITPOS counts within a size of UNIT.
             Adjust to count within a size of MAXMODE.  */
          if (BITS_BIG_ENDIAN && !MEM_P (xop0))
            xbitpos += (GET_MODE_BITSIZE (maxmode) - unit);

          unit = GET_MODE_BITSIZE (maxmode);
          if (xtarget == 0
              || (flag_force_mem && MEM_P (xtarget)))
            xtarget = xspec_target = gen_reg_rtx (tmode);

          if (GET_MODE (xtarget) != maxmode)
            {
              if (REG_P (xtarget))
                {
                  int wider = (GET_MODE_SIZE (maxmode)
                               > GET_MODE_SIZE (GET_MODE (xtarget)));
                  xtarget = gen_lowpart (maxmode, xtarget);
                  if (wider)
                    xspec_target_subreg = xtarget;
                }
              else
                xtarget = gen_reg_rtx (maxmode);
            }

          /* If this machine's extv insists on a register target,
             make sure we have one.  */
          if (! ((*insn_data[(int) CODE_FOR_extv].operand[0].predicate)
                 (xtarget, maxmode)))
            xtarget = gen_reg_rtx (maxmode);

          bitsize_rtx = GEN_INT (bitsize);
          bitpos_rtx = GEN_INT (xbitpos);

          pat = gen_extv (xtarget, xop0, bitsize_rtx, bitpos_rtx);
          if (pat)
            {
              emit_insn (pat);
              target = xtarget;
              spec_target = xspec_target;
              spec_target_subreg = xspec_target_subreg;
            }
          else
            {
              delete_insns_since (last);
              target = extract_fixed_bit_field (int_mode, op0, offset, bitsize,
                                                bitpos, target, 0);
            }
        }
      else
      extv_loses:
        target = extract_fixed_bit_field (int_mode, op0, offset, bitsize,
                                          bitpos, target, 0);
    }

  if (target == spec_target)
    return target;
  if (target == spec_target_subreg)
    return spec_target;
  if (GET_MODE (target) != tmode && GET_MODE (target) != mode)
    {
      /* If the target mode is complex, then extract the two scalar elements
         from the value now.  Creating (subreg:SC (reg:DI) 0), as we would do
         with the clause below, will cause gen_realpart or gen_imagpart to
         fail, since those functions must return lvalues.  */
      if (COMPLEX_MODE_P (tmode))
        {
          rtx realpart, imagpart;
          enum machine_mode itmode = GET_MODE_INNER (tmode);

          target = convert_to_mode (mode_for_size (GET_MODE_BITSIZE (tmode),
                                                   MODE_INT, 0),
                                    target, unsignedp);
          realpart = extract_bit_field (target, GET_MODE_BITSIZE (itmode), 0,
                                        unsignedp, NULL, itmode, itmode);
          imagpart = extract_bit_field (target, GET_MODE_BITSIZE (itmode),
                                        GET_MODE_BITSIZE (itmode), unsignedp,
                                        NULL, itmode, itmode);

          return gen_rtx_CONCAT (tmode, realpart, imagpart);
        }

      /* If the target mode is not a scalar integral, first convert to the
         integer mode of that size and then access it as a floating-point
         value via a SUBREG.  */
      if (!SCALAR_INT_MODE_P (tmode))
        {
          enum machine_mode smode
            = mode_for_size (GET_MODE_BITSIZE (tmode), MODE_INT, 0);
          target = convert_to_mode (smode, target, unsignedp);
          target = force_reg (smode, target);
          return gen_lowpart (tmode, target);
        }

      return convert_to_mode (tmode, target, unsignedp);
    }
  return target;
}
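
/* Illustrative usage (an assumption, not original code): reading the
   7-bit field b from the struct in the earlier example is roughly

     rtx v = extract_bit_field (s_rtx, 7, 3, 1, NULL_RTX,
                                QImode, QImode);

   where UNSIGNEDP == 1 routes the request through the extzv
   (zero-extending) path above.  */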
/* Extract a bit field using shifts and boolean operations.
   Returns an rtx to represent the value.
   OP0 addresses a register (word) or memory (byte).
   BITPOS says which bit within the word or byte the bit field starts in.
   OFFSET says how many bytes farther the bit field starts;
     it is 0 if OP0 is a register.
   BITSIZE says how many bits long the bit field is.
     (If OP0 is a register, it may be narrower than a full word,
      but BITPOS still counts within a full word,
      which is significant on bigendian machines.)

   UNSIGNEDP is nonzero for an unsigned bit field (don't sign-extend value).
   If TARGET is nonzero, attempts to store the value there
   and return TARGET, but this is not guaranteed.
   If TARGET is not used, create a pseudo-reg of mode TMODE for the value.  */

static rtx
extract_fixed_bit_field (enum machine_mode tmode, rtx op0,
                         unsigned HOST_WIDE_INT offset,
                         unsigned HOST_WIDE_INT bitsize,
                         unsigned HOST_WIDE_INT bitpos, rtx target,
                         int unsignedp)
{
  unsigned int total_bits = BITS_PER_WORD;
  enum machine_mode mode;
  if (GET_CODE (op0) == SUBREG || REG_P (op0))
    {
      /* Special treatment for a bit field split across two registers.  */
      if (bitsize + bitpos > BITS_PER_WORD)
        return extract_split_bit_field (op0, bitsize, bitpos, unsignedp);
    }
  else
    {
      /* Get the proper mode to use for this field.  We want a mode that
         includes the entire field.  If such a mode would be larger than
         a word, we won't be doing the extraction the normal way.  */

      mode = get_best_mode (bitsize, bitpos + offset * BITS_PER_UNIT,
                            MEM_ALIGN (op0), word_mode, MEM_VOLATILE_P (op0));

      if (mode == VOIDmode)
        /* The only way this should occur is if the field spans word
           boundaries.  */
        return extract_split_bit_field (op0, bitsize,
                                        bitpos + offset * BITS_PER_UNIT,
                                        unsignedp);

      total_bits = GET_MODE_BITSIZE (mode);

      /* Make sure bitpos is valid for the chosen mode.  Adjust BITPOS to
         be in the range 0 to total_bits-1, and put any excess bytes in
         OFFSET.  */
      if (bitpos >= total_bits)
        {
          offset += (bitpos / total_bits) * (total_bits / BITS_PER_UNIT);
          bitpos -= ((bitpos / total_bits) * (total_bits / BITS_PER_UNIT)
                     * BITS_PER_UNIT);
        }

      /* Get ref to an aligned byte, halfword, or word containing the field.
         Adjust BITPOS to be position within a word,
         and OFFSET to be the offset of that word.
         Then alter OP0 to refer to that word.  */
      bitpos += (offset % (total_bits / BITS_PER_UNIT)) * BITS_PER_UNIT;
      offset -= (offset % (total_bits / BITS_PER_UNIT));
      op0 = adjust_address (op0, mode, offset);
    }

  mode = GET_MODE (op0);
  if (BYTES_BIG_ENDIAN)
    /* BITPOS is the distance between our msb and that of OP0.
       Convert it to the distance from the lsb.  */
    bitpos = total_bits - bitsize - bitpos;

  /* Now BITPOS is always the distance between the field's lsb and that of OP0.
     We have reduced the big-endian case to the little-endian case.  */

  if (unsignedp)
    {
      if (bitpos)
        {
          /* If the field does not already start at the lsb,
             shift it so it does.  */
          tree amount = build_int_cst (NULL_TREE, bitpos);
          /* Maybe propagate the target for the shift.  */
          /* But not if we will return it--could confuse integrate.c.  */
          rtx subtarget = (target != 0 && REG_P (target) ? target : 0);
          if (tmode != mode) subtarget = 0;
          op0 = expand_shift (RSHIFT_EXPR, mode, op0, amount, subtarget, 1);
        }
      /* Convert the value to the desired mode.  */
      if (mode != tmode)
        op0 = convert_to_mode (tmode, op0, 1);

      /* Unless the msb of the field used to be the msb when we shifted,
         mask out the upper bits.  */

      if (GET_MODE_BITSIZE (mode) != bitpos + bitsize)
        return expand_binop (GET_MODE (op0), and_optab, op0,
                             mask_rtx (GET_MODE (op0), 0, bitsize, 0),
                             target, 1, OPTAB_LIB_WIDEN);
      return op0;
    }
  /* To extract a signed bit-field, first shift its msb to the msb of the word,
     then arithmetic-shift its lsb to the lsb of the word.  */
  op0 = force_reg (mode, op0);
  if (mode != tmode)
    target = 0;

  /* Find the narrowest integer mode that contains the field.  */

  for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != VOIDmode;
       mode = GET_MODE_WIDER_MODE (mode))
    if (GET_MODE_BITSIZE (mode) >= bitsize + bitpos)
      {
        op0 = convert_to_mode (mode, op0, 0);
        break;
      }

  if (GET_MODE_BITSIZE (mode) != (bitsize + bitpos))
    {
      tree amount
        = build_int_cst (NULL_TREE,
                         GET_MODE_BITSIZE (mode) - (bitsize + bitpos));
      /* Maybe propagate the target for the shift.  */
      rtx subtarget = (target != 0 && REG_P (target) ? target : 0);

      op0 = expand_shift (LSHIFT_EXPR, mode, op0, amount, subtarget, 1);
    }

  return expand_shift (RSHIFT_EXPR, mode, op0,
                       build_int_cst (NULL_TREE,
                                      GET_MODE_BITSIZE (mode) - bitsize),
                       NULL_RTX, 0);
}
/* Return a constant integer (CONST_INT or CONST_DOUBLE) mask value
   of mode MODE with BITSIZE ones followed by BITPOS zeros, or the
   complement of that if COMPLEMENT.  The mask is truncated if
   necessary to the width of mode MODE.  The mask is zero-extended if
   BITSIZE+BITPOS is too small for MODE.  */

static rtx
mask_rtx (enum machine_mode mode, int bitpos, int bitsize, int complement)
{
  HOST_WIDE_INT masklow, maskhigh;

  if (bitsize == 0)
    masklow = 0;
  else if (bitpos < HOST_BITS_PER_WIDE_INT)
    masklow = (HOST_WIDE_INT) -1 << bitpos;
  else
    masklow = 0;

  if (bitpos + bitsize < HOST_BITS_PER_WIDE_INT)
    masklow &= ((unsigned HOST_WIDE_INT) -1
                >> (HOST_BITS_PER_WIDE_INT - bitpos - bitsize));

  if (bitpos <= HOST_BITS_PER_WIDE_INT)
    maskhigh = -1;
  else
    maskhigh = (HOST_WIDE_INT) -1 << (bitpos - HOST_BITS_PER_WIDE_INT);

  if (bitsize == 0)
    maskhigh = 0;
  else if (bitpos + bitsize > HOST_BITS_PER_WIDE_INT)
    maskhigh &= ((unsigned HOST_WIDE_INT) -1
                 >> (2 * HOST_BITS_PER_WIDE_INT - bitpos - bitsize));
  else
    maskhigh = 0;

  if (complement)
    {
      maskhigh = ~maskhigh;
      masklow = ~masklow;
    }

  return immed_double_const (masklow, maskhigh, mode);
}
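
/* Illustrative worked example (an assumption, not original code): with
   a 64-bit HOST_WIDE_INT, mask_rtx (SImode, 4, 3, 0) computes

     masklow  = (HOST_WIDE_INT) -1 << 4;              ...11110000
     masklow &= (unsigned HOST_WIDE_INT) -1 >> 57;    -> 0x70

   i.e. three ones preceded by four zeros; with COMPLEMENT it yields
   the AND mask used to clear a field before IORing in a new value.  */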
/* Return a constant integer (CONST_INT or CONST_DOUBLE) rtx with the value
   VALUE truncated to BITSIZE bits and then shifted left BITPOS bits.  */

static rtx
lshift_value (enum machine_mode mode, rtx value, int bitpos, int bitsize)
{
  unsigned HOST_WIDE_INT v = INTVAL (value);
  HOST_WIDE_INT low, high;

  if (bitsize < HOST_BITS_PER_WIDE_INT)
    v &= ~((HOST_WIDE_INT) -1 << bitsize);

  if (bitpos < HOST_BITS_PER_WIDE_INT)
    {
      low = v << bitpos;
      high = (bitpos > 0 ? (v >> (HOST_BITS_PER_WIDE_INT - bitpos)) : 0);
    }
  else
    {
      low = 0;
      high = v << (bitpos - HOST_BITS_PER_WIDE_INT);
    }

  return immed_double_const (low, high, mode);
}
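
/* Illustrative worked example (an assumption, not original code):
   lshift_value (SImode, GEN_INT (5), 4, 3) truncates 5 to 3 bits
   (still 5) and shifts it left 4, yielding the CONST_INT 0x50 --
   precisely the pre-shifted constant store_fixed_bit_field ORs into
   the destination word.  */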
/* Extract a bit field from a memory by forcing the alignment of the
   memory.  This is efficient only if the field spans at least 4 boundaries.

   OP0 is the MEM.
   BITSIZE is the field width; BITPOS is the position of the first bit.
   UNSIGNEDP is true if the result should be zero-extended.  */

static rtx
extract_force_align_mem_bit_field (rtx op0, unsigned HOST_WIDE_INT bitsize,
                                   unsigned HOST_WIDE_INT bitpos,
                                   int unsignedp)
{
  enum machine_mode mode, dmode;
  unsigned int m_bitsize, m_size;
  unsigned int sign_shift_up, sign_shift_dn;
  rtx base, a1, a2, v1, v2, comb, shift, result, start;
1889 /* Choose a mode that will fit BITSIZE. */
1890 mode
= smallest_mode_for_size (bitsize
, MODE_INT
);
1891 m_size
= GET_MODE_SIZE (mode
);
1892 m_bitsize
= GET_MODE_BITSIZE (mode
);
1894 /* Choose a mode twice as wide. Fail if no such mode exists. */
1895 dmode
= mode_for_size (m_bitsize
* 2, MODE_INT
, false);
1896 if (dmode
== BLKmode
)
1899 do_pending_stack_adjust ();
1900 start
= get_last_insn ();
1902 /* At the end, we'll need an additional shift to deal with sign/zero
1903 extension. By default this will be a left+right shift of the
1904 appropriate size. But we may be able to eliminate one of them. */
1905 sign_shift_up
= sign_shift_dn
= m_bitsize
- bitsize
;
1907 if (STRICT_ALIGNMENT
)
1909 base
= plus_constant (XEXP (op0
, 0), bitpos
/ BITS_PER_UNIT
);
1910 bitpos
%= BITS_PER_UNIT
;
1912 /* We load two values to be concatenate. There's an edge condition
1913 that bears notice -- an aligned value at the end of a page can
1914 only load one value lest we segfault. So the two values we load
1915 are at "base & -size" and "(base + size - 1) & -size". If base
1916 is unaligned, the addresses will be aligned and sequential; if
1917 base is aligned, the addresses will both be equal to base. */
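
      /* For example (illustrative addresses), with m_size == 4: if base
	 is 0x1003 the two loads are from 0x1000 and 0x1004, aligned and
	 sequential; if base is 0x1000 both loads are from 0x1000.  */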
      a1 = expand_simple_binop (Pmode, AND, force_operand (base, NULL),
				GEN_INT (-(HOST_WIDE_INT)m_size),
				NULL, true, OPTAB_LIB_WIDEN);
      mark_reg_pointer (a1, m_bitsize);
      v1 = gen_rtx_MEM (mode, a1);
      set_mem_align (v1, m_bitsize);
      v1 = force_reg (mode, validize_mem (v1));

      a2 = plus_constant (base, GET_MODE_SIZE (mode) - 1);
      a2 = expand_simple_binop (Pmode, AND, force_operand (a2, NULL),
				GEN_INT (-(HOST_WIDE_INT)m_size),
				NULL, true, OPTAB_LIB_WIDEN);
      v2 = gen_rtx_MEM (mode, a2);
      set_mem_align (v2, m_bitsize);
      v2 = force_reg (mode, validize_mem (v2));

      /* Combine these two values into a double-word value.  */
      if (m_bitsize == BITS_PER_WORD)
	{
	  comb = gen_reg_rtx (dmode);
	  emit_insn (gen_rtx_CLOBBER (VOIDmode, comb));
	  emit_move_insn (gen_rtx_SUBREG (mode, comb, 0), v1);
	  emit_move_insn (gen_rtx_SUBREG (mode, comb, m_size), v2);
	}
      else
	{
	  if (BYTES_BIG_ENDIAN)
	    comb = v1, v1 = v2, v2 = comb;
	  v1 = convert_modes (dmode, mode, v1, true);
	  if (v1 == NULL)
	    goto fail;
	  v2 = convert_modes (dmode, mode, v2, true);
	  v2 = expand_simple_binop (dmode, ASHIFT, v2, GEN_INT (m_bitsize),
				    NULL, true, OPTAB_LIB_WIDEN);
	  if (v2 == NULL)
	    goto fail;
	  comb = expand_simple_binop (dmode, IOR, v1, v2, NULL,
				      true, OPTAB_LIB_WIDEN);
	  if (comb == NULL)
	    goto fail;
	}

      shift = expand_simple_binop (Pmode, AND, base, GEN_INT (m_size - 1),
				   NULL, true, OPTAB_LIB_WIDEN);
      shift = expand_mult (Pmode, shift, GEN_INT (BITS_PER_UNIT), NULL, 1);

      if (bitpos != 0)
	{
	  if (sign_shift_up <= bitpos)
	    bitpos -= sign_shift_up, sign_shift_up = 0;
	  shift = expand_simple_binop (Pmode, PLUS, shift, GEN_INT (bitpos),
				       NULL, true, OPTAB_LIB_WIDEN);
	}
    }
  else
    {
      unsigned HOST_WIDE_INT offset = bitpos / BITS_PER_UNIT;
      bitpos %= BITS_PER_UNIT;

      /* When strict alignment is not required, we can just load directly
	 from memory without masking.  If the remaining BITPOS offset is
	 small enough, we may be able to do all operations in MODE as
	 opposed to DMODE.  */
      if (bitpos + bitsize <= m_bitsize)
	dmode = mode;
      comb = adjust_address (op0, dmode, offset);

      if (sign_shift_up <= bitpos)
	bitpos -= sign_shift_up, sign_shift_up = 0;
      shift = GEN_INT (bitpos);
    }

  /* Shift down the double-word such that the requested value is at bit 0.  */
  if (shift != const0_rtx)
    comb = expand_simple_binop (dmode, unsignedp ? LSHIFTRT : ASHIFTRT,
				comb, shift, NULL, unsignedp, OPTAB_LIB_WIDEN);
  if (comb == NULL)
    goto fail;

  /* If the field exactly matches MODE, then all we need to do is return the
     lowpart.  Otherwise, shift to get the sign bits set properly.  */
  result = force_reg (mode, gen_lowpart (mode, comb));

  if (sign_shift_up)
    result = expand_simple_binop (mode, ASHIFT, result,
				  GEN_INT (sign_shift_up),
				  NULL_RTX, 0, OPTAB_LIB_WIDEN);
  if (sign_shift_dn)
    result = expand_simple_binop (mode, unsignedp ? LSHIFTRT : ASHIFTRT,
				  result, GEN_INT (sign_shift_dn),
				  NULL_RTX, 0, OPTAB_LIB_WIDEN);

  return result;

 fail:
  delete_insns_since (start);
  return NULL;
}
/* Extract a bit field that is split across two words
   and return an RTX for the result.

   OP0 is the REG, SUBREG or MEM rtx for the first of the two words.
   BITSIZE is the field width; BITPOS, position of its first bit, in the word.
   UNSIGNEDP is 1 if should zero-extend the contents; else sign-extend.  */

static rtx
extract_split_bit_field (rtx op0, unsigned HOST_WIDE_INT bitsize,
			 unsigned HOST_WIDE_INT bitpos, int unsignedp)
{
  unsigned int unit;
  unsigned int bitsdone = 0;
  rtx result = NULL_RTX;
  int first = 1;

  /* Make sure UNIT isn't larger than BITS_PER_WORD, we can only handle that
     much at a time.  */
  if (REG_P (op0) || GET_CODE (op0) == SUBREG)
    unit = BITS_PER_WORD;
  else
    {
      unit = MIN (MEM_ALIGN (op0), BITS_PER_WORD);
      if (0 && bitsize / unit > 2)
	{
	  rtx tmp = extract_force_align_mem_bit_field (op0, bitsize, bitpos,
						       unsignedp);
	  if (tmp)
	    return tmp;
	}
    }

  while (bitsdone < bitsize)
    {
      unsigned HOST_WIDE_INT thissize;
      rtx part, word;
      unsigned HOST_WIDE_INT thispos;
      unsigned HOST_WIDE_INT offset;

      offset = (bitpos + bitsdone) / unit;
      thispos = (bitpos + bitsdone) % unit;

      /* THISSIZE must not overrun a word boundary.  Otherwise,
	 extract_fixed_bit_field will call us again, and we will mutually
	 recurse forever.  */
      thissize = MIN (bitsize - bitsdone, BITS_PER_WORD);
      thissize = MIN (thissize, unit - thispos);

      /* If OP0 is a register, then handle OFFSET here.

	 When handling multiword bitfields, extract_bit_field may pass
	 down a word_mode SUBREG of a larger REG for a bitfield that actually
	 crosses a word boundary.  Thus, for a SUBREG, we must find
	 the current word starting from the base register.  */
      if (GET_CODE (op0) == SUBREG)
	{
	  int word_offset = (SUBREG_BYTE (op0) / UNITS_PER_WORD) + offset;
	  word = operand_subword_force (SUBREG_REG (op0), word_offset,
					GET_MODE (SUBREG_REG (op0)));
	  offset = 0;
	}
      else if (REG_P (op0))
	{
	  word = operand_subword_force (op0, offset, GET_MODE (op0));
	  offset = 0;
	}
      else
	word = op0;

      /* Extract the parts in bit-counting order,
	 whose meaning is determined by BYTES_PER_UNIT.
	 OFFSET is in UNITs, and UNIT is in bits.
	 extract_fixed_bit_field wants offset in bytes.  */
      part = extract_fixed_bit_field (word_mode, word,
				      offset * unit / BITS_PER_UNIT,
				      thissize, thispos, 0, 1);
      bitsdone += thissize;

      /* Shift this part into place for the result.  */
      if (BYTES_BIG_ENDIAN)
	{
	  if (bitsize != bitsdone)
	    part = expand_shift (LSHIFT_EXPR, word_mode, part,
				 build_int_cst (NULL_TREE, bitsize - bitsdone),
				 0, 1);
	}
      else
	{
	  if (bitsdone != thissize)
	    part = expand_shift (LSHIFT_EXPR, word_mode, part,
				 build_int_cst (NULL_TREE,
						bitsdone - thissize), 0, 1);
	}

      if (first)
	result = part;
      else
	/* Combine the parts with bitwise or.  This works
	   because we extracted each part as an unsigned bit field.  */
	result = expand_binop (word_mode, ior_optab, part, result, NULL_RTX, 1,
			       OPTAB_LIB_WIDEN);

      first = 0;
    }

  /* Unsigned bit field: we are done.  */
  if (unsignedp)
    return result;
  /* Signed bit field: sign-extend with two arithmetic shifts.  */
  result = expand_shift (LSHIFT_EXPR, word_mode, result,
			 build_int_cst (NULL_TREE, BITS_PER_WORD - bitsize),
			 NULL_RTX, 0);
  return expand_shift (RSHIFT_EXPR, word_mode, result,
		       build_int_cst (NULL_TREE, BITS_PER_WORD - bitsize),
		       NULL_RTX, 0);
}
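
/* E.g. (illustrative) with BITS_PER_WORD == 32 and bitsize == 5, the
   extracted value 0b10110 (22) becomes (22 << 27) >> 27 == -10 under
   the arithmetic shift, the correct 5-bit signed interpretation.  */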
/* Add INC into TARGET.  */

void
expand_inc (rtx target, rtx inc)
{
  rtx value = expand_binop (GET_MODE (target), add_optab,
			    target, inc,
			    target, 0, OPTAB_LIB_WIDEN);
  if (value != target)
    emit_move_insn (target, value);
}

/* Subtract DEC from TARGET.  */

void
expand_dec (rtx target, rtx dec)
{
  rtx value = expand_binop (GET_MODE (target), sub_optab,
			    target, dec,
			    target, 0, OPTAB_LIB_WIDEN);
  if (value != target)
    emit_move_insn (target, value);
}
/* Output a shift instruction for expression code CODE,
   with SHIFTED being the rtx for the value to shift,
   and AMOUNT the tree for the amount to shift by.
   Store the result in the rtx TARGET, if that is convenient.
   If UNSIGNEDP is nonzero, do a logical shift; otherwise, arithmetic.
   Return the rtx for where the value is.  */

rtx
expand_shift (enum tree_code code, enum machine_mode mode, rtx shifted,
	      tree amount, rtx target, int unsignedp)
{
  rtx op1, temp = 0;
  int left = (code == LSHIFT_EXPR || code == LROTATE_EXPR);
  int rotate = (code == LROTATE_EXPR || code == RROTATE_EXPR);
  int try;

  /* Previously detected shift-counts computed by NEGATE_EXPR
     and shifted in the other direction; but that does not work
     on all machines.  */

  op1 = expand_expr (amount, NULL_RTX, VOIDmode, 0);

  if (SHIFT_COUNT_TRUNCATED)
    {
      if (GET_CODE (op1) == CONST_INT
	  && ((unsigned HOST_WIDE_INT) INTVAL (op1) >=
	      (unsigned HOST_WIDE_INT) GET_MODE_BITSIZE (mode)))
	op1 = GEN_INT ((unsigned HOST_WIDE_INT) INTVAL (op1)
		       % GET_MODE_BITSIZE (mode));
      else if (GET_CODE (op1) == SUBREG
	       && subreg_lowpart_p (op1))
	op1 = SUBREG_REG (op1);
    }

  if (op1 == const0_rtx)
    return shifted;

  /* Check whether it's cheaper to implement a left shift by a constant
     bit count by a sequence of additions.  */
  if (code == LSHIFT_EXPR
      && GET_CODE (op1) == CONST_INT
      && INTVAL (op1) > 0
      && INTVAL (op1) < GET_MODE_BITSIZE (mode)
      && shift_cost[mode][INTVAL (op1)] > INTVAL (op1) * add_cost[mode])
    {
      int i;
      for (i = 0; i < INTVAL (op1); i++)
	{
	  temp = force_reg (mode, shifted);
	  shifted = expand_binop (mode, add_optab, temp, temp, NULL_RTX,
				  unsignedp, OPTAB_LIB_WIDEN);
	}
      return shifted;
    }
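
  /* E.g. a left shift by 2 whose shift_cost exceeds two additions is
     emitted as x += x; x += x; -- each self-addition doubles the value.  */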
  for (try = 0; temp == 0 && try < 3; try++)
    {
      enum optab_methods methods;

      if (try == 0)
	methods = OPTAB_DIRECT;
      else if (try == 1)
	methods = OPTAB_WIDEN;
      else
	methods = OPTAB_LIB_WIDEN;

      if (rotate)
	{
	  /* Widening does not work for rotation.  */
	  if (methods == OPTAB_WIDEN)
	    continue;
	  else if (methods == OPTAB_LIB_WIDEN)
	    {
	      /* If we have been unable to open-code this by a rotation,
		 do it as the IOR of two shifts.  I.e., to rotate A
		 by N bits, compute (A << N) | ((unsigned) A >> (C - N))
		 where C is the bitsize of A.

		 It is theoretically possible that the target machine might
		 not be able to perform either shift and hence we would
		 be making two libcalls rather than just the one for the
		 shift (similarly if IOR could not be done).  We will allow
		 this extremely unlikely lossage to avoid complicating the
		 code.  */

	      rtx subtarget = target == shifted ? 0 : target;
	      rtx temp1;
	      tree type = TREE_TYPE (amount);
	      tree new_amount = make_tree (type, op1);
	      tree other_amount
		= fold (build2 (MINUS_EXPR, type, convert
				(type, build_int_cst
				 (NULL_TREE, GET_MODE_BITSIZE (mode))),
				amount));

	      shifted = force_reg (mode, shifted);

	      temp = expand_shift (left ? LSHIFT_EXPR : RSHIFT_EXPR,
				   mode, shifted, new_amount, subtarget, 1);
	      temp1 = expand_shift (left ? RSHIFT_EXPR : LSHIFT_EXPR,
				    mode, shifted, other_amount, 0, 1);
	      return expand_binop (mode, ior_optab, temp, temp1, target,
				   unsignedp, methods);
	    }
= expand_binop (mode
,
2265 left
? rotl_optab
: rotr_optab
,
2266 shifted
, op1
, target
, unsignedp
, methods
);
2268 /* If we don't have the rotate, but we are rotating by a constant
2269 that is in range, try a rotate in the opposite direction. */
2271 if (temp
== 0 && GET_CODE (op1
) == CONST_INT
2273 && (unsigned int) INTVAL (op1
) < GET_MODE_BITSIZE (mode
))
2274 temp
= expand_binop (mode
,
2275 left
? rotr_optab
: rotl_optab
,
2277 GEN_INT (GET_MODE_BITSIZE (mode
)
2279 target
, unsignedp
, methods
);
2282 temp
= expand_binop (mode
,
2283 left
? ashl_optab
: lshr_optab
,
2284 shifted
, op1
, target
, unsignedp
, methods
);
2286 /* Do arithmetic shifts.
2287 Also, if we are going to widen the operand, we can just as well
2288 use an arithmetic right-shift instead of a logical one. */
2289 if (temp
== 0 && ! rotate
2290 && (! unsignedp
|| (! left
&& methods
== OPTAB_WIDEN
)))
2292 enum optab_methods methods1
= methods
;
2294 /* If trying to widen a log shift to an arithmetic shift,
2295 don't accept an arithmetic shift of the same size. */
2297 methods1
= OPTAB_MUST_WIDEN
;
2299 /* Arithmetic shift */
2301 temp
= expand_binop (mode
,
2302 left
? ashl_optab
: ashr_optab
,
2303 shifted
, op1
, target
, unsignedp
, methods1
);
2306 /* We used to try extzv here for logical right shifts, but that was
2307 only useful for one machine, the VAX, and caused poor code
2308 generation there for lshrdi3, so the code was deleted and a
2309 define_expand for lshrsi3 was added to vax.md. */
enum alg_code { alg_unknown, alg_zero, alg_m, alg_shift,
		alg_add_t_m2, alg_sub_t_m2,
		alg_add_factor, alg_sub_factor,
		alg_add_t2_m, alg_sub_t2_m };

/* This structure holds the "cost" of a multiply sequence.  The
   "cost" field holds the total rtx_cost of every operator in the
   synthetic multiplication sequence, hence cost(a op b) is defined
   as rtx_cost(op) + cost(a) + cost(b), where cost(leaf) is zero.
   The "latency" field holds the minimum possible latency of the
   synthetic multiply, on a hypothetical infinitely parallel CPU.
   This is the critical path, or the maximum height, of the expression
   tree which is the sum of rtx_costs on the most expensive path from
   any leaf to the root.  Hence latency(a op b) is defined as zero for
   leaves and rtx_cost(op) + max(latency(a), latency(b)) otherwise.  */

struct mult_cost {
  short cost;     /* Total rtx_cost of the multiplication sequence.  */
  short latency;  /* The latency of the multiplication sequence.  */
};

/* This macro is used to compare a pointer to a mult_cost against a
   single integer "rtx_cost" value.  This is equivalent to the macro
   CHEAPER_MULT_COST(X,Z) where Z = {Y,Y}.  */
#define MULT_COST_LESS(X,Y) ((X)->cost < (Y)	\
			     || ((X)->cost == (Y) && (X)->latency < (Y)))

/* This macro is used to compare two pointers to mult_costs against
   each other.  The macro returns true if X is cheaper than Y.
   Currently, the cheaper of two mult_costs is the one with the
   lower "cost".  If "cost"s are tied, the lower latency is cheaper.  */
#define CHEAPER_MULT_COST(X,Y)  ((X)->cost < (Y)->cost		\
				 || ((X)->cost == (Y)->cost	\
				     && (X)->latency < (Y)->latency))

/* This structure records a sequence of operations.
   `ops' is the number of operations recorded.
   `cost' is their total cost.
   The operations are stored in `op' and the corresponding
   logarithms of the integer coefficients in `log'.

   These are the operations:
   alg_zero		total := 0;
   alg_m		total := multiplicand;
   alg_shift		total := total * coeff
   alg_add_t_m2		total := total + multiplicand * coeff;
   alg_sub_t_m2		total := total - multiplicand * coeff;
   alg_add_factor	total := total * coeff + total;
   alg_sub_factor	total := total * coeff - total;
   alg_add_t2_m		total := total * coeff + multiplicand;
   alg_sub_t2_m		total := total * coeff - multiplicand;

   The first operand must be either alg_zero or alg_m.  */

struct algorithm
{
  struct mult_cost cost;
  short ops;
  /* The size of the OP and LOG fields are not directly related to the
     word size, but the worst-case algorithms will be if we have few
     consecutive ones or zeros, i.e., a multiplicand like 10101010101...
     In that case we will generate shift-by-2, add, shift-by-2, add,...,
     in total wordsize operations.  */
  enum alg_code op[MAX_BITS_PER_WORD];
  char log[MAX_BITS_PER_WORD];
};
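
/* For example, a multiply by 10 can be recorded as the sequence
   { alg_m, alg_add_factor (log 2), alg_shift (log 1) }:
       total := x;                        (alg_m)
       total := total*4 + total == x*5;   (alg_add_factor, coeff 2^2)
       total := total*2 == x*10.          (alg_shift, coeff 2^1)
   Illustrative only; the sequence actually chosen depends on the
   target's add and shift costs.  */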
/* The entry for our multiplication cache/hash table.  */
struct alg_hash_entry {
  /* The number we are multiplying by.  */
  unsigned HOST_WIDE_INT t;

  /* The mode in which we are multiplying something by T.  */
  enum machine_mode mode;

  /* The best multiplication algorithm for t.  */
  enum alg_code alg;
};

/* The number of cache/hash entries.  */
#define NUM_ALG_HASH_ENTRIES 307

/* Each entry of ALG_HASH caches alg_code for some integer.  This is
   actually a hash table.  If we have a collision, the older
   entry is kicked out.  */
static struct alg_hash_entry alg_hash[NUM_ALG_HASH_ENTRIES];

/* Indicates the type of fixup needed after a constant multiplication.
   BASIC_VARIANT means no fixup is needed, NEGATE_VARIANT means that
   the result should be negated, and ADD_VARIANT means that the
   multiplicand should be added to the result.  */
enum mult_variant {basic_variant, negate_variant, add_variant};
static void synth_mult (struct algorithm *, unsigned HOST_WIDE_INT,
			const struct mult_cost *, enum machine_mode mode);
static bool choose_mult_variant (enum machine_mode, HOST_WIDE_INT,
				 struct algorithm *, enum mult_variant *, int);
static rtx expand_mult_const (enum machine_mode, rtx, HOST_WIDE_INT, rtx,
			      const struct algorithm *, enum mult_variant);
static unsigned HOST_WIDE_INT choose_multiplier (unsigned HOST_WIDE_INT, int,
						 int, unsigned HOST_WIDE_INT *,
						 int *, int *);
static unsigned HOST_WIDE_INT invert_mod2n (unsigned HOST_WIDE_INT, int);
static rtx extract_high_half (enum machine_mode, rtx);
static rtx expand_mult_highpart_optab (enum machine_mode, rtx, rtx, rtx,
				       int, int);

/* Compute and return the best algorithm for multiplying by T.
   The algorithm must cost less than COST_LIMIT.
   If retval.cost >= COST_LIMIT, no algorithm was found and all
   other fields of the returned struct are undefined.
   MODE is the machine mode of the multiplication.  */
static void
synth_mult (struct algorithm *alg_out, unsigned HOST_WIDE_INT t,
	    const struct mult_cost *cost_limit, enum machine_mode mode)
{
  int m;
  struct algorithm *alg_in, *best_alg;
  struct mult_cost best_cost;
  struct mult_cost new_limit;
  int op_cost, op_latency;
  unsigned HOST_WIDE_INT q;
  int maxm = MIN (BITS_PER_WORD, GET_MODE_BITSIZE (mode));
  int hash_index;
  bool cache_hit = false;
  enum alg_code cache_alg = alg_zero;

  /* Indicate that no algorithm is yet found.  If no algorithm
     is found, this value will be returned and indicate failure.  */
  alg_out->cost.cost = cost_limit->cost + 1;
  alg_out->cost.latency = cost_limit->latency + 1;

  if (cost_limit->cost < 0
      || (cost_limit->cost == 0 && cost_limit->latency <= 0))
    return;

  /* Restrict the bits of "t" to the multiplication's mode.  */
  t &= GET_MODE_MASK (mode);

  /* t == 1 can be done in zero cost.  */
  if (t == 1)
    {
      alg_out->ops = 1;
      alg_out->cost.cost = 0;
      alg_out->cost.latency = 0;
      alg_out->op[0] = alg_m;
      return;
    }

  /* t == 0 sometimes has a cost.  If it does and it exceeds our limit,
     fail now.  */
  if (t == 0)
    {
      if (MULT_COST_LESS (cost_limit, zero_cost))
	return;
      else
	{
	  alg_out->ops = 1;
	  alg_out->cost.cost = zero_cost;
	  alg_out->cost.latency = zero_cost;
	  alg_out->op[0] = alg_zero;
	  return;
	}
    }

  /* We'll be needing a couple extra algorithm structures now.  */

  alg_in = alloca (sizeof (struct algorithm));
  best_alg = alloca (sizeof (struct algorithm));
  best_cost = *cost_limit;

  /* Compute the hash index.  */
  hash_index = (t ^ (unsigned int) mode) % NUM_ALG_HASH_ENTRIES;

  /* See if we already know what to do for T.  */
  if (alg_hash[hash_index].t == t
      && alg_hash[hash_index].mode == mode
      && alg_hash[hash_index].alg != alg_unknown)
    {
      cache_hit = true;
      cache_alg = alg_hash[hash_index].alg;
      switch (cache_alg)
	{
	case alg_shift:
	  goto do_alg_shift;

	case alg_add_t_m2:
	case alg_sub_t_m2:
	  goto do_alg_addsub_t_m2;

	case alg_add_factor:
	case alg_sub_factor:
	  goto do_alg_addsub_factor;

	case alg_add_t2_m:
	  goto do_alg_add_t2_m;

	case alg_sub_t2_m:
	  goto do_alg_sub_t2_m;

	default:
	  gcc_unreachable ();
	}
    }

  /* If we have a group of zero bits at the low-order part of T, try
     multiplying by the remaining bits and then doing a shift.  */

  if ((t & 1) == 0)
    {
    do_alg_shift:
      m = floor_log2 (t & -t);	/* m = number of low zero bits */
      if (m < maxm)
	{
	  q = t >> m;
	  /* The function expand_shift will choose between a shift and
	     a sequence of additions, so the observed cost is given as
	     MIN (m * add_cost[mode], shift_cost[mode][m]).  */
	  op_cost = m * add_cost[mode];
	  if (shift_cost[mode][m] < op_cost)
	    op_cost = shift_cost[mode][m];
	  new_limit.cost = best_cost.cost - op_cost;
	  new_limit.latency = best_cost.latency - op_cost;
	  synth_mult (alg_in, q, &new_limit, mode);

	  alg_in->cost.cost += op_cost;
	  alg_in->cost.latency += op_cost;
	  if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
	    {
	      struct algorithm *x;
	      best_cost = alg_in->cost;
	      x = alg_in, alg_in = best_alg, best_alg = x;
	      best_alg->log[best_alg->ops] = m;
	      best_alg->op[best_alg->ops] = alg_shift;
	    }
	}
      if (cache_hit)
	goto done;
    }
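
  /* For illustration: with t = 20 (binary 10100), m = 2, so we
     recursively synthesize a multiply by 5 and append a shift-by-2,
     yielding x*20 == ((x<<2) + x) << 2.  */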
  /* If we have an odd number, add or subtract one.  */
  if ((t & 1) != 0)
    {
      unsigned HOST_WIDE_INT w;

    do_alg_addsub_t_m2:
      for (w = 1; (w & t) != 0; w <<= 1)
	;
      /* If T was -1, then W will be zero after the loop.  This is another
	 case where T ends with ...111.  Handling this with (T + 1) and
	 subtract 1 produces slightly better code and results in algorithm
	 selection much faster than treating it like the ...0111 case
	 below.  */
      if (w == 0
	  || (w > 2
	      /* Reject the case where t is 3.
		 Thus we prefer addition in that case.  */
	      && t != 3))
	{
	  /* T ends with ...111.  Multiply by (T + 1) and subtract 1.  */

	  op_cost = add_cost[mode];
	  new_limit.cost = best_cost.cost - op_cost;
	  new_limit.latency = best_cost.latency - op_cost;
	  synth_mult (alg_in, t + 1, &new_limit, mode);

	  alg_in->cost.cost += op_cost;
	  alg_in->cost.latency += op_cost;
	  if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
	    {
	      struct algorithm *x;
	      best_cost = alg_in->cost;
	      x = alg_in, alg_in = best_alg, best_alg = x;
	      best_alg->log[best_alg->ops] = 0;
	      best_alg->op[best_alg->ops] = alg_sub_t_m2;
	    }
	}
      else
	{
	  /* T ends with ...01 or ...011.  Multiply by (T - 1) and add 1.  */

	  op_cost = add_cost[mode];
	  new_limit.cost = best_cost.cost - op_cost;
	  new_limit.latency = best_cost.latency - op_cost;
	  synth_mult (alg_in, t - 1, &new_limit, mode);

	  alg_in->cost.cost += op_cost;
	  alg_in->cost.latency += op_cost;
	  if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
	    {
	      struct algorithm *x;
	      best_cost = alg_in->cost;
	      x = alg_in, alg_in = best_alg, best_alg = x;
	      best_alg->log[best_alg->ops] = 0;
	      best_alg->op[best_alg->ops] = alg_add_t_m2;
	    }
	}
      if (cache_hit)
	goto done;
    }
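
  /* E.g. t = 7 ends in ...111, so we synthesize t + 1 = 8 and subtract:
     x*7 == (x<<3) - x (alg_sub_t_m2).  t = 5 ends in ...01, so
     x*5 == (x<<2) + x (alg_add_t_m2).  Illustrative decompositions;
     the cost comparison decides what is actually chosen.  */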
  /* Look for factors of t of the form
     t = q(2**m +- 1), 2 <= m <= floor(log2(t - 1)).
     If we find such a factor, we can multiply by t using an algorithm that
     multiplies by q, shift the result by m and add/subtract it to itself.

     We search for large factors first and loop down, even if large factors
     are less probable than small; if we find a large factor we will find a
     good sequence quickly, and therefore be able to prune (by decreasing
     COST_LIMIT) the search.  */

 do_alg_addsub_factor:
  for (m = floor_log2 (t - 1); m >= 2; m--)
    {
      unsigned HOST_WIDE_INT d;

      d = ((unsigned HOST_WIDE_INT) 1 << m) + 1;
      if (t % d == 0 && t > d && m < maxm
	  && (!cache_hit || cache_alg == alg_add_factor))
	{
	  /* If the target has a cheap shift-and-add instruction use
	     that in preference to a shift insn followed by an add insn.
	     Assume that the shift-and-add is "atomic" with a latency
	     equal to its cost, otherwise assume that on superscalar
	     hardware the shift may be executed concurrently with the
	     earlier steps in the algorithm.  */
	  op_cost = add_cost[mode] + shift_cost[mode][m];
	  if (shiftadd_cost[mode][m] < op_cost)
	    {
	      op_cost = shiftadd_cost[mode][m];
	      op_latency = op_cost;
	    }
	  else
	    op_latency = add_cost[mode];

	  new_limit.cost = best_cost.cost - op_cost;
	  new_limit.latency = best_cost.latency - op_latency;
	  synth_mult (alg_in, t / d, &new_limit, mode);

	  alg_in->cost.cost += op_cost;
	  alg_in->cost.latency += op_latency;
	  if (alg_in->cost.latency < op_cost)
	    alg_in->cost.latency = op_cost;
	  if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
	    {
	      struct algorithm *x;
	      best_cost = alg_in->cost;
	      x = alg_in, alg_in = best_alg, best_alg = x;
	      best_alg->log[best_alg->ops] = m;
	      best_alg->op[best_alg->ops] = alg_add_factor;
	    }
	  /* Other factors will have been taken care of in the recursion.  */
	  break;
	}

      d = ((unsigned HOST_WIDE_INT) 1 << m) - 1;
      if (t % d == 0 && t > d && m < maxm
	  && (!cache_hit || cache_alg == alg_sub_factor))
	{
	  /* If the target has a cheap shift-and-subtract insn use
	     that in preference to a shift insn followed by a sub insn.
	     Assume that the shift-and-sub is "atomic" with a latency
	     equal to its cost, otherwise assume that on superscalar
	     hardware the shift may be executed concurrently with the
	     earlier steps in the algorithm.  */
	  op_cost = add_cost[mode] + shift_cost[mode][m];
	  if (shiftsub_cost[mode][m] < op_cost)
	    {
	      op_cost = shiftsub_cost[mode][m];
	      op_latency = op_cost;
	    }
	  else
	    op_latency = add_cost[mode];

	  new_limit.cost = best_cost.cost - op_cost;
	  new_limit.latency = best_cost.latency - op_latency;
	  synth_mult (alg_in, t / d, &new_limit, mode);

	  alg_in->cost.cost += op_cost;
	  alg_in->cost.latency += op_latency;
	  if (alg_in->cost.latency < op_cost)
	    alg_in->cost.latency = op_cost;
	  if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
	    {
	      struct algorithm *x;
	      best_cost = alg_in->cost;
	      x = alg_in, alg_in = best_alg, best_alg = x;
	      best_alg->log[best_alg->ops] = m;
	      best_alg->op[best_alg->ops] = alg_sub_factor;
	    }
	  break;
	}
    }
  if (cache_hit)
    goto done;
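
  /* E.g. t = 45 = 9 * 5 = (2^3 + 1) * 5: first synthesize x*5, then one
     shift-and-add step gives (x*5 << 3) + x*5 == x*45 (alg_add_factor
     with m = 3).  Illustrative only.  */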
  /* Try shift-and-add (load effective address) instructions,
     i.e. do a*3, a*5, a*9.  */
  if ((t & 1) != 0)
    {
    do_alg_add_t2_m:
      q = t - 1;
      q = q & -q;
      m = exact_log2 (q);
      if (m >= 0 && m < maxm)
	{
	  op_cost = shiftadd_cost[mode][m];
	  new_limit.cost = best_cost.cost - op_cost;
	  new_limit.latency = best_cost.latency - op_cost;
	  synth_mult (alg_in, (t - 1) >> m, &new_limit, mode);

	  alg_in->cost.cost += op_cost;
	  alg_in->cost.latency += op_cost;
	  if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
	    {
	      struct algorithm *x;
	      best_cost = alg_in->cost;
	      x = alg_in, alg_in = best_alg, best_alg = x;
	      best_alg->log[best_alg->ops] = m;
	      best_alg->op[best_alg->ops] = alg_add_t2_m;
	    }
	}
      if (cache_hit)
	goto done;

    do_alg_sub_t2_m:
      q = t + 1;
      q = q & -q;
      m = exact_log2 (q);
      if (m >= 0 && m < maxm)
	{
	  op_cost = shiftsub_cost[mode][m];
	  new_limit.cost = best_cost.cost - op_cost;
	  new_limit.latency = best_cost.latency - op_cost;
	  synth_mult (alg_in, (t + 1) >> m, &new_limit, mode);

	  alg_in->cost.cost += op_cost;
	  alg_in->cost.latency += op_cost;
	  if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
	    {
	      struct algorithm *x;
	      best_cost = alg_in->cost;
	      x = alg_in, alg_in = best_alg, best_alg = x;
	      best_alg->log[best_alg->ops] = m;
	      best_alg->op[best_alg->ops] = alg_sub_t2_m;
	    }
	}
      if (cache_hit)
	goto done;
    }

 done:
  /* If best_cost has not decreased, we have not found any algorithm.  */
  if (!CHEAPER_MULT_COST (&best_cost, cost_limit))
    return;

  /* Cache the result.  */
  if (!cache_hit)
    {
      alg_hash[hash_index].t = t;
      alg_hash[hash_index].mode = mode;
      alg_hash[hash_index].alg = best_alg->op[best_alg->ops];
    }

  /* If we are getting too long a sequence for `struct algorithm'
     to record, make this search fail.  */
  if (best_alg->ops == MAX_BITS_PER_WORD)
    return;

  /* Copy the algorithm from temporary space to the space at alg_out.
     We avoid using structure assignment because the majority of
     best_alg is normally undefined, and this is a critical function.  */
  alg_out->ops = best_alg->ops + 1;
  alg_out->cost = best_cost;
  memcpy (alg_out->op, best_alg->op,
	  alg_out->ops * sizeof *alg_out->op);
  memcpy (alg_out->log, best_alg->log,
	  alg_out->ops * sizeof *alg_out->log);
}
/* Find the cheapest way of multiplying a value of mode MODE by VAL.
   Try three variations:

       - a shift/add sequence based on VAL itself
       - a shift/add sequence based on -VAL, followed by a negation
       - a shift/add sequence based on VAL - 1, followed by an addition.

   Return true if the cheapest of these costs less than MULT_COST,
   describing the algorithm in *ALG and final fixup in *VARIANT.  */

static bool
choose_mult_variant (enum machine_mode mode, HOST_WIDE_INT val,
		     struct algorithm *alg, enum mult_variant *variant,
		     int mult_cost)
{
  struct algorithm alg2;
  struct mult_cost limit;
  int op_cost;

  *variant = basic_variant;
  limit.cost = mult_cost;
  limit.latency = mult_cost;
  synth_mult (alg, val, &limit, mode);

  /* This works only if the inverted value actually fits in an
     `unsigned int' */
  if (HOST_BITS_PER_INT >= GET_MODE_BITSIZE (mode))
    {
      op_cost = neg_cost[mode];
      if (MULT_COST_LESS (&alg->cost, mult_cost))
	{
	  limit.cost = alg->cost.cost - op_cost;
	  limit.latency = alg->cost.latency - op_cost;
	}
      else
	{
	  limit.cost = mult_cost - op_cost;
	  limit.latency = mult_cost - op_cost;
	}

      synth_mult (&alg2, -val, &limit, mode);
      alg2.cost.cost += op_cost;
      alg2.cost.latency += op_cost;
      if (CHEAPER_MULT_COST (&alg2.cost, &alg->cost))
	*alg = alg2, *variant = negate_variant;
    }

  /* This proves very useful for division-by-constant.  */
  op_cost = add_cost[mode];
  if (MULT_COST_LESS (&alg->cost, mult_cost))
    {
      limit.cost = alg->cost.cost - op_cost;
      limit.latency = alg->cost.latency - op_cost;
    }
  else
    {
      limit.cost = mult_cost - op_cost;
      limit.latency = mult_cost - op_cost;
    }

  synth_mult (&alg2, val - 1, &limit, mode);
  alg2.cost.cost += op_cost;
  alg2.cost.latency += op_cost;
  if (CHEAPER_MULT_COST (&alg2.cost, &alg->cost))
    *alg = alg2, *variant = add_variant;

  return MULT_COST_LESS (&alg->cost, mult_cost);
}
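
/* E.g. for val = -5 the negate variant may win: synthesize x*5, then
   negate.  For val = 9 the add variant tries val - 1 = 8, giving
   x*9 == (x<<3) + x.  Illustrative; the actual choice is cost-driven.  */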
/* A subroutine of expand_mult, used for constant multiplications.
   Multiply OP0 by VAL in mode MODE, storing the result in TARGET if
   convenient.  Use the shift/add sequence described by ALG and apply
   the final fixup specified by VARIANT.  */

static rtx
expand_mult_const (enum machine_mode mode, rtx op0, HOST_WIDE_INT val,
		   rtx target, const struct algorithm *alg,
		   enum mult_variant variant)
{
  HOST_WIDE_INT val_so_far;
  rtx insn, accum, tem;
  int opno;
  enum machine_mode nmode;

  /* Avoid referencing memory over and over.
     For speed, but also for correctness when mem is volatile.  */
  if (MEM_P (op0))
    op0 = force_reg (mode, op0);

  /* ACCUM starts out either as OP0 or as a zero, depending on
     the first operation.  */

  if (alg->op[0] == alg_zero)
    {
      accum = copy_to_mode_reg (mode, const0_rtx);
      val_so_far = 0;
    }
  else if (alg->op[0] == alg_m)
    {
      accum = copy_to_mode_reg (mode, op0);
      val_so_far = 1;
    }
  else
    gcc_unreachable ();

  for (opno = 1; opno < alg->ops; opno++)
    {
      int log = alg->log[opno];
      rtx shift_subtarget = optimize ? 0 : accum;
      rtx add_target
	= (opno == alg->ops - 1 && target != 0 && variant != add_variant
	   && !optimize)
	  ? target : 0;
      rtx accum_target = optimize ? 0 : accum;

      switch (alg->op[opno])
	{
	case alg_shift:
	  accum = expand_shift (LSHIFT_EXPR, mode, accum,
				build_int_cst (NULL_TREE, log),
				NULL_RTX, 0);
	  val_so_far <<= log;
	  break;

	case alg_add_t_m2:
	  tem = expand_shift (LSHIFT_EXPR, mode, op0,
			      build_int_cst (NULL_TREE, log),
			      NULL_RTX, 0);
	  accum = force_operand (gen_rtx_PLUS (mode, accum, tem),
				 add_target ? add_target : accum_target);
	  val_so_far += (HOST_WIDE_INT) 1 << log;
	  break;

	case alg_sub_t_m2:
	  tem = expand_shift (LSHIFT_EXPR, mode, op0,
			      build_int_cst (NULL_TREE, log),
			      NULL_RTX, 0);
	  accum = force_operand (gen_rtx_MINUS (mode, accum, tem),
				 add_target ? add_target : accum_target);
	  val_so_far -= (HOST_WIDE_INT) 1 << log;
	  break;

	case alg_add_t2_m:
	  accum = expand_shift (LSHIFT_EXPR, mode, accum,
				build_int_cst (NULL_TREE, log),
				shift_subtarget, 0);
	  accum = force_operand (gen_rtx_PLUS (mode, accum, op0),
				 add_target ? add_target : accum_target);
	  val_so_far = (val_so_far << log) + 1;
	  break;

	case alg_sub_t2_m:
	  accum = expand_shift (LSHIFT_EXPR, mode, accum,
				build_int_cst (NULL_TREE, log),
				shift_subtarget, 0);
	  accum = force_operand (gen_rtx_MINUS (mode, accum, op0),
				 add_target ? add_target : accum_target);
	  val_so_far = (val_so_far << log) - 1;
	  break;

	case alg_add_factor:
	  tem = expand_shift (LSHIFT_EXPR, mode, accum,
			      build_int_cst (NULL_TREE, log),
			      NULL_RTX, 0);
	  accum = force_operand (gen_rtx_PLUS (mode, accum, tem),
				 add_target ? add_target : accum_target);
	  val_so_far += val_so_far << log;
	  break;

	case alg_sub_factor:
	  tem = expand_shift (LSHIFT_EXPR, mode, accum,
			      build_int_cst (NULL_TREE, log),
			      NULL_RTX, 0);
	  accum = force_operand (gen_rtx_MINUS (mode, tem, accum),
				 (add_target
				  ? add_target : (optimize ? 0 : tem)));
	  val_so_far = (val_so_far << log) - val_so_far;
	  break;

	default:
	  gcc_unreachable ();
	}

      /* Write a REG_EQUAL note on the last insn so that we can cse
	 multiplication sequences.  Note that if ACCUM is a SUBREG,
	 we've set the inner register and must properly indicate
	 that.  */

      tem = op0, nmode = mode;
      if (GET_CODE (accum) == SUBREG)
	{
	  nmode = GET_MODE (SUBREG_REG (accum));
	  tem = gen_lowpart (nmode, op0);
	}

      insn = get_last_insn ();
      set_unique_reg_note (insn, REG_EQUAL,
			   gen_rtx_MULT (nmode, tem, GEN_INT (val_so_far)));
    }

  if (variant == negate_variant)
    {
      val_so_far = -val_so_far;
      accum = expand_unop (mode, neg_optab, accum, target, 0);
    }
  else if (variant == add_variant)
    {
      val_so_far = val_so_far + 1;
      accum = force_operand (gen_rtx_PLUS (mode, accum, op0), target);
    }

  /* Compare only the bits of val and val_so_far that are significant
     in the result mode, to avoid sign-/zero-extension confusion.  */
  val &= GET_MODE_MASK (mode);
  val_so_far &= GET_MODE_MASK (mode);
  gcc_assert (val == val_so_far);

  return accum;
}
/* Perform a multiplication and return an rtx for the result.
   MODE is mode of value; OP0 and OP1 are what to multiply (rtx's);
   TARGET is a suggestion for where to store the result (an rtx).

   We check specially for a constant integer as OP1.
   If you want this check for OP0 as well, then before calling
   you should swap the two operands if OP0 would be constant.  */

rtx
expand_mult (enum machine_mode mode, rtx op0, rtx op1, rtx target,
	     int unsignedp)
{
  rtx const_op1 = op1;
  enum mult_variant variant;
  struct algorithm algorithm;

  /* synth_mult does an `unsigned int' multiply.  As long as the mode is
     less than or equal in size to `unsigned int' this doesn't matter.
     If the mode is larger than `unsigned int', then synth_mult works only
     if the constant value exactly fits in an `unsigned int' without any
     truncation.  This means that multiplying by negative values does
     not work; results are off by 2^32 on a 32 bit machine.  */

  /* If we are multiplying in DImode, it may still be a win
     to try to work with shifts and adds.  */
  if (GET_CODE (op1) == CONST_DOUBLE
      && GET_MODE_CLASS (GET_MODE (op1)) == MODE_INT
      && HOST_BITS_PER_INT >= BITS_PER_WORD
      && CONST_DOUBLE_HIGH (op1) == 0)
    const_op1 = GEN_INT (CONST_DOUBLE_LOW (op1));
  else if (HOST_BITS_PER_INT < GET_MODE_BITSIZE (mode)
	   && GET_CODE (op1) == CONST_INT
	   && INTVAL (op1) < 0)
    const_op1 = 0;

  /* We used to test optimize here, on the grounds that it's better to
     produce a smaller program when -O is not used.
     But this causes such a terrible slowdown sometimes
     that it seems better to use synth_mult always.  */

  if (const_op1 && GET_CODE (const_op1) == CONST_INT
      && (unsignedp || !flag_trapv))
    {
      int mult_cost = rtx_cost (gen_rtx_MULT (mode, op0, op1), SET);

      if (choose_mult_variant (mode, INTVAL (const_op1), &algorithm, &variant,
			       mult_cost))
	return expand_mult_const (mode, op0, INTVAL (const_op1), target,
				  &algorithm, variant);
    }

  if (GET_CODE (op0) == CONST_DOUBLE)
    {
      rtx temp = op0;
      op0 = op1;
      op1 = temp;
    }

  /* Expand x*2.0 as x+x.  */
  if (GET_CODE (op1) == CONST_DOUBLE
      && GET_MODE_CLASS (mode) == MODE_FLOAT)
    {
      REAL_VALUE_TYPE d;
      REAL_VALUE_FROM_CONST_DOUBLE (d, op1);

      if (REAL_VALUES_EQUAL (d, dconst2))
	{
	  op0 = force_reg (GET_MODE (op0), op0);
	  return expand_binop (mode, add_optab, op0, op0,
			       target, unsignedp, OPTAB_LIB_WIDEN);
	}
    }

  /* This used to use umul_optab if unsigned, but for non-widening multiply
     there is no difference between signed and unsigned.  */
  op0 = expand_binop (mode,
		      ! unsignedp
		      && flag_trapv && (GET_MODE_CLASS(mode) == MODE_INT)
		      ? smulv_optab : smul_optab,
		      op0, op1, target, unsignedp, OPTAB_LIB_WIDEN);
  gcc_assert (op0);
  return op0;
}
/* Return the smallest n such that 2**n >= X.  */

int
ceil_log2 (unsigned HOST_WIDE_INT x)
{
  return floor_log2 (x - 1) + 1;
}
/* Choose a minimal N + 1 bit approximation to 1/D that can be used to
   replace division by D, and put the least significant N bits of the result
   in *MULTIPLIER_PTR and return the most significant bit.

   The width of operations is N (should be <= HOST_BITS_PER_WIDE_INT), the
   needed precision is in PRECISION (should be <= N).

   PRECISION should be as small as possible so this function can choose
   multiplier more freely.

   The rounded-up logarithm of D is placed in *lgup_ptr.  A shift count that
   is to be used for a final right shift is placed in *POST_SHIFT_PTR.

   Using this function, x/D will be equal to (x * m) >> (*POST_SHIFT_PTR),
   where m is the full HOST_BITS_PER_WIDE_INT + 1 bit multiplier.  */

static
unsigned HOST_WIDE_INT
choose_multiplier (unsigned HOST_WIDE_INT d, int n, int precision,
		   unsigned HOST_WIDE_INT *multiplier_ptr,
		   int *post_shift_ptr, int *lgup_ptr)
{
  HOST_WIDE_INT mhigh_hi, mlow_hi;
  unsigned HOST_WIDE_INT mhigh_lo, mlow_lo;
  int lgup, post_shift;
  int pow, pow2;
  unsigned HOST_WIDE_INT nl, dummy1;
  HOST_WIDE_INT nh, dummy2;

  /* lgup = ceil(log2(divisor)); */
  lgup = ceil_log2 (d);

  gcc_assert (lgup <= n);

  pow = n + lgup;
  pow2 = n + lgup - precision;

  /* We could handle this with some effort, but this case is much
     better handled directly with a scc insn, so rely on caller using
     that.  */
  gcc_assert (pow != 2 * HOST_BITS_PER_WIDE_INT);

  /* mlow = 2^(N + lgup)/d */
  if (pow >= HOST_BITS_PER_WIDE_INT)
    {
      nh = (HOST_WIDE_INT) 1 << (pow - HOST_BITS_PER_WIDE_INT);
      nl = 0;
    }
  else
    {
      nh = 0;
      nl = (unsigned HOST_WIDE_INT) 1 << pow;
    }
  div_and_round_double (TRUNC_DIV_EXPR, 1, nl, nh, d, (HOST_WIDE_INT) 0,
			&mlow_lo, &mlow_hi, &dummy1, &dummy2);

  /* mhigh = (2^(N + lgup) + 2^(N + lgup - precision))/d */
  if (pow2 >= HOST_BITS_PER_WIDE_INT)
    nh |= (HOST_WIDE_INT) 1 << (pow2 - HOST_BITS_PER_WIDE_INT);
  else
    nl |= (unsigned HOST_WIDE_INT) 1 << pow2;
  div_and_round_double (TRUNC_DIV_EXPR, 1, nl, nh, d, (HOST_WIDE_INT) 0,
			&mhigh_lo, &mhigh_hi, &dummy1, &dummy2);

  gcc_assert (!mhigh_hi || nh - d < d);
  gcc_assert (mhigh_hi <= 1 && mlow_hi <= 1);
  /* Assert that mlow < mhigh.  */
  gcc_assert (mlow_hi < mhigh_hi
	      || (mlow_hi == mhigh_hi && mlow_lo < mhigh_lo));

  /* If precision == N, then mlow, mhigh exceed 2^N
     (but they do not exceed 2^(N+1)).  */

  /* Reduce to lowest terms.  */
  for (post_shift = lgup; post_shift > 0; post_shift--)
    {
      unsigned HOST_WIDE_INT ml_lo
	= (mlow_hi << (HOST_BITS_PER_WIDE_INT - 1)) | (mlow_lo >> 1);
      unsigned HOST_WIDE_INT mh_lo
	= (mhigh_hi << (HOST_BITS_PER_WIDE_INT - 1)) | (mhigh_lo >> 1);
      if (ml_lo >= mh_lo)
	break;

      mlow_hi = 0;
      mlow_lo = ml_lo;
      mhigh_hi = 0;
      mhigh_lo = mh_lo;
    }

  *post_shift_ptr = post_shift;
  *lgup_ptr = lgup;
  if (n < HOST_BITS_PER_WIDE_INT)
    {
      unsigned HOST_WIDE_INT mask = ((unsigned HOST_WIDE_INT) 1 << n) - 1;
      *multiplier_ptr = mhigh_lo & mask;
      return mhigh_lo >= mask;
    }
  else
    {
      *multiplier_ptr = mhigh_lo;
      return mhigh_hi;
    }
}
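
/* For example, d = 3 with n = precision = 32 yields the familiar
   multiplier 0xAAAAAAAB with post_shift 1 (and a returned msb of 0):
   x / 3 == (uint32_t) (((uint64_t) x * 0xAAAAAAAB) >> 32) >> 1
   for every 32-bit unsigned x, where the >> 32 is the high-half
   extraction.  Illustrative; the exact multiplier/shift pair comes
   from the computation above.  */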
/* Compute the inverse of X mod 2**n, i.e., find Y such that X * Y is
   congruent to 1 (mod 2**N).  */

static unsigned HOST_WIDE_INT
invert_mod2n (unsigned HOST_WIDE_INT x, int n)
{
  /* Solve x*y == 1 (mod 2^n), where x is odd.  Return y.  */

  /* The algorithm notes that the choice y = x satisfies
     x*y == 1 mod 2^3, since x is assumed odd.
     Each iteration doubles the number of bits of significance in y.  */

  unsigned HOST_WIDE_INT mask;
  unsigned HOST_WIDE_INT y = x;
  int nbit = 3;

  mask = (n == HOST_BITS_PER_WIDE_INT
	  ? ~(unsigned HOST_WIDE_INT) 0
	  : ((unsigned HOST_WIDE_INT) 1 << n) - 1);

  while (nbit < n)
    {
      y = y * (2 - x*y) & mask;		/* Modulo 2^N */
      nbit *= 2;
    }
  return y;
}
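
/* E.g. invert_mod2n (3, 8) == 171, since 3 * 171 == 513 == 2*256 + 1
   == 1 (mod 2^8).  Each y = y*(2 - x*y) step doubles the number of
   correct low-order bits -- Newton's iteration on 1/x.  */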
/* Emit code to adjust ADJ_OPERAND after multiplication of wrong signedness
   flavor of OP0 and OP1.  ADJ_OPERAND is already the high half of the
   product OP0 x OP1.  If UNSIGNEDP is nonzero, adjust the signed product
   to become unsigned, if UNSIGNEDP is zero, adjust the unsigned product to
   become signed.

   The result is put in TARGET if that is convenient.

   MODE is the mode of operation.  */

static rtx
expand_mult_highpart_adjust (enum machine_mode mode, rtx adj_operand, rtx op0,
			     rtx op1, rtx target, int unsignedp)
{
  rtx tem;
  enum rtx_code adj_code = unsignedp ? PLUS : MINUS;

  tem = expand_shift (RSHIFT_EXPR, mode, op0,
		      build_int_cst (NULL_TREE, GET_MODE_BITSIZE (mode) - 1),
		      NULL_RTX, 0);
  tem = expand_and (mode, tem, op1, NULL_RTX);
  adj_operand
    = force_operand (gen_rtx_fmt_ee (adj_code, mode, adj_operand, tem),
		     adj_operand);

  tem = expand_shift (RSHIFT_EXPR, mode, op1,
		      build_int_cst (NULL_TREE, GET_MODE_BITSIZE (mode) - 1),
		      NULL_RTX, 0);
  tem = expand_and (mode, tem, op0, NULL_RTX);
  target = force_operand (gen_rtx_fmt_ee (adj_code, mode, adj_operand, tem),
			  target);

  return target;
}
3277 extract_high_half (enum machine_mode mode
, rtx op
)
3279 enum machine_mode wider_mode
;
3281 if (mode
== word_mode
)
3282 return gen_highpart (mode
, op
);
3284 wider_mode
= GET_MODE_WIDER_MODE (mode
);
3285 op
= expand_shift (RSHIFT_EXPR
, wider_mode
, op
,
3286 build_int_cst (NULL_TREE
, GET_MODE_BITSIZE (mode
)), 0, 1);
3287 return convert_modes (mode
, wider_mode
, op
, 0);
/* Like expand_mult_highpart, but only consider using a multiplication
   optab.  OP1 is an rtx for the constant operand.  */

static rtx
expand_mult_highpart_optab (enum machine_mode mode, rtx op0, rtx op1,
			    rtx target, int unsignedp, int max_cost)
{
  rtx narrow_op1 = gen_int_mode (INTVAL (op1), mode);
  enum machine_mode wider_mode;
  optab moptab;
  rtx tem;
  int size;

  wider_mode = GET_MODE_WIDER_MODE (mode);
  size = GET_MODE_BITSIZE (mode);

  /* Firstly, try using a multiplication insn that only generates the needed
     high part of the product, and in the sign flavor of unsignedp.  */
  if (mul_highpart_cost[mode] < max_cost)
    {
      moptab = unsignedp ? umul_highpart_optab : smul_highpart_optab;
      tem = expand_binop (mode, moptab, op0, narrow_op1, target,
			  unsignedp, OPTAB_DIRECT);
      if (tem)
	return tem;
    }

  /* Secondly, same as above, but use sign flavor opposite of unsignedp.
     Need to adjust the result after the multiplication.  */
  if (size - 1 < BITS_PER_WORD
      && (mul_highpart_cost[mode] + 2 * shift_cost[mode][size-1]
	  + 4 * add_cost[mode] < max_cost))
    {
      moptab = unsignedp ? smul_highpart_optab : umul_highpart_optab;
      tem = expand_binop (mode, moptab, op0, narrow_op1, target,
			  unsignedp, OPTAB_DIRECT);
      if (tem)
	/* We used the wrong signedness.  Adjust the result.  */
	return expand_mult_highpart_adjust (mode, tem, op0, narrow_op1,
					    tem, unsignedp);
    }

  /* Try widening multiplication.  */
  moptab = unsignedp ? umul_widen_optab : smul_widen_optab;
  if (moptab->handlers[wider_mode].insn_code != CODE_FOR_nothing
      && mul_widen_cost[wider_mode] < max_cost)
    {
      tem = expand_binop (wider_mode, moptab, op0, narrow_op1, 0,
			  unsignedp, OPTAB_WIDEN);
      if (tem)
	return extract_high_half (mode, tem);
    }

  /* Try widening the mode and perform a non-widening multiplication.  */
  moptab = smul_optab;
  if (smul_optab->handlers[wider_mode].insn_code != CODE_FOR_nothing
      && size - 1 < BITS_PER_WORD
      && mul_cost[wider_mode] + shift_cost[mode][size-1] < max_cost)
    {
      tem = expand_binop (wider_mode, moptab, op0, op1, 0,
			  unsignedp, OPTAB_WIDEN);
      if (tem)
	return extract_high_half (mode, tem);
    }

  /* Try widening multiplication of opposite signedness, and adjust.  */
  moptab = unsignedp ? smul_widen_optab : umul_widen_optab;
  if (moptab->handlers[wider_mode].insn_code != CODE_FOR_nothing
      && size - 1 < BITS_PER_WORD
      && (mul_widen_cost[wider_mode] + 2 * shift_cost[mode][size-1]
	  + 4 * add_cost[mode] < max_cost))
    {
      tem = expand_binop (wider_mode, moptab, op0, narrow_op1,
			  NULL_RTX, ! unsignedp, OPTAB_WIDEN);
      if (tem != 0)
	{
	  tem = extract_high_half (mode, tem);
	  /* We used the wrong signedness.  Adjust the result.  */
	  return expand_mult_highpart_adjust (mode, tem, op0, narrow_op1,
					      target, unsignedp);
	}
    }

  return 0;
}
/* Emit code to multiply OP0 and CNST1, putting the high half of the result
   in TARGET if that is convenient, and return where the result is.  If the
   operation cannot be performed, 0 is returned.

   MODE is the mode of operation and result.

   UNSIGNEDP nonzero means unsigned multiply.

   MAX_COST is the total allowed cost for the expanded RTL.  */

rtx
expand_mult_highpart (enum machine_mode mode, rtx op0,
		      unsigned HOST_WIDE_INT cnst1, rtx target,
		      int unsignedp, int max_cost)
{
  enum machine_mode wider_mode = GET_MODE_WIDER_MODE (mode);
  int extra_cost;
  bool sign_adjust = false;
  enum mult_variant variant;
  struct algorithm alg;
  rtx op1, tem;

  /* We can't support modes wider than HOST_BITS_PER_INT.  */
  gcc_assert (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT);

  op1 = gen_int_mode (cnst1, wider_mode);
  cnst1 &= GET_MODE_MASK (mode);

  /* We can't optimize modes wider than BITS_PER_WORD.
     ??? We might be able to perform double-word arithmetic if
     mode == word_mode, however all the cost calculations in
     synth_mult etc. assume single-word operations.  */
  if (GET_MODE_BITSIZE (wider_mode) > BITS_PER_WORD)
    return expand_mult_highpart_optab (mode, op0, op1, target,
				       unsignedp, max_cost);

  extra_cost = shift_cost[mode][GET_MODE_BITSIZE (mode) - 1];

  /* Check whether we try to multiply by a negative constant.  */
  if (!unsignedp && ((cnst1 >> (GET_MODE_BITSIZE (mode) - 1)) & 1))
    {
      sign_adjust = true;
      extra_cost += add_cost[mode];
    }

  /* See whether shift/add multiplication is cheap enough.  */
  if (choose_mult_variant (wider_mode, cnst1, &alg, &variant,
			   max_cost - extra_cost))
    {
      /* See whether the specialized multiplication optabs are
	 cheaper than the shift/add version.  */
      tem = expand_mult_highpart_optab (mode, op0, op1, target, unsignedp,
					alg.cost.cost + extra_cost);
      if (tem)
	return tem;

      tem = convert_to_mode (wider_mode, op0, unsignedp);
      tem = expand_mult_const (wider_mode, tem, cnst1, 0, &alg, variant);
      tem = extract_high_half (mode, tem);

      /* Adjust result for signedness.  */
      if (sign_adjust)
	tem = force_operand (gen_rtx_MINUS (mode, tem, op0), tem);

      return tem;
    }
  return expand_mult_highpart_optab (mode, op0, op1, target,
				     unsignedp, max_cost);
}
/* Expand signed modulus of OP0 by a power of two D in mode MODE.  */

static rtx
expand_smod_pow2 (enum machine_mode mode, rtx op0, HOST_WIDE_INT d)
{
  unsigned HOST_WIDE_INT masklow, maskhigh;
  rtx result, temp, shift, label;
  int logd;

  logd = floor_log2 (d);
  result = gen_reg_rtx (mode);

  /* Avoid conditional branches when they're expensive.  */
  if (BRANCH_COST >= 2
      && !optimize_size)
    {
      rtx signmask = emit_store_flag (result, LT, op0, const0_rtx,
				      mode, 0, -1);
      if (signmask)
	{
	  signmask = force_reg (mode, signmask);
	  masklow = ((HOST_WIDE_INT) 1 << logd) - 1;
	  shift = GEN_INT (GET_MODE_BITSIZE (mode) - logd);

	  /* Use the rtx_cost of a LSHIFTRT instruction to determine
	     which instruction sequence to use.  If logical right shifts
	     are expensive then use 2 XORs, 2 SUBs and an AND, otherwise
	     use a LSHIFTRT, 1 ADD, 1 SUB and an AND.  */

	  temp = gen_rtx_LSHIFTRT (mode, result, shift);
	  if (lshr_optab->handlers[mode].insn_code == CODE_FOR_nothing
	      || rtx_cost (temp, SET) > COSTS_N_INSNS (2))
	    {
	      temp = expand_binop (mode, xor_optab, op0, signmask,
				   NULL_RTX, 1, OPTAB_LIB_WIDEN);
	      temp = expand_binop (mode, sub_optab, temp, signmask,
				   NULL_RTX, 1, OPTAB_LIB_WIDEN);
	      temp = expand_binop (mode, and_optab, temp, GEN_INT (masklow),
				   NULL_RTX, 1, OPTAB_LIB_WIDEN);
	      temp = expand_binop (mode, xor_optab, temp, signmask,
				   NULL_RTX, 1, OPTAB_LIB_WIDEN);
	      temp = expand_binop (mode, sub_optab, temp, signmask,
				   NULL_RTX, 1, OPTAB_LIB_WIDEN);
	    }
	  else
	    {
	      signmask = expand_binop (mode, lshr_optab, signmask, shift,
				       NULL_RTX, 1, OPTAB_LIB_WIDEN);
	      signmask = force_reg (mode, signmask);

	      temp = expand_binop (mode, add_optab, op0, signmask,
				   NULL_RTX, 1, OPTAB_LIB_WIDEN);
	      temp = expand_binop (mode, and_optab, temp, GEN_INT (masklow),
				   NULL_RTX, 1, OPTAB_LIB_WIDEN);
	      temp = expand_binop (mode, sub_optab, temp, signmask,
				   NULL_RTX, 1, OPTAB_LIB_WIDEN);
	    }
	  return temp;
	}
    }

  /* Mask contains the mode's signbit and the significant bits of the
     modulus.  By including the signbit in the operation, many targets
     can avoid an explicit compare operation in the following comparison
     against zero.  */

  masklow = ((HOST_WIDE_INT) 1 << logd) - 1;
  if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
    {
      masklow |= (HOST_WIDE_INT) -1 << (GET_MODE_BITSIZE (mode) - 1);
      maskhigh = -1;
    }
  else
    maskhigh = (HOST_WIDE_INT) -1
		 << (GET_MODE_BITSIZE (mode) - HOST_BITS_PER_WIDE_INT - 1);

  temp = expand_binop (mode, and_optab, op0,
		       immed_double_const (masklow, maskhigh, mode),
		       result, 1, OPTAB_LIB_WIDEN);
  if (temp != result)
    emit_move_insn (result, temp);

  label = gen_label_rtx ();
  do_cmp_and_jump (result, const0_rtx, GE, mode, label);

  temp = expand_binop (mode, sub_optab, result, const1_rtx, result,
		       0, OPTAB_LIB_WIDEN);
  masklow = (HOST_WIDE_INT) -1 << logd;
  maskhigh = -1;
  temp = expand_binop (mode, ior_optab, temp,
		       immed_double_const (masklow, maskhigh, mode),
		       result, 1, OPTAB_LIB_WIDEN);
  temp = expand_binop (mode, add_optab, temp, const1_rtx, result,
		       0, OPTAB_LIB_WIDEN);
  if (temp != result)
    emit_move_insn (result, temp);

  emit_label (label);
  return result;
}
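
/* Worked example (illustrative): for d = 4 and x = -7 the branchless
   XOR variant computes sm = -1 (since x < 0), then
   ((((-7 ^ -1) - -1) & 3) ^ -1) - -1 == ((7 & 3) ^ -1) + 1 == -3,
   which is -7 % 4 with truncation toward zero.  */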
/* Expand signed division of OP0 by a power of two D in mode MODE.
   This routine is only called for positive values of D.  */

static rtx
expand_sdiv_pow2 (enum machine_mode mode, rtx op0, HOST_WIDE_INT d)
{
  rtx temp, label;
  tree shift;
  int logd;

  logd = floor_log2 (d);
  shift = build_int_cst (NULL_TREE, logd);

  if (d == 2 && BRANCH_COST >= 1)
    {
      temp = gen_reg_rtx (mode);
      temp = emit_store_flag (temp, LT, op0, const0_rtx, mode, 0, 1);
      temp = expand_binop (mode, add_optab, temp, op0, NULL_RTX,
			   0, OPTAB_LIB_WIDEN);
      return expand_shift (RSHIFT_EXPR, mode, temp, shift, NULL_RTX, 0);
    }

#ifdef HAVE_conditional_move
  if (BRANCH_COST >= 2)
    {
      rtx temp2;

      /* ??? emit_conditional_move forces a stack adjustment via
	 compare_from_rtx so, if the sequence is discarded, it will
	 be lost.  Do it now instead.  */
      do_pending_stack_adjust ();

      start_sequence ();
      temp2 = copy_to_mode_reg (mode, op0);
      temp = expand_binop (mode, add_optab, temp2, GEN_INT (d-1),
			   NULL_RTX, 0, OPTAB_LIB_WIDEN);
      temp = force_reg (mode, temp);

      /* Construct "temp2 = (temp2 < 0) ? temp : temp2".  */
      temp2 = emit_conditional_move (temp2, LT, temp2, const0_rtx,
				     mode, temp, temp2, mode, 0);
      if (temp2)
	{
	  rtx seq = get_insns ();
	  end_sequence ();
	  emit_insn (seq);
	  return expand_shift (RSHIFT_EXPR, mode, temp2, shift, NULL_RTX, 0);
	}
      end_sequence ();
    }
#endif

  if (BRANCH_COST >= 2)
    {
      int ushift = GET_MODE_BITSIZE (mode) - logd;

      temp = gen_reg_rtx (mode);
      temp = emit_store_flag (temp, LT, op0, const0_rtx, mode, 0, -1);
      if (shift_cost[mode][ushift] > COSTS_N_INSNS (1))
	temp = expand_binop (mode, and_optab, temp, GEN_INT (d - 1),
			     NULL_RTX, 0, OPTAB_LIB_WIDEN);
      else
	temp = expand_shift (RSHIFT_EXPR, mode, temp,
			     build_int_cst (NULL_TREE, ushift),
			     NULL_RTX, 1);
      temp = expand_binop (mode, add_optab, temp, op0, NULL_RTX,
			   0, OPTAB_LIB_WIDEN);
      return expand_shift (RSHIFT_EXPR, mode, temp, shift, NULL_RTX, 0);
    }

  label = gen_label_rtx ();
  temp = copy_to_mode_reg (mode, op0);
  do_cmp_and_jump (temp, const0_rtx, GE, mode, label);
  expand_inc (temp, GEN_INT (d - 1));
  emit_label (label);
  return expand_shift (RSHIFT_EXPR, mode, temp, shift, NULL_RTX, 0);
}
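
/* E.g. for d = 4 this computes (x + (x < 0 ? 3 : 0)) >> 2:
   with x = -7, (-7 + 3) >> 2 == -1, matching C's truncating -7 / 4,
   whereas a plain arithmetic shift alone would give -2.  */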
/* Emit the code to divide OP0 by OP1, putting the result in TARGET
   if that is convenient, and returning where the result is.
   You may request either the quotient or the remainder as the result;
   specify REM_FLAG nonzero to get the remainder.

   CODE is the expression code for which kind of division this is;
   it controls how rounding is done.  MODE is the machine mode to use.
   UNSIGNEDP nonzero means do unsigned division.  */

/* ??? For CEIL_MOD_EXPR, can compute incorrect remainder with ANDI
   and then correct it by or'ing in missing high bits
   if result of ANDI is nonzero.
   For ROUND_MOD_EXPR, can use ANDI and then sign-extend the result.
   This could optimize to a bfexts instruction.
   But C doesn't use these operations, so their optimizations are
   not urgent.  */
/* ??? For modulo, we don't actually need the highpart of the first product,
   the low part will do nicely.  And for small divisors, the second multiply
   can also be a low-part only multiply or even be completely left out.
   E.g. to calculate the remainder of a division by 3 with a 32 bit
   multiply, multiply with 0x55555556 and extract the upper two bits;
   the result is exact for inputs up to 0x1fffffff.
   The input range can be reduced by using cross-sum rules.
   For odd divisors >= 3, the following table gives right shift counts
   so that if a number is shifted by an integer multiple of the given
   amount, the remainder stays the same:
   2, 4, 3, 6, 10, 12, 4, 8, 18, 6, 11, 20, 18, 0, 5, 10, 12, 0, 12, 20,
   14, 12, 23, 21, 8, 0, 20, 18, 0, 0, 6, 12, 0, 22, 0, 18, 20, 30, 0, 0,
   0, 8, 0, 11, 12, 10, 36, 0, 30, 0, 0, 12, 0, 0, 0, 0, 44, 12, 24, 0,
   20, 0, 7, 14, 0, 18, 36, 0, 0, 46, 60, 0, 42, 0, 15, 24, 20, 0, 0, 33,
   0, 20, 0, 0, 18, 0, 60, 0, 0, 0, 0, 0, 40, 18, 0, 0, 12

   Cross-sum rules for even numbers can be derived by leaving as many bits
   to the right alone as the divisor has zeros to the right.
   E.g. if x is an unsigned 32 bit number:
   (x mod 12) == (((x & 1023) + ((x >> 8) & ~3)) * 0x15555558 >> 2 * 3) >> 28
   */
3663 #define EXACT_POWER_OF_2_OR_ZERO_P(x) (((x) & ((x) - 1)) == 0)
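
/* E.g. EXACT_POWER_OF_2_OR_ZERO_P (8) is true since 8 & 7 == 0, while
   EXACT_POWER_OF_2_OR_ZERO_P (12) is false since 12 & 11 == 8.  */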
3666 expand_divmod (int rem_flag
, enum tree_code code
, enum machine_mode mode
,
3667 rtx op0
, rtx op1
, rtx target
, int unsignedp
)
3669 enum machine_mode compute_mode
;
3671 rtx quotient
= 0, remainder
= 0;
3675 optab optab1
, optab2
;
3676 int op1_is_constant
, op1_is_pow2
= 0;
3677 int max_cost
, extra_cost
;
3678 static HOST_WIDE_INT last_div_const
= 0;
3679 static HOST_WIDE_INT ext_op1
;
3681 op1_is_constant
= GET_CODE (op1
) == CONST_INT
;
3682 if (op1_is_constant
)
3684 ext_op1
= INTVAL (op1
);
3686 ext_op1
&= GET_MODE_MASK (mode
);
3687 op1_is_pow2
= ((EXACT_POWER_OF_2_OR_ZERO_P (ext_op1
)
3688 || (! unsignedp
&& EXACT_POWER_OF_2_OR_ZERO_P (-ext_op1
))));
  /*
     This is the structure of expand_divmod:

     First comes code to fix up the operands so we can perform the operations
     correctly and efficiently.

     Second comes a switch statement with code specific for each rounding mode.
     For some special operands this code emits all RTL for the desired
     operation, for other cases, it generates only a quotient and stores it in
     QUOTIENT.  The case for trunc division/remainder might leave quotient = 0,
     to indicate that it has not done anything.

     Last comes code that finishes the operation.  If QUOTIENT is set and
     REM_FLAG is set, the remainder is computed as OP0 - QUOTIENT * OP1.  If
     QUOTIENT is not set, it is computed using trunc rounding.

     We try to generate special code for division and remainder when OP1 is a
     constant.  If |OP1| = 2**n we can use shifts and some other fast
     operations.  For other values of OP1, we compute a carefully selected
     fixed-point approximation m = 1/OP1, and generate code that multiplies OP0
     by m.

     In all cases but EXACT_DIV_EXPR, this multiplication requires the upper
     half of the product.  Different strategies for generating the product are
     implemented in expand_mult_highpart.

     If what we actually want is the remainder, we generate that by another
     by-constant multiplication and a subtraction.  */
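  /* A minimal sketch (not GCC code) of the fixed-point-inverse strategy
     the comment above describes, for unsigned n / 7 in 32 bits.  The
     constant 0x24924925 and the add-and-two-shifts fixup follow the
     standard Granlund/Montgomery construction that choose_multiplier
     implements; the fixup is needed because the chosen multiplier for 7
     needs 33 bits.  */
#if 0
static unsigned int
udiv7_sketch (unsigned int n)
{
  /* High 32 bits of the 64-bit product n * 0x24924925.  */
  unsigned int t = (unsigned long long) n * 0x24924925ULL >> 32;
  return (((n - t) >> 1) + t) >> 2;     /* quotient of n / 7 */
}
#endif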
  /* We shouldn't be called with OP1 == const1_rtx, but some of the
     code below will malfunction if we are, so check here and handle
     the special case if so.  */
  if (op1 == const1_rtx)
    return rem_flag ? const0_rtx : op0;

  /* When dividing by -1, we could get an overflow.
     negv_optab can handle overflows.  */
  if (! unsignedp && op1 == constm1_rtx)
    {
      if (rem_flag)
        return const0_rtx;
      return expand_unop (mode, flag_trapv && GET_MODE_CLASS(mode) == MODE_INT
                          ? negv_optab : neg_optab, op0, target, 0);
    }
  if (target
      /* Don't use the function value register as a target
         since we have to read it as well as write it,
         and function-inlining gets confused by this.  */
      && ((REG_P (target) && REG_FUNCTION_VALUE_P (target))
          /* Don't clobber an operand while doing a multi-step calculation.  */
          || ((rem_flag || op1_is_constant)
              && (reg_mentioned_p (target, op0)
                  || (MEM_P (op0) && MEM_P (target))))
          || reg_mentioned_p (target, op1)
          || (MEM_P (op1) && MEM_P (target))))
    target = 0;
  /* Get the mode in which to perform this computation.  Normally it will
     be MODE, but sometimes we can't do the desired operation in MODE.
     If so, pick a wider mode in which we can do the operation.  Convert
     to that mode at the start to avoid repeated conversions.

     First see what operations we need.  These depend on the expression
     we are evaluating.  (We assume that divxx3 insns exist under the
     same conditions that modxx3 insns and that these insns don't normally
     fail.  If these assumptions are not correct, we may generate less
     efficient code in some cases.)

     Then see if we find a mode in which we can open-code that operation
     (either a division, modulus, or shift).  Finally, check for the smallest
     mode for which we can do the operation with a library call.  */

  /* We might want to refine this now that we have division-by-constant
     optimization.  Since expand_mult_highpart tries so many variants, it is
     not straightforward to generalize this.  Maybe we should make an array
     of possible modes in init_expmed?  Save this for GCC 2.7.  */
  optab1 = ((op1_is_pow2 && op1 != const0_rtx)
            ? (unsignedp ? lshr_optab : ashr_optab)
            : (unsignedp ? udiv_optab : sdiv_optab));
  optab2 = ((op1_is_pow2 && op1 != const0_rtx)
            ? optab1
            : (unsignedp ? udivmod_optab : sdivmod_optab));
  for (compute_mode = mode; compute_mode != VOIDmode;
       compute_mode = GET_MODE_WIDER_MODE (compute_mode))
    if (optab1->handlers[compute_mode].insn_code != CODE_FOR_nothing
        || optab2->handlers[compute_mode].insn_code != CODE_FOR_nothing)
      break;

  if (compute_mode == VOIDmode)
    for (compute_mode = mode; compute_mode != VOIDmode;
         compute_mode = GET_MODE_WIDER_MODE (compute_mode))
      if (optab1->handlers[compute_mode].libfunc
          || optab2->handlers[compute_mode].libfunc)
        break;

  /* If we still couldn't find a mode, use MODE, but we'll probably abort
     in expand_binop.  */
  if (compute_mode == VOIDmode)
    compute_mode = mode;
  if (target && GET_MODE (target) == compute_mode)
    tquotient = target;
  else
    tquotient = gen_reg_rtx (compute_mode);

  size = GET_MODE_BITSIZE (compute_mode);
#if 0
  /* It should be possible to restrict the precision to GET_MODE_BITSIZE
     (mode), and thereby get better code when OP1 is a constant.  Do that
     later.  It will require going over all usages of SIZE below.  */
  size = GET_MODE_BITSIZE (mode);
#endif
  /* Only deduct something for a REM if the last divide done was
     for a different constant.  Then set the constant of the last
     divide.  */
  max_cost = div_cost[compute_mode]
    - (rem_flag && ! (last_div_const != 0 && op1_is_constant
                      && INTVAL (op1) == last_div_const)
       ? mul_cost[compute_mode] + add_cost[compute_mode]
       : 0);

  last_div_const = ! rem_flag && op1_is_constant ? INTVAL (op1) : 0;
  /* Now convert to the best mode to use.  */
  if (compute_mode != mode)
    {
      op0 = convert_modes (compute_mode, mode, op0, unsignedp);
      op1 = convert_modes (compute_mode, mode, op1, unsignedp);

      /* convert_modes may have placed op1 into a register, so we
         must recompute the following.  */
      op1_is_constant = GET_CODE (op1) == CONST_INT;
      op1_is_pow2 = (op1_is_constant
                     && ((EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1))
                          || (! unsignedp
                              && EXACT_POWER_OF_2_OR_ZERO_P (-INTVAL (op1))))));
    }
  /* If one of the operands is a volatile MEM, copy it into a register.  */

  if (MEM_P (op0) && MEM_VOLATILE_P (op0))
    op0 = force_reg (compute_mode, op0);
  if (MEM_P (op1) && MEM_VOLATILE_P (op1))
    op1 = force_reg (compute_mode, op1);

  /* If we need the remainder or if OP1 is constant, we need to
     put OP0 in a register in case it has any queued subexpressions.  */
  if (rem_flag || op1_is_constant)
    op0 = force_reg (compute_mode, op0);

  last = get_last_insn ();
  /* Promote floor rounding to trunc rounding for unsigned operations.  */
  if (unsignedp)
    {
      if (code == FLOOR_DIV_EXPR)
        code = TRUNC_DIV_EXPR;
      if (code == FLOOR_MOD_EXPR)
        code = TRUNC_MOD_EXPR;
      if (code == EXACT_DIV_EXPR && op1_is_pow2)
        code = TRUNC_DIV_EXPR;
    }

  if (op1 != const0_rtx)
    switch (code)
      {
      case TRUNC_MOD_EXPR:
      case TRUNC_DIV_EXPR:
        if (op1_is_constant)
          {
            if (unsignedp)
              {
                unsigned HOST_WIDE_INT mh, ml;
                int pre_shift, post_shift;
                int dummy;
                unsigned HOST_WIDE_INT d = (INTVAL (op1)
                                            & GET_MODE_MASK (compute_mode));

                if (EXACT_POWER_OF_2_OR_ZERO_P (d))
                  {
                    pre_shift = floor_log2 (d);
                    if (rem_flag)
                      {
                        remainder
                          = expand_binop (compute_mode, and_optab, op0,
                                          GEN_INT (((HOST_WIDE_INT) 1 << pre_shift) - 1),
                                          remainder, 1, OPTAB_LIB_WIDEN);
                        if (remainder)
                          return gen_lowpart (mode, remainder);
                      }
                    quotient = expand_shift (RSHIFT_EXPR, compute_mode, op0,
                                             build_int_cst (NULL_TREE,
                                                            pre_shift),
                                             tquotient, 1);
                  }
                else if (size <= HOST_BITS_PER_WIDE_INT)
                  {
                    if (d >= ((unsigned HOST_WIDE_INT) 1 << (size - 1)))
                      {
                        /* Most significant bit of divisor is set; emit an scc
                           insn.  */
                        quotient = emit_store_flag (tquotient, GEU, op0, op1,
                                                    compute_mode, 1, 1);
                        if (quotient == 0)
                          goto fail1;
                      }
                    else
                      {
                        /* Find a suitable multiplier and right shift count
                           instead of multiplying with D.  */

                        mh = choose_multiplier (d, size, size,
                                                &ml, &post_shift, &dummy);

                        /* If the suggested multiplier is more than SIZE bits,
                           we can do better for even divisors, using an
                           initial right shift.  */
                        if (mh != 0 && (d & 1) == 0)
                          {
                            pre_shift = floor_log2 (d & -d);
                            mh = choose_multiplier (d >> pre_shift, size,
                                                    size - pre_shift,
                                                    &ml, &post_shift, &dummy);
                            gcc_assert (!mh);
                          }
                        else
                          pre_shift = 0;

                        if (mh != 0)
                          {
                            rtx t1, t2, t3, t4;
                            if (post_shift - 1 >= BITS_PER_WORD)
                              goto fail1;

                            extra_cost
                              = (shift_cost[compute_mode][post_shift - 1]
                                 + shift_cost[compute_mode][1]
                                 + 2 * add_cost[compute_mode]);
                            t1 = expand_mult_highpart (compute_mode, op0, ml,
                                                       NULL_RTX, 1,
                                                       max_cost - extra_cost);
                            if (t1 == 0)
                              goto fail1;
                            t2 = force_operand (gen_rtx_MINUS (compute_mode,
                                                               op0, t1),
                                                NULL_RTX);
                            t3 = expand_shift
                              (RSHIFT_EXPR, compute_mode, t2,
                               build_int_cst (NULL_TREE, 1),
                               NULL_RTX, 1);
                            t4 = force_operand (gen_rtx_PLUS (compute_mode,
                                                              t1, t3),
                                                NULL_RTX);
                            quotient = expand_shift
                              (RSHIFT_EXPR, compute_mode, t4,
                               build_int_cst (NULL_TREE, post_shift - 1),
                               tquotient, 1);
                          }
                        else
                          {
                            rtx t1, t2;

                            if (pre_shift >= BITS_PER_WORD
                                || post_shift >= BITS_PER_WORD)
                              goto fail1;

                            t1 = expand_shift
                              (RSHIFT_EXPR, compute_mode, op0,
                               build_int_cst (NULL_TREE, pre_shift),
                               NULL_RTX, 1);
                            extra_cost
                              = (shift_cost[compute_mode][pre_shift]
                                 + shift_cost[compute_mode][post_shift]);
                            t2 = expand_mult_highpart (compute_mode, t1, ml,
                                                       NULL_RTX, 1,
                                                       max_cost - extra_cost);
                            if (t2 == 0)
                              goto fail1;
                            quotient = expand_shift
                              (RSHIFT_EXPR, compute_mode, t2,
                               build_int_cst (NULL_TREE, post_shift),
                               tquotient, 1);
                          }
                      }
                  }
                else            /* Too wide mode to use tricky code */
                  break;

                insn = get_last_insn ();
                if (insn != last
                    && (set = single_set (insn)) != 0
                    && SET_DEST (set) == quotient)
                  set_unique_reg_note (insn,
                                       REG_EQUAL,
                                       gen_rtx_UDIV (compute_mode, op0, op1));
              }
            else                /* TRUNC_DIV, signed */
              {
                unsigned HOST_WIDE_INT ml;
                int lgup, post_shift;
                HOST_WIDE_INT d = INTVAL (op1);
                unsigned HOST_WIDE_INT abs_d = d >= 0 ? d : -d;

                /* n rem d = n rem -d */
                if (rem_flag && d < 0)
                  {
                    d = abs_d;
                    op1 = gen_int_mode (abs_d, compute_mode);
                  }

                if (d == 1)
                  quotient = op0;
                else if (d == -1)
                  quotient = expand_unop (compute_mode, neg_optab, op0,
                                          tquotient, 0);
                else if (abs_d == (unsigned HOST_WIDE_INT) 1 << (size - 1))
                  {
                    /* This case is not handled correctly below.  */
                    quotient = emit_store_flag (tquotient, EQ, op0, op1,
                                                compute_mode, 1, 1);
                    if (quotient == 0)
                      goto fail1;
                  }
                else if (EXACT_POWER_OF_2_OR_ZERO_P (d)
                         && (rem_flag ? smod_pow2_cheap[compute_mode]
                                      : sdiv_pow2_cheap[compute_mode])
                         /* We assume that cheap metric is true if the
                            optab has an expander for this mode.  */
                         && (((rem_flag ? smod_optab : sdiv_optab)
                              ->handlers[compute_mode].insn_code
                              != CODE_FOR_nothing)
                             || (sdivmod_optab->handlers[compute_mode]
                                 .insn_code != CODE_FOR_nothing)))
                  ;
                else if (EXACT_POWER_OF_2_OR_ZERO_P (abs_d))
                  {
                    if (rem_flag)
                      {
                        remainder = expand_smod_pow2 (compute_mode, op0, d);
                        if (remainder)
                          return gen_lowpart (mode, remainder);
                      }

                    if (sdiv_pow2_cheap[compute_mode]
                        && ((sdiv_optab->handlers[compute_mode].insn_code
                             != CODE_FOR_nothing)
                            || (sdivmod_optab->handlers[compute_mode].insn_code
                                != CODE_FOR_nothing)))
                      quotient = expand_divmod (0, TRUNC_DIV_EXPR,
                                                compute_mode, op0,
                                                gen_int_mode (abs_d,
                                                              compute_mode),
                                                NULL_RTX, 0);
                    else
                      quotient = expand_sdiv_pow2 (compute_mode, op0, abs_d);

                    /* We have computed OP0 / abs(OP1).  If OP1 is negative,
                       negate the quotient.  */
                    if (d < 0)
                      {
                        insn = get_last_insn ();
                        if (insn != last
                            && (set = single_set (insn)) != 0
                            && SET_DEST (set) == quotient
                            && abs_d < ((unsigned HOST_WIDE_INT) 1
                                        << (HOST_BITS_PER_WIDE_INT - 1)))
                          set_unique_reg_note (insn,
                                               REG_EQUAL,
                                               gen_rtx_DIV (compute_mode,
                                                            op0,
                                                            gen_int_mode
                                                            (abs_d,
                                                             compute_mode)));

                        quotient = expand_unop (compute_mode, neg_optab,
                                                quotient, quotient, 0);
                      }
                  }
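                /* A minimal sketch (not GCC code) of the sequence
                   expand_sdiv_pow2 emits for d == 2^k > 0 on a 32-bit int:
                   bias negative dividends by d - 1 so the arithmetic shift
                   truncates toward zero, as C division requires.  */
#if 0
static int
sdiv_pow2_sketch (int n, int k)
{
  int d = 1 << k;
  return (n + ((n >> 31) & (d - 1))) >> k;      /* n / d, truncating */
}
#endif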
                else if (size <= HOST_BITS_PER_WIDE_INT)
                  {
                    choose_multiplier (abs_d, size, size - 1,
                                       &ml, &post_shift, &lgup);
                    if (ml < (unsigned HOST_WIDE_INT) 1 << (size - 1))
                      {
                        rtx t1, t2, t3;

                        if (post_shift >= BITS_PER_WORD
                            || size - 1 >= BITS_PER_WORD)
                          goto fail1;

                        extra_cost = (shift_cost[compute_mode][post_shift]
                                      + shift_cost[compute_mode][size - 1]
                                      + add_cost[compute_mode]);
                        t1 = expand_mult_highpart (compute_mode, op0, ml,
                                                   NULL_RTX, 0,
                                                   max_cost - extra_cost);
                        if (t1 == 0)
                          goto fail1;
                        t2 = expand_shift
                          (RSHIFT_EXPR, compute_mode, t1,
                           build_int_cst (NULL_TREE, post_shift),
                           NULL_RTX, 0);
                        t3 = expand_shift
                          (RSHIFT_EXPR, compute_mode, op0,
                           build_int_cst (NULL_TREE, size - 1),
                           NULL_RTX, 0);
                        if (d < 0)
                          quotient
                            = force_operand (gen_rtx_MINUS (compute_mode,
                                                            t3, t2),
                                             tquotient);
                        else
                          quotient
                            = force_operand (gen_rtx_MINUS (compute_mode,
                                                            t2, t3),
                                             tquotient);
                      }
                    else
                      {
                        rtx t1, t2, t3, t4;

                        if (post_shift >= BITS_PER_WORD
                            || size - 1 >= BITS_PER_WORD)
                          goto fail1;

                        ml |= (~(unsigned HOST_WIDE_INT) 0) << (size - 1);
                        extra_cost = (shift_cost[compute_mode][post_shift]
                                      + shift_cost[compute_mode][size - 1]
                                      + 2 * add_cost[compute_mode]);
                        t1 = expand_mult_highpart (compute_mode, op0, ml,
                                                   NULL_RTX, 0,
                                                   max_cost - extra_cost);
                        if (t1 == 0)
                          goto fail1;
                        t2 = force_operand (gen_rtx_PLUS (compute_mode,
                                                          t1, op0),
                                            NULL_RTX);
                        t3 = expand_shift
                          (RSHIFT_EXPR, compute_mode, t2,
                           build_int_cst (NULL_TREE, post_shift),
                           NULL_RTX, 0);
                        t4 = expand_shift
                          (RSHIFT_EXPR, compute_mode, op0,
                           build_int_cst (NULL_TREE, size - 1),
                           NULL_RTX, 0);
                        if (d < 0)
                          quotient
                            = force_operand (gen_rtx_MINUS (compute_mode,
                                                            t4, t3),
                                             tquotient);
                        else
                          quotient
                            = force_operand (gen_rtx_MINUS (compute_mode,
                                                            t3, t4),
                                             tquotient);
                      }
                  }
                else            /* Too wide mode to use tricky code */
                  break;

                insn = get_last_insn ();
                if (insn != last
                    && (set = single_set (insn)) != 0
                    && SET_DEST (set) == quotient)
                  set_unique_reg_note (insn,
                                       REG_EQUAL,
                                       gen_rtx_DIV (compute_mode, op0, op1));
              }
          }
        break;

      fail1:
        delete_insns_since (last);
        break;
      case FLOOR_DIV_EXPR:
      case FLOOR_MOD_EXPR:
        /* We will come here only for signed operations.  */
        if (op1_is_constant && HOST_BITS_PER_WIDE_INT >= size)
          {
            unsigned HOST_WIDE_INT mh, ml;
            int pre_shift, lgup, post_shift;
            HOST_WIDE_INT d = INTVAL (op1);

            if (d > 0)
              {
                /* We could just as easily deal with negative constants here,
                   but it does not seem worth the trouble for GCC 2.6.  */
                if (EXACT_POWER_OF_2_OR_ZERO_P (d))
                  {
                    pre_shift = floor_log2 (d);
                    if (rem_flag)
                      {
                        remainder = expand_binop (compute_mode, and_optab, op0,
                                                  GEN_INT (((HOST_WIDE_INT) 1 << pre_shift) - 1),
                                                  remainder, 0, OPTAB_LIB_WIDEN);
                        if (remainder)
                          return gen_lowpart (mode, remainder);
                      }
                    quotient = expand_shift
                      (RSHIFT_EXPR, compute_mode, op0,
                       build_int_cst (NULL_TREE, pre_shift),
                       tquotient, 0);
                  }
                else
                  {
                    rtx t1, t2, t3, t4;
= choose_multiplier (d
, size
, size
- 1,
4206 &ml
, &post_shift
, &lgup
);
4209 if (post_shift
< BITS_PER_WORD
4210 && size
- 1 < BITS_PER_WORD
)
4213 (RSHIFT_EXPR
, compute_mode
, op0
,
4214 build_int_cst (NULL_TREE
, size
- 1),
4216 t2
= expand_binop (compute_mode
, xor_optab
, op0
, t1
,
4217 NULL_RTX
, 0, OPTAB_WIDEN
);
4218 extra_cost
= (shift_cost
[compute_mode
][post_shift
]
4219 + shift_cost
[compute_mode
][size
- 1]
4220 + 2 * add_cost
[compute_mode
]);
4221 t3
= expand_mult_highpart (compute_mode
, t2
, ml
,
4223 max_cost
- extra_cost
);
4227 (RSHIFT_EXPR
, compute_mode
, t3
,
4228 build_int_cst (NULL_TREE
, post_shift
),
4230 quotient
= expand_binop (compute_mode
, xor_optab
,
4231 t4
, t1
, tquotient
, 0,
                rtx nsign, t1, t2, t3, t4;
                t1 = force_operand (gen_rtx_PLUS (compute_mode,
                                                  op0, constm1_rtx), NULL_RTX);
                t2 = expand_binop (compute_mode, ior_optab, op0, t1, NULL_RTX,
                                   0, OPTAB_WIDEN);
                nsign = expand_shift
                  (RSHIFT_EXPR, compute_mode, t2,
                   build_int_cst (NULL_TREE, size - 1),
                   NULL_RTX, 0);
                t3 = force_operand (gen_rtx_MINUS (compute_mode, t1, nsign),
                                    NULL_RTX);
                t4 = expand_divmod (0, TRUNC_DIV_EXPR, compute_mode, t3, op1,
                                    NULL_RTX, 0);
                if (t4)
                  {
                    rtx t5;
                    t5 = expand_unop (compute_mode, one_cmpl_optab, nsign,
                                      NULL_RTX, 0);
                    quotient = force_operand (gen_rtx_PLUS (compute_mode,
                                                            t4, t5),
                                              tquotient);
                  }
              }
          }

        if (quotient != 0)
          break;
        delete_insns_since (last);
        /* Try using an instruction that produces both the quotient and
           remainder, using truncation.  We can easily compensate the quotient
           or remainder to get floor rounding, once we have the remainder.
           Notice that we compute also the final remainder value here,
           and return the result right away.  */
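        /* A minimal sketch (not GCC code) of the floor-rounding fixup the
           RTL emitted below performs, in plain C.  Truncating division
           rounds toward zero; it differs from floor exactly when the
           remainder is nonzero and the operands have opposite signs.  */
#if 0
static void
floor_divmod_sketch (int n, int d, int *q, int *r)
{
  *q = n / d;                   /* truncating quotient */
  *r = n % d;
  if (*r != 0 && (n ^ d) < 0)   /* opposite signs: round downward */
    {
      *q -= 1;                  /* expand_dec (quotient, const1_rtx) */
      *r += d;                  /* expand_inc (remainder, op1) */
    }
}
#endif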
        if (target == 0 || GET_MODE (target) != compute_mode)
          target = gen_reg_rtx (compute_mode);

        if (rem_flag)
          {
            remainder
              = REG_P (target) ? target : gen_reg_rtx (compute_mode);
            quotient = gen_reg_rtx (compute_mode);
          }
        else
          {
            quotient
              = REG_P (target) ? target : gen_reg_rtx (compute_mode);
            remainder = gen_reg_rtx (compute_mode);
          }

        if (expand_twoval_binop (sdivmod_optab, op0, op1,
                                 quotient, remainder, 0))
          {
            /* This could be computed with a branch-less sequence.
               Save that for later.  */
            rtx tem;
            rtx label = gen_label_rtx ();
            do_cmp_and_jump (remainder, const0_rtx, EQ, compute_mode, label);
            tem = expand_binop (compute_mode, xor_optab, op0, op1,
                                NULL_RTX, 0, OPTAB_WIDEN);
            do_cmp_and_jump (tem, const0_rtx, GE, compute_mode, label);
            expand_dec (quotient, const1_rtx);
            expand_inc (remainder, op1);
            emit_label (label);
            return gen_lowpart (mode, rem_flag ? remainder : quotient);
          }
        /* No luck with division elimination or divmod.  Have to do it
           by conditionally adjusting op0 *and* the result.  */
        {
          rtx label1, label2, label3, label4, label5;
          rtx adjusted_op0;
          rtx tem;

          quotient = gen_reg_rtx (compute_mode);
          adjusted_op0 = copy_to_mode_reg (compute_mode, op0);
          label1 = gen_label_rtx ();
          label2 = gen_label_rtx ();
          label3 = gen_label_rtx ();
          label4 = gen_label_rtx ();
          label5 = gen_label_rtx ();
          do_cmp_and_jump (op1, const0_rtx, LT, compute_mode, label2);
          do_cmp_and_jump (adjusted_op0, const0_rtx, LT, compute_mode, label1);
          tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
                              quotient, 0, OPTAB_LIB_WIDEN);
          if (tem != quotient)
            emit_move_insn (quotient, tem);
          emit_jump_insn (gen_jump (label5));
          emit_barrier ();
          emit_label (label1);
          expand_inc (adjusted_op0, const1_rtx);
          emit_jump_insn (gen_jump (label4));
          emit_barrier ();
          emit_label (label2);
          do_cmp_and_jump (adjusted_op0, const0_rtx, GT, compute_mode, label3);
          tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
                              quotient, 0, OPTAB_LIB_WIDEN);
          if (tem != quotient)
            emit_move_insn (quotient, tem);
          emit_jump_insn (gen_jump (label5));
          emit_barrier ();
          emit_label (label3);
          expand_dec (adjusted_op0, const1_rtx);
          emit_label (label4);
          tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
                              quotient, 0, OPTAB_LIB_WIDEN);
          if (tem != quotient)
            emit_move_insn (quotient, tem);
          expand_dec (quotient, const1_rtx);
          emit_label (label5);
        }
        break;
      case CEIL_DIV_EXPR:
      case CEIL_MOD_EXPR:
        if (unsignedp)
          {
            if (op1_is_constant && EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1)))
              {
                rtx t1, t2, t3;
                unsigned HOST_WIDE_INT d = INTVAL (op1);
                t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
                                   build_int_cst (NULL_TREE, floor_log2 (d)),
                                   tquotient, 1);
                t2 = expand_binop (compute_mode, and_optab, op0,
                                   GEN_INT (d - 1),
                                   NULL_RTX, 1, OPTAB_LIB_WIDEN);
                t3 = gen_reg_rtx (compute_mode);
                t3 = emit_store_flag (t3, NE, t2, const0_rtx,
                                      compute_mode, 1, 1);
                if (t3 == 0)
                  {
                    rtx lab;
                    lab = gen_label_rtx ();
                    do_cmp_and_jump (t2, const0_rtx, EQ, compute_mode, lab);
                    expand_inc (t1, const1_rtx);
                    emit_label (lab);
                    quotient = t1;
                  }
                else
                  quotient = force_operand (gen_rtx_PLUS (compute_mode,
                                                          t1, t3),
                                            tquotient);
                break;
              }
            /* Try using an instruction that produces both the quotient and
               remainder, using truncation.  We can easily compensate the
               quotient or remainder to get ceiling rounding, once we have the
               remainder.  Notice that we compute also the final remainder
               value here, and return the result right away.  */
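            /* A minimal sketch (not GCC code) of the unsigned
               ceiling-rounding fixup emitted below: after a truncating
               divmod, a nonzero remainder bumps the quotient and the
               remainder goes negative (mod 2^32), mirroring expand_dec.  */
#if 0
static unsigned int
uceil_div_sketch (unsigned int n, unsigned int d)
{
  unsigned int q = n / d, r = n % d;
  if (r != 0)
    {
      q += 1;                   /* expand_inc (quotient, const1_rtx) */
      r -= d;                   /* expand_dec (remainder, op1) */
    }
  return q;
}
#endif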
            if (target == 0 || GET_MODE (target) != compute_mode)
              target = gen_reg_rtx (compute_mode);

            if (rem_flag)
              {
                remainder = (REG_P (target)
                             ? target : gen_reg_rtx (compute_mode));
                quotient = gen_reg_rtx (compute_mode);
              }
            else
              {
                quotient = (REG_P (target)
                            ? target : gen_reg_rtx (compute_mode));
                remainder = gen_reg_rtx (compute_mode);
              }

            if (expand_twoval_binop (udivmod_optab, op0, op1, quotient,
                                     remainder, 1))
              {
                /* This could be computed with a branch-less sequence.
                   Save that for later.  */
                rtx label = gen_label_rtx ();
                do_cmp_and_jump (remainder, const0_rtx, EQ,
                                 compute_mode, label);
                expand_inc (quotient, const1_rtx);
                expand_dec (remainder, op1);
                emit_label (label);
                return gen_lowpart (mode, rem_flag ? remainder : quotient);
              }
            /* No luck with division elimination or divmod.  Have to do it
               by conditionally adjusting op0 *and* the result.  */
            {
              rtx label1, label2;
              rtx adjusted_op0, tem;

              quotient = gen_reg_rtx (compute_mode);
              adjusted_op0 = copy_to_mode_reg (compute_mode, op0);
              label1 = gen_label_rtx ();
              label2 = gen_label_rtx ();
              do_cmp_and_jump (adjusted_op0, const0_rtx, NE,
                               compute_mode, label1);
              emit_move_insn (quotient, const0_rtx);
              emit_jump_insn (gen_jump (label2));
              emit_barrier ();
              emit_label (label1);
              expand_dec (adjusted_op0, const1_rtx);
              tem = expand_binop (compute_mode, udiv_optab, adjusted_op0, op1,
                                  quotient, 1, OPTAB_LIB_WIDEN);
              if (tem != quotient)
                emit_move_insn (quotient, tem);
              expand_inc (quotient, const1_rtx);
              emit_label (label2);
            }
          }
        else /* signed */
          {
            if (op1_is_constant && EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1))
                && INTVAL (op1) >= 0)
              {
                /* This is extremely similar to the code for the unsigned case
                   above.  For 2.7 we should merge these variants, but for
                   2.6.1 I don't want to touch the code for unsigned since that
                   get used in C.  The signed case will only be used by other
                   languages (Ada).  */

                rtx t1, t2, t3;
                unsigned HOST_WIDE_INT d = INTVAL (op1);
                t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
                                   build_int_cst (NULL_TREE, floor_log2 (d)),
                                   tquotient, 0);
                t2 = expand_binop (compute_mode, and_optab, op0,
                                   GEN_INT (d - 1),
                                   NULL_RTX, 1, OPTAB_LIB_WIDEN);
                t3 = gen_reg_rtx (compute_mode);
                t3 = emit_store_flag (t3, NE, t2, const0_rtx,
                                      compute_mode, 1, 1);
                if (t3 == 0)
                  {
                    rtx lab;
                    lab = gen_label_rtx ();
                    do_cmp_and_jump (t2, const0_rtx, EQ, compute_mode, lab);
                    expand_inc (t1, const1_rtx);
                    emit_label (lab);
                    quotient = t1;
                  }
                else
                  quotient = force_operand (gen_rtx_PLUS (compute_mode,
                                                          t1, t3),
                                            tquotient);
                break;
              }
            /* Try using an instruction that produces both the quotient and
               remainder, using truncation.  We can easily compensate the
               quotient or remainder to get ceiling rounding, once we have the
               remainder.  Notice that we compute also the final remainder
               value here, and return the result right away.  */
            if (target == 0 || GET_MODE (target) != compute_mode)
              target = gen_reg_rtx (compute_mode);

            if (rem_flag)
              {
                remainder = (REG_P (target)
                             ? target : gen_reg_rtx (compute_mode));
                quotient = gen_reg_rtx (compute_mode);
              }
            else
              {
                quotient = (REG_P (target)
                            ? target : gen_reg_rtx (compute_mode));
                remainder = gen_reg_rtx (compute_mode);
              }

            if (expand_twoval_binop (sdivmod_optab, op0, op1, quotient,
                                     remainder, 0))
              {
                /* This could be computed with a branch-less sequence.
                   Save that for later.  */
                rtx tem;
                rtx label = gen_label_rtx ();
                do_cmp_and_jump (remainder, const0_rtx, EQ,
                                 compute_mode, label);
                tem = expand_binop (compute_mode, xor_optab, op0, op1,
                                    NULL_RTX, 0, OPTAB_WIDEN);
                do_cmp_and_jump (tem, const0_rtx, LT, compute_mode, label);
                expand_inc (quotient, const1_rtx);
                expand_dec (remainder, op1);
                emit_label (label);
                return gen_lowpart (mode, rem_flag ? remainder : quotient);
              }
            /* No luck with division elimination or divmod.  Have to do it
               by conditionally adjusting op0 *and* the result.  */
            {
              rtx label1, label2, label3, label4, label5;
              rtx adjusted_op0;
              rtx tem;

              quotient = gen_reg_rtx (compute_mode);
              adjusted_op0 = copy_to_mode_reg (compute_mode, op0);
              label1 = gen_label_rtx ();
              label2 = gen_label_rtx ();
              label3 = gen_label_rtx ();
              label4 = gen_label_rtx ();
              label5 = gen_label_rtx ();
              do_cmp_and_jump (op1, const0_rtx, LT, compute_mode, label2);
              do_cmp_and_jump (adjusted_op0, const0_rtx, GT,
                               compute_mode, label1);
              tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
                                  quotient, 0, OPTAB_LIB_WIDEN);
              if (tem != quotient)
                emit_move_insn (quotient, tem);
              emit_jump_insn (gen_jump (label5));
              emit_barrier ();
              emit_label (label1);
              expand_dec (adjusted_op0, const1_rtx);
              emit_jump_insn (gen_jump (label4));
              emit_barrier ();
              emit_label (label2);
              do_cmp_and_jump (adjusted_op0, const0_rtx, LT,
                               compute_mode, label3);
              tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
                                  quotient, 0, OPTAB_LIB_WIDEN);
              if (tem != quotient)
                emit_move_insn (quotient, tem);
              emit_jump_insn (gen_jump (label5));
              emit_barrier ();
              emit_label (label3);
              expand_inc (adjusted_op0, const1_rtx);
              emit_label (label4);
              tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
                                  quotient, 0, OPTAB_LIB_WIDEN);
              if (tem != quotient)
                emit_move_insn (quotient, tem);
              expand_inc (quotient, const1_rtx);
              emit_label (label5);
            }
          }
        break;
      case EXACT_DIV_EXPR:
        if (op1_is_constant && HOST_BITS_PER_WIDE_INT >= size)
          {
            HOST_WIDE_INT d = INTVAL (op1);
            unsigned HOST_WIDE_INT ml;
            int pre_shift;
            rtx t1;

            pre_shift = floor_log2 (d & -d);
            ml = invert_mod2n (d >> pre_shift, size);
            t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
                               build_int_cst (NULL_TREE, pre_shift),
                               NULL_RTX, unsignedp);
            quotient = expand_mult (compute_mode, t1,
                                    gen_int_mode (ml, compute_mode),
                                    NULL_RTX, 1);

            insn = get_last_insn ();
            set_unique_reg_note (insn,
                                 REG_EQUAL,
                                 gen_rtx_fmt_ee (unsignedp ? UDIV : DIV,
                                                 compute_mode,
                                                 op0, op1));
          }
        break;
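      /* A minimal sketch (not GCC code) of the EXACT_DIV_EXPR trick: when
         n is known to be a multiple of d, n / d is a single multiply by
         the inverse of d modulo 2^32 (what invert_mod2n computes), after
         shifting out d's factors of two.  For d == 3 the inverse is
         0xAAAAAAAB, since 3 * 0xAAAAAAAB == 1 (mod 2^32).  */
#if 0
static unsigned int
exact_div3_sketch (unsigned int n)      /* n must be a multiple of 3 */
{
  return n * 0xAAAAAAABU;
}
#endif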
      case ROUND_DIV_EXPR:
      case ROUND_MOD_EXPR:
        if (unsignedp)
          {
            rtx tem;
            rtx label;
            label = gen_label_rtx ();
            quotient = gen_reg_rtx (compute_mode);
            remainder = gen_reg_rtx (compute_mode);
            if (expand_twoval_binop (udivmod_optab, op0, op1, quotient,
                                     remainder, 1) == 0)
              {
                rtx tem;
                quotient = expand_binop (compute_mode, udiv_optab, op0, op1,
                                         quotient, 1, OPTAB_LIB_WIDEN);
                tem = expand_mult (compute_mode, quotient, op1, NULL_RTX, 1);
                remainder = expand_binop (compute_mode, sub_optab, op0, tem,
                                          remainder, 1, OPTAB_LIB_WIDEN);
              }
            tem = plus_constant (op1, -1);
            tem = expand_shift (RSHIFT_EXPR, compute_mode, tem,
                                build_int_cst (NULL_TREE, 1),
                                NULL_RTX, 1);
            do_cmp_and_jump (remainder, tem, LEU, compute_mode, label);
            expand_inc (quotient, const1_rtx);
            expand_dec (remainder, op1);
            emit_label (label);
          }
        else
          {
            rtx abs_rem, abs_op1, tem, mask;
            rtx label;
            label = gen_label_rtx ();
            quotient = gen_reg_rtx (compute_mode);
            remainder = gen_reg_rtx (compute_mode);
            if (expand_twoval_binop (sdivmod_optab, op0, op1, quotient,
                                     remainder, 0) == 0)
              {
                rtx tem;
                quotient = expand_binop (compute_mode, sdiv_optab, op0, op1,
                                         quotient, 0, OPTAB_LIB_WIDEN);
                tem = expand_mult (compute_mode, quotient, op1, NULL_RTX, 0);
                remainder = expand_binop (compute_mode, sub_optab, op0, tem,
                                          remainder, 0, OPTAB_LIB_WIDEN);
              }
            abs_rem = expand_abs (compute_mode, remainder, NULL_RTX, 1, 0);
            abs_op1 = expand_abs (compute_mode, op1, NULL_RTX, 1, 0);
            tem = expand_shift (LSHIFT_EXPR, compute_mode, abs_rem,
                                build_int_cst (NULL_TREE, 1),
                                NULL_RTX, 1);
            do_cmp_and_jump (tem, abs_op1, LTU, compute_mode, label);
            tem = expand_binop (compute_mode, xor_optab, op0, op1,
                                NULL_RTX, 0, OPTAB_WIDEN);
            mask = expand_shift (RSHIFT_EXPR, compute_mode, tem,
                                 build_int_cst (NULL_TREE, size - 1),
                                 NULL_RTX, 0);
            tem = expand_binop (compute_mode, xor_optab, mask, const1_rtx,
                                NULL_RTX, 0, OPTAB_WIDEN);
            tem = expand_binop (compute_mode, sub_optab, tem, mask,
                                NULL_RTX, 0, OPTAB_WIDEN);
            expand_inc (quotient, tem);
            tem = expand_binop (compute_mode, xor_optab, mask, op1,
                                NULL_RTX, 0, OPTAB_WIDEN);
            tem = expand_binop (compute_mode, sub_optab, tem, mask,
                                NULL_RTX, 0, OPTAB_WIDEN);
            expand_dec (remainder, tem);
            emit_label (label);
          }
        return gen_lowpart (mode, rem_flag ? remainder : quotient);

      default:
        gcc_unreachable ();
      }
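  /* A minimal sketch (not GCC code) of the unsigned round-to-nearest
     fixup above: bump the truncating quotient when the remainder exceeds
     floor ((d - 1) / 2), the threshold computed via plus_constant and
     the shift by 1.  */
#if 0
static unsigned int
uround_div_sketch (unsigned int n, unsigned int d)
{
  unsigned int q = n / d, r = n % d;
  if (r > (d - 1) / 2)
    q += 1;
  return q;
}
#endif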
  if (quotient == 0)
    {
      if (target && GET_MODE (target) != compute_mode)
        target = 0;

      if (rem_flag)
        {
          /* Try to produce the remainder without producing the quotient.
             If we seem to have a divmod pattern that does not require widening,
             don't try widening here.  We should really have a WIDEN argument
             to expand_twoval_binop, since what we'd really like to do here is
             1) try a mod insn in compute_mode
             2) try a divmod insn in compute_mode
             3) try a div insn in compute_mode and multiply-subtract to get
                remainder
             4) try the same things with widening allowed.  */
          remainder
            = sign_expand_binop (compute_mode, umod_optab, smod_optab,
                                 op0, op1, target,
                                 unsignedp,
                                 ((optab2->handlers[compute_mode].insn_code
                                   != CODE_FOR_nothing)
                                  ? OPTAB_DIRECT : OPTAB_WIDEN));
          if (remainder == 0)
            {
              /* No luck there.  Can we do remainder and divide at once
                 without a library call?  */
              remainder = gen_reg_rtx (compute_mode);
              if (! expand_twoval_binop ((unsignedp
                                          ? udivmod_optab
                                          : sdivmod_optab),
                                         op0, op1,
                                         NULL_RTX, remainder, unsignedp))
                remainder = 0;
            }

          if (remainder)
            return gen_lowpart (mode, remainder);
        }

      /* Produce the quotient.  Try a quotient insn, but not a library call.
         If we have a divmod in this mode, use it in preference to widening
         the div (for this test we assume it will not fail).  Note that optab2
         is set to the one of the two optabs that the call below will use.  */
      quotient
        = sign_expand_binop (compute_mode, udiv_optab, sdiv_optab,
                             op0, op1, rem_flag ? NULL_RTX : target,
                             unsignedp,
                             ((optab2->handlers[compute_mode].insn_code
                               != CODE_FOR_nothing)
                              ? OPTAB_DIRECT : OPTAB_WIDEN));

      if (quotient == 0)
        {
          /* No luck there.  Try a quotient-and-remainder insn,
             keeping the quotient alone.  */
          quotient = gen_reg_rtx (compute_mode);
          if (! expand_twoval_binop (unsignedp ? udivmod_optab : sdivmod_optab,
                                     op0, op1,
                                     quotient, NULL_RTX, unsignedp))
            {
              /* Still no luck.  If we are not computing the remainder,
                 use a library call for the quotient.  */
              quotient = sign_expand_binop (compute_mode,
                                            udiv_optab, sdiv_optab,
                                            op0, op1, target,
                                            unsignedp, OPTAB_LIB_WIDEN);
            }
        }
    }

  if (rem_flag)
    {
      if (target && GET_MODE (target) != compute_mode)
        target = 0;

      if (quotient == 0)
        {
          /* No divide instruction either.  Use library for remainder.  */
          remainder = sign_expand_binop (compute_mode, umod_optab, smod_optab,
                                         op0, op1, target,
                                         unsignedp, OPTAB_LIB_WIDEN);
          /* No remainder function.  Try a quotient-and-remainder
             function, keeping the remainder.  */
          if (remainder == 0)
            {
              remainder = gen_reg_rtx (compute_mode);
              if (!expand_twoval_binop_libfunc
                  (unsignedp ? udivmod_optab : sdivmod_optab,
                   op0, op1,
                   NULL_RTX, remainder,
                   unsignedp ? UMOD : MOD))
                remainder = NULL_RTX;
            }
        }
      else
        {
          /* We divided.  Now finish doing X - Y * (X / Y).  */
          remainder = expand_mult (compute_mode, quotient, op1,
                                   NULL_RTX, unsignedp);
          remainder = expand_binop (compute_mode, sub_optab, op0,
                                    remainder, target, unsignedp,
                                    OPTAB_LIB_WIDEN);
        }
    }

  return gen_lowpart (mode, rem_flag ? remainder : quotient);
}
/* Return a tree node with data type TYPE, describing the value of X.
   Usually this is an VAR_DECL, if there is no obvious better choice.
   X may be an expression, however we only support those expressions
   generated by loop.c.  */

tree
make_tree (tree type, rtx x)
{
  tree t;

  switch (GET_CODE (x))
    {
    case CONST_INT:
      {
        HOST_WIDE_INT hi = 0;

        if (INTVAL (x) < 0
            && !(TYPE_UNSIGNED (type)
                 && (GET_MODE_BITSIZE (TYPE_MODE (type))
                     < HOST_BITS_PER_WIDE_INT)))
          hi = -1;

        t = build_int_cst_wide (type, INTVAL (x), hi);

        return t;
      }
    case CONST_DOUBLE:
      if (GET_MODE (x) == VOIDmode)
        t = build_int_cst_wide (type,
                                CONST_DOUBLE_LOW (x), CONST_DOUBLE_HIGH (x));
      else
        {
          REAL_VALUE_TYPE d;

          REAL_VALUE_FROM_CONST_DOUBLE (d, x);
          t = build_real (type, d);
        }

      return t;

    case CONST_VECTOR:
      {
        int i, units;
        rtx elt;
        tree t = NULL_TREE;

        units = CONST_VECTOR_NUNITS (x);

        /* Build a tree with vector elements.  */
        for (i = units - 1; i >= 0; --i)
          {
            elt = CONST_VECTOR_ELT (x, i);
            t = tree_cons (NULL_TREE, make_tree (type, elt), t);
          }

        return build_vector (type, t);
      }
    case PLUS:
      return fold (build2 (PLUS_EXPR, type, make_tree (type, XEXP (x, 0)),
                           make_tree (type, XEXP (x, 1))));

    case MINUS:
      return fold (build2 (MINUS_EXPR, type, make_tree (type, XEXP (x, 0)),
                           make_tree (type, XEXP (x, 1))));

    case NEG:
      return fold (build1 (NEGATE_EXPR, type, make_tree (type, XEXP (x, 0))));

    case MULT:
      return fold (build2 (MULT_EXPR, type, make_tree (type, XEXP (x, 0)),
                           make_tree (type, XEXP (x, 1))));

    case ASHIFT:
      return fold (build2 (LSHIFT_EXPR, type, make_tree (type, XEXP (x, 0)),
                           make_tree (type, XEXP (x, 1))));

    case LSHIFTRT:
      t = lang_hooks.types.unsigned_type (type);
      return fold (convert (type,
                            build2 (RSHIFT_EXPR, t,
                                    make_tree (t, XEXP (x, 0)),
                                    make_tree (type, XEXP (x, 1)))));

    case ASHIFTRT:
      t = lang_hooks.types.signed_type (type);
      return fold (convert (type,
                            build2 (RSHIFT_EXPR, t,
                                    make_tree (t, XEXP (x, 0)),
                                    make_tree (type, XEXP (x, 1)))));
    case DIV:
      if (TREE_CODE (type) != REAL_TYPE)
        t = lang_hooks.types.signed_type (type);
      else
        t = type;

      return fold (convert (type,
                            build2 (TRUNC_DIV_EXPR, t,
                                    make_tree (t, XEXP (x, 0)),
                                    make_tree (t, XEXP (x, 1)))));
    case UDIV:
      t = lang_hooks.types.unsigned_type (type);
      return fold (convert (type,
                            build2 (TRUNC_DIV_EXPR, t,
                                    make_tree (t, XEXP (x, 0)),
                                    make_tree (t, XEXP (x, 1)))));

    case SIGN_EXTEND:
    case ZERO_EXTEND:
      t = lang_hooks.types.type_for_mode (GET_MODE (XEXP (x, 0)),
                                          GET_CODE (x) == ZERO_EXTEND);
      return fold (convert (type, make_tree (t, XEXP (x, 0))));

    default:
      t = build_decl (VAR_DECL, NULL_TREE, type);

      /* If TYPE is a POINTER_TYPE, X might be Pmode with TYPE_MODE being
         ptr_mode.  So convert.  */
      if (POINTER_TYPE_P (type))
        x = convert_memory_address (TYPE_MODE (type), x);

      /* Note that we do *not* use SET_DECL_RTL here, because we do not
         want set_decl_rtl to go adjusting REG_ATTRS for this temporary.  */
      t->decl.rtl = x;

      return t;
    }
}
/* Check whether the multiplication X * MULT + ADD overflows.
   X, MULT and ADD must be CONST_*.
   MODE is the machine mode for the computation.
   X and MULT must have mode MODE.  ADD may have a different mode.
   So can X (defaults to same as MODE).
   UNSIGNEDP is nonzero to do unsigned multiplication.  */

bool
const_mult_add_overflow_p (rtx x, rtx mult, rtx add,
                           enum machine_mode mode, int unsignedp)
{
  tree type, mult_type, add_type, result;

  type = lang_hooks.types.type_for_mode (mode, unsignedp);

  /* In order to get a proper overflow indication from an unsigned
     type, we have to pretend that it's a sizetype.  */
  mult_type = type;
  if (unsignedp)
    {
      /* FIXME:It would be nice if we could step directly from this
         type to its sizetype equivalent.  */
      mult_type = build_distinct_type_copy (type);
      TYPE_IS_SIZETYPE (mult_type) = 1;
    }

  add_type = (GET_MODE (add) == VOIDmode ? mult_type
              : lang_hooks.types.type_for_mode (GET_MODE (add), unsignedp));

  result = fold (build2 (PLUS_EXPR, mult_type,
                         fold (build2 (MULT_EXPR, mult_type,
                                       make_tree (mult_type, x),
                                       make_tree (mult_type, mult))),
                         make_tree (add_type, add)));

  return TREE_CONSTANT_OVERFLOW (result);
}
/* Return an rtx representing the value of X * MULT + ADD.
   TARGET is a suggestion for where to store the result (an rtx).
   MODE is the machine mode for the computation.
   X and MULT must have mode MODE.  ADD may have a different mode.
   So can X (defaults to same as MODE).
   UNSIGNEDP is nonzero to do unsigned multiplication.
   This may emit insns.  */

rtx
expand_mult_add (rtx x, rtx target, rtx mult, rtx add, enum machine_mode mode,
                 int unsignedp)
{
  tree type = lang_hooks.types.type_for_mode (mode, unsignedp);
  tree add_type = (GET_MODE (add) == VOIDmode
                   ? type : lang_hooks.types.type_for_mode (GET_MODE (add),
                                                            unsignedp));
  tree result = fold (build2 (PLUS_EXPR, type,
                              fold (build2 (MULT_EXPR, type,
                                            make_tree (type, x),
                                            make_tree (type, mult))),
                              make_tree (add_type, add)));

  return expand_expr (result, target, VOIDmode, 0);
}
/* Compute the logical-and of OP0 and OP1, storing it in TARGET
   and returning TARGET.

   If TARGET is 0, a pseudo-register or constant is returned.  */

rtx
expand_and (enum machine_mode mode, rtx op0, rtx op1, rtx target)
{
  rtx tem = 0;

  if (GET_MODE (op0) == VOIDmode && GET_MODE (op1) == VOIDmode)
    tem = simplify_binary_operation (AND, mode, op0, op1);
  if (tem == 0)
    tem = expand_binop (mode, and_optab, op0, op1, target, 0, OPTAB_LIB_WIDEN);

  if (target == 0)
    target = tem;
  else if (tem != target)
    emit_move_insn (target, tem);
  return target;
}
/* Emit a store-flags instruction for comparison CODE on OP0 and OP1
   and storing in TARGET.  Normally return TARGET.
   Return 0 if that cannot be done.

   MODE is the mode to use for OP0 and OP1 should they be CONST_INTs.  If
   it is VOIDmode, they cannot both be CONST_INT.

   UNSIGNEDP is for the case where we have to widen the operands
   to perform the operation.  It says to use zero-extension.

   NORMALIZEP is 1 if we should convert the result to be either zero
   or one.  Normalize is -1 if we should convert the result to be
   either zero or -1.  If NORMALIZEP is zero, the result will be left
   "raw" out of the scc insn.  */

rtx
emit_store_flag (rtx target, enum rtx_code code, rtx op0, rtx op1,
                 enum machine_mode mode, int unsignedp, int normalizep)
{
  rtx subtarget;
  enum insn_code icode;
  enum machine_mode compare_mode;
  enum machine_mode target_mode = GET_MODE (target);
  rtx tem;
  rtx last = get_last_insn ();
  rtx pattern, comparison;

  if (unsignedp)
    code = unsigned_condition (code);

  /* If one operand is constant, make it the second one.  Only do this
     if the other operand is not constant as well.  */

  if (swap_commutative_operands_p (op0, op1))
    {
      tem = op0;
      op0 = op1;
      op1 = tem;
      code = swap_condition (code);
    }

  if (mode == VOIDmode)
    mode = GET_MODE (op0);

  /* For some comparisons with 1 and -1, we can convert this to
     comparisons with zero.  This will often produce more opportunities for
     store-flag insns.  */

  switch (code)
    {
    case LT:
      if (op1 == const1_rtx)
        op1 = const0_rtx, code = LE;
      break;
    case LE:
      if (op1 == constm1_rtx)
        op1 = const0_rtx, code = LT;
      break;
    case GE:
      if (op1 == const1_rtx)
        op1 = const0_rtx, code = GT;
      break;
    case GT:
      if (op1 == constm1_rtx)
        op1 = const0_rtx, code = GE;
      break;
    case GEU:
      if (op1 == const1_rtx)
        op1 = const0_rtx, code = NE;
      break;
    case LTU:
      if (op1 == const1_rtx)
        op1 = const0_rtx, code = EQ;
      break;
    default:
      break;
    }
  /* If we are comparing a double-word integer with zero or -1, we can
     convert the comparison into one involving a single word.  */
  if (GET_MODE_BITSIZE (mode) == BITS_PER_WORD * 2
      && GET_MODE_CLASS (mode) == MODE_INT
      && (!MEM_P (op0) || ! MEM_VOLATILE_P (op0)))
    {
      if ((code == EQ || code == NE)
          && (op1 == const0_rtx || op1 == constm1_rtx))
        {
          rtx op00, op01, op0both;

          /* Do a logical OR or AND of the two words and compare the result.  */
          op00 = simplify_gen_subreg (word_mode, op0, mode, 0);
          op01 = simplify_gen_subreg (word_mode, op0, mode, UNITS_PER_WORD);
          op0both = expand_binop (word_mode,
                                  op1 == const0_rtx ? ior_optab : and_optab,
                                  op00, op01, NULL_RTX, unsignedp, OPTAB_DIRECT);

          if (op0both != 0)
            return emit_store_flag (target, code, op0both, op1, word_mode,
                                    unsignedp, normalizep);
        }
      else if ((code == LT || code == GE) && op1 == const0_rtx)
        {
          rtx op0h;

          /* If testing the sign bit, can just test on high word.  */
          op0h = simplify_gen_subreg (word_mode, op0, mode,
                                      subreg_highpart_offset (word_mode, mode));
          return emit_store_flag (target, code, op0h, op1, word_mode,
                                  unsignedp, normalizep);
        }
    }
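  /* A minimal sketch (not GCC code) of the double-word conversions above
     for a 64-bit value split into 32-bit words: OR the halves to test
     equality with zero, AND them to test equality with -1, and only the
     high word matters for the sign.  */
#if 0
static int
dword_eq0_sketch (unsigned int lo, unsigned int hi)
{
  return (lo | hi) == 0;
}

static int
dword_eqm1_sketch (unsigned int lo, unsigned int hi)
{
  return (lo & hi) == 0xFFFFFFFFU;
}
#endif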
  /* From now on, we won't change CODE, so set ICODE now.  */
  icode = setcc_gen_code[(int) code];

  /* If this is A < 0 or A >= 0, we can do this by taking the ones
     complement of A (for GE) and shifting the sign bit to the low bit.  */
  if (op1 == const0_rtx && (code == LT || code == GE)
      && GET_MODE_CLASS (mode) == MODE_INT
      && (normalizep || STORE_FLAG_VALUE == 1
          || (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
              && ((STORE_FLAG_VALUE & GET_MODE_MASK (mode))
                  == (unsigned HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1)))))
    {
      subtarget = target;

      /* If the result is to be wider than OP0, it is best to convert it
         first.  If it is to be narrower, it is *incorrect* to convert it
         first.  */
      if (GET_MODE_SIZE (target_mode) > GET_MODE_SIZE (mode))
        {
          op0 = convert_modes (target_mode, mode, op0, 0);
          mode = target_mode;
        }

      if (target_mode != mode)
        subtarget = 0;

      if (code == GE)
        op0 = expand_unop (mode, one_cmpl_optab, op0,
                           ((STORE_FLAG_VALUE == 1 || normalizep)
                            ? 0 : subtarget), 0);

      if (STORE_FLAG_VALUE == 1 || normalizep)
        /* If we are supposed to produce a 0/1 value, we want to do
           a logical shift from the sign bit to the low-order bit; for
           a -1/0 value, we do an arithmetic shift.  */
        op0 = expand_shift (RSHIFT_EXPR, mode, op0,
                            size_int (GET_MODE_BITSIZE (mode) - 1),
                            subtarget, normalizep != -1);

      if (mode != target_mode)
        op0 = convert_modes (target_mode, mode, op0, 0);

      return op0;
    }
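  /* A minimal sketch (not GCC code) of the sign-bit trick above on a
     32-bit int: A < 0 is the sign bit moved to bit 0 by a logical shift;
     A >= 0 first complements A so its sign bit is the answer.  */
#if 0
static unsigned int
store_flag_lt0_sketch (int a)
{
  return (unsigned int) a >> 31;
}

static unsigned int
store_flag_ge0_sketch (int a)
{
  return (unsigned int) ~a >> 31;
}
#endif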
  if (icode != CODE_FOR_nothing)
    {
      insn_operand_predicate_fn pred;

      /* We think we may be able to do this with a scc insn.  Emit the
         comparison and then the scc insn.  */

      do_pending_stack_adjust ();
      last = get_last_insn ();

      comparison
        = compare_from_rtx (op0, op1, code, unsignedp, mode, NULL_RTX);
      if (CONSTANT_P (comparison))
        {
          switch (GET_CODE (comparison))
            {
            case CONST_INT:
              if (comparison == const0_rtx)
                return const0_rtx;
              break;

#ifdef FLOAT_STORE_FLAG_VALUE
            case CONST_DOUBLE:
              if (comparison == CONST0_RTX (GET_MODE (comparison)))
                return const0_rtx;
              break;
#endif
            default:
              gcc_unreachable ();
            }

          if (normalizep == 1)
            return const1_rtx;
          if (normalizep == -1)
            return constm1_rtx;
          return const_true_rtx;
        }

      /* The code of COMPARISON may not match CODE if compare_from_rtx
         decided to swap its operands and reverse the original code.

         We know that compare_from_rtx returns either a CONST_INT or
         a new comparison code, so it is safe to just extract the
         code from COMPARISON.  */
      code = GET_CODE (comparison);

      /* Get a reference to the target in the proper mode for this insn.  */
      compare_mode = insn_data[(int) icode].operand[0].mode;
      subtarget = target;
      pred = insn_data[(int) icode].operand[0].predicate;
      if (optimize || ! (*pred) (subtarget, compare_mode))
        subtarget = gen_reg_rtx (compare_mode);

      pattern = GEN_FCN (icode) (subtarget);
      if (pattern)
        {
          emit_insn (pattern);

          /* If we are converting to a wider mode, first convert to
             TARGET_MODE, then normalize.  This produces better combining
             opportunities on machines that have a SIGN_EXTRACT when we are
             testing a single bit.  This mostly benefits the 68k.

             If STORE_FLAG_VALUE does not have the sign bit set when
             interpreted in COMPARE_MODE, we can do this conversion as
             unsigned, which is usually more efficient.  */
          if (GET_MODE_SIZE (target_mode) > GET_MODE_SIZE (compare_mode))
            {
              convert_move (target, subtarget,
                            (GET_MODE_BITSIZE (compare_mode)
                             <= HOST_BITS_PER_WIDE_INT)
                            && 0 == (STORE_FLAG_VALUE
                                     & ((HOST_WIDE_INT) 1
                                        << (GET_MODE_BITSIZE (compare_mode) - 1))));
              op0 = target;
              compare_mode = target_mode;
            }
          else
            op0 = subtarget;
          /* If we want to keep subexpressions around, don't reuse our
             last target.  */

          if (optimize)
            subtarget = 0;

          /* Now normalize to the proper value in COMPARE_MODE.  Sometimes
             we don't have to do anything.  */
          if (normalizep == 0 || normalizep == STORE_FLAG_VALUE)
            ;
          /* STORE_FLAG_VALUE might be the most negative number, so write
             the comparison this way to avoid a compiler-time warning.  */
          else if (- normalizep == STORE_FLAG_VALUE)
            op0 = expand_unop (compare_mode, neg_optab, op0, subtarget, 0);

          /* We don't want to use STORE_FLAG_VALUE < 0 below since this
             makes it hard to use a value of just the sign bit due to
             ANSI integer constant typing rules.  */
          else if (GET_MODE_BITSIZE (compare_mode) <= HOST_BITS_PER_WIDE_INT
                   && (STORE_FLAG_VALUE
                       & ((HOST_WIDE_INT) 1
                          << (GET_MODE_BITSIZE (compare_mode) - 1))))
            op0 = expand_shift (RSHIFT_EXPR, compare_mode, op0,
                                size_int (GET_MODE_BITSIZE (compare_mode) - 1),
                                subtarget, normalizep == 1);
          else
            {
              gcc_assert (STORE_FLAG_VALUE & 1);

              op0 = expand_and (compare_mode, op0, const1_rtx, subtarget);
              if (normalizep == -1)
                op0 = expand_unop (compare_mode, neg_optab, op0, op0, 0);
            }

          /* If we were converting to a smaller mode, do the
             conversion now.  */
          if (target_mode != compare_mode)
            {
              convert_move (target, op0, 0);
              return target;
            }
          else
            return op0;
        }

      delete_insns_since (last);
    }
  /* If optimizing, use different pseudo registers for each insn, instead
     of reusing the same pseudo.  This leads to better CSE, but slows
     down the compiler, since there are more pseudos.  */
  subtarget = (!optimize
               && (target_mode == mode)) ? target : NULL_RTX;

  /* If we reached here, we can't do this with a scc insn.  However, there
     are some comparisons that can be done directly.  For example, if
     this is an equality comparison of integers, we can try to exclusive-or
     (or subtract) the two operands and use a recursive call to try the
     comparison with zero.  Don't do any of these cases if branches are
     very cheap.  */

  if (BRANCH_COST > 0
      && GET_MODE_CLASS (mode) == MODE_INT && (code == EQ || code == NE)
      && op1 != const0_rtx)
    {
      tem = expand_binop (mode, xor_optab, op0, op1, subtarget, 1,
                          OPTAB_WIDEN);

      if (tem == 0)
        tem = expand_binop (mode, sub_optab, op0, op1, subtarget, 1,
                            OPTAB_WIDEN);
      if (tem != 0)
        tem = emit_store_flag (target, code, tem, const0_rtx,
                               mode, unsignedp, normalizep);
      if (tem == 0)
        delete_insns_since (last);
      return tem;
    }

  /* Some other cases we can do are EQ, NE, LE, and GT comparisons with
     the constant zero.  Reject all other comparisons at this point.  Only
     do LE and GT if branches are expensive since they are expensive on
     2-operand machines.  */

  if (BRANCH_COST == 0
      || GET_MODE_CLASS (mode) != MODE_INT || op1 != const0_rtx
      || (code != EQ && code != NE
          && (BRANCH_COST <= 1 || (code != LE && code != GT))))
    return 0;
  /* See what we need to return.  We can only return a 1, -1, or the
     sign bit.  */

  if (normalizep == 0)
    {
      if (STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
        normalizep = STORE_FLAG_VALUE;

      else if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
               && ((STORE_FLAG_VALUE & GET_MODE_MASK (mode))
                   == (unsigned HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1)))
        ;
      else
        return 0;
    }

  /* Try to put the result of the comparison in the sign bit.  Assume we can't
     do the necessary operation below.  */

  tem = 0;

  /* To see if A <= 0, compute (A | (A - 1)).  A <= 0 iff that result has
     the sign bit set.  */

  if (code == LE)
    {
      /* This is destructive, so SUBTARGET can't be OP0.  */
      if (rtx_equal_p (subtarget, op0))
        subtarget = 0;

      tem = expand_binop (mode, sub_optab, op0, const1_rtx, subtarget, 0,
                          OPTAB_WIDEN);
      if (tem)
        tem = expand_binop (mode, ior_optab, op0, tem, subtarget, 0,
                            OPTAB_WIDEN);
    }
  /* To see if A > 0, compute (((signed) A) << BITS) - A, where BITS is the
     number of bits in the mode of OP0, minus one.  */

  if (code == GT)
    {
      if (rtx_equal_p (subtarget, op0))
        subtarget = 0;

      tem = expand_shift (RSHIFT_EXPR, mode, op0,
                          size_int (GET_MODE_BITSIZE (mode) - 1),
                          subtarget, 0);
      tem = expand_binop (mode, sub_optab, tem, op0, subtarget, 0,
                          OPTAB_WIDEN);
    }
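  /* A minimal sketch (not GCC code) of the LE and GT tricks above on a
     32-bit int, assuming two's complement wraparound: A <= 0 iff
     (A | (A - 1)) has the sign bit set, and A > 0 iff ((A >> 31) - A)
     has the sign bit set.  */
#if 0
static unsigned int
store_flag_le0_sketch (int a)
{
  return (unsigned int) (a | (a - 1)) >> 31;
}

static unsigned int
store_flag_gt0_sketch (int a)
{
  return (unsigned int) ((a >> 31) - a) >> 31;
}
#endif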
  if (code == EQ || code == NE)
    {
      /* For EQ or NE, one way to do the comparison is to apply an operation
         that converts the operand into a positive number if it is nonzero
         or zero if it was originally zero.  Then, for EQ, we subtract 1 and
         for NE we negate.  This puts the result in the sign bit.  Then we
         normalize with a shift, if needed.

         Two operations that can do the above actions are ABS and FFS, so try
         them.  If that doesn't work, and MODE is smaller than a full word,
         we can use zero-extension to the wider mode (an unsigned conversion)
         as the operation.  */

      /* Note that ABS doesn't yield a positive number for INT_MIN, but
         that is compensated by the subsequent overflow when subtracting
         one / negating.  */

      if (abs_optab->handlers[mode].insn_code != CODE_FOR_nothing)
        tem = expand_unop (mode, abs_optab, op0, subtarget, 1);
      else if (ffs_optab->handlers[mode].insn_code != CODE_FOR_nothing)
        tem = expand_unop (mode, ffs_optab, op0, subtarget, 1);
      else if (GET_MODE_SIZE (mode) < UNITS_PER_WORD)
        {
          tem = convert_modes (word_mode, mode, op0, 1);
          mode = word_mode;
        }

      if (tem != 0)
        {
          if (code == EQ)
            tem = expand_binop (mode, sub_optab, tem, const1_rtx, subtarget,
                                0, OPTAB_WIDEN);
          else
            tem = expand_unop (mode, neg_optab, tem, subtarget, 0);
        }

      /* If we couldn't do it that way, for NE we can "or" the two's complement
         of the value with itself.  For EQ, we take the one's complement of
         that "or", which is an extra insn, so we only handle EQ if branches
         are expensive.  */
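      /* A minimal sketch (not GCC code) of the NE trick below on a
         32-bit int: (-a | a) has the sign bit set exactly when a != 0,
         and complementing that "or" gives the EQ variant.  */
#if 0
static unsigned int
store_flag_ne0_sketch (int a)
{
  return (unsigned int) (-a | a) >> 31;
}
#endif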
      if (tem == 0 && (code == NE || BRANCH_COST > 1))
        {
          if (rtx_equal_p (subtarget, op0))
            subtarget = 0;

          tem = expand_unop (mode, neg_optab, op0, subtarget, 0);
          tem = expand_binop (mode, ior_optab, tem, op0, subtarget, 0,
                              OPTAB_WIDEN);

          if (tem && code == EQ)
            tem = expand_unop (mode, one_cmpl_optab, tem, subtarget, 0);
        }
    }

  if (tem && normalizep)
    tem = expand_shift (RSHIFT_EXPR, mode, tem,
                        size_int (GET_MODE_BITSIZE (mode) - 1),
                        subtarget, normalizep == 1);

  if (tem)
    {
      if (GET_MODE (tem) != target_mode)
        {
          convert_move (target, tem, 0);
          tem = target;
        }
      else if (!subtarget)
        {
          emit_move_insn (target, tem);
          tem = target;
        }
    }
  else
    delete_insns_since (last);

  return tem;
}
/* Like emit_store_flag, but always succeeds.  */

rtx
emit_store_flag_force (rtx target, enum rtx_code code, rtx op0, rtx op1,
                       enum machine_mode mode, int unsignedp, int normalizep)
{
  rtx tem, label;

  /* First see if emit_store_flag can do the job.  */
  tem = emit_store_flag (target, code, op0, op1, mode, unsignedp, normalizep);
  if (tem != 0)
    return tem;

  if (normalizep == 0)
    normalizep = 1;

  /* If this failed, we have to do this with set/compare/jump/set code.  */

  if (!REG_P (target)
      || reg_mentioned_p (target, op0) || reg_mentioned_p (target, op1))
    target = gen_reg_rtx (GET_MODE (target));

  emit_move_insn (target, const1_rtx);
  label = gen_label_rtx ();
  do_compare_rtx_and_jump (op0, op1, code, unsignedp, mode, NULL_RTX,
                           NULL_RTX, label);

  emit_move_insn (target, const0_rtx);
  emit_label (label);

  return target;
}
/* Perform possibly multi-word comparison and conditional jump to LABEL
   if ARG1 OP ARG2 true where ARG1 and ARG2 are of mode MODE.

   The algorithm is based on the code in expr.c:do_jump.

   Note that this does not perform a general comparison.  Only variants
   generated within expmed.c are correctly handled, others abort (but could
   be handled if needed).  */

static void
do_cmp_and_jump (rtx arg1, rtx arg2, enum rtx_code op, enum machine_mode mode,
                 rtx label)
{
  /* If this mode is an integer too wide to compare properly,
     compare word by word.  Rely on cse to optimize constant cases.  */

  if (GET_MODE_CLASS (mode) == MODE_INT
      && ! can_compare_p (op, mode, ccp_jump))
    {
      rtx label2 = gen_label_rtx ();

      switch (op)
        {
        case LTU:
          do_jump_by_parts_greater_rtx (mode, 1, arg2, arg1, label2, label);
          break;

        case LEU:
          do_jump_by_parts_greater_rtx (mode, 1, arg1, arg2, label, label2);
          break;

        case LT:
          do_jump_by_parts_greater_rtx (mode, 0, arg2, arg1, label2, label);
          break;

        case GT:
          do_jump_by_parts_greater_rtx (mode, 0, arg1, arg2, label2, label);
          break;

        case GE:
          do_jump_by_parts_greater_rtx (mode, 0, arg2, arg1, label, label2);
          break;

        case EQ:
          /* do_jump_by_parts_equality_rtx compares with zero.  Luckily
             that's the only equality operations we do.  */
          gcc_assert (arg2 == const0_rtx && mode == GET_MODE (arg1));
          do_jump_by_parts_equality_rtx (arg1, label2, label);
          break;

        case NE:
          gcc_assert (arg2 == const0_rtx && mode == GET_MODE (arg1));
          do_jump_by_parts_equality_rtx (arg1, label, label2);
          break;

        default:
          gcc_unreachable ();
        }

      emit_label (label2);
    }
  else
    emit_cmp_and_jump_insns (arg1, arg2, op, NULL_RTX, mode, 0, label);
}