/* Medium-level subroutines: convert bit-field store and extract
   and shifts, multiplies and divides to rtl instructions.
   Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 59 Temple Place - Suite 330, Boston, MA
02111-1307, USA.  */
#include "coretypes.h"
#include "insn-config.h"
#include "langhooks.h"
static void store_fixed_bit_field	PARAMS ((rtx, unsigned HOST_WIDE_INT,
						 unsigned HOST_WIDE_INT,
						 unsigned HOST_WIDE_INT, rtx));
static void store_split_bit_field	PARAMS ((rtx, unsigned HOST_WIDE_INT,
						 unsigned HOST_WIDE_INT, rtx));
static rtx extract_fixed_bit_field	PARAMS ((enum machine_mode, rtx,
						 unsigned HOST_WIDE_INT,
						 unsigned HOST_WIDE_INT,
						 unsigned HOST_WIDE_INT,
						 rtx, int));
static rtx mask_rtx			PARAMS ((enum machine_mode, int,
						 int, int));
static rtx lshift_value			PARAMS ((enum machine_mode, rtx,
						 int, int));
static rtx extract_split_bit_field	PARAMS ((rtx, unsigned HOST_WIDE_INT,
						 unsigned HOST_WIDE_INT, int));
static void do_cmp_and_jump		PARAMS ((rtx, rtx, enum rtx_code,
						 enum machine_mode, rtx));
/* Nonzero means divides or modulus operations are relatively cheap for
   powers of two, so don't use branches; emit the operation instead.
   Usually, this will mean that the MD file will emit non-branch
   sequences.  */

static int sdiv_pow2_cheap, smod_pow2_cheap;

#ifndef SLOW_UNALIGNED_ACCESS
#define SLOW_UNALIGNED_ACCESS(MODE, ALIGN) STRICT_ALIGNMENT
#endif
/* For compilers that support multiple targets with different word sizes,
   MAX_BITS_PER_WORD contains the biggest value of BITS_PER_WORD.  An example
   is the H8/300(H) compiler.  */

#ifndef MAX_BITS_PER_WORD
#define MAX_BITS_PER_WORD BITS_PER_WORD
#endif

/* Reduce conditional compilation elsewhere.  */
#ifndef HAVE_insv
#define HAVE_insv	0
#define CODE_FOR_insv	CODE_FOR_nothing
#define gen_insv(a,b,c,d) NULL_RTX
#endif
#ifndef HAVE_extv
#define HAVE_extv	0
#define CODE_FOR_extv	CODE_FOR_nothing
#define gen_extv(a,b,c,d) NULL_RTX
#endif
#ifndef HAVE_extzv
#define HAVE_extzv	0
#define CODE_FOR_extzv	CODE_FOR_nothing
#define gen_extzv(a,b,c,d) NULL_RTX
#endif
/* Cost of various pieces of RTL.  Note that some of these are indexed by
   shift count and some by mode.  */
static int add_cost, negate_cost, zero_cost;
static int shift_cost[MAX_BITS_PER_WORD];
static int shiftadd_cost[MAX_BITS_PER_WORD];
static int shiftsub_cost[MAX_BITS_PER_WORD];
static int mul_cost[NUM_MACHINE_MODES];
static int div_cost[NUM_MACHINE_MODES];
static int mul_widen_cost[NUM_MACHINE_MODES];
static int mul_highpart_cost[NUM_MACHINE_MODES];
void
init_expmed ()
{
  rtx reg, shift_insn, shiftadd_insn, shiftsub_insn;
  int dummy;
  int m;
  enum machine_mode mode, wider_mode;

  start_sequence ();

  /* This is "some random pseudo register" for purposes of calling recog
     to see what insns exist.  */
  reg = gen_rtx_REG (word_mode, 10000);

  zero_cost = rtx_cost (const0_rtx, 0);
  add_cost = rtx_cost (gen_rtx_PLUS (word_mode, reg, reg), SET);

  shift_insn = emit_insn (gen_rtx_SET (VOIDmode, reg,
				       gen_rtx_ASHIFT (word_mode, reg,
						       const0_rtx)));

  shiftadd_insn
    = emit_insn (gen_rtx_SET (VOIDmode, reg,
			      gen_rtx_PLUS (word_mode,
					    gen_rtx_MULT (word_mode,
							  reg, const0_rtx),
					    reg)));

  shiftsub_insn
    = emit_insn (gen_rtx_SET (VOIDmode, reg,
			      gen_rtx_MINUS (word_mode,
					     gen_rtx_MULT (word_mode,
							   reg, const0_rtx),
					     reg)));

  init_recog ();

  shiftadd_cost[0] = shiftsub_cost[0] = add_cost;

  for (m = 1; m < MAX_BITS_PER_WORD; m++)
    {
      rtx c_int = GEN_INT ((HOST_WIDE_INT) 1 << m);
      shift_cost[m] = shiftadd_cost[m] = shiftsub_cost[m] = 32000;

      XEXP (SET_SRC (PATTERN (shift_insn)), 1) = GEN_INT (m);
      if (recog (PATTERN (shift_insn), shift_insn, &dummy) >= 0)
	shift_cost[m] = rtx_cost (SET_SRC (PATTERN (shift_insn)), SET);

      XEXP (XEXP (SET_SRC (PATTERN (shiftadd_insn)), 0), 1) = c_int;
      if (recog (PATTERN (shiftadd_insn), shiftadd_insn, &dummy) >= 0)
	shiftadd_cost[m] = rtx_cost (SET_SRC (PATTERN (shiftadd_insn)), SET);

      XEXP (XEXP (SET_SRC (PATTERN (shiftsub_insn)), 0), 1) = c_int;
      if (recog (PATTERN (shiftsub_insn), shiftsub_insn, &dummy) >= 0)
	shiftsub_cost[m] = rtx_cost (SET_SRC (PATTERN (shiftsub_insn)), SET);
    }

  negate_cost = rtx_cost (gen_rtx_NEG (word_mode, reg), SET);

  sdiv_pow2_cheap
    = (rtx_cost (gen_rtx_DIV (word_mode, reg, GEN_INT (32)), SET)
       <= 2 * add_cost);
  smod_pow2_cheap
    = (rtx_cost (gen_rtx_MOD (word_mode, reg, GEN_INT (32)), SET)
       <= 2 * add_cost);

  for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT);
       mode != VOIDmode;
       mode = GET_MODE_WIDER_MODE (mode))
    {
      reg = gen_rtx_REG (mode, 10000);
      div_cost[(int) mode] = rtx_cost (gen_rtx_UDIV (mode, reg, reg), SET);
      mul_cost[(int) mode] = rtx_cost (gen_rtx_MULT (mode, reg, reg), SET);
      wider_mode = GET_MODE_WIDER_MODE (mode);
      if (wider_mode != VOIDmode)
	{
	  mul_widen_cost[(int) wider_mode]
	    = rtx_cost (gen_rtx_MULT (wider_mode,
				      gen_rtx_ZERO_EXTEND (wider_mode, reg),
				      gen_rtx_ZERO_EXTEND (wider_mode, reg)),
			SET);
	  mul_highpart_cost[(int) mode]
	    = rtx_cost (gen_rtx_TRUNCATE
			(mode,
			 gen_rtx_LSHIFTRT
			 (wider_mode,
			  gen_rtx_MULT (wider_mode,
					gen_rtx_ZERO_EXTEND (wider_mode, reg),
					gen_rtx_ZERO_EXTEND (wider_mode, reg)),
			  GEN_INT (GET_MODE_BITSIZE (mode)))),
			SET);
	}
    }

  end_sequence ();
}
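/* Illustrative note (not from the original source): after init_expmed
   runs, shift_cost[3] holds the cost recog assigned to a word_mode
   shift by 3, or the sentinel 32000 if no shift pattern matched.  */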
/* Return an rtx representing minus the value of X.
   MODE is the intended mode of the result,
   useful if X is a CONST_INT.  */

rtx
negate_rtx (mode, x)
     enum machine_mode mode;
     rtx x;
{
  rtx result = simplify_unary_operation (NEG, mode, x, mode);

  if (result == 0)
    result = expand_unop (mode, neg_optab, x, NULL_RTX, 0);

  return result;
}
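/* Illustrative example: negate_rtx (SImode, const1_rtx) folds at expand
   time to (const_int -1) via simplify_unary_operation; only operands it
   cannot fold reach expand_unop.  */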
/* Report on the availability of insv/extv/extzv and the desired mode
   of each of their operands.  Returns MAX_MACHINE_MODE if HAVE_foo
   is false; else the mode of the specified operand.  If OPNO is -1,
   all the caller cares about is whether the insn is available.  */

enum machine_mode
mode_for_extraction (pattern, opno)
     enum extraction_pattern pattern;
     int opno;
{
  const struct insn_data *data;

  switch (pattern)
    {
    case EP_insv:
      if (HAVE_insv)
	{
	  data = &insn_data[CODE_FOR_insv];
	  break;
	}
      return MAX_MACHINE_MODE;

    case EP_extv:
      if (HAVE_extv)
	{
	  data = &insn_data[CODE_FOR_extv];
	  break;
	}
      return MAX_MACHINE_MODE;

    case EP_extzv:
      if (HAVE_extzv)
	{
	  data = &insn_data[CODE_FOR_extzv];
	  break;
	}
      return MAX_MACHINE_MODE;

    default:
      abort ();
    }

  if (opno == -1)
    return VOIDmode;

  /* Everyone who uses this function used to follow it with
     if (result == VOIDmode) result = word_mode; */
  if (data->operand[opno].mode == VOIDmode)
    return word_mode;
  return data->operand[opno].mode;
}
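/* Usage sketch (illustrative, not part of the original file): a caller
   that only cares whether insv exists compares against MAX_MACHINE_MODE;
   a caller preparing operands asks for a specific operand's mode, with
   VOIDmode already mapped to word_mode by the function above.  */
#if 0
  enum machine_mode op3_mode = mode_for_extraction (EP_insv, 3);
  int have_insv = (mode_for_extraction (EP_insv, -1) != MAX_MACHINE_MODE);
#endif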
/* Generate code to store value from rtx VALUE
   into a bit-field within structure STR_RTX
   containing BITSIZE bits starting at bit BITNUM.
   FIELDMODE is the machine-mode of the FIELD_DECL node for this field.
   ALIGN is the alignment that STR_RTX is known to have.
   TOTAL_SIZE is the size of the structure in bytes, or -1 if varying.  */

/* ??? Note that there are two different ideas here for how
   to determine the size to count bits within, for a register.
   One is BITS_PER_WORD, and the other is the size of operand 3
   of the insv pattern.

   If operand 3 of the insv pattern is VOIDmode, then we will use BITS_PER_WORD
   else, we use the mode of operand 3.  */

rtx
store_bit_field (str_rtx, bitsize, bitnum, fieldmode, value, total_size)
     rtx str_rtx;
     unsigned HOST_WIDE_INT bitsize;
     unsigned HOST_WIDE_INT bitnum;
     enum machine_mode fieldmode;
     rtx value;
     HOST_WIDE_INT total_size;
{
  unsigned int unit
    = (GET_CODE (str_rtx) == MEM) ? BITS_PER_UNIT : BITS_PER_WORD;
  unsigned HOST_WIDE_INT offset = bitnum / unit;
  unsigned HOST_WIDE_INT bitpos = bitnum % unit;
  rtx op0 = str_rtx;
  int byte_offset;

  enum machine_mode op_mode = mode_for_extraction (EP_insv, 3);

  /* Discount the part of the structure before the desired byte.
     We need to know how many bytes are safe to reference after it.  */
  if (total_size >= 0)
    total_size -= (bitpos / BIGGEST_ALIGNMENT
		   * (BIGGEST_ALIGNMENT / BITS_PER_UNIT));

  while (GET_CODE (op0) == SUBREG)
    {
      /* The following line once was done only if WORDS_BIG_ENDIAN,
	 but I think that is a mistake.  WORDS_BIG_ENDIAN is
	 meaningful at a much higher level; when structures are copied
	 between memory and regs, the higher-numbered regs
	 always get higher addresses.  */
      offset += (SUBREG_BYTE (op0) / UNITS_PER_WORD);
      /* We used to adjust BITPOS here, but now we do the whole adjustment
	 right after the loop.  */
      op0 = SUBREG_REG (op0);
    }

  value = protect_from_queue (value, 0);

  if (flag_force_mem)
    {
      int old_generating_concat_p = generating_concat_p;
      generating_concat_p = 0;
      value = force_not_mem (value);
      generating_concat_p = old_generating_concat_p;
    }

  /* If the target is a register, overwriting the entire object, or storing
     a full-word or multi-word field can be done with just a SUBREG.

     If the target is memory, storing any naturally aligned field can be
     done with a simple store.  For targets that support fast unaligned
     memory, any naturally sized, unit aligned field can be done directly.  */

  byte_offset = (bitnum % BITS_PER_WORD) / BITS_PER_UNIT
		+ (offset * UNITS_PER_WORD);

  if (bitpos == 0
      && bitsize == GET_MODE_BITSIZE (fieldmode)
      && (GET_CODE (op0) != MEM
	  ? ((GET_MODE_SIZE (fieldmode) >= UNITS_PER_WORD
	      || GET_MODE_SIZE (GET_MODE (op0)) == GET_MODE_SIZE (fieldmode))
	     && byte_offset % GET_MODE_SIZE (fieldmode) == 0)
	  : (! SLOW_UNALIGNED_ACCESS (fieldmode, MEM_ALIGN (op0))
	     || (offset * BITS_PER_UNIT % bitsize == 0
		 && MEM_ALIGN (op0) % GET_MODE_BITSIZE (fieldmode) == 0))))
    {
      if (GET_MODE (op0) != fieldmode)
	{
	  if (GET_CODE (op0) == SUBREG)
	    {
	      if (GET_MODE (SUBREG_REG (op0)) == fieldmode
		  || GET_MODE_CLASS (fieldmode) == MODE_INT
		  || GET_MODE_CLASS (fieldmode) == MODE_PARTIAL_INT)
		op0 = SUBREG_REG (op0);
	      else
		/* Else we've got some float mode source being extracted into
		   a different float mode destination -- this combination of
		   subregs results in Severe Tire Damage.  */
		abort ();
	    }
	  if (GET_CODE (op0) == REG)
	    op0 = gen_rtx_SUBREG (fieldmode, op0, byte_offset);
	  else
	    op0 = adjust_address (op0, fieldmode, offset);
	}
      emit_move_insn (op0, value);
      return value;
    }

  /* Make sure we are playing with integral modes.  Pun with subregs
     if we aren't.  This must come after the entire register case above,
     since that case is valid for any mode.  The following cases are only
     valid for integral modes.  */
  {
    enum machine_mode imode = int_mode_for_mode (GET_MODE (op0));
    if (imode != GET_MODE (op0))
      {
	if (GET_CODE (op0) == MEM)
	  op0 = adjust_address (op0, imode, 0);
	else if (imode != BLKmode)
	  op0 = gen_lowpart (imode, op0);
	else
	  abort ();
      }
  }

  /* We may be accessing data outside the field, which means
     we can alias adjacent data.  */
  if (GET_CODE (op0) == MEM)
    {
      op0 = shallow_copy_rtx (op0);
      set_mem_alias_set (op0, 0);
      set_mem_expr (op0, 0);
    }

  /* If OP0 is a register, BITPOS must count within a word.
     But as we have it, it counts within whatever size OP0 now has.
     On a bigendian machine, these are not the same, so convert.  */
  if (BYTES_BIG_ENDIAN
      && GET_CODE (op0) != MEM
      && unit > GET_MODE_BITSIZE (GET_MODE (op0)))
    bitpos += unit - GET_MODE_BITSIZE (GET_MODE (op0));

  /* Storing an lsb-aligned field in a register
     can be done with a movestrict instruction.  */

  if (GET_CODE (op0) != MEM
      && (BYTES_BIG_ENDIAN ? bitpos + bitsize == unit : bitpos == 0)
      && bitsize == GET_MODE_BITSIZE (fieldmode)
      && (movstrict_optab->handlers[(int) fieldmode].insn_code
	  != CODE_FOR_nothing))
    {
      int icode = movstrict_optab->handlers[(int) fieldmode].insn_code;

      /* Get appropriate low part of the value being stored.  */
      if (GET_CODE (value) == CONST_INT || GET_CODE (value) == REG)
	value = gen_lowpart (fieldmode, value);
      else if (!(GET_CODE (value) == SYMBOL_REF
		 || GET_CODE (value) == LABEL_REF
		 || GET_CODE (value) == CONST))
	value = convert_to_mode (fieldmode, value, 0);

      if (! (*insn_data[icode].operand[1].predicate) (value, fieldmode))
	value = copy_to_mode_reg (fieldmode, value);

      if (GET_CODE (op0) == SUBREG)
	{
	  if (GET_MODE (SUBREG_REG (op0)) == fieldmode
	      || GET_MODE_CLASS (fieldmode) == MODE_INT
	      || GET_MODE_CLASS (fieldmode) == MODE_PARTIAL_INT)
	    op0 = SUBREG_REG (op0);
	  else
	    /* Else we've got some float mode source being extracted into
	       a different float mode destination -- this combination of
	       subregs results in Severe Tire Damage.  */
	    abort ();
	}

      emit_insn (GEN_FCN (icode)
		 (gen_rtx_SUBREG (fieldmode, op0,
				  (bitnum % BITS_PER_WORD) / BITS_PER_UNIT
				  + (offset * UNITS_PER_WORD)),
		  value));

      return value;
    }

  /* Handle fields bigger than a word.  */

  if (bitsize > BITS_PER_WORD)
    {
      /* Here we transfer the words of the field
	 in the order least significant first.
	 This is because the most significant word is the one which may
	 be less than full.
	 However, only do that if the value is not BLKmode.  */

      unsigned int backwards = WORDS_BIG_ENDIAN && fieldmode != BLKmode;
      unsigned int nwords = (bitsize + (BITS_PER_WORD - 1)) / BITS_PER_WORD;
      unsigned int i;

      /* This is the mode we must force value to, so that there will be enough
	 subwords to extract.  Note that fieldmode will often (always?) be
	 VOIDmode, because that is what store_field uses to indicate that this
	 is a bit field, but passing VOIDmode to operand_subword_force will
	 result in an abort.  */
      fieldmode = smallest_mode_for_size (nwords * BITS_PER_WORD, MODE_INT);

      for (i = 0; i < nwords; i++)
	{
	  /* If I is 0, use the low-order word in both field and target;
	     if I is 1, use the next to lowest word; and so on.  */
	  unsigned int wordnum = (backwards ? nwords - i - 1 : i);
	  unsigned int bit_offset = (backwards
				     ? MAX ((int) bitsize - ((int) i + 1)
					    * BITS_PER_WORD,
					    0)
				     : (int) i * BITS_PER_WORD);

	  store_bit_field (op0, MIN (BITS_PER_WORD,
				     bitsize - i * BITS_PER_WORD),
			   bitnum + bit_offset, word_mode,
			   operand_subword_force (value, wordnum,
						  (GET_MODE (value) == VOIDmode
						   ? fieldmode
						   : GET_MODE (value))),
			   total_size);
	}
      return value;
    }

  /* From here on we can assume that the field to be stored in is
     a full-word (whatever type that is), since it is shorter than a word.  */

  /* OFFSET is the number of words or bytes (UNIT says which)
     from STR_RTX to the first word or byte containing part of the field.  */

  if (GET_CODE (op0) != MEM)
    {
      if (offset != 0
	  || GET_MODE_SIZE (GET_MODE (op0)) > UNITS_PER_WORD)
	{
	  if (GET_CODE (op0) != REG)
	    {
	      /* Since this is a destination (lvalue), we can't copy it to a
		 pseudo.  We can trivially remove a SUBREG that does not
		 change the size of the operand.  Such a SUBREG may have been
		 added above.  Otherwise, abort.  */
	      if (GET_CODE (op0) == SUBREG
		  && (GET_MODE_SIZE (GET_MODE (op0))
		      == GET_MODE_SIZE (GET_MODE (SUBREG_REG (op0)))))
		op0 = SUBREG_REG (op0);
	      else
		abort ();
	    }
	  op0 = gen_rtx_SUBREG (mode_for_size (BITS_PER_WORD, MODE_INT, 0),
				op0, (offset * UNITS_PER_WORD));
	}
      offset = 0;
    }
  else
    op0 = protect_from_queue (op0, 1);

  /* If VALUE is a floating-point mode, access it as an integer of the
     corresponding size.  This can occur on a machine with 64 bit registers
     that uses SFmode for float.  This can also occur for unaligned float
     structure fields.  */
  if (GET_MODE_CLASS (GET_MODE (value)) != MODE_INT
      && GET_MODE_CLASS (GET_MODE (value)) != MODE_PARTIAL_INT)
    value = gen_lowpart ((GET_MODE (value) == VOIDmode
			  ? word_mode : int_mode_for_mode (GET_MODE (value))),
			 value);

  /* Now OFFSET is nonzero only if OP0 is memory
     and is therefore always measured in bytes.  */

  if (HAVE_insv
      && GET_MODE (value) != BLKmode
      && !(bitsize == 1 && GET_CODE (value) == CONST_INT)
      /* Ensure insv's size is wide enough for this field.  */
      && (GET_MODE_BITSIZE (op_mode) >= bitsize)
      && ! ((GET_CODE (op0) == REG || GET_CODE (op0) == SUBREG)
	    && (bitsize + bitpos > GET_MODE_BITSIZE (op_mode))))
    {
      int xbitpos = bitpos;
      rtx value1;
      rtx xop0 = op0;
      rtx last = get_last_insn ();
      rtx pat;
      enum machine_mode maxmode = mode_for_extraction (EP_insv, 3);
      int save_volatile_ok = volatile_ok;

      volatile_ok = 1;

      /* If this machine's insv can only insert into a register, copy OP0
	 into a register and save it back later.  */
      /* This used to check flag_force_mem, but that was a serious
	 de-optimization now that flag_force_mem is enabled by -O2.  */
      if (GET_CODE (op0) == MEM
	  && ! ((*insn_data[(int) CODE_FOR_insv].operand[0].predicate)
		(op0, VOIDmode)))
	{
	  rtx tempreg;
	  enum machine_mode bestmode;

	  /* Get the mode to use for inserting into this field.  If OP0 is
	     BLKmode, get the smallest mode consistent with the alignment. If
	     OP0 is a non-BLKmode object that is no wider than MAXMODE, use its
	     mode. Otherwise, use the smallest mode containing the field.  */

	  if (GET_MODE (op0) == BLKmode
	      || GET_MODE_SIZE (GET_MODE (op0)) > GET_MODE_SIZE (maxmode))
	    bestmode
	      = get_best_mode (bitsize, bitnum, MEM_ALIGN (op0), maxmode,
			       MEM_VOLATILE_P (op0));
	  else
	    bestmode = GET_MODE (op0);

	  if (bestmode == VOIDmode
	      || (SLOW_UNALIGNED_ACCESS (bestmode, MEM_ALIGN (op0))
		  && GET_MODE_BITSIZE (bestmode) > MEM_ALIGN (op0)))
	    goto insv_loses;

	  /* Adjust address to point to the containing unit of that mode.
	     Compute offset as multiple of this unit, counting in bytes.  */
	  unit = GET_MODE_BITSIZE (bestmode);
	  offset = (bitnum / unit) * GET_MODE_SIZE (bestmode);
	  bitpos = bitnum % unit;
	  op0 = adjust_address (op0, bestmode, offset);

	  /* Fetch that unit, store the bitfield in it, then store
	     the unit.  */
	  tempreg = copy_to_reg (op0);
	  store_bit_field (tempreg, bitsize, bitpos, fieldmode, value,
			   total_size);
	  emit_move_insn (op0, tempreg);
	  return value;
	}
      volatile_ok = save_volatile_ok;

      /* Add OFFSET into OP0's address.  */
      if (GET_CODE (xop0) == MEM)
	xop0 = adjust_address (xop0, byte_mode, offset);

      /* If xop0 is a register, we need it in MAXMODE
	 to make it acceptable to the format of insv.  */
      if (GET_CODE (xop0) == SUBREG)
	/* We can't just change the mode, because this might clobber op0,
	   and we will need the original value of op0 if insv fails.  */
	xop0 = gen_rtx_SUBREG (maxmode, SUBREG_REG (xop0), SUBREG_BYTE (xop0));
      if (GET_CODE (xop0) == REG && GET_MODE (xop0) != maxmode)
	xop0 = gen_rtx_SUBREG (maxmode, xop0, 0);

      /* On big-endian machines, we count bits from the most significant.
	 If the bit field insn does not, we must invert.  */

      if (BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
	xbitpos = unit - bitsize - xbitpos;

      /* We have been counting XBITPOS within UNIT.
	 Count instead within the size of the register.  */
      if (BITS_BIG_ENDIAN && GET_CODE (xop0) != MEM)
	xbitpos += GET_MODE_BITSIZE (maxmode) - unit;

      unit = GET_MODE_BITSIZE (maxmode);

      /* Convert VALUE to maxmode (which insv insn wants) in VALUE1.  */
      value1 = value;
      if (GET_MODE (value) != maxmode)
	{
	  if (GET_MODE_BITSIZE (GET_MODE (value)) >= bitsize)
	    {
	      /* Optimization: Don't bother really extending VALUE
		 if it has all the bits we will actually use.  However,
		 if we must narrow it, be sure we do it correctly.  */

	      if (GET_MODE_SIZE (GET_MODE (value)) < GET_MODE_SIZE (maxmode))
		{
		  rtx tmp;

		  tmp = simplify_subreg (maxmode, value1, GET_MODE (value), 0);
		  if (! tmp)
		    tmp = simplify_gen_subreg (maxmode,
					       force_reg (GET_MODE (value),
							  value1),
					       GET_MODE (value), 0);
		  value1 = tmp;
		}
	      else
		value1 = gen_lowpart (maxmode, value1);
	    }
	  else if (GET_CODE (value) == CONST_INT)
	    value1 = gen_int_mode (INTVAL (value), maxmode);
	  else if (!CONSTANT_P (value))
	    /* Parse phase is supposed to make VALUE's data type
	       match that of the component reference, which is a type
	       at least as wide as the field; so VALUE should have
	       a mode that corresponds to that type.  */
	    abort ();
	}

      /* If this machine's insv insists on a register,
	 get VALUE1 into a register.  */
      if (! ((*insn_data[(int) CODE_FOR_insv].operand[3].predicate)
	     (value1, maxmode)))
	value1 = force_reg (maxmode, value1);

      pat = gen_insv (xop0, GEN_INT (bitsize), GEN_INT (xbitpos), value1);
      if (pat)
	emit_insn (pat);
      else
	{
	  delete_insns_since (last);
	  store_fixed_bit_field (op0, offset, bitsize, bitpos, value);
	}
    }
  else
    insv_loses:
    /* Insv is not available; store using shifts and boolean ops.  */
    store_fixed_bit_field (op0, offset, bitsize, bitpos, value);

  return value;
}
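/* Usage sketch (illustrative, not part of the original file): store the
   constant 5 into an 8-bit field starting at bit 3 of a fresh pseudo.
   The helper name example_store_field is hypothetical.  */
#if 0
static void
example_store_field ()
{
  rtx reg = gen_reg_rtx (SImode);
  store_bit_field (reg, /* bitsize */ 8, /* bitnum */ 3, QImode,
		   GEN_INT (5), /* total_size */ -1);
}
#endif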
/* Use shifts and boolean operations to store VALUE
   into a bit field of width BITSIZE
   in a memory location specified by OP0 except offset by OFFSET bytes.
     (OFFSET must be 0 if OP0 is a register.)
   The field starts at position BITPOS within the byte.
     (If OP0 is a register, it may be a full word or a narrower mode,
     but BITPOS still counts within a full word,
     which is significant on bigendian machines.)

   Note that protect_from_queue has already been done on OP0 and VALUE.  */

static void
store_fixed_bit_field (op0, offset, bitsize, bitpos, value)
     rtx op0;
     unsigned HOST_WIDE_INT offset, bitsize, bitpos;
     rtx value;
{
  enum machine_mode mode;
  unsigned int total_bits = BITS_PER_WORD;
  rtx subtarget, temp;
  int all_zero = 0;
  int all_one = 0;

  /* There is a case not handled here:
     a structure with a known alignment of just a halfword
     and a field split across two aligned halfwords within the structure.
     Or likewise a structure with a known alignment of just a byte
     and a field split across two bytes.
     Such cases are not supposed to be able to occur.  */

  if (GET_CODE (op0) == REG || GET_CODE (op0) == SUBREG)
    {
      if (offset != 0)
	abort ();
      /* Special treatment for a bit field split across two registers.  */
      if (bitsize + bitpos > BITS_PER_WORD)
	{
	  store_split_bit_field (op0, bitsize, bitpos, value);
	  return;
	}
    }
  else
    {
      /* Get the proper mode to use for this field.  We want a mode that
	 includes the entire field.  If such a mode would be larger than
	 a word, we won't be doing the extraction the normal way.
	 We don't want a mode bigger than the destination.  */

      mode = GET_MODE (op0);
      if (GET_MODE_BITSIZE (mode) == 0
	  || GET_MODE_BITSIZE (mode) > GET_MODE_BITSIZE (word_mode))
	mode = word_mode;
      mode = get_best_mode (bitsize, bitpos + offset * BITS_PER_UNIT,
			    MEM_ALIGN (op0), mode, MEM_VOLATILE_P (op0));

      if (mode == VOIDmode)
	{
	  /* The only way this should occur is if the field spans word
	     boundaries.  */
	  store_split_bit_field (op0, bitsize, bitpos + offset * BITS_PER_UNIT,
				 value);
	  return;
	}

      total_bits = GET_MODE_BITSIZE (mode);

      /* Make sure bitpos is valid for the chosen mode.  Adjust BITPOS to
	 be in the range 0 to total_bits-1, and put any excess bytes in
	 OFFSET.  */
      if (bitpos >= total_bits)
	{
	  offset += (bitpos / total_bits) * (total_bits / BITS_PER_UNIT);
	  bitpos -= ((bitpos / total_bits) * (total_bits / BITS_PER_UNIT)
		     * BITS_PER_UNIT);
	}

      /* Get ref to an aligned byte, halfword, or word containing the field.
	 Adjust BITPOS to be position within a word,
	 and OFFSET to be the offset of that word.
	 Then alter OP0 to refer to that word.  */
      bitpos += (offset % (total_bits / BITS_PER_UNIT)) * BITS_PER_UNIT;
      offset -= (offset % (total_bits / BITS_PER_UNIT));
      op0 = adjust_address (op0, mode, offset);
    }

  mode = GET_MODE (op0);

  /* Now MODE is either some integral mode for a MEM as OP0,
     or is a full-word for a REG as OP0.  TOTAL_BITS corresponds.
     The bit field is contained entirely within OP0.
     BITPOS is the starting bit number within OP0.
     (OP0's mode may actually be narrower than MODE.)  */

  if (BYTES_BIG_ENDIAN)
    /* BITPOS is the distance between our msb
       and that of the containing datum.
       Convert it to the distance from the lsb.  */
    bitpos = total_bits - bitsize - bitpos;

  /* Now BITPOS is always the distance between our lsb
     and that of OP0.  */

  /* Shift VALUE left by BITPOS bits.  If VALUE is not constant,
     we must first convert its mode to MODE.  */

  if (GET_CODE (value) == CONST_INT)
    {
      HOST_WIDE_INT v = INTVAL (value);

      if (bitsize < HOST_BITS_PER_WIDE_INT)
	v &= ((HOST_WIDE_INT) 1 << bitsize) - 1;

      if (v == 0)
	all_zero = 1;
      else if ((bitsize < HOST_BITS_PER_WIDE_INT
		&& v == ((HOST_WIDE_INT) 1 << bitsize) - 1)
	       || (bitsize == HOST_BITS_PER_WIDE_INT && v == -1))
	all_one = 1;

      value = lshift_value (mode, value, bitpos, bitsize);
    }
  else
    {
      int must_and = (GET_MODE_BITSIZE (GET_MODE (value)) != bitsize
		      && bitpos + bitsize != GET_MODE_BITSIZE (mode));

      if (GET_MODE (value) != mode)
	{
	  if ((GET_CODE (value) == REG || GET_CODE (value) == SUBREG)
	      && GET_MODE_SIZE (mode) < GET_MODE_SIZE (GET_MODE (value)))
	    value = gen_lowpart (mode, value);
	  else
	    value = convert_to_mode (mode, value, 1);
	}

      if (must_and)
	value = expand_binop (mode, and_optab, value,
			      mask_rtx (mode, 0, bitsize, 0),
			      NULL_RTX, 1, OPTAB_LIB_WIDEN);
      if (bitpos > 0)
	value = expand_shift (LSHIFT_EXPR, mode, value,
			      build_int_2 (bitpos, 0), NULL_RTX, 1);
    }

  /* Now clear the chosen bits in OP0,
     except that if VALUE is -1 we need not bother.  */

  subtarget = (GET_CODE (op0) == REG || ! flag_force_mem) ? op0 : 0;

  if (! all_one)
    {
      temp = expand_binop (mode, and_optab, op0,
			   mask_rtx (mode, bitpos, bitsize, 1),
			   subtarget, 1, OPTAB_LIB_WIDEN);
      subtarget = temp;
    }
  else
    temp = op0;

  /* Now logical-or VALUE into OP0, unless it is zero.  */

  if (! all_zero)
    {
      temp = expand_binop (mode, ior_optab, temp, value,
			   subtarget, 1, OPTAB_LIB_WIDEN);
      subtarget = temp;
    }

  if (op0 != temp)
    emit_move_insn (op0, temp);
}
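/* Worked example of the shift-and-mask sequence above (assuming a
   little-endian 32-bit word): storing an 8-bit field at BITPOS 4
   computes OP0 = (OP0 & ~(0xff << 4)) | ((VALUE & 0xff) << 4), where
   mask_rtx supplies both mask constants.  */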
/* Store a bit field that is split across multiple accessible memory objects.

   OP0 is the REG, SUBREG or MEM rtx for the first of the objects.
   BITSIZE is the field width; BITPOS the position of its first bit
   (within the word).
   VALUE is the value to store.

   This does not yet handle fields wider than BITS_PER_WORD.  */

static void
store_split_bit_field (op0, bitsize, bitpos, value)
     rtx op0;
     unsigned HOST_WIDE_INT bitsize, bitpos;
     rtx value;
{
  unsigned int unit;
  unsigned int bitsdone = 0;

  /* Make sure UNIT isn't larger than BITS_PER_WORD, we can only handle that
     much at a time.  */
  if (GET_CODE (op0) == REG || GET_CODE (op0) == SUBREG)
    unit = BITS_PER_WORD;
  else
    unit = MIN (MEM_ALIGN (op0), BITS_PER_WORD);

  /* If VALUE is a constant other than a CONST_INT, get it into a register in
     WORD_MODE.  If we can do this using gen_lowpart_common, do so.  Note
     that VALUE might be a floating-point constant.  */
  if (CONSTANT_P (value) && GET_CODE (value) != CONST_INT)
    {
      rtx word = gen_lowpart_common (word_mode, value);

      if (word && (value != word))
	value = word;
      else
	value = gen_lowpart_common (word_mode,
				    force_reg (GET_MODE (value) != VOIDmode
					       ? GET_MODE (value)
					       : word_mode, value));
    }
  else if (GET_CODE (value) == ADDRESSOF)
    value = copy_to_reg (value);

  while (bitsdone < bitsize)
    {
      unsigned HOST_WIDE_INT thissize;
      rtx part, word;
      unsigned HOST_WIDE_INT thispos;
      unsigned HOST_WIDE_INT offset;

      offset = (bitpos + bitsdone) / unit;
      thispos = (bitpos + bitsdone) % unit;

      /* THISSIZE must not overrun a word boundary.  Otherwise,
	 store_fixed_bit_field will call us again, and we will mutually
	 recurse forever.  */
      thissize = MIN (bitsize - bitsdone, BITS_PER_WORD);
      thissize = MIN (thissize, unit - thispos);

      if (BYTES_BIG_ENDIAN)
	{
	  int total_bits;

	  /* We must do an endian conversion exactly the same way as it is
	     done in extract_bit_field, so that the two calls to
	     extract_fixed_bit_field will have comparable arguments.  */
	  if (GET_CODE (value) != MEM || GET_MODE (value) == BLKmode)
	    total_bits = BITS_PER_WORD;
	  else
	    total_bits = GET_MODE_BITSIZE (GET_MODE (value));

	  /* Fetch successively less significant portions.  */
	  if (GET_CODE (value) == CONST_INT)
	    part = GEN_INT (((unsigned HOST_WIDE_INT) (INTVAL (value))
			     >> (bitsize - bitsdone - thissize))
			    & (((HOST_WIDE_INT) 1 << thissize) - 1));
	  else
	    /* The args are chosen so that the last part includes the
	       lsb.  Give extract_bit_field the value it needs (with
	       endianness compensation) to fetch the piece we want.  */
	    part = extract_fixed_bit_field (word_mode, value, 0, thissize,
					    total_bits - bitsize + bitsdone,
					    NULL_RTX, 1);
	}
      else
	{
	  /* Fetch successively more significant portions.  */
	  if (GET_CODE (value) == CONST_INT)
	    part = GEN_INT (((unsigned HOST_WIDE_INT) (INTVAL (value))
			     >> bitsdone)
			    & (((HOST_WIDE_INT) 1 << thissize) - 1));
	  else
	    part = extract_fixed_bit_field (word_mode, value, 0, thissize,
					    bitsdone, NULL_RTX, 1);
	}

      /* If OP0 is a register, then handle OFFSET here.

	 When handling multiword bitfields, extract_bit_field may pass
	 down a word_mode SUBREG of a larger REG for a bitfield that actually
	 crosses a word boundary.  Thus, for a SUBREG, we must find
	 the current word starting from the base register.  */
      if (GET_CODE (op0) == SUBREG)
	{
	  int word_offset = (SUBREG_BYTE (op0) / UNITS_PER_WORD) + offset;
	  word = operand_subword_force (SUBREG_REG (op0), word_offset,
					GET_MODE (SUBREG_REG (op0)));
	  offset = 0;
	}
      else if (GET_CODE (op0) == REG)
	{
	  word = operand_subword_force (op0, offset, GET_MODE (op0));
	  offset = 0;
	}
      else
	word = op0;

      /* OFFSET is in UNITs, and UNIT is in bits.
	 store_fixed_bit_field wants offset in bytes.  */
      store_fixed_bit_field (word, offset * unit / BITS_PER_UNIT, thissize,
			     thispos, part);
      bitsdone += thissize;
    }
}
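/* Worked example (illustrative): with BITS_PER_WORD == 32, storing a
   10-bit field at BITPOS 28 takes two iterations: the first stores
   thissize = MIN (10, 32 - 28) = 4 bits into word 0, the second the
   remaining 6 bits at position 0 of word 1.  */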
/* Generate code to extract a byte-field from STR_RTX
   containing BITSIZE bits, starting at BITNUM,
   and put it in TARGET if possible (if TARGET is nonzero).
   Regardless of TARGET, we return the rtx for where the value is placed.
   It may be a QUEUED.

   STR_RTX is the structure containing the byte (a REG or MEM).
   UNSIGNEDP is nonzero if this is an unsigned bit field.
   MODE is the natural mode of the field value once extracted.
   TMODE is the mode the caller would like the value to have;
   but the value may be returned with type MODE instead.

   TOTAL_SIZE is the size in bytes of the containing structure,
   or -1 if varying.

   If a TARGET is specified and we can store in it at no extra cost,
   we do so, and return TARGET.
   Otherwise, we return a REG of mode TMODE or MODE, with TMODE preferred
   if they are equally easy.  */

rtx
extract_bit_field (str_rtx, bitsize, bitnum, unsignedp,
		   target, mode, tmode, total_size)
     rtx str_rtx;
     unsigned HOST_WIDE_INT bitsize;
     unsigned HOST_WIDE_INT bitnum;
     int unsignedp;
     rtx target;
     enum machine_mode mode, tmode;
     HOST_WIDE_INT total_size;
{
  unsigned int unit
    = (GET_CODE (str_rtx) == MEM) ? BITS_PER_UNIT : BITS_PER_WORD;
  unsigned HOST_WIDE_INT offset = bitnum / unit;
  unsigned HOST_WIDE_INT bitpos = bitnum % unit;
  rtx op0 = str_rtx;
  rtx spec_target = target;
  rtx spec_target_subreg = 0;
  enum machine_mode int_mode;
  enum machine_mode extv_mode = mode_for_extraction (EP_extv, 0);
  enum machine_mode extzv_mode = mode_for_extraction (EP_extzv, 0);
  enum machine_mode mode1;
  int byte_offset;

  /* Discount the part of the structure before the desired byte.
     We need to know how many bytes are safe to reference after it.  */
  if (total_size >= 0)
    total_size -= (bitpos / BIGGEST_ALIGNMENT
		   * (BIGGEST_ALIGNMENT / BITS_PER_UNIT));

  if (tmode == VOIDmode)
    tmode = mode;

  while (GET_CODE (op0) == SUBREG)
    {
      bitpos += SUBREG_BYTE (op0) * BITS_PER_UNIT;
      if (bitpos > unit)
	{
	  offset += (bitpos / unit);
	  bitpos %= unit;
	}
      op0 = SUBREG_REG (op0);
    }

  if (GET_CODE (op0) == REG
      && mode == GET_MODE (op0)
      && bitnum == 0
      && bitsize == GET_MODE_BITSIZE (GET_MODE (op0)))
    {
      /* We're trying to extract a full register from itself.  */
      return op0;
    }

  /* Make sure we are playing with integral modes.  Pun with subregs
     if we aren't.  */
  {
    enum machine_mode imode = int_mode_for_mode (GET_MODE (op0));
    if (imode != GET_MODE (op0))
      {
	if (GET_CODE (op0) == MEM)
	  op0 = adjust_address (op0, imode, 0);
	else if (imode != BLKmode)
	  op0 = gen_lowpart (imode, op0);
	else
	  abort ();
      }
  }

  /* We may be accessing data outside the field, which means
     we can alias adjacent data.  */
  if (GET_CODE (op0) == MEM)
    {
      op0 = shallow_copy_rtx (op0);
      set_mem_alias_set (op0, 0);
      set_mem_expr (op0, 0);
    }

  /* Extraction of a full-word or multi-word value from a structure
     in a register or aligned memory can be done with just a SUBREG.
     A subword value in the least significant part of a register
     can also be extracted with a SUBREG.  For this, we need the
     byte offset of the value in op0.  */

  byte_offset = bitpos / BITS_PER_UNIT + offset * UNITS_PER_WORD;

  /* If OP0 is a register, BITPOS must count within a word.
     But as we have it, it counts within whatever size OP0 now has.
     On a bigendian machine, these are not the same, so convert.  */
  if (BYTES_BIG_ENDIAN
      && GET_CODE (op0) != MEM
      && unit > GET_MODE_BITSIZE (GET_MODE (op0)))
    bitpos += unit - GET_MODE_BITSIZE (GET_MODE (op0));

  /* ??? We currently assume TARGET is at least as big as BITSIZE.
     If that's wrong, the solution is to test for it and set TARGET to 0
     if needed.  */

  mode1 = (VECTOR_MODE_P (tmode)
	   ? mode
	   : mode_for_size (bitsize, GET_MODE_CLASS (tmode), 0));

  if (((GET_CODE (op0) != MEM
	&& TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
				  GET_MODE_BITSIZE (GET_MODE (op0)))
	&& GET_MODE_SIZE (mode1) != 0
	&& byte_offset % GET_MODE_SIZE (mode1) == 0)
       || (GET_CODE (op0) == MEM
	   && (! SLOW_UNALIGNED_ACCESS (mode, MEM_ALIGN (op0))
	       || (offset * BITS_PER_UNIT % bitsize == 0
		   && MEM_ALIGN (op0) % bitsize == 0))))
      && ((bitsize >= BITS_PER_WORD && bitsize == GET_MODE_BITSIZE (mode)
	   && bitpos % BITS_PER_WORD == 0)
	  || (mode_for_size (bitsize, GET_MODE_CLASS (tmode), 0) != BLKmode
	      /* ??? The big endian test here is wrong.  This is correct
		 if the value is in a register, and if mode_for_size is not
		 the same mode as op0.  This causes us to get unnecessarily
		 inefficient code from the Thumb port when -mbig-endian.  */
	      && (BYTES_BIG_ENDIAN
		  ? bitpos + bitsize == BITS_PER_WORD
		  : bitpos == 0))))
    {
      if (mode1 != GET_MODE (op0))
	{
	  if (GET_CODE (op0) == SUBREG)
	    {
	      if (GET_MODE (SUBREG_REG (op0)) == mode1
		  || GET_MODE_CLASS (mode1) == MODE_INT
		  || GET_MODE_CLASS (mode1) == MODE_PARTIAL_INT)
		op0 = SUBREG_REG (op0);
	      else
		/* Else we've got some float mode source being extracted into
		   a different float mode destination -- this combination of
		   subregs results in Severe Tire Damage.  */
		goto no_subreg_mode_swap;
	    }
	  if (GET_CODE (op0) == REG)
	    op0 = gen_rtx_SUBREG (mode1, op0, byte_offset);
	  else
	    op0 = adjust_address (op0, mode1, offset);
	}
      if (mode1 != mode)
	return convert_to_mode (tmode, op0, unsignedp);
      return op0;
    }
 no_subreg_mode_swap:

  /* Handle fields bigger than a word.  */

  if (bitsize > BITS_PER_WORD)
    {
      /* Here we transfer the words of the field
	 in the order least significant first.
	 This is because the most significant word is the one which may
	 be less than full.  */

      unsigned int nwords = (bitsize + (BITS_PER_WORD - 1)) / BITS_PER_WORD;
      unsigned int i;

      if (target == 0 || GET_CODE (target) != REG)
	target = gen_reg_rtx (mode);

      /* Indicate for flow that the entire target reg is being set.  */
      emit_insn (gen_rtx_CLOBBER (VOIDmode, target));

      for (i = 0; i < nwords; i++)
	{
	  /* If I is 0, use the low-order word in both field and target;
	     if I is 1, use the next to lowest word; and so on.  */
	  /* Word number in TARGET to use.  */
	  unsigned int wordnum
	    = (WORDS_BIG_ENDIAN
	       ? GET_MODE_SIZE (GET_MODE (target)) / UNITS_PER_WORD - i - 1
	       : i);
	  /* Offset from start of field in OP0.  */
	  unsigned int bit_offset = (WORDS_BIG_ENDIAN
				     ? MAX (0, ((int) bitsize - ((int) i + 1)
						* (int) BITS_PER_WORD))
				     : (int) i * BITS_PER_WORD);
	  rtx target_part = operand_subword (target, wordnum, 1, VOIDmode);
	  rtx result_part
	    = extract_bit_field (op0, MIN (BITS_PER_WORD,
					   bitsize - i * BITS_PER_WORD),
				 bitnum + bit_offset, 1, target_part, mode,
				 word_mode, total_size);

	  if (target_part == 0)
	    abort ();

	  if (result_part != target_part)
	    emit_move_insn (target_part, result_part);
	}

      if (unsignedp)
	{
	  /* Unless we've filled TARGET, the upper regs in a multi-reg value
	     need to be zero'd out.  */
	  if (GET_MODE_SIZE (GET_MODE (target)) > nwords * UNITS_PER_WORD)
	    {
	      unsigned int i, total_words;

	      total_words = GET_MODE_SIZE (GET_MODE (target)) / UNITS_PER_WORD;
	      for (i = nwords; i < total_words; i++)
		emit_move_insn
		  (operand_subword (target,
				    WORDS_BIG_ENDIAN ? total_words - i - 1 : i,
				    1, VOIDmode),
		   const0_rtx);
	    }
	  return target;
	}

      /* Signed bit field: sign-extend with two arithmetic shifts.  */
      target = expand_shift (LSHIFT_EXPR, mode, target,
			     build_int_2 (GET_MODE_BITSIZE (mode) - bitsize, 0),
			     NULL_RTX, 0);
      return expand_shift (RSHIFT_EXPR, mode, target,
			   build_int_2 (GET_MODE_BITSIZE (mode) - bitsize, 0),
			   NULL_RTX, 0);
    }

  /* From here on we know the desired field is smaller than a word.  */

  /* Check if there is a correspondingly-sized integer field, so we can
     safely extract it as one size of integer, if necessary; then
     truncate or extend to the size that is wanted; then use SUBREGs or
     convert_to_mode to get one of the modes we really wanted.  */

  int_mode = int_mode_for_mode (tmode);
  if (int_mode == BLKmode)
    int_mode = int_mode_for_mode (mode);
  if (int_mode == BLKmode)
    abort ();	/* Should probably push op0 out to memory and then
		   do a load.  */

  /* OFFSET is the number of words or bytes (UNIT says which)
     from STR_RTX to the first word or byte containing part of the field.  */

  if (GET_CODE (op0) != MEM)
    {
      if (offset != 0
	  || GET_MODE_SIZE (GET_MODE (op0)) > UNITS_PER_WORD)
	{
	  if (GET_CODE (op0) != REG)
	    op0 = copy_to_reg (op0);
	  op0 = gen_rtx_SUBREG (mode_for_size (BITS_PER_WORD, MODE_INT, 0),
				op0, (offset * UNITS_PER_WORD));
	}
      offset = 0;
    }
  else
    op0 = protect_from_queue (str_rtx, 1);

  /* Now OFFSET is nonzero only for memory operands.  */

  if (unsignedp)
    {
      if (HAVE_extzv
	  && (GET_MODE_BITSIZE (extzv_mode) >= bitsize)
	  && ! ((GET_CODE (op0) == REG || GET_CODE (op0) == SUBREG)
		&& (bitsize + bitpos > GET_MODE_BITSIZE (extzv_mode))))
	{
	  unsigned HOST_WIDE_INT xbitpos = bitpos, xoffset = offset;
	  rtx bitsize_rtx, bitpos_rtx;
	  rtx last = get_last_insn ();
	  rtx xop0 = op0;
	  rtx xtarget = target;
	  rtx xspec_target = spec_target;
	  rtx xspec_target_subreg = spec_target_subreg;
	  rtx pat;
	  enum machine_mode maxmode = mode_for_extraction (EP_extzv, 0);

	  if (GET_CODE (xop0) == MEM)
	    {
	      int save_volatile_ok = volatile_ok;
	      volatile_ok = 1;

	      /* Is the memory operand acceptable?  */
	      if (! ((*insn_data[(int) CODE_FOR_extzv].operand[1].predicate)
		     (xop0, GET_MODE (xop0))))
		{
		  /* No, load into a reg and extract from there.  */
		  enum machine_mode bestmode;

		  /* Get the mode to use for inserting into this field.  If
		     OP0 is BLKmode, get the smallest mode consistent with the
		     alignment.  If OP0 is a non-BLKmode object that is no
		     wider than MAXMODE, use its mode.  Otherwise, use the
		     smallest mode containing the field.  */

		  if (GET_MODE (xop0) == BLKmode
		      || (GET_MODE_SIZE (GET_MODE (op0))
			  > GET_MODE_SIZE (maxmode)))
		    bestmode = get_best_mode (bitsize, bitnum,
					      MEM_ALIGN (xop0), maxmode,
					      MEM_VOLATILE_P (xop0));
		  else
		    bestmode = GET_MODE (xop0);

		  if (bestmode == VOIDmode
		      || (SLOW_UNALIGNED_ACCESS (bestmode, MEM_ALIGN (xop0))
			  && GET_MODE_BITSIZE (bestmode) > MEM_ALIGN (xop0)))
		    goto extzv_loses;

		  /* Compute offset as multiple of this unit,
		     counting in bytes.  */
		  unit = GET_MODE_BITSIZE (bestmode);
		  xoffset = (bitnum / unit) * GET_MODE_SIZE (bestmode);
		  xbitpos = bitnum % unit;
		  xop0 = adjust_address (xop0, bestmode, xoffset);

		  /* Fetch it to a register in that size.  */
		  xop0 = force_reg (bestmode, xop0);

		  /* XBITPOS counts within UNIT, which is what is expected.  */
		}
	      else
		/* Get ref to first byte containing part of the field.  */
		xop0 = adjust_address (xop0, byte_mode, xoffset);

	      volatile_ok = save_volatile_ok;
	    }

	  /* If op0 is a register, we need it in MAXMODE (which is usually
	     SImode). to make it acceptable to the format of extzv.  */
	  if (GET_CODE (xop0) == SUBREG && GET_MODE (xop0) != maxmode)
	    goto extzv_loses;
	  if (GET_CODE (xop0) == REG && GET_MODE (xop0) != maxmode)
	    xop0 = gen_rtx_SUBREG (maxmode, xop0, 0);

	  /* On big-endian machines, we count bits from the most significant.
	     If the bit field insn does not, we must invert.  */
	  if (BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
	    xbitpos = unit - bitsize - xbitpos;

	  /* Now convert from counting within UNIT to counting in MAXMODE.  */
	  if (BITS_BIG_ENDIAN && GET_CODE (xop0) != MEM)
	    xbitpos += GET_MODE_BITSIZE (maxmode) - unit;

	  unit = GET_MODE_BITSIZE (maxmode);

	  if (xtarget == 0
	      || (flag_force_mem && GET_CODE (xtarget) == MEM))
	    xtarget = xspec_target = gen_reg_rtx (tmode);

	  if (GET_MODE (xtarget) != maxmode)
	    {
	      if (GET_CODE (xtarget) == REG)
		{
		  int wider = (GET_MODE_SIZE (maxmode)
			       > GET_MODE_SIZE (GET_MODE (xtarget)));
		  xtarget = gen_lowpart (maxmode, xtarget);
		  if (wider)
		    xspec_target_subreg = xtarget;
		}
	      else
		xtarget = gen_reg_rtx (maxmode);
	    }

	  /* If this machine's extzv insists on a register target,
	     make sure we have one.  */
	  if (! ((*insn_data[(int) CODE_FOR_extzv].operand[0].predicate)
		 (xtarget, maxmode)))
	    xtarget = gen_reg_rtx (maxmode);

	  bitsize_rtx = GEN_INT (bitsize);
	  bitpos_rtx = GEN_INT (xbitpos);

	  pat = gen_extzv (protect_from_queue (xtarget, 1),
			   xop0, bitsize_rtx, bitpos_rtx);
	  if (pat)
	    {
	      emit_insn (pat);
	      target = xtarget;
	      spec_target = xspec_target;
	      spec_target_subreg = xspec_target_subreg;
	    }
	  else
	    {
	      delete_insns_since (last);
	      target = extract_fixed_bit_field (int_mode, op0, offset, bitsize,
						bitpos, target, 1);
	    }
	}
      else
      extzv_loses:
	target = extract_fixed_bit_field (int_mode, op0, offset, bitsize,
					  bitpos, target, 1);
    }
  else
    {
      if (HAVE_extv
	  && (GET_MODE_BITSIZE (extv_mode) >= bitsize)
	  && ! ((GET_CODE (op0) == REG || GET_CODE (op0) == SUBREG)
		&& (bitsize + bitpos > GET_MODE_BITSIZE (extv_mode))))
	{
	  int xbitpos = bitpos, xoffset = offset;
	  rtx bitsize_rtx, bitpos_rtx;
	  rtx last = get_last_insn ();
	  rtx xop0 = op0, xtarget = target;
	  rtx xspec_target = spec_target;
	  rtx xspec_target_subreg = spec_target_subreg;
	  rtx pat;
	  enum machine_mode maxmode = mode_for_extraction (EP_extv, 0);

	  if (GET_CODE (xop0) == MEM)
	    {
	      /* Is the memory operand acceptable?  */
	      if (! ((*insn_data[(int) CODE_FOR_extv].operand[1].predicate)
		     (xop0, GET_MODE (xop0))))
		{
		  /* No, load into a reg and extract from there.  */
		  enum machine_mode bestmode;

		  /* Get the mode to use for inserting into this field.  If
		     OP0 is BLKmode, get the smallest mode consistent with the
		     alignment.  If OP0 is a non-BLKmode object that is no
		     wider than MAXMODE, use its mode.  Otherwise, use the
		     smallest mode containing the field.  */

		  if (GET_MODE (xop0) == BLKmode
		      || (GET_MODE_SIZE (GET_MODE (op0))
			  > GET_MODE_SIZE (maxmode)))
		    bestmode = get_best_mode (bitsize, bitnum,
					      MEM_ALIGN (xop0), maxmode,
					      MEM_VOLATILE_P (xop0));
		  else
		    bestmode = GET_MODE (xop0);

		  if (bestmode == VOIDmode
		      || (SLOW_UNALIGNED_ACCESS (bestmode, MEM_ALIGN (xop0))
			  && GET_MODE_BITSIZE (bestmode) > MEM_ALIGN (xop0)))
		    goto extv_loses;

		  /* Compute offset as multiple of this unit,
		     counting in bytes.  */
		  unit = GET_MODE_BITSIZE (bestmode);
		  xoffset = (bitnum / unit) * GET_MODE_SIZE (bestmode);
		  xbitpos = bitnum % unit;
		  xop0 = adjust_address (xop0, bestmode, xoffset);

		  /* Fetch it to a register in that size.  */
		  xop0 = force_reg (bestmode, xop0);

		  /* XBITPOS counts within UNIT, which is what is expected.  */
		}
	      else
		/* Get ref to first byte containing part of the field.  */
		xop0 = adjust_address (xop0, byte_mode, xoffset);
	    }

	  /* If op0 is a register, we need it in MAXMODE (which is usually
	     SImode) to make it acceptable to the format of extv.  */
	  if (GET_CODE (xop0) == SUBREG && GET_MODE (xop0) != maxmode)
	    goto extv_loses;
	  if (GET_CODE (xop0) == REG && GET_MODE (xop0) != maxmode)
	    xop0 = gen_rtx_SUBREG (maxmode, xop0, 0);

	  /* On big-endian machines, we count bits from the most significant.
	     If the bit field insn does not, we must invert.  */
	  if (BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
	    xbitpos = unit - bitsize - xbitpos;

	  /* XBITPOS counts within a size of UNIT.
	     Adjust to count within a size of MAXMODE.  */
	  if (BITS_BIG_ENDIAN && GET_CODE (xop0) != MEM)
	    xbitpos += (GET_MODE_BITSIZE (maxmode) - unit);

	  unit = GET_MODE_BITSIZE (maxmode);

	  if (xtarget == 0
	      || (flag_force_mem && GET_CODE (xtarget) == MEM))
	    xtarget = xspec_target = gen_reg_rtx (tmode);

	  if (GET_MODE (xtarget) != maxmode)
	    {
	      if (GET_CODE (xtarget) == REG)
		{
		  int wider = (GET_MODE_SIZE (maxmode)
			       > GET_MODE_SIZE (GET_MODE (xtarget)));
		  xtarget = gen_lowpart (maxmode, xtarget);
		  if (wider)
		    xspec_target_subreg = xtarget;
		}
	      else
		xtarget = gen_reg_rtx (maxmode);
	    }

	  /* If this machine's extv insists on a register target,
	     make sure we have one.  */
	  if (! ((*insn_data[(int) CODE_FOR_extv].operand[0].predicate)
		 (xtarget, maxmode)))
	    xtarget = gen_reg_rtx (maxmode);

	  bitsize_rtx = GEN_INT (bitsize);
	  bitpos_rtx = GEN_INT (xbitpos);

	  pat = gen_extv (protect_from_queue (xtarget, 1),
			  xop0, bitsize_rtx, bitpos_rtx);
	  if (pat)
	    {
	      emit_insn (pat);
	      target = xtarget;
	      spec_target = xspec_target;
	      spec_target_subreg = xspec_target_subreg;
	    }
	  else
	    {
	      delete_insns_since (last);
	      target = extract_fixed_bit_field (int_mode, op0, offset, bitsize,
						bitpos, target, 0);
	    }
	}
      else
      extv_loses:
	target = extract_fixed_bit_field (int_mode, op0, offset, bitsize,
					  bitpos, target, 0);
    }
  if (target == spec_target)
    return target;
  if (target == spec_target_subreg)
    return spec_target;
  if (GET_MODE (target) != tmode && GET_MODE (target) != mode)
    {
      /* If the target mode is floating-point, first convert to the
	 integer mode of that size and then access it as a floating-point
	 value via a SUBREG.  */
      if (GET_MODE_CLASS (tmode) != MODE_INT
	  && GET_MODE_CLASS (tmode) != MODE_PARTIAL_INT)
	{
	  target = convert_to_mode (mode_for_size (GET_MODE_BITSIZE (tmode),
						   MODE_INT, 0),
				    target, unsignedp);
	  return gen_lowpart (tmode, target);
	}

      return convert_to_mode (tmode, target, unsignedp);
    }
  return target;
}
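/* Usage sketch (illustrative, not part of the original file): extract an
   unsigned 8-bit field starting at bit 3 of a pseudo, asking for an
   SImode result.  The helper name example_extract_field is
   hypothetical.  */
#if 0
static rtx
example_extract_field (reg)
     rtx reg;
{
  return extract_bit_field (reg, /* bitsize */ 8, /* bitnum */ 3,
			    /* unsignedp */ 1, NULL_RTX,
			    SImode, SImode, /* total_size */ -1);
}
#endif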
/* Extract a bit field using shifts and boolean operations
   Returns an rtx to represent the value.
   OP0 addresses a register (word) or memory (byte).
   BITPOS says which bit within the word or byte the bit field starts in.
   OFFSET says how many bytes farther the bit field starts;
     it is 0 if OP0 is a register.
   BITSIZE says how many bits long the bit field is.
     (If OP0 is a register, it may be narrower than a full word,
     but BITPOS still counts within a full word,
     which is significant on bigendian machines.)

   UNSIGNEDP is nonzero for an unsigned bit field (don't sign-extend value).
   If TARGET is nonzero, attempts to store the value there
   and return TARGET, but this is not guaranteed.
   If TARGET is not used, create a pseudo-reg of mode TMODE for the value.  */

static rtx
extract_fixed_bit_field (tmode, op0, offset, bitsize, bitpos,
			 target, unsignedp)
     enum machine_mode tmode;
     rtx op0, target;
     unsigned HOST_WIDE_INT offset, bitsize, bitpos;
     int unsignedp;
{
  unsigned int total_bits = BITS_PER_WORD;
  enum machine_mode mode;

  if (GET_CODE (op0) == SUBREG || GET_CODE (op0) == REG)
    {
      /* Special treatment for a bit field split across two registers.  */
      if (bitsize + bitpos > BITS_PER_WORD)
	return extract_split_bit_field (op0, bitsize, bitpos, unsignedp);
    }
  else
    {
      /* Get the proper mode to use for this field.  We want a mode that
	 includes the entire field.  If such a mode would be larger than
	 a word, we won't be doing the extraction the normal way.  */

      mode = get_best_mode (bitsize, bitpos + offset * BITS_PER_UNIT,
			    MEM_ALIGN (op0), word_mode, MEM_VOLATILE_P (op0));

      if (mode == VOIDmode)
	/* The only way this should occur is if the field spans word
	   boundaries.  */
	return extract_split_bit_field (op0, bitsize,
					bitpos + offset * BITS_PER_UNIT,
					unsignedp);

      total_bits = GET_MODE_BITSIZE (mode);

      /* Make sure bitpos is valid for the chosen mode.  Adjust BITPOS to
	 be in the range 0 to total_bits-1, and put any excess bytes in
	 OFFSET.  */
      if (bitpos >= total_bits)
	{
	  offset += (bitpos / total_bits) * (total_bits / BITS_PER_UNIT);
	  bitpos -= ((bitpos / total_bits) * (total_bits / BITS_PER_UNIT)
		     * BITS_PER_UNIT);
	}

      /* Get ref to an aligned byte, halfword, or word containing the field.
	 Adjust BITPOS to be position within a word,
	 and OFFSET to be the offset of that word.
	 Then alter OP0 to refer to that word.  */
      bitpos += (offset % (total_bits / BITS_PER_UNIT)) * BITS_PER_UNIT;
      offset -= (offset % (total_bits / BITS_PER_UNIT));
      op0 = adjust_address (op0, mode, offset);
    }

  mode = GET_MODE (op0);

  if (BYTES_BIG_ENDIAN)
    /* BITPOS is the distance between our msb and that of OP0.
       Convert it to the distance from the lsb.  */
    bitpos = total_bits - bitsize - bitpos;

  /* Now BITPOS is always the distance between the field's lsb and that of OP0.
     We have reduced the big-endian case to the little-endian case.  */

  if (unsignedp)
    {
      if (bitpos)
	{
	  /* If the field does not already start at the lsb,
	     shift it so it does.  */
	  tree amount = build_int_2 (bitpos, 0);
	  /* Maybe propagate the target for the shift.  */
	  /* But not if we will return it--could confuse integrate.c.  */
	  rtx subtarget = (target != 0 && GET_CODE (target) == REG
			   && !REG_FUNCTION_VALUE_P (target)
			   ? target : 0);
	  if (tmode != mode) subtarget = 0;
	  op0 = expand_shift (RSHIFT_EXPR, mode, op0, amount, subtarget, 1);
	}
      /* Convert the value to the desired mode.  */
      if (mode != tmode)
	op0 = convert_to_mode (tmode, op0, 1);

      /* Unless the msb of the field used to be the msb when we shifted,
	 mask out the upper bits.  */

      if (GET_MODE_BITSIZE (mode) != bitpos + bitsize)
	return expand_binop (GET_MODE (op0), and_optab, op0,
			     mask_rtx (GET_MODE (op0), 0, bitsize, 0),
			     target, 1, OPTAB_LIB_WIDEN);
      return op0;
    }

  /* To extract a signed bit-field, first shift its msb to the msb of the word,
     then arithmetic-shift its lsb to the lsb of the word.  */
  op0 = force_reg (mode, op0);
  if (mode != tmode)
    target = 0;

  /* Find the narrowest integer mode that contains the field.  */

  for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != VOIDmode;
       mode = GET_MODE_WIDER_MODE (mode))
    if (GET_MODE_BITSIZE (mode) >= bitsize + bitpos)
      {
	op0 = convert_to_mode (mode, op0, 0);
	break;
      }

  if (GET_MODE_BITSIZE (mode) != (bitsize + bitpos))
    {
      tree amount
	= build_int_2 (GET_MODE_BITSIZE (mode) - (bitsize + bitpos), 0);
      /* Maybe propagate the target for the shift.  */
      /* But not if we will return the result--could confuse integrate.c.  */
      rtx subtarget = (target != 0 && GET_CODE (target) == REG
		       && ! REG_FUNCTION_VALUE_P (target)
		       ? target : 0);
      op0 = expand_shift (LSHIFT_EXPR, mode, op0, amount, subtarget, 1);
    }

  return expand_shift (RSHIFT_EXPR, mode, op0,
		       build_int_2 (GET_MODE_BITSIZE (mode) - bitsize, 0),
		       target, 0);
}
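/* Worked example of the signed case above: extracting a signed 8-bit
   field at BITPOS 4 from a 32-bit word shifts left by
   32 - (8 + 4) = 20 to park the field's msb in the word's msb, then
   arithmetic-shifts right by 32 - 8 = 24 to sign-extend.  */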
/* Return a constant integer (CONST_INT or CONST_DOUBLE) mask value
   of mode MODE with BITSIZE ones followed by BITPOS zeros, or the
   complement of that if COMPLEMENT.  The mask is truncated if
   necessary to the width of mode MODE.  The mask is zero-extended if
   BITSIZE+BITPOS is too small for MODE.  */

static rtx
mask_rtx (mode, bitpos, bitsize, complement)
     enum machine_mode mode;
     int bitpos, bitsize, complement;
{
  HOST_WIDE_INT masklow, maskhigh;

  if (bitpos < HOST_BITS_PER_WIDE_INT)
    masklow = (HOST_WIDE_INT) -1 << bitpos;
  else
    masklow = 0;

  if (bitpos + bitsize < HOST_BITS_PER_WIDE_INT)
    masklow &= ((unsigned HOST_WIDE_INT) -1
		>> (HOST_BITS_PER_WIDE_INT - bitpos - bitsize));

  if (bitpos <= HOST_BITS_PER_WIDE_INT)
    maskhigh = -1;
  else
    maskhigh = (HOST_WIDE_INT) -1 << (bitpos - HOST_BITS_PER_WIDE_INT);

  if (bitpos + bitsize > HOST_BITS_PER_WIDE_INT)
    maskhigh &= ((unsigned HOST_WIDE_INT) -1
		 >> (2 * HOST_BITS_PER_WIDE_INT - bitpos - bitsize));
  else
    maskhigh = 0;

  if (complement)
    {
      masklow = ~masklow;
      maskhigh = ~maskhigh;
    }

  return immed_double_const (masklow, maskhigh, mode);
}
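/* Worked example (illustrative, 32-bit HOST_WIDE_INT): mask_rtx
   (SImode, 4, 8, 0) yields 0x00000ff0, i.e. eight ones starting at
   bit 4, and mask_rtx (SImode, 4, 8, 1) yields the complement
   0xfffff00f.  */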
/* Return a constant integer (CONST_INT or CONST_DOUBLE) rtx with the value
   VALUE truncated to BITSIZE bits and then shifted left BITPOS bits.  */

static rtx
lshift_value (mode, value, bitpos, bitsize)
     enum machine_mode mode;
     rtx value;
     int bitpos, bitsize;
{
  unsigned HOST_WIDE_INT v = INTVAL (value);
  HOST_WIDE_INT low, high;

  if (bitsize < HOST_BITS_PER_WIDE_INT)
    v &= ~((HOST_WIDE_INT) -1 << bitsize);

  if (bitpos < HOST_BITS_PER_WIDE_INT)
    {
      low = v << bitpos;
      high = (bitpos > 0 ? (v >> (HOST_BITS_PER_WIDE_INT - bitpos)) : 0);
    }
  else
    {
      low = 0;
      high = v << (bitpos - HOST_BITS_PER_WIDE_INT);
    }

  return immed_double_const (low, high, mode);
}
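/* Worked example (illustrative): lshift_value (SImode, GEN_INT (0x123),
   8, 4) truncates the value to its low 4 bits (0x3), then shifts left
   by 8, yielding (const_int 0x300).  */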
/* Extract a bit field that is split across two words
   and return an RTX for the result.

   OP0 is the REG, SUBREG or MEM rtx for the first of the two words.
   BITSIZE is the field width; BITPOS, position of its first bit, in the word.
   UNSIGNEDP is 1 if should zero-extend the contents; else sign-extend.  */

static rtx
extract_split_bit_field (op0, bitsize, bitpos, unsignedp)
     rtx op0;
     unsigned HOST_WIDE_INT bitsize, bitpos;
     int unsignedp;
{
  unsigned int unit;
  unsigned int bitsdone = 0;
  rtx result = NULL_RTX;
  int first = 1;

  /* Make sure UNIT isn't larger than BITS_PER_WORD, we can only handle that
     much at a time.  */
  if (GET_CODE (op0) == REG || GET_CODE (op0) == SUBREG)
    unit = BITS_PER_WORD;
  else
    unit = MIN (MEM_ALIGN (op0), BITS_PER_WORD);

  while (bitsdone < bitsize)
    {
      unsigned HOST_WIDE_INT thissize;
      rtx part, word;
      unsigned HOST_WIDE_INT thispos;
      unsigned HOST_WIDE_INT offset;

      offset = (bitpos + bitsdone) / unit;
      thispos = (bitpos + bitsdone) % unit;

      /* THISSIZE must not overrun a word boundary.  Otherwise,
	 extract_fixed_bit_field will call us again, and we will mutually
	 recurse forever.  */
      thissize = MIN (bitsize - bitsdone, BITS_PER_WORD);
      thissize = MIN (thissize, unit - thispos);

      /* If OP0 is a register, then handle OFFSET here.

	 When handling multiword bitfields, extract_bit_field may pass
	 down a word_mode SUBREG of a larger REG for a bitfield that actually
	 crosses a word boundary.  Thus, for a SUBREG, we must find
	 the current word starting from the base register.  */
      if (GET_CODE (op0) == SUBREG)
	{
	  int word_offset = (SUBREG_BYTE (op0) / UNITS_PER_WORD) + offset;
	  word = operand_subword_force (SUBREG_REG (op0), word_offset,
					GET_MODE (SUBREG_REG (op0)));
	  offset = 0;
	}
      else if (GET_CODE (op0) == REG)
	{
	  word = operand_subword_force (op0, offset, GET_MODE (op0));
	  offset = 0;
	}
      else
	word = op0;

      /* Extract the parts in bit-counting order,
	 whose meaning is determined by BYTES_PER_UNIT.
	 OFFSET is in UNITs, and UNIT is in bits.
	 extract_fixed_bit_field wants offset in bytes.  */
      part = extract_fixed_bit_field (word_mode, word,
				      offset * unit / BITS_PER_UNIT,
				      thissize, thispos, 0, 1);
      bitsdone += thissize;

      /* Shift this part into place for the result.  */
      if (BYTES_BIG_ENDIAN)
	{
	  if (bitsize != bitsdone)
	    part = expand_shift (LSHIFT_EXPR, word_mode, part,
				 build_int_2 (bitsize - bitsdone, 0), 0, 1);
	}
      else
	{
	  if (bitsdone != thissize)
	    part = expand_shift (LSHIFT_EXPR, word_mode, part,
				 build_int_2 (bitsdone - thissize, 0), 0, 1);
	}

      if (first)
	result = part;
      else
	/* Combine the parts with bitwise or.  This works
	   because we extracted each part as an unsigned bit field.  */
	result = expand_binop (word_mode, ior_optab, part, result, NULL_RTX, 1,
			       OPTAB_LIB_WIDEN);

      first = 0;
    }

  /* Unsigned bit field: we are done.  */
  if (unsignedp)
    return result;
  /* Signed bit field: sign-extend with two arithmetic shifts.  */
  result = expand_shift (LSHIFT_EXPR, word_mode, result,
			 build_int_2 (BITS_PER_WORD - bitsize, 0),
			 NULL_RTX, 0);
  return expand_shift (RSHIFT_EXPR, word_mode, result,
		       build_int_2 (BITS_PER_WORD - bitsize, 0), NULL_RTX, 0);
}
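/* Worked example (illustrative): with BITS_PER_WORD == 32, a signed
   10-bit field at BITPOS 28 is fetched as a 4-bit piece from word 0
   and a 6-bit piece from word 1, IORed together, and then sign-extended
   by the pair of shifts above.  */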
/* Add INC into TARGET.  */

void
expand_inc (target, inc)
     rtx target, inc;
{
  rtx value = expand_binop (GET_MODE (target), add_optab,
			    target, inc,
			    target, 0, OPTAB_LIB_WIDEN);
  if (value != target)
    emit_move_insn (target, value);
}

/* Subtract DEC from TARGET.  */

void
expand_dec (target, dec)
     rtx target, dec;
{
  rtx value = expand_binop (GET_MODE (target), sub_optab,
			    target, dec,
			    target, 0, OPTAB_LIB_WIDEN);
  if (value != target)
    emit_move_insn (target, value);
}
/* Output a shift instruction for expression code CODE,
   with SHIFTED being the rtx for the value to shift,
   and AMOUNT the tree for the amount to shift by.
   Store the result in the rtx TARGET, if that is convenient.
   If UNSIGNEDP is nonzero, do a logical shift; otherwise, arithmetic.
   Return the rtx for where the value is.  */

rtx
expand_shift (code, mode, shifted, amount, target, unsignedp)
     enum tree_code code;
     enum machine_mode mode;
     rtx shifted;
     tree amount;
     rtx target;
     int unsignedp;
{
  rtx op1, temp = 0;
  int left = (code == LSHIFT_EXPR || code == LROTATE_EXPR);
  int rotate = (code == LROTATE_EXPR || code == RROTATE_EXPR);
  int try;

  /* Previously detected shift-counts computed by NEGATE_EXPR
     and shifted in the other direction; but that does not work
     on all machines.  */

  op1 = expand_expr (amount, NULL_RTX, VOIDmode, 0);

#ifdef SHIFT_COUNT_TRUNCATED
  if (SHIFT_COUNT_TRUNCATED)
    {
      if (GET_CODE (op1) == CONST_INT
          && ((unsigned HOST_WIDE_INT) INTVAL (op1) >=
              (unsigned HOST_WIDE_INT) GET_MODE_BITSIZE (mode)))
        op1 = GEN_INT ((unsigned HOST_WIDE_INT) INTVAL (op1)
                       % GET_MODE_BITSIZE (mode));
      else if (GET_CODE (op1) == SUBREG
               && subreg_lowpart_p (op1))
        op1 = SUBREG_REG (op1);
    }
#endif

  if (op1 == const0_rtx)
    return shifted;

  for (try = 0; temp == 0 && try < 3; try++)
    {
      enum optab_methods methods;

      if (try == 0)
        methods = OPTAB_DIRECT;
      else if (try == 1)
        methods = OPTAB_WIDEN;
      else
        methods = OPTAB_LIB_WIDEN;

      if (rotate)
        {
          /* Widening does not work for rotation.  */
          if (methods == OPTAB_WIDEN)
            continue;
          else if (methods == OPTAB_LIB_WIDEN)
            {
              /* If we have been unable to open-code this by a rotation,
                 do it as the IOR of two shifts.  I.e., to rotate A
                 by N bits, compute (A << N) | ((unsigned) A >> (C - N))
                 where C is the bitsize of A.

                 It is theoretically possible that the target machine might
                 not be able to perform either shift and hence we would
                 be making two libcalls rather than just the one for the
                 shift (similarly if IOR could not be done).  We will allow
                 this extremely unlikely lossage to avoid complicating the
                 code below.  */

              rtx subtarget = target == shifted ? 0 : target;
              rtx temp1;
              tree type = TREE_TYPE (amount);
              tree new_amount = make_tree (type, op1);
              tree other_amount
                = fold (build (MINUS_EXPR, type,
                               convert (type,
                                        build_int_2 (GET_MODE_BITSIZE (mode),
                                                     0)),
                               amount));

              shifted = force_reg (mode, shifted);

              temp = expand_shift (left ? LSHIFT_EXPR : RSHIFT_EXPR,
                                   mode, shifted, new_amount, subtarget, 1);
              temp1 = expand_shift (left ? RSHIFT_EXPR : LSHIFT_EXPR,
                                    mode, shifted, other_amount, 0, 1);
              return expand_binop (mode, ior_optab, temp, temp1, target,
                                   unsignedp, methods);
            }

          temp = expand_binop (mode,
                               left ? rotl_optab : rotr_optab,
                               shifted, op1, target, unsignedp, methods);

          /* If we don't have the rotate, but we are rotating by a constant
             that is in range, try a rotate in the opposite direction.  */

          if (temp == 0 && GET_CODE (op1) == CONST_INT
              && INTVAL (op1) > 0
              && (unsigned int) INTVAL (op1) < GET_MODE_BITSIZE (mode))
            temp = expand_binop (mode,
                                 left ? rotr_optab : rotl_optab,
                                 shifted,
                                 GEN_INT (GET_MODE_BITSIZE (mode)
                                          - INTVAL (op1)),
                                 target, unsignedp, methods);
        }
      else if (unsignedp)
        temp = expand_binop (mode,
                             left ? ashl_optab : lshr_optab,
                             shifted, op1, target, unsignedp, methods);

      /* Do arithmetic shifts.
         Also, if we are going to widen the operand, we can just as well
         use an arithmetic right-shift instead of a logical one.  */
      if (temp == 0 && ! rotate
          && (! unsignedp || (! left && methods == OPTAB_WIDEN)))
        {
          enum optab_methods methods1 = methods;

          /* If trying to widen a log shift to an arithmetic shift,
             don't accept an arithmetic shift of the same size.  */
          if (unsignedp)
            methods1 = OPTAB_MUST_WIDEN;

          /* Arithmetic shift */

          temp = expand_binop (mode,
                               left ? ashl_optab : ashr_optab,
                               shifted, op1, target, unsignedp, methods1);
        }

      /* We used to try extzv here for logical right shifts, but that was
         only useful for one machine, the VAX, and caused poor code
         generation there for lshrdi3, so the code was deleted and a
         define_expand for lshrsi3 was added to vax.md.  */
    }

  if (temp == 0)
    abort ();
  return temp;
}
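/* Example of the IOR-of-shifts fallback above: rotating a 32-bit value A
   left by 13 becomes (A << 13) | ((unsigned) A >> 19), since 32 - 13 == 19.
   The two partial results occupy disjoint bit ranges, so IOR combines them
   into the exact rotation.  */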
enum alg_code { alg_zero, alg_m, alg_shift,
                alg_add_t_m2, alg_sub_t_m2,
                alg_add_factor, alg_sub_factor,
                alg_add_t2_m, alg_sub_t2_m,
                alg_add, alg_subtract, alg_factor, alg_shiftop };

/* This structure records a sequence of operations.
   `ops' is the number of operations recorded.
   `cost' is their total cost.
   The operations are stored in `op' and the corresponding
   logarithms of the integer coefficients in `log'.

   These are the operations:
   alg_zero             total := 0;
   alg_m                total := multiplicand;
   alg_shift            total := total * coeff;
   alg_add_t_m2         total := total + multiplicand * coeff;
   alg_sub_t_m2         total := total - multiplicand * coeff;
   alg_add_factor       total := total * coeff + total;
   alg_sub_factor       total := total * coeff - total;
   alg_add_t2_m         total := total * coeff + multiplicand;
   alg_sub_t2_m         total := total * coeff - multiplicand;

   The first operand must be either alg_zero or alg_m.  */

struct algorithm
{
  short cost;
  short ops;
  /* The size of the OP and LOG fields are not directly related to the
     word size, but the worst-case algorithms will be if we have few
     consecutive ones or zeros, i.e., a multiplicand like 10101010101...
     In that case we will generate shift-by-2, add, shift-by-2, add,...,
     in total wordsize operations.  */
  enum alg_code op[MAX_BITS_PER_WORD];
  char log[MAX_BITS_PER_WORD];
};
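/* Example: multiplying by 10 can be recorded as
   op = { alg_m, alg_add_factor, alg_shift }, log = { 0, 2, 1 }:
   total := x; total := total * 4 + total (= 5x); total := total * 2
   (= 10x) -- two shift-add style operations instead of a multiply insn.  */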
static void synth_mult
  PARAMS ((struct algorithm *, unsigned HOST_WIDE_INT, int));
static unsigned HOST_WIDE_INT choose_multiplier
  PARAMS ((unsigned HOST_WIDE_INT, int, int,
           unsigned HOST_WIDE_INT *, int *, int *));
static unsigned HOST_WIDE_INT invert_mod2n
  PARAMS ((unsigned HOST_WIDE_INT, int));
/* Compute and return the best algorithm for multiplying by T.
   The algorithm must cost less than cost_limit
   If retval.cost >= COST_LIMIT, no algorithm was found and all
   other field of the returned struct are undefined.  */

static void
synth_mult (alg_out, t, cost_limit)
     struct algorithm *alg_out;
     unsigned HOST_WIDE_INT t;
     int cost_limit;
{
  int m;
  struct algorithm *alg_in, *best_alg;
  int cost;
  unsigned HOST_WIDE_INT q;

  /* Indicate that no algorithm is yet found.  If no algorithm
     is found, this value will be returned and indicate failure.  */
  alg_out->cost = cost_limit;

  if (cost_limit <= 0)
    return;

  /* t == 1 can be done in zero cost.  */
  if (t == 1)
    {
      alg_out->ops = 1;
      alg_out->cost = 0;
      alg_out->op[0] = alg_m;
      return;
    }

  /* t == 0 sometimes has a cost.  If it does and it exceeds our limit,
     fail now.  */
  if (t == 0)
    {
      if (zero_cost >= cost_limit)
        return;
      else
        {
          alg_out->ops = 1;
          alg_out->cost = zero_cost;
          alg_out->op[0] = alg_zero;
          return;
        }
    }

  /* We'll be needing a couple extra algorithm structures now.  */

  alg_in = (struct algorithm *)alloca (sizeof (struct algorithm));
  best_alg = (struct algorithm *)alloca (sizeof (struct algorithm));

  /* If we have a group of zero bits at the low-order part of T, try
     multiplying by the remaining bits and then doing a shift.  */

  if ((t & 1) == 0)
    {
      m = floor_log2 (t & -t);  /* m = number of low zero bits */
      if (m < BITS_PER_WORD)
        {
          q = t >> m;
          cost = shift_cost[m];
          synth_mult (alg_in, q, cost_limit - cost);

          cost += alg_in->cost;
          if (cost < cost_limit)
            {
              struct algorithm *x;
              x = alg_in, alg_in = best_alg, best_alg = x;
              best_alg->log[best_alg->ops] = m;
              best_alg->op[best_alg->ops] = alg_shift;
              cost_limit = cost;
            }
        }
    }

  /* If we have an odd number, add or subtract one.  */
  if ((t & 1) != 0)
    {
      unsigned HOST_WIDE_INT w;

      for (w = 1; (w & t) != 0; w <<= 1)
        ;
      /* If T was -1, then W will be zero after the loop.  This is another
         case where T ends with ...111.  Handling this with (T + 1) and
         subtract 1 produces slightly better code and results in algorithm
         selection much faster than treating it like the ...0111 case
         below.  */
      if (w == 0
          || (w > 2
              /* Reject the case where t is 3.
                 Thus we prefer addition in that case.  */
              && t != 3))
        {
          /* T ends with ...111.  Multiply by (T + 1) and subtract 1.  */

          cost = add_cost;
          synth_mult (alg_in, t + 1, cost_limit - cost);

          cost += alg_in->cost;
          if (cost < cost_limit)
            {
              struct algorithm *x;
              x = alg_in, alg_in = best_alg, best_alg = x;
              best_alg->log[best_alg->ops] = 0;
              best_alg->op[best_alg->ops] = alg_sub_t_m2;
              cost_limit = cost;
            }
        }
      else
        {
          /* T ends with ...01 or ...011.  Multiply by (T - 1) and add 1.  */

          cost = add_cost;
          synth_mult (alg_in, t - 1, cost_limit - cost);

          cost += alg_in->cost;
          if (cost < cost_limit)
            {
              struct algorithm *x;
              x = alg_in, alg_in = best_alg, best_alg = x;
              best_alg->log[best_alg->ops] = 0;
              best_alg->op[best_alg->ops] = alg_add_t_m2;
              cost_limit = cost;
            }
        }
    }

  /* Look for factors of t of the form
     t = q(2**m +- 1), 2 <= m <= floor(log2(t - 1)).
     If we find such a factor, we can multiply by t using an algorithm that
     multiplies by q, shift the result by m and add/subtract it to itself.

     We search for large factors first and loop down, even if large factors
     are less probable than small; if we find a large factor we will find a
     good sequence quickly, and therefore be able to prune (by decreasing
     COST_LIMIT) the search.  */

  for (m = floor_log2 (t - 1); m >= 2; m--)
    {
      unsigned HOST_WIDE_INT d;

      d = ((unsigned HOST_WIDE_INT) 1 << m) + 1;
      if (t % d == 0 && t > d && m < BITS_PER_WORD)
        {
          cost = MIN (shiftadd_cost[m], add_cost + shift_cost[m]);
          synth_mult (alg_in, t / d, cost_limit - cost);

          cost += alg_in->cost;
          if (cost < cost_limit)
            {
              struct algorithm *x;
              x = alg_in, alg_in = best_alg, best_alg = x;
              best_alg->log[best_alg->ops] = m;
              best_alg->op[best_alg->ops] = alg_add_factor;
              cost_limit = cost;
            }
          /* Other factors will have been taken care of in the recursion.  */
          break;
        }

      d = ((unsigned HOST_WIDE_INT) 1 << m) - 1;
      if (t % d == 0 && t > d && m < BITS_PER_WORD)
        {
          cost = MIN (shiftsub_cost[m], add_cost + shift_cost[m]);
          synth_mult (alg_in, t / d, cost_limit - cost);

          cost += alg_in->cost;
          if (cost < cost_limit)
            {
              struct algorithm *x;
              x = alg_in, alg_in = best_alg, best_alg = x;
              best_alg->log[best_alg->ops] = m;
              best_alg->op[best_alg->ops] = alg_sub_factor;
              cost_limit = cost;
            }
          break;
        }
    }

  /* Try shift-and-add (load effective address) instructions,
     i.e. do a*3, a*5, a*9.  */
  if ((t & 1) != 0)
    {
      q = t - 1;
      q = q & -q;
      m = exact_log2 (q);
      if (m >= 0 && m < BITS_PER_WORD)
        {
          cost = shiftadd_cost[m];
          synth_mult (alg_in, (t - 1) >> m, cost_limit - cost);

          cost += alg_in->cost;
          if (cost < cost_limit)
            {
              struct algorithm *x;
              x = alg_in, alg_in = best_alg, best_alg = x;
              best_alg->log[best_alg->ops] = m;
              best_alg->op[best_alg->ops] = alg_add_t2_m;
              cost_limit = cost;
            }
        }

      q = t + 1;
      q = q & -q;
      m = exact_log2 (q);
      if (m >= 0 && m < BITS_PER_WORD)
        {
          cost = shiftsub_cost[m];
          synth_mult (alg_in, (t + 1) >> m, cost_limit - cost);

          cost += alg_in->cost;
          if (cost < cost_limit)
            {
              struct algorithm *x;
              x = alg_in, alg_in = best_alg, best_alg = x;
              best_alg->log[best_alg->ops] = m;
              best_alg->op[best_alg->ops] = alg_sub_t2_m;
              cost_limit = cost;
            }
        }
    }

  /* If cost_limit has not decreased since we stored it in alg_out->cost,
     we have not found any algorithm.  */
  if (cost_limit == alg_out->cost)
    return;

  /* If we are getting a too long sequence for `struct algorithm'
     to record, make this search fail.  */
  if (best_alg->ops == MAX_BITS_PER_WORD)
    return;

  /* Copy the algorithm from temporary space to the space at alg_out.
     We avoid using structure assignment because the majority of
     best_alg is normally undefined, and this is a critical function.  */
  alg_out->ops = best_alg->ops + 1;
  alg_out->cost = cost_limit;
  memcpy (alg_out->op, best_alg->op,
          alg_out->ops * sizeof *alg_out->op);
  memcpy (alg_out->log, best_alg->log,
          alg_out->ops * sizeof *alg_out->log);
}
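/* Example of the add-or-subtract-one step: for t == 7 (binary ...111),
   synth_mult recurses on t + 1 == 8 and records alg_sub_t_m2, giving
   7x == 8x - x == (x << 3) - x: one shift and one subtract.  */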
/* Perform a multiplication and return an rtx for the result.
   MODE is mode of value; OP0 and OP1 are what to multiply (rtx's);
   TARGET is a suggestion for where to store the result (an rtx).

   We check specially for a constant integer as OP1.
   If you want this check for OP0 as well, then before calling
   you should swap the two operands if OP0 would be constant.  */

rtx
expand_mult (mode, op0, op1, target, unsignedp)
     enum machine_mode mode;
     rtx op0, op1, target;
     int unsignedp;
{
  rtx const_op1 = op1;

  /* synth_mult does an `unsigned int' multiply.  As long as the mode is
     less than or equal in size to `unsigned int' this doesn't matter.
     If the mode is larger than `unsigned int', then synth_mult works only
     if the constant value exactly fits in an `unsigned int' without any
     truncation.  This means that multiplying by negative values does
     not work; results are off by 2^32 on a 32 bit machine.  */

  /* If we are multiplying in DImode, it may still be a win
     to try to work with shifts and adds.  */
  if (GET_CODE (op1) == CONST_DOUBLE
      && GET_MODE_CLASS (GET_MODE (op1)) == MODE_INT
      && HOST_BITS_PER_INT >= BITS_PER_WORD
      && CONST_DOUBLE_HIGH (op1) == 0)
    const_op1 = GEN_INT (CONST_DOUBLE_LOW (op1));
  else if (HOST_BITS_PER_INT < GET_MODE_BITSIZE (mode)
           && GET_CODE (op1) == CONST_INT
           && INTVAL (op1) < 0)
    const_op1 = 0;

  /* We used to test optimize here, on the grounds that it's better to
     produce a smaller program when -O is not used.
     But this causes such a terrible slowdown sometimes
     that it seems better to use synth_mult always.  */

  if (const_op1 && GET_CODE (const_op1) == CONST_INT
      && (unsignedp || ! flag_trapv))
    {
      struct algorithm alg;
      struct algorithm alg2;
      HOST_WIDE_INT val = INTVAL (op1);
      HOST_WIDE_INT val_so_far;
      rtx insn;
      int mult_cost;
      enum {basic_variant, negate_variant, add_variant}
        variant = basic_variant;

      /* op0 must be register to make mult_cost match the precomputed
         shiftadd_cost array.  */
      op0 = force_reg (mode, op0);

      /* Try to do the computation three ways: multiply by the negative of OP1
         and then negate, do the multiplication directly, or do multiplication
         by OP1 - 1.  */

      mult_cost = rtx_cost (gen_rtx_MULT (mode, op0, op1), SET);
      mult_cost = MIN (12 * add_cost, mult_cost);

      synth_mult (&alg, val, mult_cost);

      /* This works only if the inverted value actually fits in an
         `unsigned int' */
      if (HOST_BITS_PER_INT >= GET_MODE_BITSIZE (mode))
        {
          synth_mult (&alg2, - val,
                      (alg.cost < mult_cost ? alg.cost : mult_cost)
                      - negate_cost);
          if (alg2.cost + negate_cost < alg.cost)
            alg = alg2, variant = negate_variant;
        }

      /* This proves very useful for division-by-constant.  */
      synth_mult (&alg2, val - 1,
                  (alg.cost < mult_cost ? alg.cost : mult_cost) - add_cost);
      if (alg2.cost + add_cost < alg.cost)
        alg = alg2, variant = add_variant;

      if (alg.cost < mult_cost)
        {
          /* We found something cheaper than a multiply insn.  */
          int opno;
          rtx accum, tem;
          enum machine_mode nmode;

          op0 = protect_from_queue (op0, 0);

          /* Avoid referencing memory over and over.
             For speed, but also for correctness when mem is volatile.  */
          if (GET_CODE (op0) == MEM)
            op0 = force_reg (mode, op0);

          /* ACCUM starts out either as OP0 or as a zero, depending on
             the first operation.  */

          if (alg.op[0] == alg_zero)
            {
              accum = copy_to_mode_reg (mode, const0_rtx);
              val_so_far = 0;
            }
          else if (alg.op[0] == alg_m)
            {
              accum = copy_to_mode_reg (mode, op0);
              val_so_far = 1;
            }
          else
            abort ();

          for (opno = 1; opno < alg.ops; opno++)
            {
              int log = alg.log[opno];
              int preserve = preserve_subexpressions_p ();
              rtx shift_subtarget = preserve ? 0 : accum;
              rtx add_target
                = (opno == alg.ops - 1 && target != 0
                   && variant != add_variant
                   && ! preserve)
                  ? target : 0;
              rtx accum_target = preserve ? 0 : accum;

              switch (alg.op[opno])
                {
                case alg_shift:
                  accum = expand_shift (LSHIFT_EXPR, mode, accum,
                                        build_int_2 (log, 0), NULL_RTX, 0);
                  val_so_far <<= log;
                  break;

                case alg_add_t_m2:
                  tem = expand_shift (LSHIFT_EXPR, mode, op0,
                                      build_int_2 (log, 0), NULL_RTX, 0);
                  accum = force_operand (gen_rtx_PLUS (mode, accum, tem),
                                         add_target
                                         ? add_target : accum_target);
                  val_so_far += (HOST_WIDE_INT) 1 << log;
                  break;

                case alg_sub_t_m2:
                  tem = expand_shift (LSHIFT_EXPR, mode, op0,
                                      build_int_2 (log, 0), NULL_RTX, 0);
                  accum = force_operand (gen_rtx_MINUS (mode, accum, tem),
                                         add_target
                                         ? add_target : accum_target);
                  val_so_far -= (HOST_WIDE_INT) 1 << log;
                  break;

                case alg_add_t2_m:
                  accum = expand_shift (LSHIFT_EXPR, mode, accum,
                                        build_int_2 (log, 0), shift_subtarget,
                                        0);
                  accum = force_operand (gen_rtx_PLUS (mode, accum, op0),
                                         add_target
                                         ? add_target : accum_target);
                  val_so_far = (val_so_far << log) + 1;
                  break;

                case alg_sub_t2_m:
                  accum = expand_shift (LSHIFT_EXPR, mode, accum,
                                        build_int_2 (log, 0), shift_subtarget,
                                        0);
                  accum = force_operand (gen_rtx_MINUS (mode, accum, op0),
                                         add_target
                                         ? add_target : accum_target);
                  val_so_far = (val_so_far << log) - 1;
                  break;

                case alg_add_factor:
                  tem = expand_shift (LSHIFT_EXPR, mode, accum,
                                      build_int_2 (log, 0), NULL_RTX, 0);
                  accum = force_operand (gen_rtx_PLUS (mode, accum, tem),
                                         add_target
                                         ? add_target : accum_target);
                  val_so_far += val_so_far << log;
                  break;

                case alg_sub_factor:
                  tem = expand_shift (LSHIFT_EXPR, mode, accum,
                                      build_int_2 (log, 0), NULL_RTX, 0);
                  accum = force_operand (gen_rtx_MINUS (mode, tem, accum),
                                         (add_target
                                          ? add_target
                                          : preserve ? 0 : tem));
                  val_so_far = (val_so_far << log) - val_so_far;
                  break;

                default:
                  abort ();
                }

              /* Write a REG_EQUAL note on the last insn so that we can cse
                 multiplication sequences.  Note that if ACCUM is a SUBREG,
                 we've set the inner register and must properly indicate
                 that.  */

              tem = op0, nmode = mode;
              if (GET_CODE (accum) == SUBREG)
                {
                  nmode = GET_MODE (SUBREG_REG (accum));
                  tem = gen_lowpart (nmode, op0);
                }

              insn = get_last_insn ();
              set_unique_reg_note (insn,
                                   REG_EQUAL,
                                   gen_rtx_MULT (nmode, tem,
                                                 GEN_INT (val_so_far)));
            }

          if (variant == negate_variant)
            {
              val_so_far = - val_so_far;
              accum = expand_unop (mode, neg_optab, accum, target, 0);
            }
          else if (variant == add_variant)
            {
              val_so_far = val_so_far + 1;
              accum = force_operand (gen_rtx_PLUS (mode, accum, op0), target);
            }

          if (val != val_so_far)
            abort ();

          return accum;
        }
    }

  /* This used to use umul_optab if unsigned, but for non-widening multiply
     there is no difference between signed and unsigned.  */
  op0 = expand_binop (mode,
                      ! unsignedp
                      && flag_trapv && (GET_MODE_CLASS(mode) == MODE_INT)
                      ? smulv_optab : smul_optab,
                      op0, op1, target, unsignedp, OPTAB_LIB_WIDEN);
  if (op0 == 0)
    abort ();
  return op0;
}
/* Return the smallest n such that 2**n >= X.  */

int
ceil_log2 (x)
     unsigned HOST_WIDE_INT x;
{
  return floor_log2 (x - 1) + 1;
}
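/* Example: ceil_log2 (7) == floor_log2 (6) + 1 == 2 + 1 == 3, and
   ceil_log2 (8) == floor_log2 (7) + 1 == 3, since 2**3 >= 8 exactly.  */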
/* Choose a minimal N + 1 bit approximation to 1/D that can be used to
   replace division by D, and put the least significant N bits of the result
   in *MULTIPLIER_PTR and return the most significant bit.

   The width of operations is N (should be <= HOST_BITS_PER_WIDE_INT), the
   needed precision is in PRECISION (should be <= N).

   PRECISION should be as small as possible so this function can choose
   multiplier more freely.

   The rounded-up logarithm of D is placed in *lgup_ptr.  A shift count that
   is to be used for a final right shift is placed in *POST_SHIFT_PTR.

   Using this function, x/D will be equal to (x * m) >> (*POST_SHIFT_PTR),
   where m is the full HOST_BITS_PER_WIDE_INT + 1 bit multiplier.  */

static
unsigned HOST_WIDE_INT
choose_multiplier (d, n, precision, multiplier_ptr, post_shift_ptr, lgup_ptr)
     unsigned HOST_WIDE_INT d;
     int n;
     int precision;
     unsigned HOST_WIDE_INT *multiplier_ptr;
     int *post_shift_ptr;
     int *lgup_ptr;
{
  HOST_WIDE_INT mhigh_hi, mlow_hi;
  unsigned HOST_WIDE_INT mhigh_lo, mlow_lo;
  int lgup, post_shift;
  int pow, pow2;
  unsigned HOST_WIDE_INT nl, dummy1;
  HOST_WIDE_INT nh, dummy2;

  /* lgup = ceil(log2(divisor)); */
  lgup = ceil_log2 (d);

  if (lgup > n)
    abort ();

  pow = n + lgup;
  pow2 = n + lgup - precision;

  if (pow == 2 * HOST_BITS_PER_WIDE_INT)
    {
      /* We could handle this with some effort, but this case is much better
         handled directly with a scc insn, so rely on caller using that.  */
      abort ();
    }

  /* mlow = 2^(N + lgup)/d */
  if (pow >= HOST_BITS_PER_WIDE_INT)
    {
      nh = (HOST_WIDE_INT) 1 << (pow - HOST_BITS_PER_WIDE_INT);
      nl = 0;
    }
  else
    {
      nh = 0;
      nl = (unsigned HOST_WIDE_INT) 1 << pow;
    }
  div_and_round_double (TRUNC_DIV_EXPR, 1, nl, nh, d, (HOST_WIDE_INT) 0,
                        &mlow_lo, &mlow_hi, &dummy1, &dummy2);

  /* mhigh = (2^(N + lgup) + 2^N + lgup - precision)/d */
  if (pow2 >= HOST_BITS_PER_WIDE_INT)
    nh |= (HOST_WIDE_INT) 1 << (pow2 - HOST_BITS_PER_WIDE_INT);
  else
    nl |= (unsigned HOST_WIDE_INT) 1 << pow2;
  div_and_round_double (TRUNC_DIV_EXPR, 1, nl, nh, d, (HOST_WIDE_INT) 0,
                        &mhigh_lo, &mhigh_hi, &dummy1, &dummy2);

  if (mhigh_hi && nh - d >= d)
    abort ();
  if (mhigh_hi > 1 || mlow_hi > 1)
    abort ();
  /* assert that mlow < mhigh.  */
  if (! (mlow_hi < mhigh_hi || (mlow_hi == mhigh_hi && mlow_lo < mhigh_lo)))
    abort ();

  /* If precision == N, then mlow, mhigh exceed 2^N
     (but they do not exceed 2^(N+1)).  */

  /* Reduce to lowest terms */
  for (post_shift = lgup; post_shift > 0; post_shift--)
    {
      unsigned HOST_WIDE_INT ml_lo
        = (mlow_hi << (HOST_BITS_PER_WIDE_INT - 1)) | (mlow_lo >> 1);
      unsigned HOST_WIDE_INT mh_lo
        = (mhigh_hi << (HOST_BITS_PER_WIDE_INT - 1)) | (mhigh_lo >> 1);
      if (ml_lo >= mh_lo)
        break;

      mlow_hi = 0;
      mlow_lo = ml_lo;
      mhigh_hi = 0;
      mhigh_lo = mh_lo;
    }

  *post_shift_ptr = post_shift;
  *lgup_ptr = lgup;
  if (n < HOST_BITS_PER_WIDE_INT)
    {
      unsigned HOST_WIDE_INT mask = ((unsigned HOST_WIDE_INT) 1 << n) - 1;
      *multiplier_ptr = mhigh_lo & mask;
      return mhigh_lo >= mask;
    }
  else
    {
      *multiplier_ptr = mhigh_lo;
      return mhigh_hi;
    }
}
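/* Example: for d == 7 with n == precision == 32, lgup == 3, so
   mlow == 2^35/7 and mhigh == (2^35 + 2^3)/7 == 0x124924925, which needs
   33 bits.  The function stores the low 32 bits, 0x24924925, in
   *MULTIPLIER_PTR, sets *POST_SHIFT_PTR to 3, and returns 1 for the
   implicit high bit; the caller (see the TRUNC_DIV path in expand_divmod)
   then applies the 33-bit multiplier using only 32-bit operations.  */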
/* Compute the inverse of X mod 2**n, i.e., find Y such that X * Y is
   congruent to 1 (mod 2**N).  */

static unsigned HOST_WIDE_INT
invert_mod2n (x, n)
     unsigned HOST_WIDE_INT x;
     int n;
{
  /* Solve x*y == 1 (mod 2^n), where x is odd.  Return y.  */

  /* The algorithm notes that the choice y = x satisfies
     x*y == 1 mod 2^3, since x is assumed odd.
     Each iteration doubles the number of bits of significance in y.  */

  unsigned HOST_WIDE_INT mask;
  unsigned HOST_WIDE_INT y = x;
  int nbit = 3;

  mask = (n == HOST_BITS_PER_WIDE_INT
          ? ~(unsigned HOST_WIDE_INT) 0
          : ((unsigned HOST_WIDE_INT) 1 << n) - 1);

  while (nbit < n)
    {
      y = y * (2 - x*y) & mask;         /* Modulo 2^N */
      nbit *= 2;
    }
  return y;
}
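/* Example: for x == 7, n == 32 the iteration starts from y == 7 (since
   7*7 == 49 == 1 mod 8) and converges to y == 0xb6db6db7; indeed
   7 * 0xb6db6db7 == 0x500000001, which is 1 mod 2^32.  */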
/* Emit code to adjust ADJ_OPERAND after multiplication of wrong signedness
   flavor of OP0 and OP1.  ADJ_OPERAND is already the high half of the
   product OP0 x OP1.  If UNSIGNEDP is nonzero, adjust the signed product
   to become unsigned, if UNSIGNEDP is zero, adjust the unsigned product to
   become signed.

   The result is put in TARGET if that is convenient.

   MODE is the mode of operation.  */

rtx
expand_mult_highpart_adjust (mode, adj_operand, op0, op1, target, unsignedp)
     enum machine_mode mode;
     rtx adj_operand, op0, op1, target;
     int unsignedp;
{
  rtx tem;
  enum rtx_code adj_code = unsignedp ? PLUS : MINUS;

  tem = expand_shift (RSHIFT_EXPR, mode, op0,
                      build_int_2 (GET_MODE_BITSIZE (mode) - 1, 0),
                      NULL_RTX, 0);
  tem = expand_and (mode, tem, op1, NULL_RTX);
  adj_operand
    = force_operand (gen_rtx_fmt_ee (adj_code, mode, adj_operand, tem),
                     adj_operand);

  tem = expand_shift (RSHIFT_EXPR, mode, op1,
                      build_int_2 (GET_MODE_BITSIZE (mode) - 1, 0),
                      NULL_RTX, 0);
  tem = expand_and (mode, tem, op0, NULL_RTX);
  target = force_operand (gen_rtx_fmt_ee (adj_code, mode, adj_operand, tem),
                          target);

  return target;
}
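/* The adjustment above implements the identity relating the two flavors
   of N-bit high-part multiply:
     smul_highpart (x, y) == umul_highpart (x, y)
                             - (x < 0 ? y : 0) - (y < 0 ? x : 0)
   since reinterpreting a negative x as unsigned adds 2^N * y to the full
   product, i.e. exactly y in the high half (and symmetrically for y).
   Each arithmetic-shift-and-AND step computes one of the two conditional
   terms without a branch.  */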
/* Emit code to multiply OP0 and CNST1, putting the high half of the result
   in TARGET if that is convenient, and return where the result is.  If the
   operation can not be performed, 0 is returned.

   MODE is the mode of operation and result.

   UNSIGNEDP nonzero means unsigned multiply.

   MAX_COST is the total allowed cost for the expanded RTL.  */

rtx
expand_mult_highpart (mode, op0, cnst1, target, unsignedp, max_cost)
     enum machine_mode mode;
     rtx op0, target;
     unsigned HOST_WIDE_INT cnst1;
     int unsignedp;
     int max_cost;
{
  enum machine_mode wider_mode = GET_MODE_WIDER_MODE (mode);
  optab mul_highpart_optab;
  optab moptab;
  rtx tem;
  int size = GET_MODE_BITSIZE (mode);
  rtx op1, wide_op1;

  /* We can't support modes wider than HOST_BITS_PER_INT.  */
  if (size > HOST_BITS_PER_WIDE_INT)
    abort ();

  op1 = gen_int_mode (cnst1, mode);

  wide_op1
    = immed_double_const (cnst1,
                          (unsignedp
                           ? (HOST_WIDE_INT) 0
                           : -(cnst1 >> (HOST_BITS_PER_WIDE_INT - 1))),
                          wider_mode);

  /* expand_mult handles constant multiplication of word_mode
     or narrower.  It does a poor job for large modes.  */
  if (size < BITS_PER_WORD
      && mul_cost[(int) wider_mode] + shift_cost[size-1] < max_cost)
    {
      /* We have to do this, since expand_binop doesn't do conversion for
         multiply.  Maybe change expand_binop to handle widening multiply?  */
      op0 = convert_to_mode (wider_mode, op0, unsignedp);

      /* We know that this can't have signed overflow, so pretend this is
         an unsigned multiply.  */
      tem = expand_mult (wider_mode, op0, wide_op1, NULL_RTX, 0);
      tem = expand_shift (RSHIFT_EXPR, wider_mode, tem,
                          build_int_2 (size, 0), NULL_RTX, 1);
      return convert_modes (mode, wider_mode, tem, unsignedp);
    }

  if (target == 0)
    target = gen_reg_rtx (mode);

  /* Firstly, try using a multiplication insn that only generates the needed
     high part of the product, and in the sign flavor of unsignedp.  */
  if (mul_highpart_cost[(int) mode] < max_cost)
    {
      mul_highpart_optab
        = unsignedp ? umul_highpart_optab : smul_highpart_optab;
      target = expand_binop (mode, mul_highpart_optab,
                             op0, op1, target, unsignedp, OPTAB_DIRECT);
      if (target)
        return target;
    }

  /* Secondly, same as above, but use sign flavor opposite of unsignedp.
     Need to adjust the result after the multiplication.  */
  if (size - 1 < BITS_PER_WORD
      && (mul_highpart_cost[(int) mode]
          + 2 * shift_cost[size-1] + 4 * add_cost < max_cost))
    {
      mul_highpart_optab
        = unsignedp ? smul_highpart_optab : umul_highpart_optab;
      target = expand_binop (mode, mul_highpart_optab,
                             op0, op1, target, unsignedp, OPTAB_DIRECT);
      if (target)
        /* We used the wrong signedness.  Adjust the result.  */
        return expand_mult_highpart_adjust (mode, target, op0,
                                            op1, target, unsignedp);
    }

  /* Try widening multiplication.  */
  moptab = unsignedp ? umul_widen_optab : smul_widen_optab;
  if (moptab->handlers[(int) wider_mode].insn_code != CODE_FOR_nothing
      && mul_widen_cost[(int) wider_mode] < max_cost)
    {
      op1 = force_reg (mode, op1);
      goto try;
    }

  /* Try widening the mode and perform a non-widening multiplication.  */
  moptab = smul_optab;
  if (smul_optab->handlers[(int) wider_mode].insn_code != CODE_FOR_nothing
      && size - 1 < BITS_PER_WORD
      && mul_cost[(int) wider_mode] + shift_cost[size-1] < max_cost)
    {
      op1 = wide_op1;
      goto try;
    }

  /* Try widening multiplication of opposite signedness, and adjust.  */
  moptab = unsignedp ? smul_widen_optab : umul_widen_optab;
  if (moptab->handlers[(int) wider_mode].insn_code != CODE_FOR_nothing
      && size - 1 < BITS_PER_WORD
      && (mul_widen_cost[(int) wider_mode]
          + 2 * shift_cost[size-1] + 4 * add_cost < max_cost))
    {
      rtx regop1 = force_reg (mode, op1);
      tem = expand_binop (wider_mode, moptab, op0, regop1,
                          NULL_RTX, ! unsignedp, OPTAB_WIDEN);
      if (tem != 0)
        {
          /* Extract the high half of the just generated product.  */
          tem = expand_shift (RSHIFT_EXPR, wider_mode, tem,
                              build_int_2 (size, 0), NULL_RTX, 1);
          tem = convert_modes (mode, wider_mode, tem, unsignedp);
          /* We used the wrong signedness.  Adjust the result.  */
          return expand_mult_highpart_adjust (mode, tem, op0, op1,
                                              target, unsignedp);
        }
    }

  return 0;

 try:
  /* Pass NULL_RTX as target since TARGET has wrong mode.  */
  tem = expand_binop (wider_mode, moptab, op0, op1,
                      NULL_RTX, unsignedp, OPTAB_WIDEN);
  if (tem == 0)
    return 0;

  /* Extract the high half of the just generated product.  */
  if (mode == word_mode)
    {
      return gen_highpart (mode, tem);
    }
  else
    {
      tem = expand_shift (RSHIFT_EXPR, wider_mode, tem,
                          build_int_2 (size, 0), NULL_RTX, 1);
      return convert_modes (mode, wider_mode, tem, unsignedp);
    }
}
/* Emit the code to divide OP0 by OP1, putting the result in TARGET
   if that is convenient, and returning where the result is.
   You may request either the quotient or the remainder as the result;
   specify REM_FLAG nonzero to get the remainder.

   CODE is the expression code for which kind of division this is;
   it controls how rounding is done.  MODE is the machine mode to use.
   UNSIGNEDP nonzero means do unsigned division.  */

/* ??? For CEIL_MOD_EXPR, can compute incorrect remainder with ANDI
   and then correct it by or'ing in missing high bits
   if result of ANDI is nonzero.
   For ROUND_MOD_EXPR, can use ANDI and then sign-extend the result.
   This could optimize to a bfexts instruction.
   But C doesn't use these operations, so their optimizations are
   left for later.  */

/* ??? For modulo, we don't actually need the highpart of the first product,
   the low part will do nicely.  And for small divisors, the second multiply
   can also be a low-part only multiply or even be completely left out.
   E.g. to calculate the remainder of a division by 3 with a 32 bit
   multiply, multiply with 0x55555556 and extract the upper two bits;
   the result is exact for inputs up to 0x1fffffff.
   The input range can be reduced by using cross-sum rules.
   For odd divisors >= 3, the following table gives right shift counts
   so that if a number is shifted by an integer multiple of the given
   amount, the remainder stays the same:
   2, 4, 3, 6, 10, 12, 4, 8, 18, 6, 11, 20, 18, 0, 5, 10, 12, 0, 12, 20,
   14, 12, 23, 21, 8, 0, 20, 18, 0, 0, 6, 12, 0, 22, 0, 18, 20, 30, 0, 0,
   0, 8, 0, 11, 12, 10, 36, 0, 30, 0, 0, 12, 0, 0, 0, 0, 44, 12, 24, 0,
   20, 0, 7, 14, 0, 18, 36, 0, 0, 46, 60, 0, 42, 0, 15, 24, 20, 0, 0, 33,
   0, 20, 0, 0, 18, 0, 60, 0, 0, 0, 0, 0, 40, 18, 0, 0, 12

   Cross-sum rules for even numbers can be derived by leaving as many bits
   to the right alone as the divisor has zeros to the right.
   E.g. if x is an unsigned 32 bit number:
   (x mod 12) == (((x & 1023) + ((x >> 8) & ~3)) * 0x15555558 >> 2 * 3) >> 28
   */

#define EXACT_POWER_OF_2_OR_ZERO_P(x) (((x) & ((x) - 1)) == 0)
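/* Checking the mod-3 example above on x == 5: 5 * 0x55555556 ==
   0x1aaaaaaae, whose low 32 bits are 0xaaaaaaae; the upper two bits of
   that low half are 0xaaaaaaae >> 30 == 2 == 5 mod 3.  This works because
   0x55555556 == (2^32 + 2) / 3, so x times it has x/3 in the high word
   and (x mod 3) in the top bits of the low word plus an error term of
   2x/3 ulps, which stays below 2^30 for x <= 0x1fffffff.  */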
rtx
expand_divmod (rem_flag, code, mode, op0, op1, target, unsignedp)
     int rem_flag;
     enum tree_code code;
     enum machine_mode mode;
     rtx op0, op1, target;
     int unsignedp;
{
  enum machine_mode compute_mode;
  rtx tquotient;
  rtx quotient = 0, remainder = 0;
  rtx last;
  int size;
  rtx insn, set;
  optab optab1, optab2;
  int op1_is_constant, op1_is_pow2 = 0;
  int max_cost, extra_cost;
  static HOST_WIDE_INT last_div_const = 0;
  static HOST_WIDE_INT ext_op1;

  op1_is_constant = GET_CODE (op1) == CONST_INT;
  if (op1_is_constant)
    {
      ext_op1 = INTVAL (op1);
      if (unsignedp)
        ext_op1 &= GET_MODE_MASK (mode);
      op1_is_pow2 = (EXACT_POWER_OF_2_OR_ZERO_P (ext_op1)
                     || (! unsignedp
                         && EXACT_POWER_OF_2_OR_ZERO_P (-ext_op1)));
    }

  /*
     This is the structure of expand_divmod:

     First comes code to fix up the operands so we can perform the operations
     correctly and efficiently.

     Second comes a switch statement with code specific for each rounding
     mode.  For some special operands this code emits all RTL for the desired
     operation, for other cases, it generates only a quotient and stores it in
     QUOTIENT.  The case for trunc division/remainder might leave quotient = 0,
     to indicate that it has not done anything.

     Last comes code that finishes the operation.  If QUOTIENT is set and
     REM_FLAG is set, the remainder is computed as OP0 - QUOTIENT * OP1.  If
     QUOTIENT is not set, it is computed using trunc rounding.

     We try to generate special code for division and remainder when OP1 is a
     constant.  If |OP1| = 2**n we can use shifts and some other fast
     operations.  For other values of OP1, we compute a carefully selected
     fixed-point approximation m = 1/OP1, and generate code that multiplies
     OP0 by m.

     In all cases but EXACT_DIV_EXPR, this multiplication requires the upper
     half of the product.  Different strategies for generating the product are
     implemented in expand_mult_highpart.

     If what we actually want is the remainder, we generate that by another
     by-constant multiplication and a subtraction.  */

  /* We shouldn't be called with OP1 == const1_rtx, but some of the
     code below will malfunction if we are, so check here and handle
     the special case if so.  */
  if (op1 == const1_rtx)
    return rem_flag ? const0_rtx : op0;
  /* When dividing by -1, we could get an overflow.
     negv_optab can handle overflows.  */
  if (! unsignedp && op1 == constm1_rtx)
    {
      if (rem_flag)
        return const0_rtx;
      return expand_unop (mode,
                          flag_trapv && GET_MODE_CLASS(mode) == MODE_INT
                          ? negv_optab : neg_optab, op0, target, 0);
    }

  if (target
      /* Don't use the function value register as a target
         since we have to read it as well as write it,
         and function-inlining gets confused by this.  */
      && ((REG_P (target) && REG_FUNCTION_VALUE_P (target))
          /* Don't clobber an operand while doing a multi-step
             calculation.  */
          || ((rem_flag || op1_is_constant)
              && (reg_mentioned_p (target, op0)
                  || (GET_CODE (op0) == MEM && GET_CODE (target) == MEM)))
          || reg_mentioned_p (target, op1)
          || (GET_CODE (op1) == MEM && GET_CODE (target) == MEM)))
    target = 0;

  /* Get the mode in which to perform this computation.  Normally it will
     be MODE, but sometimes we can't do the desired operation in MODE.
     If so, pick a wider mode in which we can do the operation.  Convert
     to that mode at the start to avoid repeated conversions.

     First see what operations we need.  These depend on the expression
     we are evaluating.  (We assume that divxx3 insns exist under the
     same conditions that modxx3 insns and that these insns don't normally
     fail.  If these assumptions are not correct, we may generate less
     efficient code in some cases.)

     Then see if we find a mode in which we can open-code that operation
     (either a division, modulus, or shift).  Finally, check for the smallest
     mode for which we can do the operation with a library call.  */

  /* We might want to refine this now that we have division-by-constant
     optimization.  Since expand_mult_highpart tries so many variants, it is
     not straightforward to generalize this.  Maybe we should make an array
     of possible modes in init_expmed?  Save this for GCC 2.7.  */

  optab1 = ((op1_is_pow2 && op1 != const0_rtx)
            ? (unsignedp ? lshr_optab : ashr_optab)
            : (unsignedp ? udiv_optab : sdiv_optab));
  optab2 = ((op1_is_pow2 && op1 != const0_rtx)
            ? optab1
            : (unsignedp ? udivmod_optab : sdivmod_optab));

  for (compute_mode = mode; compute_mode != VOIDmode;
       compute_mode = GET_MODE_WIDER_MODE (compute_mode))
    if (optab1->handlers[(int) compute_mode].insn_code != CODE_FOR_nothing
        || optab2->handlers[(int) compute_mode].insn_code != CODE_FOR_nothing)
      break;

  if (compute_mode == VOIDmode)
    for (compute_mode = mode; compute_mode != VOIDmode;
         compute_mode = GET_MODE_WIDER_MODE (compute_mode))
      if (optab1->handlers[(int) compute_mode].libfunc
          || optab2->handlers[(int) compute_mode].libfunc)
        break;

  /* If we still couldn't find a mode, use MODE, but we'll probably abort
     in expand_binop.  */
  if (compute_mode == VOIDmode)
    compute_mode = mode;

  if (target && GET_MODE (target) == compute_mode)
    tquotient = target;
  else
    tquotient = gen_reg_rtx (compute_mode);

  size = GET_MODE_BITSIZE (compute_mode);
#if 0
  /* It should be possible to restrict the precision to GET_MODE_BITSIZE
     (mode), and thereby get better code when OP1 is a constant.  Do that
     later.  It will require going over all usages of SIZE below.  */
  size = GET_MODE_BITSIZE (mode);
#endif

  /* Only deduct something for a REM if the last divide done was
     for a different constant.   Then set the constant of the last
     divide.  */
  max_cost = div_cost[(int) compute_mode]
    - (rem_flag && ! (last_div_const != 0 && op1_is_constant
                      && INTVAL (op1) == last_div_const)
       ? mul_cost[(int) compute_mode] + add_cost : 0);

  last_div_const = ! rem_flag && op1_is_constant ? INTVAL (op1) : 0;

  /* Now convert to the best mode to use.  */
  if (compute_mode != mode)
    {
      op0 = convert_modes (compute_mode, mode, op0, unsignedp);
      op1 = convert_modes (compute_mode, mode, op1, unsignedp);

      /* convert_modes may have placed op1 into a register, so we
         must recompute the following.  */
      op1_is_constant = GET_CODE (op1) == CONST_INT;
      op1_is_pow2 = (op1_is_constant
                     && (EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1))
                         || (! unsignedp
                             && EXACT_POWER_OF_2_OR_ZERO_P (-INTVAL (op1)))));
    }

  /* If one of the operands is a volatile MEM, copy it into a register.  */

  if (GET_CODE (op0) == MEM && MEM_VOLATILE_P (op0))
    op0 = force_reg (compute_mode, op0);
  if (GET_CODE (op1) == MEM && MEM_VOLATILE_P (op1))
    op1 = force_reg (compute_mode, op1);

  /* If we need the remainder or if OP1 is constant, we need to
     put OP0 in a register in case it has any queued subexpressions.  */
  if (rem_flag || op1_is_constant)
    op0 = force_reg (compute_mode, op0);

  last = get_last_insn ();

  /* Promote floor rounding to trunc rounding for unsigned operations.  */
  if (unsignedp)
    {
      if (code == FLOOR_DIV_EXPR)
        code = TRUNC_DIV_EXPR;
      if (code == FLOOR_MOD_EXPR)
        code = TRUNC_MOD_EXPR;
      if (code == EXACT_DIV_EXPR && op1_is_pow2)
        code = TRUNC_DIV_EXPR;
    }

  if (op1 != const0_rtx)
    switch (code)
      {
      case TRUNC_MOD_EXPR:
      case TRUNC_DIV_EXPR:
        if (op1_is_constant)
          {
            if (unsignedp)
              {
                unsigned HOST_WIDE_INT mh, ml;
                int pre_shift, post_shift;
                int dummy;
                unsigned HOST_WIDE_INT d = (INTVAL (op1)
                                            & GET_MODE_MASK (compute_mode));

                if (EXACT_POWER_OF_2_OR_ZERO_P (d))
                  {
                    pre_shift = floor_log2 (d);
                    if (rem_flag)
                      {
                        remainder
                          = expand_binop (compute_mode, and_optab, op0,
                                          GEN_INT (((HOST_WIDE_INT) 1
                                                    << pre_shift) - 1),
                                          remainder, 1,
                                          OPTAB_LIB_WIDEN);
                        if (remainder)
                          return gen_lowpart (mode, remainder);
                      }
                    quotient = expand_shift (RSHIFT_EXPR, compute_mode, op0,
                                             build_int_2 (pre_shift, 0),
                                             tquotient, 1);
                  }
                else if (size <= HOST_BITS_PER_WIDE_INT)
                  {
                    if (d >= ((unsigned HOST_WIDE_INT) 1 << (size - 1)))
                      {
                        /* Most significant bit of divisor is set; emit an scc
                           insn.  */
                        quotient = emit_store_flag (tquotient, GEU, op0, op1,
                                                    compute_mode, 1, 1);
                        if (quotient == 0)
                          goto fail1;
                      }
                    else
                      {
                        /* Find a suitable multiplier and right shift count
                           instead of multiplying with D.  */

                        mh = choose_multiplier (d, size, size,
                                                &ml, &post_shift, &dummy);

                        /* If the suggested multiplier is more than SIZE bits,
                           we can do better for even divisors, using an
                           initial right shift.  */
                        if (mh != 0 && (d & 1) == 0)
                          {
                            pre_shift = floor_log2 (d & -d);
                            mh = choose_multiplier (d >> pre_shift, size,
                                                    size - pre_shift,
                                                    &ml, &post_shift, &dummy);
                            if (mh)
                              abort ();
                          }
                        else
                          pre_shift = 0;

                        if (mh != 0)
                          {
                            rtx t1, t2, t3, t4;

                            if (post_shift - 1 >= BITS_PER_WORD)
                              goto fail1;

                            extra_cost = (shift_cost[post_shift - 1]
                                          + shift_cost[1] + 2 * add_cost);
                            t1 = expand_mult_highpart (compute_mode, op0, ml,
                                                       NULL_RTX, 1,
                                                       max_cost - extra_cost);
                            if (t1 == 0)
                              goto fail1;
                            t2 = force_operand (gen_rtx_MINUS (compute_mode,
                                                               op0, t1),
                                                NULL_RTX);
                            t3 = expand_shift (RSHIFT_EXPR, compute_mode, t2,
                                               build_int_2 (1, 0),
                                               NULL_RTX, 1);
                            t4 = force_operand (gen_rtx_PLUS (compute_mode,
                                                              t1, t3),
                                                NULL_RTX);
                            quotient
                              = expand_shift (RSHIFT_EXPR, compute_mode, t4,
                                              build_int_2 (post_shift - 1, 0),
                                              tquotient, 1);
                          }
                        else
                          {
                            rtx t1, t2;

                            if (pre_shift >= BITS_PER_WORD
                                || post_shift >= BITS_PER_WORD)
                              goto fail1;

                            t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
                                               build_int_2 (pre_shift, 0),
                                               NULL_RTX, 1);
                            extra_cost = (shift_cost[pre_shift]
                                          + shift_cost[post_shift]);
                            t2 = expand_mult_highpart (compute_mode, t1, ml,
                                                       NULL_RTX, 1,
                                                       max_cost - extra_cost);
                            if (t2 == 0)
                              goto fail1;
                            quotient
                              = expand_shift (RSHIFT_EXPR, compute_mode, t2,
                                              build_int_2 (post_shift, 0),
                                              tquotient, 1);
                          }
                      }
                  }
                else            /* Too wide mode to use tricky code */
                  break;

                insn = get_last_insn ();
                if (insn != last
                    && (set = single_set (insn)) != 0
                    && SET_DEST (set) == quotient)
                  set_unique_reg_note (insn,
                                       REG_EQUAL,
                                       gen_rtx_UDIV (compute_mode, op0, op1));
              }
            else                /* TRUNC_DIV, signed */
              {
                unsigned HOST_WIDE_INT ml;
                int lgup, post_shift;
                HOST_WIDE_INT d = INTVAL (op1);
                unsigned HOST_WIDE_INT abs_d = d >= 0 ? d : -d;

                /* n rem d = n rem -d */
                if (rem_flag && d < 0)
                  {
                    d = abs_d;
                    op1 = gen_int_mode (abs_d, compute_mode);
                  }

                if (d == 1)
                  quotient = op0;
                else if (d == -1)
                  quotient = expand_unop (compute_mode, neg_optab, op0,
                                          tquotient, 0);
                else if (abs_d == (unsigned HOST_WIDE_INT) 1 << (size - 1))
                  {
                    /* This case is not handled correctly below.  */
                    quotient = emit_store_flag (tquotient, EQ, op0, op1,
                                                compute_mode, 1, 1);
                    if (quotient == 0)
                      goto fail1;
                  }
                else if (EXACT_POWER_OF_2_OR_ZERO_P (d)
                         && (rem_flag ? smod_pow2_cheap : sdiv_pow2_cheap)
                         /* ??? The cheap metric is computed only for
                            word_mode.  If this operation is wider, this may
                            not be so.  Assume true if the optab has an
                            expander for this mode.  */
                         && (((rem_flag ? smod_optab : sdiv_optab)
                              ->handlers[(int) compute_mode].insn_code
                              != CODE_FOR_nothing)
                             || (sdivmod_optab->handlers[(int) compute_mode]
                                 .insn_code != CODE_FOR_nothing)))
                  ;
                else if (EXACT_POWER_OF_2_OR_ZERO_P (abs_d))
                  {
                    lgup = floor_log2 (abs_d);
                    if (BRANCH_COST < 1 || (abs_d != 2 && BRANCH_COST < 3))
                      {
                        rtx label = gen_label_rtx ();
                        rtx t1;

                        t1 = copy_to_mode_reg (compute_mode, op0);
                        do_cmp_and_jump (t1, const0_rtx, GE,
                                         compute_mode, label);
                        expand_inc (t1, gen_int_mode (abs_d - 1,
                                                      compute_mode));
                        emit_label (label);
                        quotient = expand_shift (RSHIFT_EXPR, compute_mode,
                                                 t1, build_int_2 (lgup, 0),
                                                 tquotient, 0);
                      }
                    else
                      {
                        rtx t1, t2, t3;

                        t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
                                           build_int_2 (size - 1, 0),
                                           NULL_RTX, 0);
                        t2 = expand_shift (RSHIFT_EXPR, compute_mode, t1,
                                           build_int_2 (size - lgup, 0),
                                           NULL_RTX, 1);
                        t3 = force_operand (gen_rtx_PLUS (compute_mode,
                                                          op0, t2),
                                            NULL_RTX);
                        quotient = expand_shift (RSHIFT_EXPR, compute_mode,
                                                 t3, build_int_2 (lgup, 0),
                                                 tquotient, 0);
                      }

                    /* We have computed OP0 / abs(OP1).  If OP1 is negative,
                       negate the quotient.  */
                    if (d < 0)
                      {
                        insn = get_last_insn ();
                        if (insn != last
                            && (set = single_set (insn)) != 0
                            && SET_DEST (set) == quotient
                            && abs_d < ((unsigned HOST_WIDE_INT) 1
                                        << (HOST_BITS_PER_WIDE_INT - 1)))
                          set_unique_reg_note (insn,
                                               REG_EQUAL,
                                               gen_rtx_DIV (compute_mode,
                                                            op0,
                                                            GEN_INT
                                                            (trunc_int_for_mode
                                                             (abs_d,
                                                              compute_mode))));

                        quotient = expand_unop (compute_mode, neg_optab,
                                                quotient, quotient, 0);
                      }
                  }
                else if (size <= HOST_BITS_PER_WIDE_INT)
                  {
                    choose_multiplier (abs_d, size, size - 1,
                                       &ml, &post_shift, &lgup);
                    if (ml < (unsigned HOST_WIDE_INT) 1 << (size - 1))
                      {
                        rtx t1, t2, t3;

                        if (post_shift >= BITS_PER_WORD
                            || size - 1 >= BITS_PER_WORD)
                          goto fail1;

                        extra_cost = (shift_cost[post_shift]
                                      + shift_cost[size - 1] + add_cost);
                        t1 = expand_mult_highpart (compute_mode, op0, ml,
                                                   NULL_RTX, 0,
                                                   max_cost - extra_cost);
                        if (t1 == 0)
                          goto fail1;
                        t2 = expand_shift (RSHIFT_EXPR, compute_mode, t1,
                                           build_int_2 (post_shift, 0),
                                           NULL_RTX, 0);
                        t3 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
                                           build_int_2 (size - 1, 0),
                                           NULL_RTX, 0);
                        if (d < 0)
                          quotient
                            = force_operand (gen_rtx_MINUS (compute_mode,
                                                            t3, t2),
                                             tquotient);
                        else
                          quotient
                            = force_operand (gen_rtx_MINUS (compute_mode,
                                                            t2, t3),
                                             tquotient);
                      }
                    else
                      {
                        rtx t1, t2, t3, t4;

                        if (post_shift >= BITS_PER_WORD
                            || size - 1 >= BITS_PER_WORD)
                          goto fail1;

                        ml |= (~(unsigned HOST_WIDE_INT) 0) << (size - 1);
                        extra_cost = (shift_cost[post_shift]
                                      + shift_cost[size - 1] + 2 * add_cost);
                        t1 = expand_mult_highpart (compute_mode, op0, ml,
                                                   NULL_RTX, 0,
                                                   max_cost - extra_cost);
                        if (t1 == 0)
                          goto fail1;
                        t2 = force_operand (gen_rtx_PLUS (compute_mode,
                                                          t1, op0),
                                            NULL_RTX);
                        t3 = expand_shift (RSHIFT_EXPR, compute_mode, t2,
                                           build_int_2 (post_shift, 0),
                                           NULL_RTX, 0);
                        t4 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
                                           build_int_2 (size - 1, 0),
                                           NULL_RTX, 0);
                        if (d < 0)
                          quotient
                            = force_operand (gen_rtx_MINUS (compute_mode,
                                                            t4, t3),
                                             tquotient);
                        else
                          quotient
                            = force_operand (gen_rtx_MINUS (compute_mode,
                                                            t3, t4),
                                             tquotient);
                      }
                  }
                else            /* Too wide mode to use tricky code */
                  break;

                insn = get_last_insn ();
                if (insn != last
                    && (set = single_set (insn)) != 0
                    && SET_DEST (set) == quotient)
                  set_unique_reg_note (insn,
                                       REG_EQUAL,
                                       gen_rtx_DIV (compute_mode, op0, op1));
              }
            break;
          }
      fail1:
        delete_insns_since (last);
        break;
:
3454 case FLOOR_MOD_EXPR
:
3455 /* We will come here only for signed operations. */
3456 if (op1_is_constant
&& HOST_BITS_PER_WIDE_INT
>= size
)
3458 unsigned HOST_WIDE_INT mh
, ml
;
3459 int pre_shift
, lgup
, post_shift
;
3460 HOST_WIDE_INT d
= INTVAL (op1
);
3464 /* We could just as easily deal with negative constants here,
3465 but it does not seem worth the trouble for GCC 2.6. */
3466 if (EXACT_POWER_OF_2_OR_ZERO_P (d
))
3468 pre_shift
= floor_log2 (d
);
3471 remainder
= expand_binop (compute_mode
, and_optab
, op0
,
3472 GEN_INT (((HOST_WIDE_INT
) 1 << pre_shift
) - 1),
3473 remainder
, 0, OPTAB_LIB_WIDEN
);
3475 return gen_lowpart (mode
, remainder
);
3477 quotient
= expand_shift (RSHIFT_EXPR
, compute_mode
, op0
,
3478 build_int_2 (pre_shift
, 0),
3485 mh
= choose_multiplier (d
, size
, size
- 1,
3486 &ml
, &post_shift
, &lgup
);
3490 if (post_shift
< BITS_PER_WORD
3491 && size
- 1 < BITS_PER_WORD
)
3493 t1
= expand_shift (RSHIFT_EXPR
, compute_mode
, op0
,
3494 build_int_2 (size
- 1, 0),
3496 t2
= expand_binop (compute_mode
, xor_optab
, op0
, t1
,
3497 NULL_RTX
, 0, OPTAB_WIDEN
);
3498 extra_cost
= (shift_cost
[post_shift
]
3499 + shift_cost
[size
- 1] + 2 * add_cost
);
3500 t3
= expand_mult_highpart (compute_mode
, t2
, ml
,
3502 max_cost
- extra_cost
);
3505 t4
= expand_shift (RSHIFT_EXPR
, compute_mode
, t3
,
3506 build_int_2 (post_shift
, 0),
3508 quotient
= expand_binop (compute_mode
, xor_optab
,
3509 t4
, t1
, tquotient
, 0,
3517 rtx nsign
, t1
, t2
, t3
, t4
;
3518 t1
= force_operand (gen_rtx_PLUS (compute_mode
,
3519 op0
, constm1_rtx
), NULL_RTX
);
3520 t2
= expand_binop (compute_mode
, ior_optab
, op0
, t1
, NULL_RTX
,
3522 nsign
= expand_shift (RSHIFT_EXPR
, compute_mode
, t2
,
3523 build_int_2 (size
- 1, 0), NULL_RTX
, 0);
3524 t3
= force_operand (gen_rtx_MINUS (compute_mode
, t1
, nsign
),
3526 t4
= expand_divmod (0, TRUNC_DIV_EXPR
, compute_mode
, t3
, op1
,
3531 t5
= expand_unop (compute_mode
, one_cmpl_optab
, nsign
,
3533 quotient
= force_operand (gen_rtx_PLUS (compute_mode
,
3542 delete_insns_since (last
);
3544 /* Try using an instruction that produces both the quotient and
3545 remainder, using truncation. We can easily compensate the quotient
3546 or remainder to get floor rounding, once we have the remainder.
3547 Notice that we compute also the final remainder value here,
3548 and return the result right away. */
3549 if (target
== 0 || GET_MODE (target
) != compute_mode
)
3550 target
= gen_reg_rtx (compute_mode
);
3555 = GET_CODE (target
) == REG
? target
: gen_reg_rtx (compute_mode
);
3556 quotient
= gen_reg_rtx (compute_mode
);
3561 = GET_CODE (target
) == REG
? target
: gen_reg_rtx (compute_mode
);
3562 remainder
= gen_reg_rtx (compute_mode
);
3565 if (expand_twoval_binop (sdivmod_optab
, op0
, op1
,
3566 quotient
, remainder
, 0))
3568 /* This could be computed with a branch-less sequence.
3569 Save that for later. */
3571 rtx label
= gen_label_rtx ();
3572 do_cmp_and_jump (remainder
, const0_rtx
, EQ
, compute_mode
, label
);
3573 tem
= expand_binop (compute_mode
, xor_optab
, op0
, op1
,
3574 NULL_RTX
, 0, OPTAB_WIDEN
);
3575 do_cmp_and_jump (tem
, const0_rtx
, GE
, compute_mode
, label
);
3576 expand_dec (quotient
, const1_rtx
);
3577 expand_inc (remainder
, op1
);
3579 return gen_lowpart (mode
, rem_flag
? remainder
: quotient
);
3582 /* No luck with division elimination or divmod. Have to do it
3583 by conditionally adjusting op0 *and* the result. */
3585 rtx label1
, label2
, label3
, label4
, label5
;
3589 quotient
= gen_reg_rtx (compute_mode
);
3590 adjusted_op0
= copy_to_mode_reg (compute_mode
, op0
);
3591 label1
= gen_label_rtx ();
3592 label2
= gen_label_rtx ();
3593 label3
= gen_label_rtx ();
3594 label4
= gen_label_rtx ();
3595 label5
= gen_label_rtx ();
3596 do_cmp_and_jump (op1
, const0_rtx
, LT
, compute_mode
, label2
);
3597 do_cmp_and_jump (adjusted_op0
, const0_rtx
, LT
, compute_mode
, label1
);
3598 tem
= expand_binop (compute_mode
, sdiv_optab
, adjusted_op0
, op1
,
3599 quotient
, 0, OPTAB_LIB_WIDEN
);
3600 if (tem
!= quotient
)
3601 emit_move_insn (quotient
, tem
);
3602 emit_jump_insn (gen_jump (label5
));
3604 emit_label (label1
);
3605 expand_inc (adjusted_op0
, const1_rtx
);
3606 emit_jump_insn (gen_jump (label4
));
3608 emit_label (label2
);
3609 do_cmp_and_jump (adjusted_op0
, const0_rtx
, GT
, compute_mode
, label3
);
3610 tem
= expand_binop (compute_mode
, sdiv_optab
, adjusted_op0
, op1
,
3611 quotient
, 0, OPTAB_LIB_WIDEN
);
3612 if (tem
!= quotient
)
3613 emit_move_insn (quotient
, tem
);
3614 emit_jump_insn (gen_jump (label5
));
3616 emit_label (label3
);
3617 expand_dec (adjusted_op0
, const1_rtx
);
3618 emit_label (label4
);
3619 tem
= expand_binop (compute_mode
, sdiv_optab
, adjusted_op0
, op1
,
3620 quotient
, 0, OPTAB_LIB_WIDEN
);
3621 if (tem
!= quotient
)
3622 emit_move_insn (quotient
, tem
);
3623 expand_dec (quotient
, const1_rtx
);
3624 emit_label (label5
);
3632 if (op1_is_constant
&& EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1
)))
3635 unsigned HOST_WIDE_INT d
= INTVAL (op1
);
3636 t1
= expand_shift (RSHIFT_EXPR
, compute_mode
, op0
,
3637 build_int_2 (floor_log2 (d
), 0),
3639 t2
= expand_binop (compute_mode
, and_optab
, op0
,
3641 NULL_RTX
, 1, OPTAB_LIB_WIDEN
);
3642 t3
= gen_reg_rtx (compute_mode
);
3643 t3
= emit_store_flag (t3
, NE
, t2
, const0_rtx
,
3644 compute_mode
, 1, 1);
3648 lab
= gen_label_rtx ();
3649 do_cmp_and_jump (t2
, const0_rtx
, EQ
, compute_mode
, lab
);
3650 expand_inc (t1
, const1_rtx
);
3655 quotient
= force_operand (gen_rtx_PLUS (compute_mode
,
3661 /* Try using an instruction that produces both the quotient and
3662 remainder, using truncation. We can easily compensate the
3663 quotient or remainder to get ceiling rounding, once we have the
3664 remainder. Notice that we compute also the final remainder
3665 value here, and return the result right away. */
3666 if (target
== 0 || GET_MODE (target
) != compute_mode
)
3667 target
= gen_reg_rtx (compute_mode
);
3671 remainder
= (GET_CODE (target
) == REG
3672 ? target
: gen_reg_rtx (compute_mode
));
3673 quotient
= gen_reg_rtx (compute_mode
);
3677 quotient
= (GET_CODE (target
) == REG
3678 ? target
: gen_reg_rtx (compute_mode
));
3679 remainder
= gen_reg_rtx (compute_mode
);
3682 if (expand_twoval_binop (udivmod_optab
, op0
, op1
, quotient
,
3685 /* This could be computed with a branch-less sequence.
3686 Save that for later. */
3687 rtx label
= gen_label_rtx ();
3688 do_cmp_and_jump (remainder
, const0_rtx
, EQ
,
3689 compute_mode
, label
);
3690 expand_inc (quotient
, const1_rtx
);
3691 expand_dec (remainder
, op1
);
3693 return gen_lowpart (mode
, rem_flag
? remainder
: quotient
);
3696 /* No luck with division elimination or divmod. Have to do it
3697 by conditionally adjusting op0 *and* the result. */
3700 rtx adjusted_op0
, tem
;
3702 quotient
= gen_reg_rtx (compute_mode
);
3703 adjusted_op0
= copy_to_mode_reg (compute_mode
, op0
);
3704 label1
= gen_label_rtx ();
3705 label2
= gen_label_rtx ();
3706 do_cmp_and_jump (adjusted_op0
, const0_rtx
, NE
,
3707 compute_mode
, label1
);
3708 emit_move_insn (quotient
, const0_rtx
);
3709 emit_jump_insn (gen_jump (label2
));
3711 emit_label (label1
);
3712 expand_dec (adjusted_op0
, const1_rtx
);
3713 tem
= expand_binop (compute_mode
, udiv_optab
, adjusted_op0
, op1
,
3714 quotient
, 1, OPTAB_LIB_WIDEN
);
3715 if (tem
!= quotient
)
3716 emit_move_insn (quotient
, tem
);
3717 expand_inc (quotient
, const1_rtx
);
3718 emit_label (label2
);
3723 if (op1_is_constant
&& EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1
))
3724 && INTVAL (op1
) >= 0)
3726 /* This is extremely similar to the code for the unsigned case
3727 above. For 2.7 we should merge these variants, but for
3728 2.6.1 I don't want to touch the code for unsigned since that
3729 get used in C. The signed case will only be used by other
3733 unsigned HOST_WIDE_INT d
= INTVAL (op1
);
3734 t1
= expand_shift (RSHIFT_EXPR
, compute_mode
, op0
,
3735 build_int_2 (floor_log2 (d
), 0),
3737 t2
= expand_binop (compute_mode
, and_optab
, op0
,
3739 NULL_RTX
, 1, OPTAB_LIB_WIDEN
);
3740 t3
= gen_reg_rtx (compute_mode
);
3741 t3
= emit_store_flag (t3
, NE
, t2
, const0_rtx
,
3742 compute_mode
, 1, 1);
3746 lab
= gen_label_rtx ();
3747 do_cmp_and_jump (t2
, const0_rtx
, EQ
, compute_mode
, lab
);
3748 expand_inc (t1
, const1_rtx
);
3753 quotient
= force_operand (gen_rtx_PLUS (compute_mode
,
3759 /* Try using an instruction that produces both the quotient and
3760 remainder, using truncation. We can easily compensate the
3761 quotient or remainder to get ceiling rounding, once we have the
3762 remainder. Notice that we compute also the final remainder
3763 value here, and return the result right away. */
3764 if (target
== 0 || GET_MODE (target
) != compute_mode
)
3765 target
= gen_reg_rtx (compute_mode
);
3768 remainder
= (GET_CODE (target
) == REG
3769 ? target
: gen_reg_rtx (compute_mode
));
3770 quotient
= gen_reg_rtx (compute_mode
);
3774 quotient
= (GET_CODE (target
) == REG
3775 ? target
: gen_reg_rtx (compute_mode
));
3776 remainder
= gen_reg_rtx (compute_mode
);
3779 if (expand_twoval_binop (sdivmod_optab
, op0
, op1
, quotient
,
3782 /* This could be computed with a branch-less sequence.
3783 Save that for later. */
3785 rtx label
= gen_label_rtx ();
3786 do_cmp_and_jump (remainder
, const0_rtx
, EQ
,
3787 compute_mode
, label
);
3788 tem
= expand_binop (compute_mode
, xor_optab
, op0
, op1
,
3789 NULL_RTX
, 0, OPTAB_WIDEN
);
3790 do_cmp_and_jump (tem
, const0_rtx
, LT
, compute_mode
, label
);
3791 expand_inc (quotient
, const1_rtx
);
3792 expand_dec (remainder
, op1
);
3794 return gen_lowpart (mode
, rem_flag
? remainder
: quotient
);
3797 /* No luck with division elimination or divmod. Have to do it
3798 by conditionally adjusting op0 *and* the result. */
3800 rtx label1
, label2
, label3
, label4
, label5
;
3804 quotient
= gen_reg_rtx (compute_mode
);
3805 adjusted_op0
= copy_to_mode_reg (compute_mode
, op0
);
3806 label1
= gen_label_rtx ();
3807 label2
= gen_label_rtx ();
3808 label3
= gen_label_rtx ();
3809 label4
= gen_label_rtx ();
3810 label5
= gen_label_rtx ();
3811 do_cmp_and_jump (op1
, const0_rtx
, LT
, compute_mode
, label2
);
3812 do_cmp_and_jump (adjusted_op0
, const0_rtx
, GT
,
3813 compute_mode
, label1
);
3814 tem
= expand_binop (compute_mode
, sdiv_optab
, adjusted_op0
, op1
,
3815 quotient
, 0, OPTAB_LIB_WIDEN
);
3816 if (tem
!= quotient
)
3817 emit_move_insn (quotient
, tem
);
3818 emit_jump_insn (gen_jump (label5
));
3820 emit_label (label1
);
3821 expand_dec (adjusted_op0
, const1_rtx
);
3822 emit_jump_insn (gen_jump (label4
));
3824 emit_label (label2
);
3825 do_cmp_and_jump (adjusted_op0
, const0_rtx
, LT
,
3826 compute_mode
, label3
);
3827 tem
= expand_binop (compute_mode
, sdiv_optab
, adjusted_op0
, op1
,
3828 quotient
, 0, OPTAB_LIB_WIDEN
);
3829 if (tem
!= quotient
)
3830 emit_move_insn (quotient
, tem
);
3831 emit_jump_insn (gen_jump (label5
));
3833 emit_label (label3
);
3834 expand_inc (adjusted_op0
, const1_rtx
);
3835 emit_label (label4
);
3836 tem
= expand_binop (compute_mode
, sdiv_optab
, adjusted_op0
, op1
,
3837 quotient
, 0, OPTAB_LIB_WIDEN
);
3838 if (tem
!= quotient
)
3839 emit_move_insn (quotient
, tem
);
3840 expand_inc (quotient
, const1_rtx
);
3841 emit_label (label5
);
3846 case EXACT_DIV_EXPR
:
3847 if (op1_is_constant
&& HOST_BITS_PER_WIDE_INT
>= size
)
3849 HOST_WIDE_INT d
= INTVAL (op1
);
3850 unsigned HOST_WIDE_INT ml
;
3854 pre_shift
= floor_log2 (d
& -d
);
3855 ml
= invert_mod2n (d
>> pre_shift
, size
);
3856 t1
= expand_shift (RSHIFT_EXPR
, compute_mode
, op0
,
3857 build_int_2 (pre_shift
, 0), NULL_RTX
, unsignedp
);
3858 quotient
= expand_mult (compute_mode
, t1
,
3859 gen_int_mode (ml
, compute_mode
),
3862 insn
= get_last_insn ();
3863 set_unique_reg_note (insn
,
3865 gen_rtx_fmt_ee (unsignedp
? UDIV
: DIV
,
3871 case ROUND_DIV_EXPR
:
3872 case ROUND_MOD_EXPR
:
3877 label
= gen_label_rtx ();
3878 quotient
= gen_reg_rtx (compute_mode
);
3879 remainder
= gen_reg_rtx (compute_mode
);
3880 if (expand_twoval_binop (udivmod_optab
, op0
, op1
, quotient
, remainder
, 1) == 0)
3883 quotient
= expand_binop (compute_mode
, udiv_optab
, op0
, op1
,
3884 quotient
, 1, OPTAB_LIB_WIDEN
);
3885 tem
= expand_mult (compute_mode
, quotient
, op1
, NULL_RTX
, 1);
3886 remainder
= expand_binop (compute_mode
, sub_optab
, op0
, tem
,
3887 remainder
, 1, OPTAB_LIB_WIDEN
);
3889 tem
= plus_constant (op1
, -1);
3890 tem
= expand_shift (RSHIFT_EXPR
, compute_mode
, tem
,
3891 build_int_2 (1, 0), NULL_RTX
, 1);
3892 do_cmp_and_jump (remainder
, tem
, LEU
, compute_mode
, label
);
3893 expand_inc (quotient
, const1_rtx
);
3894 expand_dec (remainder
, op1
);
            rtx abs_rem, abs_op1, tem, mask;
            rtx label;
            label = gen_label_rtx ();
            quotient = gen_reg_rtx (compute_mode);
            remainder = gen_reg_rtx (compute_mode);
            if (expand_twoval_binop (sdivmod_optab, op0, op1,
                                     quotient, remainder, 0) == 0)
              {
                quotient = expand_binop (compute_mode, sdiv_optab, op0, op1,
                                         quotient, 0, OPTAB_LIB_WIDEN);
                tem = expand_mult (compute_mode, quotient, op1, NULL_RTX, 0);
                remainder = expand_binop (compute_mode, sub_optab, op0, tem,
                                          remainder, 0, OPTAB_LIB_WIDEN);
              }
            abs_rem = expand_abs (compute_mode, remainder, NULL_RTX, 1, 0);
            abs_op1 = expand_abs (compute_mode, op1, NULL_RTX, 1, 0);
            tem = expand_shift (LSHIFT_EXPR, compute_mode, abs_rem,
                                build_int_2 (1, 0), NULL_RTX, 1);
            do_cmp_and_jump (tem, abs_op1, LTU, compute_mode, label);
            tem = expand_binop (compute_mode, xor_optab, op0, op1,
                                NULL_RTX, 0, OPTAB_WIDEN);
            mask = expand_shift (RSHIFT_EXPR, compute_mode, tem,
                                 build_int_2 (size - 1, 0), NULL_RTX, 0);
            tem = expand_binop (compute_mode, xor_optab, mask, const1_rtx,
                                NULL_RTX, 0, OPTAB_WIDEN);
            tem = expand_binop (compute_mode, sub_optab, tem, mask,
                                NULL_RTX, 0, OPTAB_WIDEN);
            expand_inc (quotient, tem);
            tem = expand_binop (compute_mode, xor_optab, mask, op1,
                                NULL_RTX, 0, OPTAB_WIDEN);
            tem = expand_binop (compute_mode, sub_optab, tem, mask,
                                NULL_RTX, 0, OPTAB_WIDEN);
            expand_dec (remainder, tem);
            emit_label (label);
          }
        return gen_lowpart (mode, rem_flag ? remainder : quotient);

      default:
        abort ();
      }
  if (quotient == 0)
    {
      if (target && GET_MODE (target) != compute_mode)
        target = 0;

      if (rem_flag)
        {
          /* Try to produce the remainder without producing the quotient.
             If we seem to have a divmod pattern that does not require widening,
             don't try widening here.  We should really have a WIDEN argument
             to expand_twoval_binop, since what we'd really like to do here is
             1) try a mod insn in compute_mode
             2) try a divmod insn in compute_mode
             3) try a div insn in compute_mode and multiply-subtract to get
                the remainder
             4) try the same things with widening allowed.  */
          remainder
            = sign_expand_binop (compute_mode, umod_optab, smod_optab,
                                 op0, op1, target,
                                 unsignedp,
                                 ((optab2->handlers[(int) compute_mode].insn_code
                                   != CODE_FOR_nothing)
                                  ? OPTAB_DIRECT : OPTAB_WIDEN));
          if (remainder == 0)
            {
              /* No luck there.  Can we do remainder and divide at once
                 without a library call?  */
              remainder = gen_reg_rtx (compute_mode);
              if (! expand_twoval_binop ((unsignedp
                                          ? udivmod_optab
                                          : sdivmod_optab),
                                         op0, op1,
                                         NULL_RTX, remainder, unsignedp))
                remainder = 0;
            }

          if (remainder)
            return gen_lowpart (mode, remainder);
        }
      /* Produce the quotient.  Try a quotient insn, but not a library call.
         If we have a divmod in this mode, use it in preference to widening
         the div (for this test we assume it will not fail).  Note that optab2
         is set to the one of the two optabs that the call below will use.  */
      quotient
        = sign_expand_binop (compute_mode, udiv_optab, sdiv_optab,
                             op0, op1, rem_flag ? NULL_RTX : target,
                             unsignedp,
                             ((optab2->handlers[(int) compute_mode].insn_code
                               != CODE_FOR_nothing)
                              ? OPTAB_DIRECT : OPTAB_WIDEN));

      if (quotient == 0)
        {
          /* No luck there.  Try a quotient-and-remainder insn,
             keeping the quotient alone.  */
          quotient = gen_reg_rtx (compute_mode);
          if (! expand_twoval_binop (unsignedp ? udivmod_optab : sdivmod_optab,
                                     op0, op1,
                                     quotient, NULL_RTX, unsignedp))
            {
              quotient = 0;
              /* Still no luck.  If we are not computing the remainder,
                 use a library call for the quotient.  */
              if (! rem_flag)
                quotient = sign_expand_binop (compute_mode,
                                              udiv_optab, sdiv_optab,
                                              op0, op1, target,
                                              unsignedp, OPTAB_LIB_WIDEN);
            }
        }
    }
  if (rem_flag)
    {
      if (target && GET_MODE (target) != compute_mode)
        target = 0;

      if (quotient == 0)
        /* No divide instruction either.  Use library for remainder.  */
        remainder = sign_expand_binop (compute_mode, umod_optab, smod_optab,
                                       op0, op1, target,
                                       unsignedp, OPTAB_LIB_WIDEN);
      else
        {
          /* We divided.  Now finish doing X - Y * (X / Y).  */
          remainder = expand_mult (compute_mode, quotient, op1,
                                   NULL_RTX, unsignedp);
          remainder = expand_binop (compute_mode, sub_optab, op0,
                                    remainder, target, unsignedp,
                                    OPTAB_LIB_WIDEN);
        }
    }

  return gen_lowpart (mode, rem_flag ? remainder : quotient);
}
/* Return a tree node with data type TYPE, describing the value of X.
   Usually this is an RTL_EXPR, if there is no obvious better choice.
   X may be an expression; however, we only support those expressions
   generated by loop.c.  */
tree
make_tree (type, x)
     tree type;
     rtx x;
{
  tree t;

  switch (GET_CODE (x))
    {
    case CONST_INT:
      t = build_int_2 (INTVAL (x),
                       (TREE_UNSIGNED (type)
                        && (GET_MODE_BITSIZE (TYPE_MODE (type))
                            < HOST_BITS_PER_WIDE_INT))
                       || INTVAL (x) >= 0 ? 0 : -1);
      TREE_TYPE (t) = type;
      return t;

    case CONST_DOUBLE:
      if (GET_MODE (x) == VOIDmode)
        {
          t = build_int_2 (CONST_DOUBLE_LOW (x), CONST_DOUBLE_HIGH (x));
          TREE_TYPE (t) = type;
        }
      else
        {
          REAL_VALUE_TYPE d;

          REAL_VALUE_FROM_CONST_DOUBLE (d, x);
          t = build_real (type, d);
        }

      return t;

    case CONST_VECTOR:
      {
        int i, units;
        rtx elt;
        tree t = NULL_TREE;

        units = CONST_VECTOR_NUNITS (x);

        /* Build a tree with vector elements.  */
        for (i = units - 1; i >= 0; --i)
          {
            elt = CONST_VECTOR_ELT (x, i);
            t = tree_cons (NULL_TREE, make_tree (type, elt), t);
          }

        return build_vector (type, t);
      }

    case PLUS:
      return fold (build (PLUS_EXPR, type, make_tree (type, XEXP (x, 0)),
                          make_tree (type, XEXP (x, 1))));

    case MINUS:
      return fold (build (MINUS_EXPR, type, make_tree (type, XEXP (x, 0)),
                          make_tree (type, XEXP (x, 1))));

    case NEG:
      return fold (build1 (NEGATE_EXPR, type,
                           make_tree (type, XEXP (x, 0))));

    case MULT:
      return fold (build (MULT_EXPR, type, make_tree (type, XEXP (x, 0)),
                          make_tree (type, XEXP (x, 1))));

    case ASHIFT:
      return fold (build (LSHIFT_EXPR, type, make_tree (type, XEXP (x, 0)),
                          make_tree (type, XEXP (x, 1))));

    case LSHIFTRT:
      t = (*lang_hooks.types.unsigned_type) (type);
      return fold (convert (type,
                            build (RSHIFT_EXPR, t,
                                   make_tree (t, XEXP (x, 0)),
                                   make_tree (type, XEXP (x, 1)))));

    case ASHIFTRT:
      t = (*lang_hooks.types.signed_type) (type);
      return fold (convert (type,
                            build (RSHIFT_EXPR, t,
                                   make_tree (t, XEXP (x, 0)),
                                   make_tree (type, XEXP (x, 1)))));

    case DIV:
      if (TREE_CODE (type) != REAL_TYPE)
        t = (*lang_hooks.types.signed_type) (type);
      else
        t = type;

      return fold (convert (type,
                            build (TRUNC_DIV_EXPR, t,
                                   make_tree (t, XEXP (x, 0)),
                                   make_tree (t, XEXP (x, 1)))));

    case UDIV:
      t = (*lang_hooks.types.unsigned_type) (type);
      return fold (convert (type,
                            build (TRUNC_DIV_EXPR, t,
                                   make_tree (t, XEXP (x, 0)),
                                   make_tree (t, XEXP (x, 1)))));

    case SIGN_EXTEND:
    case ZERO_EXTEND:
      t = (*lang_hooks.types.type_for_mode) (GET_MODE (XEXP (x, 0)),
                                             GET_CODE (x) == ZERO_EXTEND);
      return fold (convert (type, make_tree (t, XEXP (x, 0))));

    default:
      t = make_node (RTL_EXPR);
      TREE_TYPE (t) = type;

#ifdef POINTERS_EXTEND_UNSIGNED
      /* If TYPE is a POINTER_TYPE, X might be Pmode with TYPE_MODE being
         ptr_mode.  So convert.  */
      if (POINTER_TYPE_P (type) && GET_MODE (x) != TYPE_MODE (type))
        x = convert_memory_address (TYPE_MODE (type), x);
#endif

      RTL_EXPR_RTL (t) = x;
      /* There are no insns to be output
         when this rtl_expr is used.  */
      RTL_EXPR_SEQUENCE (t) = 0;
      return t;
    }
}
/* Check whether the multiplication X * MULT + ADD overflows.
   X, MULT and ADD must be CONST_*.
   MODE is the machine mode for the computation.
   X and MULT must have mode MODE.  ADD may have a different mode.
   So can X (defaults to same as MODE).
   UNSIGNEDP is nonzero to do unsigned multiplication.  */

bool
const_mult_add_overflow_p (x, mult, add, mode, unsignedp)
     rtx x, mult, add;
     enum machine_mode mode;
     int unsignedp;
{
  tree type, mult_type, add_type, result;

  type = (*lang_hooks.types.type_for_mode) (mode, unsignedp);

  /* In order to get a proper overflow indication from an unsigned
     type, we have to pretend that it's a sizetype.  */
  mult_type = type;
  if (TREE_UNSIGNED (type))
    {
      mult_type = copy_node (type);
      TYPE_IS_SIZETYPE (mult_type) = 1;
    }

  add_type = (GET_MODE (add) == VOIDmode ? mult_type
              : (*lang_hooks.types.type_for_mode) (GET_MODE (add), unsignedp));

  result = fold (build (PLUS_EXPR, mult_type,
                        fold (build (MULT_EXPR, mult_type,
                                     make_tree (mult_type, x),
                                     make_tree (mult_type, mult))),
                        make_tree (add_type, add)));

  return TREE_CONSTANT_OVERFLOW (result);
}
/* Return an rtx representing the value of X * MULT + ADD.
   TARGET is a suggestion for where to store the result (an rtx).
   MODE is the machine mode for the computation.
   X and MULT must have mode MODE.  ADD may have a different mode.
   So can X (defaults to same as MODE).
   UNSIGNEDP is nonzero to do unsigned multiplication.
   This may emit insns.  */

rtx
expand_mult_add (x, target, mult, add, mode, unsignedp)
     rtx x, target, mult, add;
     enum machine_mode mode;
     int unsignedp;
{
  tree type = (*lang_hooks.types.type_for_mode) (mode, unsignedp);
  tree add_type = (GET_MODE (add) == VOIDmode
                   ? type : (*lang_hooks.types.type_for_mode) (GET_MODE (add),
                                                               unsignedp));
  tree result = fold (build (PLUS_EXPR, type,
                             fold (build (MULT_EXPR, type,
                                          make_tree (type, x),
                                          make_tree (type, mult))),
                             make_tree (add_type, add)));

  return expand_expr (result, target, VOIDmode, 0);
}
/* Compute the logical-and of OP0 and OP1, storing it in TARGET
   and returning TARGET.

   If TARGET is 0, a pseudo-register or constant is returned.  */

rtx
expand_and (mode, op0, op1, target)
     enum machine_mode mode;
     rtx op0, op1, target;
{
  rtx tem = 0;

  if (GET_MODE (op0) == VOIDmode && GET_MODE (op1) == VOIDmode)
    tem = simplify_binary_operation (AND, mode, op0, op1);
  if (tem == 0)
    tem = expand_binop (mode, and_optab, op0, op1, target, 0, OPTAB_LIB_WIDEN);

  if (target == 0)
    target = tem;
  else if (tem != target)
    emit_move_insn (target, tem);
  return target;
}
/* Emit a store-flags instruction for comparison CODE on OP0 and OP1
   and storing in TARGET.  Normally return TARGET.
   Return 0 if that cannot be done.

   MODE is the mode to use for OP0 and OP1 should they be CONST_INTs.  If
   it is VOIDmode, they cannot both be CONST_INT.

   UNSIGNEDP is for the case where we have to widen the operands
   to perform the operation.  It says to use zero-extension.

   NORMALIZEP is 1 if we should convert the result to be either zero
   or one.  NORMALIZEP is -1 if we should convert the result to be
   either zero or -1.  If NORMALIZEP is zero, the result will be left
   "raw" out of the scc insn.  */
rtx
emit_store_flag (target, code, op0, op1, mode, unsignedp, normalizep)
     rtx target;
     enum rtx_code code;
     rtx op0, op1;
     enum machine_mode mode;
     int unsignedp;
     int normalizep;
{
  rtx subtarget;
  enum insn_code icode;
  enum machine_mode compare_mode;
  enum machine_mode target_mode = GET_MODE (target);
  rtx tem;
  rtx last = get_last_insn ();
  rtx pattern, comparison;

  /* ??? Ok to do this and then fail?  */
  op0 = protect_from_queue (op0, 0);
  op1 = protect_from_queue (op1, 0);

  if (unsignedp)
    code = unsigned_condition (code);
  /* If one operand is constant, make it the second one.  Only do this
     if the other operand is not constant as well.  */

  if (swap_commutative_operands_p (op0, op1))
    {
      tem = op0;
      op0 = op1;
      op1 = tem;
      code = swap_condition (code);
    }

  if (mode == VOIDmode)
    mode = GET_MODE (op0);
  /* For some comparisons with 1 and -1, we can convert this to
     comparisons with zero.  This will often produce more opportunities for
     store-flag insns.  */
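  /* Concretely: x < 1 is x <= 0 and x <= -1 is x < 0; x >= 1 is x > 0
     and x > -1 is x >= 0; and for unsigned x, x >= 1 is x != 0 while
     x < 1 is x == 0.  */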
  switch (code)
    {
    case LT:
      if (op1 == const1_rtx)
        op1 = const0_rtx, code = LE;
      break;
    case LE:
      if (op1 == constm1_rtx)
        op1 = const0_rtx, code = LT;
      break;
    case GE:
      if (op1 == const1_rtx)
        op1 = const0_rtx, code = GT;
      break;
    case GT:
      if (op1 == constm1_rtx)
        op1 = const0_rtx, code = GE;
      break;
    case GEU:
      if (op1 == const1_rtx)
        op1 = const0_rtx, code = NE;
      break;
    case LTU:
      if (op1 == const1_rtx)
        op1 = const0_rtx, code = EQ;
      break;
    default:
      break;
    }
  /* If we are comparing a double-word integer with zero, we can convert
     the comparison into one involving a single word.  */
  if (GET_MODE_BITSIZE (mode) == BITS_PER_WORD * 2
      && GET_MODE_CLASS (mode) == MODE_INT
      && op1 == const0_rtx
      && (GET_CODE (op0) != MEM || ! MEM_VOLATILE_P (op0)))
    {
      if (code == EQ || code == NE)
        {
          /* Do a logical OR of the two words and compare the result.  */
          rtx op0h = gen_highpart (word_mode, op0);
          rtx op0l = gen_lowpart (word_mode, op0);
          rtx op0both = expand_binop (word_mode, ior_optab, op0h, op0l,
                                      NULL_RTX, unsignedp, OPTAB_DIRECT);
          if (op0both != 0)
            return emit_store_flag (target, code, op0both, op1, word_mode,
                                    unsignedp, normalizep);
        }
      else if (code == LT || code == GE)
        /* If testing the sign bit, can just test on high word.  */
        return emit_store_flag (target, code, gen_highpart (word_mode, op0),
                                op1, word_mode, unsignedp, normalizep);
    }
  /* From now on, we won't change CODE, so set ICODE now.  */
  icode = setcc_gen_code[(int) code];
  /* If this is A < 0 or A >= 0, we can do this by taking the ones'
     complement of A (for GE) and shifting the sign bit to the low bit.  */
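  /* For example, in an 8-bit mode, A < 0 is the logical shift
     (unsigned) A >> 7, which is 1 exactly when the sign bit is set;
     for A >= 0 the complement ~A is shifted instead.  When NORMALIZEP
     is -1, an arithmetic shift is used so the result is 0 or -1.  */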
  if (op1 == const0_rtx && (code == LT || code == GE)
      && GET_MODE_CLASS (mode) == MODE_INT
      && (normalizep || STORE_FLAG_VALUE == 1
          || (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
              && ((STORE_FLAG_VALUE & GET_MODE_MASK (mode))
                  == (unsigned HOST_WIDE_INT) 1
                     << (GET_MODE_BITSIZE (mode) - 1)))))
    {
      subtarget = target;

      /* If the result is to be wider than OP0, it is best to convert it
         first.  If it is to be narrower, it is *incorrect* to convert it
         first.  */
      if (GET_MODE_SIZE (target_mode) > GET_MODE_SIZE (mode))
        {
          op0 = protect_from_queue (op0, 0);
          op0 = convert_modes (target_mode, mode, op0, 0);
          mode = target_mode;
        }

      if (target_mode != mode)
        subtarget = 0;

      if (code == GE)
        op0 = expand_unop (mode, one_cmpl_optab, op0,
                           ((STORE_FLAG_VALUE == 1 || normalizep)
                            ? 0 : subtarget), 0);

      if (STORE_FLAG_VALUE == 1 || normalizep)
        /* If we are supposed to produce a 0/1 value, we want to do
           a logical shift from the sign bit to the low-order bit; for
           a -1/0 value, we do an arithmetic shift.  */
        op0 = expand_shift (RSHIFT_EXPR, mode, op0,
                            size_int (GET_MODE_BITSIZE (mode) - 1),
                            subtarget, normalizep != -1);

      if (mode != target_mode)
        op0 = convert_modes (target_mode, mode, op0, 0);

      return op0;
    }
  if (icode != CODE_FOR_nothing)
    {
      insn_operand_predicate_fn pred;

      /* We think we may be able to do this with a scc insn.  Emit the
         comparison and then the scc insn.

         compare_from_rtx may call emit_queue, which would be deleted below
         if the scc insn fails.  So call it ourselves before setting LAST.
         Likewise for do_pending_stack_adjust.  */

      emit_queue ();
      do_pending_stack_adjust ();
      last = get_last_insn ();

      comparison
        = compare_from_rtx (op0, op1, code, unsignedp, mode, NULL_RTX);
      if (GET_CODE (comparison) == CONST_INT)
        return (comparison == const0_rtx ? const0_rtx
                : normalizep == 1 ? const1_rtx
                : normalizep == -1 ? constm1_rtx
                : const_true_rtx);

      /* The code of COMPARISON may not match CODE if compare_from_rtx
         decided to swap its operands and reverse the original code.

         We know that compare_from_rtx returns either a CONST_INT or
         a new comparison code, so it is safe to just extract the
         code from COMPARISON.  */
      code = GET_CODE (comparison);

      /* Get a reference to the target in the proper mode for this insn.  */
      compare_mode = insn_data[(int) icode].operand[0].mode;
      subtarget = target;
      pred = insn_data[(int) icode].operand[0].predicate;
      if (preserve_subexpressions_p ()
          || ! (*pred) (subtarget, compare_mode))
        subtarget = gen_reg_rtx (compare_mode);

      pattern = GEN_FCN (icode) (subtarget);
      if (pattern)
        {
          emit_insn (pattern);
          /* If we are converting to a wider mode, first convert to
             TARGET_MODE, then normalize.  This produces better combining
             opportunities on machines that have a SIGN_EXTRACT when we are
             testing a single bit.  This mostly benefits the 68k.

             If STORE_FLAG_VALUE does not have the sign bit set when
             interpreted in COMPARE_MODE, we can do this conversion as
             unsigned, which is usually more efficient.  */
          if (GET_MODE_SIZE (target_mode) > GET_MODE_SIZE (compare_mode))
            {
              convert_move (target, subtarget,
                            (GET_MODE_BITSIZE (compare_mode)
                             <= HOST_BITS_PER_WIDE_INT)
                            && 0 == (STORE_FLAG_VALUE
                                     & ((HOST_WIDE_INT) 1
                                        << (GET_MODE_BITSIZE (compare_mode)
                                            - 1))));
              op0 = target;
              compare_mode = target_mode;
            }
          else
            op0 = subtarget;
          /* If we want to keep subexpressions around, don't reuse our
             last target.  */
          if (preserve_subexpressions_p ())
            subtarget = 0;

          /* Now normalize to the proper value in COMPARE_MODE.  Sometimes
             we don't have to do anything.  */
          if (normalizep == 0 || normalizep == STORE_FLAG_VALUE)
            ;
          /* STORE_FLAG_VALUE might be the most negative number, so write
             the comparison this way to avoid a compiler-time warning.  */
          else if (- normalizep == STORE_FLAG_VALUE)
            op0 = expand_unop (compare_mode, neg_optab, op0, subtarget, 0);

          /* We don't want to use STORE_FLAG_VALUE < 0 below since this
             makes it hard to use a value of just the sign bit due to
             ANSI integer constant typing rules.  */
          else if (GET_MODE_BITSIZE (compare_mode) <= HOST_BITS_PER_WIDE_INT
                   && (STORE_FLAG_VALUE
                       & ((HOST_WIDE_INT) 1
                          << (GET_MODE_BITSIZE (compare_mode) - 1))))
            op0 = expand_shift (RSHIFT_EXPR, compare_mode, op0,
                                size_int (GET_MODE_BITSIZE (compare_mode) - 1),
                                subtarget, normalizep == 1);
          else if (STORE_FLAG_VALUE & 1)
            {
              op0 = expand_and (compare_mode, op0, const1_rtx, subtarget);
              if (normalizep == -1)
                op0 = expand_unop (compare_mode, neg_optab, op0, op0, 0);
            }
          else
            abort ();

          /* If we were converting to a smaller mode, do the
             conversion now.  */
          if (target_mode != compare_mode)
            {
              convert_move (target, op0, 0);
              return target;
            }
          else
            return op0;
        }
      delete_insns_since (last);
    }

  /* If expensive optimizations, use different pseudo registers for each
     insn, instead of reusing the same pseudo.  This leads to better CSE,
     but slows down the compiler, since there are more pseudos.  */
  subtarget = (!flag_expensive_optimizations
               && (target_mode == mode)) ? target : NULL_RTX;
  /* If we reached here, we can't do this with a scc insn.  However, there
     are some comparisons that can be done directly.  For example, if
     this is an equality comparison of integers, we can try to exclusive-or
     (or subtract) the two operands and use a recursive call to try the
     comparison with zero.  Don't do any of these cases if branches are
     very cheap.  */

  if (BRANCH_COST > 0
      && GET_MODE_CLASS (mode) == MODE_INT && (code == EQ || code == NE)
      && op1 != const0_rtx)
    {
      tem = expand_binop (mode, xor_optab, op0, op1, subtarget, 1,
                          OPTAB_WIDEN);

      if (tem == 0)
        tem = expand_binop (mode, sub_optab, op0, op1, subtarget, 1,
                            OPTAB_WIDEN);
      if (tem != 0)
        tem = emit_store_flag (target, code, tem, const0_rtx,
                               mode, unsignedp, normalizep);
      if (tem == 0)
        delete_insns_since (last);
      return tem;
    }
  /* Some other cases we can do are EQ, NE, LE, and GT comparisons with
     the constant zero.  Reject all other comparisons at this point.  Only
     do LE and GT if branches are expensive since they are expensive on
     2-operand machines.  */

  if (BRANCH_COST == 0
      || GET_MODE_CLASS (mode) != MODE_INT || op1 != const0_rtx
      || (code != EQ && code != NE
          && (BRANCH_COST <= 1 || (code != LE && code != GT))))
    return 0;
  /* See what we need to return.  We can only return a 1, -1, or the
     sign bit.  */

  if (normalizep == 0)
    {
      if (STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
        normalizep = STORE_FLAG_VALUE;

      else if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
               && ((STORE_FLAG_VALUE & GET_MODE_MASK (mode))
                   == (unsigned HOST_WIDE_INT) 1
                      << (GET_MODE_BITSIZE (mode) - 1)))
        ;
      else
        return 0;
    }
  /* Try to put the result of the comparison in the sign bit.  Assume we can't
     do the necessary operation below.  */

  tem = 0;

  /* To see if A <= 0, compute (A | (A - 1)).  A <= 0 iff that result has
     the sign bit set.  */
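  /* For example, with 8-bit values: 3 | 2 == 3 has a clear sign bit,
     while 0 | -1 == -1 and -5 | -6 == -5 both have it set, so the sign
     bit of A | (A - 1) is set exactly when A <= 0.  */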
  if (code == LE)
    {
      /* This is destructive, so SUBTARGET can't be OP0.  */
      if (rtx_equal_p (subtarget, op0))
        subtarget = 0;

      tem = expand_binop (mode, sub_optab, op0, const1_rtx, subtarget, 0,
                          OPTAB_WIDEN);
      if (tem)
        tem = expand_binop (mode, ior_optab, op0, tem, subtarget, 0,
                            OPTAB_WIDEN);
    }
  /* To see if A > 0, compute (((signed) A) >> BITS) - A, where BITS is the
     number of bits in the mode of OP0, minus one.  */
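  /* For example, with 8-bit values: for A == 3, (3 >> 7) - 3 == -3 has
     the sign bit set; for A == -3, (-3 >> 7) - (-3) == -1 + 3 == 2 has
     it clear, as does 0 for A == 0; so the sign bit of the result is
     set exactly when A > 0.  */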
  if (code == GT)
    {
      /* This is destructive, so SUBTARGET can't be OP0.  */
      if (rtx_equal_p (subtarget, op0))
        subtarget = 0;

      tem = expand_shift (RSHIFT_EXPR, mode, op0,
                          size_int (GET_MODE_BITSIZE (mode) - 1),
                          subtarget, 0);
      tem = expand_binop (mode, sub_optab, tem, op0, subtarget, 0,
                          OPTAB_WIDEN);
    }

  if (code == EQ || code == NE)
    {
      /* For EQ or NE, one way to do the comparison is to apply an operation
         that converts the operand into a positive number if it is nonzero
         or zero if it was originally zero.  Then, for EQ, we subtract 1 and
         for NE we negate.  This puts the result in the sign bit.  Then we
         normalize with a shift, if needed.

         Two operations that can do the above actions are ABS and FFS, so try
         them.  If that doesn't work, and MODE is smaller than a full word,
         we can use zero-extension to the wider mode (an unsigned conversion)
         as the operation.  */
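      /* For example, with 8-bit values and ABS as the operation: for EQ,
         A == 5 gives abs (5) - 1 == 4 (sign bit clear) while A == 0 gives
         0 - 1 == -1 (sign bit set); for NE, -abs (5) == -5 has the sign
         bit set while -abs (0) == 0 does not.  */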
      /* Note that ABS doesn't yield a positive number for INT_MIN, but
         that is compensated by the subsequent overflow when subtracting
         one / negating.  */
      if (abs_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
        tem = expand_unop (mode, abs_optab, op0, subtarget, 1);
      else if (ffs_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
        tem = expand_unop (mode, ffs_optab, op0, subtarget, 1);
      else if (GET_MODE_SIZE (mode) < UNITS_PER_WORD)
        {
          op0 = protect_from_queue (op0, 0);
          tem = convert_modes (word_mode, mode, op0, 1);
          mode = word_mode;
        }
      else
        tem = 0;

      if (tem != 0)
        {
          if (code == EQ)
            tem = expand_binop (mode, sub_optab, tem, const1_rtx, subtarget,
                                0, OPTAB_WIDEN);
          else
            tem = expand_unop (mode, neg_optab, tem, subtarget, 0);
        }
      /* If we couldn't do it that way, for NE we can "or" the two's complement
         of the value with itself.  For EQ, we take the one's complement of
         that "or", which is an extra insn, so we only handle EQ if branches
         are expensive.  */
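      /* For example, with 8-bit values: -5 | 5 is 0xfb | 0x05 == 0xff,
         which has the sign bit set, while -0 | 0 == 0 does not; so the
         sign bit of (-A) | A is set exactly when A != 0.  */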
      if (tem == 0 && (code == NE || BRANCH_COST > 1))
        {
          if (rtx_equal_p (subtarget, op0))
            subtarget = 0;

          tem = expand_unop (mode, neg_optab, op0, subtarget, 0);
          tem = expand_binop (mode, ior_optab, tem, op0, subtarget, 0,
                              OPTAB_WIDEN);

          if (tem && code == EQ)
            tem = expand_unop (mode, one_cmpl_optab, tem, subtarget, 0);
        }

      if (tem && normalizep)
        tem = expand_shift (RSHIFT_EXPR, mode, tem,
                            size_int (GET_MODE_BITSIZE (mode) - 1),
                            subtarget, normalizep == 1);
    }
  if (tem)
    {
      if (GET_MODE (tem) != target_mode)
        {
          convert_move (target, tem, 0);
          tem = target;
        }
      else if (!subtarget)
        {
          emit_move_insn (target, tem);
          tem = target;
        }
    }

  if (tem == 0)
    delete_insns_since (last);

  return tem;
}
/* Like emit_store_flag, but always succeeds.  */

rtx
emit_store_flag_force (target, code, op0, op1, mode, unsignedp, normalizep)
     rtx target;
     enum rtx_code code;
     rtx op0, op1;
     enum machine_mode mode;
     int unsignedp;
     int normalizep;
{
  rtx tem, label;

  /* First see if emit_store_flag can do the job.  */
  tem = emit_store_flag (target, code, op0, op1, mode, unsignedp, normalizep);
  if (tem != 0)
    return tem;

  if (normalizep == 0)
    normalizep = 1;

  /* If this failed, we have to do this with set/compare/jump/set code.  */

  if (GET_CODE (target) != REG
      || reg_mentioned_p (target, op0) || reg_mentioned_p (target, op1))
    target = gen_reg_rtx (GET_MODE (target));

  emit_move_insn (target, const1_rtx);
  label = gen_label_rtx ();
  do_compare_rtx_and_jump (op0, op1, code, unsignedp, mode, NULL_RTX,
                           NULL_RTX, label);

  emit_move_insn (target, const0_rtx);
  emit_label (label);

  return target;
}
/* Perform a possibly multi-word comparison and conditional jump to LABEL
   if ARG1 OP ARG2 is true, where ARG1 and ARG2 are of mode MODE.

   The algorithm is based on the code in expr.c:do_jump.

   Note that this does not perform a general comparison.  Only variants
   generated within expmed.c are correctly handled, others abort (but could
   be handled if needed).  */
static void
do_cmp_and_jump (arg1, arg2, op, mode, label)
     rtx arg1, arg2, label;
     enum rtx_code op;
     enum machine_mode mode;
{
  /* If this mode is an integer too wide to compare properly,
     compare word by word.  Rely on cse to optimize constant cases.  */

  if (GET_MODE_CLASS (mode) == MODE_INT
      && ! can_compare_p (op, mode, ccp_jump))
    {
      rtx label2 = gen_label_rtx ();

      switch (op)
        {
        case LTU:
          do_jump_by_parts_greater_rtx (mode, 1, arg2, arg1, label2, label);
          break;

        case LEU:
          do_jump_by_parts_greater_rtx (mode, 1, arg1, arg2, label, label2);
          break;

        case LT:
          do_jump_by_parts_greater_rtx (mode, 0, arg2, arg1, label2, label);
          break;

        case GT:
          do_jump_by_parts_greater_rtx (mode, 0, arg1, arg2, label2, label);
          break;

        case GE:
          do_jump_by_parts_greater_rtx (mode, 0, arg2, arg1, label, label2);
          break;

        case EQ:
          /* do_jump_by_parts_equality_rtx compares with zero.  Luckily
             that's the only equality operation we do.  */
          if (arg2 != const0_rtx || mode != GET_MODE (arg1))
            abort ();
          do_jump_by_parts_equality_rtx (arg1, label2, label);
          break;

        case NE:
          if (arg2 != const0_rtx || mode != GET_MODE (arg1))
            abort ();
          do_jump_by_parts_equality_rtx (arg1, label, label2);
          break;

        default:
          abort ();
        }

      emit_label (label2);
    }
  else
    emit_cmp_and_jump_insns (arg1, arg2, op, NULL_RTX, mode, 0, label);
}