/* Medium-level subroutines: convert bit-field store and extract
   and shifts, multiplies and divides to rtl instructions.
   Copyright (C) 1987, 88, 89, 92-6, 1997 Free Software Foundation, Inc.

This file is part of GNU CC.

GNU CC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.

GNU CC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GNU CC; see the file COPYING.  If not, write to
the Free Software Foundation, 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA.  */
#include "insn-flags.h"
#include "insn-codes.h"
#include "insn-config.h"
static void store_fixed_bit_field	PROTO((rtx, int, int, int, rtx, int));
static void store_split_bit_field	PROTO((rtx, int, int, rtx, int));
static rtx extract_fixed_bit_field	PROTO((enum machine_mode, rtx, int,
					       int, int, rtx, int, int));
static rtx mask_rtx			PROTO((enum machine_mode, int,
					       int, int));
static rtx lshift_value			PROTO((enum machine_mode, rtx,
					       int, int));
static rtx extract_split_bit_field	PROTO((rtx, int, int, int, int));
#define CEIL(x,y) (((x) + (y) - 1) / (y))
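/* For example, CEIL (37, 8) == 5: five 8-bit units are needed to
   cover 37 bits.  */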
/* Non-zero means divides or modulus operations are relatively cheap for
   powers of two, so don't use branches; emit the operation instead.
   Usually, this will mean that the MD file will emit non-branch
   conditional insns.  */

static int sdiv_pow2_cheap, smod_pow2_cheap;
#ifndef SLOW_UNALIGNED_ACCESS
#define SLOW_UNALIGNED_ACCESS STRICT_ALIGNMENT
#endif
/* For compilers that support multiple targets with different word sizes,
   MAX_BITS_PER_WORD contains the biggest value of BITS_PER_WORD.  An example
   is the H8/300(H) compiler.  */

#ifndef MAX_BITS_PER_WORD
#define MAX_BITS_PER_WORD BITS_PER_WORD
#endif
/* Cost of various pieces of RTL.  Note that some of these are indexed by
   shift count and some by mode.  */
static int add_cost, negate_cost, zero_cost;
static int shift_cost[MAX_BITS_PER_WORD];
static int shiftadd_cost[MAX_BITS_PER_WORD];
static int shiftsub_cost[MAX_BITS_PER_WORD];
static int mul_cost[NUM_MACHINE_MODES];
static int div_cost[NUM_MACHINE_MODES];
static int mul_widen_cost[NUM_MACHINE_MODES];
static int mul_highpart_cost[NUM_MACHINE_MODES];
void
init_expmed ()
{
  char *free_point;
  /* This is "some random pseudo register" for purposes of calling recog
     to see what insns exist.  */
  rtx reg = gen_rtx (REG, word_mode, 10000);
  rtx shift_insn, shiftadd_insn, shiftsub_insn;
  int dummy;
  int m;
  enum machine_mode mode, wider_mode;

  start_sequence ();

  /* Since we are on the permanent obstack, we must be sure we save this
     spot AFTER we call start_sequence, since it will reuse the rtl it
     makes.  */

  free_point = (char *) oballoc (0);

  zero_cost = rtx_cost (const0_rtx, 0);
  add_cost = rtx_cost (gen_rtx (PLUS, word_mode, reg, reg), SET);

  shift_insn = emit_insn (gen_rtx (SET, VOIDmode, reg,
				   gen_rtx (ASHIFT, word_mode, reg,
					    const0_rtx)));

  shiftadd_insn = emit_insn (gen_rtx (SET, VOIDmode, reg,
				      gen_rtx (PLUS, word_mode,
					       gen_rtx (MULT, word_mode,
							reg, const0_rtx),
					       reg)));

  shiftsub_insn = emit_insn (gen_rtx (SET, VOIDmode, reg,
				      gen_rtx (MINUS, word_mode,
					       gen_rtx (MULT, word_mode,
							reg, const0_rtx),
					       reg)));

  init_recog ();

  shift_cost[0] = 0;
  shiftadd_cost[0] = shiftsub_cost[0] = add_cost;

  for (m = 1; m < BITS_PER_WORD; m++)
    {
      shift_cost[m] = shiftadd_cost[m] = shiftsub_cost[m] = 32000;

      XEXP (SET_SRC (PATTERN (shift_insn)), 1) = GEN_INT (m);
      if (recog (PATTERN (shift_insn), shift_insn, &dummy) >= 0)
	shift_cost[m] = rtx_cost (SET_SRC (PATTERN (shift_insn)), SET);

      XEXP (XEXP (SET_SRC (PATTERN (shiftadd_insn)), 0), 1)
	= GEN_INT ((HOST_WIDE_INT) 1 << m);
      if (recog (PATTERN (shiftadd_insn), shiftadd_insn, &dummy) >= 0)
	shiftadd_cost[m] = rtx_cost (SET_SRC (PATTERN (shiftadd_insn)), SET);

      XEXP (XEXP (SET_SRC (PATTERN (shiftsub_insn)), 0), 1)
	= GEN_INT ((HOST_WIDE_INT) 1 << m);
      if (recog (PATTERN (shiftsub_insn), shiftsub_insn, &dummy) >= 0)
	shiftsub_cost[m] = rtx_cost (SET_SRC (PATTERN (shiftsub_insn)), SET);
    }

  negate_cost = rtx_cost (gen_rtx (NEG, word_mode, reg), SET);

  sdiv_pow2_cheap
    = (rtx_cost (gen_rtx (DIV, word_mode, reg, GEN_INT (32)), SET)
       <= 2 * add_cost);
  smod_pow2_cheap
    = (rtx_cost (gen_rtx (MOD, word_mode, reg, GEN_INT (32)), SET)
       <= 2 * add_cost);

  for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT);
       mode != VOIDmode;
       mode = GET_MODE_WIDER_MODE (mode))
    {
      reg = gen_rtx (REG, mode, 10000);
      div_cost[(int) mode] = rtx_cost (gen_rtx (UDIV, mode, reg, reg), SET);
      mul_cost[(int) mode] = rtx_cost (gen_rtx (MULT, mode, reg, reg), SET);
      wider_mode = GET_MODE_WIDER_MODE (mode);
      if (wider_mode != VOIDmode)
	{
	  mul_widen_cost[(int) wider_mode]
	    = rtx_cost (gen_rtx (MULT, wider_mode,
				 gen_rtx (ZERO_EXTEND, wider_mode, reg),
				 gen_rtx (ZERO_EXTEND, wider_mode, reg)),
			SET);
	  mul_highpart_cost[(int) mode]
	    = rtx_cost (gen_rtx (TRUNCATE, mode,
				 gen_rtx (LSHIFTRT, wider_mode,
					  gen_rtx (MULT, wider_mode,
						   gen_rtx (ZERO_EXTEND,
							    wider_mode, reg),
						   gen_rtx (ZERO_EXTEND,
							    wider_mode, reg)),
					  GEN_INT (GET_MODE_BITSIZE (mode)))),
			SET);
	}
    }

  /* Free the objects we just allocated.  */
  end_sequence ();
  obfree (free_point);
}
/* Return an rtx representing minus the value of X.
   MODE is the intended mode of the result,
   useful if X is a CONST_INT.  */

rtx
negate_rtx (mode, x)
     enum machine_mode mode;
     rtx x;
{
  rtx result = simplify_unary_operation (NEG, mode, x, mode);

  if (result == 0)
    result = expand_unop (mode, neg_optab, x, NULL_RTX, 0);

  return result;
}
/* Generate code to store value from rtx VALUE
   into a bit-field within structure STR_RTX
   containing BITSIZE bits starting at bit BITNUM.
   FIELDMODE is the machine-mode of the FIELD_DECL node for this field.
   ALIGN is the alignment that STR_RTX is known to have, measured in bytes.
   TOTAL_SIZE is the size of the structure in bytes, or -1 if varying.  */

/* ??? Note that there are two different ideas here for how
   to determine the size to count bits within, for a register.
   One is BITS_PER_WORD, and the other is the size of operand 3
   of the insv pattern.  (The latter assumes that an n-bit machine
   will be able to insert bit fields up to n bits wide.)
   It isn't certain that either of these is right.
   extract_bit_field has the same quandary.  */
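/* Illustrative example of the argument conventions: with BITSIZE == 6
   and BITNUM == 10, a MEM operand counts in BITS_PER_UNIT units, so the
   field starts at byte offset 1, bit 2 within that byte; a REG operand
   on a 32-bit target counts in BITS_PER_WORD units, so the field starts
   at word 0, bit 10.  */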
rtx
store_bit_field (str_rtx, bitsize, bitnum, fieldmode, value, align, total_size)
     rtx str_rtx;
     register int bitsize;
     int bitnum;
     enum machine_mode fieldmode;
     rtx value;
     int align;
     int total_size;
{
  int unit = (GET_CODE (str_rtx) == MEM) ? BITS_PER_UNIT : BITS_PER_WORD;
  register int offset = bitnum / unit;
  register int bitpos = bitnum % unit;
  register rtx op0 = str_rtx;

  if (GET_CODE (str_rtx) == MEM && ! MEM_IN_STRUCT_P (str_rtx))
    abort ();

  /* Discount the part of the structure before the desired byte.
     We need to know how many bytes are safe to reference after it.  */
  if (total_size >= 0)
    total_size -= (bitpos / BIGGEST_ALIGNMENT
		   * (BIGGEST_ALIGNMENT / BITS_PER_UNIT));

  while (GET_CODE (op0) == SUBREG)
    {
      /* The following line once was done only if WORDS_BIG_ENDIAN,
	 but I think that is a mistake.  WORDS_BIG_ENDIAN is
	 meaningful at a much higher level; when structures are copied
	 between memory and regs, the higher-numbered regs
	 always get higher addresses.  */
      offset += SUBREG_WORD (op0);
      /* We used to adjust BITPOS here, but now we do the whole adjustment
	 right after the loop.  */
      op0 = SUBREG_REG (op0);
    }

  /* If OP0 is a register, BITPOS must count within a word.
     But as we have it, it counts within whatever size OP0 now has.
     On a bigendian machine, these are not the same, so convert.  */
  if (BYTES_BIG_ENDIAN
      && GET_CODE (op0) != MEM
      && unit > GET_MODE_BITSIZE (GET_MODE (op0)))
    bitpos += unit - GET_MODE_BITSIZE (GET_MODE (op0));

  value = protect_from_queue (value, 0);

  if (flag_force_mem)
    value = force_not_mem (value);

  /* Note that the adjustment of BITPOS above has no effect on whether
     BITPOS is 0 in a REG bigger than a word.  */
  if (GET_MODE_SIZE (fieldmode) >= UNITS_PER_WORD
      && (GET_CODE (op0) != MEM
	  || ! SLOW_UNALIGNED_ACCESS
	  || (offset * BITS_PER_UNIT % bitsize == 0
	      && align % GET_MODE_SIZE (fieldmode) == 0))
      && bitpos == 0 && bitsize == GET_MODE_BITSIZE (fieldmode))
    {
      /* Storing in a full-word or multi-word field in a register
	 can be done with just SUBREG.  */
      if (GET_MODE (op0) != fieldmode)
	{
	  if (GET_CODE (op0) == REG)
	    op0 = gen_rtx (SUBREG, fieldmode, op0, offset);
	  else
	    op0 = change_address (op0, fieldmode,
				  plus_constant (XEXP (op0, 0), offset));
	}
      emit_move_insn (op0, value);
      return value;
    }

  /* Storing an lsb-aligned field in a register
     can be done with a movestrict instruction.  */

  if (GET_CODE (op0) != MEM
      && (BYTES_BIG_ENDIAN ? bitpos + bitsize == unit : bitpos == 0)
      && bitsize == GET_MODE_BITSIZE (fieldmode)
      && (GET_MODE (op0) == fieldmode
	  || (movstrict_optab->handlers[(int) fieldmode].insn_code
	      != CODE_FOR_nothing)))
    {
      /* Get appropriate low part of the value being stored.  */
      if (GET_CODE (value) == CONST_INT || GET_CODE (value) == REG)
	value = gen_lowpart (fieldmode, value);
      else if (!(GET_CODE (value) == SYMBOL_REF
		 || GET_CODE (value) == LABEL_REF
		 || GET_CODE (value) == CONST))
	value = convert_to_mode (fieldmode, value, 0);

      if (GET_MODE (op0) == fieldmode)
	emit_move_insn (op0, value);
      else
	{
	  int icode = movstrict_optab->handlers[(int) fieldmode].insn_code;
	  if (! (*insn_operand_predicate[icode][1]) (value, fieldmode))
	    value = copy_to_mode_reg (fieldmode, value);
	  emit_insn (GEN_FCN (icode)
		     (gen_rtx (SUBREG, fieldmode, op0, offset), value));
	}
      return value;
    }

  /* Handle fields bigger than a word.  */

  if (bitsize > BITS_PER_WORD)
    {
      /* Here we transfer the words of the field
	 in the order least significant first.
	 This is because the most significant word is the one which may
	 be less than full.
	 However, only do that if the value is not BLKmode.  */

      int backwards = WORDS_BIG_ENDIAN && fieldmode != BLKmode;

      int nwords = (bitsize + (BITS_PER_WORD - 1)) / BITS_PER_WORD;
      int i;

      /* This is the mode we must force value to, so that there will be enough
	 subwords to extract.  Note that fieldmode will often (always?) be
	 VOIDmode, because that is what store_field uses to indicate that this
	 is a bit field, but passing VOIDmode to operand_subword_force will
	 result in an abort.  */
      fieldmode = mode_for_size (nwords * BITS_PER_WORD, MODE_INT, 0);

      for (i = 0; i < nwords; i++)
	{
	  /* If I is 0, use the low-order word in both field and target;
	     if I is 1, use the next to lowest word; and so on.  */
	  int wordnum = (backwards ? nwords - i - 1 : i);
	  int bit_offset = (backwards
			    ? MAX (bitsize - (i + 1) * BITS_PER_WORD, 0)
			    : i * BITS_PER_WORD);
	  store_bit_field (op0, MIN (BITS_PER_WORD,
				     bitsize - i * BITS_PER_WORD),
			   bitnum + bit_offset, word_mode,
			   operand_subword_force (value, wordnum,
						  (GET_MODE (value) == VOIDmode
						   ? fieldmode
						   : GET_MODE (value))),
			   align, total_size);
	}
      return value;
    }

  /* From here on we can assume that the field to be stored in is
     a full-word (whatever type that is), since it is shorter than a word.  */

  /* OFFSET is the number of words or bytes (UNIT says which)
     from STR_RTX to the first word or byte containing part of the field.  */

  if (GET_CODE (op0) == REG)
    {
      if (offset != 0
	  || GET_MODE_SIZE (GET_MODE (op0)) > UNITS_PER_WORD)
	op0 = gen_rtx (SUBREG, TYPE_MODE (type_for_size (BITS_PER_WORD, 0)),
		       op0, offset);
      offset = 0;
    }
  else
    op0 = protect_from_queue (op0, 1);

  /* If VALUE is a floating-point mode, access it as an integer of the
     corresponding size.  This can occur on a machine with 64 bit registers
     that uses SFmode for float.  This can also occur for unaligned float
     structure fields.  */
  if (GET_MODE_CLASS (GET_MODE (value)) == MODE_FLOAT)
    {
      if (GET_CODE (value) != REG)
	value = copy_to_reg (value);
      value = gen_rtx (SUBREG, word_mode, value, 0);
    }

  /* Now OFFSET is nonzero only if OP0 is memory
     and is therefore always measured in bytes.  */

#ifdef HAVE_insv
  if (HAVE_insv
      && GET_MODE (value) != BLKmode
      && !(bitsize == 1 && GET_CODE (value) == CONST_INT)
      /* Ensure insv's size is wide enough for this field.  */
      && (GET_MODE_BITSIZE (insn_operand_mode[(int) CODE_FOR_insv][3])
	  >= bitsize)
      && ! ((GET_CODE (op0) == REG || GET_CODE (op0) == SUBREG)
	    && (bitsize + bitpos
		> GET_MODE_BITSIZE (insn_operand_mode[(int) CODE_FOR_insv][3]))))
    {
      int xbitpos = bitpos;
      rtx xop0 = op0;
      rtx last = get_last_insn ();
      rtx pat;
      enum machine_mode maxmode
	= insn_operand_mode[(int) CODE_FOR_insv][3];
      rtx value1;

      int save_volatile_ok = volatile_ok;
      volatile_ok = 1;

      /* If this machine's insv can only insert into a register, copy OP0
	 into a register and save it back later.  */
      /* This used to check flag_force_mem, but that was a serious
	 de-optimization now that flag_force_mem is enabled by -O2.  */
      if (GET_CODE (op0) == MEM
	  && ! ((*insn_operand_predicate[(int) CODE_FOR_insv][0])
		(op0, VOIDmode)))
	{
	  rtx tempreg;
	  enum machine_mode bestmode;

	  /* Get the mode to use for inserting into this field.  If OP0 is
	     BLKmode, get the smallest mode consistent with the alignment. If
	     OP0 is a non-BLKmode object that is no wider than MAXMODE, use its
	     mode. Otherwise, use the smallest mode containing the field.  */

	  if (GET_MODE (op0) == BLKmode
	      || GET_MODE_SIZE (GET_MODE (op0)) > GET_MODE_SIZE (maxmode))
	    bestmode
	      = get_best_mode (bitsize, bitnum, align * BITS_PER_UNIT, maxmode,
			       MEM_VOLATILE_P (op0));
	  else
	    bestmode = GET_MODE (op0);

	  if (bestmode == VOIDmode
	      || (SLOW_UNALIGNED_ACCESS && GET_MODE_SIZE (bestmode) > align))
	    goto insv_loses;

	  /* Adjust address to point to the containing unit of that mode.  */
	  unit = GET_MODE_BITSIZE (bestmode);
	  /* Compute offset as multiple of this unit, counting in bytes.  */
	  offset = (bitnum / unit) * GET_MODE_SIZE (bestmode);
	  bitpos = bitnum % unit;
	  op0 = change_address (op0, bestmode,
				plus_constant (XEXP (op0, 0), offset));

	  /* Fetch that unit, store the bitfield in it, then store the unit.  */
	  tempreg = copy_to_reg (op0);
	  store_bit_field (tempreg, bitsize, bitpos, fieldmode, value,
			   align, total_size);
	  emit_move_insn (op0, tempreg);
	  return value;
	}
      volatile_ok = save_volatile_ok;

      /* Add OFFSET into OP0's address.  */
      if (GET_CODE (xop0) == MEM)
	xop0 = change_address (xop0, byte_mode,
			       plus_constant (XEXP (xop0, 0), offset));

      /* If xop0 is a register, we need it in MAXMODE
	 to make it acceptable to the format of insv.  */
      if (GET_CODE (xop0) == SUBREG)
	/* We can't just change the mode, because this might clobber op0,
	   and we will need the original value of op0 if insv fails.  */
	xop0 = gen_rtx (SUBREG, maxmode, SUBREG_REG (xop0), SUBREG_WORD (xop0));
      if (GET_CODE (xop0) == REG && GET_MODE (xop0) != maxmode)
	xop0 = gen_rtx (SUBREG, maxmode, xop0, 0);

      /* On big-endian machines, we count bits from the most significant.
	 If the bit field insn does not, we must invert.  */

      if (BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
	xbitpos = unit - bitsize - xbitpos;

      /* We have been counting XBITPOS within UNIT.
	 Count instead within the size of the register.  */
      if (BITS_BIG_ENDIAN && GET_CODE (xop0) != MEM)
	xbitpos += GET_MODE_BITSIZE (maxmode) - unit;

      unit = GET_MODE_BITSIZE (maxmode);

      /* Convert VALUE to maxmode (which insv insn wants) in VALUE1.  */
      value1 = value;
      if (GET_MODE (value) != maxmode)
	{
	  if (GET_MODE_BITSIZE (GET_MODE (value)) >= bitsize)
	    {
	      /* Optimization: Don't bother really extending VALUE
		 if it has all the bits we will actually use.  However,
		 if we must narrow it, be sure we do it correctly.  */

	      if (GET_MODE_SIZE (GET_MODE (value)) < GET_MODE_SIZE (maxmode))
		{
		  /* Avoid making subreg of a subreg, or of a mem.  */
		  if (GET_CODE (value1) != REG)
		    value1 = copy_to_reg (value1);
		  value1 = gen_rtx (SUBREG, maxmode, value1, 0);
		}
	      else
		value1 = gen_lowpart (maxmode, value1);
	    }
	  else if (!CONSTANT_P (value))
	    /* Parse phase is supposed to make VALUE's data type
	       match that of the component reference, which is a type
	       at least as wide as the field; so VALUE should have
	       a mode that corresponds to that type.  */
	    abort ();
	}

      /* If this machine's insv insists on a register,
	 get VALUE1 into a register.  */
      if (! ((*insn_operand_predicate[(int) CODE_FOR_insv][3])
	     (value1, maxmode)))
	value1 = force_reg (maxmode, value1);

      pat = gen_insv (xop0, GEN_INT (bitsize), GEN_INT (xbitpos), value1);
      if (pat)
	emit_insn (pat);
      else
	{
	  delete_insns_since (last);
	  store_fixed_bit_field (op0, offset, bitsize, bitpos, value, align);
	}
    }
  else
    insv_loses:
#endif
    /* Insv is not available; store using shifts and boolean ops.  */
    store_fixed_bit_field (op0, offset, bitsize, bitpos, value, align);

  return value;
}
/* Use shifts and boolean operations to store VALUE
   into a bit field of width BITSIZE
   in a memory location specified by OP0 except offset by OFFSET bytes.
     (OFFSET must be 0 if OP0 is a register.)
   The field starts at position BITPOS within the byte.
     (If OP0 is a register, it may be a full word or a narrower mode,
      but BITPOS still counts within a full word,
      which is significant on bigendian machines.)
   STRUCT_ALIGN is the alignment the structure is known to have (in bytes).

   Note that protect_from_queue has already been done on OP0 and VALUE.  */
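/* In scalar terms (an illustrative sketch, assuming the field fits in a
   single 32-bit datum), the sequence emitted below computes

     mask = ((1 << bitsize) - 1) << bitpos;
     datum = (datum & ~mask) | ((value << bitpos) & mask);

   i.e. clear the destination bits with AND, then merge VALUE in with
   IOR.  */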
static void
store_fixed_bit_field (op0, offset, bitsize, bitpos, value, struct_align)
     register rtx op0;
     register int offset, bitsize, bitpos;
     register rtx value;
     int struct_align;
{
  register enum machine_mode mode;
  int total_bits = BITS_PER_WORD;
  rtx subtarget, temp;
  int all_zero = 0;
  int all_one = 0;

  if (! SLOW_UNALIGNED_ACCESS)
    struct_align = BIGGEST_ALIGNMENT / BITS_PER_UNIT;

  /* There is a case not handled here:
     a structure with a known alignment of just a halfword
     and a field split across two aligned halfwords within the structure.
     Or likewise a structure with a known alignment of just a byte
     and a field split across two bytes.
     Such cases are not supposed to be able to occur.  */

  if (GET_CODE (op0) == REG || GET_CODE (op0) == SUBREG)
    {
      /* Special treatment for a bit field split across two registers.  */
      if (bitsize + bitpos > BITS_PER_WORD)
	{
	  store_split_bit_field (op0, bitsize, bitpos,
				 value, BITS_PER_WORD);
	  return;
	}
    }
  else
    {
      /* Get the proper mode to use for this field.  We want a mode that
	 includes the entire field.  If such a mode would be larger than
	 a word, we won't be doing the extraction the normal way.  */

      mode = get_best_mode (bitsize, bitpos + offset * BITS_PER_UNIT,
			    struct_align * BITS_PER_UNIT, word_mode,
			    GET_CODE (op0) == MEM && MEM_VOLATILE_P (op0));

      if (mode == VOIDmode)
	{
	  /* The only way this should occur is if the field spans word
	     boundaries.  */
	  store_split_bit_field (op0,
				 bitsize, bitpos + offset * BITS_PER_UNIT,
				 value, struct_align);
	  return;
	}

      total_bits = GET_MODE_BITSIZE (mode);

      /* Make sure bitpos is valid for the chosen mode.  Adjust BITPOS to
	 be in the range 0 to total_bits-1, and put any excess bytes in
	 OFFSET.  */
      if (bitpos >= total_bits)
	{
	  offset += (bitpos / total_bits) * (total_bits / BITS_PER_UNIT);
	  bitpos -= ((bitpos / total_bits) * (total_bits / BITS_PER_UNIT)
		     * BITS_PER_UNIT);
	}

      /* Get ref to an aligned byte, halfword, or word containing the field.
	 Adjust BITPOS to be position within a word,
	 and OFFSET to be the offset of that word.
	 Then alter OP0 to refer to that word.  */
      bitpos += (offset % (total_bits / BITS_PER_UNIT)) * BITS_PER_UNIT;
      offset -= (offset % (total_bits / BITS_PER_UNIT));
      op0 = change_address (op0, mode,
			    plus_constant (XEXP (op0, 0), offset));
    }

  mode = GET_MODE (op0);

  /* Now MODE is either some integral mode for a MEM as OP0,
     or is a full-word for a REG as OP0.  TOTAL_BITS corresponds.
     The bit field is contained entirely within OP0.
     BITPOS is the starting bit number within OP0.
     (OP0's mode may actually be narrower than MODE.)  */

  if (BYTES_BIG_ENDIAN)
    /* BITPOS is the distance between our msb
       and that of the containing datum.
       Convert it to the distance from the lsb.  */
    bitpos = total_bits - bitsize - bitpos;

  /* Now BITPOS is always the distance between our lsb
     and that of OP0.  */

  /* Shift VALUE left by BITPOS bits.  If VALUE is not constant,
     we must first convert its mode to MODE.  */

  if (GET_CODE (value) == CONST_INT)
    {
      register HOST_WIDE_INT v = INTVAL (value);

      if (bitsize < HOST_BITS_PER_WIDE_INT)
	v &= ((HOST_WIDE_INT) 1 << bitsize) - 1;

      if (v == 0)
	all_zero = 1;
      else if ((bitsize < HOST_BITS_PER_WIDE_INT
		&& v == ((HOST_WIDE_INT) 1 << bitsize) - 1)
	       || (bitsize == HOST_BITS_PER_WIDE_INT && v == -1))
	all_one = 1;

      value = lshift_value (mode, value, bitpos, bitsize);
    }
  else
    {
      int must_and = (GET_MODE_BITSIZE (GET_MODE (value)) != bitsize
		      && bitpos + bitsize != GET_MODE_BITSIZE (mode));

      if (GET_MODE (value) != mode)
	{
	  if ((GET_CODE (value) == REG || GET_CODE (value) == SUBREG)
	      && GET_MODE_SIZE (mode) < GET_MODE_SIZE (GET_MODE (value)))
	    value = gen_lowpart (mode, value);
	  else
	    value = convert_to_mode (mode, value, 1);
	}

      if (must_and)
	value = expand_binop (mode, and_optab, value,
			      mask_rtx (mode, 0, bitsize, 0),
			      NULL_RTX, 1, OPTAB_LIB_WIDEN);
      if (bitpos > 0)
	value = expand_shift (LSHIFT_EXPR, mode, value,
			      build_int_2 (bitpos, 0), NULL_RTX, 1);
    }

  /* Now clear the chosen bits in OP0,
     except that if VALUE is -1 we need not bother.  */

  subtarget = (GET_CODE (op0) == REG || ! flag_force_mem) ? op0 : 0;

  if (! all_one)
    {
      temp = expand_binop (mode, and_optab, op0,
			   mask_rtx (mode, bitpos, bitsize, 1),
			   subtarget, 1, OPTAB_LIB_WIDEN);
      subtarget = temp;
    }
  else
    temp = op0;

  /* Now logical-or VALUE into OP0, unless it is zero.  */

  if (! all_zero)
    temp = expand_binop (mode, ior_optab, temp, value,
			 subtarget, 1, OPTAB_LIB_WIDEN);
  if (op0 != temp)
    emit_move_insn (op0, temp);
}
/* Store a bit field that is split across multiple accessible memory objects.

   OP0 is the REG, SUBREG or MEM rtx for the first of the objects.
   BITSIZE is the field width; BITPOS the position of its first bit
   (within the word).
   VALUE is the value to store.
   ALIGN is the known alignment of OP0, measured in bytes.
   This is also the size of the memory objects to be used.

   This does not yet handle fields wider than BITS_PER_WORD.  */
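/* Worked example (illustrative): with 32-bit units, a 10-bit field
   starting at bit 28 does not fit in one unit.  The loop below stores
   it as two parts: on a little-endian target, bits 0-3 of VALUE go
   into bits 28-31 of unit 0, then bits 4-9 go into bits 0-5 of unit 1;
   a big-endian target processes the parts in the opposite order.  */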
static void
store_split_bit_field (op0, bitsize, bitpos, value, align)
     rtx op0;
     int bitsize, bitpos;
     rtx value;
     int align;
{
  int unit;
  int bitsdone = 0;

  /* Make sure UNIT isn't larger than BITS_PER_WORD, we can only handle that
     much at a time.  */
  if (GET_CODE (op0) == REG || GET_CODE (op0) == SUBREG)
    unit = BITS_PER_WORD;
  else
    unit = MIN (align * BITS_PER_UNIT, BITS_PER_WORD);

  /* If VALUE is a constant other than a CONST_INT, get it into a register in
     WORD_MODE.  If we can do this using gen_lowpart_common, do so.  Note
     that VALUE might be a floating-point constant.  */
  if (CONSTANT_P (value) && GET_CODE (value) != CONST_INT)
    {
      rtx word = gen_lowpart_common (word_mode, value);

      if (word && (value != word))
	value = word;
      else
	value = gen_lowpart_common (word_mode,
				    force_reg (GET_MODE (value) != VOIDmode
					       ? GET_MODE (value)
					       : word_mode, value));
    }

  while (bitsdone < bitsize)
    {
      int thissize;
      rtx part, word;
      int thispos;
      int offset;

      offset = (bitpos + bitsdone) / unit;
      thispos = (bitpos + bitsdone) % unit;

      /* THISSIZE must not overrun a word boundary.  Otherwise,
	 store_fixed_bit_field will call us again, and we will mutually
	 recurse forever.  */
      thissize = MIN (bitsize - bitsdone, BITS_PER_WORD);
      thissize = MIN (thissize, unit - thispos);

      if (BYTES_BIG_ENDIAN)
	{
	  int total_bits;

	  /* We must do an endian conversion exactly the same way as it is
	     done in extract_bit_field, so that the two calls to
	     extract_fixed_bit_field will have comparable arguments.  */
	  if (GET_CODE (value) != MEM || GET_MODE (value) == BLKmode)
	    total_bits = BITS_PER_WORD;
	  else
	    total_bits = GET_MODE_BITSIZE (GET_MODE (value));

	  /* Fetch successively less significant portions.  */
	  if (GET_CODE (value) == CONST_INT)
	    part = GEN_INT (((unsigned HOST_WIDE_INT) (INTVAL (value))
			     >> (bitsize - bitsdone - thissize))
			    & (((HOST_WIDE_INT) 1 << thissize) - 1));
	  else
	    /* The args are chosen so that the last part includes the
	       lsb.  Give extract_bit_field the value it needs (with
	       endianness compensation) to fetch the piece we want.

	       ??? We have no idea what the alignment of VALUE is, so
	       we have to use a guess.  */
	    part
	      = extract_fixed_bit_field
		(word_mode, value, 0, thissize,
		 total_bits - bitsize + bitsdone, NULL_RTX, 1,
		 GET_MODE (value) == VOIDmode
		 ? UNITS_PER_WORD
		 : (GET_MODE (value) == BLKmode
		    ? 1
		    : GET_MODE_ALIGNMENT (GET_MODE (value)) / BITS_PER_UNIT));
	}
      else
	{
	  /* Fetch successively more significant portions.  */
	  if (GET_CODE (value) == CONST_INT)
	    part = GEN_INT (((unsigned HOST_WIDE_INT) (INTVAL (value))
			     >> bitsdone)
			    & (((HOST_WIDE_INT) 1 << thissize) - 1));
	  else
	    part
	      = extract_fixed_bit_field
		(word_mode, value, 0, thissize, bitsdone, NULL_RTX, 1,
		 GET_MODE (value) == VOIDmode
		 ? UNITS_PER_WORD
		 : (GET_MODE (value) == BLKmode
		    ? 1
		    : GET_MODE_ALIGNMENT (GET_MODE (value)) / BITS_PER_UNIT));
	}

      /* If OP0 is a register, then handle OFFSET here.

	 When handling multiword bitfields, extract_bit_field may pass
	 down a word_mode SUBREG of a larger REG for a bitfield that actually
	 crosses a word boundary.  Thus, for a SUBREG, we must find
	 the current word starting from the base register.  */
      if (GET_CODE (op0) == SUBREG)
	{
	  word = operand_subword_force (SUBREG_REG (op0),
					SUBREG_WORD (op0) + offset,
					GET_MODE (SUBREG_REG (op0)));
	  offset = 0;
	}
      else if (GET_CODE (op0) == REG)
	{
	  word = operand_subword_force (op0, offset, GET_MODE (op0));
	  offset = 0;
	}
      else
	word = op0;

      /* OFFSET is in UNITs, and UNIT is in bits.
	 store_fixed_bit_field wants offset in bytes.  */
      store_fixed_bit_field (word, offset * unit / BITS_PER_UNIT,
			     thissize, thispos, part, align);
      bitsdone += thissize;
    }
}
/* Generate code to extract a byte-field from STR_RTX
   containing BITSIZE bits, starting at BITNUM,
   and put it in TARGET if possible (if TARGET is nonzero).
   Regardless of TARGET, we return the rtx for where the value is placed.
   It may be a QUEUED.

   STR_RTX is the structure containing the byte (a REG or MEM).
   UNSIGNEDP is nonzero if this is an unsigned bit field.
   MODE is the natural mode of the field value once extracted.
   TMODE is the mode the caller would like the value to have;
   but the value may be returned with type MODE instead.

   ALIGN is the alignment that STR_RTX is known to have, measured in bytes.
   TOTAL_SIZE is the size in bytes of the containing structure,
   or -1 if varying.

   If a TARGET is specified and we can store in it at no extra cost,
   we do so, and return TARGET.
   Otherwise, we return a REG of mode TMODE or MODE, with TMODE preferred
   if they are equally easy.  */
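/* Illustrative example of the argument conventions: extracting a 6-bit
   field at BITNUM 10 from a REG counts in BITS_PER_WORD units, giving
   OFFSET 0 and bit position 10; from a MEM it counts in BITS_PER_UNIT
   units, giving byte OFFSET 1 and bit position 2.  The result is
   zero-extended if UNSIGNEDP, sign-extended otherwise.  */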
rtx
extract_bit_field (str_rtx, bitsize, bitnum, unsignedp,
		   target, mode, tmode, align, total_size)
     rtx str_rtx;
     register int bitsize;
     int bitnum;
     int unsignedp;
     rtx target;
     enum machine_mode mode, tmode;
     int align;
     int total_size;
{
  int unit = (GET_CODE (str_rtx) == MEM) ? BITS_PER_UNIT : BITS_PER_WORD;
  register int offset = bitnum / unit;
  register int bitpos = bitnum % unit;
  register rtx op0 = str_rtx;
  rtx spec_target = target;
  rtx spec_target_subreg = 0;

  /* Discount the part of the structure before the desired byte.
     We need to know how many bytes are safe to reference after it.  */
  if (total_size >= 0)
    total_size -= (bitpos / BIGGEST_ALIGNMENT
		   * (BIGGEST_ALIGNMENT / BITS_PER_UNIT));

  if (tmode == VOIDmode)
    tmode = mode;

  while (GET_CODE (op0) == SUBREG)
    {
      int outer_size = GET_MODE_BITSIZE (GET_MODE (op0));
      int inner_size = GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (op0)));

      offset += SUBREG_WORD (op0);

      if (BYTES_BIG_ENDIAN && (outer_size < inner_size))
	{
	  bitpos += inner_size - outer_size;
	  if (bitpos > unit)
	    {
	      offset += (bitpos / unit);
	      bitpos %= unit;
	    }
	}

      op0 = SUBREG_REG (op0);
    }

  /* ??? We currently assume TARGET is at least as big as BITSIZE.
     If that's wrong, the solution is to test for it and set TARGET to 0
     if needed.  */

  /* If OP0 is a register, BITPOS must count within a word.
     But as we have it, it counts within whatever size OP0 now has.
     On a bigendian machine, these are not the same, so convert.  */
  if (BYTES_BIG_ENDIAN
      && GET_CODE (op0) != MEM
      && unit > GET_MODE_BITSIZE (GET_MODE (op0)))
    bitpos += unit - GET_MODE_BITSIZE (GET_MODE (op0));

  /* Extracting a full-word or multi-word value
     from a structure in a register or aligned memory.
     This can be done with just SUBREG.
     So too extracting a subword value in
     the least significant part of the register.  */

  if (((GET_CODE (op0) == REG
	&& TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
				  GET_MODE_BITSIZE (GET_MODE (op0))))
       || (GET_CODE (op0) == MEM
	   && (! SLOW_UNALIGNED_ACCESS
	       || (offset * BITS_PER_UNIT % bitsize == 0
		   && align * BITS_PER_UNIT % bitsize == 0))))
      && ((bitsize >= BITS_PER_WORD && bitsize == GET_MODE_BITSIZE (mode)
	   && bitpos % BITS_PER_WORD == 0)
	  || (mode_for_size (bitsize, GET_MODE_CLASS (tmode), 0) != BLKmode
	      && (BYTES_BIG_ENDIAN
		  ? bitpos + bitsize == BITS_PER_WORD
		  : bitpos == 0))))
    {
      enum machine_mode mode1
	= mode_for_size (bitsize, GET_MODE_CLASS (tmode), 0);

      if (mode1 != GET_MODE (op0))
	{
	  if (GET_CODE (op0) == REG)
	    op0 = gen_rtx (SUBREG, mode1, op0, offset);
	  else
	    op0 = change_address (op0, mode1,
				  plus_constant (XEXP (op0, 0), offset));
	}

      if (mode1 != mode)
	return convert_to_mode (tmode, op0, unsignedp);
      return op0;
    }

  /* Handle fields bigger than a word.  */

  if (bitsize > BITS_PER_WORD)
    {
      /* Here we transfer the words of the field
	 in the order least significant first.
	 This is because the most significant word is the one which may
	 be less than full.  */

      int nwords = (bitsize + (BITS_PER_WORD - 1)) / BITS_PER_WORD;
      int i;

      if (target == 0 || GET_CODE (target) != REG)
	target = gen_reg_rtx (mode);

      /* Indicate for flow that the entire target reg is being set.  */
      emit_insn (gen_rtx (CLOBBER, VOIDmode, target));

      for (i = 0; i < nwords; i++)
	{
	  /* If I is 0, use the low-order word in both field and target;
	     if I is 1, use the next to lowest word; and so on.  */
	  /* Word number in TARGET to use.  */
	  int wordnum = (WORDS_BIG_ENDIAN
			 ? GET_MODE_SIZE (GET_MODE (target)) / UNITS_PER_WORD - i - 1
			 : i);
	  /* Offset from start of field in OP0.  */
	  int bit_offset = (WORDS_BIG_ENDIAN
			    ? MAX (0, bitsize - (i + 1) * BITS_PER_WORD)
			    : i * BITS_PER_WORD);
	  rtx target_part = operand_subword (target, wordnum, 1, VOIDmode);
	  rtx result_part
	    = extract_bit_field (op0, MIN (BITS_PER_WORD,
					   bitsize - i * BITS_PER_WORD),
				 bitnum + bit_offset,
				 1, target_part, mode, word_mode,
				 align, total_size);

	  if (target_part == 0)
	    abort ();

	  if (result_part != target_part)
	    emit_move_insn (target_part, result_part);
	}

      if (unsignedp)
	{
	  /* Unless we've filled TARGET, the upper regs in a multi-reg value
	     need to be zero'd out.  */
	  if (GET_MODE_SIZE (GET_MODE (target)) > nwords * UNITS_PER_WORD)
	    {
	      int i, total_words;

	      total_words = GET_MODE_SIZE (GET_MODE (target)) / UNITS_PER_WORD;
	      for (i = nwords; i < total_words; i++)
		{
		  int wordnum = WORDS_BIG_ENDIAN ? total_words - i - 1 : i;
		  rtx target_part = operand_subword (target, wordnum, 1, VOIDmode);
		  emit_move_insn (target_part, const0_rtx);
		}
	    }
	  return target;
	}

      /* Signed bit field: sign-extend with two arithmetic shifts.  */
      target = expand_shift (LSHIFT_EXPR, mode, target,
			     build_int_2 (GET_MODE_BITSIZE (mode) - bitsize, 0),
			     NULL_RTX, 0);
      return expand_shift (RSHIFT_EXPR, mode, target,
			   build_int_2 (GET_MODE_BITSIZE (mode) - bitsize, 0),
			   NULL_RTX, 0);
    }

  /* From here on we know the desired field is smaller than a word
     so we can assume it is an integer.  So we can safely extract it as one
     size of integer, if necessary, and then truncate or extend
     to the size that is wanted.  */

  /* OFFSET is the number of words or bytes (UNIT says which)
     from STR_RTX to the first word or byte containing part of the field.  */

  if (GET_CODE (op0) == REG)
    {
      if (offset != 0
	  || GET_MODE_SIZE (GET_MODE (op0)) > UNITS_PER_WORD)
	op0 = gen_rtx (SUBREG, TYPE_MODE (type_for_size (BITS_PER_WORD, 0)),
		       op0, offset);
      offset = 0;
    }
  else
    op0 = protect_from_queue (str_rtx, 1);

  /* Now OFFSET is nonzero only for memory operands.  */

  if (unsignedp)
    {
#ifdef HAVE_extzv
      if (HAVE_extzv
	  && (GET_MODE_BITSIZE (insn_operand_mode[(int) CODE_FOR_extzv][0])
	      >= bitsize)
	  && ! ((GET_CODE (op0) == REG || GET_CODE (op0) == SUBREG)
		&& (bitsize + bitpos
		    > GET_MODE_BITSIZE (insn_operand_mode[(int) CODE_FOR_extzv][0]))))
	{
	  int xbitpos = bitpos, xoffset = offset;
	  rtx bitsize_rtx, bitpos_rtx;
	  rtx last = get_last_insn();
	  rtx xop0 = op0;
	  rtx xtarget = target;
	  rtx xspec_target = spec_target;
	  rtx xspec_target_subreg = spec_target_subreg;
	  rtx pat;
	  enum machine_mode maxmode
	    = insn_operand_mode[(int) CODE_FOR_extzv][0];

	  if (GET_CODE (xop0) == MEM)
	    {
	      int save_volatile_ok = volatile_ok;
	      volatile_ok = 1;

	      /* Is the memory operand acceptable?  */
	      if (flag_force_mem
		  || ! ((*insn_operand_predicate[(int) CODE_FOR_extzv][1])
			(xop0, GET_MODE (xop0))))
		{
		  /* No, load into a reg and extract from there.  */
		  enum machine_mode bestmode;

		  /* Get the mode to use for inserting into this field.  If
		     OP0 is BLKmode, get the smallest mode consistent with the
		     alignment. If OP0 is a non-BLKmode object that is no
		     wider than MAXMODE, use its mode. Otherwise, use the
		     smallest mode containing the field.  */

		  if (GET_MODE (xop0) == BLKmode
		      || (GET_MODE_SIZE (GET_MODE (op0))
			  > GET_MODE_SIZE (maxmode)))
		    bestmode = get_best_mode (bitsize, bitnum,
					      align * BITS_PER_UNIT, maxmode,
					      MEM_VOLATILE_P (xop0));
		  else
		    bestmode = GET_MODE (xop0);

		  if (bestmode == VOIDmode
		      || (SLOW_UNALIGNED_ACCESS && GET_MODE_SIZE (bestmode) > align))
		    goto extzv_loses;

		  /* Compute offset as multiple of this unit,
		     counting in bytes.  */
		  unit = GET_MODE_BITSIZE (bestmode);
		  xoffset = (bitnum / unit) * GET_MODE_SIZE (bestmode);
		  xbitpos = bitnum % unit;
		  xop0 = change_address (xop0, bestmode,
					 plus_constant (XEXP (xop0, 0),
							xoffset));
		  /* Fetch it to a register in that size.  */
		  xop0 = force_reg (bestmode, xop0);

		  /* XBITPOS counts within UNIT, which is what is expected.  */
		}
	      else
		/* Get ref to first byte containing part of the field.  */
		xop0 = change_address (xop0, byte_mode,
				       plus_constant (XEXP (xop0, 0), xoffset));

	      volatile_ok = save_volatile_ok;
	    }

	  /* If op0 is a register, we need it in MAXMODE (which is usually
	     SImode) to make it acceptable to the format of extzv.  */
	  if (GET_CODE (xop0) == SUBREG && GET_MODE (xop0) != maxmode)
	    abort ();
	  if (GET_CODE (xop0) == REG && GET_MODE (xop0) != maxmode)
	    xop0 = gen_rtx (SUBREG, maxmode, xop0, 0);

	  /* On big-endian machines, we count bits from the most significant.
	     If the bit field insn does not, we must invert.  */
	  if (BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
	    xbitpos = unit - bitsize - xbitpos;

	  /* Now convert from counting within UNIT to counting in MAXMODE.  */
	  if (BITS_BIG_ENDIAN && GET_CODE (xop0) != MEM)
	    xbitpos += GET_MODE_BITSIZE (maxmode) - unit;

	  unit = GET_MODE_BITSIZE (maxmode);

	  if (xtarget == 0
	      || (flag_force_mem && GET_CODE (xtarget) == MEM))
	    xtarget = xspec_target = gen_reg_rtx (tmode);

	  if (GET_MODE (xtarget) != maxmode)
	    {
	      if (GET_CODE (xtarget) == REG)
		{
		  int wider = (GET_MODE_SIZE (maxmode)
			       > GET_MODE_SIZE (GET_MODE (xtarget)));
		  xtarget = gen_lowpart (maxmode, xtarget);
		  if (wider)
		    xspec_target_subreg = xtarget;
		}
	      else
		xtarget = gen_reg_rtx (maxmode);
	    }

	  /* If this machine's extzv insists on a register target,
	     make sure we have one.  */
	  if (! ((*insn_operand_predicate[(int) CODE_FOR_extzv][0])
		 (xtarget, maxmode)))
	    xtarget = gen_reg_rtx (maxmode);

	  bitsize_rtx = GEN_INT (bitsize);
	  bitpos_rtx = GEN_INT (xbitpos);

	  pat = gen_extzv (protect_from_queue (xtarget, 1),
			   xop0, bitsize_rtx, bitpos_rtx);
	  if (pat)
	    {
	      emit_insn (pat);
	      target = xtarget;
	      spec_target = xspec_target;
	      spec_target_subreg = xspec_target_subreg;
	    }
	  else
	    {
	      delete_insns_since (last);
	      target = extract_fixed_bit_field (tmode, op0, offset, bitsize,
						bitpos, target, 1, align);
	    }
	}
      else
	extzv_loses:
#endif
	target = extract_fixed_bit_field (tmode, op0, offset, bitsize, bitpos,
					  target, 1, align);
    }
  else
    {
#ifdef HAVE_extv
      if (HAVE_extv
	  && (GET_MODE_BITSIZE (insn_operand_mode[(int) CODE_FOR_extv][0])
	      >= bitsize)
	  && ! ((GET_CODE (op0) == REG || GET_CODE (op0) == SUBREG)
		&& (bitsize + bitpos
		    > GET_MODE_BITSIZE (insn_operand_mode[(int) CODE_FOR_extv][0]))))
	{
	  int xbitpos = bitpos, xoffset = offset;
	  rtx bitsize_rtx, bitpos_rtx;
	  rtx last = get_last_insn();
	  rtx xop0 = op0, xtarget = target;
	  rtx xspec_target = spec_target;
	  rtx xspec_target_subreg = spec_target_subreg;
	  rtx pat;
	  enum machine_mode maxmode
	    = insn_operand_mode[(int) CODE_FOR_extv][0];

	  if (GET_CODE (xop0) == MEM)
	    {
	      /* Is the memory operand acceptable?  */
	      if (! ((*insn_operand_predicate[(int) CODE_FOR_extv][1])
		     (xop0, GET_MODE (xop0))))
		{
		  /* No, load into a reg and extract from there.  */
		  enum machine_mode bestmode;

		  /* Get the mode to use for inserting into this field.  If
		     OP0 is BLKmode, get the smallest mode consistent with the
		     alignment. If OP0 is a non-BLKmode object that is no
		     wider than MAXMODE, use its mode. Otherwise, use the
		     smallest mode containing the field.  */

		  if (GET_MODE (xop0) == BLKmode
		      || (GET_MODE_SIZE (GET_MODE (op0))
			  > GET_MODE_SIZE (maxmode)))
		    bestmode = get_best_mode (bitsize, bitnum,
					      align * BITS_PER_UNIT, maxmode,
					      MEM_VOLATILE_P (xop0));
		  else
		    bestmode = GET_MODE (xop0);

		  if (bestmode == VOIDmode
		      || (SLOW_UNALIGNED_ACCESS && GET_MODE_SIZE (bestmode) > align))
		    goto extv_loses;

		  /* Compute offset as multiple of this unit,
		     counting in bytes.  */
		  unit = GET_MODE_BITSIZE (bestmode);
		  xoffset = (bitnum / unit) * GET_MODE_SIZE (bestmode);
		  xbitpos = bitnum % unit;
		  xop0 = change_address (xop0, bestmode,
					 plus_constant (XEXP (xop0, 0),
							xoffset));
		  /* Fetch it to a register in that size.  */
		  xop0 = force_reg (bestmode, xop0);

		  /* XBITPOS counts within UNIT, which is what is expected.  */
		}
	      else
		/* Get ref to first byte containing part of the field.  */
		xop0 = change_address (xop0, byte_mode,
				       plus_constant (XEXP (xop0, 0), xoffset));
	    }

	  /* If op0 is a register, we need it in MAXMODE (which is usually
	     SImode) to make it acceptable to the format of extv.  */
	  if (GET_CODE (xop0) == SUBREG && GET_MODE (xop0) != maxmode)
	    abort ();
	  if (GET_CODE (xop0) == REG && GET_MODE (xop0) != maxmode)
	    xop0 = gen_rtx (SUBREG, maxmode, xop0, 0);

	  /* On big-endian machines, we count bits from the most significant.
	     If the bit field insn does not, we must invert.  */
	  if (BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
	    xbitpos = unit - bitsize - xbitpos;

	  /* XBITPOS counts within a size of UNIT.
	     Adjust to count within a size of MAXMODE.  */
	  if (BITS_BIG_ENDIAN && GET_CODE (xop0) != MEM)
	    xbitpos += (GET_MODE_BITSIZE (maxmode) - unit);

	  unit = GET_MODE_BITSIZE (maxmode);

	  if (xtarget == 0
	      || (flag_force_mem && GET_CODE (xtarget) == MEM))
	    xtarget = xspec_target = gen_reg_rtx (tmode);

	  if (GET_MODE (xtarget) != maxmode)
	    {
	      if (GET_CODE (xtarget) == REG)
		{
		  int wider = (GET_MODE_SIZE (maxmode)
			       > GET_MODE_SIZE (GET_MODE (xtarget)));
		  xtarget = gen_lowpart (maxmode, xtarget);
		  if (wider)
		    xspec_target_subreg = xtarget;
		}
	      else
		xtarget = gen_reg_rtx (maxmode);
	    }

	  /* If this machine's extv insists on a register target,
	     make sure we have one.  */
	  if (! ((*insn_operand_predicate[(int) CODE_FOR_extv][0])
		 (xtarget, maxmode)))
	    xtarget = gen_reg_rtx (maxmode);

	  bitsize_rtx = GEN_INT (bitsize);
	  bitpos_rtx = GEN_INT (xbitpos);

	  pat = gen_extv (protect_from_queue (xtarget, 1),
			  xop0, bitsize_rtx, bitpos_rtx);
	  if (pat)
	    {
	      emit_insn (pat);
	      target = xtarget;
	      spec_target = xspec_target;
	      spec_target_subreg = xspec_target_subreg;
	    }
	  else
	    {
	      delete_insns_since (last);
	      target = extract_fixed_bit_field (tmode, op0, offset, bitsize,
						bitpos, target, 0, align);
	    }
	}
      else
	extv_loses:
#endif
	target = extract_fixed_bit_field (tmode, op0, offset, bitsize, bitpos,
					  target, 0, align);
    }
  if (target == spec_target)
    return target;
  if (target == spec_target_subreg)
    return spec_target;
  if (GET_MODE (target) != tmode && GET_MODE (target) != mode)
    {
      /* If the target mode is floating-point, first convert to the
	 integer mode of that size and then access it as a floating-point
	 value via a SUBREG.  */
      if (GET_MODE_CLASS (tmode) == MODE_FLOAT)
	{
	  target = convert_to_mode (mode_for_size (GET_MODE_BITSIZE (tmode),
						   MODE_INT, 0),
				    target, unsignedp);
	  if (GET_CODE (target) != REG)
	    target = copy_to_reg (target);
	  return gen_rtx (SUBREG, tmode, target, 0);
	}
      else
	return convert_to_mode (tmode, target, unsignedp);
    }
  return target;
}
/* Extract a bit field using shifts and boolean operations
   Returns an rtx to represent the value.
   OP0 addresses a register (word) or memory (byte).
   BITPOS says which bit within the word or byte the bit field starts in.
   OFFSET says how many bytes farther the bit field starts;
    it is 0 if OP0 is a register.
   BITSIZE says how many bits long the bit field is.
    (If OP0 is a register, it may be narrower than a full word,
     but BITPOS still counts within a full word,
     which is significant on bigendian machines.)

   UNSIGNEDP is nonzero for an unsigned bit field (don't sign-extend value).
   If TARGET is nonzero, attempts to store the value there
   and return TARGET, but this is not guaranteed.
   If TARGET is not used, create a pseudo-reg of mode TMODE for the value.

   ALIGN is the alignment that STR_RTX is known to have, measured in bytes.  */
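/* In scalar terms (an illustrative sketch, assuming the field fits in a
   single 32-bit datum), the two paths below compute

     unsigned:  (datum >> bitpos) & ((1 << bitsize) - 1)
     signed:    ((int) (datum << (32 - bitpos - bitsize))) >> (32 - bitsize)

   where the signed form performs the sign extension with two shifts.  */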
static rtx
extract_fixed_bit_field (tmode, op0, offset, bitsize, bitpos,
			 target, unsignedp, align)
     enum machine_mode tmode;
     register rtx op0, target;
     register int offset, bitsize, bitpos;
     int unsignedp;
     int align;
{
  int total_bits = BITS_PER_WORD;
  enum machine_mode mode;

  if (GET_CODE (op0) == SUBREG || GET_CODE (op0) == REG)
    {
      /* Special treatment for a bit field split across two registers.  */
      if (bitsize + bitpos > BITS_PER_WORD)
	return extract_split_bit_field (op0, bitsize, bitpos,
					unsignedp, align);
    }
  else
    {
      /* Get the proper mode to use for this field.  We want a mode that
	 includes the entire field.  If such a mode would be larger than
	 a word, we won't be doing the extraction the normal way.  */

      mode = get_best_mode (bitsize, bitpos + offset * BITS_PER_UNIT,
			    align * BITS_PER_UNIT, word_mode,
			    GET_CODE (op0) == MEM && MEM_VOLATILE_P (op0));

      if (mode == VOIDmode)
	/* The only way this should occur is if the field spans word
	   boundaries.  */
	return extract_split_bit_field (op0, bitsize,
					bitpos + offset * BITS_PER_UNIT,
					unsignedp, align);

      total_bits = GET_MODE_BITSIZE (mode);

      /* Make sure bitpos is valid for the chosen mode.  Adjust BITPOS to
	 be in the range 0 to total_bits-1, and put any excess bytes in
	 OFFSET.  */
      if (bitpos >= total_bits)
	{
	  offset += (bitpos / total_bits) * (total_bits / BITS_PER_UNIT);
	  bitpos -= ((bitpos / total_bits) * (total_bits / BITS_PER_UNIT)
		     * BITS_PER_UNIT);
	}

      /* Get ref to an aligned byte, halfword, or word containing the field.
	 Adjust BITPOS to be position within a word,
	 and OFFSET to be the offset of that word.
	 Then alter OP0 to refer to that word.  */
      bitpos += (offset % (total_bits / BITS_PER_UNIT)) * BITS_PER_UNIT;
      offset -= (offset % (total_bits / BITS_PER_UNIT));
      op0 = change_address (op0, mode,
			    plus_constant (XEXP (op0, 0), offset));
    }

  mode = GET_MODE (op0);

  if (BYTES_BIG_ENDIAN)
    {
      /* BITPOS is the distance between our msb and that of OP0.
	 Convert it to the distance from the lsb.  */

      bitpos = total_bits - bitsize - bitpos;
    }

  /* Now BITPOS is always the distance between the field's lsb and that of OP0.
     We have reduced the big-endian case to the little-endian case.  */

  if (unsignedp)
    {
      if (bitpos)
	{
	  /* If the field does not already start at the lsb,
	     shift it so it does.  */
	  tree amount = build_int_2 (bitpos, 0);
	  /* Maybe propagate the target for the shift.  */
	  /* But not if we will return it--could confuse integrate.c.  */
	  rtx subtarget = (target != 0 && GET_CODE (target) == REG
			   && !REG_FUNCTION_VALUE_P (target)
			   ? target : 0);
	  if (tmode != mode) subtarget = 0;
	  op0 = expand_shift (RSHIFT_EXPR, mode, op0, amount, subtarget, 1);
	}
      /* Convert the value to the desired mode.  */
      if (mode != tmode)
	op0 = convert_to_mode (tmode, op0, 1);

      /* Unless the msb of the field used to be the msb when we shifted,
	 mask out the upper bits.  */

      if (GET_MODE_BITSIZE (mode) != bitpos + bitsize
#ifdef SLOW_ZERO_EXTEND
	  /* Always generate an `and' if
	     we just zero-extended op0 and SLOW_ZERO_EXTEND, since it
	     will combine fruitfully with the zero-extend.  */
	  || tmode != mode
#endif
	  )
	return expand_binop (GET_MODE (op0), and_optab, op0,
			     mask_rtx (GET_MODE (op0), 0, bitsize, 0),
			     target, 1, OPTAB_LIB_WIDEN);
      return op0;
    }

  /* To extract a signed bit-field, first shift its msb to the msb of the word,
     then arithmetic-shift its lsb to the lsb of the word.  */
  op0 = force_reg (mode, op0);
  if (mode != tmode)
    target = 0;

  /* Find the narrowest integer mode that contains the field.  */

  for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != VOIDmode;
       mode = GET_MODE_WIDER_MODE (mode))
    if (GET_MODE_BITSIZE (mode) >= bitsize + bitpos)
      {
	op0 = convert_to_mode (mode, op0, 0);
	break;
      }

  if (GET_MODE_BITSIZE (mode) != (bitsize + bitpos))
    {
      tree amount = build_int_2 (GET_MODE_BITSIZE (mode) - (bitsize + bitpos), 0);
      /* Maybe propagate the target for the shift.  */
      /* But not if we will return the result--could confuse integrate.c.  */
      rtx subtarget = (target != 0 && GET_CODE (target) == REG
		       && ! REG_FUNCTION_VALUE_P (target)
		       ? target : 0);
      op0 = expand_shift (LSHIFT_EXPR, mode, op0, amount, subtarget, 1);
    }

  return expand_shift (RSHIFT_EXPR, mode, op0,
		       build_int_2 (GET_MODE_BITSIZE (mode) - bitsize, 0),
		       target, 0);
}
/* Return a constant integer (CONST_INT or CONST_DOUBLE) mask value
   of mode MODE with BITSIZE ones followed by BITPOS zeros, or the
   complement of that if COMPLEMENT.  The mask is truncated if
   necessary to the width of mode MODE.  The mask is zero-extended if
   BITSIZE+BITPOS is too small for MODE.  */
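/* For example (illustrative values, assuming 32-bit SImode and a 64-bit
   HOST_WIDE_INT): mask_rtx (SImode, 8, 4, 0) yields 0x00000f00, and
   mask_rtx (SImode, 8, 4, 1) yields its complement 0xfffff0ff,
   truncated to the 32-bit width of the mode.  */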
static rtx
mask_rtx (mode, bitpos, bitsize, complement)
     enum machine_mode mode;
     int bitpos, bitsize, complement;
{
  HOST_WIDE_INT masklow, maskhigh;

  if (bitpos < HOST_BITS_PER_WIDE_INT)
    masklow = (HOST_WIDE_INT) -1 << bitpos;
  else
    masklow = 0;

  if (bitpos + bitsize < HOST_BITS_PER_WIDE_INT)
    masklow &= ((unsigned HOST_WIDE_INT) -1
		>> (HOST_BITS_PER_WIDE_INT - bitpos - bitsize));

  if (bitpos <= HOST_BITS_PER_WIDE_INT)
    maskhigh = -1;
  else
    maskhigh = (HOST_WIDE_INT) -1 << (bitpos - HOST_BITS_PER_WIDE_INT);

  if (bitpos + bitsize > HOST_BITS_PER_WIDE_INT)
    maskhigh &= ((unsigned HOST_WIDE_INT) -1
		 >> (2 * HOST_BITS_PER_WIDE_INT - bitpos - bitsize));
  else
    maskhigh = 0;

  if (complement)
    {
      masklow = ~masklow;
      maskhigh = ~maskhigh;
    }

  return immed_double_const (masklow, maskhigh, mode);
}
/* Return a constant integer (CONST_INT or CONST_DOUBLE) rtx with the value
   VALUE truncated to BITSIZE bits and then shifted left BITPOS bits.  */
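/* For example (illustrative values): with VALUE == 0xab, BITSIZE == 4
   and BITPOS == 8, the value is truncated to its low four bits (0xb)
   and shifted left, giving a constant with value 0xb00.  */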
static rtx
lshift_value (mode, value, bitpos, bitsize)
     enum machine_mode mode;
     rtx value;
     int bitpos, bitsize;
{
  unsigned HOST_WIDE_INT v = INTVAL (value);
  HOST_WIDE_INT low, high;

  if (bitsize < HOST_BITS_PER_WIDE_INT)
    v &= ~((HOST_WIDE_INT) -1 << bitsize);

  if (bitpos < HOST_BITS_PER_WIDE_INT)
    {
      low = v << bitpos;
      high = (bitpos > 0 ? (v >> (HOST_BITS_PER_WIDE_INT - bitpos)) : 0);
    }
  else
    {
      low = 0;
      high = v << (bitpos - HOST_BITS_PER_WIDE_INT);
    }

  return immed_double_const (low, high, mode);
}
/* Extract a bit field that is split across two words
   and return an RTX for the result.

   OP0 is the REG, SUBREG or MEM rtx for the first of the two words.
   BITSIZE is the field width; BITPOS, position of its first bit, in the word.
   UNSIGNEDP is 1 if should zero-extend the contents; else sign-extend.

   ALIGN is the known alignment of OP0, measured in bytes.
   This is also the size of the memory objects to be used.  */
static rtx
extract_split_bit_field (op0, bitsize, bitpos, unsignedp, align)
     rtx op0;
     int bitsize, bitpos, unsignedp, align;
{
  int unit;
  int bitsdone = 0;
  rtx result = NULL_RTX;
  int first = 1;

  /* Make sure UNIT isn't larger than BITS_PER_WORD, we can only handle that
     much at a time.  */
  if (GET_CODE (op0) == REG || GET_CODE (op0) == SUBREG)
    unit = BITS_PER_WORD;
  else
    unit = MIN (align * BITS_PER_UNIT, BITS_PER_WORD);

  while (bitsdone < bitsize)
    {
      int thissize;
      rtx part, word;
      int thispos;
      int offset;

      offset = (bitpos + bitsdone) / unit;
      thispos = (bitpos + bitsdone) % unit;

      /* THISSIZE must not overrun a word boundary.  Otherwise,
	 extract_fixed_bit_field will call us again, and we will mutually
	 recurse forever.  */
      thissize = MIN (bitsize - bitsdone, BITS_PER_WORD);
      thissize = MIN (thissize, unit - thispos);

      /* If OP0 is a register, then handle OFFSET here.

	 When handling multiword bitfields, extract_bit_field may pass
	 down a word_mode SUBREG of a larger REG for a bitfield that actually
	 crosses a word boundary.  Thus, for a SUBREG, we must find
	 the current word starting from the base register.  */
      if (GET_CODE (op0) == SUBREG)
	{
	  word = operand_subword_force (SUBREG_REG (op0),
					SUBREG_WORD (op0) + offset,
					GET_MODE (SUBREG_REG (op0)));
	  offset = 0;
	}
      else if (GET_CODE (op0) == REG)
	{
	  word = operand_subword_force (op0, offset, GET_MODE (op0));
	  offset = 0;
	}
      else
	word = op0;

      /* Extract the parts in bit-counting order,
	 whose meaning is determined by BYTES_PER_UNIT.
	 OFFSET is in UNITs, and UNIT is in bits.
	 extract_fixed_bit_field wants offset in bytes.  */
      part = extract_fixed_bit_field (word_mode, word,
				      offset * unit / BITS_PER_UNIT,
				      thissize, thispos, 0, 1, align);
      bitsdone += thissize;

      /* Shift this part into place for the result.  */
      if (BYTES_BIG_ENDIAN)
	{
	  if (bitsize != bitsdone)
	    part = expand_shift (LSHIFT_EXPR, word_mode, part,
				 build_int_2 (bitsize - bitsdone, 0), 0, 1);
	}
      else
	{
	  if (bitsdone != thissize)
	    part = expand_shift (LSHIFT_EXPR, word_mode, part,
				 build_int_2 (bitsdone - thissize, 0), 0, 1);
	}

      if (first)
	result = part;
      else
	/* Combine the parts with bitwise or.  This works
	   because we extracted each part as an unsigned bit field.  */
	result = expand_binop (word_mode, ior_optab, part, result, NULL_RTX, 1,
			       OPTAB_LIB_WIDEN);

      first = 0;
    }

  /* Unsigned bit field: we are done.  */
  if (unsignedp)
    return result;
  /* Signed bit field: sign-extend with two arithmetic shifts.  */
  result = expand_shift (LSHIFT_EXPR, word_mode, result,
			 build_int_2 (BITS_PER_WORD - bitsize, 0),
			 NULL_RTX, 0);
  return expand_shift (RSHIFT_EXPR, word_mode, result,
		       build_int_2 (BITS_PER_WORD - bitsize, 0), NULL_RTX, 0);
}
/* Add INC into TARGET.  */

void
expand_inc (target, inc)
     rtx target, inc;
{
  rtx value = expand_binop (GET_MODE (target), add_optab,
			    target, inc,
			    target, 0, OPTAB_LIB_WIDEN);
  if (value != target)
    emit_move_insn (target, value);
}
/* Subtract DEC from TARGET.  */

void
expand_dec (target, dec)
     rtx target, dec;
{
  rtx value = expand_binop (GET_MODE (target), sub_optab,
			    target, dec,
			    target, 0, OPTAB_LIB_WIDEN);
  if (value != target)
    emit_move_insn (target, value);
}
/* Output a shift instruction for expression code CODE,
   with SHIFTED being the rtx for the value to shift,
   and AMOUNT the tree for the amount to shift by.
   Store the result in the rtx TARGET, if that is convenient.
   If UNSIGNEDP is nonzero, do a logical shift; otherwise, arithmetic.
   Return the rtx for where the value is.  */
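/* Usage sketch (illustrative): to emit x << 3 in SImode, a caller can
   write

     temp = expand_shift (LSHIFT_EXPR, SImode, x,
			  build_int_2 (3, 0), NULL_RTX, 1);

   where build_int_2 builds the tree constant for the shift count and a
   zero TARGET lets the function pick a register.  */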
rtx
expand_shift (code, mode, shifted, amount, target, unsignedp)
     enum tree_code code;
     register enum machine_mode mode;
     rtx shifted;
     tree amount;
     register rtx target;
     int unsignedp;
{
  register rtx op1, temp = 0;
  register int left = (code == LSHIFT_EXPR || code == LROTATE_EXPR);
  register int rotate = (code == LROTATE_EXPR || code == RROTATE_EXPR);
  int try;

  /* Previously detected shift-counts computed by NEGATE_EXPR
     and shifted in the other direction; but that does not work
     on all machines.  */

  op1 = expand_expr (amount, NULL_RTX, VOIDmode, 0);

#ifdef SHIFT_COUNT_TRUNCATED
  if (SHIFT_COUNT_TRUNCATED
      && GET_CODE (op1) == CONST_INT
      && (unsigned HOST_WIDE_INT) INTVAL (op1) >= GET_MODE_BITSIZE (mode))
    op1 = GEN_INT ((unsigned HOST_WIDE_INT) INTVAL (op1)
		   % GET_MODE_BITSIZE (mode));
#endif

  if (op1 == const0_rtx)
    return shifted;

  for (try = 0; temp == 0 && try < 3; try++)
    {
      enum optab_methods methods;

      if (try == 0)
	methods = OPTAB_DIRECT;
      else if (try == 1)
	methods = OPTAB_WIDEN;
      else
	methods = OPTAB_LIB_WIDEN;

      if (rotate)
	{
	  /* Widening does not work for rotation.  */
	  if (methods == OPTAB_WIDEN)
	    continue;
	  else if (methods == OPTAB_LIB_WIDEN)
	    {
	      /* If we have been unable to open-code this by a rotation,
		 do it as the IOR of two shifts.  I.e., to rotate A
		 by N bits, compute (A << N) | ((unsigned) A >> (C - N))
		 where C is the bitsize of A.

		 It is theoretically possible that the target machine might
		 not be able to perform either shift and hence we would
		 be making two libcalls rather than just the one for the
		 shift (similarly if IOR could not be done).  We will allow
		 this extremely unlikely lossage to avoid complicating the
		 code.  */

	      rtx subtarget = target == shifted ? 0 : target;
	      rtx temp1;
	      tree type = TREE_TYPE (amount);
	      tree new_amount = make_tree (type, op1);
	      tree other_amount
		= fold (build (MINUS_EXPR, type,
			       convert (type,
					build_int_2 (GET_MODE_BITSIZE (mode),
						     0)),
			       amount));

	      shifted = force_reg (mode, shifted);

	      temp = expand_shift (left ? LSHIFT_EXPR : RSHIFT_EXPR,
				   mode, shifted, new_amount, subtarget, 1);
	      temp1 = expand_shift (left ? RSHIFT_EXPR : LSHIFT_EXPR,
				    mode, shifted, other_amount, 0, 1);
	      return expand_binop (mode, ior_optab, temp, temp1, target,
				   unsignedp, methods);
	    }

	  temp = expand_binop (mode,
			       left ? rotl_optab : rotr_optab,
			       shifted, op1, target, unsignedp, methods);

	  /* If we don't have the rotate, but we are rotating by a constant
	     that is in range, try a rotate in the opposite direction.  */

	  if (temp == 0 && GET_CODE (op1) == CONST_INT
	      && INTVAL (op1) > 0 && INTVAL (op1) < GET_MODE_BITSIZE (mode))
	    temp = expand_binop (mode,
				 left ? rotr_optab : rotl_optab,
				 shifted,
				 GEN_INT (GET_MODE_BITSIZE (mode)
					  - INTVAL (op1)),
				 target, unsignedp, methods);
	}
      else if (unsignedp)
	temp = expand_binop (mode,
			     left ? ashl_optab : lshr_optab,
			     shifted, op1, target, unsignedp, methods);

      /* Do arithmetic shifts.
	 Also, if we are going to widen the operand, we can just as well
	 use an arithmetic right-shift instead of a logical one.  */
      if (temp == 0 && ! rotate
	  && (! unsignedp || (! left && methods == OPTAB_WIDEN)))
	{
	  enum optab_methods methods1 = methods;

	  /* If trying to widen a log shift to an arithmetic shift,
	     don't accept an arithmetic shift of the same size.  */
	  if (unsignedp)
	    methods1 = OPTAB_MUST_WIDEN;

	  /* Arithmetic shift */

	  temp = expand_binop (mode,
			       left ? ashl_optab : ashr_optab,
			       shifted, op1, target, unsignedp, methods1);
	}

      /* We used to try extzv here for logical right shifts, but that was
	 only useful for one machine, the VAX, and caused poor code
	 generation there for lshrdi3, so the code was deleted and a
	 define_expand for lshrsi3 was added to vax.md.  */
    }

  if (temp == 0)
    abort ();
  return temp;
}
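/* Scalar illustration of the rotate fallback above: with
   C == GET_MODE_BITSIZE (mode) == 32, rotating A left by N is computed
   as (A << N) | ((unsigned) A >> (32 - N)); the opposite rotation
   swaps the two shift directions.  */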
enum alg_code { alg_zero, alg_m, alg_shift,
		alg_add_t_m2, alg_sub_t_m2,
		alg_add_factor, alg_sub_factor,
		alg_add_t2_m, alg_sub_t2_m,
		alg_add, alg_subtract, alg_factor, alg_shiftop };

/* This structure records a sequence of operations.
   `ops' is the number of operations recorded.
   `cost' is their total cost.
   The operations are stored in `op' and the corresponding
   logarithms of the integer coefficients in `log'.

   These are the operations:
   alg_zero		total := 0;
   alg_m		total := multiplicand;
   alg_shift		total := total * coeff
   alg_add_t_m2		total := total + multiplicand * coeff;
   alg_sub_t_m2		total := total - multiplicand * coeff;
   alg_add_factor	total := total * coeff + total;
   alg_sub_factor	total := total * coeff - total;
   alg_add_t2_m		total := total * coeff + multiplicand;
   alg_sub_t2_m		total := total * coeff - multiplicand;

   The first operand must be either alg_zero or alg_m.  */

struct algorithm
{
  short cost;
  short ops;
  /* The size of the OP and LOG fields are not directly related to the
     word size, but the worst-case algorithms will be if we have few
     consecutive ones or zeros, i.e., a multiplicand like 10101010101...
     In that case we will generate shift-by-2, add, shift-by-2, add,...,
     in total wordsize operations.  */
  enum alg_code op[MAX_BITS_PER_WORD];
  char log[MAX_BITS_PER_WORD];
};
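/* Worked example (illustrative): multiplying by 53 can be recorded as

     alg_m                     total := x
     alg_add_t2_m  (log == 1)  total := total*2 + x   =  3x
     alg_add_t2_m  (log == 2)  total := total*4 + x   = 13x
     alg_add_t2_m  (log == 2)  total := total*4 + x   = 53x

   i.e. three shift-and-add steps in place of a multiply.  */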
/* Compute and return the best algorithm for multiplying by T.
   The algorithm must cost less than COST_LIMIT.
   If retval.cost >= COST_LIMIT, no algorithm was found and all
   other fields of the returned struct are undefined.  */

static void
synth_mult (alg_out, t, cost_limit)
     struct algorithm *alg_out;
     unsigned HOST_WIDE_INT t;
     int cost_limit;
{
  int m;
  struct algorithm *alg_in, *best_alg;
  int cost;
  unsigned HOST_WIDE_INT q;

  /* Indicate that no algorithm is yet found.  If no algorithm
     is found, this value will be returned and indicate failure.  */
  alg_out->cost = cost_limit;

  if (cost_limit <= 0)
    return;

  /* t == 1 can be done in zero cost.  */
  if (t == 1)
    {
      alg_out->ops = 1;
      alg_out->cost = 0;
      alg_out->op[0] = alg_m;
      return;
    }

  /* t == 0 sometimes has a cost.  If it does and it exceeds our limit,
     fail now.  */
  if (t == 0)
    {
      if (zero_cost >= cost_limit)
	return;
      else
	{
	  alg_out->ops = 1;
	  alg_out->cost = zero_cost;
	  alg_out->op[0] = alg_zero;
	  return;
	}
    }
  /* We'll be needing a couple extra algorithm structures now.  */

  alg_in = (struct algorithm *) alloca (sizeof (struct algorithm));
  best_alg = (struct algorithm *) alloca (sizeof (struct algorithm));
  /* If we have a group of zero bits at the low-order part of T, try
     multiplying by the remaining bits and then doing a shift.  */

  if ((t & 1) == 0)
    {
      m = floor_log2 (t & -t);	/* m = number of low zero bits */
      q = t >> m;
      cost = shift_cost[m];
      synth_mult (alg_in, q, cost_limit - cost);

      cost += alg_in->cost;
      if (cost < cost_limit)
	{
	  struct algorithm *x;
	  x = alg_in, alg_in = best_alg, best_alg = x;
	  best_alg->log[best_alg->ops] = m;
	  best_alg->op[best_alg->ops] = alg_shift;
	  cost_limit = cost;
	}
    }
  /* If we have an odd number, add or subtract one.  */
  if ((t & 1) != 0)
    {
      unsigned HOST_WIDE_INT w;

      for (w = 1; (w & t) != 0; w <<= 1)
	;
      if (w > 2
	  /* Reject the case where t is 3.
	     Thus we prefer addition in that case.  */
	  && t != 3)
	{
	  /* T ends with ...111.  Multiply by (T + 1) and subtract 1.  */

	  cost = add_cost;
	  synth_mult (alg_in, t + 1, cost_limit - cost);

	  cost += alg_in->cost;
	  if (cost < cost_limit)
	    {
	      struct algorithm *x;
	      x = alg_in, alg_in = best_alg, best_alg = x;
	      best_alg->log[best_alg->ops] = 0;
	      best_alg->op[best_alg->ops] = alg_sub_t_m2;
	      cost_limit = cost;
	    }
	}
      else
	{
	  /* T ends with ...01 or ...011.  Multiply by (T - 1) and add 1.  */

	  cost = add_cost;
	  synth_mult (alg_in, t - 1, cost_limit - cost);

	  cost += alg_in->cost;
	  if (cost < cost_limit)
	    {
	      struct algorithm *x;
	      x = alg_in, alg_in = best_alg, best_alg = x;
	      best_alg->log[best_alg->ops] = 0;
	      best_alg->op[best_alg->ops] = alg_add_t_m2;
	      cost_limit = cost;
	    }
	}
    }
  /* Look for factors of t of the form
     t = q(2**m +- 1), 2 <= m <= floor(log2(t - 1)).
     If we find such a factor, we can multiply by t using an algorithm that
     multiplies by q, shift the result by m and add/subtract it to itself.

     We search for large factors first and loop down, even if large factors
     are less probable than small; if we find a large factor we will find a
     good sequence quickly, and therefore be able to prune (by decreasing
     COST_LIMIT) the search.  */

  for (m = floor_log2 (t - 1); m >= 2; m--)
    {
      unsigned HOST_WIDE_INT d;

      d = ((unsigned HOST_WIDE_INT) 1 << m) + 1;
      if (t % d == 0 && t > d)
	{
	  cost = MIN (shiftadd_cost[m], add_cost + shift_cost[m]);
	  synth_mult (alg_in, t / d, cost_limit - cost);

	  cost += alg_in->cost;
	  if (cost < cost_limit)
	    {
	      struct algorithm *x;
	      x = alg_in, alg_in = best_alg, best_alg = x;
	      best_alg->log[best_alg->ops] = m;
	      best_alg->op[best_alg->ops] = alg_add_factor;
	      cost_limit = cost;
	    }
	  /* Other factors will have been taken care of in the recursion.  */
	  break;
	}

      d = ((unsigned HOST_WIDE_INT) 1 << m) - 1;
      if (t % d == 0 && t > d)
	{
	  cost = MIN (shiftsub_cost[m], add_cost + shift_cost[m]);
	  synth_mult (alg_in, t / d, cost_limit - cost);

	  cost += alg_in->cost;
	  if (cost < cost_limit)
	    {
	      struct algorithm *x;
	      x = alg_in, alg_in = best_alg, best_alg = x;
	      best_alg->log[best_alg->ops] = m;
	      best_alg->op[best_alg->ops] = alg_sub_factor;
	      cost_limit = cost;
	    }
	  break;
	}
    }
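  /* Worked example of the factor cases (added for exposition): t = 45 is
     divisible by d = 2**4 - 1 = 15, so alg_sub_factor records
     x*45 == ((x*3) << 4) - x*3 with q = 3, and the recursive call then
     synthesizes x*3 in turn, e.g. as (x << 1) + x.  */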
  /* Try shift-and-add (load effective address) instructions,
     i.e. do a*3, a*5, a*9.  */
  if ((t & 1) != 0)
    {
      q = t - 1;
      q = q & -q;
      m = exact_log2 (q);
      if (m >= 0)
	{
	  cost = shiftadd_cost[m];
	  synth_mult (alg_in, (t - 1) >> m, cost_limit - cost);

	  cost += alg_in->cost;
	  if (cost < cost_limit)
	    {
	      struct algorithm *x;
	      x = alg_in, alg_in = best_alg, best_alg = x;
	      best_alg->log[best_alg->ops] = m;
	      best_alg->op[best_alg->ops] = alg_add_t2_m;
	      cost_limit = cost;
	    }
	}

      q = t + 1;
      q = q & -q;
      m = exact_log2 (q);
      if (m >= 0)
	{
	  cost = shiftsub_cost[m];
	  synth_mult (alg_in, (t + 1) >> m, cost_limit - cost);

	  cost += alg_in->cost;
	  if (cost < cost_limit)
	    {
	      struct algorithm *x;
	      x = alg_in, alg_in = best_alg, best_alg = x;
	      best_alg->log[best_alg->ops] = m;
	      best_alg->op[best_alg->ops] = alg_sub_t2_m;
	      cost_limit = cost;
	    }
	}
    }
  /* If cost_limit has not decreased since we stored it in alg_out->cost,
     we have not found any algorithm.  */
  if (cost_limit == alg_out->cost)
    return;

  /* If we are getting a too long sequence for `struct algorithm'
     to record, make this search fail.  */
  if (best_alg->ops == MAX_BITS_PER_WORD)
    return;

  /* Copy the algorithm from temporary space to the space at alg_out.
     We avoid using structure assignment because the majority of
     best_alg is normally undefined, and this is a critical function.  */
  alg_out->ops = best_alg->ops + 1;
  alg_out->cost = cost_limit;
  bcopy ((char *) best_alg->op, (char *) alg_out->op,
	 alg_out->ops * sizeof *alg_out->op);
  bcopy ((char *) best_alg->log, (char *) alg_out->log,
	 alg_out->ops * sizeof *alg_out->log);
}
/* Perform a multiplication and return an rtx for the result.
   MODE is mode of value; OP0 and OP1 are what to multiply (rtx's);
   TARGET is a suggestion for where to store the result (an rtx).

   We check specially for a constant integer as OP1.
   If you want this check for OP0 as well, then before calling
   you should swap the two operands if OP0 would be constant.  */

rtx
expand_mult (mode, op0, op1, target, unsignedp)
     enum machine_mode mode;
     register rtx op0, op1, target;
     int unsignedp;
{
  rtx const_op1 = op1;
  /* synth_mult does an `unsigned int' multiply.  As long as the mode is
     less than or equal in size to `unsigned int' this doesn't matter.
     If the mode is larger than `unsigned int', then synth_mult works only
     if the constant value exactly fits in an `unsigned int' without any
     truncation.  This means that multiplying by negative values does
     not work; results are off by 2^32 on a 32 bit machine.  */

  /* If we are multiplying in DImode, it may still be a win
     to try to work with shifts and adds.  */
  if (GET_CODE (op1) == CONST_DOUBLE
      && GET_MODE_CLASS (GET_MODE (op1)) == MODE_INT
      && HOST_BITS_PER_INT >= BITS_PER_WORD
      && CONST_DOUBLE_HIGH (op1) == 0)
    const_op1 = GEN_INT (CONST_DOUBLE_LOW (op1));
  else if (HOST_BITS_PER_INT < GET_MODE_BITSIZE (mode)
	   && GET_CODE (op1) == CONST_INT
	   && INTVAL (op1) < 0)
    const_op1 = 0;

  /* We used to test optimize here, on the grounds that it's better to
     produce a smaller program when -O is not used.
     But this causes such a terrible slowdown sometimes
     that it seems better to use synth_mult always.  */
  if (const_op1 && GET_CODE (const_op1) == CONST_INT)
    {
      struct algorithm alg;
      struct algorithm alg2;
      HOST_WIDE_INT val = INTVAL (op1);
      HOST_WIDE_INT val_so_far;
      rtx insn;
      int mult_cost;
      enum {basic_variant, negate_variant, add_variant} variant = basic_variant;
      /* Try to do the computation three ways: multiply by the negative of
	 OP1 and then negate, do the multiplication directly, or do
	 multiplication by OP1 - 1.  */

      mult_cost = rtx_cost (gen_rtx (MULT, mode, op0, op1), SET);
      mult_cost = MIN (12 * add_cost, mult_cost);

      synth_mult (&alg, val, mult_cost);

      /* This works only if the inverted value actually fits in an
	 `unsigned int' */
      if (HOST_BITS_PER_INT >= GET_MODE_BITSIZE (mode))
	{
	  synth_mult (&alg2, - val,
		      (alg.cost < mult_cost ? alg.cost : mult_cost)
		      - negate_cost);
	  if (alg2.cost + negate_cost < alg.cost)
	    alg = alg2, variant = negate_variant;
	}

      /* This proves very useful for division-by-constant.  */
      synth_mult (&alg2, val - 1,
		  (alg.cost < mult_cost ? alg.cost : mult_cost) - add_cost);
      if (alg2.cost + add_cost < alg.cost)
	alg = alg2, variant = add_variant;
      if (alg.cost < mult_cost)
	{
	  /* We found something cheaper than a multiply insn.  */
	  int opno;
	  rtx accum, tem;

	  op0 = protect_from_queue (op0, 0);

	  /* Avoid referencing memory over and over.
	     For speed, but also for correctness when mem is volatile.  */
	  if (GET_CODE (op0) == MEM)
	    op0 = force_reg (mode, op0);

	  /* ACCUM starts out either as OP0 or as a zero, depending on
	     the first operation.  */

	  if (alg.op[0] == alg_zero)
	    {
	      accum = copy_to_mode_reg (mode, const0_rtx);
	      val_so_far = 0;
	    }
	  else if (alg.op[0] == alg_m)
	    {
	      accum = copy_to_mode_reg (mode, op0);
	      val_so_far = 1;
	    }
	  else
	    abort ();
	  for (opno = 1; opno < alg.ops; opno++)
	    {
	      int log = alg.log[opno];
	      int preserve = preserve_subexpressions_p ();
	      rtx shift_subtarget = preserve ? 0 : accum;
	      rtx add_target
		= (opno == alg.ops - 1 && target != 0 && variant != add_variant
		   ? target : 0);
	      rtx accum_target = preserve ? 0 : accum;

	      switch (alg.op[opno])
		{
		case alg_shift:
		  accum = expand_shift (LSHIFT_EXPR, mode, accum,
					build_int_2 (log, 0), NULL_RTX, 0);
		  val_so_far <<= log;
		  break;
		case alg_add_t_m2:
		  tem = expand_shift (LSHIFT_EXPR, mode, op0,
				      build_int_2 (log, 0), NULL_RTX, 0);
		  accum = force_operand (gen_rtx (PLUS, mode, accum, tem),
					 add_target
					 ? add_target : accum_target);
		  val_so_far += (HOST_WIDE_INT) 1 << log;
		  break;

		case alg_sub_t_m2:
		  tem = expand_shift (LSHIFT_EXPR, mode, op0,
				      build_int_2 (log, 0), NULL_RTX, 0);
		  accum = force_operand (gen_rtx (MINUS, mode, accum, tem),
					 add_target
					 ? add_target : accum_target);
		  val_so_far -= (HOST_WIDE_INT) 1 << log;
		  break;

		case alg_add_t2_m:
		  accum = expand_shift (LSHIFT_EXPR, mode, accum,
					build_int_2 (log, 0), shift_subtarget,
					0);
		  accum = force_operand (gen_rtx (PLUS, mode, accum, op0),
					 add_target
					 ? add_target : accum_target);
		  val_so_far = (val_so_far << log) + 1;
		  break;

		case alg_sub_t2_m:
		  accum = expand_shift (LSHIFT_EXPR, mode, accum,
					build_int_2 (log, 0), shift_subtarget,
					0);
		  accum = force_operand (gen_rtx (MINUS, mode, accum, op0),
					 add_target
					 ? add_target : accum_target);
		  val_so_far = (val_so_far << log) - 1;
		  break;

		case alg_add_factor:
		  tem = expand_shift (LSHIFT_EXPR, mode, accum,
				      build_int_2 (log, 0), NULL_RTX, 0);
		  accum = force_operand (gen_rtx (PLUS, mode, accum, tem),
					 add_target
					 ? add_target : accum_target);
		  val_so_far += val_so_far << log;
		  break;

		case alg_sub_factor:
		  tem = expand_shift (LSHIFT_EXPR, mode, accum,
				      build_int_2 (log, 0), NULL_RTX, 0);
		  accum = force_operand (gen_rtx (MINUS, mode, tem, accum),
					 (add_target ? add_target
					  : preserve ? 0 : tem));
		  val_so_far = (val_so_far << log) - val_so_far;
		  break;

		default:
		  abort ();
		}
	    }
	  /* Write a REG_EQUAL note on the last insn so that we can cse
	     multiplication sequences.  */

	  insn = get_last_insn ();
	  REG_NOTES (insn)
	    = gen_rtx (EXPR_LIST, REG_EQUAL,
		       gen_rtx (MULT, mode, op0, GEN_INT (val_so_far)),
		       REG_NOTES (insn));

	  if (variant == negate_variant)
	    {
	      val_so_far = - val_so_far;
	      accum = expand_unop (mode, neg_optab, accum, target, 0);
	    }
	  else if (variant == add_variant)
	    {
	      val_so_far = val_so_far + 1;
	      accum = force_operand (gen_rtx (PLUS, mode, accum, op0), target);
	    }

	  if (val != val_so_far)
	    abort ();

	  return accum;
	}
    }

  /* This used to use umul_optab if unsigned, but for non-widening multiply
     there is no difference between signed and unsigned.  */
  op0 = expand_binop (mode, smul_optab,
		      op0, op1, target, unsignedp, OPTAB_LIB_WIDEN);
  if (op0 == 0)
    abort ();
  return op0;
}
/* Return the smallest n such that 2**n >= X.  */

int
ceil_log2 (x)
     unsigned HOST_WIDE_INT x;
{
  return floor_log2 (x - 1) + 1;
}
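/* Examples (added for exposition): ceil_log2 (5) == 3 since 2**3 = 8 >= 5,
   and ceil_log2 (8) == 3.  For x == 1 this relies on floor_log2 (0) being
   defined as -1 (assumed here), giving ceil_log2 (1) == 0.  */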
/* Choose a minimal N + 1 bit approximation to 1/D that can be used to
   replace division by D, and put the least significant N bits of the result
   in *MULTIPLIER_PTR and return the most significant bit.

   The width of operations is N (should be <= HOST_BITS_PER_WIDE_INT), the
   needed precision is in PRECISION (should be <= N).

   PRECISION should be as small as possible so this function can choose a
   multiplier more freely.

   The rounded-up logarithm of D is placed in *LGUP_PTR.  A shift count that
   is to be used for a final right shift is placed in *POST_SHIFT_PTR.

   Using this function, x/D will be equal to (x * m) >> (*POST_SHIFT_PTR),
   where m is the full HOST_BITS_PER_WIDE_INT + 1 bit multiplier.  */

unsigned HOST_WIDE_INT
choose_multiplier (d, n, precision, multiplier_ptr, post_shift_ptr, lgup_ptr)
     unsigned HOST_WIDE_INT d;
     int n;
     int precision;
     unsigned HOST_WIDE_INT *multiplier_ptr;
     int *post_shift_ptr;
     int *lgup_ptr;
{
  unsigned HOST_WIDE_INT mhigh_hi, mhigh_lo;
  unsigned HOST_WIDE_INT mlow_hi, mlow_lo;
  int lgup, post_shift;
  int pow, pow2;
  unsigned HOST_WIDE_INT nh, nl, dummy1, dummy2;
  /* lgup = ceil(log2(divisor)); */
  lgup = ceil_log2 (d);

  if (lgup > n)
    abort ();

  pow = n + lgup;
  pow2 = n + lgup - precision;

  if (pow == 2 * HOST_BITS_PER_WIDE_INT)
    {
      /* We could handle this with some effort, but this case is much better
	 handled directly with a scc insn, so rely on caller using that.  */
      abort ();
    }

  /* mlow = 2^(N + lgup)/d */
  if (pow >= HOST_BITS_PER_WIDE_INT)
    {
      nh = (unsigned HOST_WIDE_INT) 1 << (pow - HOST_BITS_PER_WIDE_INT);
      nl = 0;
    }
  else
    {
      nh = 0;
      nl = (unsigned HOST_WIDE_INT) 1 << pow;
    }
  div_and_round_double (TRUNC_DIV_EXPR, 1, nl, nh, d, (HOST_WIDE_INT) 0,
			&mlow_lo, &mlow_hi, &dummy1, &dummy2);

  /* mhigh = (2^(N + lgup) + 2^(N + lgup - precision))/d */
  if (pow2 >= HOST_BITS_PER_WIDE_INT)
    nh |= (unsigned HOST_WIDE_INT) 1 << (pow2 - HOST_BITS_PER_WIDE_INT);
  else
    nl |= (unsigned HOST_WIDE_INT) 1 << pow2;
  div_and_round_double (TRUNC_DIV_EXPR, 1, nl, nh, d, (HOST_WIDE_INT) 0,
			&mhigh_lo, &mhigh_hi, &dummy1, &dummy2);

  if (mhigh_hi && nh - d >= d)
    abort ();
  if (mhigh_hi > 1 || mlow_hi > 1)
    abort ();
  /* assert that mlow < mhigh.  */
  if (! (mlow_hi < mhigh_hi || (mlow_hi == mhigh_hi && mlow_lo < mhigh_lo)))
    abort ();

  /* If precision == N, then mlow, mhigh exceed 2^N
     (but they do not exceed 2^(N+1)).  */

  /* Reduce to lowest terms */
  for (post_shift = lgup; post_shift > 0; post_shift--)
    {
      unsigned HOST_WIDE_INT ml_lo
	= (mlow_hi << (HOST_BITS_PER_WIDE_INT - 1)) | (mlow_lo >> 1);
      unsigned HOST_WIDE_INT mh_lo
	= (mhigh_hi << (HOST_BITS_PER_WIDE_INT - 1)) | (mhigh_lo >> 1);
      if (ml_lo >= mh_lo)
	break;

      mlow_hi = 0;
      mlow_lo = ml_lo;
      mhigh_hi = 0;
      mhigh_lo = mh_lo;
    }

  *post_shift_ptr = post_shift;
  *lgup_ptr = lgup;
  if (n < HOST_BITS_PER_WIDE_INT)
    {
      unsigned HOST_WIDE_INT mask = ((unsigned HOST_WIDE_INT) 1 << n) - 1;
      *multiplier_ptr = mhigh_lo & mask;
      return mhigh_lo >= mask;
    }
  else
    {
      *multiplier_ptr = mhigh_lo;
      return mhigh_hi;
    }
}
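/* Usage sketch (added for exposition; written in modern C, not part of
   this file): with N = 32 and D = 5, choose_multiplier selects the
   multiplier 0xCCCCCCCD == ceil(2**34 / 5) and a post shift of 2, so an
   unsigned division by 5 becomes one widening multiply plus a shift of
   the high half:

       uint32_t udiv5 (uint32_t n)
       {
         uint64_t t = (uint64_t) n * 0xCCCCCCCDu;
         return (uint32_t) (t >> 34);
       }

   Here t >> 34 is the 32-bit high part shifted right by the post shift;
   the result equals n / 5 for every 32-bit n.  */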
/* Compute the inverse of X mod 2**n, i.e., find Y such that X * Y is
   congruent to 1 (mod 2**N).  */

static unsigned HOST_WIDE_INT
invert_mod2n (x, n)
     unsigned HOST_WIDE_INT x;
     int n;
{
  /* Solve x*y == 1 (mod 2^n), where x is odd.  Return y.  */

  /* The algorithm notes that the choice y = x satisfies
     x*y == 1 mod 2^3, since x is assumed odd.
     Each iteration doubles the number of bits of significance in y.  */

  int nbit = 3;
  unsigned HOST_WIDE_INT mask;
  unsigned HOST_WIDE_INT y = x;

  mask = (n == HOST_BITS_PER_WIDE_INT
	  ? ~(unsigned HOST_WIDE_INT) 0
	  : ((unsigned HOST_WIDE_INT) 1 << n) - 1);

  while (nbit < n)
    {
      y = y * (2 - x*y) & mask;		/* Modulo 2^N */
      nbit *= 2;
    }
  return y;
}
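/* Illustration (added for exposition; modern C, not part of this file):
   for n == 32 and odd x, the loop above amounts to four Newton/Hensel
   steps, each doubling the number of correct low-order bits:

       uint32_t invert_mod_2_32 (uint32_t x)
       {
         uint32_t y = x;
         y *= 2 - x * y;
         y *= 2 - x * y;
         y *= 2 - x * y;
         y *= 2 - x * y;
         return y;
       }

   Starting from y = x, correct mod 2**3 for odd x, the steps give 6, 12,
   24 and finally 48 >= 32 correct bits, so x * y == 1 (mod 2**32).  */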
/* Emit code to adjust ADJ_OPERAND after multiplication of wrong signedness
   flavor of OP0 and OP1.  ADJ_OPERAND is already the high half of the
   product OP0 x OP1.  If UNSIGNEDP is nonzero, adjust the signed product
   to become unsigned, if UNSIGNEDP is zero, adjust the unsigned product to
   become signed.

   The result is put in TARGET if that is convenient.

   MODE is the mode of operation.  */

rtx
expand_mult_highpart_adjust (mode, adj_operand, op0, op1, target, unsignedp)
     enum machine_mode mode;
     register rtx adj_operand, op0, op1, target;
     int unsignedp;
{
  rtx tem;
  enum rtx_code adj_code = unsignedp ? PLUS : MINUS;

  tem = expand_shift (RSHIFT_EXPR, mode, op0,
		      build_int_2 (GET_MODE_BITSIZE (mode) - 1, 0),
		      NULL_RTX, 0);
  tem = expand_and (tem, op1, NULL_RTX);
  adj_operand = force_operand (gen_rtx (adj_code, mode, adj_operand, tem),
			       adj_operand);

  tem = expand_shift (RSHIFT_EXPR, mode, op1,
		      build_int_2 (GET_MODE_BITSIZE (mode) - 1, 0),
		      NULL_RTX, 0);
  tem = expand_and (tem, op0, NULL_RTX);
  target = force_operand (gen_rtx (adj_code, mode, adj_operand, tem), target);

  return target;
}
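/* Identity behind the adjustment (added for exposition; modern C,
   an assumed illustration): reading the same N-bit patterns as signed
   instead of unsigned changes the high half of the product by y when x
   is negative and by x when y is negative:

       uint32_t umulhi_from_smulhi (int32_t x, int32_t y, uint32_t shigh)
       {
         uint32_t adj = shigh;
         if (x < 0) adj += (uint32_t) y;
         if (y < 0) adj += (uint32_t) x;
         return adj;
       }

   The shift/and/add (or subtract, going the other way) pairs emitted
   above compute exactly these conditional corrections without branches:
   the arithmetic right shift by N-1 produces the 0/-1 mask.  */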
/* Emit code to multiply OP0 and CNST1, putting the high half of the result
   in TARGET if that is convenient, and return where the result is.  If the
   operation cannot be performed, 0 is returned.

   MODE is the mode of operation and result.

   UNSIGNEDP nonzero means unsigned multiply.

   MAX_COST is the total allowed cost for the expanded RTL.  */

rtx
expand_mult_highpart (mode, op0, cnst1, target, unsignedp, max_cost)
     enum machine_mode mode;
     register rtx op0, target;
     unsigned HOST_WIDE_INT cnst1;
     int unsignedp;
     int max_cost;
{
  enum machine_mode wider_mode = GET_MODE_WIDER_MODE (mode);
  optab mul_highpart_optab;
  optab moptab;
  rtx tem;
  int size = GET_MODE_BITSIZE (mode);
  rtx op1, wide_op1;

  /* We can't support modes wider than HOST_BITS_PER_INT.  */
  if (size > HOST_BITS_PER_WIDE_INT)
    abort ();

  op1 = GEN_INT (cnst1);

  if (GET_MODE_BITSIZE (wider_mode) <= HOST_BITS_PER_INT)
    wide_op1 = op1;
  else
    wide_op1
      = immed_double_const (cnst1,
			    (unsignedp
			     ? (HOST_WIDE_INT) 0
			     : -(cnst1 >> (HOST_BITS_PER_WIDE_INT - 1))),
			    wider_mode);
  /* expand_mult handles constant multiplication of word_mode
     or narrower.  It does a poor job for large modes.  */
  if (size < BITS_PER_WORD
      && mul_cost[(int) wider_mode] + shift_cost[size-1] < max_cost)
    {
      /* We have to do this, since expand_binop doesn't do conversion for
	 multiply.  Maybe change expand_binop to handle widening multiply?  */
      op0 = convert_to_mode (wider_mode, op0, unsignedp);

      tem = expand_mult (wider_mode, op0, wide_op1, NULL_RTX, unsignedp);
      tem = expand_shift (RSHIFT_EXPR, wider_mode, tem,
			  build_int_2 (size, 0), NULL_RTX, 1);
      return convert_modes (mode, wider_mode, tem, unsignedp);
    }
  if (target == 0)
    target = gen_reg_rtx (mode);

  /* Firstly, try using a multiplication insn that only generates the needed
     high part of the product, and in the sign flavor of unsignedp.  */
  if (mul_highpart_cost[(int) mode] < max_cost)
    {
      mul_highpart_optab
	= unsignedp ? umul_highpart_optab : smul_highpart_optab;
      target = expand_binop (mode, mul_highpart_optab,
			     op0, wide_op1, target, unsignedp, OPTAB_DIRECT);
      if (target)
	return target;
    }
  /* Secondly, same as above, but use sign flavor opposite of unsignedp.
     Need to adjust the result after the multiplication.  */
  if (mul_highpart_cost[(int) mode] + 2 * shift_cost[size-1]
      + 4 * add_cost < max_cost)
    {
      mul_highpart_optab
	= unsignedp ? smul_highpart_optab : umul_highpart_optab;
      target = expand_binop (mode, mul_highpart_optab,
			     op0, wide_op1, target, unsignedp, OPTAB_DIRECT);
      if (target)
	/* We used the wrong signedness.  Adjust the result.  */
	return expand_mult_highpart_adjust (mode, target, op0,
					    op1, target, unsignedp);
    }
  /* Try widening multiplication.  */
  moptab = unsignedp ? umul_widen_optab : smul_widen_optab;
  if (moptab->handlers[(int) wider_mode].insn_code != CODE_FOR_nothing
      && mul_widen_cost[(int) wider_mode] < max_cost)
    {
      op1 = force_reg (mode, op1);
      goto try;
    }

  /* Try widening the mode and perform a non-widening multiplication.  */
  moptab = smul_optab;
  if (smul_optab->handlers[(int) wider_mode].insn_code != CODE_FOR_nothing
      && mul_cost[(int) wider_mode] + shift_cost[size-1] < max_cost)
    {
      op1 = wide_op1;
      goto try;
    }
  /* Try widening multiplication of opposite signedness, and adjust.  */
  moptab = unsignedp ? smul_widen_optab : umul_widen_optab;
  if (moptab->handlers[(int) wider_mode].insn_code != CODE_FOR_nothing
      && (mul_widen_cost[(int) wider_mode]
	  + 2 * shift_cost[size-1] + 4 * add_cost < max_cost))
    {
      rtx regop1 = force_reg (mode, op1);
      tem = expand_binop (wider_mode, moptab, op0, regop1,
			  NULL_RTX, ! unsignedp, OPTAB_WIDEN);
      if (tem != 0)
	{
	  /* Extract the high half of the just generated product.  */
	  tem = expand_shift (RSHIFT_EXPR, wider_mode, tem,
			      build_int_2 (size, 0), NULL_RTX, 1);
	  tem = convert_modes (mode, wider_mode, tem, unsignedp);
	  /* We used the wrong signedness.  Adjust the result.  */
	  return expand_mult_highpart_adjust (mode, tem, op0, op1,
					      target, unsignedp);
	}
    }

  return 0;
 try:
  /* Pass NULL_RTX as target since TARGET has wrong mode.  */
  tem = expand_binop (wider_mode, moptab, op0, op1,
		      NULL_RTX, unsignedp, OPTAB_WIDEN);
  if (tem == 0)
    return 0;

  /* Extract the high half of the just generated product.  */
  if (mode == word_mode)
    {
      return gen_highpart (mode, tem);
    }
  else
    {
      tem = expand_shift (RSHIFT_EXPR, wider_mode, tem,
			  build_int_2 (size, 0), NULL_RTX, 1);
      return convert_modes (mode, wider_mode, tem, unsignedp);
    }
}
/* Emit the code to divide OP0 by OP1, putting the result in TARGET
   if that is convenient, and returning where the result is.
   You may request either the quotient or the remainder as the result;
   specify REM_FLAG nonzero to get the remainder.

   CODE is the expression code for which kind of division this is;
   it controls how rounding is done.  MODE is the machine mode to use.
   UNSIGNEDP nonzero means do unsigned division.  */

/* ??? For CEIL_MOD_EXPR, can compute incorrect remainder with ANDI
   and then correct it by or'ing in missing high bits
   if result of ANDI is nonzero.
   For ROUND_MOD_EXPR, can use ANDI and then sign-extend the result.
   This could optimize to a bfexts instruction.
   But C doesn't use these operations, so their optimizations are
   deferred.  */

#define EXACT_POWER_OF_2_OR_ZERO_P(x) (((x) & ((x) - 1)) == 0)
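/* How the macro works (added for exposition): x & (x - 1) clears the
   lowest set bit of x, so the result is zero exactly when x has at most
   one bit set: 8 & 7 == 0 (power of two), 12 & 11 == 8 (not one), and
   0 & -1 == 0, which is why zero is accepted as well.  */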
rtx
expand_divmod (rem_flag, code, mode, op0, op1, target, unsignedp)
     int rem_flag;
     enum tree_code code;
     enum machine_mode mode;
     register rtx op0, op1, target;
     int unsignedp;
{
  enum machine_mode compute_mode;
  register rtx tquotient;
  rtx quotient = 0, remainder = 0;
  rtx last;
  int size;
  rtx insn, set;
  optab optab1, optab2;
  int op1_is_constant, op1_is_pow2;
  int max_cost, extra_cost;

  op1_is_constant = GET_CODE (op1) == CONST_INT;
  op1_is_pow2 = (op1_is_constant
		 && ((EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1))
		      || EXACT_POWER_OF_2_OR_ZERO_P (-INTVAL (op1)))));
  /*
     This is the structure of expand_divmod:

     First comes code to fix up the operands so we can perform the operations
     correctly and efficiently.

     Second comes a switch statement with code specific for each rounding
     mode.  For some special operands this code emits all RTL for the desired
     operation, for other cases, it generates only a quotient and stores it in
     QUOTIENT.  The case for trunc division/remainder might leave quotient = 0,
     to indicate that it has not done anything.

     Last comes code that finishes the operation.  If QUOTIENT is set and
     REM_FLAG is set, the remainder is computed as OP0 - QUOTIENT * OP1.  If
     QUOTIENT is not set, it is computed using trunc rounding.

     We try to generate special code for division and remainder when OP1 is a
     constant.  If |OP1| = 2**n we can use shifts and some other fast
     operations.  For other values of OP1, we compute a carefully selected
     fixed-point approximation m = 1/OP1, and generate code that multiplies
     OP0 by m.

     In all cases but EXACT_DIV_EXPR, this multiplication requires the upper
     half of the product.  Different strategies for generating the product are
     implemented in expand_mult_highpart.

     If what we actually want is the remainder, we generate that by another
     by-constant multiplication and a subtraction.  */

  /* We shouldn't be called with OP1 == const1_rtx, but some of the
     code below will malfunction if we are, so check here and handle
     the special case if so.  */
  if (op1 == const1_rtx)
    return rem_flag ? const0_rtx : op0;
  if (target
      /* Don't use the function value register as a target
	 since we have to read it as well as write it,
	 and function-inlining gets confused by this.  */
      && ((REG_P (target) && REG_FUNCTION_VALUE_P (target))
	  /* Don't clobber an operand while doing a multi-step calculation.  */
	  || ((rem_flag || op1_is_constant)
	      && (reg_mentioned_p (target, op0)
		  || (GET_CODE (op0) == MEM && GET_CODE (target) == MEM)))
	  || reg_mentioned_p (target, op1)
	  || (GET_CODE (op1) == MEM && GET_CODE (target) == MEM)))
    target = 0;
  /* Get the mode in which to perform this computation.  Normally it will
     be MODE, but sometimes we can't do the desired operation in MODE.
     If so, pick a wider mode in which we can do the operation.  Convert
     to that mode at the start to avoid repeated conversions.

     First see what operations we need.  These depend on the expression
     we are evaluating.  (We assume that divxx3 insns exist under the
     same conditions as modxx3 insns and that these insns don't normally
     fail.  If these assumptions are not correct, we may generate less
     efficient code in some cases.)

     Then see if we find a mode in which we can open-code that operation
     (either a division, modulus, or shift).  Finally, check for the smallest
     mode for which we can do the operation with a library call.  */

  /* We might want to refine this now that we have division-by-constant
     optimization.  Since expand_mult_highpart tries so many variants, it is
     not straightforward to generalize this.  Maybe we should make an array
     of possible modes in init_expmed?  Save this for GCC 2.7.  */

  optab1 = (op1_is_pow2 ? (unsignedp ? lshr_optab : ashr_optab)
	    : (unsignedp ? udiv_optab : sdiv_optab));
  optab2 = (op1_is_pow2 ? optab1
	    : (unsignedp ? udivmod_optab : sdivmod_optab));
  for (compute_mode = mode; compute_mode != VOIDmode;
       compute_mode = GET_MODE_WIDER_MODE (compute_mode))
    if (optab1->handlers[(int) compute_mode].insn_code != CODE_FOR_nothing
	|| optab2->handlers[(int) compute_mode].insn_code != CODE_FOR_nothing)
      break;

  if (compute_mode == VOIDmode)
    for (compute_mode = mode; compute_mode != VOIDmode;
	 compute_mode = GET_MODE_WIDER_MODE (compute_mode))
      if (optab1->handlers[(int) compute_mode].libfunc
	  || optab2->handlers[(int) compute_mode].libfunc)
	break;

  /* If we still couldn't find a mode, use MODE, but we'll probably abort
     in expand_binop.  */
  if (compute_mode == VOIDmode)
    compute_mode = mode;
  if (target && GET_MODE (target) == compute_mode)
    tquotient = target;
  else
    tquotient = gen_reg_rtx (compute_mode);

  size = GET_MODE_BITSIZE (compute_mode);
#if 0
  /* It should be possible to restrict the precision to GET_MODE_BITSIZE
     (mode), and thereby get better code when OP1 is a constant.  Do that
     later.  It will require going over all usages of SIZE below.  */
  size = GET_MODE_BITSIZE (mode);
#endif

  max_cost = div_cost[(int) compute_mode]
    - (rem_flag ? mul_cost[(int) compute_mode] + add_cost : 0);
  /* Now convert to the best mode to use.  */
  if (compute_mode != mode)
    {
      op0 = convert_modes (compute_mode, mode, op0, unsignedp);
      op1 = convert_modes (compute_mode, mode, op1, unsignedp);
    }

  /* If one of the operands is a volatile MEM, copy it into a register.  */

  if (GET_CODE (op0) == MEM && MEM_VOLATILE_P (op0))
    op0 = force_reg (compute_mode, op0);
  if (GET_CODE (op1) == MEM && MEM_VOLATILE_P (op1))
    op1 = force_reg (compute_mode, op1);

  /* If we need the remainder or if OP1 is constant, we need to
     put OP0 in a register in case it has any queued subexpressions.  */
  if (rem_flag || op1_is_constant)
    op0 = force_reg (compute_mode, op0);
  last = get_last_insn ();

  /* Promote floor rounding to trunc rounding for unsigned operations.  */
  if (unsignedp)
    {
      if (code == FLOOR_DIV_EXPR)
	code = TRUNC_DIV_EXPR;
      if (code == FLOOR_MOD_EXPR)
	code = TRUNC_MOD_EXPR;
    }

  if (op1 != const0_rtx)
    switch (code)
      {
      case TRUNC_MOD_EXPR:
      case TRUNC_DIV_EXPR:
	if (op1_is_constant)
	  {
	    if (unsignedp)
	      {
		unsigned HOST_WIDE_INT mh, ml;
		int pre_shift, post_shift;
		int dummy;
		unsigned HOST_WIDE_INT d = INTVAL (op1);

		if (EXACT_POWER_OF_2_OR_ZERO_P (d))
		  {
		    pre_shift = floor_log2 (d);
		    if (rem_flag)
		      {
			remainder
			  = expand_binop (compute_mode, and_optab, op0,
					  GEN_INT (((HOST_WIDE_INT) 1
						    << pre_shift) - 1),
					  remainder, 1, OPTAB_LIB_WIDEN);
			if (remainder)
			  return gen_lowpart (mode, remainder);
		      }
		    quotient = expand_shift (RSHIFT_EXPR, compute_mode, op0,
					     build_int_2 (pre_shift, 0),
					     tquotient, 1);
		  }
		else if (size <= HOST_BITS_PER_WIDE_INT)
		  {
		    if (d >= ((unsigned HOST_WIDE_INT) 1 << (size - 1)))
		      {
			/* Most significant bit of divisor is set; emit an scc
			   insn.  */
			quotient = emit_store_flag (tquotient, GEU, op0, op1,
						    compute_mode, 1, 1);
			if (quotient == 0)
			  goto fail1;
		      }
		    else
		      {
			/* Find a suitable multiplier and right shift count
			   instead of multiplying with D.  */

			mh = choose_multiplier (d, size, size,
						&ml, &post_shift, &dummy);

			/* If the suggested multiplier is more than SIZE bits,
			   we can do better for even divisors, using an
			   initial right shift.  */
			if (mh != 0 && (d & 1) == 0)
			  {
			    pre_shift = floor_log2 (d & -d);
			    mh = choose_multiplier (d >> pre_shift, size,
						    size - pre_shift,
						    &ml, &post_shift, &dummy);
			    if (mh)
			      abort ();
			  }
			else
			  pre_shift = 0;

			if (mh != 0)
			  {
			    rtx t1, t2, t3, t4;

			    extra_cost = (shift_cost[post_shift - 1]
					  + shift_cost[1] + 2 * add_cost);
			    t1 = expand_mult_highpart (compute_mode, op0, ml,
						       NULL_RTX, 1,
						       max_cost - extra_cost);
			    if (t1 == 0)
			      goto fail1;
			    t2 = force_operand (gen_rtx (MINUS, compute_mode,
							 op0, t1),
						NULL_RTX);
			    t3 = expand_shift (RSHIFT_EXPR, compute_mode, t2,
					       build_int_2 (1, 0),
					       NULL_RTX, 1);
			    t4 = force_operand (gen_rtx (PLUS, compute_mode,
							 t1, t3),
						NULL_RTX);
			    quotient
			      = expand_shift (RSHIFT_EXPR, compute_mode, t4,
					      build_int_2 (post_shift - 1, 0),
					      tquotient, 1);
			  }
			else
			  {
			    rtx t1, t2;

			    t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
					       build_int_2 (pre_shift, 0),
					       NULL_RTX, 1);
			    extra_cost = (shift_cost[pre_shift]
					  + shift_cost[post_shift]);
			    t2 = expand_mult_highpart (compute_mode, t1, ml,
						       NULL_RTX, 1,
						       max_cost - extra_cost);
			    if (t2 == 0)
			      goto fail1;
			    quotient
			      = expand_shift (RSHIFT_EXPR, compute_mode, t2,
					      build_int_2 (post_shift, 0),
					      tquotient, 1);
			  }
		      }
		  }
		else		/* Too wide mode to use tricky code */
		  break;

		insn = get_last_insn ();
		if (insn != last
		    && (set = single_set (insn)) != 0
		    && SET_DEST (set) == quotient)
		  REG_NOTES (insn)
		    = gen_rtx (EXPR_LIST, REG_EQUAL,
			       gen_rtx (UDIV, compute_mode, op0, op1),
			       REG_NOTES (insn));
	      }
	    else		/* TRUNC_DIV, signed */
	      {
		unsigned HOST_WIDE_INT ml;
		int lgup, post_shift;
		HOST_WIDE_INT d = INTVAL (op1);
		unsigned HOST_WIDE_INT abs_d = d >= 0 ? d : -d;

		/* n rem d = n rem -d */
		if (rem_flag && d < 0)
		  {
		    d = abs_d;
		    op1 = GEN_INT (abs_d);
		  }

		if (d == 1)
		  quotient = op0;
		else if (d == -1)
		  quotient = expand_unop (compute_mode, neg_optab, op0,
					  tquotient, 0);
		else if (abs_d == (unsigned HOST_WIDE_INT) 1 << (size - 1))
		  {
		    /* This case is not handled correctly below.  */
		    quotient = emit_store_flag (tquotient, EQ, op0, op1,
						compute_mode, 1, 1);
		    if (quotient == 0)
		      goto fail1;
		  }
		else if (EXACT_POWER_OF_2_OR_ZERO_P (d)
			 && (rem_flag ? smod_pow2_cheap : sdiv_pow2_cheap))
		  ;
		else if (EXACT_POWER_OF_2_OR_ZERO_P (abs_d))
		  {
		    lgup = floor_log2 (abs_d);
		    if (abs_d != 2 && BRANCH_COST < 3)
		      {
			rtx label = gen_label_rtx ();
			rtx t1;

			t1 = copy_to_mode_reg (compute_mode, op0);
			emit_cmp_insn (t1, const0_rtx, GE,
				       NULL_RTX, compute_mode, 0, 0);
			emit_jump_insn (gen_bge (label));
			expand_inc (t1, GEN_INT (abs_d - 1));
			emit_label (label);
			quotient = expand_shift (RSHIFT_EXPR, compute_mode, t1,
						 build_int_2 (lgup, 0),
						 tquotient, 0);
		      }
		    else
		      {
			rtx t1, t2, t3;

			t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
					   build_int_2 (size - 1, 0),
					   NULL_RTX, 0);
			t2 = expand_shift (RSHIFT_EXPR, compute_mode, t1,
					   build_int_2 (size - lgup, 0),
					   NULL_RTX, 1);
			t3 = force_operand (gen_rtx (PLUS, compute_mode,
						     op0, t2),
					    NULL_RTX);
			quotient = expand_shift (RSHIFT_EXPR, compute_mode, t3,
						 build_int_2 (lgup, 0),
						 tquotient, 0);
		      }
		    /* We have computed OP0 / abs(OP1).  If OP1 is negative,
		       negate the quotient.  */
		    if (d < 0)
		      {
			insn = get_last_insn ();
			if (insn != last
			    && (set = single_set (insn)) != 0
			    && SET_DEST (set) == quotient)
			  REG_NOTES (insn)
			    = gen_rtx (EXPR_LIST, REG_EQUAL,
				       gen_rtx (DIV, compute_mode, op0,
						GEN_INT (abs_d)),
				       REG_NOTES (insn));

			quotient = expand_unop (compute_mode, neg_optab,
						quotient, quotient, 0);
		      }
		  }
		else if (size <= HOST_BITS_PER_WIDE_INT)
		  {
		    choose_multiplier (abs_d, size, size - 1,
				       &ml, &post_shift, &lgup);
		    if (ml < (unsigned HOST_WIDE_INT) 1 << (size - 1))
		      {
			rtx t1, t2, t3;

			extra_cost = (shift_cost[post_shift]
				      + shift_cost[size - 1] + add_cost);
			t1 = expand_mult_highpart (compute_mode, op0, ml,
						   NULL_RTX, 0,
						   max_cost - extra_cost);
			if (t1 == 0)
			  goto fail1;
			t2 = expand_shift (RSHIFT_EXPR, compute_mode, t1,
					   build_int_2 (post_shift, 0),
					   NULL_RTX, 0);
			t3 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
					   build_int_2 (size - 1, 0),
					   NULL_RTX, 0);
			if (d < 0)
			  quotient
			    = force_operand (gen_rtx (MINUS, compute_mode,
						      t3, t2),
					     tquotient);
			else
			  quotient
			    = force_operand (gen_rtx (MINUS, compute_mode,
						      t2, t3),
					     tquotient);
		      }
		    else
		      {
			rtx t1, t2, t3, t4;

			ml |= (~(unsigned HOST_WIDE_INT) 0) << (size - 1);
			extra_cost = (shift_cost[post_shift]
				      + shift_cost[size - 1] + 2 * add_cost);
			t1 = expand_mult_highpart (compute_mode, op0, ml,
						   NULL_RTX, 0,
						   max_cost - extra_cost);
			if (t1 == 0)
			  goto fail1;
			t2 = force_operand (gen_rtx (PLUS, compute_mode,
						     t1, op0),
					    NULL_RTX);
			t3 = expand_shift (RSHIFT_EXPR, compute_mode, t2,
					   build_int_2 (post_shift, 0),
					   NULL_RTX, 0);
			t4 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
					   build_int_2 (size - 1, 0),
					   NULL_RTX, 0);
			if (d < 0)
			  quotient
			    = force_operand (gen_rtx (MINUS, compute_mode,
						      t4, t3),
					     tquotient);
			else
			  quotient
			    = force_operand (gen_rtx (MINUS, compute_mode,
						      t3, t4),
					     tquotient);
		      }
		  }
		else		/* Too wide mode to use tricky code */
		  break;
		insn = get_last_insn ();
		if (insn != last
		    && (set = single_set (insn)) != 0
		    && SET_DEST (set) == quotient)
		  REG_NOTES (insn)
		    = gen_rtx (EXPR_LIST, REG_EQUAL,
			       gen_rtx (DIV, compute_mode, op0, op1),
			       REG_NOTES (insn));
	      }
	    break;

	  fail1:
	    delete_insns_since (last);
	    break;
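	    /* Shape of the emitted sequence (added for exposition; modern C
	       with an assumed 32-bit example, and assuming arithmetic right
	       shifts of signed values): for division by 7, choose_multiplier
	       gives ml = 0x92492493 with post_shift = 2; ml has the sign bit
	       set, so the "add op0 back" variant above applies:

		   int32_t sdiv7 (int32_t n)
		   {
		     int32_t hi = (int32_t) (((int64_t) n
					      * (int32_t) 0x92492493) >> 32);
		     int32_t t = hi + n;
		     return (t >> 2) - (n >> 31);
		   }

	       Subtracting n >> 31 adds 1 back for negative n, turning the
	       floor-style shift into truncating division.  */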
      case FLOOR_DIV_EXPR:
      case FLOOR_MOD_EXPR:
	/* We will come here only for signed operations.  */
	if (op1_is_constant && HOST_BITS_PER_WIDE_INT >= size)
	  {
	    unsigned HOST_WIDE_INT mh, ml;
	    int pre_shift, lgup, post_shift;
	    HOST_WIDE_INT d = INTVAL (op1);

	    if (d > 0)
	      {
		/* We could just as easily deal with negative constants here,
		   but it does not seem worth the trouble for GCC 2.6.  */
		if (EXACT_POWER_OF_2_OR_ZERO_P (d))
		  {
		    pre_shift = floor_log2 (d);
		    if (rem_flag)
		      {
			remainder = expand_binop (compute_mode, and_optab, op0,
						  GEN_INT (((HOST_WIDE_INT) 1
							    << pre_shift) - 1),
						  remainder, 0,
						  OPTAB_LIB_WIDEN);
			if (remainder)
			  return gen_lowpart (mode, remainder);
		      }
		    quotient = expand_shift (RSHIFT_EXPR, compute_mode, op0,
					     build_int_2 (pre_shift, 0),
					     tquotient, 0);
		  }
		else
		  {
		    rtx t1, t2, t3, t4;

		    mh = choose_multiplier (d, size, size - 1,
					    &ml, &post_shift, &lgup);
		    if (mh)
		      abort ();

		    t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
				       build_int_2 (size - 1, 0),
				       NULL_RTX, 0);
		    t2 = expand_binop (compute_mode, xor_optab, op0, t1,
				       NULL_RTX, 0, OPTAB_WIDEN);
		    extra_cost = (shift_cost[post_shift]
				  + shift_cost[size - 1] + 2 * add_cost);
		    t3 = expand_mult_highpart (compute_mode, t2, ml,
					       NULL_RTX, 1,
					       max_cost - extra_cost);
		    if (t3 != 0)
		      {
			t4 = expand_shift (RSHIFT_EXPR, compute_mode, t3,
					   build_int_2 (post_shift, 0),
					   NULL_RTX, 1);
			quotient = expand_binop (compute_mode, xor_optab,
						 t4, t1, tquotient, 0,
						 OPTAB_WIDEN);
		      }
		  }
	      }
	    else
	      {
		rtx nsign, t1, t2, t3, t4;
		t1 = force_operand (gen_rtx (PLUS, compute_mode,
					     op0, constm1_rtx), NULL_RTX);
		t2 = expand_binop (compute_mode, ior_optab, op0, t1, NULL_RTX,
				   0, OPTAB_WIDEN);
		nsign = expand_shift (RSHIFT_EXPR, compute_mode, t2,
				      build_int_2 (size - 1, 0), NULL_RTX, 0);
		t3 = force_operand (gen_rtx (MINUS, compute_mode, t1, nsign),
				    NULL_RTX);
		t4 = expand_divmod (0, TRUNC_DIV_EXPR, compute_mode, t3, op1,
				    NULL_RTX, 0);
		if (t4)
		  {
		    rtx t5;
		    t5 = expand_unop (compute_mode, one_cmpl_optab, nsign,
				      NULL_RTX, 0);
		    quotient = force_operand (gen_rtx (PLUS, compute_mode,
						       t4, t5),
					      tquotient);
		  }
	      }
	  }

	if (quotient != 0)
	  break;
	delete_insns_since (last);
	/* Try using an instruction that produces both the quotient and
	   remainder, using truncation.  We can easily compensate the quotient
	   or remainder to get floor rounding, once we have the remainder.
	   Notice that we compute also the final remainder value here,
	   and return the result right away.  */
	if (target == 0 || GET_MODE (target) != compute_mode)
	  target = gen_reg_rtx (compute_mode);

	if (rem_flag)
	  {
	    remainder
	      = GET_CODE (target) == REG ? target
	      : gen_reg_rtx (compute_mode);
	    quotient = gen_reg_rtx (compute_mode);
	  }
	else
	  {
	    quotient
	      = GET_CODE (target) == REG ? target
	      : gen_reg_rtx (compute_mode);
	    remainder = gen_reg_rtx (compute_mode);
	  }

	if (expand_twoval_binop (sdivmod_optab, op0, op1,
				 quotient, remainder, 0))
	  {
	    /* This could be computed with a branch-less sequence.
	       Save that for later.  */
	    rtx tem;
	    rtx label = gen_label_rtx ();
	    emit_cmp_insn (remainder, const0_rtx, EQ, NULL_RTX,
			   compute_mode, 0, 0);
	    emit_jump_insn (gen_beq (label));
	    tem = expand_binop (compute_mode, xor_optab, op0, op1,
				NULL_RTX, 0, OPTAB_WIDEN);
	    emit_cmp_insn (tem, const0_rtx, GE, NULL_RTX, compute_mode, 0, 0);
	    emit_jump_insn (gen_bge (label));
	    expand_dec (quotient, const1_rtx);
	    expand_inc (remainder, op1);
	    emit_label (label);
	    return gen_lowpart (mode, rem_flag ? remainder : quotient);
	  }
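	/* Equivalent scalar fixup (added for exposition): starting from a
	   truncating quotient/remainder pair, floor rounding is restored by

	       q = n / d;  r = n % d;
	       if (r != 0 && (n < 0) != (d < 0))
		 q -= 1, r += d;

	   the xor/compare pair above tests the "signs differ" condition
	   without examining r's sign separately.  */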
	/* No luck with division elimination or divmod.  Have to do it
	   by conditionally adjusting op0 *and* the result.  */
	{
	  rtx label1, label2, label3, label4, label5;
	  rtx adjusted_op0;
	  rtx tem;

	  quotient = gen_reg_rtx (compute_mode);
	  adjusted_op0 = copy_to_mode_reg (compute_mode, op0);
	  label1 = gen_label_rtx ();
	  label2 = gen_label_rtx ();
	  label3 = gen_label_rtx ();
	  label4 = gen_label_rtx ();
	  label5 = gen_label_rtx ();
	  emit_cmp_insn (op1, const0_rtx, LT, NULL_RTX, compute_mode, 0, 0);
	  emit_jump_insn (gen_blt (label2));
	  emit_cmp_insn (adjusted_op0, const0_rtx, LT, NULL_RTX,
			 compute_mode, 0, 0);
	  emit_jump_insn (gen_blt (label1));
	  tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
			      quotient, 0, OPTAB_LIB_WIDEN);
	  if (tem != quotient)
	    emit_move_insn (quotient, tem);
	  emit_jump_insn (gen_jump (label5));
	  emit_label (label1);
	  expand_inc (adjusted_op0, const1_rtx);
	  emit_jump_insn (gen_jump (label4));
	  emit_label (label2);
	  emit_cmp_insn (adjusted_op0, const0_rtx, GT, NULL_RTX,
			 compute_mode, 0, 0);
	  emit_jump_insn (gen_bgt (label3));
	  tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
			      quotient, 0, OPTAB_LIB_WIDEN);
	  if (tem != quotient)
	    emit_move_insn (quotient, tem);
	  emit_jump_insn (gen_jump (label5));
	  emit_label (label3);
	  expand_dec (adjusted_op0, const1_rtx);
	  emit_label (label4);
	  tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
			      quotient, 0, OPTAB_LIB_WIDEN);
	  if (tem != quotient)
	    emit_move_insn (quotient, tem);
	  expand_dec (quotient, const1_rtx);
	  emit_label (label5);
	}
	break;
      case CEIL_DIV_EXPR:
      case CEIL_MOD_EXPR:
	if (unsignedp)
	  {
	    if (op1_is_constant && EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1)))
	      {
		rtx t1, t2, t3;
		unsigned HOST_WIDE_INT d = INTVAL (op1);
		t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
				   build_int_2 (floor_log2 (d), 0),
				   tquotient, 1);
		t2 = expand_binop (compute_mode, and_optab, op0,
				   GEN_INT (d - 1),
				   NULL_RTX, 1, OPTAB_LIB_WIDEN);
		t3 = gen_reg_rtx (compute_mode);
		t3 = emit_store_flag (t3, NE, t2, const0_rtx,
				      compute_mode, 1, 1);
		if (t3 == 0)
		  {
		    rtx lab;
		    lab = gen_label_rtx ();
		    emit_cmp_insn (t2, const0_rtx, EQ, NULL_RTX,
				   compute_mode, 0, 0);
		    emit_jump_insn (gen_beq (lab));
		    expand_inc (t1, const1_rtx);
		    emit_label (lab);
		    quotient = t1;
		  }
		else
		  quotient = force_operand (gen_rtx (PLUS, compute_mode,
						     t1, t3),
					    tquotient);
	      }
	    else
	      {
		/* Try using an instruction that produces both the quotient
		   and remainder, using truncation.  We can easily compensate
		   the quotient or remainder to get ceiling rounding, once we
		   have the remainder.  Notice that we compute also the final
		   remainder value here, and return the result right away.  */
		if (target == 0 || GET_MODE (target) != compute_mode)
		  target = gen_reg_rtx (compute_mode);

		if (rem_flag)
		  {
		    remainder = (GET_CODE (target) == REG
				 ? target : gen_reg_rtx (compute_mode));
		    quotient = gen_reg_rtx (compute_mode);
		  }
		else
		  {
		    quotient = (GET_CODE (target) == REG
				? target : gen_reg_rtx (compute_mode));
		    remainder = gen_reg_rtx (compute_mode);
		  }

		if (expand_twoval_binop (udivmod_optab, op0, op1, quotient,
					 remainder, 1))
		  {
		    /* This could be computed with a branch-less sequence.
		       Save that for later.  */
		    rtx label = gen_label_rtx ();
		    emit_cmp_insn (remainder, const0_rtx, EQ, NULL_RTX,
				   compute_mode, 0, 0);
		    emit_jump_insn (gen_beq (label));
		    expand_inc (quotient, const1_rtx);
		    expand_dec (remainder, op1);
		    emit_label (label);
		    return gen_lowpart (mode,
					rem_flag ? remainder : quotient);
		  }
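		/* Equivalent scalar fixup (added for exposition): for
		   unsigned operands a truncating quotient/remainder pair
		   becomes a ceiling one via

		       q = n / d;  r = n % d;
		       if (r != 0)
			 q += 1, r -= d;

		   which is exactly the inc/dec pair guarded by the branch
		   above.  */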
		/* No luck with division elimination or divmod.  Have to do it
		   by conditionally adjusting op0 *and* the result.  */
		{
		  rtx label1, label2;
		  rtx adjusted_op0, tem;

		  quotient = gen_reg_rtx (compute_mode);
		  adjusted_op0 = copy_to_mode_reg (compute_mode, op0);
		  label1 = gen_label_rtx ();
		  label2 = gen_label_rtx ();
		  emit_cmp_insn (adjusted_op0, const0_rtx, NE, NULL_RTX,
				 compute_mode, 0, 0);
		  emit_jump_insn (gen_bne (label1));
		  emit_move_insn (quotient, const0_rtx);
		  emit_jump_insn (gen_jump (label2));
		  emit_label (label1);
		  expand_dec (adjusted_op0, const1_rtx);
		  tem = expand_binop (compute_mode, udiv_optab, adjusted_op0,
				      op1, quotient, 1, OPTAB_LIB_WIDEN);
		  if (tem != quotient)
		    emit_move_insn (quotient, tem);
		  expand_inc (quotient, const1_rtx);
		  emit_label (label2);
		}
	      }
	    else /* signed */
	      {
		if (op1_is_constant
		    && EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1))
		    && INTVAL (op1) >= 0)
		  {
		    /* This is extremely similar to the code for the unsigned
		       case above.  For 2.7 we should merge these variants,
		       but for 2.6.1 I don't want to touch the code for
		       unsigned since that gets used in C.  The signed case
		       will only be used by other languages (Ada).  */

		    rtx t1, t2, t3;
		    unsigned HOST_WIDE_INT d = INTVAL (op1);
		    t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
				       build_int_2 (floor_log2 (d), 0),
				       tquotient, 0);
		    t2 = expand_binop (compute_mode, and_optab, op0,
				       GEN_INT (d - 1),
				       NULL_RTX, 1, OPTAB_LIB_WIDEN);
		    t3 = gen_reg_rtx (compute_mode);
		    t3 = emit_store_flag (t3, NE, t2, const0_rtx,
					  compute_mode, 1, 1);
		    if (t3 == 0)
		      {
			rtx lab;
			lab = gen_label_rtx ();
			emit_cmp_insn (t2, const0_rtx, EQ, NULL_RTX,
				       compute_mode, 0, 0);
			emit_jump_insn (gen_beq (lab));
			expand_inc (t1, const1_rtx);
			emit_label (lab);
			quotient = t1;
		      }
		    else
		      quotient = force_operand (gen_rtx (PLUS, compute_mode,
							 t1, t3),
						tquotient);
		  }
		else
		  {
		/* Try using an instruction that produces both the quotient
		   and remainder, using truncation.  We can easily compensate
		   the quotient or remainder to get ceiling rounding, once we
		   have the remainder.  Notice that we compute also the final
		   remainder value here, and return the result right away.  */
		if (target == 0 || GET_MODE (target) != compute_mode)
		  target = gen_reg_rtx (compute_mode);

		if (rem_flag)
		  {
		    remainder = (GET_CODE (target) == REG
				 ? target : gen_reg_rtx (compute_mode));
		    quotient = gen_reg_rtx (compute_mode);
		  }
		else
		  {
		    quotient = (GET_CODE (target) == REG
				? target : gen_reg_rtx (compute_mode));
		    remainder = gen_reg_rtx (compute_mode);
		  }

		if (expand_twoval_binop (sdivmod_optab, op0, op1, quotient,
					 remainder, 0))
		  {
		    /* This could be computed with a branch-less sequence.
		       Save that for later.  */
		    rtx tem;
		    rtx label = gen_label_rtx ();
		    emit_cmp_insn (remainder, const0_rtx, EQ, NULL_RTX,
				   compute_mode, 0, 0);
		    emit_jump_insn (gen_beq (label));
		    tem = expand_binop (compute_mode, xor_optab, op0, op1,
					NULL_RTX, 0, OPTAB_WIDEN);
		    emit_cmp_insn (tem, const0_rtx, LT, NULL_RTX,
				   compute_mode, 0, 0);
		    emit_jump_insn (gen_blt (label));
		    expand_inc (quotient, const1_rtx);
		    expand_dec (remainder, op1);
		    emit_label (label);
		    return gen_lowpart (mode,
					rem_flag ? remainder : quotient);
		  }
		/* No luck with division elimination or divmod.  Have to do it
		   by conditionally adjusting op0 *and* the result.  */
		{
		  rtx label1, label2, label3, label4, label5;
		  rtx adjusted_op0;
		  rtx tem;

		  quotient = gen_reg_rtx (compute_mode);
		  adjusted_op0 = copy_to_mode_reg (compute_mode, op0);
		  label1 = gen_label_rtx ();
		  label2 = gen_label_rtx ();
		  label3 = gen_label_rtx ();
		  label4 = gen_label_rtx ();
		  label5 = gen_label_rtx ();
		  emit_cmp_insn (op1, const0_rtx, LT, NULL_RTX,
				 compute_mode, 0, 0);
		  emit_jump_insn (gen_blt (label2));
		  emit_cmp_insn (adjusted_op0, const0_rtx, GT, NULL_RTX,
				 compute_mode, 0, 0);
		  emit_jump_insn (gen_bgt (label1));
		  tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0,
				      op1, quotient, 0, OPTAB_LIB_WIDEN);
		  if (tem != quotient)
		    emit_move_insn (quotient, tem);
		  emit_jump_insn (gen_jump (label5));
		  emit_label (label1);
		  expand_dec (adjusted_op0, const1_rtx);
		  emit_jump_insn (gen_jump (label4));
		  emit_label (label2);
		  emit_cmp_insn (adjusted_op0, const0_rtx, LT, NULL_RTX,
				 compute_mode, 0, 0);
		  emit_jump_insn (gen_blt (label3));
		  tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0,
				      op1, quotient, 0, OPTAB_LIB_WIDEN);
		  if (tem != quotient)
		    emit_move_insn (quotient, tem);
		  emit_jump_insn (gen_jump (label5));
		  emit_label (label3);
		  expand_inc (adjusted_op0, const1_rtx);
		  emit_label (label4);
		  tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0,
				      op1, quotient, 0, OPTAB_LIB_WIDEN);
		  if (tem != quotient)
		    emit_move_insn (quotient, tem);
		  expand_inc (quotient, const1_rtx);
		  emit_label (label5);
		}
	      }
	  }
	break;
      case EXACT_DIV_EXPR:
	if (op1_is_constant && HOST_BITS_PER_WIDE_INT >= size)
	  {
	    HOST_WIDE_INT d = INTVAL (op1);
	    unsigned HOST_WIDE_INT ml;
	    int post_shift;
	    rtx t1;

	    post_shift = floor_log2 (d & -d);
	    ml = invert_mod2n (d >> post_shift, size);
	    t1 = expand_mult (compute_mode, op0, GEN_INT (ml), NULL_RTX,
			      unsignedp);
	    quotient = expand_shift (RSHIFT_EXPR, compute_mode, t1,
				     build_int_2 (post_shift, 0),
				     NULL_RTX, unsignedp);

	    insn = get_last_insn ();
	    REG_NOTES (insn)
	      = gen_rtx (EXPR_LIST, REG_EQUAL,
			 gen_rtx (unsignedp ? UDIV : DIV, compute_mode,
				  op0, op1),
			 REG_NOTES (insn));
	  }
	break;
      case ROUND_DIV_EXPR:
      case ROUND_MOD_EXPR:
	if (unsignedp)
	  {
	    rtx tem;
	    rtx label;
	    label = gen_label_rtx ();
	    quotient = gen_reg_rtx (compute_mode);
	    remainder = gen_reg_rtx (compute_mode);
	    if (expand_twoval_binop (udivmod_optab, op0, op1,
				     quotient, remainder, 1) == 0)
	      {
		quotient = expand_binop (compute_mode, udiv_optab, op0, op1,
					 quotient, 1, OPTAB_LIB_WIDEN);
		tem = expand_mult (compute_mode, quotient, op1, NULL_RTX, 1);
		remainder = expand_binop (compute_mode, sub_optab, op0, tem,
					  remainder, 1, OPTAB_LIB_WIDEN);
	      }
	    tem = plus_constant (op1, -1);
	    tem = expand_shift (RSHIFT_EXPR, compute_mode, tem,
				build_int_2 (1, 0), NULL_RTX, 1);
	    emit_cmp_insn (remainder, tem, LEU, NULL_RTX, compute_mode, 0, 0);
	    emit_jump_insn (gen_bleu (label));
	    expand_inc (quotient, const1_rtx);
	    expand_dec (remainder, op1);
	    emit_label (label);
	  }
	else
	  {
	    rtx abs_rem, abs_op1, tem, mask;
	    rtx label;
	    label = gen_label_rtx ();
	    quotient = gen_reg_rtx (compute_mode);
	    remainder = gen_reg_rtx (compute_mode);
	    if (expand_twoval_binop (sdivmod_optab, op0, op1,
				     quotient, remainder, 0) == 0)
	      {
		quotient = expand_binop (compute_mode, sdiv_optab, op0, op1,
					 quotient, 0, OPTAB_LIB_WIDEN);
		tem = expand_mult (compute_mode, quotient, op1, NULL_RTX, 0);
		remainder = expand_binop (compute_mode, sub_optab, op0, tem,
					  remainder, 0, OPTAB_LIB_WIDEN);
	      }
	    abs_rem = expand_abs (compute_mode, remainder, NULL_RTX, 0, 0);
	    abs_op1 = expand_abs (compute_mode, op1, NULL_RTX, 0, 0);
	    tem = expand_shift (LSHIFT_EXPR, compute_mode, abs_rem,
				build_int_2 (1, 0), NULL_RTX, 1);
	    emit_cmp_insn (tem, abs_op1, LTU, NULL_RTX, compute_mode, 0, 0);
	    emit_jump_insn (gen_bltu (label));
	    tem = expand_binop (compute_mode, xor_optab, op0, op1,
				NULL_RTX, 0, OPTAB_WIDEN);
	    mask = expand_shift (RSHIFT_EXPR, compute_mode, tem,
				 build_int_2 (size - 1, 0), NULL_RTX, 0);
	    tem = expand_binop (compute_mode, xor_optab, mask, const1_rtx,
				NULL_RTX, 0, OPTAB_WIDEN);
	    tem = expand_binop (compute_mode, sub_optab, tem, mask,
				NULL_RTX, 0, OPTAB_WIDEN);
	    expand_inc (quotient, tem);
	    tem = expand_binop (compute_mode, xor_optab, mask, op1,
				NULL_RTX, 0, OPTAB_WIDEN);
	    tem = expand_binop (compute_mode, sub_optab, tem, mask,
				NULL_RTX, 0, OPTAB_WIDEN);
	    expand_dec (remainder, tem);
	    emit_label (label);
	  }
	return gen_lowpart (mode, rem_flag ? remainder : quotient);

      default:
	abort ();
      }
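  /* Equivalent scalar fixup (added for exposition): round-to-nearest is
     reached from truncation by comparing the remainder with half the
     divisor.  In the unsigned case above,

	 q = n / d;  r = n % d;
	 if (r > (d - 1) / 2)
	   q += 1, r -= d;

     and the signed case applies the same test to 2*|r| against |d|, then
     adds or subtracts 1 according to the sign of n ^ d, recovered
     branch-free through MASK.  */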
  if (quotient == 0)
    {
      if (target && GET_MODE (target) != compute_mode)
	target = 0;

      if (rem_flag)
	{
	  /* Try to produce the remainder directly without a library call.  */
	  remainder = sign_expand_binop (compute_mode, umod_optab, smod_optab,
					 op0, op1, target,
					 unsignedp, OPTAB_WIDEN);
	  if (remainder == 0)
	    {
	      /* No luck there.  Can we do remainder and divide at once
		 without a library call?  */
	      remainder = gen_reg_rtx (compute_mode);
	      if (! expand_twoval_binop ((unsignedp
					  ? udivmod_optab
					  : sdivmod_optab),
					 op0, op1,
					 NULL_RTX, remainder, unsignedp))
		remainder = 0;
	    }

	  if (remainder)
	    return gen_lowpart (mode, remainder);
	}

      /* Produce the quotient.  Try a quotient insn, but not a library call.
	 If we have a divmod in this mode, use it in preference to widening
	 the div (for this test we assume it will not fail).  Note that optab2
	 is set to the one of the two optabs that the call below will use.  */
      quotient
	= sign_expand_binop (compute_mode, udiv_optab, sdiv_optab,
			     op0, op1, rem_flag ? NULL_RTX : target,
			     unsignedp,
			     ((optab2->handlers[(int) compute_mode].insn_code
			       != CODE_FOR_nothing)
			      ? OPTAB_DIRECT : OPTAB_WIDEN));

      if (quotient == 0)
	{
	  /* No luck there.  Try a quotient-and-remainder insn,
	     keeping the quotient alone.  */
	  quotient = gen_reg_rtx (compute_mode);
	  if (! expand_twoval_binop (unsignedp
				     ? udivmod_optab : sdivmod_optab,
				     op0, op1,
				     quotient, NULL_RTX, unsignedp))
	    {
	      quotient = 0;
	      /* Still no luck.  If we are not computing the remainder,
		 use a library call for the quotient.  */
	      if (! rem_flag)
		quotient = sign_expand_binop (compute_mode,
					      udiv_optab, sdiv_optab,
					      op0, op1, target,
					      unsignedp, OPTAB_LIB_WIDEN);
	    }
	}
    }

  if (rem_flag)
    {
      if (target && GET_MODE (target) != compute_mode)
	target = 0;

      if (quotient == 0)
	/* No divide instruction either.  Use library for remainder.  */
	remainder = sign_expand_binop (compute_mode, umod_optab, smod_optab,
				       op0, op1, target,
				       unsignedp, OPTAB_LIB_WIDEN);
      else
	{
	  /* We divided.  Now finish doing X - Y * (X / Y).  */
	  remainder = expand_mult (compute_mode, quotient, op1,
				   NULL_RTX, unsignedp);
	  remainder = expand_binop (compute_mode, sub_optab, op0,
				    remainder, target, unsignedp,
				    OPTAB_LIB_WIDEN);
	}
    }

  return gen_lowpart (mode, rem_flag ? remainder : quotient);
}
/* Return a tree node with data type TYPE, describing the value of X.
   Usually this is an RTL_EXPR, if there is no obvious better choice.
   X may be an expression, however we only support those expressions
   generated by loop.c.  */

tree
make_tree (type, x)
     tree type;
     rtx x;
{
  tree t;

  switch (GET_CODE (x))
    {
    case CONST_INT:
      t = build_int_2 (INTVAL (x),
		       TREE_UNSIGNED (type) || INTVAL (x) >= 0 ? 0 : -1);
      TREE_TYPE (t) = type;
      return t;

    case CONST_DOUBLE:
      if (GET_MODE (x) == VOIDmode)
	{
	  t = build_int_2 (CONST_DOUBLE_LOW (x), CONST_DOUBLE_HIGH (x));
	  TREE_TYPE (t) = type;
	}
      else
	{
	  REAL_VALUE_TYPE d;

	  REAL_VALUE_FROM_CONST_DOUBLE (d, x);
	  t = build_real (type, d);
	}

      return t;

    case PLUS:
      return fold (build (PLUS_EXPR, type, make_tree (type, XEXP (x, 0)),
			  make_tree (type, XEXP (x, 1))));

    case MINUS:
      return fold (build (MINUS_EXPR, type, make_tree (type, XEXP (x, 0)),
			  make_tree (type, XEXP (x, 1))));

    case NEG:
      return fold (build1 (NEGATE_EXPR, type,
			   make_tree (type, XEXP (x, 0))));

    case MULT:
      return fold (build (MULT_EXPR, type, make_tree (type, XEXP (x, 0)),
			  make_tree (type, XEXP (x, 1))));

    case ASHIFT:
      return fold (build (LSHIFT_EXPR, type, make_tree (type, XEXP (x, 0)),
			  make_tree (type, XEXP (x, 1))));

    case LSHIFTRT:
      return fold (convert (type,
			    build (RSHIFT_EXPR, unsigned_type (type),
				   make_tree (unsigned_type (type),
					      XEXP (x, 0)),
				   make_tree (type, XEXP (x, 1)))));

    case ASHIFTRT:
      return fold (convert (type,
			    build (RSHIFT_EXPR, signed_type (type),
				   make_tree (signed_type (type),
					      XEXP (x, 0)),
				   make_tree (type, XEXP (x, 1)))));

    case DIV:
      if (TREE_CODE (type) != REAL_TYPE)
	t = signed_type (type);
      else
	t = type;

      return fold (convert (type,
			    build (TRUNC_DIV_EXPR, t,
				   make_tree (t, XEXP (x, 0)),
				   make_tree (t, XEXP (x, 1)))));

    case UDIV:
      t = unsigned_type (type);
      return fold (convert (type,
			    build (TRUNC_DIV_EXPR, t,
				   make_tree (t, XEXP (x, 0)),
				   make_tree (t, XEXP (x, 1)))));

    default:
      t = make_node (RTL_EXPR);
      TREE_TYPE (t) = type;
      RTL_EXPR_RTL (t) = x;
      /* There are no insns to be output
	 when this rtl_expr is used.  */
      RTL_EXPR_SEQUENCE (t) = 0;
      return t;
    }
}
/* Return an rtx representing the value of X * MULT + ADD.
   TARGET is a suggestion for where to store the result (an rtx).
   MODE is the machine mode for the computation.
   X and MULT must have mode MODE.  ADD may have a different mode.
   So can X (defaults to same as MODE).
   UNSIGNEDP is non-zero to do unsigned multiplication.
   This may emit insns.  */

rtx
expand_mult_add (x, target, mult, add, mode, unsignedp)
     rtx x, target, mult, add;
     enum machine_mode mode;
     int unsignedp;
{
  tree type = type_for_mode (mode, unsignedp);
  tree add_type = (GET_MODE (add) == VOIDmode
		   ? type : type_for_mode (GET_MODE (add), unsignedp));
  tree result = fold (build (PLUS_EXPR, type,
			     fold (build (MULT_EXPR, type,
					  make_tree (type, x),
					  make_tree (type, mult))),
			     make_tree (add_type, add)));

  return expand_expr (result, target, VOIDmode, 0);
}
/* Compute the logical-and of OP0 and OP1, storing it in TARGET
   and returning TARGET.

   If TARGET is 0, a pseudo-register or constant is returned.  */

rtx
expand_and (op0, op1, target)
     rtx op0, op1, target;
{
  enum machine_mode mode = VOIDmode;
  rtx tem;

  if (GET_MODE (op0) != VOIDmode)
    mode = GET_MODE (op0);
  else if (GET_MODE (op1) != VOIDmode)
    mode = GET_MODE (op1);

  if (mode != VOIDmode)
    tem = expand_binop (mode, and_optab, op0, op1, target, 0,
			OPTAB_LIB_WIDEN);
  else if (GET_CODE (op0) == CONST_INT && GET_CODE (op1) == CONST_INT)
    tem = GEN_INT (INTVAL (op0) & INTVAL (op1));
  else
    abort ();

  if (target == 0)
    target = tem;
  else if (tem != target)
    emit_move_insn (target, tem);
  return target;
}
/* Emit a store-flags instruction for comparison CODE on OP0 and OP1
   and storing in TARGET.  Normally return TARGET.
   Return 0 if that cannot be done.

   MODE is the mode to use for OP0 and OP1 should they be CONST_INTs.  If
   it is VOIDmode, they cannot both be CONST_INT.

   UNSIGNEDP is for the case where we have to widen the operands
   to perform the operation.  It says to use zero-extension.

   NORMALIZEP is 1 if we should convert the result to be either zero
   or one.  NORMALIZEP is -1 if we should convert the result to be
   either zero or -1.  If NORMALIZEP is zero, the result will be left
   "raw" out of the scc insn.  */

rtx
emit_store_flag (target, code, op0, op1, mode, unsignedp, normalizep)
     rtx target;
     enum rtx_code code;
     rtx op0, op1;
     enum machine_mode mode;
     int unsignedp;
     int normalizep;
{
  rtx subtarget;
  enum insn_code icode;
  enum machine_mode compare_mode;
  enum machine_mode target_mode = GET_MODE (target);
  rtx tem;
  rtx last = get_last_insn ();
  rtx pattern, comparison;

  /* If one operand is constant, make it the second one.  Only do this
     if the other operand is not constant as well.  */

  if ((CONSTANT_P (op0) && ! CONSTANT_P (op1))
      || (GET_CODE (op0) == CONST_INT && GET_CODE (op1) != CONST_INT))
    {
      tem = op0;
      op0 = op1;
      op1 = tem;
      code = swap_condition (code);
    }

  if (mode == VOIDmode)
    mode = GET_MODE (op0);
  /* For some comparisons with 1 and -1, we can convert this to
     comparisons with zero.  This will often produce more opportunities for
     store-flag insns.  */

  switch (code)
    {
    case LT:
      if (op1 == const1_rtx)
	op1 = const0_rtx, code = LE;
      break;
    case LE:
      if (op1 == constm1_rtx)
	op1 = const0_rtx, code = LT;
      break;
    case GE:
      if (op1 == const1_rtx)
	op1 = const0_rtx, code = GT;
      break;
    case GT:
      if (op1 == constm1_rtx)
	op1 = const0_rtx, code = GE;
      break;
    case GEU:
      if (op1 == const1_rtx)
	op1 = const0_rtx, code = NE;
      break;
    case LTU:
      if (op1 == const1_rtx)
	op1 = const0_rtx, code = EQ;
      break;
    }
3952 /* From now on, we won't change CODE, so set ICODE now. */
3953 icode
= setcc_gen_code
[(int) code
];
  /* If this is A < 0 or A >= 0, we can do this by taking the one's
     complement of A (for GE) and shifting the sign bit to the low bit.  */
  if (op1 == const0_rtx && (code == LT || code == GE)
      && GET_MODE_CLASS (mode) == MODE_INT
      && (normalizep || STORE_FLAG_VALUE == 1
          || (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
              && ((STORE_FLAG_VALUE & GET_MODE_MASK (mode))
                  == (HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1)))))
    {
      subtarget = target;

      /* If the result is to be wider than OP0, it is best to convert it
         first.  If it is to be narrower, it is *incorrect* to convert it
         first.  */
      if (GET_MODE_SIZE (target_mode) > GET_MODE_SIZE (mode))
        {
          op0 = protect_from_queue (op0, 0);
          op0 = convert_modes (target_mode, mode, op0, 0);
          mode = target_mode;
        }

      if (target_mode != mode)
        subtarget = 0;

      if (code == GE)
        op0 = expand_unop (mode, one_cmpl_optab, op0,
                           ((STORE_FLAG_VALUE == 1 || normalizep)
                            ? 0 : subtarget), 0);

      if (STORE_FLAG_VALUE == 1 || normalizep)
        /* If we are supposed to produce a 0/1 value, we want to do
           a logical shift from the sign bit to the low-order bit; for
           a -1/0 value, we do an arithmetic shift.  */
        op0 = expand_shift (RSHIFT_EXPR, mode, op0,
                            size_int (GET_MODE_BITSIZE (mode) - 1),
                            subtarget, normalizep != -1);

      if (mode != target_mode)
        op0 = convert_modes (target_mode, mode, op0, 0);

      return op0;
    }
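  /* The same trick in plain C, for a 32-bit int a (illustrative;
     assumes arithmetic right shift of negative values):

         a < 0   is  (unsigned) a >> 31    -- logical shift, 0 or 1
         a < 0   is  a >> 31               -- arithmetic shift, 0 or -1
         a >= 0  is  (unsigned) ~a >> 31   -- complement first

     e.g. a = 5: ~a = -6, whose sign bit is set, so a >= 0 yields 1;
     a = -7: ~a = 6, sign bit clear, so a >= 0 yields 0.  */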
  if (icode != CODE_FOR_nothing)
    {
      /* We think we may be able to do this with a scc insn.  Emit the
         comparison and then the scc insn.

         compare_from_rtx may call emit_queue, which would be deleted below
         if the scc insn fails.  So call it ourselves before setting LAST.  */

      emit_queue ();
      last = get_last_insn ();

      comparison
        = compare_from_rtx (op0, op1, code, unsignedp, mode, NULL_RTX, 0);
      if (GET_CODE (comparison) == CONST_INT)
        return (comparison == const0_rtx ? const0_rtx
                : normalizep == 1 ? const1_rtx
                : normalizep == -1 ? constm1_rtx
                : const_true_rtx);

      /* If the code of COMPARISON doesn't match CODE, something is
         wrong; we can no longer be sure that we have the operation.
         We could handle this case, but it should not happen.  */

      if (GET_CODE (comparison) != code)
        abort ();

      /* Get a reference to the target in the proper mode for this insn.  */
      compare_mode = insn_operand_mode[(int) icode][0];
      subtarget = target;
      if (preserve_subexpressions_p ()
          || ! (*insn_operand_predicate[(int) icode][0]) (subtarget,
                                                          compare_mode))
        subtarget = gen_reg_rtx (compare_mode);

      pattern = GEN_FCN (icode) (subtarget);
      if (pattern)
        {
          emit_insn (pattern);

          /* If we are converting to a wider mode, first convert to
             TARGET_MODE, then normalize.  This produces better combining
             opportunities on machines that have a SIGN_EXTRACT when we are
             testing a single bit.  This mostly benefits the 68k.

             If STORE_FLAG_VALUE does not have the sign bit set when
             interpreted in COMPARE_MODE, we can do this conversion as
             unsigned, which is usually more efficient.  */
          if (GET_MODE_SIZE (target_mode) > GET_MODE_SIZE (compare_mode))
            {
              convert_move (target, subtarget,
                            (GET_MODE_BITSIZE (compare_mode)
                             <= HOST_BITS_PER_WIDE_INT)
                            && 0 == (STORE_FLAG_VALUE
                                     & ((HOST_WIDE_INT) 1
                                        << (GET_MODE_BITSIZE (compare_mode)
                                            - 1))));
              op0 = target;
              compare_mode = target_mode;
            }
          else
            op0 = subtarget;
          /* If we want to keep subexpressions around, don't reuse our
             last target.  */

          if (preserve_subexpressions_p ())
            subtarget = 0;

          /* Now normalize to the proper value in COMPARE_MODE.  Sometimes
             we don't have to do anything.  */
          if (normalizep == 0 || normalizep == STORE_FLAG_VALUE)
            ;
          else if (normalizep == - STORE_FLAG_VALUE)
            op0 = expand_unop (compare_mode, neg_optab, op0, subtarget, 0);

          /* We don't want to use STORE_FLAG_VALUE < 0 below since this
             makes it hard to use a value of just the sign bit due to
             ANSI integer constant typing rules.  */
          else if (GET_MODE_BITSIZE (compare_mode) <= HOST_BITS_PER_WIDE_INT
                   && (STORE_FLAG_VALUE
                       & ((HOST_WIDE_INT) 1
                          << (GET_MODE_BITSIZE (compare_mode) - 1))))
            op0 = expand_shift (RSHIFT_EXPR, compare_mode, op0,
                                size_int (GET_MODE_BITSIZE (compare_mode) - 1),
                                subtarget, normalizep == 1);
          else if (STORE_FLAG_VALUE & 1)
            {
              op0 = expand_and (op0, const1_rtx, subtarget);
              if (normalizep == -1)
                op0 = expand_unop (compare_mode, neg_optab, op0, op0, 0);
            }
          else
            abort ();

          /* If we were converting to a smaller mode, do the
             conversion now.  */
          if (target_mode != compare_mode)
            {
              convert_move (target, op0, 0);
              return target;
            }
          else
            return op0;
        }

      delete_insns_since (last);
    }
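  /* Example of the normalization above (illustrative): on a machine
     whose scc insns produce 0/-1 (STORE_FLAG_VALUE == -1), a request
     for a 0/1 result (NORMALIZEP == 1) matches the
     `normalizep == - STORE_FLAG_VALUE' arm and costs one negation,
     since - (-1) == 1.  */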
  /* If expensive optimizations, use different pseudo registers for each
     insn, instead of reusing the same pseudo.  This leads to better CSE,
     but slows down the compiler, since there are more pseudos.  */
  subtarget = (!flag_expensive_optimizations
               && (target_mode == mode)) ? target : NULL_RTX;
  /* If we reached here, we can't do this with a scc insn.  However, there
     are some comparisons that can be done directly.  For example, if
     this is an equality comparison of integers, we can try to exclusive-or
     (or subtract) the two operands and use a recursive call to try the
     comparison with zero.  Don't do any of these cases if branches are
     very cheap.  */

  if (BRANCH_COST > 0
      && GET_MODE_CLASS (mode) == MODE_INT && (code == EQ || code == NE)
      && op1 != const0_rtx)
    {
      tem = expand_binop (mode, xor_optab, op0, op1, subtarget, 1,
                          OPTAB_WIDEN);

      if (tem == 0)
        tem = expand_binop (mode, sub_optab, op0, op1, subtarget, 1,
                            OPTAB_WIDEN);
      if (tem != 0)
        tem = emit_store_flag (target, code, tem, const0_rtx,
                               mode, unsignedp, normalizep);
      if (tem == 0)
        delete_insns_since (last);
      return tem;
    }
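  /* The identity used above (plain C, illustrative): a == b exactly
     when (a ^ b) == 0, and likewise when (a - b) == 0.  E.g. with
     a = 12, b = 10: a ^ b == 6 and a - b == 2, both non-zero, so the
     recursive comparison against zero gives false for EQ and true
     for NE.  */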
  /* Some other cases we can do are EQ, NE, LE, and GT comparisons with
     the constant zero.  Reject all other comparisons at this point.  Only
     do LE and GT if branches are expensive since they are expensive on
     2-operand machines.  */

  if (BRANCH_COST == 0
      || GET_MODE_CLASS (mode) != MODE_INT || op1 != const0_rtx
      || (code != EQ && code != NE
          && (BRANCH_COST <= 1 || (code != LE && code != GT))))
    return 0;
  /* See what we need to return.  We can only return a 1, -1, or the
     sign bit.  */

  if (normalizep == 0)
    {
      if (STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
        normalizep = STORE_FLAG_VALUE;

      else if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
               && ((STORE_FLAG_VALUE & GET_MODE_MASK (mode))
                   == (HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1)))
        ;
      else
        return 0;
    }
  /* Try to put the result of the comparison in the sign bit.  Assume we can't
     do the necessary operation below.  */

  tem = 0;

  /* To see if A <= 0, compute (A | (A - 1)).  A <= 0 iff that result has
     the sign bit set.  */

  if (code == LE)
    {
      /* This is destructive, so SUBTARGET can't be OP0.  */
      if (rtx_equal_p (subtarget, op0))
        subtarget = 0;

      tem = expand_binop (mode, sub_optab, op0, const1_rtx, subtarget, 0,
                          OPTAB_WIDEN);
      if (tem)
        tem = expand_binop (mode, ior_optab, op0, tem, subtarget, 0,
                            OPTAB_WIDEN);
    }
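  /* Worked example of (A | (A - 1)), 32-bit A (illustrative):

         A =  5:   5 | 4  =  5      sign clear;  5 <= 0 false
         A =  0:   0 | -1 = -1      sign set;    0 <= 0 true
         A = -3:  -3 | -4 = -3      sign set;   -3 <= 0 true

     For positive A both A and A - 1 have the sign bit clear; A - 1
     turns zero into -1; and negative A contributes its own sign bit.  */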
  /* To see if A > 0, compute (((signed) A) >> BITS) - A, where BITS is the
     number of bits in the mode of OP0, minus one.  */

  if (code == GT)
    {
      if (rtx_equal_p (subtarget, op0))
        subtarget = 0;

      tem = expand_shift (RSHIFT_EXPR, mode, op0,
                          size_int (GET_MODE_BITSIZE (mode) - 1),
                          subtarget, 0);
      tem = expand_binop (mode, sub_optab, tem, op0, subtarget, 0,
                          OPTAB_WIDEN);
    }
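  /* Worked example, 32 bits, BITS == 31 (illustrative; assumes
     arithmetic right shift):

         A =  5:  (5 >> 31) - 5    =  0 - 5  = -5    sign set;    5 > 0 true
         A =  0:  (0 >> 31) - 0    =  0               sign clear;  0 > 0 false
         A = -3:  (-3 >> 31) + 3   = -1 + 3  =  2     sign clear; -3 > 0 false

     The shift yields 0 for non-negative A and -1 for negative A, so
     the difference is negative exactly when A is positive.  */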
  if (code == EQ || code == NE)
    {
      /* For EQ or NE, one way to do the comparison is to apply an operation
         that converts the operand into a positive number if it is non-zero
         or zero if it was originally zero.  Then, for EQ, we subtract 1 and
         for NE we negate.  This puts the result in the sign bit.  Then we
         normalize with a shift, if needed.

         Two operations that can do the above actions are ABS and FFS, so try
         them.  If that doesn't work, and MODE is smaller than a full word,
         we can use zero-extension to the wider mode (an unsigned conversion)
         as the operation.  */
      if (abs_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
        tem = expand_unop (mode, abs_optab, op0, subtarget, 1);
      else if (ffs_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
        tem = expand_unop (mode, ffs_optab, op0, subtarget, 1);
      else if (GET_MODE_SIZE (mode) < UNITS_PER_WORD)
        {
          op0 = protect_from_queue (op0, 0);
          tem = convert_modes (word_mode, mode, op0, 1);
          mode = word_mode;
        }

      if (tem != 0)
        {
          if (code == EQ)
            tem = expand_binop (mode, sub_optab, tem, const1_rtx, subtarget,
                                0, OPTAB_WIDEN);
          else
            tem = expand_unop (mode, neg_optab, tem, subtarget, 0);
        }
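      /* Worked example of the ABS variant, 32 bits (illustrative,
         ignoring the ABS of the most negative value):

             EQ, A = 0:  abs (0) - 1 = -1    sign set;    0 == 0 true
             EQ, A = 9:  abs (9) - 1 =  8    sign clear;  9 == 0 false
             NE, A = 9:  -abs (9)    = -9    sign set;    9 != 0 true

         FFS behaves the same way here, since ffs (A) is zero iff A is
         zero and positive otherwise.  */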
      /* If we couldn't do it that way, for NE we can "or" the two's
         complement of the value with itself.  For EQ, we take the one's
         complement of that "or", which is an extra insn, so we only handle
         EQ if branches are expensive.  */

      if (tem == 0 && (code == NE || BRANCH_COST > 1))
        {
          if (rtx_equal_p (subtarget, op0))
            subtarget = 0;

          tem = expand_unop (mode, neg_optab, op0, subtarget, 0);
          tem = expand_binop (mode, ior_optab, tem, op0, subtarget, 0,
                              OPTAB_WIDEN);

          if (tem && code == EQ)
            tem = expand_unop (mode, one_cmpl_optab, tem, subtarget, 0);
        }
    }
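  /* Worked example of the (-A | A) fallback, 32 bits (illustrative):

         A = 0:  -0 | 0  =  0      sign clear;  0 != 0 false
         A = 6:  -6 | 6  = -2      sign set;    6 != 0 true

     Either A or -A is negative whenever A is non-zero, so the OR has
     the sign bit set exactly for non-zero A; the one's complement
     then yields EQ.  */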
  if (tem && normalizep)
    tem = expand_shift (RSHIFT_EXPR, mode, tem,
                        size_int (GET_MODE_BITSIZE (mode) - 1),
                        subtarget, normalizep == 1);

  if (tem)
    {
      if (GET_MODE (tem) != target_mode)
        {
          convert_move (target, tem, 0);
          tem = target;
        }
      else if (!subtarget)
        {
          emit_move_insn (target, tem);
          tem = target;
        }
    }
  else
    delete_insns_since (last);

  return tem;
}
/* Like emit_store_flag, but always succeeds.  */

rtx
emit_store_flag_force (target, code, op0, op1, mode, unsignedp, normalizep)
     rtx target;
     enum rtx_code code;
     rtx op0, op1;
     enum machine_mode mode;
     int unsignedp;
     int normalizep;
{
  rtx tem, label;

  /* First see if emit_store_flag can do the job.  */
  tem = emit_store_flag (target, code, op0, op1, mode, unsignedp, normalizep);
  if (tem != 0)
    return tem;
  if (normalizep == 0)
    normalizep = 1;

  /* If this failed, we have to do this with set/compare/jump/set code.  */

  if (GET_CODE (target) != REG
      || reg_mentioned_p (target, op0) || reg_mentioned_p (target, op1))
    target = gen_reg_rtx (GET_MODE (target));

  emit_move_insn (target, const1_rtx);
  tem = compare_from_rtx (op0, op1, code, unsignedp, mode, NULL_RTX, 0);
  if (GET_CODE (tem) == CONST_INT)
    return tem;

  label = gen_label_rtx ();
  if (bcc_gen_fctn[(int) code] == 0)
    abort ();

  emit_jump_insn ((*bcc_gen_fctn[(int) code]) (label));
  emit_move_insn (target, const0_rtx);