/* Medium-level subroutines: convert bit-field store and extract
   and shifts, multiplies and divides to rtl instructions.
   Copyright (C) 1987, 88, 89, 92-97, 1998 Free Software Foundation, Inc.

This file is part of GNU CC.

GNU CC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.

GNU CC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GNU CC; see the file COPYING.  If not, write to
the Free Software Foundation, 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA.  */
#include "config.h"
#include "system.h"
#include "rtl.h"
#include "tree.h"
#include "flags.h"
#include "insn-flags.h"
#include "insn-codes.h"
#include "insn-config.h"
#include "expr.h"
#include "real.h"
#include "recog.h"
static void store_fixed_bit_field PROTO((rtx, int, int, int, rtx, int));
static void store_split_bit_field PROTO((rtx, int, int, rtx, int));
static rtx extract_fixed_bit_field PROTO((enum machine_mode, rtx, int,
					  int, int, rtx, int, int));
static rtx mask_rtx PROTO((enum machine_mode, int,
			   int, int));
static rtx lshift_value PROTO((enum machine_mode, rtx,
			       int, int));
static rtx extract_split_bit_field PROTO((rtx, int, int, int, int));
static void do_cmp_and_jump PROTO((rtx, rtx, enum rtx_code,
				   enum machine_mode, rtx));
#define CEIL(x,y) (((x) + (y) - 1) / (y))
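
/* For example, CEIL (bitsize, BITS_PER_WORD) is the number of words
   needed to hold a BITSIZE-bit field.  */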
/* Non-zero means divides or modulus operations are relatively cheap for
   powers of two, so don't use branches; emit the operation instead.
   Usually, this will mean that the MD file will emit non-branch
   sequences.  */

static int sdiv_pow2_cheap, smod_pow2_cheap;

#ifndef SLOW_UNALIGNED_ACCESS
#define SLOW_UNALIGNED_ACCESS STRICT_ALIGNMENT
#endif

/* For compilers that support multiple targets with different word sizes,
   MAX_BITS_PER_WORD contains the biggest value of BITS_PER_WORD.  An example
   is the H8/300(H) compiler.  */

#ifndef MAX_BITS_PER_WORD
#define MAX_BITS_PER_WORD BITS_PER_WORD
#endif

/* Cost of various pieces of RTL.  Note that some of these are indexed by
   shift count, and some by mode.  */
static int add_cost, negate_cost, zero_cost;
static int shift_cost[MAX_BITS_PER_WORD];
static int shiftadd_cost[MAX_BITS_PER_WORD];
static int shiftsub_cost[MAX_BITS_PER_WORD];
static int mul_cost[NUM_MACHINE_MODES];
static int div_cost[NUM_MACHINE_MODES];
static int mul_widen_cost[NUM_MACHINE_MODES];
static int mul_highpart_cost[NUM_MACHINE_MODES];
void
init_expmed ()
{
  char *free_point;
  /* This is "some random pseudo register" for purposes of calling recog
     to see what insns exist.  */
  rtx reg = gen_rtx_REG (word_mode, 10000);
  rtx shift_insn, shiftadd_insn, shiftsub_insn;
  int dummy;
  int m;
  enum machine_mode mode, wider_mode;

  start_sequence ();

  /* Since we are on the permanent obstack, we must be sure we save this
     spot AFTER we call start_sequence, since it will reuse the rtl it
     makes.  */

  free_point = (char *) oballoc (0);

  zero_cost = rtx_cost (const0_rtx, 0);
  add_cost = rtx_cost (gen_rtx_PLUS (word_mode, reg, reg), SET);

  shift_insn = emit_insn (gen_rtx_SET (VOIDmode, reg,
				       gen_rtx_ASHIFT (word_mode, reg,
						       const0_rtx)));

  shiftadd_insn
    = emit_insn (gen_rtx_SET (VOIDmode, reg,
			      gen_rtx_PLUS (word_mode,
					    gen_rtx_MULT (word_mode,
							  reg, const0_rtx),
					    reg)));

  shiftsub_insn
    = emit_insn (gen_rtx_SET (VOIDmode, reg,
			      gen_rtx_MINUS (word_mode,
					     gen_rtx_MULT (word_mode,
							   reg, const0_rtx),
					     reg)));

  init_recog ();

  shift_cost[0] = 0;
  shiftadd_cost[0] = shiftsub_cost[0] = add_cost;

  for (m = 1; m < BITS_PER_WORD; m++)
    {
      shift_cost[m] = shiftadd_cost[m] = shiftsub_cost[m] = 32000;

      XEXP (SET_SRC (PATTERN (shift_insn)), 1) = GEN_INT (m);
      if (recog (PATTERN (shift_insn), shift_insn, &dummy) >= 0)
	shift_cost[m] = rtx_cost (SET_SRC (PATTERN (shift_insn)), SET);

      XEXP (XEXP (SET_SRC (PATTERN (shiftadd_insn)), 0), 1)
	= GEN_INT ((HOST_WIDE_INT) 1 << m);
      if (recog (PATTERN (shiftadd_insn), shiftadd_insn, &dummy) >= 0)
	shiftadd_cost[m] = rtx_cost (SET_SRC (PATTERN (shiftadd_insn)), SET);

      XEXP (XEXP (SET_SRC (PATTERN (shiftsub_insn)), 0), 1)
	= GEN_INT ((HOST_WIDE_INT) 1 << m);
      if (recog (PATTERN (shiftsub_insn), shiftsub_insn, &dummy) >= 0)
	shiftsub_cost[m] = rtx_cost (SET_SRC (PATTERN (shiftsub_insn)), SET);
    }

  negate_cost = rtx_cost (gen_rtx_NEG (word_mode, reg), SET);

  sdiv_pow2_cheap
    = (rtx_cost (gen_rtx_DIV (word_mode, reg, GEN_INT (32)), SET)
       <= 2 * add_cost);
  smod_pow2_cheap
    = (rtx_cost (gen_rtx_MOD (word_mode, reg, GEN_INT (32)), SET)
       <= 2 * add_cost);

  for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT);
       mode != VOIDmode;
       mode = GET_MODE_WIDER_MODE (mode))
    {
      reg = gen_rtx_REG (mode, 10000);
      div_cost[(int) mode] = rtx_cost (gen_rtx_UDIV (mode, reg, reg), SET);
      mul_cost[(int) mode] = rtx_cost (gen_rtx_MULT (mode, reg, reg), SET);
      wider_mode = GET_MODE_WIDER_MODE (mode);
      if (wider_mode != VOIDmode)
	{
	  mul_widen_cost[(int) wider_mode]
	    = rtx_cost (gen_rtx_MULT (wider_mode,
				      gen_rtx_ZERO_EXTEND (wider_mode, reg),
				      gen_rtx_ZERO_EXTEND (wider_mode, reg)),
			SET);
	  mul_highpart_cost[(int) mode]
	    = rtx_cost (gen_rtx_TRUNCATE
			(mode,
			 gen_rtx_LSHIFTRT
			 (wider_mode,
			  gen_rtx_MULT (wider_mode,
					gen_rtx_ZERO_EXTEND (wider_mode, reg),
					gen_rtx_ZERO_EXTEND (wider_mode, reg)),
			  GEN_INT (GET_MODE_BITSIZE (mode)))),
			SET);
	}
    }

  /* Free the objects we just allocated.  */
  end_sequence ();
  obfree (free_point);
}
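
/* Illustrative note: the tables filled in above let later passes compare
   candidate instruction sequences by cost.  For example, on a target whose
   md file recognizes shift and shift-add patterns, a multiply by 10 might
   be synthesized (hypothetical costs) as

       t = x << 2;  t = t + x;	-- costs shiftadd_cost[2]
       t = t << 1;		-- costs shift_cost[1]

   and the synth_mult machinery would pick that sequence whenever
   shiftadd_cost[2] + shift_cost[1] < mul_cost[(int) SImode].  */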
/* Return an rtx representing minus the value of X.
   MODE is the intended mode of the result,
   useful if X is a CONST_INT.  */

rtx
negate_rtx (mode, x)
     enum machine_mode mode;
     rtx x;
{
  rtx result = simplify_unary_operation (NEG, mode, x, mode);

  if (result == 0)
    result = expand_unop (mode, neg_optab, x, NULL_RTX, 0);

  return result;
}
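
/* For instance, negate_rtx (SImode, GEN_INT (5)) folds at compile time to
   (const_int -5), while negating a pseudo register falls through to
   expand_unop and emits a NEG insn (or an equivalent libcall).  */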
/* Generate code to store value from rtx VALUE
   into a bit-field within structure STR_RTX
   containing BITSIZE bits starting at bit BITNUM.
   FIELDMODE is the machine-mode of the FIELD_DECL node for this field.
   ALIGN is the alignment that STR_RTX is known to have, measured in bytes.
   TOTAL_SIZE is the size of the structure in bytes, or -1 if varying.  */

/* ??? Note that there are two different ideas here for how
   to determine the size to count bits within, for a register.
   One is BITS_PER_WORD, and the other is the size of operand 3
   of the insv pattern.  (The latter assumes that an n-bit machine
   will be able to insert bit fields up to n bits wide.)
   It isn't certain that either of these is right.
   extract_bit_field has the same quandary.  */

rtx
store_bit_field (str_rtx, bitsize, bitnum, fieldmode, value, align, total_size)
     rtx str_rtx;
     register int bitsize;
     int bitnum;
     enum machine_mode fieldmode;
     rtx value;
     int align;
     int total_size;
{
  int unit = (GET_CODE (str_rtx) == MEM) ? BITS_PER_UNIT : BITS_PER_WORD;
  register int offset = bitnum / unit;
  register int bitpos = bitnum % unit;
  register rtx op0 = str_rtx;

  if (GET_CODE (str_rtx) == MEM && ! MEM_IN_STRUCT_P (str_rtx))
    abort ();

  /* Discount the part of the structure before the desired byte.
     We need to know how many bytes are safe to reference after it.  */
  if (total_size >= 0)
    total_size -= (bitpos / BIGGEST_ALIGNMENT
		   * (BIGGEST_ALIGNMENT / BITS_PER_UNIT));

  while (GET_CODE (op0) == SUBREG)
    {
      /* The following line once was done only if WORDS_BIG_ENDIAN,
	 but I think that is a mistake.  WORDS_BIG_ENDIAN is
	 meaningful at a much higher level; when structures are copied
	 between memory and regs, the higher-numbered regs
	 always get higher addresses.  */
      offset += SUBREG_WORD (op0);
      /* We used to adjust BITPOS here, but now we do the whole adjustment
	 right after the loop.  */
      op0 = SUBREG_REG (op0);
    }

  /* If OP0 is a register, BITPOS must count within a word.
     But as we have it, it counts within whatever size OP0 now has.
     On a bigendian machine, these are not the same, so convert.  */
  if (BYTES_BIG_ENDIAN
      && GET_CODE (op0) != MEM
      && unit > GET_MODE_BITSIZE (GET_MODE (op0)))
    bitpos += unit - GET_MODE_BITSIZE (GET_MODE (op0));

  value = protect_from_queue (value, 0);

  if (flag_force_mem)
    value = force_not_mem (value);

  /* Note that the adjustment of BITPOS above has no effect on whether
     BITPOS is 0 in a REG bigger than a word.  */
  if (GET_MODE_SIZE (fieldmode) >= UNITS_PER_WORD
      && (GET_CODE (op0) != MEM
	  || ! SLOW_UNALIGNED_ACCESS
	  || (offset * BITS_PER_UNIT % bitsize == 0
	      && align % GET_MODE_SIZE (fieldmode) == 0))
      && bitpos == 0 && bitsize == GET_MODE_BITSIZE (fieldmode))
    {
      /* Storing in a full-word or multi-word field in a register
	 can be done with just SUBREG.  */
      if (GET_MODE (op0) != fieldmode)
	{
	  if (GET_CODE (op0) == REG)
	    op0 = gen_rtx_SUBREG (fieldmode, op0, offset);
	  else
	    op0 = change_address (op0, fieldmode,
				  plus_constant (XEXP (op0, 0), offset));
	}
      emit_move_insn (op0, value);
      return value;
    }

  /* Storing an lsb-aligned field in a register
     can be done with a movestrict instruction.  */

  if (GET_CODE (op0) != MEM
      && (BYTES_BIG_ENDIAN ? bitpos + bitsize == unit : bitpos == 0)
      && bitsize == GET_MODE_BITSIZE (fieldmode)
      && (GET_MODE (op0) == fieldmode
	  || (movstrict_optab->handlers[(int) fieldmode].insn_code
	      != CODE_FOR_nothing)))
    {
      /* Get appropriate low part of the value being stored.  */
      if (GET_CODE (value) == CONST_INT || GET_CODE (value) == REG)
	value = gen_lowpart (fieldmode, value);
      else if (!(GET_CODE (value) == SYMBOL_REF
		 || GET_CODE (value) == LABEL_REF
		 || GET_CODE (value) == CONST))
	value = convert_to_mode (fieldmode, value, 0);

      if (GET_MODE (op0) == fieldmode)
	emit_move_insn (op0, value);
      else
	{
	  int icode = movstrict_optab->handlers[(int) fieldmode].insn_code;
	  if (! (*insn_operand_predicate[icode][1]) (value, fieldmode))
	    value = copy_to_mode_reg (fieldmode, value);
	  emit_insn (GEN_FCN (icode)
		     (gen_rtx_SUBREG (fieldmode, op0, offset), value));
	}
      return value;
    }

  /* Handle fields bigger than a word.  */

  if (bitsize > BITS_PER_WORD)
    {
      /* Here we transfer the words of the field
	 in the order least significant first.
	 This is because the most significant word is the one which may
	 be less than full.
	 However, only do that if the value is not BLKmode.  */

      int backwards = WORDS_BIG_ENDIAN && fieldmode != BLKmode;

      int nwords = (bitsize + (BITS_PER_WORD - 1)) / BITS_PER_WORD;
      int i;

      /* This is the mode we must force value to, so that there will be enough
	 subwords to extract.  Note that fieldmode will often (always?) be
	 VOIDmode, because that is what store_field uses to indicate that this
	 is a bit field, but passing VOIDmode to operand_subword_force will
	 result in an abort.  */
      fieldmode = mode_for_size (nwords * BITS_PER_WORD, MODE_INT, 0);

      for (i = 0; i < nwords; i++)
	{
	  /* If I is 0, use the low-order word in both field and target;
	     if I is 1, use the next to lowest word; and so on.  */
	  int wordnum = (backwards ? nwords - i - 1 : i);
	  int bit_offset = (backwards
			    ? MAX (bitsize - (i + 1) * BITS_PER_WORD, 0)
			    : i * BITS_PER_WORD);
	  store_bit_field (op0, MIN (BITS_PER_WORD,
				     bitsize - i * BITS_PER_WORD),
			   bitnum + bit_offset, word_mode,
			   operand_subword_force (value, wordnum,
						  (GET_MODE (value) == VOIDmode
						   ? fieldmode
						   : GET_MODE (value))),
			   align, total_size);
	}
      return value;
    }
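
  /* A worked example (hypothetical 32-bit target): storing a 50-bit field
     at bit 0 becomes two recursive calls, least significant word first:

	 store_bit_field (op0, 32, bitnum,      word_mode, <word 0>, ...);
	 store_bit_field (op0, 18, bitnum + 32, word_mode, <word 1>, ...);

     Only the last, partial word needs masking; the full word is a plain
     move.  With WORDS_BIG_ENDIAN and a non-BLKmode value the order is
     reversed.  */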
  /* From here on we can assume that the field to be stored in
     fits within a single word, since it is shorter than a word.  */

  /* OFFSET is the number of words or bytes (UNIT says which)
     from STR_RTX to the first word or byte containing part of the field.  */

  if (GET_CODE (op0) == REG)
    {
      if (offset != 0
	  || GET_MODE_SIZE (GET_MODE (op0)) > UNITS_PER_WORD)
	op0 = gen_rtx_SUBREG (TYPE_MODE (type_for_size (BITS_PER_WORD, 0)),
			      op0, offset);
      offset = 0;
    }
  else
    op0 = protect_from_queue (op0, 1);

  /* If VALUE is a floating-point mode, access it as an integer of the
     corresponding size.  This can occur on a machine with 64 bit registers
     that uses SFmode for float.  This can also occur for unaligned float
     structure fields.  */
  if (GET_MODE_CLASS (GET_MODE (value)) == MODE_FLOAT)
    {
      if (GET_CODE (value) != REG)
	value = copy_to_reg (value);
      value = gen_rtx_SUBREG (word_mode, value, 0);
    }

  /* Now OFFSET is nonzero only if OP0 is memory
     and is therefore always measured in bytes.  */

#ifdef HAVE_insv
  if (HAVE_insv
      && GET_MODE (value) != BLKmode
      && !(bitsize == 1 && GET_CODE (value) == CONST_INT)
      /* Ensure insv's size is wide enough for this field.  */
      && (GET_MODE_BITSIZE (insn_operand_mode[(int) CODE_FOR_insv][3])
	  >= bitsize)
      && ! ((GET_CODE (op0) == REG || GET_CODE (op0) == SUBREG)
	    && (bitsize + bitpos
		> GET_MODE_BITSIZE (insn_operand_mode[(int) CODE_FOR_insv][3]))))
    {
      int xbitpos = bitpos;
      rtx value1;
      rtx xop0 = op0;
      rtx last = get_last_insn ();
      rtx pat;
      enum machine_mode maxmode
	= insn_operand_mode[(int) CODE_FOR_insv][3];

      int save_volatile_ok = volatile_ok;
      volatile_ok = 1;

      /* If this machine's insv can only insert into a register, copy OP0
	 into a register and save it back later.  */
      /* This used to check flag_force_mem, but that was a serious
	 de-optimization now that flag_force_mem is enabled by -O2.  */
      if (GET_CODE (op0) == MEM
	  && ! ((*insn_operand_predicate[(int) CODE_FOR_insv][0])
		(op0, VOIDmode)))
	{
	  rtx tempreg;
	  enum machine_mode bestmode;

	  /* Get the mode to use for inserting into this field.  If OP0 is
	     BLKmode, get the smallest mode consistent with the alignment.  If
	     OP0 is a non-BLKmode object that is no wider than MAXMODE, use its
	     mode.  Otherwise, use the smallest mode containing the field.  */

	  if (GET_MODE (op0) == BLKmode
	      || GET_MODE_SIZE (GET_MODE (op0)) > GET_MODE_SIZE (maxmode))
	    bestmode
	      = get_best_mode (bitsize, bitnum, align * BITS_PER_UNIT, maxmode,
			       MEM_VOLATILE_P (op0));
	  else
	    bestmode = GET_MODE (op0);

	  if (bestmode == VOIDmode
	      || (SLOW_UNALIGNED_ACCESS && GET_MODE_SIZE (bestmode) > align))
	    goto insv_loses;

	  /* Adjust address to point to the containing unit of that mode.  */
	  unit = GET_MODE_BITSIZE (bestmode);
	  /* Compute offset as multiple of this unit, counting in bytes.  */
	  offset = (bitnum / unit) * GET_MODE_SIZE (bestmode);
	  bitpos = bitnum % unit;
	  op0 = change_address (op0, bestmode,
				plus_constant (XEXP (op0, 0), offset));

	  /* Fetch that unit, store the bitfield in it, then store the unit.  */
	  tempreg = copy_to_reg (op0);
	  store_bit_field (tempreg, bitsize, bitpos, fieldmode, value,
			   align, total_size);
	  emit_move_insn (op0, tempreg);
	  return value;
	}
      volatile_ok = save_volatile_ok;

      /* Add OFFSET into OP0's address.  */
      if (GET_CODE (xop0) == MEM)
	xop0 = change_address (xop0, byte_mode,
			       plus_constant (XEXP (xop0, 0), offset));

      /* If xop0 is a register, we need it in MAXMODE
	 to make it acceptable to the format of insv.  */
      if (GET_CODE (xop0) == SUBREG)
	/* We can't just change the mode, because this might clobber op0,
	   and we will need the original value of op0 if insv fails.  */
	xop0 = gen_rtx_SUBREG (maxmode, SUBREG_REG (xop0), SUBREG_WORD (xop0));
      if (GET_CODE (xop0) == REG && GET_MODE (xop0) != maxmode)
	xop0 = gen_rtx_SUBREG (maxmode, xop0, 0);

      /* On big-endian machines, we count bits from the most significant.
	 If the bit field insn does not, we must invert.  */

      if (BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
	xbitpos = unit - bitsize - xbitpos;

      /* We have been counting XBITPOS within UNIT.
	 Count instead within the size of the register.  */
      if (BITS_BIG_ENDIAN && GET_CODE (xop0) != MEM)
	xbitpos += GET_MODE_BITSIZE (maxmode) - unit;

      unit = GET_MODE_BITSIZE (maxmode);

      /* Convert VALUE to maxmode (which insv insn wants) in VALUE1.  */
      value1 = value;
      if (GET_MODE (value) != maxmode)
	{
	  if (GET_MODE_BITSIZE (GET_MODE (value)) >= bitsize)
	    {
	      /* Optimization: Don't bother really extending VALUE
		 if it has all the bits we will actually use.  However,
		 if we must narrow it, be sure we do it correctly.  */

	      if (GET_MODE_SIZE (GET_MODE (value)) < GET_MODE_SIZE (maxmode))
		{
		  /* Avoid making subreg of a subreg, or of a mem.  */
		  if (GET_CODE (value1) != REG)
		    value1 = copy_to_reg (value1);
		  value1 = gen_rtx_SUBREG (maxmode, value1, 0);
		}
	      else
		value1 = gen_lowpart (maxmode, value1);
	    }
	  else if (!CONSTANT_P (value))
	    /* Parse phase is supposed to make VALUE's data type
	       match that of the component reference, which is a type
	       at least as wide as the field; so VALUE should have
	       a mode that corresponds to that type.  */
	    abort ();
	}

      /* If this machine's insv insists on a register,
	 get VALUE1 into a register.  */
      if (! ((*insn_operand_predicate[(int) CODE_FOR_insv][3])
	     (value1, maxmode)))
	value1 = force_reg (maxmode, value1);

      pat = gen_insv (xop0, GEN_INT (bitsize), GEN_INT (xbitpos), value1);
      if (pat)
	emit_insn (pat);
      else
	{
	  delete_insns_since (last);
	  store_fixed_bit_field (op0, offset, bitsize, bitpos, value, align);
	}
    }
  else
    insv_loses:
#endif
    /* Insv is not available; store using shifts and boolean ops.  */
    store_fixed_bit_field (op0, offset, bitsize, bitpos, value, align);
  return value;
}
/* Use shifts and boolean operations to store VALUE
   into a bit field of width BITSIZE
   in a memory location specified by OP0 except offset by OFFSET bytes.
     (OFFSET must be 0 if OP0 is a register.)
   The field starts at position BITPOS within the byte.
     (If OP0 is a register, it may be a full word or a narrower mode,
      but BITPOS still counts within a full word,
      which is significant on bigendian machines.)
   STRUCT_ALIGN is the alignment the structure is known to have (in bytes).

   Note that protect_from_queue has already been done on OP0 and VALUE.  */

static void
store_fixed_bit_field (op0, offset, bitsize, bitpos, value, struct_align)
     register rtx op0;
     register int offset, bitsize, bitpos;
     register rtx value;
     int struct_align;
{
  register enum machine_mode mode;
  int total_bits = BITS_PER_WORD;
  rtx subtarget, temp;
  int all_zero = 0;
  int all_one = 0;

  if (! SLOW_UNALIGNED_ACCESS)
    struct_align = BIGGEST_ALIGNMENT / BITS_PER_UNIT;

  /* There is a case not handled here:
     a structure with a known alignment of just a halfword
     and a field split across two aligned halfwords within the structure.
     Or likewise a structure with a known alignment of just a byte
     and a field split across two bytes.
     Such cases are not supposed to be able to occur.  */

  if (GET_CODE (op0) == REG || GET_CODE (op0) == SUBREG)
    {
      if (offset != 0)
	abort ();
      /* Special treatment for a bit field split across two registers.  */
      if (bitsize + bitpos > BITS_PER_WORD)
	{
	  store_split_bit_field (op0, bitsize, bitpos,
				 value, BITS_PER_WORD);
	  return;
	}
    }
  else
    {
      /* Get the proper mode to use for this field.  We want a mode that
	 includes the entire field.  If such a mode would be larger than
	 a word, we won't be doing the extraction the normal way.  */

      mode = get_best_mode (bitsize, bitpos + offset * BITS_PER_UNIT,
			    struct_align * BITS_PER_UNIT, word_mode,
			    GET_CODE (op0) == MEM && MEM_VOLATILE_P (op0));

      if (mode == VOIDmode)
	{
	  /* The only way this should occur is if the field spans word
	     boundaries.  */
	  store_split_bit_field (op0,
				 bitsize, bitpos + offset * BITS_PER_UNIT,
				 value, struct_align);
	  return;
	}

      total_bits = GET_MODE_BITSIZE (mode);

      /* Make sure bitpos is valid for the chosen mode.  Adjust BITPOS to
	 be in the range 0 to total_bits-1, and put any excess bytes in
	 OFFSET.  */
      if (bitpos >= total_bits)
	{
	  offset += (bitpos / total_bits) * (total_bits / BITS_PER_UNIT);
	  bitpos -= ((bitpos / total_bits) * (total_bits / BITS_PER_UNIT)
		     * BITS_PER_UNIT);
	}

      /* Get ref to an aligned byte, halfword, or word containing the field.
	 Adjust BITPOS to be position within a word,
	 and OFFSET to be the offset of that word.
	 Then alter OP0 to refer to that word.  */
      bitpos += (offset % (total_bits / BITS_PER_UNIT)) * BITS_PER_UNIT;
      offset -= (offset % (total_bits / BITS_PER_UNIT));
      op0 = change_address (op0, mode,
			    plus_constant (XEXP (op0, 0), offset));
    }

  mode = GET_MODE (op0);

  /* Now MODE is either some integral mode for a MEM as OP0,
     or is a full-word for a REG as OP0.  TOTAL_BITS corresponds.
     The bit field is contained entirely within OP0.
     BITPOS is the starting bit number within OP0.
     (OP0's mode may actually be narrower than MODE.)  */

  if (BYTES_BIG_ENDIAN)
    /* BITPOS is the distance between our msb
       and that of the containing datum.
       Convert it to the distance from the lsb.  */
    bitpos = total_bits - bitsize - bitpos;

  /* Now BITPOS is always the distance between our lsb
     and that of OP0.  */

  /* Shift VALUE left by BITPOS bits.  If VALUE is not constant,
     we must first convert its mode to MODE.  */

  if (GET_CODE (value) == CONST_INT)
    {
      register HOST_WIDE_INT v = INTVAL (value);

      if (bitsize < HOST_BITS_PER_WIDE_INT)
	v &= ((HOST_WIDE_INT) 1 << bitsize) - 1;

      if (v == 0)
	all_zero = 1;
      else if ((bitsize < HOST_BITS_PER_WIDE_INT
		&& v == ((HOST_WIDE_INT) 1 << bitsize) - 1)
	       || (bitsize == HOST_BITS_PER_WIDE_INT && v == -1))
	all_one = 1;

      value = lshift_value (mode, value, bitpos, bitsize);
    }
  else
    {
      int must_and = (GET_MODE_BITSIZE (GET_MODE (value)) != bitsize
		      && bitpos + bitsize != GET_MODE_BITSIZE (mode));

      if (GET_MODE (value) != mode)
	{
	  if ((GET_CODE (value) == REG || GET_CODE (value) == SUBREG)
	      && GET_MODE_SIZE (mode) < GET_MODE_SIZE (GET_MODE (value)))
	    value = gen_lowpart (mode, value);
	  else
	    value = convert_to_mode (mode, value, 1);
	}

      if (must_and)
	value = expand_binop (mode, and_optab, value,
			      mask_rtx (mode, 0, bitsize, 0),
			      NULL_RTX, 1, OPTAB_LIB_WIDEN);
      if (bitpos > 0)
	value = expand_shift (LSHIFT_EXPR, mode, value,
			      build_int_2 (bitpos, 0), NULL_RTX, 1);
    }

  /* Now clear the chosen bits in OP0,
     except that if VALUE is -1 we need not bother.  */

  subtarget = (GET_CODE (op0) == REG || ! flag_force_mem) ? op0 : 0;

  if (! all_one)
    {
      temp = expand_binop (mode, and_optab, op0,
			   mask_rtx (mode, bitpos, bitsize, 1),
			   subtarget, 1, OPTAB_LIB_WIDEN);
      subtarget = temp;
    }
  else
    temp = op0;

  /* Now logical-or VALUE into OP0, unless it is zero.  */

  if (! all_zero)
    temp = expand_binop (mode, ior_optab, temp, value,
			 subtarget, 1, OPTAB_LIB_WIDEN);
  if (op0 != temp)
    emit_move_insn (op0, temp);
}
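
/* Illustrative sketch (not used anywhere): what the RTL built above
   computes, written as plain C for a hypothetical 32-bit word.  */
#if 0
static unsigned int
store_field_example (unsigned int word, unsigned int value)
{
  /* Store an 8-bit field at bit position 4 of WORD.  */
  unsigned int mask = 0xff << 4;	/* mask_rtx (SImode, 4, 8, 0) */
  return (word & ~mask) | ((value & 0xff) << 4);
}
#endif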
/* Store a bit field that is split across multiple accessible memory objects.

   OP0 is the REG, SUBREG or MEM rtx for the first of the objects.
   BITSIZE is the field width; BITPOS the position of its first bit
   (within the word).
   VALUE is the value to store.
   ALIGN is the known alignment of OP0, measured in bytes.
   This is also the size of the memory objects to be used.

   This does not yet handle fields wider than BITS_PER_WORD.  */

static void
store_split_bit_field (op0, bitsize, bitpos, value, align)
     rtx op0;
     int bitsize, bitpos;
     rtx value;
     int align;
{
  int unit;
  int bitsdone = 0;

  /* Make sure UNIT isn't larger than BITS_PER_WORD, we can only handle that
     much at a time.  */
  if (GET_CODE (op0) == REG || GET_CODE (op0) == SUBREG)
    unit = BITS_PER_WORD;
  else
    unit = MIN (align * BITS_PER_UNIT, BITS_PER_WORD);

  /* If VALUE is a constant other than a CONST_INT, get it into a register in
     WORD_MODE.  If we can do this using gen_lowpart_common, do so.  Note
     that VALUE might be a floating-point constant.  */
  if (CONSTANT_P (value) && GET_CODE (value) != CONST_INT)
    {
      rtx word = gen_lowpart_common (word_mode, value);

      if (word && (value != word))
	value = word;
      else
	value = gen_lowpart_common (word_mode,
				    force_reg (GET_MODE (value) != VOIDmode
					       ? GET_MODE (value)
					       : word_mode, value));
    }
  else if (GET_CODE (value) == ADDRESSOF)
    value = copy_to_reg (value);

  while (bitsdone < bitsize)
    {
      int thissize;
      rtx part, word;
      int thispos;
      int offset;

      offset = (bitpos + bitsdone) / unit;
      thispos = (bitpos + bitsdone) % unit;

      /* THISSIZE must not overrun a word boundary.  Otherwise,
	 store_fixed_bit_field will call us again, and we will mutually
	 recurse forever.  */
      thissize = MIN (bitsize - bitsdone, BITS_PER_WORD);
      thissize = MIN (thissize, unit - thispos);

      if (BYTES_BIG_ENDIAN)
	{
	  int total_bits;

	  /* We must do an endian conversion exactly the same way as it is
	     done in extract_bit_field, so that the two calls to
	     extract_fixed_bit_field will have comparable arguments.  */
	  if (GET_CODE (value) != MEM || GET_MODE (value) == BLKmode)
	    total_bits = BITS_PER_WORD;
	  else
	    total_bits = GET_MODE_BITSIZE (GET_MODE (value));

	  /* Fetch successively less significant portions.  */
	  if (GET_CODE (value) == CONST_INT)
	    part = GEN_INT (((unsigned HOST_WIDE_INT) (INTVAL (value))
			     >> (bitsize - bitsdone - thissize))
			    & (((HOST_WIDE_INT) 1 << thissize) - 1));
	  else
	    /* The args are chosen so that the last part includes the
	       lsb.  Give extract_bit_field the value it needs (with
	       endianness compensation) to fetch the piece we want.

	       ??? We have no idea what the alignment of VALUE is, so
	       we have to use a guess.  */
	    part
	      = extract_fixed_bit_field
		(word_mode, value, 0, thissize,
		 total_bits - bitsize + bitsdone, NULL_RTX, 1,
		 GET_MODE (value) == VOIDmode
		 ? UNITS_PER_WORD
		 : (GET_MODE (value) == BLKmode
		    ? 1
		    : GET_MODE_ALIGNMENT (GET_MODE (value)) / BITS_PER_UNIT));
	}
      else
	{
	  /* Fetch successively more significant portions.  */
	  if (GET_CODE (value) == CONST_INT)
	    part = GEN_INT (((unsigned HOST_WIDE_INT) (INTVAL (value))
			     >> bitsdone)
			    & (((HOST_WIDE_INT) 1 << thissize) - 1));
	  else
	    part
	      = extract_fixed_bit_field
		(word_mode, value, 0, thissize, bitsdone, NULL_RTX, 1,
		 GET_MODE (value) == VOIDmode
		 ? UNITS_PER_WORD
		 : (GET_MODE (value) == BLKmode
		    ? 1
		    : GET_MODE_ALIGNMENT (GET_MODE (value)) / BITS_PER_UNIT));
	}

      /* If OP0 is a register, then handle OFFSET here.

	 When handling multiword bitfields, extract_bit_field may pass
	 down a word_mode SUBREG of a larger REG for a bitfield that actually
	 crosses a word boundary.  Thus, for a SUBREG, we must find
	 the current word starting from the base register.  */
      if (GET_CODE (op0) == SUBREG)
	{
	  word = operand_subword_force (SUBREG_REG (op0),
					SUBREG_WORD (op0) + offset,
					GET_MODE (SUBREG_REG (op0)));
	  offset = 0;
	}
      else if (GET_CODE (op0) == REG)
	{
	  word = operand_subword_force (op0, offset, GET_MODE (op0));
	  offset = 0;
	}
      else
	word = op0;

      /* OFFSET is in UNITs, and UNIT is in bits.
	 store_fixed_bit_field wants offset in bytes.  */
      store_fixed_bit_field (word, offset * unit / BITS_PER_UNIT,
			     thissize, thispos, part, align);
      bitsdone += thissize;
    }
}
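
/* A worked example (hypothetical little-endian 32-bit target): a 12-bit
   field starting at bit 26 is split into a 6-bit piece at bit 26 of the
   first word (bits 0-5 of the value) and a 6-bit piece at bit 0 of the
   next word (bits 6-11 of the value); each piece is handed back to
   store_fixed_bit_field, which then sees a field that fits in one word.  */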
/* Generate code to extract a byte-field from STR_RTX
   containing BITSIZE bits, starting at BITNUM,
   and put it in TARGET if possible (if TARGET is nonzero).
   Regardless of TARGET, we return the rtx for where the value is placed.
   It may be a QUEUED.

   STR_RTX is the structure containing the byte (a REG or MEM).
   UNSIGNEDP is nonzero if this is an unsigned bit field.
   MODE is the natural mode of the field value once extracted.
   TMODE is the mode the caller would like the value to have;
   but the value may be returned with type MODE instead.

   ALIGN is the alignment that STR_RTX is known to have, measured in bytes.
   TOTAL_SIZE is the size in bytes of the containing structure,
   or -1 if varying.

   If a TARGET is specified and we can store in it at no extra cost,
   we do so, and return TARGET.
   Otherwise, we return a REG of mode TMODE or MODE, with TMODE preferred
   if they are equally easy.  */

rtx
extract_bit_field (str_rtx, bitsize, bitnum, unsignedp,
		   target, mode, tmode, align, total_size)
     rtx str_rtx;
     register int bitsize;
     int bitnum;
     int unsignedp;
     rtx target;
     enum machine_mode mode, tmode;
     int align;
     int total_size;
{
  int unit = (GET_CODE (str_rtx) == MEM) ? BITS_PER_UNIT : BITS_PER_WORD;
  register int offset = bitnum / unit;
  register int bitpos = bitnum % unit;
  register rtx op0 = str_rtx;
  rtx spec_target = target;
  rtx spec_target_subreg = 0;

  /* Discount the part of the structure before the desired byte.
     We need to know how many bytes are safe to reference after it.  */
  if (total_size >= 0)
    total_size -= (bitpos / BIGGEST_ALIGNMENT
		   * (BIGGEST_ALIGNMENT / BITS_PER_UNIT));

  if (tmode == VOIDmode)
    tmode = mode;
  while (GET_CODE (op0) == SUBREG)
    {
      int outer_size = GET_MODE_BITSIZE (GET_MODE (op0));
      int inner_size = GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (op0)));

      offset += SUBREG_WORD (op0);

      inner_size = MIN (inner_size, BITS_PER_WORD);

      if (BYTES_BIG_ENDIAN && (outer_size < inner_size))
	{
	  bitpos += inner_size - outer_size;
	  if (bitpos > unit)
	    {
	      offset += (bitpos / unit);
	      bitpos %= unit;
	    }
	}

      op0 = SUBREG_REG (op0);
    }

  /* ??? We currently assume TARGET is at least as big as BITSIZE.
     If that's wrong, the solution is to test for it and set TARGET to 0
     if needed.  */

  /* If OP0 is a register, BITPOS must count within a word.
     But as we have it, it counts within whatever size OP0 now has.
     On a bigendian machine, these are not the same, so convert.  */
  if (BYTES_BIG_ENDIAN
      && GET_CODE (op0) != MEM
      && unit > GET_MODE_BITSIZE (GET_MODE (op0)))
    bitpos += unit - GET_MODE_BITSIZE (GET_MODE (op0));

  /* Extracting a full-word or multi-word value
     from a structure in a register or aligned memory.
     This can be done with just SUBREG.
     So too extracting a subword value in
     the least significant part of the register.  */

  if (((GET_CODE (op0) == REG
	&& TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
				  GET_MODE_BITSIZE (GET_MODE (op0))))
       || (GET_CODE (op0) == MEM
	   && (! SLOW_UNALIGNED_ACCESS
	       || (offset * BITS_PER_UNIT % bitsize == 0
		   && align * BITS_PER_UNIT % bitsize == 0))))
      && ((bitsize >= BITS_PER_WORD && bitsize == GET_MODE_BITSIZE (mode)
	   && bitpos % BITS_PER_WORD == 0)
	  || (mode_for_size (bitsize, GET_MODE_CLASS (tmode), 0) != BLKmode
	      && (BYTES_BIG_ENDIAN
		  ? bitpos + bitsize == BITS_PER_WORD
		  : bitpos == 0))))
    {
      enum machine_mode mode1
	= mode_for_size (bitsize, GET_MODE_CLASS (tmode), 0);

      if (mode1 != GET_MODE (op0))
	{
	  if (GET_CODE (op0) == REG)
	    op0 = gen_rtx_SUBREG (mode1, op0, offset);
	  else
	    op0 = change_address (op0, mode1,
				  plus_constant (XEXP (op0, 0), offset));
	}

      if (mode1 != mode)
	return convert_to_mode (tmode, op0, unsignedp);
      return op0;
    }

  /* Handle fields bigger than a word.  */

  if (bitsize > BITS_PER_WORD)
    {
      /* Here we transfer the words of the field
	 in the order least significant first.
	 This is because the most significant word is the one which may
	 be less than full.  */

      int nwords = (bitsize + (BITS_PER_WORD - 1)) / BITS_PER_WORD;
      int i;

      if (target == 0 || GET_CODE (target) != REG)
	target = gen_reg_rtx (mode);

      /* Indicate for flow that the entire target reg is being set.  */
      emit_insn (gen_rtx_CLOBBER (VOIDmode, target));

      for (i = 0; i < nwords; i++)
	{
	  /* If I is 0, use the low-order word in both field and target;
	     if I is 1, use the next to lowest word; and so on.  */
	  /* Word number in TARGET to use.  */
	  int wordnum = (WORDS_BIG_ENDIAN
			 ? GET_MODE_SIZE (GET_MODE (target)) / UNITS_PER_WORD - i - 1
			 : i);
	  /* Offset from start of field in OP0.  */
	  int bit_offset = (WORDS_BIG_ENDIAN
			    ? MAX (0, bitsize - (i + 1) * BITS_PER_WORD)
			    : i * BITS_PER_WORD);
	  rtx target_part = operand_subword (target, wordnum, 1, VOIDmode);
	  rtx result_part
	    = extract_bit_field (op0, MIN (BITS_PER_WORD,
					   bitsize - i * BITS_PER_WORD),
				 bitnum + bit_offset,
				 1, target_part, mode, word_mode,
				 align, total_size);

	  if (target_part == 0)
	    abort ();

	  if (result_part != target_part)
	    emit_move_insn (target_part, result_part);
	}

      if (unsignedp)
	{
	  /* Unless we've filled TARGET, the upper regs in a multi-reg value
	     need to be zero'd out.  */
	  if (GET_MODE_SIZE (GET_MODE (target)) > nwords * UNITS_PER_WORD)
	    {
	      int i, total_words;

	      total_words = GET_MODE_SIZE (GET_MODE (target)) / UNITS_PER_WORD;
	      for (i = nwords; i < total_words; i++)
		{
		  int wordnum = WORDS_BIG_ENDIAN ? total_words - i - 1 : i;
		  rtx target_part = operand_subword (target, wordnum, 1, VOIDmode);
		  emit_move_insn (target_part, const0_rtx);
		}
	    }
	  return target;
	}

      /* Signed bit field: sign-extend with two arithmetic shifts.  */
      target = expand_shift (LSHIFT_EXPR, mode, target,
			     build_int_2 (GET_MODE_BITSIZE (mode) - bitsize, 0),
			     NULL_RTX, 0);
      return expand_shift (RSHIFT_EXPR, mode, target,
			   build_int_2 (GET_MODE_BITSIZE (mode) - bitsize, 0),
			   NULL_RTX, 0);
    }
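
  /* In C terms, the two shifts just emitted perform sign extension; e.g.
     for a 50-bit field in a 64-bit mode (hypothetical numbers):

	 x = (signed) (x << 14) >> 14;

     which replicates the field's sign bit through the upper 14 bits.  */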
  /* From here on we know the desired field is smaller than a word
     so we can assume it is an integer.  So we can safely extract it as one
     size of integer, if necessary, and then truncate or extend
     to the size that is wanted.  */

  /* OFFSET is the number of words or bytes (UNIT says which)
     from STR_RTX to the first word or byte containing part of the field.  */

  if (GET_CODE (op0) == REG)
    {
      if (offset != 0
	  || GET_MODE_SIZE (GET_MODE (op0)) > UNITS_PER_WORD)
	op0 = gen_rtx_SUBREG (TYPE_MODE (type_for_size (BITS_PER_WORD, 0)),
			      op0, offset);
      offset = 0;
    }
  else
    op0 = protect_from_queue (str_rtx, 1);

  /* Now OFFSET is nonzero only for memory operands.  */

  if (unsignedp)
    {
#ifdef HAVE_extzv
      if (HAVE_extzv
	  && (GET_MODE_BITSIZE (insn_operand_mode[(int) CODE_FOR_extzv][0])
	      >= bitsize)
	  && ! ((GET_CODE (op0) == REG || GET_CODE (op0) == SUBREG)
		&& (bitsize + bitpos
		    > GET_MODE_BITSIZE (insn_operand_mode[(int) CODE_FOR_extzv][0]))))
	{
	  int xbitpos = bitpos, xoffset = offset;
	  rtx bitsize_rtx, bitpos_rtx;
	  rtx last = get_last_insn ();
	  rtx xop0 = op0;
	  rtx xtarget = target;
	  rtx xspec_target = spec_target;
	  rtx xspec_target_subreg = spec_target_subreg;
	  rtx pat;
	  enum machine_mode maxmode
	    = insn_operand_mode[(int) CODE_FOR_extzv][0];

	  if (GET_CODE (xop0) == MEM)
	    {
	      int save_volatile_ok = volatile_ok;
	      volatile_ok = 1;

	      /* Is the memory operand acceptable?  */
	      if (! ((*insn_operand_predicate[(int) CODE_FOR_extzv][1])
		     (xop0, GET_MODE (xop0))))
		{
		  /* No, load into a reg and extract from there.  */
		  enum machine_mode bestmode;

		  /* Get the mode to use for inserting into this field.  If
		     OP0 is BLKmode, get the smallest mode consistent with the
		     alignment.  If OP0 is a non-BLKmode object that is no
		     wider than MAXMODE, use its mode.  Otherwise, use the
		     smallest mode containing the field.  */

		  if (GET_MODE (xop0) == BLKmode
		      || (GET_MODE_SIZE (GET_MODE (op0))
			  > GET_MODE_SIZE (maxmode)))
		    bestmode = get_best_mode (bitsize, bitnum,
					      align * BITS_PER_UNIT, maxmode,
					      MEM_VOLATILE_P (xop0));
		  else
		    bestmode = GET_MODE (xop0);

		  if (bestmode == VOIDmode
		      || (SLOW_UNALIGNED_ACCESS && GET_MODE_SIZE (bestmode) > align))
		    goto extzv_loses;

		  /* Compute offset as multiple of this unit,
		     counting in bytes.  */
		  unit = GET_MODE_BITSIZE (bestmode);
		  xoffset = (bitnum / unit) * GET_MODE_SIZE (bestmode);
		  xbitpos = bitnum % unit;
		  xop0 = change_address (xop0, bestmode,
					 plus_constant (XEXP (xop0, 0),
							xoffset));
		  /* Fetch it to a register in that size.  */
		  xop0 = force_reg (bestmode, xop0);

		  /* XBITPOS counts within UNIT, which is what is expected.  */
		}
	      else
		/* Get ref to first byte containing part of the field.  */
		xop0 = change_address (xop0, byte_mode,
				       plus_constant (XEXP (xop0, 0), xoffset));

	      volatile_ok = save_volatile_ok;
	    }

	  /* If op0 is a register, we need it in MAXMODE (which is usually
	     SImode) to make it acceptable to the format of extzv.  */
	  if (GET_CODE (xop0) == SUBREG && GET_MODE (xop0) != maxmode)
	    goto extzv_loses;
	  if (GET_CODE (xop0) == REG && GET_MODE (xop0) != maxmode)
	    xop0 = gen_rtx_SUBREG (maxmode, xop0, 0);

	  /* On big-endian machines, we count bits from the most significant.
	     If the bit field insn does not, we must invert.  */
	  if (BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
	    xbitpos = unit - bitsize - xbitpos;

	  /* Now convert from counting within UNIT to counting in MAXMODE.  */
	  if (BITS_BIG_ENDIAN && GET_CODE (xop0) != MEM)
	    xbitpos += GET_MODE_BITSIZE (maxmode) - unit;

	  unit = GET_MODE_BITSIZE (maxmode);

	  if (xtarget == 0
	      || (flag_force_mem && GET_CODE (xtarget) == MEM))
	    xtarget = xspec_target = gen_reg_rtx (tmode);

	  if (GET_MODE (xtarget) != maxmode)
	    {
	      if (GET_CODE (xtarget) == REG)
		{
		  int wider = (GET_MODE_SIZE (maxmode)
			       > GET_MODE_SIZE (GET_MODE (xtarget)));
		  xtarget = gen_lowpart (maxmode, xtarget);
		  if (wider)
		    xspec_target_subreg = xtarget;
		}
	      else
		xtarget = gen_reg_rtx (maxmode);
	    }

	  /* If this machine's extzv insists on a register target,
	     make sure we have one.  */
	  if (! ((*insn_operand_predicate[(int) CODE_FOR_extzv][0])
		 (xtarget, maxmode)))
	    xtarget = gen_reg_rtx (maxmode);

	  bitsize_rtx = GEN_INT (bitsize);
	  bitpos_rtx = GEN_INT (xbitpos);

	  pat = gen_extzv (protect_from_queue (xtarget, 1),
			   xop0, bitsize_rtx, bitpos_rtx);
	  if (pat)
	    {
	      emit_insn (pat);
	      target = xtarget;
	      spec_target = xspec_target;
	      spec_target_subreg = xspec_target_subreg;
	    }
	  else
	    {
	      delete_insns_since (last);
	      target = extract_fixed_bit_field (tmode, op0, offset, bitsize,
						bitpos, target, 1, align);
	    }
	}
      else
	extzv_loses:
#endif
	target = extract_fixed_bit_field (tmode, op0, offset, bitsize, bitpos,
					  target, 1, align);
    }
  else
    {
#ifdef HAVE_extv
      if (HAVE_extv
	  && (GET_MODE_BITSIZE (insn_operand_mode[(int) CODE_FOR_extv][0])
	      >= bitsize)
	  && ! ((GET_CODE (op0) == REG || GET_CODE (op0) == SUBREG)
		&& (bitsize + bitpos
		    > GET_MODE_BITSIZE (insn_operand_mode[(int) CODE_FOR_extv][0]))))
	{
	  int xbitpos = bitpos, xoffset = offset;
	  rtx bitsize_rtx, bitpos_rtx;
	  rtx last = get_last_insn ();
	  rtx xop0 = op0, xtarget = target;
	  rtx xspec_target = spec_target;
	  rtx xspec_target_subreg = spec_target_subreg;
	  rtx pat;
	  enum machine_mode maxmode
	    = insn_operand_mode[(int) CODE_FOR_extv][0];

	  if (GET_CODE (xop0) == MEM)
	    {
	      /* Is the memory operand acceptable?  */
	      if (! ((*insn_operand_predicate[(int) CODE_FOR_extv][1])
		     (xop0, GET_MODE (xop0))))
		{
		  /* No, load into a reg and extract from there.  */
		  enum machine_mode bestmode;

		  /* Get the mode to use for inserting into this field.  If
		     OP0 is BLKmode, get the smallest mode consistent with the
		     alignment.  If OP0 is a non-BLKmode object that is no
		     wider than MAXMODE, use its mode.  Otherwise, use the
		     smallest mode containing the field.  */

		  if (GET_MODE (xop0) == BLKmode
		      || (GET_MODE_SIZE (GET_MODE (op0))
			  > GET_MODE_SIZE (maxmode)))
		    bestmode = get_best_mode (bitsize, bitnum,
					      align * BITS_PER_UNIT, maxmode,
					      MEM_VOLATILE_P (xop0));
		  else
		    bestmode = GET_MODE (xop0);

		  if (bestmode == VOIDmode
		      || (SLOW_UNALIGNED_ACCESS && GET_MODE_SIZE (bestmode) > align))
		    goto extv_loses;

		  /* Compute offset as multiple of this unit,
		     counting in bytes.  */
		  unit = GET_MODE_BITSIZE (bestmode);
		  xoffset = (bitnum / unit) * GET_MODE_SIZE (bestmode);
		  xbitpos = bitnum % unit;
		  xop0 = change_address (xop0, bestmode,
					 plus_constant (XEXP (xop0, 0),
							xoffset));
		  /* Fetch it to a register in that size.  */
		  xop0 = force_reg (bestmode, xop0);

		  /* XBITPOS counts within UNIT, which is what is expected.  */
		}
	      else
		/* Get ref to first byte containing part of the field.  */
		xop0 = change_address (xop0, byte_mode,
				       plus_constant (XEXP (xop0, 0), xoffset));
	    }

	  /* If op0 is a register, we need it in MAXMODE (which is usually
	     SImode) to make it acceptable to the format of extv.  */
	  if (GET_CODE (xop0) == SUBREG && GET_MODE (xop0) != maxmode)
	    goto extv_loses;
	  if (GET_CODE (xop0) == REG && GET_MODE (xop0) != maxmode)
	    xop0 = gen_rtx_SUBREG (maxmode, xop0, 0);

	  /* On big-endian machines, we count bits from the most significant.
	     If the bit field insn does not, we must invert.  */
	  if (BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
	    xbitpos = unit - bitsize - xbitpos;

	  /* XBITPOS counts within a size of UNIT.
	     Adjust to count within a size of MAXMODE.  */
	  if (BITS_BIG_ENDIAN && GET_CODE (xop0) != MEM)
	    xbitpos += (GET_MODE_BITSIZE (maxmode) - unit);

	  unit = GET_MODE_BITSIZE (maxmode);

	  if (xtarget == 0
	      || (flag_force_mem && GET_CODE (xtarget) == MEM))
	    xtarget = xspec_target = gen_reg_rtx (tmode);

	  if (GET_MODE (xtarget) != maxmode)
	    {
	      if (GET_CODE (xtarget) == REG)
		{
		  int wider = (GET_MODE_SIZE (maxmode)
			       > GET_MODE_SIZE (GET_MODE (xtarget)));
		  xtarget = gen_lowpart (maxmode, xtarget);
		  if (wider)
		    xspec_target_subreg = xtarget;
		}
	      else
		xtarget = gen_reg_rtx (maxmode);
	    }

	  /* If this machine's extv insists on a register target,
	     make sure we have one.  */
	  if (! ((*insn_operand_predicate[(int) CODE_FOR_extv][0])
		 (xtarget, maxmode)))
	    xtarget = gen_reg_rtx (maxmode);

	  bitsize_rtx = GEN_INT (bitsize);
	  bitpos_rtx = GEN_INT (xbitpos);

	  pat = gen_extv (protect_from_queue (xtarget, 1),
			  xop0, bitsize_rtx, bitpos_rtx);
	  if (pat)
	    {
	      emit_insn (pat);
	      target = xtarget;
	      spec_target = xspec_target;
	      spec_target_subreg = xspec_target_subreg;
	    }
	  else
	    {
	      delete_insns_since (last);
	      target = extract_fixed_bit_field (tmode, op0, offset, bitsize,
						bitpos, target, 0, align);
	    }
	}
      else
	extv_loses:
#endif
	target = extract_fixed_bit_field (tmode, op0, offset, bitsize, bitpos,
					  target, 0, align);
    }
  if (target == spec_target)
    return target;
  if (target == spec_target_subreg)
    return spec_target;
  if (GET_MODE (target) != tmode && GET_MODE (target) != mode)
    {
      /* If the target mode is floating-point, first convert to the
	 integer mode of that size and then access it as a floating-point
	 value via a SUBREG.  */
      if (GET_MODE_CLASS (tmode) == MODE_FLOAT)
	{
	  target = convert_to_mode (mode_for_size (GET_MODE_BITSIZE (tmode),
						   MODE_INT, 0),
				    target, unsignedp);
	  if (GET_CODE (target) != REG)
	    target = copy_to_reg (target);
	  return gen_rtx_SUBREG (tmode, target, 0);
	}
      else
	return convert_to_mode (tmode, target, unsignedp);
    }
  return target;
}
/* Extract a bit field using shifts and boolean operations.
   Returns an rtx to represent the value.
   OP0 addresses a register (word) or memory (byte).
   BITPOS says which bit within the word or byte the bit field starts in.
   OFFSET says how many bytes farther the bit field starts;
   it is 0 if OP0 is a register.
   BITSIZE says how many bits long the bit field is.
   (If OP0 is a register, it may be narrower than a full word,
   but BITPOS still counts within a full word,
   which is significant on bigendian machines.)

   UNSIGNEDP is nonzero for an unsigned bit field (don't sign-extend value).
   If TARGET is nonzero, attempts to store the value there
   and return TARGET, but this is not guaranteed.
   If TARGET is not used, create a pseudo-reg of mode TMODE for the value.

   ALIGN is the alignment that STR_RTX is known to have, measured in bytes.  */

static rtx
extract_fixed_bit_field (tmode, op0, offset, bitsize, bitpos,
			 target, unsignedp, align)
     enum machine_mode tmode;
     register rtx op0, target;
     register int offset, bitsize, bitpos;
     int unsignedp;
     int align;
{
  int total_bits = BITS_PER_WORD;
  enum machine_mode mode;

  if (GET_CODE (op0) == SUBREG || GET_CODE (op0) == REG)
    {
      /* Special treatment for a bit field split across two registers.  */
      if (bitsize + bitpos > BITS_PER_WORD)
	return extract_split_bit_field (op0, bitsize, bitpos,
					unsignedp, align);
    }
  else
    {
      /* Get the proper mode to use for this field.  We want a mode that
	 includes the entire field.  If such a mode would be larger than
	 a word, we won't be doing the extraction the normal way.  */

      mode = get_best_mode (bitsize, bitpos + offset * BITS_PER_UNIT,
			    align * BITS_PER_UNIT, word_mode,
			    GET_CODE (op0) == MEM && MEM_VOLATILE_P (op0));

      if (mode == VOIDmode)
	/* The only way this should occur is if the field spans word
	   boundaries.  */
	return extract_split_bit_field (op0, bitsize,
					bitpos + offset * BITS_PER_UNIT,
					unsignedp, align);

      total_bits = GET_MODE_BITSIZE (mode);

      /* Make sure bitpos is valid for the chosen mode.  Adjust BITPOS to
	 be in the range 0 to total_bits-1, and put any excess bytes in
	 OFFSET.  */
      if (bitpos >= total_bits)
	{
	  offset += (bitpos / total_bits) * (total_bits / BITS_PER_UNIT);
	  bitpos -= ((bitpos / total_bits) * (total_bits / BITS_PER_UNIT)
		     * BITS_PER_UNIT);
	}

      /* Get ref to an aligned byte, halfword, or word containing the field.
	 Adjust BITPOS to be position within a word,
	 and OFFSET to be the offset of that word.
	 Then alter OP0 to refer to that word.  */
      bitpos += (offset % (total_bits / BITS_PER_UNIT)) * BITS_PER_UNIT;
      offset -= (offset % (total_bits / BITS_PER_UNIT));
      op0 = change_address (op0, mode,
			    plus_constant (XEXP (op0, 0), offset));
    }

  mode = GET_MODE (op0);

  if (BYTES_BIG_ENDIAN)
    {
      /* BITPOS is the distance between our msb and that of OP0.
	 Convert it to the distance from the lsb.  */

      bitpos = total_bits - bitsize - bitpos;
    }

  /* Now BITPOS is always the distance between the field's lsb and that of OP0.
     We have reduced the big-endian case to the little-endian case.  */

  if (unsignedp)
    {
      if (bitpos)
	{
	  /* If the field does not already start at the lsb,
	     shift it so it does.  */
	  tree amount = build_int_2 (bitpos, 0);
	  /* Maybe propagate the target for the shift.  */
	  /* But not if we will return it--could confuse integrate.c.  */
	  rtx subtarget = (target != 0 && GET_CODE (target) == REG
			   && !REG_FUNCTION_VALUE_P (target)
			   ? target : 0);
	  if (tmode != mode) subtarget = 0;
	  op0 = expand_shift (RSHIFT_EXPR, mode, op0, amount, subtarget, 1);
	}
      /* Convert the value to the desired mode.  */
      if (mode != tmode)
	op0 = convert_to_mode (tmode, op0, 1);

      /* Unless the msb of the field used to be the msb when we shifted,
	 mask out the upper bits.  */

      if (GET_MODE_BITSIZE (mode) != bitpos + bitsize
#if 0
#ifdef SLOW_ZERO_EXTEND
	  /* Always generate an `and' if
	     we just zero-extended op0 and SLOW_ZERO_EXTEND, since it
	     will combine fruitfully with the zero-extend.  */
	  || tmode != mode
#endif
#endif
	  )
	return expand_binop (GET_MODE (op0), and_optab, op0,
			     mask_rtx (GET_MODE (op0), 0, bitsize, 0),
			     target, 1, OPTAB_LIB_WIDEN);
      return op0;
    }

  /* To extract a signed bit-field, first shift its msb to the msb of the word,
     then arithmetic-shift its lsb to the lsb of the word.  */
  op0 = force_reg (mode, op0);
  if (mode != tmode)
    target = 0;

  /* Find the narrowest integer mode that contains the field.  */

  for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != VOIDmode;
       mode = GET_MODE_WIDER_MODE (mode))
    if (GET_MODE_BITSIZE (mode) >= bitsize + bitpos)
      {
	op0 = convert_to_mode (mode, op0, 0);
	break;
      }

  if (GET_MODE_BITSIZE (mode) != (bitsize + bitpos))
    {
      tree amount = build_int_2 (GET_MODE_BITSIZE (mode) - (bitsize + bitpos), 0);
      /* Maybe propagate the target for the shift.  */
      /* But not if we will return the result--could confuse integrate.c.  */
      rtx subtarget = (target != 0 && GET_CODE (target) == REG
		       && ! REG_FUNCTION_VALUE_P (target)
		       ? target : 0);
      op0 = expand_shift (LSHIFT_EXPR, mode, op0, amount, subtarget, 1);
    }

  return expand_shift (RSHIFT_EXPR, mode, op0,
		       build_int_2 (GET_MODE_BITSIZE (mode) - bitsize, 0),
		       target, 0);
}
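
/* Illustrative sketch (not used anywhere): the unsigned path above,
   written as plain C for a hypothetical 32-bit word.  */
#if 0
static unsigned int
extract_field_example (unsigned int word)
{
  /* Extract an unsigned 8-bit field at bit position 4 of WORD:
     shift so the field starts at the lsb, then mask the upper bits.  */
  return (word >> 4) & 0xff;	/* mask is mask_rtx (SImode, 0, 8, 0) */
}
#endif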
/* Return a constant integer (CONST_INT or CONST_DOUBLE) mask value
   of mode MODE with BITSIZE ones followed by BITPOS zeros, or the
   complement of that if COMPLEMENT.  The mask is truncated if
   necessary to the width of mode MODE.  The mask is zero-extended if
   BITSIZE+BITPOS is too small for MODE.  */

static rtx
mask_rtx (mode, bitpos, bitsize, complement)
     enum machine_mode mode;
     int bitpos, bitsize, complement;
{
  HOST_WIDE_INT masklow, maskhigh;

  if (bitpos < HOST_BITS_PER_WIDE_INT)
    masklow = (HOST_WIDE_INT) -1 << bitpos;
  else
    masklow = 0;

  if (bitpos + bitsize < HOST_BITS_PER_WIDE_INT)
    masklow &= ((unsigned HOST_WIDE_INT) -1
		>> (HOST_BITS_PER_WIDE_INT - bitpos - bitsize));

  if (bitpos <= HOST_BITS_PER_WIDE_INT)
    maskhigh = -1;
  else
    maskhigh = (HOST_WIDE_INT) -1 << (bitpos - HOST_BITS_PER_WIDE_INT);

  if (bitpos + bitsize > HOST_BITS_PER_WIDE_INT)
    maskhigh &= ((unsigned HOST_WIDE_INT) -1
		 >> (2 * HOST_BITS_PER_WIDE_INT - bitpos - bitsize));
  else
    maskhigh = 0;

  if (complement)
    {
      maskhigh = ~maskhigh;
      masklow = ~masklow;
    }

  return immed_double_const (masklow, maskhigh, mode);
}
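
/* A worked example, assuming a 32-bit HOST_WIDE_INT and a 32-bit mode:
   mask_rtx (SImode, 4, 8, 0) computes

       masklow = -1 << 4;			   ...11110000
       masklow &= (unsigned) -1 >> (32 - 12);	   0x00000ff0

   i.e. eight ones starting at bit 4; with COMPLEMENT nonzero the result
   is ~0x00000ff0.  MASKHIGH only matters when the mask is wider than one
   HOST_WIDE_INT, in which case a CONST_DOUBLE is produced.  */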
/* Return a constant integer (CONST_INT or CONST_DOUBLE) rtx with the value
   VALUE truncated to BITSIZE bits and then shifted left BITPOS bits.  */

static rtx
lshift_value (mode, value, bitpos, bitsize)
     enum machine_mode mode;
     rtx value;
     int bitpos, bitsize;
{
  unsigned HOST_WIDE_INT v = INTVAL (value);
  HOST_WIDE_INT low, high;

  if (bitsize < HOST_BITS_PER_WIDE_INT)
    v &= ~((HOST_WIDE_INT) -1 << bitsize);

  if (bitpos < HOST_BITS_PER_WIDE_INT)
    {
      low = v << bitpos;
      high = (bitpos > 0 ? (v >> (HOST_BITS_PER_WIDE_INT - bitpos)) : 0);
    }
  else
    {
      low = 0;
      high = v << (bitpos - HOST_BITS_PER_WIDE_INT);
    }

  return immed_double_const (low, high, mode);
}
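
/* For example, with a 32-bit HOST_WIDE_INT, lshift_value (SImode,
   GEN_INT (0x1234), 4, 8) truncates the value to 8 bits (0x34) and
   shifts it into place, yielding (const_int 0x340).  */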
/* Extract a bit field that is split across two words
   and return an RTX for the result.

   OP0 is the REG, SUBREG or MEM rtx for the first of the two words.
   BITSIZE is the field width; BITPOS, position of its first bit, in the word.
   UNSIGNEDP is 1 if should zero-extend the contents; else sign-extend.

   ALIGN is the known alignment of OP0, measured in bytes.
   This is also the size of the memory objects to be used.  */

static rtx
extract_split_bit_field (op0, bitsize, bitpos, unsignedp, align)
     rtx op0;
     int bitsize, bitpos, unsignedp, align;
{
  int unit;
  int bitsdone = 0;
  rtx result;
  int first = 1;

  /* Make sure UNIT isn't larger than BITS_PER_WORD, we can only handle that
     much at a time.  */
  if (GET_CODE (op0) == REG || GET_CODE (op0) == SUBREG)
    unit = BITS_PER_WORD;
  else
    unit = MIN (align * BITS_PER_UNIT, BITS_PER_WORD);

  while (bitsdone < bitsize)
    {
      int thissize;
      rtx part, word;
      int thispos;
      int offset;

      offset = (bitpos + bitsdone) / unit;
      thispos = (bitpos + bitsdone) % unit;

      /* THISSIZE must not overrun a word boundary.  Otherwise,
	 extract_fixed_bit_field will call us again, and we will mutually
	 recurse forever.  */
      thissize = MIN (bitsize - bitsdone, BITS_PER_WORD);
      thissize = MIN (thissize, unit - thispos);

      /* If OP0 is a register, then handle OFFSET here.

	 When handling multiword bitfields, extract_bit_field may pass
	 down a word_mode SUBREG of a larger REG for a bitfield that actually
	 crosses a word boundary.  Thus, for a SUBREG, we must find
	 the current word starting from the base register.  */
      if (GET_CODE (op0) == SUBREG)
	{
	  word = operand_subword_force (SUBREG_REG (op0),
					SUBREG_WORD (op0) + offset,
					GET_MODE (SUBREG_REG (op0)));
	  offset = 0;
	}
      else if (GET_CODE (op0) == REG)
	{
	  word = operand_subword_force (op0, offset, GET_MODE (op0));
	  offset = 0;
	}
      else
	word = op0;

      /* Extract the parts in bit-counting order,
	 whose meaning is determined by BYTES_PER_UNIT.
	 OFFSET is in UNITs, and UNIT is in bits.
	 extract_fixed_bit_field wants offset in bytes.  */
      part = extract_fixed_bit_field (word_mode, word,
				      offset * unit / BITS_PER_UNIT,
				      thissize, thispos, 0, 1, align);
      bitsdone += thissize;

      /* Shift this part into place for the result.  */
      if (BYTES_BIG_ENDIAN)
	{
	  if (bitsize != bitsdone)
	    part = expand_shift (LSHIFT_EXPR, word_mode, part,
				 build_int_2 (bitsize - bitsdone, 0), 0, 1);
	}
      else
	{
	  if (bitsdone != thissize)
	    part = expand_shift (LSHIFT_EXPR, word_mode, part,
				 build_int_2 (bitsdone - thissize, 0), 0, 1);
	}

      if (first)
	result = part;
      else
	/* Combine the parts with bitwise or.  This works
	   because we extracted each part as an unsigned bit field.  */
	result = expand_binop (word_mode, ior_optab, part, result, NULL_RTX, 1,
			       OPTAB_LIB_WIDEN);

      first = 0;
    }

  /* Unsigned bit field: we are done.  */
  if (unsignedp)
    return result;
  /* Signed bit field: sign-extend with two arithmetic shifts.  */
  result = expand_shift (LSHIFT_EXPR, word_mode, result,
			 build_int_2 (BITS_PER_WORD - bitsize, 0),
			 NULL_RTX, 0);
  return expand_shift (RSHIFT_EXPR, word_mode, result,
		       build_int_2 (BITS_PER_WORD - bitsize, 0), NULL_RTX, 0);
}
/* Add INC into TARGET.  */

void
expand_inc (target, inc)
     rtx target, inc;
{
  rtx value = expand_binop (GET_MODE (target), add_optab,
			    target, inc,
			    target, 0, OPTAB_LIB_WIDEN);
  if (value != target)
    emit_move_insn (target, value);
}

/* Subtract DEC from TARGET.  */

void
expand_dec (target, dec)
     rtx target, dec;
{
  rtx value = expand_binop (GET_MODE (target), sub_optab,
			    target, dec,
			    target, 0, OPTAB_LIB_WIDEN);
  if (value != target)
    emit_move_insn (target, value);
}
1735 /* Output a shift instruction for expression code CODE,
1736 with SHIFTED being the rtx for the value to shift,
1737 and AMOUNT the tree for the amount to shift by.
1738 Store the result in the rtx TARGET, if that is convenient.
1739 If UNSIGNEDP is nonzero, do a logical shift; otherwise, arithmetic.
1740 Return the rtx for where the value is. */
1743 expand_shift (code, mode, shifted, amount, target, unsignedp)
1744 enum tree_code code;
1745 register enum machine_mode mode;
1746 rtx shifted;
1747 tree amount;
1748 register rtx target;
1749 int unsignedp;
1751 register rtx op1, temp = 0;
1752 register int left = (code == LSHIFT_EXPR || code == LROTATE_EXPR);
1753 register int rotate = (code == LROTATE_EXPR || code == RROTATE_EXPR);
1754 int try;
1756 /* Previously detected shift-counts computed by NEGATE_EXPR
1757 and shifted in the other direction; but that does not work
1758 on all machines. */
1760 op1 = expand_expr (amount, NULL_RTX, VOIDmode, 0);
1762 #ifdef SHIFT_COUNT_TRUNCATED
1763 if (SHIFT_COUNT_TRUNCATED)
1765 if (GET_CODE (op1) == CONST_INT
1766 && (unsigned HOST_WIDE_INT) INTVAL (op1) >= GET_MODE_BITSIZE (mode))
1767 op1 = GEN_INT ((unsigned HOST_WIDE_INT) INTVAL (op1)
1768 % GET_MODE_BITSIZE (mode));
1769 else if (GET_CODE (op1) == SUBREG
1770 && SUBREG_WORD (op1) == 0)
1771 op1 = SUBREG_REG (op1);
1773 #endif
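/* Illustrative example (editorial note, not in the original source):
   on a target defining SHIFT_COUNT_TRUNCATED, a constant SImode shift
   count of 37 is reduced here to 37 % 32 == 5, matching what such
   hardware does with the low-order bits of the count.  */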
1775 if (op1 == const0_rtx)
1776 return shifted;
1778 for (try = 0; temp == 0 && try < 3; try++)
1780 enum optab_methods methods;
1782 if (try == 0)
1783 methods = OPTAB_DIRECT;
1784 else if (try == 1)
1785 methods = OPTAB_WIDEN;
1786 else
1787 methods = OPTAB_LIB_WIDEN;
1789 if (rotate)
1791 /* Widening does not work for rotation. */
1792 if (methods == OPTAB_WIDEN)
1793 continue;
1794 else if (methods == OPTAB_LIB_WIDEN)
1796 /* If we have been unable to open-code this by a rotation,
1797 do it as the IOR of two shifts. I.e., to rotate A
1798 by N bits, compute (A << N) | ((unsigned) A >> (C - N))
1799 where C is the bitsize of A.
1801 It is theoretically possible that the target machine might
1802 not be able to perform either shift and hence we would
1803 be making two libcalls rather than just the one for the
1804 shift (similarly if IOR could not be done). We will allow
1805 this extremely unlikely lossage to avoid complicating the
1806 code below. */
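/* Illustrative example (editorial note, not in the original source):
   for an 8-bit A == 10010110 rotated left by N == 3, the expansion is
   (A << 3) | ((unsigned) A >> 5), i.e. 10110000 | 00000100 == 10110100,
   which is indeed A rotated left by three bit positions.  */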
1808 rtx subtarget = target == shifted ? 0 : target;
1809 rtx temp1;
1810 tree type = TREE_TYPE (amount);
1811 tree new_amount = make_tree (type, op1);
1812 tree other_amount
1813 = fold (build (MINUS_EXPR, type,
1814 convert (type,
1815 build_int_2 (GET_MODE_BITSIZE (mode),
1816 0)),
1817 amount));
1819 shifted = force_reg (mode, shifted);
1821 temp = expand_shift (left ? LSHIFT_EXPR : RSHIFT_EXPR,
1822 mode, shifted, new_amount, subtarget, 1);
1823 temp1 = expand_shift (left ? RSHIFT_EXPR : LSHIFT_EXPR,
1824 mode, shifted, other_amount, 0, 1);
1825 return expand_binop (mode, ior_optab, temp, temp1, target,
1826 unsignedp, methods);
1829 temp = expand_binop (mode,
1830 left ? rotl_optab : rotr_optab,
1831 shifted, op1, target, unsignedp, methods);
1833 /* If we don't have the rotate, but we are rotating by a constant
1834 that is in range, try a rotate in the opposite direction. */
1836 if (temp == 0 && GET_CODE (op1) == CONST_INT
1837 && INTVAL (op1) > 0 && INTVAL (op1) < GET_MODE_BITSIZE (mode))
1838 temp = expand_binop (mode,
1839 left ? rotr_optab : rotl_optab,
1840 shifted,
1841 GEN_INT (GET_MODE_BITSIZE (mode)
1842 - INTVAL (op1)),
1843 target, unsignedp, methods);
1845 else if (unsignedp)
1846 temp = expand_binop (mode,
1847 left ? ashl_optab : lshr_optab,
1848 shifted, op1, target, unsignedp, methods);
1850 /* Do arithmetic shifts.
1851 Also, if we are going to widen the operand, we can just as well
1852 use an arithmetic right-shift instead of a logical one. */
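/* Editorial note (not in the original source): this is safe because a
   logical right shift of an N-bit value equals an arithmetic right
   shift of its zero-extension to a wider mode, truncated back to N
   bits -- the zero-extended operand has a clear sign bit.  */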
1853 if (temp == 0 && ! rotate
1854 && (! unsignedp || (! left && methods == OPTAB_WIDEN)))
1856 enum optab_methods methods1 = methods;
1858 /* If trying to widen a logical shift to an arithmetic shift,
1859 don't accept an arithmetic shift of the same size. */
1860 if (unsignedp)
1861 methods1 = OPTAB_MUST_WIDEN;
1863 /* Arithmetic shift */
1865 temp = expand_binop (mode,
1866 left ? ashl_optab : ashr_optab,
1867 shifted, op1, target, unsignedp, methods1);
1870 /* We used to try extzv here for logical right shifts, but that was
1871 only useful for one machine, the VAX, and caused poor code
1872 generation there for lshrdi3, so the code was deleted and a
1873 define_expand for lshrsi3 was added to vax.md. */
1876 if (temp == 0)
1877 abort ();
1878 return temp;
1881 enum alg_code { alg_zero, alg_m, alg_shift,
1882 alg_add_t_m2, alg_sub_t_m2,
1883 alg_add_factor, alg_sub_factor,
1884 alg_add_t2_m, alg_sub_t2_m,
1885 alg_add, alg_subtract, alg_factor, alg_shiftop };
1887 /* This structure records a sequence of operations.
1888 `ops' is the number of operations recorded.
1889 `cost' is their total cost.
1890 The operations are stored in `op' and the corresponding
1891 logarithms of the integer coefficients in `log'.
1893 These are the operations:
1894 alg_zero total := 0;
1895 alg_m total := multiplicand;
1896 alg_shift total := total * coeff;
1897 alg_add_t_m2 total := total + multiplicand * coeff;
1898 alg_sub_t_m2 total := total - multiplicand * coeff;
1899 alg_add_factor total := total * coeff + total;
1900 alg_sub_factor total := total * coeff - total;
1901 alg_add_t2_m total := total * coeff + multiplicand;
1902 alg_sub_t2_m total := total * coeff - multiplicand;
1904 The first operand must be either alg_zero or alg_m. */
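/* Illustrative example (editorial note, not in the original source):
   multiplication by 10 can be recorded as the sequence
   op[0] = alg_m (total := multiplicand),
   op[1] = alg_add_t2_m with log == 2 (total := total*4 + multiplicand),
   op[2] = alg_shift with log == 1 (total := total*2),
   computing ((x*4 + x) * 2) == 10*x with one shift-add and one shift.  */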
1906 struct algorithm
1907 {
1908 short cost;
1909 short ops;
1910 /* The size of the OP and LOG fields is not directly related to the
1911 word size, but the worst case arises when we have few consecutive
1912 ones or zeros, i.e., a multiplicand like 10101010101...
1913 In that case we will generate shift-by-2, add, shift-by-2, add, ...,
1914 in total wordsize operations. */
1915 enum alg_code op[MAX_BITS_PER_WORD];
1916 char log[MAX_BITS_PER_WORD];
1917 };
1919 static void synth_mult PROTO((struct algorithm *,
1920 unsigned HOST_WIDE_INT,
1921 int));
1922 static unsigned HOST_WIDE_INT choose_multiplier PROTO((unsigned HOST_WIDE_INT,
1923 int, int,
1924 unsigned HOST_WIDE_INT *,
1925 int *, int *));
1926 static unsigned HOST_WIDE_INT invert_mod2n PROTO((unsigned HOST_WIDE_INT,
1927 int));
1928 /* Compute and return the best algorithm for multiplying by T.
1929 The algorithm must cost less than COST_LIMIT.
1930 If retval.cost >= COST_LIMIT, no algorithm was found and all
1931 other fields of the returned struct are undefined. */
1933 static void
1934 synth_mult (alg_out, t, cost_limit)
1935 struct algorithm *alg_out;
1936 unsigned HOST_WIDE_INT t;
1937 int cost_limit;
1939 int m;
1940 struct algorithm *alg_in, *best_alg;
1941 unsigned int cost;
1942 unsigned HOST_WIDE_INT q;
1944 /* Indicate that no algorithm is yet found. If no algorithm
1945 is found, this value will be returned, indicating failure. */
1946 alg_out->cost = cost_limit;
1948 if (cost_limit <= 0)
1949 return;
1951 /* t == 1 can be done at zero cost. */
1952 if (t == 1)
1954 alg_out->ops = 1;
1955 alg_out->cost = 0;
1956 alg_out->op[0] = alg_m;
1957 return;
1960 /* t == 0 sometimes has a cost. If it does and it exceeds our limit,
1961 fail now. */
1962 if (t == 0)
1964 if (zero_cost >= cost_limit)
1965 return;
1966 else
1968 alg_out->ops = 1;
1969 alg_out->cost = zero_cost;
1970 alg_out->op[0] = alg_zero;
1971 return;
1975 /* We'll be needing a couple extra algorithm structures now. */
1977 alg_in = (struct algorithm *)alloca (sizeof (struct algorithm));
1978 best_alg = (struct algorithm *)alloca (sizeof (struct algorithm));
1980 /* If we have a group of zero bits at the low-order part of T, try
1981 multiplying by the remaining bits and then doing a shift. */
1983 if ((t & 1) == 0)
1985 m = floor_log2 (t & -t); /* m = number of low zero bits */
1986 q = t >> m;
1987 cost = shift_cost[m];
1988 synth_mult (alg_in, q, cost_limit - cost);
1990 cost += alg_in->cost;
1991 if (cost < cost_limit)
1993 struct algorithm *x;
1994 x = alg_in, alg_in = best_alg, best_alg = x;
1995 best_alg->log[best_alg->ops] = m;
1996 best_alg->op[best_alg->ops] = alg_shift;
1997 cost_limit = cost;
2001 /* If we have an odd number, add or subtract one. */
2002 if ((t & 1) != 0)
2004 unsigned HOST_WIDE_INT w;
2006 for (w = 1; (w & t) != 0; w <<= 1)
2008 /* If T was -1, then W will be zero after the loop. This is another
2009 case where T ends with ...111. Handling this by multiplying by
2010 (T + 1) and subtracting 1 produces slightly better code, and makes
2011 algorithm selection much faster than treating it like the ...0111
2012 case below. */
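/* Illustrative example (editorial note, not in the original source):
   for t == 7 (binary 111) the loop leaves w == 8, so we recurse on
   t + 1 == 8 (a single shift) and record alg_sub_t_m2, computing
   7*x as 8*x - x.  */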
2013 if (w == 0
2014 || (w > 2
2015 /* Reject the case where t is 3.
2016 Thus we prefer addition in that case. */
2017 && t != 3))
2019 /* T ends with ...111. Multiply by (T + 1) and subtract 1. */
2021 cost = add_cost;
2022 synth_mult (alg_in, t + 1, cost_limit - cost);
2024 cost += alg_in->cost;
2025 if (cost < cost_limit)
2027 struct algorithm *x;
2028 x = alg_in, alg_in = best_alg, best_alg = x;
2029 best_alg->log[best_alg->ops] = 0;
2030 best_alg->op[best_alg->ops] = alg_sub_t_m2;
2031 cost_limit = cost;
2034 else
2036 /* T ends with ...01 or ...011. Multiply by (T - 1) and add 1. */
2038 cost = add_cost;
2039 synth_mult (alg_in, t - 1, cost_limit - cost);
2041 cost += alg_in->cost;
2042 if (cost < cost_limit)
2044 struct algorithm *x;
2045 x = alg_in, alg_in = best_alg, best_alg = x;
2046 best_alg->log[best_alg->ops] = 0;
2047 best_alg->op[best_alg->ops] = alg_add_t_m2;
2048 cost_limit = cost;
2053 /* Look for factors of t of the form
2054 t = q(2**m +- 1), 2 <= m <= floor(log2(t - 1)).
2055 If we find such a factor, we can multiply by t using an algorithm that
2056 multiplies by q, shifts the result by m, and adds/subtracts it to itself.
2058 We search for large factors first and loop down, even if large factors
2059 are less probable than small ones; if we find a large factor we will
2060 find a good sequence quickly, and therefore be able to prune (by
2061 decreasing COST_LIMIT) the search. */
2063 for (m = floor_log2 (t - 1); m >= 2; m--)
2065 unsigned HOST_WIDE_INT d;
2067 d = ((unsigned HOST_WIDE_INT) 1 << m) + 1;
2068 if (t % d == 0 && t > d)
2070 cost = MIN (shiftadd_cost[m], add_cost + shift_cost[m]);
2071 synth_mult (alg_in, t / d, cost_limit - cost);
2073 cost += alg_in->cost;
2074 if (cost < cost_limit)
2076 struct algorithm *x;
2077 x = alg_in, alg_in = best_alg, best_alg = x;
2078 best_alg->log[best_alg->ops] = m;
2079 best_alg->op[best_alg->ops] = alg_add_factor;
2080 cost_limit = cost;
2082 /* Other factors will have been taken care of in the recursion. */
2083 break;
2086 d = ((unsigned HOST_WIDE_INT) 1 << m) - 1;
2087 if (t % d == 0 && t > d)
2089 cost = MIN (shiftsub_cost[m], add_cost + shift_cost[m]);
2090 synth_mult (alg_in, t / d, cost_limit - cost);
2092 cost += alg_in->cost;
2093 if (cost < cost_limit)
2095 struct algorithm *x;
2096 x = alg_in, alg_in = best_alg, best_alg = x;
2097 best_alg->log[best_alg->ops] = m;
2098 best_alg->op[best_alg->ops] = alg_sub_factor;
2099 cost_limit = cost;
2101 break;
2105 /* Try shift-and-add (load effective address) instructions,
2106 i.e. do a*3, a*5, a*9. */
2107 if ((t & 1) != 0)
2109 q = t - 1;
2110 q = q & -q;
2111 m = exact_log2 (q);
2112 if (m >= 0)
2114 cost = shiftadd_cost[m];
2115 synth_mult (alg_in, (t - 1) >> m, cost_limit - cost);
2117 cost += alg_in->cost;
2118 if (cost < cost_limit)
2120 struct algorithm *x;
2121 x = alg_in, alg_in = best_alg, best_alg = x;
2122 best_alg->log[best_alg->ops] = m;
2123 best_alg->op[best_alg->ops] = alg_add_t2_m;
2124 cost_limit = cost;
2128 q = t + 1;
2129 q = q & -q;
2130 m = exact_log2 (q);
2131 if (m >= 0)
2133 cost = shiftsub_cost[m];
2134 synth_mult (alg_in, (t + 1) >> m, cost_limit - cost);
2136 cost += alg_in->cost;
2137 if (cost < cost_limit)
2139 struct algorithm *x;
2140 x = alg_in, alg_in = best_alg, best_alg = x;
2141 best_alg->log[best_alg->ops] = m;
2142 best_alg->op[best_alg->ops] = alg_sub_t2_m;
2143 cost_limit = cost;
2148 /* If cost_limit has not decreased since we stored it in alg_out->cost,
2149 we have not found any algorithm. */
2150 if (cost_limit == alg_out->cost)
2151 return;
2153 /* If the sequence is getting too long for `struct algorithm'
2154 to record, make this search fail. */
2155 if (best_alg->ops == MAX_BITS_PER_WORD)
2156 return;
2158 /* Copy the algorithm from temporary space to the space at alg_out.
2159 We avoid using structure assignment because the majority of
2160 best_alg is normally undefined, and this is a critical function. */
2161 alg_out->ops = best_alg->ops + 1;
2162 alg_out->cost = cost_limit;
2163 bcopy ((char *) best_alg->op, (char *) alg_out->op,
2164 alg_out->ops * sizeof *alg_out->op);
2165 bcopy ((char *) best_alg->log, (char *) alg_out->log,
2166 alg_out->ops * sizeof *alg_out->log);
2169 /* Perform a multiplication and return an rtx for the result.
2170 MODE is mode of value; OP0 and OP1 are what to multiply (rtx's);
2171 TARGET is a suggestion for where to store the result (an rtx).
2173 We check specially for a constant integer as OP1.
2174 If you want this check for OP0 as well, then before calling
2175 you should swap the two operands if OP0 would be constant. */
2177 rtx
2178 expand_mult (mode, op0, op1, target, unsignedp)
2179 enum machine_mode mode;
2180 register rtx op0, op1, target;
2181 int unsignedp;
2183 rtx const_op1 = op1;
2185 /* synth_mult does an `unsigned int' multiply. As long as the mode is
2186 less than or equal in size to `unsigned int' this doesn't matter.
2187 If the mode is larger than `unsigned int', then synth_mult works only
2188 if the constant value exactly fits in an `unsigned int' without any
2189 truncation. This means that multiplying by negative values does
2190 not work; results are off by 2^32 on a 32-bit machine. */
2192 /* If we are multiplying in DImode, it may still be a win
2193 to try to work with shifts and adds. */
2194 if (GET_CODE (op1) == CONST_DOUBLE
2195 && GET_MODE_CLASS (GET_MODE (op1)) == MODE_INT
2196 && HOST_BITS_PER_INT >= BITS_PER_WORD
2197 && CONST_DOUBLE_HIGH (op1) == 0)
2198 const_op1 = GEN_INT (CONST_DOUBLE_LOW (op1));
2199 else if (HOST_BITS_PER_INT < GET_MODE_BITSIZE (mode)
2200 && GET_CODE (op1) == CONST_INT
2201 && INTVAL (op1) < 0)
2202 const_op1 = 0;
2204 /* We used to test optimize here, on the grounds that it's better to
2205 produce a smaller program when -O is not used.
2206 But this causes such a terrible slowdown sometimes
2207 that it seems better to use synth_mult always. */
2209 if (const_op1 && GET_CODE (const_op1) == CONST_INT)
2211 struct algorithm alg;
2212 struct algorithm alg2;
2213 HOST_WIDE_INT val = INTVAL (op1);
2214 HOST_WIDE_INT val_so_far;
2215 rtx insn;
2216 int mult_cost;
2217 enum {basic_variant, negate_variant, add_variant} variant = basic_variant;
2219 /* Try to do the computation three ways: multiply by the negative of OP1
2220 and then negate, do the multiplication directly, or do multiplication
2221 by OP1 - 1. */
2223 mult_cost = rtx_cost (gen_rtx_MULT (mode, op0, op1), SET);
2224 mult_cost = MIN (12 * add_cost, mult_cost);
2226 synth_mult (&alg, val, mult_cost);
2228 /* This works only if the inverted value actually fits in an
2229 `unsigned int'. */
2230 if (HOST_BITS_PER_INT >= GET_MODE_BITSIZE (mode))
2232 synth_mult (&alg2, - val,
2233 (alg.cost < mult_cost ? alg.cost : mult_cost) - negate_cost);
2234 if (alg2.cost + negate_cost < alg.cost)
2235 alg = alg2, variant = negate_variant;
2238 /* This proves very useful for division-by-constant. */
2239 synth_mult (&alg2, val - 1,
2240 (alg.cost < mult_cost ? alg.cost : mult_cost) - add_cost);
2241 if (alg2.cost + add_cost < alg.cost)
2242 alg = alg2, variant = add_variant;
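/* Illustrative example (editorial note, not in the original source):
   multiplying by -5 can be done by computing 5*x with a single
   shift-add and recording negate_variant, at the price of one extra
   negation; the add_variant similarly tries OP1 - 1 plus a final
   addition. Each variant is kept only when its total cost beats both
   the direct algorithm and a multiply insn.  */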
2244 if (alg.cost < mult_cost)
2246 /* We found something cheaper than a multiply insn. */
2247 int opno;
2248 rtx accum, tem;
2250 op0 = protect_from_queue (op0, 0);
2252 /* Avoid referencing memory over and over.
2253 For speed, but also for correctness when mem is volatile. */
2254 if (GET_CODE (op0) == MEM)
2255 op0 = force_reg (mode, op0);
2257 /* ACCUM starts out either as OP0 or as a zero, depending on
2258 the first operation. */
2260 if (alg.op[0] == alg_zero)
2262 accum = copy_to_mode_reg (mode, const0_rtx);
2263 val_so_far = 0;
2265 else if (alg.op[0] == alg_m)
2267 accum = copy_to_mode_reg (mode, op0);
2268 val_so_far = 1;
2270 else
2271 abort ();
2273 for (opno = 1; opno < alg.ops; opno++)
2275 int log = alg.log[opno];
2276 int preserve = preserve_subexpressions_p ();
2277 rtx shift_subtarget = preserve ? 0 : accum;
2278 rtx add_target
2279 = (opno == alg.ops - 1 && target != 0 && variant != add_variant
2280 ? target : 0);
2281 rtx accum_target = preserve ? 0 : accum;
2283 switch (alg.op[opno])
2285 case alg_shift:
2286 accum = expand_shift (LSHIFT_EXPR, mode, accum,
2287 build_int_2 (log, 0), NULL_RTX, 0);
2288 val_so_far <<= log;
2289 break;
2291 case alg_add_t_m2:
2292 tem = expand_shift (LSHIFT_EXPR, mode, op0,
2293 build_int_2 (log, 0), NULL_RTX, 0);
2294 accum = force_operand (gen_rtx_PLUS (mode, accum, tem),
2295 add_target ? add_target : accum_target);
2296 val_so_far += (HOST_WIDE_INT) 1 << log;
2297 break;
2299 case alg_sub_t_m2:
2300 tem = expand_shift (LSHIFT_EXPR, mode, op0,
2301 build_int_2 (log, 0), NULL_RTX, 0);
2302 accum = force_operand (gen_rtx_MINUS (mode, accum, tem),
2303 add_target ? add_target : accum_target);
2304 val_so_far -= (HOST_WIDE_INT) 1 << log;
2305 break;
2307 case alg_add_t2_m:
2308 accum = expand_shift (LSHIFT_EXPR, mode, accum,
2309 build_int_2 (log, 0), shift_subtarget,
2311 accum = force_operand (gen_rtx_PLUS (mode, accum, op0),
2312 add_target ? add_target : accum_target);
2313 val_so_far = (val_so_far << log) + 1;
2314 break;
2316 case alg_sub_t2_m:
2317 accum = expand_shift (LSHIFT_EXPR, mode, accum,
2318 build_int_2 (log, 0), shift_subtarget,
2320 accum = force_operand (gen_rtx_MINUS (mode, accum, op0),
2321 add_target ? add_target : accum_target);
2322 val_so_far = (val_so_far << log) - 1;
2323 break;
2325 case alg_add_factor:
2326 tem = expand_shift (LSHIFT_EXPR, mode, accum,
2327 build_int_2 (log, 0), NULL_RTX, 0);
2328 accum = force_operand (gen_rtx_PLUS (mode, accum, tem),
2329 add_target ? add_target : accum_target);
2330 val_so_far += val_so_far << log;
2331 break;
2333 case alg_sub_factor:
2334 tem = expand_shift (LSHIFT_EXPR, mode, accum,
2335 build_int_2 (log, 0), NULL_RTX, 0);
2336 accum = force_operand (gen_rtx_MINUS (mode, tem, accum),
2337 (add_target ? add_target
2338 : preserve ? 0 : tem));
2339 val_so_far = (val_so_far << log) - val_so_far;
2340 break;
2342 default:
2343 abort ();
2346 /* Write a REG_EQUAL note on the last insn so that we can cse
2347 multiplication sequences. */
2349 insn = get_last_insn ();
2350 REG_NOTES (insn)
2351 = gen_rtx_EXPR_LIST (REG_EQUAL,
2352 gen_rtx_MULT (mode, op0, GEN_INT (val_so_far)),
2353 REG_NOTES (insn));
2356 if (variant == negate_variant)
2358 val_so_far = - val_so_far;
2359 accum = expand_unop (mode, neg_optab, accum, target, 0);
2361 else if (variant == add_variant)
2363 val_so_far = val_so_far + 1;
2364 accum = force_operand (gen_rtx_PLUS (mode, accum, op0), target);
2367 if (val != val_so_far)
2368 abort ();
2370 return accum;
2374 /* This used to use umul_optab if unsigned, but for non-widening multiply
2375 there is no difference between signed and unsigned. */
2376 op0 = expand_binop (mode, smul_optab,
2377 op0, op1, target, unsignedp, OPTAB_LIB_WIDEN);
2378 if (op0 == 0)
2379 abort ();
2380 return op0;
2383 /* Return the smallest n such that 2**n >= X. */
2385 int
2386 ceil_log2 (x)
2387 unsigned HOST_WIDE_INT x;
2389 return floor_log2 (x - 1) + 1;
2392 /* Choose a minimal N + 1 bit approximation to 1/D that can be used to
2393 replace division by D, and put the least significant N bits of the result
2394 in *MULTIPLIER_PTR and return the most significant bit.
2396 The width of operations is N (should be <= HOST_BITS_PER_WIDE_INT), the
2397 needed precision is in PRECISION (should be <= N).
2399 PRECISION should be as small as possible so this function can choose
2400 the multiplier more freely.
2402 The rounded-up logarithm of D is placed in *LGUP_PTR. A shift count that
2403 is to be used for a final right shift is placed in *POST_SHIFT_PTR.
2405 Using this function, x/D will be equal to (x * m) >> (*POST_SHIFT_PTR),
2406 where m is the full HOST_BITS_PER_WIDE_INT + 1 bit multiplier. */
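/* Illustrative example (editorial note, not in the original source):
   for D == 5, N == PRECISION == 8 we get lgup == 3,
   mlow == 2^11/5 == 409 and mhigh == (2^11 + 2^3)/5 == 411; halving
   both while the halves stay distinct leaves mhigh == 205 with a
   post-shift of 2, so x/5 == high8 (x * 205) >> 2 for any 8-bit
   unsigned x (e.g. x == 255: (255*205) >> 10 == 51 == 255/5).  */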
2408 static
2409 unsigned HOST_WIDE_INT
2410 choose_multiplier (d, n, precision, multiplier_ptr, post_shift_ptr, lgup_ptr)
2411 unsigned HOST_WIDE_INT d;
2412 int n;
2413 int precision;
2414 unsigned HOST_WIDE_INT *multiplier_ptr;
2415 int *post_shift_ptr;
2416 int *lgup_ptr;
2418 unsigned HOST_WIDE_INT mhigh_hi, mhigh_lo;
2419 unsigned HOST_WIDE_INT mlow_hi, mlow_lo;
2420 int lgup, post_shift;
2421 int pow, pow2;
2422 unsigned HOST_WIDE_INT nh, nl, dummy1, dummy2;
2424 /* lgup = ceil(log2(divisor)); */
2425 lgup = ceil_log2 (d);
2427 if (lgup > n)
2428 abort ();
2430 pow = n + lgup;
2431 pow2 = n + lgup - precision;
2433 if (pow == 2 * HOST_BITS_PER_WIDE_INT)
2435 /* We could handle this with some effort, but this case is much better
2436 handled directly with a scc insn, so rely on caller using that. */
2437 abort ();
2440 /* mlow = 2^(N + lgup)/d */
2441 if (pow >= HOST_BITS_PER_WIDE_INT)
2443 nh = (unsigned HOST_WIDE_INT) 1 << (pow - HOST_BITS_PER_WIDE_INT);
2444 nl = 0;
2446 else
2448 nh = 0;
2449 nl = (unsigned HOST_WIDE_INT) 1 << pow;
2451 div_and_round_double (TRUNC_DIV_EXPR, 1, nl, nh, d, (HOST_WIDE_INT) 0,
2452 &mlow_lo, &mlow_hi, &dummy1, &dummy2);
2454 /* mhigh = (2^(N + lgup) + 2^(N + lgup - precision))/d */
2455 if (pow2 >= HOST_BITS_PER_WIDE_INT)
2456 nh |= (unsigned HOST_WIDE_INT) 1 << (pow2 - HOST_BITS_PER_WIDE_INT);
2457 else
2458 nl |= (unsigned HOST_WIDE_INT) 1 << pow2;
2459 div_and_round_double (TRUNC_DIV_EXPR, 1, nl, nh, d, (HOST_WIDE_INT) 0,
2460 &mhigh_lo, &mhigh_hi, &dummy1, &dummy2);
2462 if (mhigh_hi && nh - d >= d)
2463 abort ();
2464 if (mhigh_hi > 1 || mlow_hi > 1)
2465 abort ();
2466 /* Assert that mlow < mhigh. */
2467 if (! (mlow_hi < mhigh_hi || (mlow_hi == mhigh_hi && mlow_lo < mhigh_lo)))
2468 abort ();
2470 /* If precision == N, then mlow, mhigh exceed 2^N
2471 (but they do not exceed 2^(N+1)). */
2473 /* Reduce to lowest terms */
2474 for (post_shift = lgup; post_shift > 0; post_shift--)
2476 unsigned HOST_WIDE_INT ml_lo = (mlow_hi << (HOST_BITS_PER_WIDE_INT - 1)) | (mlow_lo >> 1);
2477 unsigned HOST_WIDE_INT mh_lo = (mhigh_hi << (HOST_BITS_PER_WIDE_INT - 1)) | (mhigh_lo >> 1);
2478 if (ml_lo >= mh_lo)
2479 break;
2481 mlow_hi = 0;
2482 mlow_lo = ml_lo;
2483 mhigh_hi = 0;
2484 mhigh_lo = mh_lo;
2487 *post_shift_ptr = post_shift;
2488 *lgup_ptr = lgup;
2489 if (n < HOST_BITS_PER_WIDE_INT)
2491 unsigned HOST_WIDE_INT mask = ((unsigned HOST_WIDE_INT) 1 << n) - 1;
2492 *multiplier_ptr = mhigh_lo & mask;
2493 return mhigh_lo >= mask;
2495 else
2497 *multiplier_ptr = mhigh_lo;
2498 return mhigh_hi;
2502 /* Compute the inverse of X mod 2**N, i.e., find Y such that X * Y is
2503 congruent to 1 (mod 2**N). */
2505 static unsigned HOST_WIDE_INT
2506 invert_mod2n (x, n)
2507 unsigned HOST_WIDE_INT x;
2508 int n;
2510 /* Solve x*y == 1 (mod 2^n), where x is odd. Return y. */
2512 /* The algorithm notes that the choice y = x satisfies
2513 x*y == 1 mod 2^3, since x is assumed odd.
2514 Each iteration doubles the number of bits of significance in y. */
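/* Illustrative example (editorial note, not in the original source):
   for x == 3, n == 8: y starts as 3 (3*3 == 9 == 1 mod 8); one step
   gives y == 3*(2 - 9) mod 256 == 235 (3*235 == 705 == 1 mod 64);
   a second step gives y == 171, and indeed 3*171 == 513 == 1 mod 256.  */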
2516 unsigned HOST_WIDE_INT mask;
2517 unsigned HOST_WIDE_INT y = x;
2518 int nbit = 3;
2520 mask = (n == HOST_BITS_PER_WIDE_INT
2521 ? ~(unsigned HOST_WIDE_INT) 0
2522 : ((unsigned HOST_WIDE_INT) 1 << n) - 1);
2524 while (nbit < n)
2526 y = y * (2 - x*y) & mask; /* Modulo 2^N */
2527 nbit *= 2;
2529 return y;
2532 /* Emit code to adjust ADJ_OPERAND after multiplication of wrong signedness
2533 flavor of OP0 and OP1. ADJ_OPERAND is already the high half of the
2534 product OP0 x OP1. If UNSIGNEDP is nonzero, adjust the signed product
2535 to become unsigned; if UNSIGNEDP is zero, adjust the unsigned product to
2536 become signed.
2538 The result is put in TARGET if that is convenient.
2540 MODE is the mode of operation. */
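/* Editorial derivation (not in the original source): interpreting an
   N-bit value X as signed (Xs) or unsigned (Xu) differs by
   Xu == Xs + 2^N when Xs < 0. Hence
   Xu*Yu == Xs*Ys + 2^N*(Xs<0 ? Ys : 0) + 2^N*(Ys<0 ? Xs : 0) + 2^(2N)*(...),
   so the unsigned high part equals the signed high part plus
   (Xs<0 ? Ys : 0) + (Ys<0 ? Xs : 0) modulo 2^N, which is what the code
   below adds or subtracts using the arithmetic-shift sign masks.  */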
2542 rtx
2543 expand_mult_highpart_adjust (mode, adj_operand, op0, op1, target, unsignedp)
2544 enum machine_mode mode;
2545 register rtx adj_operand, op0, op1, target;
2546 int unsignedp;
2548 rtx tem;
2549 enum rtx_code adj_code = unsignedp ? PLUS : MINUS;
2551 tem = expand_shift (RSHIFT_EXPR, mode, op0,
2552 build_int_2 (GET_MODE_BITSIZE (mode) - 1, 0),
2553 NULL_RTX, 0);
2554 tem = expand_and (tem, op1, NULL_RTX);
2555 adj_operand
2556 = force_operand (gen_rtx_fmt_ee (adj_code, mode, adj_operand, tem),
2557 adj_operand);
2559 tem = expand_shift (RSHIFT_EXPR, mode, op1,
2560 build_int_2 (GET_MODE_BITSIZE (mode) - 1, 0),
2561 NULL_RTX, 0);
2562 tem = expand_and (tem, op0, NULL_RTX);
2563 target = force_operand (gen_rtx_fmt_ee (adj_code, mode, adj_operand, tem),
2564 target);
2566 return target;
2569 /* Emit code to multiply OP0 and CNST1, putting the high half of the result
2570 in TARGET if that is convenient, and returning where the result is. If the
2571 operation cannot be performed, 0 is returned.
2573 MODE is the mode of operation and result.
2575 UNSIGNEDP nonzero means unsigned multiply.
2577 MAX_COST is the total allowed cost for the expanded RTL. */
2579 rtx
2580 expand_mult_highpart (mode, op0, cnst1, target, unsignedp, max_cost)
2581 enum machine_mode mode;
2582 register rtx op0, target;
2583 unsigned HOST_WIDE_INT cnst1;
2584 int unsignedp;
2585 int max_cost;
2587 enum machine_mode wider_mode = GET_MODE_WIDER_MODE (mode);
2588 optab mul_highpart_optab;
2589 optab moptab;
2590 rtx tem;
2591 int size = GET_MODE_BITSIZE (mode);
2592 rtx op1, wide_op1;
2594 /* We can't support modes wider than HOST_BITS_PER_WIDE_INT. */
2595 if (size > HOST_BITS_PER_WIDE_INT)
2596 abort ();
2598 op1 = GEN_INT (cnst1);
2600 if (GET_MODE_BITSIZE (wider_mode) <= HOST_BITS_PER_INT)
2601 wide_op1 = op1;
2602 else
2603 wide_op1
2604 = immed_double_const (cnst1,
2605 (unsignedp
2606 ? (HOST_WIDE_INT) 0
2607 : -(cnst1 >> (HOST_BITS_PER_WIDE_INT - 1))),
2608 wider_mode);
2610 /* expand_mult handles constant multiplication of word_mode
2611 or narrower. It does a poor job for large modes. */
2612 if (size < BITS_PER_WORD
2613 && mul_cost[(int) wider_mode] + shift_cost[size-1] < max_cost)
2615 /* We have to do this, since expand_binop doesn't do conversion for
2616 multiply. Maybe change expand_binop to handle widening multiply? */
2617 op0 = convert_to_mode (wider_mode, op0, unsignedp);
2619 tem = expand_mult (wider_mode, op0, wide_op1, NULL_RTX, unsignedp);
2620 tem = expand_shift (RSHIFT_EXPR, wider_mode, tem,
2621 build_int_2 (size, 0), NULL_RTX, 1);
2622 return convert_modes (mode, wider_mode, tem, unsignedp);
2625 if (target == 0)
2626 target = gen_reg_rtx (mode);
2628 /* Firstly, try using a multiplication insn that only generates the needed
2629 high part of the product, and in the sign flavor of unsignedp. */
2630 if (mul_highpart_cost[(int) mode] < max_cost)
2632 mul_highpart_optab = unsignedp ? umul_highpart_optab : smul_highpart_optab;
2633 target = expand_binop (mode, mul_highpart_optab,
2634 op0, wide_op1, target, unsignedp, OPTAB_DIRECT);
2635 if (target)
2636 return target;
2639 /* Secondly, same as above, but use sign flavor opposite of unsignedp.
2640 Need to adjust the result after the multiplication. */
2641 if (mul_highpart_cost[(int) mode] + 2 * shift_cost[size-1] + 4 * add_cost < max_cost)
2643 mul_highpart_optab = unsignedp ? smul_highpart_optab : umul_highpart_optab;
2644 target = expand_binop (mode, mul_highpart_optab,
2645 op0, wide_op1, target, unsignedp, OPTAB_DIRECT);
2646 if (target)
2647 /* We used the wrong signedness. Adjust the result. */
2648 return expand_mult_highpart_adjust (mode, target, op0,
2649 op1, target, unsignedp);
2652 /* Try widening multiplication. */
2653 moptab = unsignedp ? umul_widen_optab : smul_widen_optab;
2654 if (moptab->handlers[(int) wider_mode].insn_code != CODE_FOR_nothing
2655 && mul_widen_cost[(int) wider_mode] < max_cost)
2657 op1 = force_reg (mode, op1);
2658 goto try;
2661 /* Try widening the mode and performing a non-widening multiplication. */
2662 moptab = smul_optab;
2663 if (smul_optab->handlers[(int) wider_mode].insn_code != CODE_FOR_nothing
2664 && mul_cost[(int) wider_mode] + shift_cost[size-1] < max_cost)
2666 op1 = wide_op1;
2667 goto try;
2670 /* Try widening multiplication of opposite signedness, and adjust. */
2671 moptab = unsignedp ? smul_widen_optab : umul_widen_optab;
2672 if (moptab->handlers[(int) wider_mode].insn_code != CODE_FOR_nothing
2673 && (mul_widen_cost[(int) wider_mode]
2674 + 2 * shift_cost[size-1] + 4 * add_cost < max_cost))
2676 rtx regop1 = force_reg (mode, op1);
2677 tem = expand_binop (wider_mode, moptab, op0, regop1,
2678 NULL_RTX, ! unsignedp, OPTAB_WIDEN);
2679 if (tem != 0)
2681 /* Extract the high half of the just generated product. */
2682 tem = expand_shift (RSHIFT_EXPR, wider_mode, tem,
2683 build_int_2 (size, 0), NULL_RTX, 1);
2684 tem = convert_modes (mode, wider_mode, tem, unsignedp);
2685 /* We used the wrong signedness. Adjust the result. */
2686 return expand_mult_highpart_adjust (mode, tem, op0, op1,
2687 target, unsignedp);
2691 return 0;
2693 try:
2694 /* Pass NULL_RTX as target since TARGET has wrong mode. */
2695 tem = expand_binop (wider_mode, moptab, op0, op1,
2696 NULL_RTX, unsignedp, OPTAB_WIDEN);
2697 if (tem == 0)
2698 return 0;
2700 /* Extract the high half of the just generated product. */
2701 if (mode == word_mode)
2703 return gen_highpart (mode, tem);
2705 else
2707 tem = expand_shift (RSHIFT_EXPR, wider_mode, tem,
2708 build_int_2 (size, 0), NULL_RTX, 1);
2709 return convert_modes (mode, wider_mode, tem, unsignedp);
2713 /* Emit the code to divide OP0 by OP1, putting the result in TARGET
2714 if that is convenient, and returning where the result is.
2715 You may request either the quotient or the remainder as the result;
2716 specify REM_FLAG nonzero to get the remainder.
2718 CODE is the expression code for which kind of division this is;
2719 it controls how rounding is done. MODE is the machine mode to use.
2720 UNSIGNEDP nonzero means do unsigned division. */
2722 /* ??? For CEIL_MOD_EXPR, can compute incorrect remainder with ANDI
2723 and then correct it by or'ing in missing high bits
2724 if result of ANDI is nonzero.
2725 For ROUND_MOD_EXPR, can use ANDI and then sign-extend the result.
2726 This could optimize to a bfexts instruction.
2727 But C doesn't use these operations, so their optimizations are
2728 left for later. */
2730 #define EXACT_POWER_OF_2_OR_ZERO_P(x) (((x) & ((x) - 1)) == 0)
2732 rtx
2733 expand_divmod (rem_flag, code, mode, op0, op1, target, unsignedp)
2734 int rem_flag;
2735 enum tree_code code;
2736 enum machine_mode mode;
2737 register rtx op0, op1, target;
2738 int unsignedp;
2740 enum machine_mode compute_mode;
2741 register rtx tquotient;
2742 rtx quotient = 0, remainder = 0;
2743 rtx last;
2744 int size;
2745 rtx insn, set;
2746 optab optab1, optab2;
2747 int op1_is_constant, op1_is_pow2;
2748 int max_cost, extra_cost;
2750 op1_is_constant = GET_CODE (op1) == CONST_INT;
2751 op1_is_pow2 = (op1_is_constant
2752 && ((EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1))
2753 || (! unsignedp && EXACT_POWER_OF_2_OR_ZERO_P (-INTVAL (op1))))));
2755 /*
2756 This is the structure of expand_divmod:
2758 First comes code to fix up the operands so we can perform the operations
2759 correctly and efficiently.
2761 Second comes a switch statement with code specific for each rounding mode.
2762 For some special operands this code emits all RTL for the desired
2763 operation; for other cases, it generates only a quotient and stores it in
2764 QUOTIENT. The case for trunc division/remainder might leave quotient = 0,
2765 to indicate that it has not done anything.
2767 Last comes code that finishes the operation. If QUOTIENT is set and
2768 REM_FLAG is set, the remainder is computed as OP0 - QUOTIENT * OP1. If
2769 QUOTIENT is not set, it is computed using trunc rounding.
2771 We try to generate special code for division and remainder when OP1 is a
2772 constant. If |OP1| = 2**n we can use shifts and some other fast
2773 operations. For other values of OP1, we compute a carefully selected
2774 fixed-point approximation m = 1/OP1, and generate code that multiplies OP0
2775 by m.
2777 In all cases but EXACT_DIV_EXPR, this multiplication requires the upper
2778 half of the product. Different strategies for generating the product are
2779 implemented in expand_mult_highpart.
2781 If what we actually want is the remainder, we generate that by another
2782 by-constant multiplication and a subtraction. */
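/* Illustrative example (editorial note, not in the original source):
   with the multiplier chosen by choose_multiplier for OP1 == 5 in an
   8-bit mode, x/5 becomes (x * 205) >> 10, and when the remainder is
   wanted instead we emit x - 5*(x/5), i.e. one more multiplication by
   the constant and a subtraction.  */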
2784 /* We shouldn't be called with OP1 == const1_rtx, but some of the
2785 code below will malfunction if we are, so check here and handle
2786 the special case if so. */
2787 if (op1 == const1_rtx)
2788 return rem_flag ? const0_rtx : op0;
2790 if (target
2791 /* Don't use the function value register as a target
2792 since we have to read it as well as write it,
2793 and function-inlining gets confused by this. */
2794 && ((REG_P (target) && REG_FUNCTION_VALUE_P (target))
2795 /* Don't clobber an operand while doing a multi-step calculation. */
2796 || ((rem_flag || op1_is_constant)
2797 && (reg_mentioned_p (target, op0)
2798 || (GET_CODE (op0) == MEM && GET_CODE (target) == MEM)))
2799 || reg_mentioned_p (target, op1)
2800 || (GET_CODE (op1) == MEM && GET_CODE (target) == MEM)))
2801 target = 0;
2803 /* Get the mode in which to perform this computation. Normally it will
2804 be MODE, but sometimes we can't do the desired operation in MODE.
2805 If so, pick a wider mode in which we can do the operation. Convert
2806 to that mode at the start to avoid repeated conversions.
2808 First see what operations we need. These depend on the expression
2809 we are evaluating. (We assume that divxx3 insns exist under the
2810 same conditions as modxx3 insns do, and that these insns don't normally
2811 fail. If these assumptions are not correct, we may generate less
2812 efficient code in some cases.)
2814 Then see if we find a mode in which we can open-code that operation
2815 (either a division, modulus, or shift). Finally, check for the smallest
2816 mode for which we can do the operation with a library call. */
2818 /* We might want to refine this now that we have division-by-constant
2819 optimization. Since expand_mult_highpart tries so many variants, it is
2820 not straightforward to generalize this. Maybe we should make an array
2821 of possible modes in init_expmed? Save this for GCC 2.7. */
2823 optab1 = (op1_is_pow2 ? (unsignedp ? lshr_optab : ashr_optab)
2824 : (unsignedp ? udiv_optab : sdiv_optab));
2825 optab2 = (op1_is_pow2 ? optab1 : (unsignedp ? udivmod_optab : sdivmod_optab));
2827 for (compute_mode = mode; compute_mode != VOIDmode;
2828 compute_mode = GET_MODE_WIDER_MODE (compute_mode))
2829 if (optab1->handlers[(int) compute_mode].insn_code != CODE_FOR_nothing
2830 || optab2->handlers[(int) compute_mode].insn_code != CODE_FOR_nothing)
2831 break;
2833 if (compute_mode == VOIDmode)
2834 for (compute_mode = mode; compute_mode != VOIDmode;
2835 compute_mode = GET_MODE_WIDER_MODE (compute_mode))
2836 if (optab1->handlers[(int) compute_mode].libfunc
2837 || optab2->handlers[(int) compute_mode].libfunc)
2838 break;
2840 /* If we still couldn't find a mode, use MODE, but we'll probably abort
2841 in expand_binop. */
2842 if (compute_mode == VOIDmode)
2843 compute_mode = mode;
2845 if (target && GET_MODE (target) == compute_mode)
2846 tquotient = target;
2847 else
2848 tquotient = gen_reg_rtx (compute_mode);
2850 size = GET_MODE_BITSIZE (compute_mode);
2851 #if 0
2852 /* It should be possible to restrict the precision to GET_MODE_BITSIZE
2853 (mode), and thereby get better code when OP1 is a constant. Do that
2854 later. It will require going over all usages of SIZE below. */
2855 size = GET_MODE_BITSIZE (mode);
2856 #endif
2858 max_cost = div_cost[(int) compute_mode]
2859 - (rem_flag ? mul_cost[(int) compute_mode] + add_cost : 0);
2861 /* Now convert to the best mode to use. */
2862 if (compute_mode != mode)
2864 op0 = convert_modes (compute_mode, mode, op0, unsignedp);
2865 op1 = convert_modes (compute_mode, mode, op1, unsignedp);
2867 /* convert_modes may have placed op1 into a register, so we
2868 must recompute the following. */
2869 op1_is_constant = GET_CODE (op1) == CONST_INT;
2870 op1_is_pow2 = (op1_is_constant
2871 && ((EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1))
2872 || (! unsignedp
2873 && EXACT_POWER_OF_2_OR_ZERO_P (-INTVAL (op1))))));
2876 /* If one of the operands is a volatile MEM, copy it into a register. */
2878 if (GET_CODE (op0) == MEM && MEM_VOLATILE_P (op0))
2879 op0 = force_reg (compute_mode, op0);
2880 if (GET_CODE (op1) == MEM && MEM_VOLATILE_P (op1))
2881 op1 = force_reg (compute_mode, op1);
2883 /* If we need the remainder or if OP1 is constant, we need to
2884 put OP0 in a register in case it has any queued subexpressions. */
2885 if (rem_flag || op1_is_constant)
2886 op0 = force_reg (compute_mode, op0);
2888 last = get_last_insn ();
2890 /* Promote floor rounding to trunc rounding for unsigned operations. */
2891 if (unsignedp)
2893 if (code == FLOOR_DIV_EXPR)
2894 code = TRUNC_DIV_EXPR;
2895 if (code == FLOOR_MOD_EXPR)
2896 code = TRUNC_MOD_EXPR;
2897 if (code == EXACT_DIV_EXPR && op1_is_pow2)
2898 code = TRUNC_DIV_EXPR;
2901 if (op1 != const0_rtx)
2902 switch (code)
2904 case TRUNC_MOD_EXPR:
2905 case TRUNC_DIV_EXPR:
2906 if (op1_is_constant)
2908 if (unsignedp)
2910 unsigned HOST_WIDE_INT mh, ml;
2911 int pre_shift, post_shift;
2912 int dummy;
2913 unsigned HOST_WIDE_INT d = INTVAL (op1);
2915 if (EXACT_POWER_OF_2_OR_ZERO_P (d))
2917 pre_shift = floor_log2 (d);
2918 if (rem_flag)
2920 remainder
2921 = expand_binop (compute_mode, and_optab, op0,
2922 GEN_INT (((HOST_WIDE_INT) 1 << pre_shift) - 1),
2923 remainder, 1,
2924 OPTAB_LIB_WIDEN);
2925 if (remainder)
2926 return gen_lowpart (mode, remainder);
2928 quotient = expand_shift (RSHIFT_EXPR, compute_mode, op0,
2929 build_int_2 (pre_shift, 0),
2930 tquotient, 1);
2932 else if (size <= HOST_BITS_PER_WIDE_INT)
2934 if (d >= ((unsigned HOST_WIDE_INT) 1 << (size - 1)))
2936 /* Most significant bit of divisor is set; emit an scc
2937 insn. */
2938 quotient = emit_store_flag (tquotient, GEU, op0, op1,
2939 compute_mode, 1, 1);
2940 if (quotient == 0)
2941 goto fail1;
2943 else
2945 /* Find a suitable multiplier and right shift count
2946 instead of dividing by D. */
2948 mh = choose_multiplier (d, size, size,
2949 &ml, &post_shift, &dummy);
2951 /* If the suggested multiplier is more than SIZE bits,
2952 we can do better for even divisors, using an
2953 initial right shift. */
2954 if (mh != 0 && (d & 1) == 0)
2956 pre_shift = floor_log2 (d & -d);
2957 mh = choose_multiplier (d >> pre_shift, size,
2958 size - pre_shift,
2959 &ml, &post_shift, &dummy);
2960 if (mh)
2961 abort ();
2963 else
2964 pre_shift = 0;
2966 if (mh != 0)
2968 rtx t1, t2, t3, t4;
2970 extra_cost = (shift_cost[post_shift - 1]
2971 + shift_cost[1] + 2 * add_cost);
2972 t1 = expand_mult_highpart (compute_mode, op0, ml,
2973 NULL_RTX, 1,
2974 max_cost - extra_cost);
2975 if (t1 == 0)
2976 goto fail1;
2977 t2 = force_operand (gen_rtx_MINUS (compute_mode,
2978 op0, t1),
2979 NULL_RTX);
2980 t3 = expand_shift (RSHIFT_EXPR, compute_mode, t2,
2981 build_int_2 (1, 0), NULL_RTX,1);
2982 t4 = force_operand (gen_rtx_PLUS (compute_mode,
2983 t1, t3),
2984 NULL_RTX);
2985 quotient
2986 = expand_shift (RSHIFT_EXPR, compute_mode, t4,
2987 build_int_2 (post_shift - 1, 0),
2988 tquotient, 1);
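/* Editorial note (not in the original source): this is the standard
   fixup for a multiplier that needs SIZE+1 bits: the sequence computes
   q == (t1 + ((x - t1) >> 1)) >> (post_shift - 1), which equals
   (x + t1) >> post_shift but cannot overflow, since x - t1 and the
   following additions all fit in SIZE bits.  */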
2990 else
2992 rtx t1, t2;
2994 t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
2995 build_int_2 (pre_shift, 0),
2996 NULL_RTX, 1);
2997 extra_cost = (shift_cost[pre_shift]
2998 + shift_cost[post_shift]);
2999 t2 = expand_mult_highpart (compute_mode, t1, ml,
3000 NULL_RTX, 1,
3001 max_cost - extra_cost);
3002 if (t2 == 0)
3003 goto fail1;
3004 quotient
3005 = expand_shift (RSHIFT_EXPR, compute_mode, t2,
3006 build_int_2 (post_shift, 0),
3007 tquotient, 1);
3011 else /* Too wide mode to use tricky code */
3012 break;
3014 insn = get_last_insn ();
3015 if (insn != last
3016 && (set = single_set (insn)) != 0
3017 && SET_DEST (set) == quotient)
3018 REG_NOTES (insn)
3019 = gen_rtx_EXPR_LIST (REG_EQUAL,
3020 gen_rtx_UDIV (compute_mode, op0, op1),
3021 REG_NOTES (insn));
3023 else /* TRUNC_DIV, signed */
3025 unsigned HOST_WIDE_INT ml;
3026 int lgup, post_shift;
3027 HOST_WIDE_INT d = INTVAL (op1);
3028 unsigned HOST_WIDE_INT abs_d = d >= 0 ? d : -d;
3030 /* n rem d = n rem -d */
3031 if (rem_flag && d < 0)
3033 d = abs_d;
3034 op1 = GEN_INT (abs_d);
3037 if (d == 1)
3038 quotient = op0;
3039 else if (d == -1)
3040 quotient = expand_unop (compute_mode, neg_optab, op0,
3041 tquotient, 0);
3042 else if (abs_d == (unsigned HOST_WIDE_INT) 1 << (size - 1))
3044 /* This case is not handled correctly below. */
3045 quotient = emit_store_flag (tquotient, EQ, op0, op1,
3046 compute_mode, 1, 1);
3047 if (quotient == 0)
3048 goto fail1;
3050 else if (EXACT_POWER_OF_2_OR_ZERO_P (d)
3051 && (rem_flag ? smod_pow2_cheap : sdiv_pow2_cheap))
3052 ;
3053 else if (EXACT_POWER_OF_2_OR_ZERO_P (abs_d))
3055 lgup = floor_log2 (abs_d);
3056 if (abs_d != 2 && BRANCH_COST < 3)
3058 rtx label = gen_label_rtx ();
3059 rtx t1;
3061 t1 = copy_to_mode_reg (compute_mode, op0);
3062 do_cmp_and_jump (t1, const0_rtx, GE,
3063 compute_mode, label);
3064 expand_inc (t1, GEN_INT (abs_d - 1));
3065 emit_label (label);
3066 quotient = expand_shift (RSHIFT_EXPR, compute_mode, t1,
3067 build_int_2 (lgup, 0),
3068 tquotient, 0);
3070 else
3072 rtx t1, t2, t3;
3073 t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3074 build_int_2 (size - 1, 0),
3075 NULL_RTX, 0);
3076 t2 = expand_shift (RSHIFT_EXPR, compute_mode, t1,
3077 build_int_2 (size - lgup, 0),
3078 NULL_RTX, 1);
3079 t3 = force_operand (gen_rtx_PLUS (compute_mode,
3080 op0, t2),
3081 NULL_RTX);
3082 quotient = expand_shift (RSHIFT_EXPR, compute_mode, t3,
3083 build_int_2 (lgup, 0),
3084 tquotient, 0);
3087 /* We have computed OP0 / abs(OP1). If OP1 is negative, negate
3088 the quotient. */
3089 if (d < 0)
3091 insn = get_last_insn ();
3092 if (insn != last
3093 && (set = single_set (insn)) != 0
3094 && SET_DEST (set) == quotient)
3095 REG_NOTES (insn)
3096 = gen_rtx_EXPR_LIST (REG_EQUAL,
3097 gen_rtx_DIV (compute_mode,
3098 op0,
3099 GEN_INT (abs_d)),
3100 REG_NOTES (insn));
3102 quotient = expand_unop (compute_mode, neg_optab,
3103 quotient, quotient, 0);
3106 else if (size <= HOST_BITS_PER_WIDE_INT)
3108 choose_multiplier (abs_d, size, size - 1,
3109 &ml, &post_shift, &lgup);
3110 if (ml < (unsigned HOST_WIDE_INT) 1 << (size - 1))
3112 rtx t1, t2, t3;
3114 extra_cost = (shift_cost[post_shift]
3115 + shift_cost[size - 1] + add_cost);
3116 t1 = expand_mult_highpart (compute_mode, op0, ml,
3117 NULL_RTX, 0,
3118 max_cost - extra_cost);
3119 if (t1 == 0)
3120 goto fail1;
3121 t2 = expand_shift (RSHIFT_EXPR, compute_mode, t1,
3122 build_int_2 (post_shift, 0), NULL_RTX, 0);
3123 t3 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3124 build_int_2 (size - 1, 0), NULL_RTX, 0);
3125 if (d < 0)
3126 quotient = force_operand (gen_rtx_MINUS (compute_mode, t3, t2),
3127 tquotient);
3128 else
3129 quotient = force_operand (gen_rtx_MINUS (compute_mode, t2, t3),
3130 tquotient);
3132 else
3134 rtx t1, t2, t3, t4;
3136 ml |= (~(unsigned HOST_WIDE_INT) 0) << (size - 1);
3137 extra_cost = (shift_cost[post_shift]
3138 + shift_cost[size - 1] + 2 * add_cost);
3139 t1 = expand_mult_highpart (compute_mode, op0, ml,
3140 NULL_RTX, 0,
3141 max_cost - extra_cost);
3142 if (t1 == 0)
3143 goto fail1;
3144 t2 = force_operand (gen_rtx_PLUS (compute_mode, t1, op0),
3145 NULL_RTX);
3146 t3 = expand_shift (RSHIFT_EXPR, compute_mode, t2,
3147 build_int_2 (post_shift, 0), NULL_RTX, 0);
3148 t4 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3149 build_int_2 (size - 1, 0), NULL_RTX, 0);
3150 if (d < 0)
3151 quotient = force_operand (gen_rtx_MINUS (compute_mode, t4, t3),
3152 tquotient);
3153 else
3154 quotient = force_operand (gen_rtx_MINUS (compute_mode, t3, t4),
3155 tquotient);
3158 else /* Too wide mode to use tricky code */
3159 break;
3161 insn = get_last_insn ();
3162 if (insn != last
3163 && (set = single_set (insn)) != 0
3164 && SET_DEST (set) == quotient)
3165 REG_NOTES (insn)
3166 = gen_rtx_EXPR_LIST (REG_EQUAL,
3167 gen_rtx_DIV (compute_mode, op0, op1),
3168 REG_NOTES (insn));
3170 break;
3172 fail1:
3173 delete_insns_since (last);
3174 break;
3176 case FLOOR_DIV_EXPR:
3177 case FLOOR_MOD_EXPR:
3178 /* We will come here only for signed operations. */
3179 if (op1_is_constant && HOST_BITS_PER_WIDE_INT >= size)
3181 unsigned HOST_WIDE_INT mh, ml;
3182 int pre_shift, lgup, post_shift;
3183 HOST_WIDE_INT d = INTVAL (op1);
3185 if (d > 0)
3187 /* We could just as easily deal with negative constants here,
3188 but it does not seem worth the trouble for GCC 2.6. */
3189 if (EXACT_POWER_OF_2_OR_ZERO_P (d))
3191 pre_shift = floor_log2 (d);
3192 if (rem_flag)
3194 remainder = expand_binop (compute_mode, and_optab, op0,
3195 GEN_INT (((HOST_WIDE_INT) 1 << pre_shift) - 1),
3196 remainder, 0, OPTAB_LIB_WIDEN);
3197 if (remainder)
3198 return gen_lowpart (mode, remainder);
3200 quotient = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3201 build_int_2 (pre_shift, 0),
3202 tquotient, 0);
3204 else
3206 rtx t1, t2, t3, t4;
3208 mh = choose_multiplier (d, size, size - 1,
3209 &ml, &post_shift, &lgup);
3210 if (mh)
3211 abort ();
3213 t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3214 build_int_2 (size - 1, 0), NULL_RTX, 0);
3215 t2 = expand_binop (compute_mode, xor_optab, op0, t1,
3216 NULL_RTX, 0, OPTAB_WIDEN);
3217 extra_cost = (shift_cost[post_shift]
3218 + shift_cost[size - 1] + 2 * add_cost);
3219 t3 = expand_mult_highpart (compute_mode, t2, ml,
3220 NULL_RTX, 1,
3221 max_cost - extra_cost);
3222 if (t3 != 0)
3224 t4 = expand_shift (RSHIFT_EXPR, compute_mode, t3,
3225 build_int_2 (post_shift, 0),
3226 NULL_RTX, 1);
3227 quotient = expand_binop (compute_mode, xor_optab,
3228 t4, t1, tquotient, 0,
3229 OPTAB_WIDEN);
3233 else
3235 rtx nsign, t1, t2, t3, t4;
3236 t1 = force_operand (gen_rtx_PLUS (compute_mode,
3237 op0, constm1_rtx), NULL_RTX);
3238 t2 = expand_binop (compute_mode, ior_optab, op0, t1, NULL_RTX,
3239 0, OPTAB_WIDEN);
3240 nsign = expand_shift (RSHIFT_EXPR, compute_mode, t2,
3241 build_int_2 (size - 1, 0), NULL_RTX, 0);
3242 t3 = force_operand (gen_rtx_MINUS (compute_mode, t1, nsign),
3243 NULL_RTX);
3244 t4 = expand_divmod (0, TRUNC_DIV_EXPR, compute_mode, t3, op1,
3245 NULL_RTX, 0);
3246 if (t4)
3248 rtx t5;
3249 t5 = expand_unop (compute_mode, one_cmpl_optab, nsign,
3250 NULL_RTX, 0);
3251 quotient = force_operand (gen_rtx_PLUS (compute_mode,
3252 t4, t5),
3253 tquotient);
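/* Editorial note (not in the original source): in the sequence above,
   NSIGN is -1 exactly when OP0 <= 0 (the sign bit of OP0 | (OP0 - 1)),
   so T3 is OP0 - 1 for positive OP0 and OP0 otherwise; adding ~NSIGN
   to the truncating quotient then subtracts 1 only in the positive
   case, which turns truncation towards zero into floor rounding for
   the nonpositive divisor handled here.  */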
3258 if (quotient != 0)
3259 break;
3260 delete_insns_since (last);
3262 /* Try using an instruction that produces both the quotient and
3263 remainder, using truncation. We can easily compensate the quotient
3264 or remainder to get floor rounding, once we have the remainder.
3265 Notice that we also compute the final remainder value here,
3266 and return the result right away. */
3267 if (target == 0 || GET_MODE (target) != compute_mode)
3268 target = gen_reg_rtx (compute_mode);
3270 if (rem_flag)
3272 remainder
3273 = GET_CODE (target) == REG ? target : gen_reg_rtx (compute_mode);
3274 quotient = gen_reg_rtx (compute_mode);
3276 else
3278 quotient
3279 = GET_CODE (target) == REG ? target : gen_reg_rtx (compute_mode);
3280 remainder = gen_reg_rtx (compute_mode);
3283 if (expand_twoval_binop (sdivmod_optab, op0, op1,
3284 quotient, remainder, 0))
3286 /* This could be computed with a branch-less sequence.
3287 Save that for later. */
3288 rtx tem;
3289 rtx label = gen_label_rtx ();
3290 do_cmp_and_jump (remainder, const0_rtx, EQ, compute_mode, label);
3291 tem = expand_binop (compute_mode, xor_optab, op0, op1,
3292 NULL_RTX, 0, OPTAB_WIDEN);
3293 do_cmp_and_jump (tem, const0_rtx, GE, compute_mode, label);
3294 expand_dec (quotient, const1_rtx);
3295 expand_inc (remainder, op1);
3296 emit_label (label);
3297 return gen_lowpart (mode, rem_flag ? remainder : quotient);
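/* Illustrative example (editorial note, not in the original source):
   for OP0 == -7, OP1 == 2 the truncating divmod above yields -3 rem -1;
   the remainder is nonzero and the operand signs differ, so we adjust
   to quotient -4 and remainder 1, the floor-rounded results.  */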
3300 /* No luck with division elimination or divmod. Have to do it
3301 by conditionally adjusting op0 *and* the result. */
3303 rtx label1, label2, label3, label4, label5;
3304 rtx adjusted_op0;
3305 rtx tem;
3307 quotient = gen_reg_rtx (compute_mode);
3308 adjusted_op0 = copy_to_mode_reg (compute_mode, op0);
3309 label1 = gen_label_rtx ();
3310 label2 = gen_label_rtx ();
3311 label3 = gen_label_rtx ();
3312 label4 = gen_label_rtx ();
3313 label5 = gen_label_rtx ();
3314 do_cmp_and_jump (op1, const0_rtx, LT, compute_mode, label2);
3315 do_cmp_and_jump (adjusted_op0, const0_rtx, LT, compute_mode, label1);
3316 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
3317 quotient, 0, OPTAB_LIB_WIDEN);
3318 if (tem != quotient)
3319 emit_move_insn (quotient, tem);
3320 emit_jump_insn (gen_jump (label5));
3321 emit_barrier ();
3322 emit_label (label1);
3323 expand_inc (adjusted_op0, const1_rtx);
3324 emit_jump_insn (gen_jump (label4));
3325 emit_barrier ();
3326 emit_label (label2);
3327 do_cmp_and_jump (adjusted_op0, const0_rtx, GT, compute_mode, label3);
3328 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
3329 quotient, 0, OPTAB_LIB_WIDEN);
3330 if (tem != quotient)
3331 emit_move_insn (quotient, tem);
3332 emit_jump_insn (gen_jump (label5));
3333 emit_barrier ();
3334 emit_label (label3);
3335 expand_dec (adjusted_op0, const1_rtx);
3336 emit_label (label4);
3337 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
3338 quotient, 0, OPTAB_LIB_WIDEN);
3339 if (tem != quotient)
3340 emit_move_insn (quotient, tem);
3341 expand_dec (quotient, const1_rtx);
3342 emit_label (label5);
3344 break;
3346 case CEIL_DIV_EXPR:
3347 case CEIL_MOD_EXPR:
3348 if (unsignedp)
3350 if (op1_is_constant && EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1)))
3352 rtx t1, t2, t3;
3353 unsigned HOST_WIDE_INT d = INTVAL (op1);
3354 t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3355 build_int_2 (floor_log2 (d), 0),
3356 tquotient, 1);
3357 t2 = expand_binop (compute_mode, and_optab, op0,
3358 GEN_INT (d - 1),
3359 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3360 t3 = gen_reg_rtx (compute_mode);
3361 t3 = emit_store_flag (t3, NE, t2, const0_rtx,
3362 compute_mode, 1, 1);
3363 if (t3 == 0)
3365 rtx lab;
3366 lab = gen_label_rtx ();
3367 do_cmp_and_jump (t2, const0_rtx, EQ, compute_mode, lab);
3368 expand_inc (t1, const1_rtx);
3369 emit_label (lab);
3370 quotient = t1;
3372 else
3373 quotient = force_operand (gen_rtx_PLUS (compute_mode,
3374 t1, t3),
3375 tquotient);
3376 break;
3379 /* Try using an instruction that produces both the quotient and
3380 remainder, using truncation. We can easily compensate the
3381 quotient or remainder to get ceiling rounding, once we have the
3382 remainder. Notice that we also compute the final remainder
3383 value here, and return the result right away. */
3384 if (target == 0 || GET_MODE (target) != compute_mode)
3385 target = gen_reg_rtx (compute_mode);
3387 if (rem_flag)
3389 remainder = (GET_CODE (target) == REG
3390 ? target : gen_reg_rtx (compute_mode));
3391 quotient = gen_reg_rtx (compute_mode);
3393 else
3395 quotient = (GET_CODE (target) == REG
3396 ? target : gen_reg_rtx (compute_mode));
3397 remainder = gen_reg_rtx (compute_mode);
3400 if (expand_twoval_binop (udivmod_optab, op0, op1, quotient,
3401 remainder, 1))
3403 /* This could be computed with a branch-less sequence.
3404 Save that for later. */
3405 rtx label = gen_label_rtx ();
3406 do_cmp_and_jump (remainder, const0_rtx, EQ,
3407 compute_mode, label);
3408 expand_inc (quotient, const1_rtx);
3409 expand_dec (remainder, op1);
3410 emit_label (label);
3411 return gen_lowpart (mode, rem_flag ? remainder : quotient);
3414 /* No luck with division elimination or divmod. Have to do it
3415 by conditionally adjusting op0 *and* the result. */
3417 rtx label1, label2;
3418 rtx adjusted_op0, tem;
3420 quotient = gen_reg_rtx (compute_mode);
3421 adjusted_op0 = copy_to_mode_reg (compute_mode, op0);
3422 label1 = gen_label_rtx ();
3423 label2 = gen_label_rtx ();
3424 do_cmp_and_jump (adjusted_op0, const0_rtx, NE,
3425 compute_mode, label1);
3426 emit_move_insn (quotient, const0_rtx);
3427 emit_jump_insn (gen_jump (label2));
3428 emit_barrier ();
3429 emit_label (label1);
3430 expand_dec (adjusted_op0, const1_rtx);
3431 tem = expand_binop (compute_mode, udiv_optab, adjusted_op0, op1,
3432 quotient, 1, OPTAB_LIB_WIDEN);
3433 if (tem != quotient)
3434 emit_move_insn (quotient, tem);
3435 expand_inc (quotient, const1_rtx);
3436 emit_label (label2);
3439 else /* signed */
3441 if (op1_is_constant && EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1))
3442 && INTVAL (op1) >= 0)
3444 /* This is extremely similar to the code for the unsigned case
3445 above. For 2.7 we should merge these variants, but for
3446 2.6.1 I don't want to touch the code for unsigned since that
3447 gets used in C. The signed case will only be used by other
3448 languages (Ada). */
3450 rtx t1, t2, t3;
3451 unsigned HOST_WIDE_INT d = INTVAL (op1);
3452 t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3453 build_int_2 (floor_log2 (d), 0),
3454 tquotient, 0);
3455 t2 = expand_binop (compute_mode, and_optab, op0,
3456 GEN_INT (d - 1),
3457 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3458 t3 = gen_reg_rtx (compute_mode);
3459 t3 = emit_store_flag (t3, NE, t2, const0_rtx,
3460 compute_mode, 1, 1);
3461 if (t3 == 0)
3463 rtx lab;
3464 lab = gen_label_rtx ();
3465 do_cmp_and_jump (t2, const0_rtx, EQ, compute_mode, lab);
3466 expand_inc (t1, const1_rtx);
3467 emit_label (lab);
3468 quotient = t1;
3470 else
3471 quotient = force_operand (gen_rtx_PLUS (compute_mode,
3472 t1, t3),
3473 tquotient);
3474 break;
3477 /* Try using an instruction that produces both the quotient and
3478 remainder, using truncation. We can easily compensate the
3479 quotient or remainder to get ceiling rounding, once we have the
3480 remainder. Notice that we also compute the final remainder
3481 value here, and return the result right away. */
3482 if (target == 0 || GET_MODE (target) != compute_mode)
3483 target = gen_reg_rtx (compute_mode);
3484 if (rem_flag)
3486 remainder = (GET_CODE (target) == REG
3487 ? target : gen_reg_rtx (compute_mode));
3488 quotient = gen_reg_rtx (compute_mode);
3490 else
3492 quotient = (GET_CODE (target) == REG
3493 ? target : gen_reg_rtx (compute_mode));
3494 remainder = gen_reg_rtx (compute_mode);
3497 if (expand_twoval_binop (sdivmod_optab, op0, op1, quotient,
3498 remainder, 0))
3500 /* This could be computed with a branch-less sequence.
3501 Save that for later. */
3502 rtx tem;
3503 rtx label = gen_label_rtx ();
3504 do_cmp_and_jump (remainder, const0_rtx, EQ,
3505 compute_mode, label);
3506 tem = expand_binop (compute_mode, xor_optab, op0, op1,
3507 NULL_RTX, 0, OPTAB_WIDEN);
3508 do_cmp_and_jump (tem, const0_rtx, LT, compute_mode, label);
3509 expand_inc (quotient, const1_rtx);
3510 expand_dec (remainder, op1);
3511 emit_label (label);
3512 return gen_lowpart (mode, rem_flag ? remainder : quotient);
3515 /* No luck with division elimination or divmod. Have to do it
3516 by conditionally adjusting op0 *and* the result. */
3518 rtx label1, label2, label3, label4, label5;
3519 rtx adjusted_op0;
3520 rtx tem;
3522 quotient = gen_reg_rtx (compute_mode);
3523 adjusted_op0 = copy_to_mode_reg (compute_mode, op0);
3524 label1 = gen_label_rtx ();
3525 label2 = gen_label_rtx ();
3526 label3 = gen_label_rtx ();
3527 label4 = gen_label_rtx ();
3528 label5 = gen_label_rtx ();
3529 do_cmp_and_jump (op1, const0_rtx, LT, compute_mode, label2);
3530 do_cmp_and_jump (adjusted_op0, const0_rtx, GT,
3531 compute_mode, label1);
3532 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
3533 quotient, 0, OPTAB_LIB_WIDEN);
3534 if (tem != quotient)
3535 emit_move_insn (quotient, tem);
3536 emit_jump_insn (gen_jump (label5));
3537 emit_barrier ();
3538 emit_label (label1);
3539 expand_dec (adjusted_op0, const1_rtx);
3540 emit_jump_insn (gen_jump (label4));
3541 emit_barrier ();
3542 emit_label (label2);
3543 do_cmp_and_jump (adjusted_op0, const0_rtx, LT,
3544 compute_mode, label3);
3545 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
3546 quotient, 0, OPTAB_LIB_WIDEN);
3547 if (tem != quotient)
3548 emit_move_insn (quotient, tem);
3549 emit_jump_insn (gen_jump (label5));
3550 emit_barrier ();
3551 emit_label (label3);
3552 expand_inc (adjusted_op0, const1_rtx);
3553 emit_label (label4);
3554 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
3555 quotient, 0, OPTAB_LIB_WIDEN);
3556 if (tem != quotient)
3557 emit_move_insn (quotient, tem);
3558 expand_inc (quotient, const1_rtx);
3559 emit_label (label5);
3562 break;
3564 case EXACT_DIV_EXPR:
3565 if (op1_is_constant && HOST_BITS_PER_WIDE_INT >= size)
3567 HOST_WIDE_INT d = INTVAL (op1);
3568 unsigned HOST_WIDE_INT ml;
3569 int post_shift;
3570 rtx t1;
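/* Editorial note (not in the original source): for exact division the
   quotient can be produced without any correction steps, by shifting
   out the known low zero bits of OP1 and multiplying by the inverse of
   the odd part modulo 2^SIZE. For example, in an 8-bit mode, dividing
   by 5 multiplies by invert_mod2n (5, 8) == 205: for OP0 == 15,
   15 * 205 == 3075 == 3 (mod 256), and 15/5 == 3.  */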
3572 post_shift = floor_log2 (d & -d);
3573 ml = invert_mod2n (d >> post_shift, size);
3574 t1 = expand_mult (compute_mode, op0, GEN_INT (ml), NULL_RTX,
3575 unsignedp);
3576 quotient = expand_shift (RSHIFT_EXPR, compute_mode, t1,
3577 build_int_2 (post_shift, 0),
3578 NULL_RTX, unsignedp);
3580 insn = get_last_insn ();
3581 REG_NOTES (insn)
3582 = gen_rtx_EXPR_LIST (REG_EQUAL,
3583 gen_rtx_fmt_ee (unsignedp ? UDIV : DIV,
3584 compute_mode,
3585 op0, op1),
3586 REG_NOTES (insn));
3588 break;
3590 case ROUND_DIV_EXPR:
3591 case ROUND_MOD_EXPR:
3592 if (unsignedp)
3594 rtx tem;
3595 rtx label;
3596 label = gen_label_rtx ();
3597 quotient = gen_reg_rtx (compute_mode);
3598 remainder = gen_reg_rtx (compute_mode);
3599 if (expand_twoval_binop (udivmod_optab, op0, op1, quotient, remainder, 1) == 0)
3601 rtx tem;
3602 quotient = expand_binop (compute_mode, udiv_optab, op0, op1,
3603 quotient, 1, OPTAB_LIB_WIDEN);
3604 tem = expand_mult (compute_mode, quotient, op1, NULL_RTX, 1);
3605 remainder = expand_binop (compute_mode, sub_optab, op0, tem,
3606 remainder, 1, OPTAB_LIB_WIDEN);
3608 tem = plus_constant (op1, -1);
3609 tem = expand_shift (RSHIFT_EXPR, compute_mode, tem,
3610 build_int_2 (1, 0), NULL_RTX, 1);
3611 do_cmp_and_jump (remainder, tem, LEU, compute_mode, label);
3612 expand_inc (quotient, const1_rtx);
3613 expand_dec (remainder, op1);
3614 emit_label (label);
3616 else
3618 rtx abs_rem, abs_op1, tem, mask;
3619 rtx label;
3620 label = gen_label_rtx ();
3621 quotient = gen_reg_rtx (compute_mode);
3622 remainder = gen_reg_rtx (compute_mode);
3623 if (expand_twoval_binop (sdivmod_optab, op0, op1, quotient, remainder, 0) == 0)
3625 rtx tem;
3626 quotient = expand_binop (compute_mode, sdiv_optab, op0, op1,
3627 quotient, 0, OPTAB_LIB_WIDEN);
3628 tem = expand_mult (compute_mode, quotient, op1, NULL_RTX, 0);
3629 remainder = expand_binop (compute_mode, sub_optab, op0, tem,
3630 remainder, 0, OPTAB_LIB_WIDEN);
3632 abs_rem = expand_abs (compute_mode, remainder, NULL_RTX, 0, 0);
3633 abs_op1 = expand_abs (compute_mode, op1, NULL_RTX, 0, 0);
3634 tem = expand_shift (LSHIFT_EXPR, compute_mode, abs_rem,
3635 build_int_2 (1, 0), NULL_RTX, 1);
3636 do_cmp_and_jump (tem, abs_op1, LTU, compute_mode, label);
3637 tem = expand_binop (compute_mode, xor_optab, op0, op1,
3638 NULL_RTX, 0, OPTAB_WIDEN);
3639 mask = expand_shift (RSHIFT_EXPR, compute_mode, tem,
3640 build_int_2 (size - 1, 0), NULL_RTX, 0);
3641 tem = expand_binop (compute_mode, xor_optab, mask, const1_rtx,
3642 NULL_RTX, 0, OPTAB_WIDEN);
3643 tem = expand_binop (compute_mode, sub_optab, tem, mask,
3644 NULL_RTX, 0, OPTAB_WIDEN);
3645 expand_inc (quotient, tem);
3646 tem = expand_binop (compute_mode, xor_optab, mask, op1,
3647 NULL_RTX, 0, OPTAB_WIDEN);
3648 tem = expand_binop (compute_mode, sub_optab, tem, mask,
3649 NULL_RTX, 0, OPTAB_WIDEN);
3650 expand_dec (remainder, tem);
3651 emit_label (label);
3653 return gen_lowpart (mode, rem_flag ? remainder : quotient);
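/* Both ROUND_* paths compute a truncating divmod and then round the
   quotient to nearest, halves away from zero.  The unsigned case, in
   scalar terms:

     q = op0 / op1;  r = op0 % op1;
     if (r > (op1 - 1) / 2)
       q += 1, r -= op1;

   The signed case makes the adjustment branch-free once the midpoint
   test 2 * abs (r) >= abs (op1) has been done:

     mask = (op0 ^ op1) >> (size - 1);   -- 0 or -1
     q += (mask ^ 1) - mask;             -- +1 if the signs agree, else -1
     r -= (mask ^ op1) - mask;           -- +op1 or -op1 accordingly

   E.g. 6 / 4 gives q = 1, r = 2; 2 > (4 - 1) / 2, so q becomes 2.  */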
3655 default:
3656 abort ();
3659 if (quotient == 0)
3661 if (target && GET_MODE (target) != compute_mode)
3662 target = 0;
3664 if (rem_flag)
3666 /* Try to produce the remainder directly without a library call. */
3667 remainder = sign_expand_binop (compute_mode, umod_optab, smod_optab,
3668 op0, op1, target,
3669 unsignedp, OPTAB_WIDEN);
3670 if (remainder == 0)
3672 /* No luck there. Can we do remainder and divide at once
3673 without a library call? */
3674 remainder = gen_reg_rtx (compute_mode);
3675 if (! expand_twoval_binop ((unsignedp
3676 ? udivmod_optab
3677 : sdivmod_optab),
3678 op0, op1,
3679 NULL_RTX, remainder, unsignedp))
3680 remainder = 0;
3683 if (remainder)
3684 return gen_lowpart (mode, remainder);
3687 /* Produce the quotient.  Try a quotient insn, but not a library call.
3688 If we have a divmod in this mode, use it in preference to widening
3689 the div (for this test we assume it will not fail).  Note that optab2
3690 is set to whichever of the two optabs the call below will use.  */
3691 quotient
3692 = sign_expand_binop (compute_mode, udiv_optab, sdiv_optab,
3693 op0, op1, rem_flag ? NULL_RTX : target,
3694 unsignedp,
3695 ((optab2->handlers[(int) compute_mode].insn_code
3696 != CODE_FOR_nothing)
3697 ? OPTAB_DIRECT : OPTAB_WIDEN));
3699 if (quotient == 0)
3701 /* No luck there. Try a quotient-and-remainder insn,
3702 keeping the quotient alone. */
3703 quotient = gen_reg_rtx (compute_mode);
3704 if (! expand_twoval_binop (unsignedp ? udivmod_optab : sdivmod_optab,
3705 op0, op1,
3706 quotient, NULL_RTX, unsignedp))
3708 quotient = 0;
3709 if (! rem_flag)
3710 /* Still no luck. If we are not computing the remainder,
3711 use a library call for the quotient. */
3712 quotient = sign_expand_binop (compute_mode,
3713 udiv_optab, sdiv_optab,
3714 op0, op1, target,
3715 unsignedp, OPTAB_LIB_WIDEN);
3720 if (rem_flag)
3722 if (target && GET_MODE (target) != compute_mode)
3723 target = 0;
3725 if (quotient == 0)
3726 /* No divide instruction either. Use library for remainder. */
3727 remainder = sign_expand_binop (compute_mode, umod_optab, smod_optab,
3728 op0, op1, target,
3729 unsignedp, OPTAB_LIB_WIDEN);
3730 else
3732 /* We divided. Now finish doing X - Y * (X / Y). */
3733 remainder = expand_mult (compute_mode, quotient, op1,
3734 NULL_RTX, unsignedp);
3735 remainder = expand_binop (compute_mode, sub_optab, op0,
3736 remainder, target, unsignedp,
3737 OPTAB_LIB_WIDEN);
3741 return gen_lowpart (mode, rem_flag ? remainder : quotient);
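/* Note the order of the fallbacks above: a direct mod (or div) insn
   first, then a combined divmod insn, and only then a library call.
   When only a quotient could be produced, the remainder is recovered
   from the identity op0 % op1 == op0 - (op0 / op1) * op1, which holds
   for both the signed (truncating) and the unsigned case.  */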
3744 /* Return a tree node with data type TYPE, describing the value of X.
3745 Usually this is an RTL_EXPR, if there is no obviously better choice.
3746 X may be an expression; however, we only support those expressions
3747 generated by loop.c.  */
3749 tree
3750 make_tree (type, x)
3751 tree type;
3752 rtx x;
3754 tree t;
3756 switch (GET_CODE (x))
3758 case CONST_INT:
3759 t = build_int_2 (INTVAL (x),
3760 TREE_UNSIGNED (type) || INTVAL (x) >= 0 ? 0 : -1);
3761 TREE_TYPE (t) = type;
3762 return t;
3764 case CONST_DOUBLE:
3765 if (GET_MODE (x) == VOIDmode)
3767 t = build_int_2 (CONST_DOUBLE_LOW (x), CONST_DOUBLE_HIGH (x));
3768 TREE_TYPE (t) = type;
3770 else
3772 REAL_VALUE_TYPE d;
3774 REAL_VALUE_FROM_CONST_DOUBLE (d, x);
3775 t = build_real (type, d);
3778 return t;
3780 case PLUS:
3781 return fold (build (PLUS_EXPR, type, make_tree (type, XEXP (x, 0)),
3782 make_tree (type, XEXP (x, 1))));
3784 case MINUS:
3785 return fold (build (MINUS_EXPR, type, make_tree (type, XEXP (x, 0)),
3786 make_tree (type, XEXP (x, 1))));
3788 case NEG:
3789 return fold (build1 (NEGATE_EXPR, type, make_tree (type, XEXP (x, 0))));
3791 case MULT:
3792 return fold (build (MULT_EXPR, type, make_tree (type, XEXP (x, 0)),
3793 make_tree (type, XEXP (x, 1))));
3795 case ASHIFT:
3796 return fold (build (LSHIFT_EXPR, type, make_tree (type, XEXP (x, 0)),
3797 make_tree (type, XEXP (x, 1))));
3799 case LSHIFTRT:
3800 return fold (convert (type,
3801 build (RSHIFT_EXPR, unsigned_type (type),
3802 make_tree (unsigned_type (type),
3803 XEXP (x, 0)),
3804 make_tree (type, XEXP (x, 1)))));
3806 case ASHIFTRT:
3807 return fold (convert (type,
3808 build (RSHIFT_EXPR, signed_type (type),
3809 make_tree (signed_type (type), XEXP (x, 0)),
3810 make_tree (type, XEXP (x, 1)))));
3812 case DIV:
3813 if (TREE_CODE (type) != REAL_TYPE)
3814 t = signed_type (type);
3815 else
3816 t = type;
3818 return fold (convert (type,
3819 build (TRUNC_DIV_EXPR, t,
3820 make_tree (t, XEXP (x, 0)),
3821 make_tree (t, XEXP (x, 1)))));
3822 case UDIV:
3823 t = unsigned_type (type);
3824 return fold (convert (type,
3825 build (TRUNC_DIV_EXPR, t,
3826 make_tree (t, XEXP (x, 0)),
3827 make_tree (t, XEXP (x, 1)))));
3828 default:
3829 t = make_node (RTL_EXPR);
3830 TREE_TYPE (t) = type;
3831 RTL_EXPR_RTL (t) = x;
3832 /* There are no insns to be output
3833 when this rtl_expr is used. */
3834 RTL_EXPR_SEQUENCE (t) = 0;
3835 return t;
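/* For illustration, a loop.c expression such as
   (plus:SI (mult:SI (reg:SI 65) (const_int 4)) (const_int 2))
   is rebuilt here as a PLUS_EXPR of a MULT_EXPR: the CONST_INT case
   converts the constants, the default case wraps the register in an
   RTL_EXPR, and fold gets a chance to simplify the result.  */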
3839 /* Return an rtx representing the value of X * MULT + ADD.
3840 TARGET is a suggestion for where to store the result (an rtx).
3841 MODE is the machine mode for the computation.
3842 X and MULT must have mode MODE.  ADD may have a different mode;
3843 if it has VOIDmode (as a constant does), it is taken to be in MODE.
3844 UNSIGNEDP is non-zero to do unsigned multiplication.
3845 This may emit insns. */
3847 rtx
3848 expand_mult_add (x, target, mult, add, mode, unsignedp)
3849 rtx x, target, mult, add;
3850 enum machine_mode mode;
3851 int unsignedp;
3853 tree type = type_for_mode (mode, unsignedp);
3854 tree add_type = (GET_MODE (add) == VOIDmode
3855 ? type : type_for_mode (GET_MODE (add), unsignedp));
3856 tree result = fold (build (PLUS_EXPR, type,
3857 fold (build (MULT_EXPR, type,
3858 make_tree (type, x),
3859 make_tree (type, mult))),
3860 make_tree (add_type, add)));
3862 return expand_expr (result, target, VOIDmode, 0);
3865 /* Compute the logical-and of OP0 and OP1, storing it in TARGET
3866 and returning TARGET.
3868 If TARGET is 0, a pseudo-register or constant is returned. */
3870 rtx
3871 expand_and (op0, op1, target)
3872 rtx op0, op1, target;
3874 enum machine_mode mode = VOIDmode;
3875 rtx tem;
3877 if (GET_MODE (op0) != VOIDmode)
3878 mode = GET_MODE (op0);
3879 else if (GET_MODE (op1) != VOIDmode)
3880 mode = GET_MODE (op1);
3882 if (mode != VOIDmode)
3883 tem = expand_binop (mode, and_optab, op0, op1, target, 0, OPTAB_LIB_WIDEN);
3884 else if (GET_CODE (op0) == CONST_INT && GET_CODE (op1) == CONST_INT)
3885 tem = GEN_INT (INTVAL (op0) & INTVAL (op1));
3886 else
3887 abort ();
3889 if (target == 0)
3890 target = tem;
3891 else if (tem != target)
3892 emit_move_insn (target, tem);
3893 return target;
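/* The VOIDmode case arises when both operands are CONST_INTs, which
   carry no mode of their own; e.g. expand_and (GEN_INT (0x123),
   GEN_INT (0xff), NULL_RTX) folds directly to GEN_INT (0x23),
   emitting no insns.  */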
3896 /* Emit a store-flag instruction for comparison CODE on OP0 and OP1,
3897 storing the result in TARGET.  Normally return TARGET.
3898 Return 0 if that cannot be done.
3900 MODE is the mode to use for OP0 and OP1 should they be CONST_INTs. If
3901 it is VOIDmode, they cannot both be CONST_INT.
3903 UNSIGNEDP is for the case where we have to widen the operands
3904 to perform the operation. It says to use zero-extension.
3906 NORMALIZEP is 1 if we should convert the result to be either zero
3907 or one.  NORMALIZEP is -1 if we should convert the result to be
3908 either zero or -1.  If NORMALIZEP is zero, the result will be left
3909 "raw" out of the scc insn. */
3911 rtx
3912 emit_store_flag (target, code, op0, op1, mode, unsignedp, normalizep)
3913 rtx target;
3914 enum rtx_code code;
3915 rtx op0, op1;
3916 enum machine_mode mode;
3917 int unsignedp;
3918 int normalizep;
3920 rtx subtarget;
3921 enum insn_code icode;
3922 enum machine_mode compare_mode;
3923 enum machine_mode target_mode = GET_MODE (target);
3924 rtx tem;
3925 rtx last = get_last_insn ();
3926 rtx pattern, comparison;
3928 /* If one operand is constant, make it the second one. Only do this
3929 if the other operand is not constant as well. */
3931 if ((CONSTANT_P (op0) && ! CONSTANT_P (op1))
3932 || (GET_CODE (op0) == CONST_INT && GET_CODE (op1) != CONST_INT))
3934 tem = op0;
3935 op0 = op1;
3936 op1 = tem;
3937 code = swap_condition (code);
3940 if (mode == VOIDmode)
3941 mode = GET_MODE (op0);
3943 /* For some comparisons with 1 and -1, we can convert this to
3944 comparisons with zero. This will often produce more opportunities for
3945 store-flag insns. */
3947 switch (code)
3949 case LT:
3950 if (op1 == const1_rtx)
3951 op1 = const0_rtx, code = LE;
3952 break;
3953 case LE:
3954 if (op1 == constm1_rtx)
3955 op1 = const0_rtx, code = LT;
3956 break;
3957 case GE:
3958 if (op1 == const1_rtx)
3959 op1 = const0_rtx, code = GT;
3960 break;
3961 case GT:
3962 if (op1 == constm1_rtx)
3963 op1 = const0_rtx, code = GE;
3964 break;
3965 case GEU:
3966 if (op1 == const1_rtx)
3967 op1 = const0_rtx, code = NE;
3968 break;
3969 case LTU:
3970 if (op1 == const1_rtx)
3971 op1 = const0_rtx, code = EQ;
3972 break;
3973 default:
3974 break;
3977 /* From now on, we won't change CODE, so set ICODE now. */
3978 icode = setcc_gen_code[(int) code];
3980 /* If this is A < 0 or A >= 0, we can do this by taking the one's
3981 complement of A (for GE) and shifting the sign bit to the low bit. */
3982 if (op1 == const0_rtx && (code == LT || code == GE)
3983 && GET_MODE_CLASS (mode) == MODE_INT
3984 && (normalizep || STORE_FLAG_VALUE == 1
3985 || (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
3986 && ((STORE_FLAG_VALUE & GET_MODE_MASK (mode))
3987 == (HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1)))))
3989 subtarget = target;
3991 /* If the result is to be wider than OP0, it is best to convert it
3992 first. If it is to be narrower, it is *incorrect* to convert it
3993 first. */
3994 if (GET_MODE_SIZE (target_mode) > GET_MODE_SIZE (mode))
3996 op0 = protect_from_queue (op0, 0);
3997 op0 = convert_modes (target_mode, mode, op0, 0);
3998 mode = target_mode;
4001 if (target_mode != mode)
4002 subtarget = 0;
4004 if (code == GE)
4005 op0 = expand_unop (mode, one_cmpl_optab, op0,
4006 ((STORE_FLAG_VALUE == 1 || normalizep)
4007 ? 0 : subtarget), 0);
4009 if (STORE_FLAG_VALUE == 1 || normalizep)
4010 /* If we are supposed to produce a 0/1 value, we want to do
4011 a logical shift from the sign bit to the low-order bit; for
4012 a -1/0 value, we do an arithmetic shift. */
4013 op0 = expand_shift (RSHIFT_EXPR, mode, op0,
4014 size_int (GET_MODE_BITSIZE (mode) - 1),
4015 subtarget, normalizep != -1);
4017 if (mode != target_mode)
4018 op0 = convert_modes (target_mode, mode, op0, 0);
4020 return op0;
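/* In scalar terms, for a 32-bit mode the special case above reduces
   to

     (A < 0)   ==  (unsigned) A >> 31
     (A >= 0)  ==  (unsigned) ~A >> 31

   for a 0/1 result; for a 0/-1 result (normalizep == -1) the final
   logical shift becomes an arithmetic one.  */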
4023 if (icode != CODE_FOR_nothing)
4025 /* We think we may be able to do this with a scc insn. Emit the
4026 comparison and then the scc insn.
4028 compare_from_rtx may call emit_queue, which would be deleted below
4029 if the scc insn fails. So call it ourselves before setting LAST. */
4031 emit_queue ();
4032 last = get_last_insn ();
4034 comparison
4035 = compare_from_rtx (op0, op1, code, unsignedp, mode, NULL_RTX, 0);
4036 if (GET_CODE (comparison) == CONST_INT)
4037 return (comparison == const0_rtx ? const0_rtx
4038 : normalizep == 1 ? const1_rtx
4039 : normalizep == -1 ? constm1_rtx
4040 : const_true_rtx);
4042 /* If the code of COMPARISON doesn't match CODE, something is
4043 wrong; we can no longer be sure that we have the operation.
4044 We could handle this case, but it should not happen. */
4046 if (GET_CODE (comparison) != code)
4047 abort ();
4049 /* Get a reference to the target in the proper mode for this insn. */
4050 compare_mode = insn_operand_mode[(int) icode][0];
4051 subtarget = target;
4052 if (preserve_subexpressions_p ()
4053 || ! (*insn_operand_predicate[(int) icode][0]) (subtarget, compare_mode))
4054 subtarget = gen_reg_rtx (compare_mode);
4056 pattern = GEN_FCN (icode) (subtarget);
4057 if (pattern)
4059 emit_insn (pattern);
4061 /* If we are converting to a wider mode, first convert to
4062 TARGET_MODE, then normalize. This produces better combining
4063 opportunities on machines that have a SIGN_EXTRACT when we are
4064 testing a single bit. This mostly benefits the 68k.
4066 If STORE_FLAG_VALUE does not have the sign bit set when
4067 interpreted in COMPARE_MODE, we can do this conversion as
4068 unsigned, which is usually more efficient. */
4069 if (GET_MODE_SIZE (target_mode) > GET_MODE_SIZE (compare_mode))
4071 convert_move (target, subtarget,
4072 (GET_MODE_BITSIZE (compare_mode)
4073 <= HOST_BITS_PER_WIDE_INT)
4074 && 0 == (STORE_FLAG_VALUE
4075 & ((HOST_WIDE_INT) 1
4076 << (GET_MODE_BITSIZE (compare_mode) -1))));
4077 op0 = target;
4078 compare_mode = target_mode;
4080 else
4081 op0 = subtarget;
4083 /* If we want to keep subexpressions around, don't reuse our
4084 last target. */
4086 if (preserve_subexpressions_p ())
4087 subtarget = 0;
4089 /* Now normalize to the proper value in COMPARE_MODE. Sometimes
4090 we don't have to do anything. */
4091 if (normalizep == 0 || normalizep == STORE_FLAG_VALUE)
4093 else if (normalizep == - STORE_FLAG_VALUE)
4094 op0 = expand_unop (compare_mode, neg_optab, op0, subtarget, 0);
4096 /* We don't want to use STORE_FLAG_VALUE < 0 below since this
4097 makes it hard to use a value of just the sign bit due to
4098 ANSI integer constant typing rules. */
4099 else if (GET_MODE_BITSIZE (compare_mode) <= HOST_BITS_PER_WIDE_INT
4100 && (STORE_FLAG_VALUE
4101 & ((HOST_WIDE_INT) 1
4102 << (GET_MODE_BITSIZE (compare_mode) - 1))))
4103 op0 = expand_shift (RSHIFT_EXPR, compare_mode, op0,
4104 size_int (GET_MODE_BITSIZE (compare_mode) - 1),
4105 subtarget, normalizep == 1);
4106 else if (STORE_FLAG_VALUE & 1)
4108 op0 = expand_and (op0, const1_rtx, subtarget);
4109 if (normalizep == -1)
4110 op0 = expand_unop (compare_mode, neg_optab, op0, op0, 0);
4112 else
4113 abort ();
4115 /* If we were converting to a smaller mode, do the
4116 conversion now. */
4117 if (target_mode != compare_mode)
4119 convert_move (target, op0, 0);
4120 return target;
4122 else
4123 return op0;
4127 delete_insns_since (last);
4129 /* If expensive optimizations, use different pseudo registers for each
4130 insn, instead of reusing the same pseudo. This leads to better CSE,
4131 but slows down the compiler, since there are more pseudos.  */
4132 subtarget = (!flag_expensive_optimizations
4133 && (target_mode == mode)) ? target : NULL_RTX;
4135 /* If we reached here, we can't do this with a scc insn. However, there
4136 are some comparisons that can be done directly. For example, if
4137 this is an equality comparison of integers, we can try to exclusive-or
4138 (or subtract) the two operands and use a recursive call to try the
4139 comparison with zero. Don't do any of these cases if branches are
4140 very cheap. */
4142 if (BRANCH_COST > 0
4143 && GET_MODE_CLASS (mode) == MODE_INT && (code == EQ || code == NE)
4144 && op1 != const0_rtx)
4146 tem = expand_binop (mode, xor_optab, op0, op1, subtarget, 1,
4147 OPTAB_WIDEN);
4149 if (tem == 0)
4150 tem = expand_binop (mode, sub_optab, op0, op1, subtarget, 1,
4151 OPTAB_WIDEN);
4152 if (tem != 0)
4153 tem = emit_store_flag (target, code, tem, const0_rtx,
4154 mode, unsignedp, normalizep);
4155 if (tem == 0)
4156 delete_insns_since (last);
4157 return tem;
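/* This works because A == B exactly when (A ^ B) == 0 (or, lacking
   an xor, when A - B == 0), so one xor or sub plus a recursive
   store-flag against zero stands in for the general comparison.  */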
4160 /* Some other cases we can do are EQ, NE, LE, and GT comparisons with
4161 the constant zero. Reject all other comparisons at this point. Only
4162 do LE and GT if branches are expensive since they are expensive on
4163 2-operand machines. */
4165 if (BRANCH_COST == 0
4166 || GET_MODE_CLASS (mode) != MODE_INT || op1 != const0_rtx
4167 || (code != EQ && code != NE
4168 && (BRANCH_COST <= 1 || (code != LE && code != GT))))
4169 return 0;
4171 /* See what we need to return. We can only return a 1, -1, or the
4172 sign bit. */
4174 if (normalizep == 0)
4176 if (STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
4177 normalizep = STORE_FLAG_VALUE;
4179 else if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
4180 && ((STORE_FLAG_VALUE & GET_MODE_MASK (mode))
4181 == (HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1)))
4183 else
4184 return 0;
4187 /* Try to put the result of the comparison in the sign bit. Assume we can't
4188 do the necessary operation below. */
4190 tem = 0;
4192 /* To see if A <= 0, compute (A | (A - 1)). A <= 0 iff that result has
4193 the sign bit set. */
4195 if (code == LE)
4197 /* This is destructive, so SUBTARGET can't be OP0. */
4198 if (rtx_equal_p (subtarget, op0))
4199 subtarget = 0;
4201 tem = expand_binop (mode, sub_optab, op0, const1_rtx, subtarget, 0,
4202 OPTAB_WIDEN);
4203 if (tem)
4204 tem = expand_binop (mode, ior_optab, op0, tem, subtarget, 0,
4205 OPTAB_WIDEN);
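/* Why (A | (A - 1)) works: if A > 0, both A and A - 1 are
   non-negative, so the sign bit of the IOR is clear; if A == 0,
   A - 1 is -1 and the IOR is -1; and if A < 0 the sign bit of A
   alone sets it.  So the sign bit is set exactly when A <= 0.  */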
4208 /* To see if A > 0, compute (((signed) A) >> BITS) - A, where BITS is the
4209 number of bits in the mode of OP0, minus one.  */
4211 if (code == GT)
4213 if (rtx_equal_p (subtarget, op0))
4214 subtarget = 0;
4216 tem = expand_shift (RSHIFT_EXPR, mode, op0,
4217 size_int (GET_MODE_BITSIZE (mode) - 1),
4218 subtarget, 0);
4219 tem = expand_binop (mode, sub_optab, tem, op0, subtarget, 0,
4220 OPTAB_WIDEN);
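/* Here TEM is the sign of A spread across the word (0 or -1), so
   TEM - A is -A when A >= 0 and -(A + 1) when A < 0; its sign bit is
   therefore set exactly when A > 0.  */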
4223 if (code == EQ || code == NE)
4225 /* For EQ or NE, one way to do the comparison is to apply an operation
4226 that converts the operand into a positive number if it is non-zero
4227 or zero if it was originally zero. Then, for EQ, we subtract 1 and
4228 for NE we negate. This puts the result in the sign bit. Then we
4229 normalize with a shift, if needed.
4231 Two operations that can do the above actions are ABS and FFS, so try
4232 them. If that doesn't work, and MODE is smaller than a full word,
4233 we can use zero-extension to the wider mode (an unsigned conversion)
4234 as the operation. */
4236 if (abs_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
4237 tem = expand_unop (mode, abs_optab, op0, subtarget, 1);
4238 else if (ffs_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
4239 tem = expand_unop (mode, ffs_optab, op0, subtarget, 1);
4240 else if (GET_MODE_SIZE (mode) < UNITS_PER_WORD)
4242 op0 = protect_from_queue (op0, 0);
4243 tem = convert_modes (word_mode, mode, op0, 1);
4244 mode = word_mode;
4247 if (tem != 0)
4249 if (code == EQ)
4250 tem = expand_binop (mode, sub_optab, tem, const1_rtx, subtarget,
4251 0, OPTAB_WIDEN);
4252 else
4253 tem = expand_unop (mode, neg_optab, tem, subtarget, 0);
4256 /* If we couldn't do it that way, for NE we can "or" the two's complement
4257 of the value with itself. For EQ, we take the one's complement of
4258 that "or", which is an extra insn, so we only handle EQ if branches
4259 are expensive. */
4261 if (tem == 0 && (code == NE || BRANCH_COST > 1))
4263 if (rtx_equal_p (subtarget, op0))
4264 subtarget = 0;
4266 tem = expand_unop (mode, neg_optab, op0, subtarget, 0);
4267 tem = expand_binop (mode, ior_optab, tem, op0, subtarget, 0,
4268 OPTAB_WIDEN);
4270 if (tem && code == EQ)
4271 tem = expand_unop (mode, one_cmpl_optab, tem, subtarget, 0);
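/* The NEG/IOR form works because for A > 0 the negation is negative,
   for A < 0 A itself is, and for A == 0 the IOR is zero: the sign
   bit of (-A | A) is set exactly when A != 0.  The extra one's
   complement then flips that for EQ.  */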
4275 if (tem && normalizep)
4276 tem = expand_shift (RSHIFT_EXPR, mode, tem,
4277 size_int (GET_MODE_BITSIZE (mode) - 1),
4278 subtarget, normalizep == 1);
4280 if (tem)
4282 if (GET_MODE (tem) != target_mode)
4284 convert_move (target, tem, 0);
4285 tem = target;
4287 else if (!subtarget)
4289 emit_move_insn (target, tem);
4290 tem = target;
4293 else
4294 delete_insns_since (last);
4296 return tem;
4299 /* Like emit_store_flag, but always succeeds. */
4301 rtx
4302 emit_store_flag_force (target, code, op0, op1, mode, unsignedp, normalizep)
4303 rtx target;
4304 enum rtx_code code;
4305 rtx op0, op1;
4306 enum machine_mode mode;
4307 int unsignedp;
4308 int normalizep;
4310 rtx tem, label;
4312 /* First see if emit_store_flag can do the job. */
4313 tem = emit_store_flag (target, code, op0, op1, mode, unsignedp, normalizep);
4314 if (tem != 0)
4315 return tem;
4317 if (normalizep == 0)
4318 normalizep = 1;
4320 /* If this failed, we have to do this with set/compare/jump/set code. */
4322 if (GET_CODE (target) != REG
4323 || reg_mentioned_p (target, op0) || reg_mentioned_p (target, op1))
4324 target = gen_reg_rtx (GET_MODE (target));
4326 emit_move_insn (target, const1_rtx);
4327 tem = compare_from_rtx (op0, op1, code, unsignedp, mode, NULL_RTX, 0);
4328 if (GET_CODE (tem) == CONST_INT)
4329 return tem;
4331 label = gen_label_rtx ();
4332 if (bcc_gen_fctn[(int) code] == 0)
4333 abort ();
4335 emit_jump_insn ((*bcc_gen_fctn[(int) code]) (label));
4336 emit_move_insn (target, const0_rtx);
4337 emit_label (label);
4339 return target;
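/* The fallback is the obvious branching sequence, roughly

     target = 1;
     if (! (op0 <code> op1))
       target = 0;

   which is why TARGET must be a register mentioned in neither
   operand: it is written before the comparison is decided.  */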
4342 /* Perform a possibly multi-word comparison and conditional jump to LABEL
4343 if ARG1 OP ARG2 is true, where ARG1 and ARG2 are of mode MODE.
4345 The algorithm is based on the code in expr.c:do_jump.
4347 Note that this does not perform a general comparison.  Only the variants
4348 generated within expmed.c are correctly handled; others abort (but could
4349 be handled if needed).  */
4351 static void
4352 do_cmp_and_jump (arg1, arg2, op, mode, label)
4353 rtx arg1, arg2, label;
4354 enum rtx_code op;
4355 enum machine_mode mode;
4357 /* If this mode is an integer too wide to compare properly,
4358 compare word by word. Rely on cse to optimize constant cases. */
4360 if (GET_MODE_CLASS (mode) == MODE_INT && !can_compare_p (mode))
4362 rtx label2 = gen_label_rtx ();
4364 switch (op)
4366 case LTU:
4367 do_jump_by_parts_greater_rtx (mode, 1, arg2, arg1, label2, label);
4368 break;
4370 case LEU:
4371 do_jump_by_parts_greater_rtx (mode, 1, arg1, arg2, label, label2);
4372 break;
4374 case LT:
4375 do_jump_by_parts_greater_rtx (mode, 0, arg2, arg1, label2, label);
4376 break;
4378 case GT:
4379 do_jump_by_parts_greater_rtx (mode, 0, arg1, arg2, label2, label);
4380 break;
4382 case GE:
4383 do_jump_by_parts_greater_rtx (mode, 0, arg2, arg1, label, label2);
4384 break;
4386 /* do_jump_by_parts_equality_rtx compares with zero.  Luckily
4387 those are the only equality operations we do.  */
4388 case EQ:
4389 if (arg2 != const0_rtx || mode != GET_MODE (arg1))
4390 abort ();
4391 do_jump_by_parts_equality_rtx (arg1, label2, label);
4392 break;
4394 case NE:
4395 if (arg2 != const0_rtx || mode != GET_MODE (arg1))
4396 abort ();
4397 do_jump_by_parts_equality_rtx (arg1, label, label2);
4398 break;
4400 default:
4401 abort ();
4404 emit_label (label2);
4406 else
4408 emit_cmp_insn (arg1, arg2, op, NULL_RTX, mode, 0, 0);
4409 if (bcc_gen_fctn[(int) op] == 0)
4410 abort ();
4411 emit_jump_insn ((*bcc_gen_fctn[(int) op]) (label));