gcc/expmed.c
/* Medium-level subroutines: convert bit-field store and extract
   and shifts, multiplies and divides to rtl instructions.
   Copyright (C) 1987, 88, 89, 92-97, 1998 Free Software Foundation, Inc.

This file is part of GNU CC.

GNU CC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.

GNU CC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GNU CC; see the file COPYING.  If not, write to
the Free Software Foundation, 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA.  */
#include "config.h"
#include "system.h"
#include "rtl.h"
#include "tree.h"
#include "flags.h"
#include "insn-flags.h"
#include "insn-codes.h"
#include "insn-config.h"
#include "expr.h"
#include "real.h"
#include "recog.h"
static void store_fixed_bit_field PROTO((rtx, int, int, int, rtx, int));
static void store_split_bit_field PROTO((rtx, int, int, rtx, int));
static rtx extract_fixed_bit_field PROTO((enum machine_mode, rtx, int,
                                          int, int, rtx, int, int));
static rtx mask_rtx PROTO((enum machine_mode, int,
                           int, int));
static rtx lshift_value PROTO((enum machine_mode, rtx,
                               int, int));
static rtx extract_split_bit_field PROTO((rtx, int, int, int, int));
static void do_cmp_and_jump PROTO((rtx, rtx, enum rtx_code,
                                   enum machine_mode, rtx));

#define CEIL(x,y) (((x) + (y) - 1) / (y))
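/* CEIL rounds up: e.g. CEIL (13, 8) == 2, since thirteen bits need
   two 8-bit units.  */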
/* Non-zero means divides or modulus operations are relatively cheap for
   powers of two, so don't use branches; emit the operation instead.
   Usually, this will mean that the MD file will emit non-branch
   sequences.  */

static int sdiv_pow2_cheap, smod_pow2_cheap;
#ifndef SLOW_UNALIGNED_ACCESS
#define SLOW_UNALIGNED_ACCESS STRICT_ALIGNMENT
#endif

/* For compilers that support multiple targets with different word sizes,
   MAX_BITS_PER_WORD contains the biggest value of BITS_PER_WORD.  An example
   is the H8/300(H) compiler.  */

#ifndef MAX_BITS_PER_WORD
#define MAX_BITS_PER_WORD BITS_PER_WORD
#endif
/* Cost of various pieces of RTL.  Note that some of these are indexed by
   shift count and some by mode.  */
static int add_cost, negate_cost, zero_cost;
static int shift_cost[MAX_BITS_PER_WORD];
static int shiftadd_cost[MAX_BITS_PER_WORD];
static int shiftsub_cost[MAX_BITS_PER_WORD];
static int mul_cost[NUM_MACHINE_MODES];
static int div_cost[NUM_MACHINE_MODES];
static int mul_widen_cost[NUM_MACHINE_MODES];
static int mul_highpart_cost[NUM_MACHINE_MODES];
void
init_expmed ()
{
  char *free_point;
  /* This is "some random pseudo register" for purposes of calling recog
     to see what insns exist.  */
  rtx reg = gen_rtx_REG (word_mode, 10000);
  rtx shift_insn, shiftadd_insn, shiftsub_insn;
  int dummy;
  int m;
  enum machine_mode mode, wider_mode;

  start_sequence ();

  /* Since we are on the permanent obstack, we must be sure we save this
     spot AFTER we call start_sequence, since it will reuse the rtl it
     makes.  */
  free_point = (char *) oballoc (0);

  reg = gen_rtx (REG, word_mode, 10000);
  zero_cost = rtx_cost (const0_rtx, 0);
  add_cost = rtx_cost (gen_rtx_PLUS (word_mode, reg, reg), SET);

  shift_insn = emit_insn (gen_rtx_SET (VOIDmode, reg,
                                       gen_rtx_ASHIFT (word_mode, reg,
                                                       const0_rtx)));

  shiftadd_insn
    = emit_insn (gen_rtx_SET (VOIDmode, reg,
                              gen_rtx_PLUS (word_mode,
                                            gen_rtx_MULT (word_mode,
                                                          reg, const0_rtx),
                                            reg)));

  shiftsub_insn
    = emit_insn (gen_rtx_SET (VOIDmode, reg,
                              gen_rtx_MINUS (word_mode,
                                             gen_rtx_MULT (word_mode,
                                                           reg, const0_rtx),
                                             reg)));

  init_recog ();

  shift_cost[0] = 0;
  shiftadd_cost[0] = shiftsub_cost[0] = add_cost;
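  /* Probe the target's insn patterns: for each count M, plug M (or 1<<M
     for the shift-add and shift-subtract skeletons built above) into the
     insn and ask recog whether the target recognizes it.  Counts the
     target cannot encode keep the sentinel cost of 32000.  */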
  for (m = 1; m < MAX_BITS_PER_WORD; m++)
    {
      shift_cost[m] = shiftadd_cost[m] = shiftsub_cost[m] = 32000;

      XEXP (SET_SRC (PATTERN (shift_insn)), 1) = GEN_INT (m);
      if (recog (PATTERN (shift_insn), shift_insn, &dummy) >= 0)
        shift_cost[m] = rtx_cost (SET_SRC (PATTERN (shift_insn)), SET);

      XEXP (XEXP (SET_SRC (PATTERN (shiftadd_insn)), 0), 1)
        = GEN_INT ((HOST_WIDE_INT) 1 << m);
      if (recog (PATTERN (shiftadd_insn), shiftadd_insn, &dummy) >= 0)
        shiftadd_cost[m] = rtx_cost (SET_SRC (PATTERN (shiftadd_insn)), SET);

      XEXP (XEXP (SET_SRC (PATTERN (shiftsub_insn)), 0), 1)
        = GEN_INT ((HOST_WIDE_INT) 1 << m);
      if (recog (PATTERN (shiftsub_insn), shiftsub_insn, &dummy) >= 0)
        shiftsub_cost[m] = rtx_cost (SET_SRC (PATTERN (shiftsub_insn)), SET);
    }
  negate_cost = rtx_cost (gen_rtx_NEG (word_mode, reg), SET);

  sdiv_pow2_cheap
    = (rtx_cost (gen_rtx_DIV (word_mode, reg, GEN_INT (32)), SET)
       <= 2 * add_cost);
  smod_pow2_cheap
    = (rtx_cost (gen_rtx_MOD (word_mode, reg, GEN_INT (32)), SET)
       <= 2 * add_cost);

  for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT);
       mode != VOIDmode;
       mode = GET_MODE_WIDER_MODE (mode))
    {
      reg = gen_rtx_REG (mode, 10000);
      div_cost[(int) mode] = rtx_cost (gen_rtx_UDIV (mode, reg, reg), SET);
      mul_cost[(int) mode] = rtx_cost (gen_rtx_MULT (mode, reg, reg), SET);
      wider_mode = GET_MODE_WIDER_MODE (mode);
      if (wider_mode != VOIDmode)
        {
          mul_widen_cost[(int) wider_mode]
            = rtx_cost (gen_rtx_MULT (wider_mode,
                                      gen_rtx_ZERO_EXTEND (wider_mode, reg),
                                      gen_rtx_ZERO_EXTEND (wider_mode, reg)),
                        SET);
          mul_highpart_cost[(int) mode]
            = rtx_cost (gen_rtx_TRUNCATE
                        (mode,
                         gen_rtx_LSHIFTRT
                         (wider_mode,
                          gen_rtx_MULT (wider_mode,
                                        gen_rtx_ZERO_EXTEND (wider_mode, reg),
                                        gen_rtx_ZERO_EXTEND (wider_mode, reg)),
                          GEN_INT (GET_MODE_BITSIZE (mode)))),
                        SET);
        }
    }

  /* Free the objects we just allocated.  */
  end_sequence ();
  obfree (free_point);
}
/* Return an rtx representing minus the value of X.
   MODE is the intended mode of the result,
   useful if X is a CONST_INT.  */

rtx
negate_rtx (mode, x)
     enum machine_mode mode;
     rtx x;
{
  rtx result = simplify_unary_operation (NEG, mode, x, mode);

  if (result == 0)
    result = expand_unop (mode, neg_optab, x, NULL_RTX, 0);

  return result;
}
/* Generate code to store value from rtx VALUE
   into a bit-field within structure STR_RTX
   containing BITSIZE bits starting at bit BITNUM.
   FIELDMODE is the machine-mode of the FIELD_DECL node for this field.
   ALIGN is the alignment that STR_RTX is known to have, measured in bytes.
   TOTAL_SIZE is the size of the structure in bytes, or -1 if varying.  */

/* ??? Note that there are two different ideas here for how
   to determine the size to count bits within, for a register.
   One is BITS_PER_WORD, and the other is the size of operand 3
   of the insv pattern.  (The latter assumes that an n-bit machine
   will be able to insert bit fields up to n bits wide.)
   It isn't certain that either of these is right.
   extract_bit_field has the same quandary.  */
rtx
store_bit_field (str_rtx, bitsize, bitnum, fieldmode, value, align, total_size)
     rtx str_rtx;
     register int bitsize;
     int bitnum;
     enum machine_mode fieldmode;
     rtx value;
     int align;
     int total_size;
{
  int unit = (GET_CODE (str_rtx) == MEM) ? BITS_PER_UNIT : BITS_PER_WORD;
  register int offset = bitnum / unit;
  register int bitpos = bitnum % unit;
  register rtx op0 = str_rtx;

  if (GET_CODE (str_rtx) == MEM && ! MEM_IN_STRUCT_P (str_rtx))
    abort ();
  /* Discount the part of the structure before the desired byte.
     We need to know how many bytes are safe to reference after it.  */
  if (total_size >= 0)
    total_size -= (bitpos / BIGGEST_ALIGNMENT
                   * (BIGGEST_ALIGNMENT / BITS_PER_UNIT));

  while (GET_CODE (op0) == SUBREG)
    {
      /* The following line once was done only if WORDS_BIG_ENDIAN,
         but I think that is a mistake.  WORDS_BIG_ENDIAN is
         meaningful at a much higher level; when structures are copied
         between memory and regs, the higher-numbered regs
         always get higher addresses.  */
      offset += SUBREG_WORD (op0);
      /* We used to adjust BITPOS here, but now we do the whole adjustment
         right after the loop.  */
      op0 = SUBREG_REG (op0);
    }

  /* If OP0 is a register, BITPOS must count within a word.
     But as we have it, it counts within whatever size OP0 now has.
     On a bigendian machine, these are not the same, so convert.  */
  if (BYTES_BIG_ENDIAN
      && GET_CODE (op0) != MEM
      && unit > GET_MODE_BITSIZE (GET_MODE (op0)))
    bitpos += unit - GET_MODE_BITSIZE (GET_MODE (op0));
  value = protect_from_queue (value, 0);

  if (flag_force_mem)
    value = force_not_mem (value);

  /* Note that the adjustment of BITPOS above has no effect on whether
     BITPOS is 0 in a REG bigger than a word.  */
  if (GET_MODE_SIZE (fieldmode) >= UNITS_PER_WORD
      && (GET_CODE (op0) != MEM
          || ! SLOW_UNALIGNED_ACCESS
          || (offset * BITS_PER_UNIT % bitsize == 0
              && align % GET_MODE_SIZE (fieldmode) == 0))
      && bitpos == 0 && bitsize == GET_MODE_BITSIZE (fieldmode))
    {
      /* Storing in a full-word or multi-word field in a register
         can be done with just SUBREG.  */
      if (GET_MODE (op0) != fieldmode)
        {
          if (GET_CODE (op0) == REG)
            op0 = gen_rtx_SUBREG (fieldmode, op0, offset);
          else
            op0 = change_address (op0, fieldmode,
                                  plus_constant (XEXP (op0, 0), offset));
        }
      emit_move_insn (op0, value);
      return value;
    }
  /* Storing an lsb-aligned field in a register
     can be done with a movestrict instruction.  */

  if (GET_CODE (op0) != MEM
      && (BYTES_BIG_ENDIAN ? bitpos + bitsize == unit : bitpos == 0)
      && bitsize == GET_MODE_BITSIZE (fieldmode)
      && (GET_MODE (op0) == fieldmode
          || (movstrict_optab->handlers[(int) fieldmode].insn_code
              != CODE_FOR_nothing)))
    {
      /* Get appropriate low part of the value being stored.  */
      if (GET_CODE (value) == CONST_INT || GET_CODE (value) == REG)
        value = gen_lowpart (fieldmode, value);
      else if (!(GET_CODE (value) == SYMBOL_REF
                 || GET_CODE (value) == LABEL_REF
                 || GET_CODE (value) == CONST))
        value = convert_to_mode (fieldmode, value, 0);

      if (GET_MODE (op0) == fieldmode)
        emit_move_insn (op0, value);
      else
        {
          int icode = movstrict_optab->handlers[(int) fieldmode].insn_code;
          if (! (*insn_operand_predicate[icode][1]) (value, fieldmode))
            value = copy_to_mode_reg (fieldmode, value);
          emit_insn (GEN_FCN (icode)
                     (gen_rtx_SUBREG (fieldmode, op0, offset), value));
        }
      return value;
    }
  /* Handle fields bigger than a word.  */

  if (bitsize > BITS_PER_WORD)
    {
      /* Here we transfer the words of the field
         in the order least significant first.
         This is because the most significant word is the one which may
         be less than full.
         However, only do that if the value is not BLKmode.  */

      int backwards = WORDS_BIG_ENDIAN && fieldmode != BLKmode;

      int nwords = (bitsize + (BITS_PER_WORD - 1)) / BITS_PER_WORD;
      int i;

      /* This is the mode we must force value to, so that there will be enough
         subwords to extract.  Note that fieldmode will often (always?) be
         VOIDmode, because that is what store_field uses to indicate that this
         is a bit field, but passing VOIDmode to operand_subword_force will
         result in an abort.  */
      fieldmode = mode_for_size (nwords * BITS_PER_WORD, MODE_INT, 0);

      for (i = 0; i < nwords; i++)
        {
          /* If I is 0, use the low-order word in both field and target;
             if I is 1, use the next to lowest word; and so on.  */
          int wordnum = (backwards ? nwords - i - 1 : i);
          int bit_offset = (backwards
                            ? MAX (bitsize - (i + 1) * BITS_PER_WORD, 0)
                            : i * BITS_PER_WORD);
          store_bit_field (op0, MIN (BITS_PER_WORD,
                                     bitsize - i * BITS_PER_WORD),
                           bitnum + bit_offset, word_mode,
                           operand_subword_force (value, wordnum,
                                                  (GET_MODE (value) == VOIDmode
                                                   ? fieldmode
                                                   : GET_MODE (value))),
                           align, total_size);
        }
      return value;
    }
  /* From here on we can assume that the field to be stored in is
     a full-word (whatever type that is), since it is shorter than a word.  */

  /* OFFSET is the number of words or bytes (UNIT says which)
     from STR_RTX to the first word or byte containing part of the field.  */

  if (GET_CODE (op0) == REG)
    {
      if (offset != 0
          || GET_MODE_SIZE (GET_MODE (op0)) > UNITS_PER_WORD)
        op0 = gen_rtx_SUBREG (TYPE_MODE (type_for_size (BITS_PER_WORD, 0)),
                              op0, offset);
      offset = 0;
    }
  else
    {
      op0 = protect_from_queue (op0, 1);
    }

  /* If VALUE is a floating-point mode, access it as an integer of the
     corresponding size.  This can occur on a machine with 64 bit registers
     that uses SFmode for float.  This can also occur for unaligned float
     structure fields.  */
  if (GET_MODE_CLASS (GET_MODE (value)) == MODE_FLOAT)
    {
      if (GET_CODE (value) != REG)
        value = copy_to_reg (value);
      value = gen_rtx_SUBREG (word_mode, value, 0);
    }

  /* Now OFFSET is nonzero only if OP0 is memory
     and is therefore always measured in bytes.  */
#ifdef HAVE_insv
  if (HAVE_insv
      && GET_MODE (value) != BLKmode
      && !(bitsize == 1 && GET_CODE (value) == CONST_INT)
      /* Ensure insv's size is wide enough for this field.  */
      && (GET_MODE_BITSIZE (insn_operand_mode[(int) CODE_FOR_insv][3])
          >= bitsize)
      && ! ((GET_CODE (op0) == REG || GET_CODE (op0) == SUBREG)
            && (bitsize + bitpos
                > GET_MODE_BITSIZE (insn_operand_mode[(int) CODE_FOR_insv][3]))))
    {
      int xbitpos = bitpos;
      rtx value1;
      rtx xop0 = op0;
      rtx last = get_last_insn ();
      rtx pat;
      enum machine_mode maxmode
        = insn_operand_mode[(int) CODE_FOR_insv][3];

      int save_volatile_ok = volatile_ok;
      volatile_ok = 1;

      /* If this machine's insv can only insert into a register, copy OP0
         into a register and save it back later.  */
      /* This used to check flag_force_mem, but that was a serious
         de-optimization now that flag_force_mem is enabled by -O2.  */
      if (GET_CODE (op0) == MEM
          && ! ((*insn_operand_predicate[(int) CODE_FOR_insv][0])
                (op0, VOIDmode)))
        {
          rtx tempreg;
          enum machine_mode bestmode;

          /* Get the mode to use for inserting into this field.  If OP0 is
             BLKmode, get the smallest mode consistent with the alignment.  If
             OP0 is a non-BLKmode object that is no wider than MAXMODE, use its
             mode.  Otherwise, use the smallest mode containing the field.  */

          if (GET_MODE (op0) == BLKmode
              || GET_MODE_SIZE (GET_MODE (op0)) > GET_MODE_SIZE (maxmode))
            bestmode
              = get_best_mode (bitsize, bitnum, align * BITS_PER_UNIT, maxmode,
                               MEM_VOLATILE_P (op0));
          else
            bestmode = GET_MODE (op0);

          if (bestmode == VOIDmode
              || (SLOW_UNALIGNED_ACCESS && GET_MODE_SIZE (bestmode) > align))
            goto insv_loses;

          /* Adjust address to point to the containing unit of that mode.  */
          unit = GET_MODE_BITSIZE (bestmode);
          /* Compute offset as multiple of this unit, counting in bytes.  */
          offset = (bitnum / unit) * GET_MODE_SIZE (bestmode);
          bitpos = bitnum % unit;
          op0 = change_address (op0, bestmode,
                                plus_constant (XEXP (op0, 0), offset));

          /* Fetch that unit, store the bitfield in it, then store the unit.  */
          tempreg = copy_to_reg (op0);
          store_bit_field (tempreg, bitsize, bitpos, fieldmode, value,
                           align, total_size);
          emit_move_insn (op0, tempreg);
          return value;
        }
      volatile_ok = save_volatile_ok;
      /* Add OFFSET into OP0's address.  */
      if (GET_CODE (xop0) == MEM)
        xop0 = change_address (xop0, byte_mode,
                               plus_constant (XEXP (xop0, 0), offset));

      /* If xop0 is a register, we need it in MAXMODE
         to make it acceptable to the format of insv.  */
      if (GET_CODE (xop0) == SUBREG)
        /* We can't just change the mode, because this might clobber op0,
           and we will need the original value of op0 if insv fails.  */
        xop0 = gen_rtx_SUBREG (maxmode, SUBREG_REG (xop0), SUBREG_WORD (xop0));
      if (GET_CODE (xop0) == REG && GET_MODE (xop0) != maxmode)
        xop0 = gen_rtx_SUBREG (maxmode, xop0, 0);

      /* On big-endian machines, we count bits from the most significant.
         If the bit field insn does not, we must invert.  */

      if (BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
        xbitpos = unit - bitsize - xbitpos;

      /* We have been counting XBITPOS within UNIT.
         Count instead within the size of the register.  */
      if (BITS_BIG_ENDIAN && GET_CODE (xop0) != MEM)
        xbitpos += GET_MODE_BITSIZE (maxmode) - unit;

      unit = GET_MODE_BITSIZE (maxmode);

      /* Convert VALUE to maxmode (which insv insn wants) in VALUE1.  */
      value1 = value;
      if (GET_MODE (value) != maxmode)
        {
          if (GET_MODE_BITSIZE (GET_MODE (value)) >= bitsize)
            {
              /* Optimization: Don't bother really extending VALUE
                 if it has all the bits we will actually use.  However,
                 if we must narrow it, be sure we do it correctly.  */

              if (GET_MODE_SIZE (GET_MODE (value)) < GET_MODE_SIZE (maxmode))
                {
                  /* Avoid making subreg of a subreg, or of a mem.  */
                  if (GET_CODE (value1) != REG)
                    value1 = copy_to_reg (value1);
                  value1 = gen_rtx_SUBREG (maxmode, value1, 0);
                }
              else
                value1 = gen_lowpart (maxmode, value1);
            }
          else if (!CONSTANT_P (value))
            /* Parse phase is supposed to make VALUE's data type
               match that of the component reference, which is a type
               at least as wide as the field; so VALUE should have
               a mode that corresponds to that type.  */
            abort ();
        }

      /* If this machine's insv insists on a register,
         get VALUE1 into a register.  */
      if (! ((*insn_operand_predicate[(int) CODE_FOR_insv][3])
             (value1, maxmode)))
        value1 = force_reg (maxmode, value1);

      pat = gen_insv (xop0, GEN_INT (bitsize), GEN_INT (xbitpos), value1);
      if (pat)
        emit_insn (pat);
      else
        {
          delete_insns_since (last);
          store_fixed_bit_field (op0, offset, bitsize, bitpos, value, align);
        }
    }
  else
    insv_loses:
#endif
    /* Insv is not available; store using shifts and boolean ops.  */
    store_fixed_bit_field (op0, offset, bitsize, bitpos, value, align);
  return value;
}
/* Use shifts and boolean operations to store VALUE
   into a bit field of width BITSIZE
   in a memory location specified by OP0 except offset by OFFSET bytes.
     (OFFSET must be 0 if OP0 is a register.)
   The field starts at position BITPOS within the byte.
     (If OP0 is a register, it may be a full word or a narrower mode,
      but BITPOS still counts within a full word,
      which is significant on bigendian machines.)
   STRUCT_ALIGN is the alignment the structure is known to have (in bytes).

   Note that protect_from_queue has already been done on OP0 and VALUE.  */
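/* Example: with BITSIZE 6 and BITPOS 3, the field occupies bits 3..8 and
   so crosses a byte boundary; get_best_mode below must then pick at least
   a halfword mode, or give up and let the field be stored in two pieces
   via store_split_bit_field.  */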
static void
store_fixed_bit_field (op0, offset, bitsize, bitpos, value, struct_align)
     register rtx op0;
     register int offset, bitsize, bitpos;
     register rtx value;
     int struct_align;
{
  register enum machine_mode mode;
  int total_bits = BITS_PER_WORD;
  rtx subtarget, temp;
  int all_zero = 0;
  int all_one = 0;

  if (! SLOW_UNALIGNED_ACCESS)
    struct_align = BIGGEST_ALIGNMENT / BITS_PER_UNIT;

  /* There is a case not handled here:
     a structure with a known alignment of just a halfword
     and a field split across two aligned halfwords within the structure.
     Or likewise a structure with a known alignment of just a byte
     and a field split across two bytes.
     Such cases are not supposed to be able to occur.  */

  if (GET_CODE (op0) == REG || GET_CODE (op0) == SUBREG)
    {
      if (offset != 0)
        abort ();
      /* Special treatment for a bit field split across two registers.  */
      if (bitsize + bitpos > BITS_PER_WORD)
        {
          store_split_bit_field (op0, bitsize, bitpos,
                                 value, BITS_PER_WORD);
          return;
        }
    }
  else
    {
      /* Get the proper mode to use for this field.  We want a mode that
         includes the entire field.  If such a mode would be larger than
         a word, we won't be doing the extraction the normal way.  */

      mode = get_best_mode (bitsize, bitpos + offset * BITS_PER_UNIT,
                            struct_align * BITS_PER_UNIT, word_mode,
                            GET_CODE (op0) == MEM && MEM_VOLATILE_P (op0));

      if (mode == VOIDmode)
        {
          /* The only way this should occur is if the field spans word
             boundaries.  */
          store_split_bit_field (op0,
                                 bitsize, bitpos + offset * BITS_PER_UNIT,
                                 value, struct_align);
          return;
        }

      total_bits = GET_MODE_BITSIZE (mode);

      /* Make sure bitpos is valid for the chosen mode.  Adjust BITPOS to
         be in the range 0 to total_bits-1, and put any excess bytes in
         OFFSET.  */
      if (bitpos >= total_bits)
        {
          offset += (bitpos / total_bits) * (total_bits / BITS_PER_UNIT);
          bitpos -= ((bitpos / total_bits) * (total_bits / BITS_PER_UNIT)
                     * BITS_PER_UNIT);
        }

      /* Get ref to an aligned byte, halfword, or word containing the field.
         Adjust BITPOS to be position within a word,
         and OFFSET to be the offset of that word.
         Then alter OP0 to refer to that word.  */
      bitpos += (offset % (total_bits / BITS_PER_UNIT)) * BITS_PER_UNIT;
      offset -= (offset % (total_bits / BITS_PER_UNIT));
      op0 = change_address (op0, mode,
                            plus_constant (XEXP (op0, 0), offset));
    }

  mode = GET_MODE (op0);

  /* Now MODE is either some integral mode for a MEM as OP0,
     or is a full-word for a REG as OP0.  TOTAL_BITS corresponds.
     The bit field is contained entirely within OP0.
     BITPOS is the starting bit number within OP0.
     (OP0's mode may actually be narrower than MODE.)  */

  if (BYTES_BIG_ENDIAN)
    /* BITPOS is the distance between our msb
       and that of the containing datum.
       Convert it to the distance from the lsb.  */
    bitpos = total_bits - bitsize - bitpos;

  /* Now BITPOS is always the distance between our lsb
     and that of OP0.  */
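  /* E.g. with TOTAL_BITS 32, BITSIZE 8 and big-endian BITPOS 4, the
     little-endian BITPOS computed above is 32 - 8 - 4 = 20.  */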
  /* Shift VALUE left by BITPOS bits.  If VALUE is not constant,
     we must first convert its mode to MODE.  */

  if (GET_CODE (value) == CONST_INT)
    {
      register HOST_WIDE_INT v = INTVAL (value);

      if (bitsize < HOST_BITS_PER_WIDE_INT)
        v &= ((HOST_WIDE_INT) 1 << bitsize) - 1;

      if (v == 0)
        all_zero = 1;
      else if ((bitsize < HOST_BITS_PER_WIDE_INT
                && v == ((HOST_WIDE_INT) 1 << bitsize) - 1)
               || (bitsize == HOST_BITS_PER_WIDE_INT && v == -1))
        all_one = 1;

      value = lshift_value (mode, value, bitpos, bitsize);
    }
  else
    {
      int must_and = (GET_MODE_BITSIZE (GET_MODE (value)) != bitsize
                      && bitpos + bitsize != GET_MODE_BITSIZE (mode));

      if (GET_MODE (value) != mode)
        {
          if ((GET_CODE (value) == REG || GET_CODE (value) == SUBREG)
              && GET_MODE_SIZE (mode) < GET_MODE_SIZE (GET_MODE (value)))
            value = gen_lowpart (mode, value);
          else
            value = convert_to_mode (mode, value, 1);
        }

      if (must_and)
        value = expand_binop (mode, and_optab, value,
                              mask_rtx (mode, 0, bitsize, 0),
                              NULL_RTX, 1, OPTAB_LIB_WIDEN);
      if (bitpos > 0)
        value = expand_shift (LSHIFT_EXPR, mode, value,
                              build_int_2 (bitpos, 0), NULL_RTX, 1);
    }

  /* Now clear the chosen bits in OP0,
     except that if VALUE is -1 we need not bother.  */

  subtarget = (GET_CODE (op0) == REG || ! flag_force_mem) ? op0 : 0;

  if (! all_one)
    {
      temp = expand_binop (mode, and_optab, op0,
                           mask_rtx (mode, bitpos, bitsize, 1),
                           subtarget, 1, OPTAB_LIB_WIDEN);
      subtarget = temp;
    }
  else
    temp = op0;

  /* Now logical-or VALUE into OP0, unless it is zero.  */

  if (! all_zero)
    temp = expand_binop (mode, ior_optab, temp, value,
                         subtarget, 1, OPTAB_LIB_WIDEN);
  if (op0 != temp)
    emit_move_insn (op0, temp);
}
/* Store a bit field that is split across multiple accessible memory objects.

   OP0 is the REG, SUBREG or MEM rtx for the first of the objects.
   BITSIZE is the field width; BITPOS the position of its first bit
   (within the word).
   VALUE is the value to store.
   ALIGN is the known alignment of OP0, measured in bytes.
   This is also the size of the memory objects to be used.

   This does not yet handle fields wider than BITS_PER_WORD.  */
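/* For instance, on a 32-bit target a 16-bit field at BITPOS 24 in a
   register pair is stored as two 8-bit pieces: bits 24..31 of the first
   word, then bits 0..7 of the next.  */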
static void
store_split_bit_field (op0, bitsize, bitpos, value, align)
     rtx op0;
     int bitsize, bitpos;
     rtx value;
     int align;
{
  int unit;
  int bitsdone = 0;

  /* Make sure UNIT isn't larger than BITS_PER_WORD, we can only handle that
     much at a time.  */
  if (GET_CODE (op0) == REG || GET_CODE (op0) == SUBREG)
    unit = BITS_PER_WORD;
  else
    unit = MIN (align * BITS_PER_UNIT, BITS_PER_WORD);

  /* If VALUE is a constant other than a CONST_INT, get it into a register in
     WORD_MODE.  If we can do this using gen_lowpart_common, do so.  Note
     that VALUE might be a floating-point constant.  */
  if (CONSTANT_P (value) && GET_CODE (value) != CONST_INT)
    {
      rtx word = gen_lowpart_common (word_mode, value);

      if (word && (value != word))
        value = word;
      else
        value = gen_lowpart_common (word_mode,
                                    force_reg (GET_MODE (value) != VOIDmode
                                               ? GET_MODE (value)
                                               : word_mode, value));
    }
  else if (GET_CODE (value) == ADDRESSOF)
    value = copy_to_reg (value);
  while (bitsdone < bitsize)
    {
      int thissize;
      rtx part, word;
      int thispos;
      int offset;

      offset = (bitpos + bitsdone) / unit;
      thispos = (bitpos + bitsdone) % unit;

      /* THISSIZE must not overrun a word boundary.  Otherwise,
         store_fixed_bit_field will call us again, and we will mutually
         recurse forever.  */
      thissize = MIN (bitsize - bitsdone, BITS_PER_WORD);
      thissize = MIN (thissize, unit - thispos);

      if (BYTES_BIG_ENDIAN)
        {
          int total_bits;

          /* We must do an endian conversion exactly the same way as it is
             done in extract_bit_field, so that the two calls to
             extract_fixed_bit_field will have comparable arguments.  */
          if (GET_CODE (value) != MEM || GET_MODE (value) == BLKmode)
            total_bits = BITS_PER_WORD;
          else
            total_bits = GET_MODE_BITSIZE (GET_MODE (value));

          /* Fetch successively less significant portions.  */
          if (GET_CODE (value) == CONST_INT)
            part = GEN_INT (((unsigned HOST_WIDE_INT) (INTVAL (value))
                             >> (bitsize - bitsdone - thissize))
                            & (((HOST_WIDE_INT) 1 << thissize) - 1));
          else
            /* The args are chosen so that the last part includes the
               lsb.  Give extract_bit_field the value it needs (with
               endianness compensation) to fetch the piece we want.

               ??? We have no idea what the alignment of VALUE is, so
               we have to use a guess.  */
            part
              = extract_fixed_bit_field
                (word_mode, value, 0, thissize,
                 total_bits - bitsize + bitsdone, NULL_RTX, 1,
                 GET_MODE (value) == VOIDmode
                 ? UNITS_PER_WORD
                 : (GET_MODE (value) == BLKmode
                    ? 1
                    : GET_MODE_ALIGNMENT (GET_MODE (value)) / BITS_PER_UNIT));
        }
      else
        {
          /* Fetch successively more significant portions.  */
          if (GET_CODE (value) == CONST_INT)
            part = GEN_INT (((unsigned HOST_WIDE_INT) (INTVAL (value))
                             >> bitsdone)
                            & (((HOST_WIDE_INT) 1 << thissize) - 1));
          else
            part
              = extract_fixed_bit_field
                (word_mode, value, 0, thissize, bitsdone, NULL_RTX, 1,
                 GET_MODE (value) == VOIDmode
                 ? UNITS_PER_WORD
                 : (GET_MODE (value) == BLKmode
                    ? 1
                    : GET_MODE_ALIGNMENT (GET_MODE (value)) / BITS_PER_UNIT));
        }

      /* If OP0 is a register, then handle OFFSET here.

         When handling multiword bitfields, extract_bit_field may pass
         down a word_mode SUBREG of a larger REG for a bitfield that actually
         crosses a word boundary.  Thus, for a SUBREG, we must find
         the current word starting from the base register.  */
      if (GET_CODE (op0) == SUBREG)
        {
          word = operand_subword_force (SUBREG_REG (op0),
                                        SUBREG_WORD (op0) + offset,
                                        GET_MODE (SUBREG_REG (op0)));
          offset = 0;
        }
      else if (GET_CODE (op0) == REG)
        {
          word = operand_subword_force (op0, offset, GET_MODE (op0));
          offset = 0;
        }
      else
        word = op0;

      /* OFFSET is in UNITs, and UNIT is in bits.
         store_fixed_bit_field wants offset in bytes.  */
      store_fixed_bit_field (word, offset * unit / BITS_PER_UNIT,
                             thissize, thispos, part, align);
      bitsdone += thissize;
    }
}
/* Generate code to extract a byte-field from STR_RTX
   containing BITSIZE bits, starting at BITNUM,
   and put it in TARGET if possible (if TARGET is nonzero).
   Regardless of TARGET, we return the rtx for where the value is placed.
   It may be a QUEUED.

   STR_RTX is the structure containing the byte (a REG or MEM).
   UNSIGNEDP is nonzero if this is an unsigned bit field.
   MODE is the natural mode of the field value once extracted.
   TMODE is the mode the caller would like the value to have;
   but the value may be returned with type MODE instead.

   ALIGN is the alignment that STR_RTX is known to have, measured in bytes.
   TOTAL_SIZE is the size in bytes of the containing structure,
   or -1 if varying.

   If a TARGET is specified and we can store in it at no extra cost,
   we do so, and return TARGET.
   Otherwise, we return a REG of mode TMODE or MODE, with TMODE preferred
   if they are equally easy.  */

rtx
extract_bit_field (str_rtx, bitsize, bitnum, unsignedp,
                   target, mode, tmode, align, total_size)
     rtx str_rtx;
     register int bitsize;
     int bitnum;
     int unsignedp;
     rtx target;
     enum machine_mode mode, tmode;
     int align;
     int total_size;
{
  int unit = (GET_CODE (str_rtx) == MEM) ? BITS_PER_UNIT : BITS_PER_WORD;
  register int offset = bitnum / unit;
  register int bitpos = bitnum % unit;
  register rtx op0 = str_rtx;
  rtx spec_target = target;
  rtx spec_target_subreg = 0;

  /* Discount the part of the structure before the desired byte.
     We need to know how many bytes are safe to reference after it.  */
  if (total_size >= 0)
    total_size -= (bitpos / BIGGEST_ALIGNMENT
                   * (BIGGEST_ALIGNMENT / BITS_PER_UNIT));

  if (tmode == VOIDmode)
    tmode = mode;
  while (GET_CODE (op0) == SUBREG)
    {
      int outer_size = GET_MODE_BITSIZE (GET_MODE (op0));
      int inner_size = GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (op0)));

      offset += SUBREG_WORD (op0);

      inner_size = MIN (inner_size, BITS_PER_WORD);

      if (BYTES_BIG_ENDIAN && (outer_size < inner_size))
        {
          bitpos += inner_size - outer_size;
          if (bitpos > unit)
            {
              offset += (bitpos / unit);
              bitpos %= unit;
            }
        }

      op0 = SUBREG_REG (op0);
    }

  /* ??? We currently assume TARGET is at least as big as BITSIZE.
     If that's wrong, the solution is to test for it and set TARGET to 0
     if needed.  */

  /* If OP0 is a register, BITPOS must count within a word.
     But as we have it, it counts within whatever size OP0 now has.
     On a bigendian machine, these are not the same, so convert.  */
  if (BYTES_BIG_ENDIAN
      && GET_CODE (op0) != MEM
      && unit > GET_MODE_BITSIZE (GET_MODE (op0)))
    bitpos += unit - GET_MODE_BITSIZE (GET_MODE (op0));
  /* Extracting a full-word or multi-word value
     from a structure in a register or aligned memory.
     This can be done with just SUBREG.
     So too extracting a subword value in
     the least significant part of the register.  */

  if (((GET_CODE (op0) == REG
        && TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
                                  GET_MODE_BITSIZE (GET_MODE (op0))))
       || (GET_CODE (op0) == MEM
           && (! SLOW_UNALIGNED_ACCESS
               || (offset * BITS_PER_UNIT % bitsize == 0
                   && align * BITS_PER_UNIT % bitsize == 0))))
      && ((bitsize >= BITS_PER_WORD && bitsize == GET_MODE_BITSIZE (mode)
           && bitpos % BITS_PER_WORD == 0)
          || (mode_for_size (bitsize, GET_MODE_CLASS (tmode), 0) != BLKmode
              && (BYTES_BIG_ENDIAN
                  ? bitpos + bitsize == BITS_PER_WORD
                  : bitpos == 0))))
    {
      enum machine_mode mode1
        = mode_for_size (bitsize, GET_MODE_CLASS (tmode), 0);

      if (mode1 != GET_MODE (op0))
        {
          if (GET_CODE (op0) == REG)
            op0 = gen_rtx_SUBREG (mode1, op0, offset);
          else
            op0 = change_address (op0, mode1,
                                  plus_constant (XEXP (op0, 0), offset));
        }

      if (mode1 != mode)
        return convert_to_mode (tmode, op0, unsignedp);
      return op0;
    }
  /* Handle fields bigger than a word.  */

  if (bitsize > BITS_PER_WORD)
    {
      /* Here we transfer the words of the field
         in the order least significant first.
         This is because the most significant word is the one which may
         be less than full.  */

      int nwords = (bitsize + (BITS_PER_WORD - 1)) / BITS_PER_WORD;
      int i;

      if (target == 0 || GET_CODE (target) != REG)
        target = gen_reg_rtx (mode);

      /* Indicate for flow that the entire target reg is being set.  */
      emit_insn (gen_rtx_CLOBBER (VOIDmode, target));

      for (i = 0; i < nwords; i++)
        {
          /* If I is 0, use the low-order word in both field and target;
             if I is 1, use the next to lowest word; and so on.  */
          /* Word number in TARGET to use.  */
          int wordnum = (WORDS_BIG_ENDIAN
                         ? GET_MODE_SIZE (GET_MODE (target)) / UNITS_PER_WORD - i - 1
                         : i);
          /* Offset from start of field in OP0.  */
          int bit_offset = (WORDS_BIG_ENDIAN
                            ? MAX (0, bitsize - (i + 1) * BITS_PER_WORD)
                            : i * BITS_PER_WORD);
          rtx target_part = operand_subword (target, wordnum, 1, VOIDmode);
          rtx result_part
            = extract_bit_field (op0, MIN (BITS_PER_WORD,
                                           bitsize - i * BITS_PER_WORD),
                                 bitnum + bit_offset,
                                 1, target_part, mode, word_mode,
                                 align, total_size);

          if (target_part == 0)
            abort ();

          if (result_part != target_part)
            emit_move_insn (target_part, result_part);
        }

      if (unsignedp)
        {
          /* Unless we've filled TARGET, the upper regs in a multi-reg value
             need to be zero'd out.  */
          if (GET_MODE_SIZE (GET_MODE (target)) > nwords * UNITS_PER_WORD)
            {
              int i, total_words;

              total_words = GET_MODE_SIZE (GET_MODE (target)) / UNITS_PER_WORD;
              for (i = nwords; i < total_words; i++)
                {
                  int wordnum = WORDS_BIG_ENDIAN ? total_words - i - 1 : i;
                  rtx target_part = operand_subword (target, wordnum, 1, VOIDmode);
                  emit_move_insn (target_part, const0_rtx);
                }
            }
          return target;
        }

      /* Signed bit field: sign-extend with two arithmetic shifts.  */
      target = expand_shift (LSHIFT_EXPR, mode, target,
                             build_int_2 (GET_MODE_BITSIZE (mode) - bitsize, 0),
                             NULL_RTX, 0);
      return expand_shift (RSHIFT_EXPR, mode, target,
                           build_int_2 (GET_MODE_BITSIZE (mode) - bitsize, 0),
                           NULL_RTX, 0);
    }
  /* From here on we know the desired field is smaller than a word
     so we can assume it is an integer.  So we can safely extract it as one
     size of integer, if necessary, and then truncate or extend
     to the size that is wanted.  */

  /* OFFSET is the number of words or bytes (UNIT says which)
     from STR_RTX to the first word or byte containing part of the field.  */

  if (GET_CODE (op0) == REG)
    {
      if (offset != 0
          || GET_MODE_SIZE (GET_MODE (op0)) > UNITS_PER_WORD)
        op0 = gen_rtx_SUBREG (TYPE_MODE (type_for_size (BITS_PER_WORD, 0)),
                              op0, offset);
      offset = 0;
    }
  else
    {
      op0 = protect_from_queue (str_rtx, 1);
    }

  /* Now OFFSET is nonzero only for memory operands.  */
  if (unsignedp)
    {
#ifdef HAVE_extzv
      if (HAVE_extzv
          && (GET_MODE_BITSIZE (insn_operand_mode[(int) CODE_FOR_extzv][0])
              >= bitsize)
          && ! ((GET_CODE (op0) == REG || GET_CODE (op0) == SUBREG)
                && (bitsize + bitpos
                    > GET_MODE_BITSIZE (insn_operand_mode[(int) CODE_FOR_extzv][0]))))
        {
          int xbitpos = bitpos, xoffset = offset;
          rtx bitsize_rtx, bitpos_rtx;
          rtx last = get_last_insn ();
          rtx xop0 = op0;
          rtx xtarget = target;
          rtx xspec_target = spec_target;
          rtx xspec_target_subreg = spec_target_subreg;
          rtx pat;
          enum machine_mode maxmode
            = insn_operand_mode[(int) CODE_FOR_extzv][0];

          if (GET_CODE (xop0) == MEM)
            {
              int save_volatile_ok = volatile_ok;
              volatile_ok = 1;

              /* Is the memory operand acceptable?  */
              if (! ((*insn_operand_predicate[(int) CODE_FOR_extzv][1])
                     (xop0, GET_MODE (xop0))))
                {
                  /* No, load into a reg and extract from there.  */
                  enum machine_mode bestmode;

                  /* Get the mode to use for inserting into this field.  If
                     OP0 is BLKmode, get the smallest mode consistent with the
                     alignment.  If OP0 is a non-BLKmode object that is no
                     wider than MAXMODE, use its mode.  Otherwise, use the
                     smallest mode containing the field.  */

                  if (GET_MODE (xop0) == BLKmode
                      || (GET_MODE_SIZE (GET_MODE (op0))
                          > GET_MODE_SIZE (maxmode)))
                    bestmode = get_best_mode (bitsize, bitnum,
                                              align * BITS_PER_UNIT, maxmode,
                                              MEM_VOLATILE_P (xop0));
                  else
                    bestmode = GET_MODE (xop0);

                  if (bestmode == VOIDmode
                      || (SLOW_UNALIGNED_ACCESS && GET_MODE_SIZE (bestmode) > align))
                    goto extzv_loses;

                  /* Compute offset as multiple of this unit,
                     counting in bytes.  */
                  unit = GET_MODE_BITSIZE (bestmode);
                  xoffset = (bitnum / unit) * GET_MODE_SIZE (bestmode);
                  xbitpos = bitnum % unit;
                  xop0 = change_address (xop0, bestmode,
                                         plus_constant (XEXP (xop0, 0),
                                                        xoffset));
                  /* Fetch it to a register in that size.  */
                  xop0 = force_reg (bestmode, xop0);

                  /* XBITPOS counts within UNIT, which is what is expected.  */
                }
              else
                /* Get ref to first byte containing part of the field.  */
                xop0 = change_address (xop0, byte_mode,
                                       plus_constant (XEXP (xop0, 0), xoffset));

              volatile_ok = save_volatile_ok;
            }

          /* If op0 is a register, we need it in MAXMODE (which is usually
             SImode) to make it acceptable to the format of extzv.  */
          if (GET_CODE (xop0) == SUBREG && GET_MODE (xop0) != maxmode)
            goto extzv_loses;
          if (GET_CODE (xop0) == REG && GET_MODE (xop0) != maxmode)
            xop0 = gen_rtx_SUBREG (maxmode, xop0, 0);

          /* On big-endian machines, we count bits from the most significant.
             If the bit field insn does not, we must invert.  */
          if (BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
            xbitpos = unit - bitsize - xbitpos;

          /* Now convert from counting within UNIT to counting in MAXMODE.  */
          if (BITS_BIG_ENDIAN && GET_CODE (xop0) != MEM)
            xbitpos += GET_MODE_BITSIZE (maxmode) - unit;

          unit = GET_MODE_BITSIZE (maxmode);

          if (xtarget == 0
              || (flag_force_mem && GET_CODE (xtarget) == MEM))
            xtarget = xspec_target = gen_reg_rtx (tmode);

          if (GET_MODE (xtarget) != maxmode)
            {
              if (GET_CODE (xtarget) == REG)
                {
                  int wider = (GET_MODE_SIZE (maxmode)
                               > GET_MODE_SIZE (GET_MODE (xtarget)));
                  xtarget = gen_lowpart (maxmode, xtarget);
                  if (wider)
                    xspec_target_subreg = xtarget;
                }
              else
                xtarget = gen_reg_rtx (maxmode);
            }

          /* If this machine's extzv insists on a register target,
             make sure we have one.  */
          if (! ((*insn_operand_predicate[(int) CODE_FOR_extzv][0])
                 (xtarget, maxmode)))
            xtarget = gen_reg_rtx (maxmode);

          bitsize_rtx = GEN_INT (bitsize);
          bitpos_rtx = GEN_INT (xbitpos);

          pat = gen_extzv (protect_from_queue (xtarget, 1),
                           xop0, bitsize_rtx, bitpos_rtx);
          if (pat)
            {
              emit_insn (pat);
              target = xtarget;
              spec_target = xspec_target;
              spec_target_subreg = xspec_target_subreg;
            }
          else
            {
              delete_insns_since (last);
              target = extract_fixed_bit_field (tmode, op0, offset, bitsize,
                                                bitpos, target, 1, align);
            }
        }
      else
        extzv_loses:
#endif
        target = extract_fixed_bit_field (tmode, op0, offset, bitsize, bitpos,
                                          target, 1, align);
    }
  else
    {
#ifdef HAVE_extv
      if (HAVE_extv
          && (GET_MODE_BITSIZE (insn_operand_mode[(int) CODE_FOR_extv][0])
              >= bitsize)
          && ! ((GET_CODE (op0) == REG || GET_CODE (op0) == SUBREG)
                && (bitsize + bitpos
                    > GET_MODE_BITSIZE (insn_operand_mode[(int) CODE_FOR_extv][0]))))
        {
          int xbitpos = bitpos, xoffset = offset;
          rtx bitsize_rtx, bitpos_rtx;
          rtx last = get_last_insn ();
          rtx xop0 = op0, xtarget = target;
          rtx xspec_target = spec_target;
          rtx xspec_target_subreg = spec_target_subreg;
          rtx pat;
          enum machine_mode maxmode
            = insn_operand_mode[(int) CODE_FOR_extv][0];

          if (GET_CODE (xop0) == MEM)
            {
              /* Is the memory operand acceptable?  */
              if (! ((*insn_operand_predicate[(int) CODE_FOR_extv][1])
                     (xop0, GET_MODE (xop0))))
                {
                  /* No, load into a reg and extract from there.  */
                  enum machine_mode bestmode;

                  /* Get the mode to use for inserting into this field.  If
                     OP0 is BLKmode, get the smallest mode consistent with the
                     alignment.  If OP0 is a non-BLKmode object that is no
                     wider than MAXMODE, use its mode.  Otherwise, use the
                     smallest mode containing the field.  */

                  if (GET_MODE (xop0) == BLKmode
                      || (GET_MODE_SIZE (GET_MODE (op0))
                          > GET_MODE_SIZE (maxmode)))
                    bestmode = get_best_mode (bitsize, bitnum,
                                              align * BITS_PER_UNIT, maxmode,
                                              MEM_VOLATILE_P (xop0));
                  else
                    bestmode = GET_MODE (xop0);

                  if (bestmode == VOIDmode
                      || (SLOW_UNALIGNED_ACCESS && GET_MODE_SIZE (bestmode) > align))
                    goto extv_loses;

                  /* Compute offset as multiple of this unit,
                     counting in bytes.  */
                  unit = GET_MODE_BITSIZE (bestmode);
                  xoffset = (bitnum / unit) * GET_MODE_SIZE (bestmode);
                  xbitpos = bitnum % unit;
                  xop0 = change_address (xop0, bestmode,
                                         plus_constant (XEXP (xop0, 0),
                                                        xoffset));
                  /* Fetch it to a register in that size.  */
                  xop0 = force_reg (bestmode, xop0);

                  /* XBITPOS counts within UNIT, which is what is expected.  */
                }
              else
                /* Get ref to first byte containing part of the field.  */
                xop0 = change_address (xop0, byte_mode,
                                       plus_constant (XEXP (xop0, 0), xoffset));
            }

          /* If op0 is a register, we need it in MAXMODE (which is usually
             SImode) to make it acceptable to the format of extv.  */
          if (GET_CODE (xop0) == SUBREG && GET_MODE (xop0) != maxmode)
            goto extv_loses;
          if (GET_CODE (xop0) == REG && GET_MODE (xop0) != maxmode)
            xop0 = gen_rtx_SUBREG (maxmode, xop0, 0);

          /* On big-endian machines, we count bits from the most significant.
             If the bit field insn does not, we must invert.  */
          if (BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
            xbitpos = unit - bitsize - xbitpos;

          /* XBITPOS counts within a size of UNIT.
             Adjust to count within a size of MAXMODE.  */
          if (BITS_BIG_ENDIAN && GET_CODE (xop0) != MEM)
            xbitpos += (GET_MODE_BITSIZE (maxmode) - unit);

          unit = GET_MODE_BITSIZE (maxmode);

          if (xtarget == 0
              || (flag_force_mem && GET_CODE (xtarget) == MEM))
            xtarget = xspec_target = gen_reg_rtx (tmode);

          if (GET_MODE (xtarget) != maxmode)
            {
              if (GET_CODE (xtarget) == REG)
                {
                  int wider = (GET_MODE_SIZE (maxmode)
                               > GET_MODE_SIZE (GET_MODE (xtarget)));
                  xtarget = gen_lowpart (maxmode, xtarget);
                  if (wider)
                    xspec_target_subreg = xtarget;
                }
              else
                xtarget = gen_reg_rtx (maxmode);
            }

          /* If this machine's extv insists on a register target,
             make sure we have one.  */
          if (! ((*insn_operand_predicate[(int) CODE_FOR_extv][0])
                 (xtarget, maxmode)))
            xtarget = gen_reg_rtx (maxmode);

          bitsize_rtx = GEN_INT (bitsize);
          bitpos_rtx = GEN_INT (xbitpos);

          pat = gen_extv (protect_from_queue (xtarget, 1),
                          xop0, bitsize_rtx, bitpos_rtx);
          if (pat)
            {
              emit_insn (pat);
              target = xtarget;
              spec_target = xspec_target;
              spec_target_subreg = xspec_target_subreg;
            }
          else
            {
              delete_insns_since (last);
              target = extract_fixed_bit_field (tmode, op0, offset, bitsize,
                                                bitpos, target, 0, align);
            }
        }
      else
        extv_loses:
#endif
        target = extract_fixed_bit_field (tmode, op0, offset, bitsize, bitpos,
                                          target, 0, align);
    }
  if (target == spec_target)
    return target;
  if (target == spec_target_subreg)
    return spec_target;
  if (GET_MODE (target) != tmode && GET_MODE (target) != mode)
    {
      /* If the target mode is floating-point, first convert to the
         integer mode of that size and then access it as a floating-point
         value via a SUBREG.  */
      if (GET_MODE_CLASS (tmode) == MODE_FLOAT)
        {
          target = convert_to_mode (mode_for_size (GET_MODE_BITSIZE (tmode),
                                                   MODE_INT, 0),
                                    target, unsignedp);
          if (GET_CODE (target) != REG)
            target = copy_to_reg (target);
          return gen_rtx_SUBREG (tmode, target, 0);
        }
      else
        return convert_to_mode (tmode, target, unsignedp);
    }
  return target;
}
/* Extract a bit field using shifts and boolean operations.
   Returns an rtx to represent the value.
   OP0 addresses a register (word) or memory (byte).
   BITPOS says which bit within the word or byte the bit field starts in.
   OFFSET says how many bytes farther the bit field starts;
     it is 0 if OP0 is a register.
   BITSIZE says how many bits long the bit field is.
     (If OP0 is a register, it may be narrower than a full word,
      but BITPOS still counts within a full word,
      which is significant on bigendian machines.)

   UNSIGNEDP is nonzero for an unsigned bit field (don't sign-extend value).
   If TARGET is nonzero, attempts to store the value there
   and return TARGET, but this is not guaranteed.
   If TARGET is not used, create a pseudo-reg of mode TMODE for the value.

   ALIGN is the alignment that STR_RTX is known to have, measured in bytes.  */
static rtx
extract_fixed_bit_field (tmode, op0, offset, bitsize, bitpos,
                         target, unsignedp, align)
     enum machine_mode tmode;
     register rtx op0, target;
     register int offset, bitsize, bitpos;
     int unsignedp;
     int align;
{
  int total_bits = BITS_PER_WORD;
  enum machine_mode mode;

  if (GET_CODE (op0) == SUBREG || GET_CODE (op0) == REG)
    {
      /* Special treatment for a bit field split across two registers.  */
      if (bitsize + bitpos > BITS_PER_WORD)
        return extract_split_bit_field (op0, bitsize, bitpos,
                                        unsignedp, align);
    }
  else
    {
      /* Get the proper mode to use for this field.  We want a mode that
         includes the entire field.  If such a mode would be larger than
         a word, we won't be doing the extraction the normal way.  */

      mode = get_best_mode (bitsize, bitpos + offset * BITS_PER_UNIT,
                            align * BITS_PER_UNIT, word_mode,
                            GET_CODE (op0) == MEM && MEM_VOLATILE_P (op0));

      if (mode == VOIDmode)
        /* The only way this should occur is if the field spans word
           boundaries.  */
        return extract_split_bit_field (op0, bitsize,
                                        bitpos + offset * BITS_PER_UNIT,
                                        unsignedp, align);

      total_bits = GET_MODE_BITSIZE (mode);

      /* Make sure bitpos is valid for the chosen mode.  Adjust BITPOS to
         be in the range 0 to total_bits-1, and put any excess bytes in
         OFFSET.  */
      if (bitpos >= total_bits)
        {
          offset += (bitpos / total_bits) * (total_bits / BITS_PER_UNIT);
          bitpos -= ((bitpos / total_bits) * (total_bits / BITS_PER_UNIT)
                     * BITS_PER_UNIT);
        }

      /* Get ref to an aligned byte, halfword, or word containing the field.
         Adjust BITPOS to be position within a word,
         and OFFSET to be the offset of that word.
         Then alter OP0 to refer to that word.  */
      bitpos += (offset % (total_bits / BITS_PER_UNIT)) * BITS_PER_UNIT;
      offset -= (offset % (total_bits / BITS_PER_UNIT));
      op0 = change_address (op0, mode,
                            plus_constant (XEXP (op0, 0), offset));
    }

  mode = GET_MODE (op0);

  if (BYTES_BIG_ENDIAN)
    {
      /* BITPOS is the distance between our msb and that of OP0.
         Convert it to the distance from the lsb.  */

      bitpos = total_bits - bitsize - bitpos;
    }

  /* Now BITPOS is always the distance between the field's lsb and that of OP0.
     We have reduced the big-endian case to the little-endian case.  */
  if (unsignedp)
    {
      if (bitpos)
        {
          /* If the field does not already start at the lsb,
             shift it so it does.  */
          tree amount = build_int_2 (bitpos, 0);
          /* Maybe propagate the target for the shift.  */
          /* But not if we will return it--could confuse integrate.c.  */
          rtx subtarget = (target != 0 && GET_CODE (target) == REG
                           && !REG_FUNCTION_VALUE_P (target)
                           ? target : 0);
          if (tmode != mode) subtarget = 0;
          op0 = expand_shift (RSHIFT_EXPR, mode, op0, amount, subtarget, 1);
        }
      /* Convert the value to the desired mode.  */
      if (mode != tmode)
        op0 = convert_to_mode (tmode, op0, 1);

      /* Unless the msb of the field used to be the msb when we shifted,
         mask out the upper bits.  */

      if (GET_MODE_BITSIZE (mode) != bitpos + bitsize
#if 0
#ifdef SLOW_ZERO_EXTEND
          /* Always generate an `and' if
             we just zero-extended op0 and SLOW_ZERO_EXTEND, since it
             will combine fruitfully with the zero-extend.  */
          || tmode != mode
#endif
#endif
          )
        return expand_binop (GET_MODE (op0), and_optab, op0,
                             mask_rtx (GET_MODE (op0), 0, bitsize, 0),
                             target, 1, OPTAB_LIB_WIDEN);
      return op0;
    }

  /* To extract a signed bit-field, first shift its msb to the msb of the word,
     then arithmetic-shift its lsb to the lsb of the word.  */
  op0 = force_reg (mode, op0);
  if (mode != tmode)
    target = 0;

  /* Find the narrowest integer mode that contains the field.  */

  for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != VOIDmode;
       mode = GET_MODE_WIDER_MODE (mode))
    if (GET_MODE_BITSIZE (mode) >= bitsize + bitpos)
      {
        op0 = convert_to_mode (mode, op0, 0);
        break;
      }

  if (GET_MODE_BITSIZE (mode) != (bitsize + bitpos))
    {
      tree amount = build_int_2 (GET_MODE_BITSIZE (mode) - (bitsize + bitpos), 0);
      /* Maybe propagate the target for the shift.  */
      /* But not if we will return the result--could confuse integrate.c.  */
      rtx subtarget = (target != 0 && GET_CODE (target) == REG
                       && ! REG_FUNCTION_VALUE_P (target)
                       ? target : 0);
      op0 = expand_shift (LSHIFT_EXPR, mode, op0, amount, subtarget, 1);
    }

  return expand_shift (RSHIFT_EXPR, mode, op0,
                       build_int_2 (GET_MODE_BITSIZE (mode) - bitsize, 0),
                       target, 0);
}
/* Return a constant integer (CONST_INT or CONST_DOUBLE) mask value
   of mode MODE with BITSIZE ones followed by BITPOS zeros, or the
   complement of that if COMPLEMENT.  The mask is truncated if
   necessary to the width of mode MODE.  The mask is zero-extended if
   BITSIZE+BITPOS is too small for MODE.  */
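/* E.g. in SImode, BITSIZE 4 and BITPOS 8 give 0x00000f00 (four ones
   shifted left eight places); with COMPLEMENT nonzero, 0xfffff0ff.  */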
static rtx
mask_rtx (mode, bitpos, bitsize, complement)
     enum machine_mode mode;
     int bitpos, bitsize, complement;
{
  HOST_WIDE_INT masklow, maskhigh;

  if (bitpos < HOST_BITS_PER_WIDE_INT)
    masklow = (HOST_WIDE_INT) -1 << bitpos;
  else
    masklow = 0;

  if (bitpos + bitsize < HOST_BITS_PER_WIDE_INT)
    masklow &= ((unsigned HOST_WIDE_INT) -1
                >> (HOST_BITS_PER_WIDE_INT - bitpos - bitsize));

  if (bitpos <= HOST_BITS_PER_WIDE_INT)
    maskhigh = -1;
  else
    maskhigh = (HOST_WIDE_INT) -1 << (bitpos - HOST_BITS_PER_WIDE_INT);

  if (bitpos + bitsize > HOST_BITS_PER_WIDE_INT)
    maskhigh &= ((unsigned HOST_WIDE_INT) -1
                 >> (2 * HOST_BITS_PER_WIDE_INT - bitpos - bitsize));
  else
    maskhigh = 0;

  if (complement)
    {
      maskhigh = ~maskhigh;
      masklow = ~masklow;
    }

  return immed_double_const (masklow, maskhigh, mode);
}
/* Return a constant integer (CONST_INT or CONST_DOUBLE) rtx with the value
   VALUE truncated to BITSIZE bits and then shifted left BITPOS bits.  */
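/* E.g. lshift_value (SImode, GEN_INT (0x3f), 4, 4) truncates 0x3f to the
   four low bits (0xf) and shifts left four places, yielding 0xf0.  */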
static rtx
lshift_value (mode, value, bitpos, bitsize)
     enum machine_mode mode;
     rtx value;
     int bitpos, bitsize;
{
  unsigned HOST_WIDE_INT v = INTVAL (value);
  HOST_WIDE_INT low, high;

  if (bitsize < HOST_BITS_PER_WIDE_INT)
    v &= ~((HOST_WIDE_INT) -1 << bitsize);

  if (bitpos < HOST_BITS_PER_WIDE_INT)
    {
      low = v << bitpos;
      high = (bitpos > 0 ? (v >> (HOST_BITS_PER_WIDE_INT - bitpos)) : 0);
    }
  else
    {
      low = 0;
      high = v << (bitpos - HOST_BITS_PER_WIDE_INT);
    }

  return immed_double_const (low, high, mode);
}
/* Extract a bit field that is split across two words
   and return an RTX for the result.

   OP0 is the REG, SUBREG or MEM rtx for the first of the two words.
   BITSIZE is the field width; BITPOS, position of its first bit, in the word.
   UNSIGNEDP is 1 if should zero-extend the contents; else sign-extend.

   ALIGN is the known alignment of OP0, measured in bytes.
   This is also the size of the memory objects to be used.  */
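/* Each piece is fetched below as an unsigned field, shifted into its
   position in the result, and IOR'd in; for a signed field one final
   pair of arithmetic shifts then performs the sign extension.  */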
static rtx
extract_split_bit_field (op0, bitsize, bitpos, unsignedp, align)
     rtx op0;
     int bitsize, bitpos, unsignedp, align;
{
  int unit;
  int bitsdone = 0;
  rtx result = NULL_RTX;
  int first = 1;

  /* Make sure UNIT isn't larger than BITS_PER_WORD, we can only handle that
     much at a time.  */
  if (GET_CODE (op0) == REG || GET_CODE (op0) == SUBREG)
    unit = BITS_PER_WORD;
  else
    unit = MIN (align * BITS_PER_UNIT, BITS_PER_WORD);

  while (bitsdone < bitsize)
    {
      int thissize;
      rtx part, word;
      int thispos;
      int offset;

      offset = (bitpos + bitsdone) / unit;
      thispos = (bitpos + bitsdone) % unit;

      /* THISSIZE must not overrun a word boundary.  Otherwise,
         extract_fixed_bit_field will call us again, and we will mutually
         recurse forever.  */
      thissize = MIN (bitsize - bitsdone, BITS_PER_WORD);
      thissize = MIN (thissize, unit - thispos);

      /* If OP0 is a register, then handle OFFSET here.

         When handling multiword bitfields, extract_bit_field may pass
         down a word_mode SUBREG of a larger REG for a bitfield that actually
         crosses a word boundary.  Thus, for a SUBREG, we must find
         the current word starting from the base register.  */
      if (GET_CODE (op0) == SUBREG)
        {
          word = operand_subword_force (SUBREG_REG (op0),
                                        SUBREG_WORD (op0) + offset,
                                        GET_MODE (SUBREG_REG (op0)));
          offset = 0;
        }
      else if (GET_CODE (op0) == REG)
        {
          word = operand_subword_force (op0, offset, GET_MODE (op0));
          offset = 0;
        }
      else
        word = op0;

      /* Extract the parts in bit-counting order,
         whose meaning is determined by BYTES_PER_UNIT.
         OFFSET is in UNITs, and UNIT is in bits.
         extract_fixed_bit_field wants offset in bytes.  */
      part = extract_fixed_bit_field (word_mode, word,
                                      offset * unit / BITS_PER_UNIT,
                                      thissize, thispos, 0, 1, align);
      bitsdone += thissize;

      /* Shift this part into place for the result.  */
      if (BYTES_BIG_ENDIAN)
        {
          if (bitsize != bitsdone)
            part = expand_shift (LSHIFT_EXPR, word_mode, part,
                                 build_int_2 (bitsize - bitsdone, 0), 0, 1);
        }
      else
        {
          if (bitsdone != thissize)
            part = expand_shift (LSHIFT_EXPR, word_mode, part,
                                 build_int_2 (bitsdone - thissize, 0), 0, 1);
        }

      if (first)
        result = part;
      else
        /* Combine the parts with bitwise or.  This works
           because we extracted each part as an unsigned bit field.  */
        result = expand_binop (word_mode, ior_optab, part, result, NULL_RTX, 1,
                               OPTAB_LIB_WIDEN);

      first = 0;
    }

  /* Unsigned bit field: we are done.  */
  if (unsignedp)
    return result;
  /* Signed bit field: sign-extend with two arithmetic shifts.  */
  result = expand_shift (LSHIFT_EXPR, word_mode, result,
                         build_int_2 (BITS_PER_WORD - bitsize, 0),
                         NULL_RTX, 0);
  return expand_shift (RSHIFT_EXPR, word_mode, result,
                       build_int_2 (BITS_PER_WORD - bitsize, 0), NULL_RTX, 0);
}
/* Add INC into TARGET.  */

void
expand_inc (target, inc)
     rtx target, inc;
{
  rtx value = expand_binop (GET_MODE (target), add_optab,
                            target, inc,
                            target, 0, OPTAB_LIB_WIDEN);
  if (value != target)
    emit_move_insn (target, value);
}
/* Subtract DEC from TARGET.  */

void
expand_dec (target, dec)
     rtx target, dec;
{
  rtx value = expand_binop (GET_MODE (target), sub_optab,
                            target, dec,
                            target, 0, OPTAB_LIB_WIDEN);
  if (value != target)
    emit_move_insn (target, value);
}
/* Output a shift instruction for expression code CODE,
   with SHIFTED being the rtx for the value to shift,
   and AMOUNT the tree for the amount to shift by.
   Store the result in the rtx TARGET, if that is convenient.
   If UNSIGNEDP is nonzero, do a logical shift; otherwise, arithmetic.
   Return the rtx for where the value is.  */
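/* E.g. expand_shift (LSHIFT_EXPR, SImode, x, build_int_2 (3, 0),
   NULL_RTX, 1) emits rtl computing X << 3 and returns the rtx
   holding the result.  */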
rtx
expand_shift (code, mode, shifted, amount, target, unsignedp)
     enum tree_code code;
     register enum machine_mode mode;
     rtx shifted;
     tree amount;
     register rtx target;
     int unsignedp;
{
  register rtx op1, temp = 0;
  register int left = (code == LSHIFT_EXPR || code == LROTATE_EXPR);
  register int rotate = (code == LROTATE_EXPR || code == RROTATE_EXPR);
  int try;

  /* Previously detected shift-counts computed by NEGATE_EXPR
     and shifted in the other direction; but that does not work
     on all machines.  */

  op1 = expand_expr (amount, NULL_RTX, VOIDmode, 0);

#ifdef SHIFT_COUNT_TRUNCATED
  if (SHIFT_COUNT_TRUNCATED)
    {
      if (GET_CODE (op1) == CONST_INT
          && (unsigned HOST_WIDE_INT) INTVAL (op1) >= GET_MODE_BITSIZE (mode))
        op1 = GEN_INT ((unsigned HOST_WIDE_INT) INTVAL (op1)
                       % GET_MODE_BITSIZE (mode));
      else if (GET_CODE (op1) == SUBREG
               && SUBREG_WORD (op1) == 0)
        op1 = SUBREG_REG (op1);
    }
#endif

  if (op1 == const0_rtx)
    return shifted;
1779 for (try = 0; temp == 0 && try < 3; try++)
1781 enum optab_methods methods;
1783 if (try == 0)
1784 methods = OPTAB_DIRECT;
1785 else if (try == 1)
1786 methods = OPTAB_WIDEN;
1787 else
1788 methods = OPTAB_LIB_WIDEN;
1790 if (rotate)
1792 /* Widening does not work for rotation. */
1793 if (methods == OPTAB_WIDEN)
1794 continue;
1795 else if (methods == OPTAB_LIB_WIDEN)
1797 /* If we have been unable to open-code this by a rotation,
1798 do it as the IOR of two shifts. I.e., to rotate A
1799 by N bits, compute (A << N) | ((unsigned) A >> (C - N))
1800 where C is the bitsize of A.
1802 It is theoretically possible that the target machine might
1803 not be able to perform either shift and hence we would
1804 be making two libcalls rather than just the one for the
1805 shift (similarly if IOR could not be done). We will allow
1806 this extremely unlikely lossage to avoid complicating the
1807 code below. */
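/* For example, rotating a 32-bit value A left by 8 becomes
   (A << 8) | ((unsigned) A >> 24); with A = 0x12345678 this
   gives 0x34567812.  */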
1809 rtx subtarget = target == shifted ? 0 : target;
1810 rtx temp1;
1811 tree type = TREE_TYPE (amount);
1812 tree new_amount = make_tree (type, op1);
1813 tree other_amount
1814 = fold (build (MINUS_EXPR, type,
1815 convert (type,
1816 build_int_2 (GET_MODE_BITSIZE (mode),
1817 0)),
1818 amount));
1820 shifted = force_reg (mode, shifted);
1822 temp = expand_shift (left ? LSHIFT_EXPR : RSHIFT_EXPR,
1823 mode, shifted, new_amount, subtarget, 1);
1824 temp1 = expand_shift (left ? RSHIFT_EXPR : LSHIFT_EXPR,
1825 mode, shifted, other_amount, 0, 1);
1826 return expand_binop (mode, ior_optab, temp, temp1, target,
1827 unsignedp, methods);
1830 temp = expand_binop (mode,
1831 left ? rotl_optab : rotr_optab,
1832 shifted, op1, target, unsignedp, methods);
1834 /* If we don't have the rotate, but we are rotating by a constant
1835 that is in range, try a rotate in the opposite direction. */
1837 if (temp == 0 && GET_CODE (op1) == CONST_INT
1838 && INTVAL (op1) > 0 && INTVAL (op1) < GET_MODE_BITSIZE (mode))
1839 temp = expand_binop (mode,
1840 left ? rotr_optab : rotl_optab,
1841 shifted,
1842 GEN_INT (GET_MODE_BITSIZE (mode)
1843 - INTVAL (op1)),
1844 target, unsignedp, methods);
1846 else if (unsignedp)
1847 temp = expand_binop (mode,
1848 left ? ashl_optab : lshr_optab,
1849 shifted, op1, target, unsignedp, methods);
1851 /* Do arithmetic shifts.
1852 Also, if we are going to widen the operand, we can just as well
1853 use an arithmetic right-shift instead of a logical one. */
1854 if (temp == 0 && ! rotate
1855 && (! unsignedp || (! left && methods == OPTAB_WIDEN)))
1857 enum optab_methods methods1 = methods;
1859 /* If trying to widen a logical shift to an arithmetic shift,
1860 don't accept an arithmetic shift of the same size. */
1861 if (unsignedp)
1862 methods1 = OPTAB_MUST_WIDEN;
1864 /* Arithmetic shift */
1866 temp = expand_binop (mode,
1867 left ? ashl_optab : ashr_optab,
1868 shifted, op1, target, unsignedp, methods1);
1871 /* We used to try extzv here for logical right shifts, but that was
1872 only useful for one machine, the VAX, and caused poor code
1873 generation there for lshrdi3, so the code was deleted and a
1874 define_expand for lshrsi3 was added to vax.md. */
1877 if (temp == 0)
1878 abort ();
1879 return temp;
1882 enum alg_code { alg_zero, alg_m, alg_shift,
1883 alg_add_t_m2, alg_sub_t_m2,
1884 alg_add_factor, alg_sub_factor,
1885 alg_add_t2_m, alg_sub_t2_m };
1888 /* This structure records a sequence of operations.
1889 `ops' is the number of operations recorded.
1890 `cost' is their total cost.
1891 The operations are stored in `op' and the corresponding
1892 logarithms of the integer coefficients in `log'.
1894 These are the operations:
1895 alg_zero total := 0;
1896 alg_m total := multiplicand;
1897 alg_shift total := total * coeff
1898 alg_add_t_m2 total := total + multiplicand * coeff;
1899 alg_sub_t_m2 total := total - multiplicand * coeff;
1900 alg_add_factor total := total * coeff + total;
1901 alg_sub_factor total := total * coeff - total;
1902 alg_add_t2_m total := total * coeff + multiplicand;
1903 alg_sub_t2_m total := total * coeff - multiplicand;
1905 The first operation must be either alg_zero or alg_m. */
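/* As an illustration, one possible recipe for multiplying by 10 is
   alg_m (total := x), alg_add_t2_m with log 2 (total := total*4 + x = 5*x),
   then alg_shift with log 1 (total := total*2 = 10*x); whether synth_mult
   picks it depends on the costs computed in init_expmed.  */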
1907 struct algorithm
1909 short cost;
1910 short ops;
1911 /* The sizes of the OP and LOG fields are not directly related to the
1912 word size, but the worst-case algorithms arise when we have few
1913 consecutive ones or zeros, i.e., a multiplicand like 10101010101...
1914 In that case we will generate shift-by-2, add, shift-by-2, add, ...,
1915 wordsize operations in total. */
1916 enum alg_code op[MAX_BITS_PER_WORD];
1917 char log[MAX_BITS_PER_WORD];
1920 static void synth_mult PROTO((struct algorithm *,
1921 unsigned HOST_WIDE_INT,
1922 int));
1923 static unsigned HOST_WIDE_INT choose_multiplier PROTO((unsigned HOST_WIDE_INT,
1924 int, int,
1925 unsigned HOST_WIDE_INT *,
1926 int *, int *));
1927 static unsigned HOST_WIDE_INT invert_mod2n PROTO((unsigned HOST_WIDE_INT,
1928 int));
1929 /* Compute and return the best algorithm for multiplying by T.
1930 The algorithm must cost less than COST_LIMIT.
1931 If retval.cost >= COST_LIMIT, no algorithm was found and all
1932 other fields of the returned struct are undefined. */
1934 static void
1935 synth_mult (alg_out, t, cost_limit)
1936 struct algorithm *alg_out;
1937 unsigned HOST_WIDE_INT t;
1938 int cost_limit;
1940 int m;
1941 struct algorithm *alg_in, *best_alg;
1942 unsigned int cost;
1943 unsigned HOST_WIDE_INT q;
1945 /* Indicate that no algorithm is yet found. If no algorithm
1946 is found, this value will be returned to indicate failure. */
1947 alg_out->cost = cost_limit;
1949 if (cost_limit <= 0)
1950 return;
1952 /* t == 1 can be done at zero cost. */
1953 if (t == 1)
1955 alg_out->ops = 1;
1956 alg_out->cost = 0;
1957 alg_out->op[0] = alg_m;
1958 return;
1961 /* t == 0 sometimes has a cost. If it does and it exceeds our limit,
1962 fail now. */
1963 if (t == 0)
1965 if (zero_cost >= cost_limit)
1966 return;
1967 else
1969 alg_out->ops = 1;
1970 alg_out->cost = zero_cost;
1971 alg_out->op[0] = alg_zero;
1972 return;
1976 /* We'll be needing a couple extra algorithm structures now. */
1978 alg_in = (struct algorithm *)alloca (sizeof (struct algorithm));
1979 best_alg = (struct algorithm *)alloca (sizeof (struct algorithm));
1981 /* If we have a group of zero bits at the low-order part of T, try
1982 multiplying by the remaining bits and then doing a shift. */
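/* For example, t = 20 (binary 10100) has m = 2 low zero bits;
   we recurse on q = 5 and record a final shift by 2.  */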
1984 if ((t & 1) == 0)
1986 m = floor_log2 (t & -t); /* m = number of low zero bits */
1987 q = t >> m;
1988 cost = shift_cost[m];
1989 synth_mult (alg_in, q, cost_limit - cost);
1991 cost += alg_in->cost;
1992 if (cost < cost_limit)
1994 struct algorithm *x;
1995 x = alg_in, alg_in = best_alg, best_alg = x;
1996 best_alg->log[best_alg->ops] = m;
1997 best_alg->op[best_alg->ops] = alg_shift;
1998 cost_limit = cost;
2002 /* If we have an odd number, add or subtract one. */
2003 if ((t & 1) != 0)
2005 unsigned HOST_WIDE_INT w;
2007 for (w = 1; (w & t) != 0; w <<= 1)
2009 /* If T was -1, then W will be zero after the loop. This is another
2010 case where T ends with ...111. Handling it by multiplying by (T + 1)
2011 and subtracting 1 produces slightly better code, and makes algorithm
2012 selection much faster than treating it like the ...0111 case
2013 below. */
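/* For example, t = 7 (binary 111) takes the first branch: synthesize
   t + 1 = 8 as a shift by 3, then alg_sub_t_m2 gives 7*x = (x << 3) - x.
   By contrast, t = 5 (binary 101) takes the second branch:
   5*x = 4*x + x.  */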
2014 if (w == 0
2015 || (w > 2
2016 /* Reject the case where t is 3.
2017 Thus we prefer addition in that case. */
2018 && t != 3))
2020 /* T ends with ...111. Multiply by (T + 1) and subtract 1. */
2022 cost = add_cost;
2023 synth_mult (alg_in, t + 1, cost_limit - cost);
2025 cost += alg_in->cost;
2026 if (cost < cost_limit)
2028 struct algorithm *x;
2029 x = alg_in, alg_in = best_alg, best_alg = x;
2030 best_alg->log[best_alg->ops] = 0;
2031 best_alg->op[best_alg->ops] = alg_sub_t_m2;
2032 cost_limit = cost;
2035 else
2037 /* T ends with ...01 or ...011. Multiply by (T - 1) and add 1. */
2039 cost = add_cost;
2040 synth_mult (alg_in, t - 1, cost_limit - cost);
2042 cost += alg_in->cost;
2043 if (cost < cost_limit)
2045 struct algorithm *x;
2046 x = alg_in, alg_in = best_alg, best_alg = x;
2047 best_alg->log[best_alg->ops] = 0;
2048 best_alg->op[best_alg->ops] = alg_add_t_m2;
2049 cost_limit = cost;
2054 /* Look for factors of t of the form
2055 t = q(2**m +- 1), 2 <= m <= floor(log2(t - 1)).
2056 If we find such a factor, we can multiply by t using an algorithm that
2057 multiplies by q, shifts the result left by m, and adds/subtracts it to/from itself.
2059 We search for large factors first and loop down, even if large factors
2060 are less probable than small ones; if we find a large factor we will find a
2061 good sequence quickly, and therefore be able to prune (by decreasing
2062 COST_LIMIT) the search. */
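/* For example, t = 45 is divisible by d = 2**4 - 1 = 15; with
   q = 3 (e.g. 3*x = (x << 1) + x), the recorded alg_sub_factor step
   computes (3*x << 4) - 3*x = 45*x.  */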
2064 for (m = floor_log2 (t - 1); m >= 2; m--)
2066 unsigned HOST_WIDE_INT d;
2068 d = ((unsigned HOST_WIDE_INT) 1 << m) + 1;
2069 if (t % d == 0 && t > d)
2071 cost = MIN (shiftadd_cost[m], add_cost + shift_cost[m]);
2072 synth_mult (alg_in, t / d, cost_limit - cost);
2074 cost += alg_in->cost;
2075 if (cost < cost_limit)
2077 struct algorithm *x;
2078 x = alg_in, alg_in = best_alg, best_alg = x;
2079 best_alg->log[best_alg->ops] = m;
2080 best_alg->op[best_alg->ops] = alg_add_factor;
2081 cost_limit = cost;
2083 /* Other factors will have been taken care of in the recursion. */
2084 break;
2087 d = ((unsigned HOST_WIDE_INT) 1 << m) - 1;
2088 if (t % d == 0 && t > d)
2090 cost = MIN (shiftsub_cost[m], add_cost + shift_cost[m]);
2091 synth_mult (alg_in, t / d, cost_limit - cost);
2093 cost += alg_in->cost;
2094 if (cost < cost_limit)
2096 struct algorithm *x;
2097 x = alg_in, alg_in = best_alg, best_alg = x;
2098 best_alg->log[best_alg->ops] = m;
2099 best_alg->op[best_alg->ops] = alg_sub_factor;
2100 cost_limit = cost;
2102 break;
2106 /* Try shift-and-add (load effective address) instructions,
2107 i.e. do a*3, a*5, a*9. */
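/* For example, t = 9 gives q = t - 1 = 8, m = 3, and alg_add_t2_m
   records 9*x = (x << 3) + x; likewise 3*x = (x << 1) + x.  */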
2108 if ((t & 1) != 0)
2110 q = t - 1;
2111 q = q & -q;
2112 m = exact_log2 (q);
2113 if (m >= 0)
2115 cost = shiftadd_cost[m];
2116 synth_mult (alg_in, (t - 1) >> m, cost_limit - cost);
2118 cost += alg_in->cost;
2119 if (cost < cost_limit)
2121 struct algorithm *x;
2122 x = alg_in, alg_in = best_alg, best_alg = x;
2123 best_alg->log[best_alg->ops] = m;
2124 best_alg->op[best_alg->ops] = alg_add_t2_m;
2125 cost_limit = cost;
2129 q = t + 1;
2130 q = q & -q;
2131 m = exact_log2 (q);
2132 if (m >= 0)
2134 cost = shiftsub_cost[m];
2135 synth_mult (alg_in, (t + 1) >> m, cost_limit - cost);
2137 cost += alg_in->cost;
2138 if (cost < cost_limit)
2140 struct algorithm *x;
2141 x = alg_in, alg_in = best_alg, best_alg = x;
2142 best_alg->log[best_alg->ops] = m;
2143 best_alg->op[best_alg->ops] = alg_sub_t2_m;
2144 cost_limit = cost;
2149 /* If cost_limit has not decreased since we stored it in alg_out->cost,
2150 we have not found any algorithm. */
2151 if (cost_limit == alg_out->cost)
2152 return;
2154 /* If we are getting a sequence too long for `struct algorithm'
2155 to record, make this search fail. */
2156 if (best_alg->ops == MAX_BITS_PER_WORD)
2157 return;
2159 /* Copy the algorithm from temporary space to the space at alg_out.
2160 We avoid using structure assignment because the majority of
2161 best_alg is normally undefined, and this is a critical function. */
2162 alg_out->ops = best_alg->ops + 1;
2163 alg_out->cost = cost_limit;
2164 bcopy ((char *) best_alg->op, (char *) alg_out->op,
2165 alg_out->ops * sizeof *alg_out->op);
2166 bcopy ((char *) best_alg->log, (char *) alg_out->log,
2167 alg_out->ops * sizeof *alg_out->log);
2170 /* Perform a multiplication and return an rtx for the result.
2171 MODE is mode of value; OP0 and OP1 are what to multiply (rtx's);
2172 TARGET is a suggestion for where to store the result (an rtx).
2174 We check specially for a constant integer as OP1.
2175 If you want this check for OP0 as well, then before calling
2176 you should swap the two operands if OP0 would be constant. */
2179 expand_mult (mode, op0, op1, target, unsignedp)
2180 enum machine_mode mode;
2181 register rtx op0, op1, target;
2182 int unsignedp;
2184 rtx const_op1 = op1;
2186 /* synth_mult does an `unsigned int' multiply. As long as the mode is
2187 less than or equal in size to `unsigned int', this doesn't matter.
2188 If the mode is larger than `unsigned int', then synth_mult works only
2189 if the constant value exactly fits in an `unsigned int' without any
2190 truncation. This means that multiplying by negative values does
2191 not work; results are off by 2^32 on a 32-bit machine. */
2193 /* If we are multiplying in DImode, it may still be a win
2194 to try to work with shifts and adds. */
2195 if (GET_CODE (op1) == CONST_DOUBLE
2196 && GET_MODE_CLASS (GET_MODE (op1)) == MODE_INT
2197 && HOST_BITS_PER_INT >= BITS_PER_WORD
2198 && CONST_DOUBLE_HIGH (op1) == 0)
2199 const_op1 = GEN_INT (CONST_DOUBLE_LOW (op1));
2200 else if (HOST_BITS_PER_INT < GET_MODE_BITSIZE (mode)
2201 && GET_CODE (op1) == CONST_INT
2202 && INTVAL (op1) < 0)
2203 const_op1 = 0;
2205 /* We used to test optimize here, on the grounds that it's better to
2206 produce a smaller program when -O is not used.
2207 But this causes such a terrible slowdown sometimes
2208 that it seems better to use synth_mult always. */
2210 if (const_op1 && GET_CODE (const_op1) == CONST_INT)
2212 struct algorithm alg;
2213 struct algorithm alg2;
2214 HOST_WIDE_INT val = INTVAL (op1);
2215 HOST_WIDE_INT val_so_far;
2216 rtx insn;
2217 int mult_cost;
2218 enum {basic_variant, negate_variant, add_variant} variant = basic_variant;
2220 /* Try to do the computation three ways: multiply by the negative of OP1
2221 and then negate, do the multiplication directly, or do multiplication
2222 by OP1 - 1. */
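/* For example, a multiplication by -3 may be done as 3*x = (x << 1) + x
   followed by a negation (negate_variant), and a multiplication by 11
   may be done as 10*x + x (add_variant), whichever costs less.  */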
2224 mult_cost = rtx_cost (gen_rtx_MULT (mode, op0, op1), SET);
2225 mult_cost = MIN (12 * add_cost, mult_cost);
2227 synth_mult (&alg, val, mult_cost);
2229 /* This works only if the inverted value actually fits in an
2230 `unsigned int'. */
2231 if (HOST_BITS_PER_INT >= GET_MODE_BITSIZE (mode))
2233 synth_mult (&alg2, - val,
2234 (alg.cost < mult_cost ? alg.cost : mult_cost) - negate_cost);
2235 if (alg2.cost + negate_cost < alg.cost)
2236 alg = alg2, variant = negate_variant;
2239 /* This proves very useful for division-by-constant. */
2240 synth_mult (&alg2, val - 1,
2241 (alg.cost < mult_cost ? alg.cost : mult_cost) - add_cost);
2242 if (alg2.cost + add_cost < alg.cost)
2243 alg = alg2, variant = add_variant;
2245 if (alg.cost < mult_cost)
2247 /* We found something cheaper than a multiply insn. */
2248 int opno;
2249 rtx accum, tem;
2251 op0 = protect_from_queue (op0, 0);
2253 /* Avoid referencing memory over and over.
2254 For speed, but also for correctness when mem is volatile. */
2255 if (GET_CODE (op0) == MEM)
2256 op0 = force_reg (mode, op0);
2258 /* ACCUM starts out either as OP0 or as a zero, depending on
2259 the first operation. */
2261 if (alg.op[0] == alg_zero)
2263 accum = copy_to_mode_reg (mode, const0_rtx);
2264 val_so_far = 0;
2266 else if (alg.op[0] == alg_m)
2268 accum = copy_to_mode_reg (mode, op0);
2269 val_so_far = 1;
2271 else
2272 abort ();
2274 for (opno = 1; opno < alg.ops; opno++)
2276 int log = alg.log[opno];
2277 int preserve = preserve_subexpressions_p ();
2278 rtx shift_subtarget = preserve ? 0 : accum;
2279 rtx add_target
2280 = (opno == alg.ops - 1 && target != 0 && variant != add_variant
2281 && ! preserve)
2282 ? target : 0;
2283 rtx accum_target = preserve ? 0 : accum;
2285 switch (alg.op[opno])
2287 case alg_shift:
2288 accum = expand_shift (LSHIFT_EXPR, mode, accum,
2289 build_int_2 (log, 0), NULL_RTX, 0);
2290 val_so_far <<= log;
2291 break;
2293 case alg_add_t_m2:
2294 tem = expand_shift (LSHIFT_EXPR, mode, op0,
2295 build_int_2 (log, 0), NULL_RTX, 0);
2296 accum = force_operand (gen_rtx_PLUS (mode, accum, tem),
2297 add_target ? add_target : accum_target);
2298 val_so_far += (HOST_WIDE_INT) 1 << log;
2299 break;
2301 case alg_sub_t_m2:
2302 tem = expand_shift (LSHIFT_EXPR, mode, op0,
2303 build_int_2 (log, 0), NULL_RTX, 0);
2304 accum = force_operand (gen_rtx_MINUS (mode, accum, tem),
2305 add_target ? add_target : accum_target);
2306 val_so_far -= (HOST_WIDE_INT) 1 << log;
2307 break;
2309 case alg_add_t2_m:
2310 accum = expand_shift (LSHIFT_EXPR, mode, accum,
2311 build_int_2 (log, 0), shift_subtarget,
2313 accum = force_operand (gen_rtx_PLUS (mode, accum, op0),
2314 add_target ? add_target : accum_target);
2315 val_so_far = (val_so_far << log) + 1;
2316 break;
2318 case alg_sub_t2_m:
2319 accum = expand_shift (LSHIFT_EXPR, mode, accum,
2320 build_int_2 (log, 0), shift_subtarget,
2322 accum = force_operand (gen_rtx_MINUS (mode, accum, op0),
2323 add_target ? add_target : accum_target);
2324 val_so_far = (val_so_far << log) - 1;
2325 break;
2327 case alg_add_factor:
2328 tem = expand_shift (LSHIFT_EXPR, mode, accum,
2329 build_int_2 (log, 0), NULL_RTX, 0);
2330 accum = force_operand (gen_rtx_PLUS (mode, accum, tem),
2331 add_target ? add_target : accum_target);
2332 val_so_far += val_so_far << log;
2333 break;
2335 case alg_sub_factor:
2336 tem = expand_shift (LSHIFT_EXPR, mode, accum,
2337 build_int_2 (log, 0), NULL_RTX, 0);
2338 accum = force_operand (gen_rtx_MINUS (mode, tem, accum),
2339 (add_target ? add_target
2340 : preserve ? 0 : tem));
2341 val_so_far = (val_so_far << log) - val_so_far;
2342 break;
2344 default:
2345 abort ();
2348 /* Write a REG_EQUAL note on the last insn so that we can cse
2349 multiplication sequences. */
2351 insn = get_last_insn ();
2352 REG_NOTES (insn)
2353 = gen_rtx_EXPR_LIST (REG_EQUAL,
2354 gen_rtx_MULT (mode, op0, GEN_INT (val_so_far)),
2355 REG_NOTES (insn));
2358 if (variant == negate_variant)
2360 val_so_far = - val_so_far;
2361 accum = expand_unop (mode, neg_optab, accum, target, 0);
2363 else if (variant == add_variant)
2365 val_so_far = val_so_far + 1;
2366 accum = force_operand (gen_rtx_PLUS (mode, accum, op0), target);
2369 if (val != val_so_far)
2370 abort ();
2372 return accum;
2376 /* This used to use umul_optab if unsigned, but for non-widening multiply
2377 there is no difference between signed and unsigned. */
2378 op0 = expand_binop (mode, smul_optab,
2379 op0, op1, target, unsignedp, OPTAB_LIB_WIDEN);
2380 if (op0 == 0)
2381 abort ();
2382 return op0;
2385 /* Return the smallest n such that 2**n >= X. */
2388 ceil_log2 (x)
2389 unsigned HOST_WIDE_INT x;
2391 return floor_log2 (x - 1) + 1;
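/* For example, ceil_log2 (5) = floor_log2 (4) + 1 = 3, since 2**3 = 8 >= 5;
   for an exact power of two, ceil_log2 (8) = floor_log2 (7) + 1 = 3.  */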
2394 /* Choose a minimal N + 1 bit approximation to 1/D that can be used to
2395 replace division by D, and put the least significant N bits of the result
2396 in *MULTIPLIER_PTR and return the most significant bit.
2398 The width of operations is N (should be <= HOST_BITS_PER_WIDE_INT), the
2399 needed precision is in PRECISION (should be <= N).
2401 PRECISION should be as small as possible so this function can choose
2402 the multiplier more freely.
2404 The rounded-up logarithm of D is placed in *lgup_ptr. A shift count that
2405 is to be used for a final right shift is placed in *POST_SHIFT_PTR.
2407 Using this function, x/D will be equal to (x * m) >> (*POST_SHIFT_PTR),
2408 where m is the full HOST_BITS_PER_WIDE_INT + 1 bit multiplier. */
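/* For illustration, with N = PRECISION = 32 and D = 7: lgup = 3,
   mhigh = floor ((2^35 + 2^3) / 7) = 0x124924925, and no reduction is
   possible, so *MULTIPLIER_PTR = 0x24924925, *POST_SHIFT_PTR = 3, and
   the most significant (returned) bit is 1; i.e. the multiplier is
   2^32 + 0x24924925.  */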
2410 static
2411 unsigned HOST_WIDE_INT
2412 choose_multiplier (d, n, precision, multiplier_ptr, post_shift_ptr, lgup_ptr)
2413 unsigned HOST_WIDE_INT d;
2414 int n;
2415 int precision;
2416 unsigned HOST_WIDE_INT *multiplier_ptr;
2417 int *post_shift_ptr;
2418 int *lgup_ptr;
2420 unsigned HOST_WIDE_INT mhigh_hi, mhigh_lo;
2421 unsigned HOST_WIDE_INT mlow_hi, mlow_lo;
2422 int lgup, post_shift;
2423 int pow, pow2;
2424 unsigned HOST_WIDE_INT nh, nl, dummy1, dummy2;
2426 /* lgup = ceil(log2(divisor)); */
2427 lgup = ceil_log2 (d);
2429 if (lgup > n)
2430 abort ();
2432 pow = n + lgup;
2433 pow2 = n + lgup - precision;
2435 if (pow == 2 * HOST_BITS_PER_WIDE_INT)
2437 /* We could handle this with some effort, but this case is much better
2438 handled directly with a scc insn, so rely on caller using that. */
2439 abort ();
2442 /* mlow = 2^(N + lgup)/d */
2443 if (pow >= HOST_BITS_PER_WIDE_INT)
2445 nh = (unsigned HOST_WIDE_INT) 1 << (pow - HOST_BITS_PER_WIDE_INT);
2446 nl = 0;
2448 else
2450 nh = 0;
2451 nl = (unsigned HOST_WIDE_INT) 1 << pow;
2453 div_and_round_double (TRUNC_DIV_EXPR, 1, nl, nh, d, (HOST_WIDE_INT) 0,
2454 &mlow_lo, &mlow_hi, &dummy1, &dummy2);
2456 /* mhigh = (2^(N + lgup) + 2^(N + lgup - precision))/d */
2457 if (pow2 >= HOST_BITS_PER_WIDE_INT)
2458 nh |= (unsigned HOST_WIDE_INT) 1 << (pow2 - HOST_BITS_PER_WIDE_INT);
2459 else
2460 nl |= (unsigned HOST_WIDE_INT) 1 << pow2;
2461 div_and_round_double (TRUNC_DIV_EXPR, 1, nl, nh, d, (HOST_WIDE_INT) 0,
2462 &mhigh_lo, &mhigh_hi, &dummy1, &dummy2);
2464 if (mhigh_hi && nh - d >= d)
2465 abort ();
2466 if (mhigh_hi > 1 || mlow_hi > 1)
2467 abort ();
2468 /* assert that mlow < mhigh. */
2469 if (! (mlow_hi < mhigh_hi || (mlow_hi == mhigh_hi && mlow_lo < mhigh_lo)))
2470 abort ();
2472 /* If precision == N, then mlow, mhigh exceed 2^N
2473 (but they do not exceed 2^(N+1)). */
2475 /* Reduce to lowest terms */
2476 for (post_shift = lgup; post_shift > 0; post_shift--)
2478 unsigned HOST_WIDE_INT ml_lo = (mlow_hi << (HOST_BITS_PER_WIDE_INT - 1)) | (mlow_lo >> 1);
2479 unsigned HOST_WIDE_INT mh_lo = (mhigh_hi << (HOST_BITS_PER_WIDE_INT - 1)) | (mhigh_lo >> 1);
2480 if (ml_lo >= mh_lo)
2481 break;
2483 mlow_hi = 0;
2484 mlow_lo = ml_lo;
2485 mhigh_hi = 0;
2486 mhigh_lo = mh_lo;
2489 *post_shift_ptr = post_shift;
2490 *lgup_ptr = lgup;
2491 if (n < HOST_BITS_PER_WIDE_INT)
2493 unsigned HOST_WIDE_INT mask = ((unsigned HOST_WIDE_INT) 1 << n) - 1;
2494 *multiplier_ptr = mhigh_lo & mask;
2495 return mhigh_lo >= mask;
2497 else
2499 *multiplier_ptr = mhigh_lo;
2500 return mhigh_hi;
2504 /* Compute the inverse of X mod 2**n, i.e., find Y such that X * Y is
2505 congruent to 1 (mod 2**N). */
2507 static unsigned HOST_WIDE_INT
2508 invert_mod2n (x, n)
2509 unsigned HOST_WIDE_INT x;
2510 int n;
2512 /* Solve x*y == 1 (mod 2^n), where x is odd. Return y. */
2514 /* The algorithm notes that the choice y = x satisfies
2515 x*y == 1 mod 2^3, since x is assumed odd.
2516 Each iteration doubles the number of bits of significance in y. */
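/* For example, with x = 3 and n = 8: y starts at 3 (exact mod 2^3);
   one iteration gives y = 3 * (2 - 9) & 0xff = 235 (exact mod 2^6),
   and the next gives y = 235 * (2 - 705) & 0xff = 171.
   Indeed 3 * 171 = 513 == 1 (mod 256).  */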
2518 unsigned HOST_WIDE_INT mask;
2519 unsigned HOST_WIDE_INT y = x;
2520 int nbit = 3;
2522 mask = (n == HOST_BITS_PER_WIDE_INT
2523 ? ~(unsigned HOST_WIDE_INT) 0
2524 : ((unsigned HOST_WIDE_INT) 1 << n) - 1);
2526 while (nbit < n)
2528 y = y * (2 - x*y) & mask; /* Modulo 2^N */
2529 nbit *= 2;
2531 return y;
2534 /* Emit code to adjust ADJ_OPERAND after a multiplication of OP0 and OP1
2535 of the wrong signedness flavor. ADJ_OPERAND is already the high half of
2536 the product OP0 x OP1. If UNSIGNEDP is nonzero, adjust the signed product
2537 to become unsigned; if UNSIGNEDP is zero, adjust the unsigned product to
2538 become signed.
2540 The result is put in TARGET if that is convenient.
2542 MODE is the mode of operation. */
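/* The adjustment below implements the identity (SX, SY being the sign
   bits of OP0 and OP1, N the mode bitsize, everything mod 2^N):
   high_unsigned (x * y) = high_signed (x * y) + (SY ? x : 0) + (SX ? y : 0).
   E.g. in an 8-bit mode with x = 0xff and y = 2: the signed high half
   is 0xff (product -2); adding (SX ? y : 0) = 2 gives 0x01, the high
   half of the unsigned product 255 * 2 = 0x1fe.  */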
2545 expand_mult_highpart_adjust (mode, adj_operand, op0, op1, target, unsignedp)
2546 enum machine_mode mode;
2547 register rtx adj_operand, op0, op1, target;
2548 int unsignedp;
2550 rtx tem;
2551 enum rtx_code adj_code = unsignedp ? PLUS : MINUS;
2553 tem = expand_shift (RSHIFT_EXPR, mode, op0,
2554 build_int_2 (GET_MODE_BITSIZE (mode) - 1, 0),
2555 NULL_RTX, 0);
2556 tem = expand_and (tem, op1, NULL_RTX);
2557 adj_operand
2558 = force_operand (gen_rtx_fmt_ee (adj_code, mode, adj_operand, tem),
2559 adj_operand);
2561 tem = expand_shift (RSHIFT_EXPR, mode, op1,
2562 build_int_2 (GET_MODE_BITSIZE (mode) - 1, 0),
2563 NULL_RTX, 0);
2564 tem = expand_and (tem, op0, NULL_RTX);
2565 target = force_operand (gen_rtx_fmt_ee (adj_code, mode, adj_operand, tem),
2566 target);
2568 return target;
2571 /* Emit code to multiply OP0 and CNST1, putting the high half of the result
2572 in TARGET if that is convenient, and return where the result is. If the
2573 operation can not be performed, 0 is returned.
2575 MODE is the mode of operation and result.
2577 UNSIGNEDP nonzero means unsigned multiply.
2579 MAX_COST is the total allowed cost for the expanded RTL. */
2582 expand_mult_highpart (mode, op0, cnst1, target, unsignedp, max_cost)
2583 enum machine_mode mode;
2584 register rtx op0, target;
2585 unsigned HOST_WIDE_INT cnst1;
2586 int unsignedp;
2587 int max_cost;
2589 enum machine_mode wider_mode = GET_MODE_WIDER_MODE (mode);
2590 optab mul_highpart_optab;
2591 optab moptab;
2592 rtx tem;
2593 int size = GET_MODE_BITSIZE (mode);
2594 rtx op1, wide_op1;
2596 /* We can't support modes wider than HOST_BITS_PER_WIDE_INT. */
2597 if (size > HOST_BITS_PER_WIDE_INT)
2598 abort ();
2600 op1 = GEN_INT (cnst1);
2602 if (GET_MODE_BITSIZE (wider_mode) <= HOST_BITS_PER_INT)
2603 wide_op1 = op1;
2604 else
2605 wide_op1
2606 = immed_double_const (cnst1,
2607 (unsignedp
2608 ? (HOST_WIDE_INT) 0
2609 : -(cnst1 >> (HOST_BITS_PER_WIDE_INT - 1))),
2610 wider_mode);
2612 /* expand_mult handles constant multiplication of word_mode
2613 or narrower. It does a poor job for large modes. */
2614 if (size < BITS_PER_WORD
2615 && mul_cost[(int) wider_mode] + shift_cost[size-1] < max_cost)
2617 /* We have to do this, since expand_binop doesn't do conversion for
2618 multiply. Maybe change expand_binop to handle widening multiply? */
2619 op0 = convert_to_mode (wider_mode, op0, unsignedp);
2621 tem = expand_mult (wider_mode, op0, wide_op1, NULL_RTX, unsignedp);
2622 tem = expand_shift (RSHIFT_EXPR, wider_mode, tem,
2623 build_int_2 (size, 0), NULL_RTX, 1);
2624 return convert_modes (mode, wider_mode, tem, unsignedp);
2627 if (target == 0)
2628 target = gen_reg_rtx (mode);
2630 /* Firstly, try using a multiplication insn that only generates the needed
2631 high part of the product, and in the sign flavor of unsignedp. */
2632 if (mul_highpart_cost[(int) mode] < max_cost)
2634 mul_highpart_optab = unsignedp ? umul_highpart_optab : smul_highpart_optab;
2635 target = expand_binop (mode, mul_highpart_optab,
2636 op0, wide_op1, target, unsignedp, OPTAB_DIRECT);
2637 if (target)
2638 return target;
2641 /* Secondly, same as above, but use sign flavor opposite of unsignedp.
2642 Need to adjust the result after the multiplication. */
2643 if (mul_highpart_cost[(int) mode] + 2 * shift_cost[size-1] + 4 * add_cost < max_cost)
2645 mul_highpart_optab = unsignedp ? smul_highpart_optab : umul_highpart_optab;
2646 target = expand_binop (mode, mul_highpart_optab,
2647 op0, wide_op1, target, unsignedp, OPTAB_DIRECT);
2648 if (target)
2649 /* We used the wrong signedness. Adjust the result. */
2650 return expand_mult_highpart_adjust (mode, target, op0,
2651 op1, target, unsignedp);
2654 /* Try widening multiplication. */
2655 moptab = unsignedp ? umul_widen_optab : smul_widen_optab;
2656 if (moptab->handlers[(int) wider_mode].insn_code != CODE_FOR_nothing
2657 && mul_widen_cost[(int) wider_mode] < max_cost)
2659 op1 = force_reg (mode, op1);
2660 goto try;
2663 /* Try widening the mode and perform a non-widening multiplication. */
2664 moptab = smul_optab;
2665 if (smul_optab->handlers[(int) wider_mode].insn_code != CODE_FOR_nothing
2666 && mul_cost[(int) wider_mode] + shift_cost[size-1] < max_cost)
2668 op1 = wide_op1;
2669 goto try;
2672 /* Try widening multiplication of opposite signedness, and adjust. */
2673 moptab = unsignedp ? smul_widen_optab : umul_widen_optab;
2674 if (moptab->handlers[(int) wider_mode].insn_code != CODE_FOR_nothing
2675 && (mul_widen_cost[(int) wider_mode]
2676 + 2 * shift_cost[size-1] + 4 * add_cost < max_cost))
2678 rtx regop1 = force_reg (mode, op1);
2679 tem = expand_binop (wider_mode, moptab, op0, regop1,
2680 NULL_RTX, ! unsignedp, OPTAB_WIDEN);
2681 if (tem != 0)
2683 /* Extract the high half of the just generated product. */
2684 tem = expand_shift (RSHIFT_EXPR, wider_mode, tem,
2685 build_int_2 (size, 0), NULL_RTX, 1);
2686 tem = convert_modes (mode, wider_mode, tem, unsignedp);
2687 /* We used the wrong signedness. Adjust the result. */
2688 return expand_mult_highpart_adjust (mode, tem, op0, op1,
2689 target, unsignedp);
2693 return 0;
2695 try:
2696 /* Pass NULL_RTX as target since TARGET has the wrong mode. */
2697 tem = expand_binop (wider_mode, moptab, op0, op1,
2698 NULL_RTX, unsignedp, OPTAB_WIDEN);
2699 if (tem == 0)
2700 return 0;
2702 /* Extract the high half of the just generated product. */
2703 if (mode == word_mode)
2705 return gen_highpart (mode, tem);
2707 else
2709 tem = expand_shift (RSHIFT_EXPR, wider_mode, tem,
2710 build_int_2 (size, 0), NULL_RTX, 1);
2711 return convert_modes (mode, wider_mode, tem, unsignedp);
2715 /* Emit the code to divide OP0 by OP1, putting the result in TARGET
2716 if that is convenient, and returning where the result is.
2717 You may request either the quotient or the remainder as the result;
2718 specify REM_FLAG nonzero to get the remainder.
2720 CODE is the expression code for which kind of division this is;
2721 it controls how rounding is done. MODE is the machine mode to use.
2722 UNSIGNEDP nonzero means do unsigned division. */
2724 /* ??? For CEIL_MOD_EXPR, can compute incorrect remainder with ANDI
2725 and then correct it by or'ing in missing high bits
2726 if result of ANDI is nonzero.
2727 For ROUND_MOD_EXPR, can use ANDI and then sign-extend the result.
2728 This could optimize to a bfexts instruction.
2729 But C doesn't use these operations, so their optimizations are
2730 left for later. */
2732 #define EXACT_POWER_OF_2_OR_ZERO_P(x) (((x) & ((x) - 1)) == 0)
2735 expand_divmod (rem_flag, code, mode, op0, op1, target, unsignedp)
2736 int rem_flag;
2737 enum tree_code code;
2738 enum machine_mode mode;
2739 register rtx op0, op1, target;
2740 int unsignedp;
2742 enum machine_mode compute_mode;
2743 register rtx tquotient;
2744 rtx quotient = 0, remainder = 0;
2745 rtx last;
2746 int size;
2747 rtx insn, set;
2748 optab optab1, optab2;
2749 int op1_is_constant, op1_is_pow2;
2750 int max_cost, extra_cost;
2751 static HOST_WIDE_INT last_div_const = 0;
2753 op1_is_constant = GET_CODE (op1) == CONST_INT;
2754 op1_is_pow2 = (op1_is_constant
2755 && ((EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1))
2756 || (! unsignedp && EXACT_POWER_OF_2_OR_ZERO_P (-INTVAL (op1))))));
2759 This is the structure of expand_divmod:
2761 First comes code to fix up the operands so we can perform the operations
2762 correctly and efficiently.
2764 Second comes a switch statement with code specific for each rounding mode.
2765 For some special operands this code emits all RTL for the desired
2766 operation; in other cases, it generates only a quotient and stores it in
2767 QUOTIENT. The case for trunc division/remainder might leave quotient = 0,
2768 to indicate that it has not done anything.
2770 Last comes code that finishes the operation. If QUOTIENT is set and
2771 REM_FLAG is set, the remainder is computed as OP0 - QUOTIENT * OP1. If
2772 QUOTIENT is not set, it is computed using trunc rounding.
2774 We try to generate special code for division and remainder when OP1 is a
2775 constant. If |OP1| = 2**n we can use shifts and some other fast
2776 operations. For other values of OP1, we compute a carefully selected
2777 fixed-point approximation m = 1/OP1, and generate code that multiplies OP0
2778 by m.
2780 In all cases but EXACT_DIV_EXPR, this multiplication requires the upper
2781 half of the product. Different strategies for generating the product are
2782 implemented in expand_mult_highpart.
2784 If what we actually want is the remainder, we generate that by another
2785 by-constant multiplication and a subtraction. */
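/* For illustration, with 32-bit operations an unsigned division by 10
   becomes a high-part multiply by the approximation m = 0xcccccccd
   followed by a right shift: x / 10 = (x * 0xcccccccd) >> 35
   for all 32-bit x.  */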
2787 /* We shouldn't be called with OP1 == const1_rtx, but some of the
2788 code below will malfunction if we are, so check here and handle
2789 the special case if so. */
2790 if (op1 == const1_rtx)
2791 return rem_flag ? const0_rtx : op0;
2793 if (target
2794 /* Don't use the function value register as a target
2795 since we have to read it as well as write it,
2796 and function-inlining gets confused by this. */
2797 && ((REG_P (target) && REG_FUNCTION_VALUE_P (target))
2798 /* Don't clobber an operand while doing a multi-step calculation. */
2799 || ((rem_flag || op1_is_constant)
2800 && (reg_mentioned_p (target, op0)
2801 || (GET_CODE (op0) == MEM && GET_CODE (target) == MEM)))
2802 || reg_mentioned_p (target, op1)
2803 || (GET_CODE (op1) == MEM && GET_CODE (target) == MEM)))
2804 target = 0;
2806 /* Get the mode in which to perform this computation. Normally it will
2807 be MODE, but sometimes we can't do the desired operation in MODE.
2808 If so, pick a wider mode in which we can do the operation. Convert
2809 to that mode at the start to avoid repeated conversions.
2811 First see what operations we need. These depend on the expression
2812 we are evaluating. (We assume that divxx3 insns exist under the
2813 same conditions as modxx3 insns do, and that these insns don't normally
2814 fail. If these assumptions are not correct, we may generate less
2815 efficient code in some cases.)
2817 Then see if we find a mode in which we can open-code that operation
2818 (either a division, modulus, or shift). Finally, check for the smallest
2819 mode for which we can do the operation with a library call. */
2821 /* We might want to refine this now that we have division-by-constant
2822 optimization. Since expand_mult_highpart tries so many variants, it is
2823 not straightforward to generalize this. Maybe we should make an array
2824 of possible modes in init_expmed? Save this for GCC 2.7. */
2826 optab1 = (op1_is_pow2 ? (unsignedp ? lshr_optab : ashr_optab)
2827 : (unsignedp ? udiv_optab : sdiv_optab));
2828 optab2 = (op1_is_pow2 ? optab1 : (unsignedp ? udivmod_optab : sdivmod_optab));
2830 for (compute_mode = mode; compute_mode != VOIDmode;
2831 compute_mode = GET_MODE_WIDER_MODE (compute_mode))
2832 if (optab1->handlers[(int) compute_mode].insn_code != CODE_FOR_nothing
2833 || optab2->handlers[(int) compute_mode].insn_code != CODE_FOR_nothing)
2834 break;
2836 if (compute_mode == VOIDmode)
2837 for (compute_mode = mode; compute_mode != VOIDmode;
2838 compute_mode = GET_MODE_WIDER_MODE (compute_mode))
2839 if (optab1->handlers[(int) compute_mode].libfunc
2840 || optab2->handlers[(int) compute_mode].libfunc)
2841 break;
2843 /* If we still couldn't find a mode, use MODE, but we'll probably abort
2844 in expand_binop. */
2845 if (compute_mode == VOIDmode)
2846 compute_mode = mode;
2848 if (target && GET_MODE (target) == compute_mode)
2849 tquotient = target;
2850 else
2851 tquotient = gen_reg_rtx (compute_mode);
2853 size = GET_MODE_BITSIZE (compute_mode);
2854 #if 0
2855 /* It should be possible to restrict the precision to GET_MODE_BITSIZE
2856 (mode), and thereby get better code when OP1 is a constant. Do that
2857 later. It will require going over all usages of SIZE below. */
2858 size = GET_MODE_BITSIZE (mode);
2859 #endif
2861 /* Only deduct something for a REM if the last divide done was
2862 for a different constant. Then set the constant of the last
2863 divide. */
2864 max_cost = div_cost[(int) compute_mode]
2865 - (rem_flag && ! (last_div_const != 0 && op1_is_constant
2866 && INTVAL (op1) == last_div_const)
2867 ? mul_cost[(int) compute_mode] + add_cost : 0);
2869 last_div_const = ! rem_flag && op1_is_constant ? INTVAL (op1) : 0;
2871 /* Now convert to the best mode to use. */
2872 if (compute_mode != mode)
2874 op0 = convert_modes (compute_mode, mode, op0, unsignedp);
2875 op1 = convert_modes (compute_mode, mode, op1, unsignedp);
2877 /* convert_modes may have placed op1 into a register, so we
2878 must recompute the following. */
2879 op1_is_constant = GET_CODE (op1) == CONST_INT;
2880 op1_is_pow2 = (op1_is_constant
2881 && ((EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1))
2882 || (! unsignedp
2883 && EXACT_POWER_OF_2_OR_ZERO_P (-INTVAL (op1))))));
2886 /* If one of the operands is a volatile MEM, copy it into a register. */
2888 if (GET_CODE (op0) == MEM && MEM_VOLATILE_P (op0))
2889 op0 = force_reg (compute_mode, op0);
2890 if (GET_CODE (op1) == MEM && MEM_VOLATILE_P (op1))
2891 op1 = force_reg (compute_mode, op1);
2893 /* If we need the remainder or if OP1 is constant, we need to
2894 put OP0 in a register in case it has any queued subexpressions. */
2895 if (rem_flag || op1_is_constant)
2896 op0 = force_reg (compute_mode, op0);
2898 last = get_last_insn ();
2900 /* Promote floor rounding to trunc rounding for unsigned operations. */
2901 if (unsignedp)
2903 if (code == FLOOR_DIV_EXPR)
2904 code = TRUNC_DIV_EXPR;
2905 if (code == FLOOR_MOD_EXPR)
2906 code = TRUNC_MOD_EXPR;
2907 if (code == EXACT_DIV_EXPR && op1_is_pow2)
2908 code = TRUNC_DIV_EXPR;
2911 if (op1 != const0_rtx)
2912 switch (code)
2914 case TRUNC_MOD_EXPR:
2915 case TRUNC_DIV_EXPR:
2916 if (op1_is_constant)
2918 if (unsignedp)
2920 unsigned HOST_WIDE_INT mh, ml;
2921 int pre_shift, post_shift;
2922 int dummy;
2923 unsigned HOST_WIDE_INT d = INTVAL (op1);
2925 if (EXACT_POWER_OF_2_OR_ZERO_P (d))
2927 pre_shift = floor_log2 (d);
2928 if (rem_flag)
2930 remainder
2931 = expand_binop (compute_mode, and_optab, op0,
2932 GEN_INT (((HOST_WIDE_INT) 1 << pre_shift) - 1),
2933 remainder, 1,
2934 OPTAB_LIB_WIDEN);
2935 if (remainder)
2936 return gen_lowpart (mode, remainder);
2938 quotient = expand_shift (RSHIFT_EXPR, compute_mode, op0,
2939 build_int_2 (pre_shift, 0),
2940 tquotient, 1);
2942 else if (size <= HOST_BITS_PER_WIDE_INT)
2944 if (d >= ((unsigned HOST_WIDE_INT) 1 << (size - 1)))
2946 /* Most significant bit of divisor is set; emit an scc
2947 insn. */
2948 quotient = emit_store_flag (tquotient, GEU, op0, op1,
2949 compute_mode, 1, 1);
2950 if (quotient == 0)
2951 goto fail1;
2953 else
2955 /* Find a suitable multiplier and right shift count
2956 instead of multiplying with D. */
2958 mh = choose_multiplier (d, size, size,
2959 &ml, &post_shift, &dummy);
2961 /* If the suggested multiplier is more than SIZE bits,
2962 we can do better for even divisors, using an
2963 initial right shift. */
2964 if (mh != 0 && (d & 1) == 0)
2966 pre_shift = floor_log2 (d & -d);
2967 mh = choose_multiplier (d >> pre_shift, size,
2968 size - pre_shift,
2969 &ml, &post_shift, &dummy);
2970 if (mh)
2971 abort ();
2973 else
2974 pre_shift = 0;
2976 if (mh != 0)
2978 rtx t1, t2, t3, t4;
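/* The multiplier needs SIZE + 1 bits, so emit the standard fixup
   sequence below: t1 = high part of op0 * ml, then
   quotient = (t1 + ((op0 - t1) >> 1)) >> (post_shift - 1).
   E.g. for d = 7, ml = 0x24924925 and post_shift = 3; with op0 = 7,
   t1 = 1 and the quotient is (1 + 3) >> 2 = 1.  */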
2980 extra_cost = (shift_cost[post_shift - 1]
2981 + shift_cost[1] + 2 * add_cost);
2982 t1 = expand_mult_highpart (compute_mode, op0, ml,
2983 NULL_RTX, 1,
2984 max_cost - extra_cost);
2985 if (t1 == 0)
2986 goto fail1;
2987 t2 = force_operand (gen_rtx_MINUS (compute_mode,
2988 op0, t1),
2989 NULL_RTX);
2990 t3 = expand_shift (RSHIFT_EXPR, compute_mode, t2,
2991 build_int_2 (1, 0), NULL_RTX,1);
2992 t4 = force_operand (gen_rtx_PLUS (compute_mode,
2993 t1, t3),
2994 NULL_RTX);
2995 quotient
2996 = expand_shift (RSHIFT_EXPR, compute_mode, t4,
2997 build_int_2 (post_shift - 1, 0),
2998 tquotient, 1);
3000 else
3002 rtx t1, t2;
3004 t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3005 build_int_2 (pre_shift, 0),
3006 NULL_RTX, 1);
3007 extra_cost = (shift_cost[pre_shift]
3008 + shift_cost[post_shift]);
3009 t2 = expand_mult_highpart (compute_mode, t1, ml,
3010 NULL_RTX, 1,
3011 max_cost - extra_cost);
3012 if (t2 == 0)
3013 goto fail1;
3014 quotient
3015 = expand_shift (RSHIFT_EXPR, compute_mode, t2,
3016 build_int_2 (post_shift, 0),
3017 tquotient, 1);
3021 else /* Too wide mode to use tricky code */
3022 break;
3024 insn = get_last_insn ();
3025 if (insn != last
3026 && (set = single_set (insn)) != 0
3027 && SET_DEST (set) == quotient)
3028 REG_NOTES (insn)
3029 = gen_rtx_EXPR_LIST (REG_EQUAL,
3030 gen_rtx_UDIV (compute_mode, op0, op1),
3031 REG_NOTES (insn));
3033 else /* TRUNC_DIV, signed */
3035 unsigned HOST_WIDE_INT ml;
3036 int lgup, post_shift;
3037 HOST_WIDE_INT d = INTVAL (op1);
3038 unsigned HOST_WIDE_INT abs_d = d >= 0 ? d : -d;
3040 /* n rem d = n rem -d */
3041 if (rem_flag && d < 0)
3043 d = abs_d;
3044 op1 = GEN_INT (abs_d);
3047 if (d == 1)
3048 quotient = op0;
3049 else if (d == -1)
3050 quotient = expand_unop (compute_mode, neg_optab, op0,
3051 tquotient, 0);
3052 else if (abs_d == (unsigned HOST_WIDE_INT) 1 << (size - 1))
3054 /* This case is not handled correctly below. */
3055 quotient = emit_store_flag (tquotient, EQ, op0, op1,
3056 compute_mode, 1, 1);
3057 if (quotient == 0)
3058 goto fail1;
3060 else if (EXACT_POWER_OF_2_OR_ZERO_P (d)
3061 && (rem_flag ? smod_pow2_cheap : sdiv_pow2_cheap))
3063 else if (EXACT_POWER_OF_2_OR_ZERO_P (abs_d))
3065 lgup = floor_log2 (abs_d);
3066 if (abs_d != 2 && BRANCH_COST < 3)
3068 rtx label = gen_label_rtx ();
3069 rtx t1;
3071 t1 = copy_to_mode_reg (compute_mode, op0);
3072 do_cmp_and_jump (t1, const0_rtx, GE,
3073 compute_mode, label);
3074 expand_inc (t1, GEN_INT (abs_d - 1));
3075 emit_label (label);
3076 quotient = expand_shift (RSHIFT_EXPR, compute_mode, t1,
3077 build_int_2 (lgup, 0),
3078 tquotient, 0);
3080 else
3082 rtx t1, t2, t3;
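/* Branch-free variant: t2 below is abs_d - 1 when op0 is negative
   and 0 otherwise, so the final arithmetic right shift rounds
   towards zero.  E.g. for d = 4 and size = 32, op0 = -7 gives
   t1 = -1, t2 = 3, and (-7 + 3) >> 2 = -1 = trunc (-7 / 4).  */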
3083 t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3084 build_int_2 (size - 1, 0),
3085 NULL_RTX, 0);
3086 t2 = expand_shift (RSHIFT_EXPR, compute_mode, t1,
3087 build_int_2 (size - lgup, 0),
3088 NULL_RTX, 1);
3089 t3 = force_operand (gen_rtx_PLUS (compute_mode,
3090 op0, t2),
3091 NULL_RTX);
3092 quotient = expand_shift (RSHIFT_EXPR, compute_mode, t3,
3093 build_int_2 (lgup, 0),
3094 tquotient, 0);
3097 /* We have computed OP0 / abs(OP1). If OP1 is negative, negate
3098 the quotient. */
3099 if (d < 0)
3101 insn = get_last_insn ();
3102 if (insn != last
3103 && (set = single_set (insn)) != 0
3104 && SET_DEST (set) == quotient)
3105 REG_NOTES (insn)
3106 = gen_rtx_EXPR_LIST (REG_EQUAL,
3107 gen_rtx_DIV (compute_mode,
3108 op0,
3109 GEN_INT (abs_d)),
3110 REG_NOTES (insn));
3112 quotient = expand_unop (compute_mode, neg_optab,
3113 quotient, quotient, 0);
3116 else if (size <= HOST_BITS_PER_WIDE_INT)
3118 choose_multiplier (abs_d, size, size - 1,
3119 &ml, &post_shift, &lgup);
3120 if (ml < (unsigned HOST_WIDE_INT) 1 << (size - 1))
3122 rtx t1, t2, t3;
3124 extra_cost = (shift_cost[post_shift]
3125 + shift_cost[size - 1] + add_cost);
3126 t1 = expand_mult_highpart (compute_mode, op0, ml,
3127 NULL_RTX, 0,
3128 max_cost - extra_cost);
3129 if (t1 == 0)
3130 goto fail1;
3131 t2 = expand_shift (RSHIFT_EXPR, compute_mode, t1,
3132 build_int_2 (post_shift, 0), NULL_RTX, 0);
3133 t3 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3134 build_int_2 (size - 1, 0), NULL_RTX, 0);
3135 if (d < 0)
3136 quotient = force_operand (gen_rtx_MINUS (compute_mode, t3, t2),
3137 tquotient);
3138 else
3139 quotient = force_operand (gen_rtx_MINUS (compute_mode, t2, t3),
3140 tquotient);
3142 else
3144 rtx t1, t2, t3, t4;
3146 ml |= (~(unsigned HOST_WIDE_INT) 0) << (size - 1);
3147 extra_cost = (shift_cost[post_shift]
3148 + shift_cost[size - 1] + 2 * add_cost);
3149 t1 = expand_mult_highpart (compute_mode, op0, ml,
3150 NULL_RTX, 0,
3151 max_cost - extra_cost);
3152 if (t1 == 0)
3153 goto fail1;
3154 t2 = force_operand (gen_rtx_PLUS (compute_mode, t1, op0),
3155 NULL_RTX);
3156 t3 = expand_shift (RSHIFT_EXPR, compute_mode, t2,
3157 build_int_2 (post_shift, 0), NULL_RTX, 0);
3158 t4 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3159 build_int_2 (size - 1, 0), NULL_RTX, 0);
3160 if (d < 0)
3161 quotient = force_operand (gen_rtx_MINUS (compute_mode, t4, t3),
3162 tquotient);
3163 else
3164 quotient = force_operand (gen_rtx_MINUS (compute_mode, t3, t4),
3165 tquotient);
3168 else /* Too wide mode to use tricky code */
3169 break;
3171 insn = get_last_insn ();
3172 if (insn != last
3173 && (set = single_set (insn)) != 0
3174 && SET_DEST (set) == quotient)
3175 REG_NOTES (insn)
3176 = gen_rtx_EXPR_LIST (REG_EQUAL,
3177 gen_rtx_DIV (compute_mode, op0, op1),
3178 REG_NOTES (insn));
3180 break;
3182 fail1:
3183 delete_insns_since (last);
3184 break;
3186 case FLOOR_DIV_EXPR:
3187 case FLOOR_MOD_EXPR:
3188 /* We will come here only for signed operations. */
3189 if (op1_is_constant && HOST_BITS_PER_WIDE_INT >= size)
3191 unsigned HOST_WIDE_INT mh, ml;
3192 int pre_shift, lgup, post_shift;
3193 HOST_WIDE_INT d = INTVAL (op1);
3195 if (d > 0)
3197 /* We could just as easily deal with negative constants here,
3198 but it does not seem worth the trouble for GCC 2.6. */
3199 if (EXACT_POWER_OF_2_OR_ZERO_P (d))
3201 pre_shift = floor_log2 (d);
3202 if (rem_flag)
3204 remainder = expand_binop (compute_mode, and_optab, op0,
3205 GEN_INT (((HOST_WIDE_INT) 1 << pre_shift) - 1),
3206 remainder, 0, OPTAB_LIB_WIDEN);
3207 if (remainder)
3208 return gen_lowpart (mode, remainder);
3210 quotient = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3211 build_int_2 (pre_shift, 0),
3212 tquotient, 0);
3214 else
3216 rtx t1, t2, t3, t4;
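/* The sequence below uses the identity floor (x / d) = ((x ^ s) / d) ^ s,
   where s = x >> (size - 1) is the sign mask: for negative x,
   x ^ s = -x - 1.  E.g. floor (-7 / 3): s = -1, x ^ s = 6,
   6 / 3 = 2, and 2 ^ s = -3.  */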
3218 mh = choose_multiplier (d, size, size - 1,
3219 &ml, &post_shift, &lgup);
3220 if (mh)
3221 abort ();
3223 t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3224 build_int_2 (size - 1, 0), NULL_RTX, 0);
3225 t2 = expand_binop (compute_mode, xor_optab, op0, t1,
3226 NULL_RTX, 0, OPTAB_WIDEN);
3227 extra_cost = (shift_cost[post_shift]
3228 + shift_cost[size - 1] + 2 * add_cost);
3229 t3 = expand_mult_highpart (compute_mode, t2, ml,
3230 NULL_RTX, 1,
3231 max_cost - extra_cost);
3232 if (t3 != 0)
3234 t4 = expand_shift (RSHIFT_EXPR, compute_mode, t3,
3235 build_int_2 (post_shift, 0),
3236 NULL_RTX, 1);
3237 quotient = expand_binop (compute_mode, xor_optab,
3238 t4, t1, tquotient, 0,
3239 OPTAB_WIDEN);
3243 else
3245 rtx nsign, t1, t2, t3, t4;
3246 t1 = force_operand (gen_rtx_PLUS (compute_mode,
3247 op0, constm1_rtx), NULL_RTX);
3248 t2 = expand_binop (compute_mode, ior_optab, op0, t1, NULL_RTX,
3249 0, OPTAB_WIDEN);
3250 nsign = expand_shift (RSHIFT_EXPR, compute_mode, t2,
3251 build_int_2 (size - 1, 0), NULL_RTX, 0);
3252 t3 = force_operand (gen_rtx_MINUS (compute_mode, t1, nsign),
3253 NULL_RTX);
3254 t4 = expand_divmod (0, TRUNC_DIV_EXPR, compute_mode, t3, op1,
3255 NULL_RTX, 0);
3256 if (t4)
3258 rtx t5;
3259 t5 = expand_unop (compute_mode, one_cmpl_optab, nsign,
3260 NULL_RTX, 0);
3261 quotient = force_operand (gen_rtx_PLUS (compute_mode,
3262 t4, t5),
3263 tquotient);
3268 if (quotient != 0)
3269 break;
3270 delete_insns_since (last);
3272 /* Try using an instruction that produces both the quotient and
3273 remainder, using truncation. We can easily compensate the quotient
3274 or remainder to get floor rounding, once we have the remainder.
3275 Notice that we also compute the final remainder value here,
3276 and return the result right away. */
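/* E.g. trunc rounding gives -7 / 3 = -2 rem -1; the remainder is
   nonzero and the operands' signs differ, so the adjustment below
   yields quotient -3 and remainder 2, the floor results.  */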
3277 if (target == 0 || GET_MODE (target) != compute_mode)
3278 target = gen_reg_rtx (compute_mode);
3280 if (rem_flag)
3282 remainder
3283 = GET_CODE (target) == REG ? target : gen_reg_rtx (compute_mode);
3284 quotient = gen_reg_rtx (compute_mode);
3286 else
3288 quotient
3289 = GET_CODE (target) == REG ? target : gen_reg_rtx (compute_mode);
3290 remainder = gen_reg_rtx (compute_mode);
3293 if (expand_twoval_binop (sdivmod_optab, op0, op1,
3294 quotient, remainder, 0))
3296 /* This could be computed with a branch-less sequence.
3297 Save that for later. */
3298 rtx tem;
3299 rtx label = gen_label_rtx ();
3300 do_cmp_and_jump (remainder, const0_rtx, EQ, compute_mode, label);
3301 tem = expand_binop (compute_mode, xor_optab, op0, op1,
3302 NULL_RTX, 0, OPTAB_WIDEN);
3303 do_cmp_and_jump (tem, const0_rtx, GE, compute_mode, label);
3304 expand_dec (quotient, const1_rtx);
3305 expand_inc (remainder, op1);
3306 emit_label (label);
3307 return gen_lowpart (mode, rem_flag ? remainder : quotient);
3310 /* No luck with division elimination or divmod. Have to do it
3311 by conditionally adjusting op0 *and* the result. */
3313 rtx label1, label2, label3, label4, label5;
3314 rtx adjusted_op0;
3315 rtx tem;
3317 quotient = gen_reg_rtx (compute_mode);
3318 adjusted_op0 = copy_to_mode_reg (compute_mode, op0);
3319 label1 = gen_label_rtx ();
3320 label2 = gen_label_rtx ();
3321 label3 = gen_label_rtx ();
3322 label4 = gen_label_rtx ();
3323 label5 = gen_label_rtx ();
3324 do_cmp_and_jump (op1, const0_rtx, LT, compute_mode, label2);
3325 do_cmp_and_jump (adjusted_op0, const0_rtx, LT, compute_mode, label1);
3326 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
3327 quotient, 0, OPTAB_LIB_WIDEN);
3328 if (tem != quotient)
3329 emit_move_insn (quotient, tem);
3330 emit_jump_insn (gen_jump (label5));
3331 emit_barrier ();
3332 emit_label (label1);
3333 expand_inc (adjusted_op0, const1_rtx);
3334 emit_jump_insn (gen_jump (label4));
3335 emit_barrier ();
3336 emit_label (label2);
3337 do_cmp_and_jump (adjusted_op0, const0_rtx, GT, compute_mode, label3);
3338 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
3339 quotient, 0, OPTAB_LIB_WIDEN);
3340 if (tem != quotient)
3341 emit_move_insn (quotient, tem);
3342 emit_jump_insn (gen_jump (label5));
3343 emit_barrier ();
3344 emit_label (label3);
3345 expand_dec (adjusted_op0, const1_rtx);
3346 emit_label (label4);
3347 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
3348 quotient, 0, OPTAB_LIB_WIDEN);
3349 if (tem != quotient)
3350 emit_move_insn (quotient, tem);
3351 expand_dec (quotient, const1_rtx);
3352 emit_label (label5);
3354 break;
3356 case CEIL_DIV_EXPR:
3357 case CEIL_MOD_EXPR:
3358 if (unsignedp)
3360 if (op1_is_constant && EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1)))
3362 rtx t1, t2, t3;
3363 unsigned HOST_WIDE_INT d = INTVAL (op1);
3364 t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3365 build_int_2 (floor_log2 (d), 0),
3366 tquotient, 1);
3367 t2 = expand_binop (compute_mode, and_optab, op0,
3368 GEN_INT (d - 1),
3369 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3370 t3 = gen_reg_rtx (compute_mode);
3371 t3 = emit_store_flag (t3, NE, t2, const0_rtx,
3372 compute_mode, 1, 1);
3373 if (t3 == 0)
3375 rtx lab;
3376 lab = gen_label_rtx ();
3377 do_cmp_and_jump (t2, const0_rtx, EQ, compute_mode, lab);
3378 expand_inc (t1, const1_rtx);
3379 emit_label (lab);
3380 quotient = t1;
3382 else
3383 quotient = force_operand (gen_rtx_PLUS (compute_mode,
3384 t1, t3),
3385 tquotient);
3386 break;
3389 /* Try using an instruction that produces both the quotient and
3390 remainder, using truncation. We can easily compensate the
3391 quotient or remainder to get ceiling rounding, once we have the
3392 remainder. Notice that we also compute the final remainder
3393 value here, and return the result right away. */
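/* E.g. trunc rounding gives 7 / 3 = 2 rem 1; the remainder is
   nonzero, so the adjustment below yields quotient 3 and
   remainder -2, the ceiling results: 7 = 3 * 3 - 2.  */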
3394 if (target == 0 || GET_MODE (target) != compute_mode)
3395 target = gen_reg_rtx (compute_mode);
3397 if (rem_flag)
3399 remainder = (GET_CODE (target) == REG
3400 ? target : gen_reg_rtx (compute_mode));
3401 quotient = gen_reg_rtx (compute_mode);
3403 else
3405 quotient = (GET_CODE (target) == REG
3406 ? target : gen_reg_rtx (compute_mode));
3407 remainder = gen_reg_rtx (compute_mode);
3410 if (expand_twoval_binop (udivmod_optab, op0, op1, quotient,
3411 remainder, 1))
3413 /* This could be computed with a branch-less sequence.
3414 Save that for later. */
3415 rtx label = gen_label_rtx ();
3416 do_cmp_and_jump (remainder, const0_rtx, EQ,
3417 compute_mode, label);
3418 expand_inc (quotient, const1_rtx);
3419 expand_dec (remainder, op1);
3420 emit_label (label);
3421 return gen_lowpart (mode, rem_flag ? remainder : quotient);
3424 /* No luck with division elimination or divmod. Have to do it
3425 by conditionally adjusting op0 *and* the result. */
3427 rtx label1, label2;
3428 rtx adjusted_op0, tem;
3430 quotient = gen_reg_rtx (compute_mode);
3431 adjusted_op0 = copy_to_mode_reg (compute_mode, op0);
3432 label1 = gen_label_rtx ();
3433 label2 = gen_label_rtx ();
3434 do_cmp_and_jump (adjusted_op0, const0_rtx, NE,
3435 compute_mode, label1);
3436 emit_move_insn (quotient, const0_rtx);
3437 emit_jump_insn (gen_jump (label2));
3438 emit_barrier ();
3439 emit_label (label1);
3440 expand_dec (adjusted_op0, const1_rtx);
3441 tem = expand_binop (compute_mode, udiv_optab, adjusted_op0, op1,
3442 quotient, 1, OPTAB_LIB_WIDEN);
3443 if (tem != quotient)
3444 emit_move_insn (quotient, tem);
3445 expand_inc (quotient, const1_rtx);
3446 emit_label (label2);
3449 else /* signed */
3451 if (op1_is_constant && EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1))
3452 && INTVAL (op1) >= 0)
3454 /* This is extremely similar to the code for the unsigned case
3455 above. For 2.7 we should merge these variants, but for
3456 2.6.1 I don't want to touch the code for unsigned since that
3457 gets used in C. The signed case will only be used by other
3458 languages (Ada). */
3460 rtx t1, t2, t3;
3461 unsigned HOST_WIDE_INT d = INTVAL (op1);
3462 t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3463 build_int_2 (floor_log2 (d), 0),
3464 tquotient, 0);
3465 t2 = expand_binop (compute_mode, and_optab, op0,
3466 GEN_INT (d - 1),
3467 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3468 t3 = gen_reg_rtx (compute_mode);
3469 t3 = emit_store_flag (t3, NE, t2, const0_rtx,
3470 compute_mode, 1, 1);
3471 if (t3 == 0)
3473 rtx lab;
3474 lab = gen_label_rtx ();
3475 do_cmp_and_jump (t2, const0_rtx, EQ, compute_mode, lab);
3476 expand_inc (t1, const1_rtx);
3477 emit_label (lab);
3478 quotient = t1;
3480 else
3481 quotient = force_operand (gen_rtx_PLUS (compute_mode,
3482 t1, t3),
3483 tquotient);
3484 break;
3487 /* Try using an instruction that produces both the quotient and
3488 remainder, using truncation. We can easily compensate the
3489 quotient or remainder to get ceiling rounding, once we have the
3490 remainder. Notice that we also compute the final remainder
3491 value here, and return the result right away. */
3492 if (target == 0 || GET_MODE (target) != compute_mode)
3493 target = gen_reg_rtx (compute_mode);
3494 if (rem_flag)
3496 remainder = (GET_CODE (target) == REG
3497 ? target : gen_reg_rtx (compute_mode));
3498 quotient = gen_reg_rtx (compute_mode);
3500 else
3502 quotient = (GET_CODE (target) == REG
3503 ? target : gen_reg_rtx (compute_mode));
3504 remainder = gen_reg_rtx (compute_mode);
3507 if (expand_twoval_binop (sdivmod_optab, op0, op1, quotient,
3508 remainder, 0))
3510 /* This could be computed with a branch-less sequence.
3511 Save that for later. */
3512 rtx tem;
3513 rtx label = gen_label_rtx ();
3514 do_cmp_and_jump (remainder, const0_rtx, EQ,
3515 compute_mode, label);
3516 tem = expand_binop (compute_mode, xor_optab, op0, op1,
3517 NULL_RTX, 0, OPTAB_WIDEN);
3518 do_cmp_and_jump (tem, const0_rtx, LT, compute_mode, label);
3519 expand_inc (quotient, const1_rtx);
3520 expand_dec (remainder, op1);
3521 emit_label (label);
3522 return gen_lowpart (mode, rem_flag ? remainder : quotient);
3525 /* No luck with division elimination or divmod. Have to do it
3526 by conditionally adjusting op0 *and* the result. */
3528 rtx label1, label2, label3, label4, label5;
3529 rtx adjusted_op0;
3530 rtx tem;
3532 quotient = gen_reg_rtx (compute_mode);
3533 adjusted_op0 = copy_to_mode_reg (compute_mode, op0);
3534 label1 = gen_label_rtx ();
3535 label2 = gen_label_rtx ();
3536 label3 = gen_label_rtx ();
3537 label4 = gen_label_rtx ();
3538 label5 = gen_label_rtx ();
3539 do_cmp_and_jump (op1, const0_rtx, LT, compute_mode, label2);
3540 do_cmp_and_jump (adjusted_op0, const0_rtx, GT,
3541 compute_mode, label1);
3542 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
3543 quotient, 0, OPTAB_LIB_WIDEN);
3544 if (tem != quotient)
3545 emit_move_insn (quotient, tem);
3546 emit_jump_insn (gen_jump (label5));
3547 emit_barrier ();
3548 emit_label (label1);
3549 expand_dec (adjusted_op0, const1_rtx);
3550 emit_jump_insn (gen_jump (label4));
3551 emit_barrier ();
3552 emit_label (label2);
3553 do_cmp_and_jump (adjusted_op0, const0_rtx, LT,
3554 compute_mode, label3);
3555 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
3556 quotient, 0, OPTAB_LIB_WIDEN);
3557 if (tem != quotient)
3558 emit_move_insn (quotient, tem);
3559 emit_jump_insn (gen_jump (label5));
3560 emit_barrier ();
3561 emit_label (label3);
3562 expand_inc (adjusted_op0, const1_rtx);
3563 emit_label (label4);
3564 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
3565 quotient, 0, OPTAB_LIB_WIDEN);
3566 if (tem != quotient)
3567 emit_move_insn (quotient, tem);
3568 expand_inc (quotient, const1_rtx);
3569 emit_label (label5);
3572 break;
3574 case EXACT_DIV_EXPR:
3575 if (op1_is_constant && HOST_BITS_PER_WIDE_INT >= size)
3577 HOST_WIDE_INT d = INTVAL (op1);
3578 unsigned HOST_WIDE_INT ml;
3579 int post_shift;
3580 rtx t1;
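/* Exact division: strip the power of two from D, then multiply by the
   inverse of the odd part modulo 2^SIZE.  E.g. with size = 8 and d = 3:
   ml = invert_mod2n (3, 8) = 171, and for op0 = 21 we get
   21 * 171 = 3591 == 7 (mod 256), which is indeed 21 / 3.  */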
3582 post_shift = floor_log2 (d & -d);
3583 ml = invert_mod2n (d >> post_shift, size);
3584 t1 = expand_mult (compute_mode, op0, GEN_INT (ml), NULL_RTX,
3585 unsignedp);
3586 quotient = expand_shift (RSHIFT_EXPR, compute_mode, t1,
3587 build_int_2 (post_shift, 0),
3588 NULL_RTX, unsignedp);
3590 insn = get_last_insn ();
3591 REG_NOTES (insn)
3592 = gen_rtx_EXPR_LIST (REG_EQUAL,
3593 gen_rtx_fmt_ee (unsignedp ? UDIV : DIV,
3594 compute_mode,
3595 op0, op1),
3596 REG_NOTES (insn));
3598 break;
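/* Editor's note: an illustrative sketch, not part of the original file,
   of the exact-division trick above.  The code multiplies by the inverse
   of the odd part of the divisor modulo 2^N, then shifts right by
   POST_SHIFT to remove the divisor's factors of two.  A hypothetical
   example, assuming a 32-bit unsigned int:  */
#if 0
static unsigned int
exact_udiv5_sketch (unsigned int x)
{
  /* 0xcccccccd is the multiplicative inverse of 5 mod 2^32, so when x
     is an exact multiple of 5 this computes x / 5 exactly; e.g.
     35 * 0xcccccccd == 7 (mod 2^32).  */
  return x * 0xcccccccdU;
}
#endif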
3600 case ROUND_DIV_EXPR:
3601 case ROUND_MOD_EXPR:
3602 if (unsignedp)
3604 rtx tem;
3605 rtx label;
3606 label = gen_label_rtx ();
3607 quotient = gen_reg_rtx (compute_mode);
3608 remainder = gen_reg_rtx (compute_mode);
3609 if (expand_twoval_binop (udivmod_optab, op0, op1, quotient, remainder, 1) == 0)
3611 rtx tem;
3612 quotient = expand_binop (compute_mode, udiv_optab, op0, op1,
3613 quotient, 1, OPTAB_LIB_WIDEN);
3614 tem = expand_mult (compute_mode, quotient, op1, NULL_RTX, 1);
3615 remainder = expand_binop (compute_mode, sub_optab, op0, tem,
3616 remainder, 1, OPTAB_LIB_WIDEN);
3618 tem = plus_constant (op1, -1);
3619 tem = expand_shift (RSHIFT_EXPR, compute_mode, tem,
3620 build_int_2 (1, 0), NULL_RTX, 1);
3621 do_cmp_and_jump (remainder, tem, LEU, compute_mode, label);
3622 expand_inc (quotient, const1_rtx);
3623 expand_dec (remainder, op1);
3624 emit_label (label);
3626 else
3628 rtx abs_rem, abs_op1, tem, mask;
3629 rtx label;
3630 label = gen_label_rtx ();
3631 quotient = gen_reg_rtx (compute_mode);
3632 remainder = gen_reg_rtx (compute_mode);
3633 if (expand_twoval_binop (sdivmod_optab, op0, op1, quotient, remainder, 0) == 0)
3635 rtx tem;
3636 quotient = expand_binop (compute_mode, sdiv_optab, op0, op1,
3637 quotient, 0, OPTAB_LIB_WIDEN);
3638 tem = expand_mult (compute_mode, quotient, op1, NULL_RTX, 0);
3639 remainder = expand_binop (compute_mode, sub_optab, op0, tem,
3640 remainder, 0, OPTAB_LIB_WIDEN);
3642 abs_rem = expand_abs (compute_mode, remainder, NULL_RTX, 0, 0);
3643 abs_op1 = expand_abs (compute_mode, op1, NULL_RTX, 0, 0);
3644 tem = expand_shift (LSHIFT_EXPR, compute_mode, abs_rem,
3645 build_int_2 (1, 0), NULL_RTX, 1);
3646 do_cmp_and_jump (tem, abs_op1, LTU, compute_mode, label);
3647 tem = expand_binop (compute_mode, xor_optab, op0, op1,
3648 NULL_RTX, 0, OPTAB_WIDEN);
3649 mask = expand_shift (RSHIFT_EXPR, compute_mode, tem,
3650 build_int_2 (size - 1, 0), NULL_RTX, 0);
3651 tem = expand_binop (compute_mode, xor_optab, mask, const1_rtx,
3652 NULL_RTX, 0, OPTAB_WIDEN);
3653 tem = expand_binop (compute_mode, sub_optab, tem, mask,
3654 NULL_RTX, 0, OPTAB_WIDEN);
3655 expand_inc (quotient, tem);
3656 tem = expand_binop (compute_mode, xor_optab, mask, op1,
3657 NULL_RTX, 0, OPTAB_WIDEN);
3658 tem = expand_binop (compute_mode, sub_optab, tem, mask,
3659 NULL_RTX, 0, OPTAB_WIDEN);
3660 expand_dec (remainder, tem);
3661 emit_label (label);
3663 return gen_lowpart (mode, rem_flag ? remainder : quotient);
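/* Editor's note: an illustrative sketch, not part of the original file,
   of the branch-free sign trick in the signed round-to-nearest path
   above.  MASK is 0 when op0 and op1 have the same sign and -1
   otherwise, so (MASK ^ v) - MASK computes v or -v without a branch.
   Plain C, assuming a 32-bit int with arithmetic right shift; the
   function name is hypothetical and INT_MIN corner cases are ignored.
   (The unsigned ROUND path above uses the same adjust-by-one idea,
   testing r > (op1 - 1) / 2.)  */
#if 0
static int
round_div_sketch (int x, int y)
{
  int q = x / y, r = x % y;	/* truncating division */
  /* Adjust by one (round half away from zero) when 2*|r| >= |y|;
     the unsigned compare mirrors the LTU test emitted above.  */
  if (2 * (unsigned int) (r < 0 ? -r : r)
      >= (unsigned int) (y < 0 ? -y : y))
    {
      int mask = (x ^ y) >> 31;	/* 0 if signs agree, else -1 */
      q += (mask ^ 1) - mask;	/* +1 or -1 */
    }
  return q;
}
#endif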
3665 default:
3666 abort ();
3669 if (quotient == 0)
3671 if (target && GET_MODE (target) != compute_mode)
3672 target = 0;
3674 if (rem_flag)
3676 /* Try to produce the remainder directly without a library call. */
3677 remainder = sign_expand_binop (compute_mode, umod_optab, smod_optab,
3678 op0, op1, target,
3679 unsignedp, OPTAB_WIDEN);
3680 if (remainder == 0)
3682 /* No luck there. Can we do remainder and divide at once
3683 without a library call? */
3684 remainder = gen_reg_rtx (compute_mode);
3685 if (! expand_twoval_binop ((unsignedp
3686 ? udivmod_optab
3687 : sdivmod_optab),
3688 op0, op1,
3689 NULL_RTX, remainder, unsignedp))
3690 remainder = 0;
3693 if (remainder)
3694 return gen_lowpart (mode, remainder);
3697 /* Produce the quotient. Try a quotient insn, but not a library call.
3698 If we have a divmod in this mode, use it in preference to widening
3699 the div (for this test we assume it will not fail). Note that OPTAB2
3700 is set to whichever of the two optabs the call below will use. */
3701 quotient
3702 = sign_expand_binop (compute_mode, udiv_optab, sdiv_optab,
3703 op0, op1, rem_flag ? NULL_RTX : target,
3704 unsignedp,
3705 ((optab2->handlers[(int) compute_mode].insn_code
3706 != CODE_FOR_nothing)
3707 ? OPTAB_DIRECT : OPTAB_WIDEN));
3709 if (quotient == 0)
3711 /* No luck there. Try a quotient-and-remainder insn,
3712 keeping the quotient alone. */
3713 quotient = gen_reg_rtx (compute_mode);
3714 if (! expand_twoval_binop (unsignedp ? udivmod_optab : sdivmod_optab,
3715 op0, op1,
3716 quotient, NULL_RTX, unsignedp))
3718 quotient = 0;
3719 if (! rem_flag)
3720 /* Still no luck. If we are not computing the remainder,
3721 use a library call for the quotient. */
3722 quotient = sign_expand_binop (compute_mode,
3723 udiv_optab, sdiv_optab,
3724 op0, op1, target,
3725 unsignedp, OPTAB_LIB_WIDEN);
3730 if (rem_flag)
3732 if (target && GET_MODE (target) != compute_mode)
3733 target = 0;
3735 if (quotient == 0)
3736 /* No divide instruction either. Use library for remainder. */
3737 remainder = sign_expand_binop (compute_mode, umod_optab, smod_optab,
3738 op0, op1, target,
3739 unsignedp, OPTAB_LIB_WIDEN);
3740 else
3742 /* We divided. Now finish doing X - Y * (X / Y). */
3743 remainder = expand_mult (compute_mode, quotient, op1,
3744 NULL_RTX, unsignedp);
3745 remainder = expand_binop (compute_mode, sub_optab, op0,
3746 remainder, target, unsignedp,
3747 OPTAB_LIB_WIDEN);
3751 return gen_lowpart (mode, rem_flag ? remainder : quotient);
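/* Editor's note (illustrative): the fallback above reconstructs the
   remainder from the quotient using the identity C itself guarantees
   for truncating division:  r = x - y * (x / y),  valid for any sign
   combination.  */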
3754 /* Return a tree node with data type TYPE, describing the value of X.
3755 Usually this is an RTL_EXPR if there is no obvious better choice.
3756 X may be an expression; however, we only support those expressions
3757 generated by loop.c. */
3759 tree
3760 make_tree (type, x)
3761 tree type;
3762 rtx x;
3764 tree t;
3766 switch (GET_CODE (x))
3768 case CONST_INT:
3769 t = build_int_2 (INTVAL (x),
3770 (TREE_UNSIGNED (type)
3771 && (GET_MODE_BITSIZE (TYPE_MODE (type)) < HOST_BITS_PER_WIDE_INT))
3772 || INTVAL (x) >= 0 ? 0 : -1);
3773 TREE_TYPE (t) = type;
3774 return t;
3776 case CONST_DOUBLE:
3777 if (GET_MODE (x) == VOIDmode)
3779 t = build_int_2 (CONST_DOUBLE_LOW (x), CONST_DOUBLE_HIGH (x));
3780 TREE_TYPE (t) = type;
3782 else
3784 REAL_VALUE_TYPE d;
3786 REAL_VALUE_FROM_CONST_DOUBLE (d, x);
3787 t = build_real (type, d);
3790 return t;
3792 case PLUS:
3793 return fold (build (PLUS_EXPR, type, make_tree (type, XEXP (x, 0)),
3794 make_tree (type, XEXP (x, 1))));
3796 case MINUS:
3797 return fold (build (MINUS_EXPR, type, make_tree (type, XEXP (x, 0)),
3798 make_tree (type, XEXP (x, 1))));
3800 case NEG:
3801 return fold (build1 (NEGATE_EXPR, type, make_tree (type, XEXP (x, 0))));
3803 case MULT:
3804 return fold (build (MULT_EXPR, type, make_tree (type, XEXP (x, 0)),
3805 make_tree (type, XEXP (x, 1))));
3807 case ASHIFT:
3808 return fold (build (LSHIFT_EXPR, type, make_tree (type, XEXP (x, 0)),
3809 make_tree (type, XEXP (x, 1))));
3811 case LSHIFTRT:
3812 return fold (convert (type,
3813 build (RSHIFT_EXPR, unsigned_type (type),
3814 make_tree (unsigned_type (type),
3815 XEXP (x, 0)),
3816 make_tree (type, XEXP (x, 1)))));
3818 case ASHIFTRT:
3819 return fold (convert (type,
3820 build (RSHIFT_EXPR, signed_type (type),
3821 make_tree (signed_type (type), XEXP (x, 0)),
3822 make_tree (type, XEXP (x, 1)))));
3824 case DIV:
3825 if (TREE_CODE (type) != REAL_TYPE)
3826 t = signed_type (type);
3827 else
3828 t = type;
3830 return fold (convert (type,
3831 build (TRUNC_DIV_EXPR, t,
3832 make_tree (t, XEXP (x, 0)),
3833 make_tree (t, XEXP (x, 1)))));
3834 case UDIV:
3835 t = unsigned_type (type);
3836 return fold (convert (type,
3837 build (TRUNC_DIV_EXPR, t,
3838 make_tree (t, XEXP (x, 0)),
3839 make_tree (t, XEXP (x, 1)))));
3840 default:
3841 t = make_node (RTL_EXPR);
3842 TREE_TYPE (t) = type;
3843 RTL_EXPR_RTL (t) = x;
3844 /* There are no insns to be output
3845 when this rtl_expr is used. */
3846 RTL_EXPR_SEQUENCE (t) = 0;
3847 return t;
3851 /* Return an rtx representing the value of X * MULT + ADD.
3852 TARGET is a suggestion for where to store the result (an rtx).
3853 MODE is the machine mode for the computation.
3854 X and MULT must have mode MODE, except that X may be a VOIDmode
3855 constant (treated as having mode MODE). ADD may have a different mode.
3856 UNSIGNEDP is non-zero to do unsigned multiplication.
3857 This may emit insns. */
3859 rtx
3860 expand_mult_add (x, target, mult, add, mode, unsignedp)
3861 rtx x, target, mult, add;
3862 enum machine_mode mode;
3863 int unsignedp;
3865 tree type = type_for_mode (mode, unsignedp);
3866 tree add_type = (GET_MODE (add) == VOIDmode
3867 ? type : type_for_mode (GET_MODE (add), unsignedp));
3868 tree result = fold (build (PLUS_EXPR, type,
3869 fold (build (MULT_EXPR, type,
3870 make_tree (type, x),
3871 make_tree (type, mult))),
3872 make_tree (add_type, add)));
3874 return expand_expr (result, target, VOIDmode, 0);
3877 /* Compute the logical-and of OP0 and OP1, storing it in TARGET
3878 and returning TARGET.
3880 If TARGET is 0, a pseudo-register or constant is returned. */
3882 rtx
3883 expand_and (op0, op1, target)
3884 rtx op0, op1, target;
3886 enum machine_mode mode = VOIDmode;
3887 rtx tem;
3889 if (GET_MODE (op0) != VOIDmode)
3890 mode = GET_MODE (op0);
3891 else if (GET_MODE (op1) != VOIDmode)
3892 mode = GET_MODE (op1);
3894 if (mode != VOIDmode)
3895 tem = expand_binop (mode, and_optab, op0, op1, target, 0, OPTAB_LIB_WIDEN);
3896 else if (GET_CODE (op0) == CONST_INT && GET_CODE (op1) == CONST_INT)
3897 tem = GEN_INT (INTVAL (op0) & INTVAL (op1));
3898 else
3899 abort ();
3901 if (target == 0)
3902 target = tem;
3903 else if (tem != target)
3904 emit_move_insn (target, tem);
3905 return target;
3908 /* Emit a store-flag instruction for comparison CODE on OP0 and OP1,
3909 storing the result in TARGET. Normally return TARGET.
3910 Return 0 if that cannot be done.
3912 MODE is the mode to use for OP0 and OP1 should they be CONST_INTs. If
3913 it is VOIDmode, they cannot both be CONST_INT.
3915 UNSIGNEDP is for the case where we have to widen the operands
3916 to perform the operation. It says to use zero-extension.
3918 NORMALIZEP is 1 if we should convert the result to be either zero
3919 or one. NORMALIZEP is -1 if we should convert the result to be
3920 either zero or -1. If NORMALIZEP is zero, the result will be left
3921 "raw" out of the scc insn. */
3923 rtx
3924 emit_store_flag (target, code, op0, op1, mode, unsignedp, normalizep)
3925 rtx target;
3926 enum rtx_code code;
3927 rtx op0, op1;
3928 enum machine_mode mode;
3929 int unsignedp;
3930 int normalizep;
3932 rtx subtarget;
3933 enum insn_code icode;
3934 enum machine_mode compare_mode;
3935 enum machine_mode target_mode = GET_MODE (target);
3936 rtx tem;
3937 rtx last = get_last_insn ();
3938 rtx pattern, comparison;
3940 /* If one operand is constant, make it the second one. Only do this
3941 if the other operand is not constant as well. */
3943 if ((CONSTANT_P (op0) && ! CONSTANT_P (op1))
3944 || (GET_CODE (op0) == CONST_INT && GET_CODE (op1) != CONST_INT))
3946 tem = op0;
3947 op0 = op1;
3948 op1 = tem;
3949 code = swap_condition (code);
3952 if (mode == VOIDmode)
3953 mode = GET_MODE (op0);
3955 /* For some comparisons with 1 and -1, we can convert this to
3956 comparisons with zero. This will often produce more opportunities for
3957 store-flag insns. */
3959 switch (code)
3961 case LT:
3962 if (op1 == const1_rtx)
3963 op1 = const0_rtx, code = LE;
3964 break;
3965 case LE:
3966 if (op1 == constm1_rtx)
3967 op1 = const0_rtx, code = LT;
3968 break;
3969 case GE:
3970 if (op1 == const1_rtx)
3971 op1 = const0_rtx, code = GT;
3972 break;
3973 case GT:
3974 if (op1 == constm1_rtx)
3975 op1 = const0_rtx, code = GE;
3976 break;
3977 case GEU:
3978 if (op1 == const1_rtx)
3979 op1 = const0_rtx, code = NE;
3980 break;
3981 case LTU:
3982 if (op1 == const1_rtx)
3983 op1 = const0_rtx, code = EQ;
3984 break;
3985 default:
3986 break;
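/* Editor's note (illustrative): the conversions above rest on integer
   identities such as
     x < 1   <==>  x <= 0	(signed)
     x >= 1  <==>  x != 0	(unsigned)
     x < 1   <==>  x == 0	(unsigned)
   which expose more store-flag opportunities, since comparisons
   against zero are the ones most machines provide directly.  */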
3989 /* From now on, we won't change CODE, so set ICODE now. */
3990 icode = setcc_gen_code[(int) code];
3992 /* If this is A < 0 or A >= 0, we can do this by taking the ones
3993 complement of A (for GE) and shifting the sign bit to the low bit. */
3994 if (op1 == const0_rtx && (code == LT || code == GE)
3995 && GET_MODE_CLASS (mode) == MODE_INT
3996 && (normalizep || STORE_FLAG_VALUE == 1
3997 || (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
3998 && ((STORE_FLAG_VALUE & GET_MODE_MASK (mode))
3999 == (HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1)))))
4001 subtarget = target;
4003 /* If the result is to be wider than OP0, it is best to convert it
4004 first. If it is to be narrower, it is *incorrect* to convert it
4005 first. */
4006 if (GET_MODE_SIZE (target_mode) > GET_MODE_SIZE (mode))
4008 op0 = protect_from_queue (op0, 0);
4009 op0 = convert_modes (target_mode, mode, op0, 0);
4010 mode = target_mode;
4013 if (target_mode != mode)
4014 subtarget = 0;
4016 if (code == GE)
4017 op0 = expand_unop (mode, one_cmpl_optab, op0,
4018 ((STORE_FLAG_VALUE == 1 || normalizep)
4019 ? 0 : subtarget), 0);
4021 if (STORE_FLAG_VALUE == 1 || normalizep)
4022 /* If we are supposed to produce a 0/1 value, we want to do
4023 a logical shift from the sign bit to the low-order bit; for
4024 a -1/0 value, we do an arithmetic shift. */
4025 op0 = expand_shift (RSHIFT_EXPR, mode, op0,
4026 size_int (GET_MODE_BITSIZE (mode) - 1),
4027 subtarget, normalizep != -1);
4029 if (mode != target_mode)
4030 op0 = convert_modes (target_mode, mode, op0, 0);
4032 return op0;
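/* Editor's note (illustrative): for a 32-bit int, the path above
   computes
     (a < 0)   as  (unsigned int) a >> 31
     (a >= 0)  as  (unsigned int) ~a >> 31
   for the 0/1 forms, and uses the arithmetic shift  a >> 31  when a
   0/-1 result is wanted.  */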
4035 if (icode != CODE_FOR_nothing)
4037 /* We think we may be able to do this with a scc insn. Emit the
4038 comparison and then the scc insn.
4040 compare_from_rtx may call emit_queue, which would be deleted below
4041 if the scc insn fails. So call it ourselves before setting LAST. */
4043 emit_queue ();
4044 last = get_last_insn ();
4046 comparison
4047 = compare_from_rtx (op0, op1, code, unsignedp, mode, NULL_RTX, 0);
4048 if (GET_CODE (comparison) == CONST_INT)
4049 return (comparison == const0_rtx ? const0_rtx
4050 : normalizep == 1 ? const1_rtx
4051 : normalizep == -1 ? constm1_rtx
4052 : const_true_rtx);
4054 /* If the code of COMPARISON doesn't match CODE, something is
4055 wrong; we can no longer be sure that we have the operation.
4056 We could handle this case, but it should not happen. */
4058 if (GET_CODE (comparison) != code)
4059 abort ();
4061 /* Get a reference to the target in the proper mode for this insn. */
4062 compare_mode = insn_operand_mode[(int) icode][0];
4063 subtarget = target;
4064 if (preserve_subexpressions_p ()
4065 || ! (*insn_operand_predicate[(int) icode][0]) (subtarget, compare_mode))
4066 subtarget = gen_reg_rtx (compare_mode);
4068 pattern = GEN_FCN (icode) (subtarget);
4069 if (pattern)
4071 emit_insn (pattern);
4073 /* If we are converting to a wider mode, first convert to
4074 TARGET_MODE, then normalize. This produces better combining
4075 opportunities on machines that have a SIGN_EXTRACT when we are
4076 testing a single bit. This mostly benefits the 68k.
4078 If STORE_FLAG_VALUE does not have the sign bit set when
4079 interpreted in COMPARE_MODE, we can do this conversion as
4080 unsigned, which is usually more efficient. */
4081 if (GET_MODE_SIZE (target_mode) > GET_MODE_SIZE (compare_mode))
4083 convert_move (target, subtarget,
4084 (GET_MODE_BITSIZE (compare_mode)
4085 <= HOST_BITS_PER_WIDE_INT)
4086 && 0 == (STORE_FLAG_VALUE
4087 & ((HOST_WIDE_INT) 1
4088 << (GET_MODE_BITSIZE (compare_mode) - 1))));
4089 op0 = target;
4090 compare_mode = target_mode;
4092 else
4093 op0 = subtarget;
4095 /* If we want to keep subexpressions around, don't reuse our
4096 last target. */
4098 if (preserve_subexpressions_p ())
4099 subtarget = 0;
4101 /* Now normalize to the proper value in COMPARE_MODE. Sometimes
4102 we don't have to do anything. */
4103 if (normalizep == 0 || normalizep == STORE_FLAG_VALUE)
4105 else if (normalizep == - STORE_FLAG_VALUE)
4106 op0 = expand_unop (compare_mode, neg_optab, op0, subtarget, 0);
4108 /* We don't want to use STORE_FLAG_VALUE < 0 below since this
4109 makes it hard to use a value of just the sign bit due to
4110 ANSI integer constant typing rules. */
4111 else if (GET_MODE_BITSIZE (compare_mode) <= HOST_BITS_PER_WIDE_INT
4112 && (STORE_FLAG_VALUE
4113 & ((HOST_WIDE_INT) 1
4114 << (GET_MODE_BITSIZE (compare_mode) - 1))))
4115 op0 = expand_shift (RSHIFT_EXPR, compare_mode, op0,
4116 size_int (GET_MODE_BITSIZE (compare_mode) - 1),
4117 subtarget, normalizep == 1);
4118 else if (STORE_FLAG_VALUE & 1)
4120 op0 = expand_and (op0, const1_rtx, subtarget);
4121 if (normalizep == -1)
4122 op0 = expand_unop (compare_mode, neg_optab, op0, op0, 0);
4124 else
4125 abort ();
4127 /* If we were converting to a smaller mode, do the
4128 conversion now. */
4129 if (target_mode != compare_mode)
4131 convert_move (target, op0, 0);
4132 return target;
4134 else
4135 return op0;
4139 delete_insns_since (last);
4141 /* With expensive optimizations, use a different pseudo register for
4142 each insn, instead of reusing the same pseudo. This leads to better
4143 CSE, but slows down the compiler, since there are more pseudos. */
4144 subtarget = (!flag_expensive_optimizations
4145 && (target_mode == mode)) ? target : NULL_RTX;
4147 /* If we reached here, we can't do this with a scc insn. However, there
4148 are some comparisons that can be done directly. For example, if
4149 this is an equality comparison of integers, we can try to exclusive-or
4150 (or subtract) the two operands and use a recursive call to try the
4151 comparison with zero. Don't do any of these cases if branches are
4152 very cheap. */
4154 if (BRANCH_COST > 0
4155 && GET_MODE_CLASS (mode) == MODE_INT && (code == EQ || code == NE)
4156 && op1 != const0_rtx)
4158 tem = expand_binop (mode, xor_optab, op0, op1, subtarget, 1,
4159 OPTAB_WIDEN);
4161 if (tem == 0)
4162 tem = expand_binop (mode, sub_optab, op0, op1, subtarget, 1,
4163 OPTAB_WIDEN);
4164 if (tem != 0)
4165 tem = emit_store_flag (target, code, tem, const0_rtx,
4166 mode, unsignedp, normalizep);
4167 if (tem == 0)
4168 delete_insns_since (last);
4169 return tem;
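/* Editor's note (illustrative): the recursive call above relies on
     (a == b)  <==>  ((a ^ b) == 0)  <==>  ((a - b) == 0)
   so a general equality test reduces to a comparison against zero,
   which the cases below can often handle without a branch.  */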
4172 /* Some other cases we can do are EQ, NE, LE, and GT comparisons with
4173 the constant zero. Reject all other comparisons at this point. Only
4174 do LE and GT if branches are expensive since they are expensive on
4175 2-operand machines. */
4177 if (BRANCH_COST == 0
4178 || GET_MODE_CLASS (mode) != MODE_INT || op1 != const0_rtx
4179 || (code != EQ && code != NE
4180 && (BRANCH_COST <= 1 || (code != LE && code != GT))))
4181 return 0;
4183 /* See what we need to return. We can only return 1, -1, or the
4184 sign bit. */
4186 if (normalizep == 0)
4188 if (STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
4189 normalizep = STORE_FLAG_VALUE;
4191 else if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
4192 && ((STORE_FLAG_VALUE & GET_MODE_MASK (mode))
4193 == (HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1)))
4195 else
4196 return 0;
4199 /* Try to put the result of the comparison in the sign bit. Assume we can't
4200 do the necessary operation below. */
4202 tem = 0;
4204 /* To see if A <= 0, compute (A | (A - 1)). A <= 0 iff that result has
4205 the sign bit set. */
4207 if (code == LE)
4209 /* This is destructive, so SUBTARGET can't be OP0. */
4210 if (rtx_equal_p (subtarget, op0))
4211 subtarget = 0;
4213 tem = expand_binop (mode, sub_optab, op0, const1_rtx, subtarget, 0,
4214 OPTAB_WIDEN);
4215 if (tem)
4216 tem = expand_binop (mode, ior_optab, op0, tem, subtarget, 0,
4217 OPTAB_WIDEN);
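/* Editor's note (illustrative): why (A | (A - 1)) works, in 32-bit
   two's complement:
     A < 0:   A itself has the sign bit set.
     A == 0:  A - 1 == -1, so the OR has the sign bit set.
     A > 0:   A and A - 1 both lie in [0, INT_MAX]; sign bit clear.  */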
4220 /* To see if A > 0, compute (((signed) A) >> BITS) - A, where BITS is the
4221 number of bits in the mode of OP0, minus one. */
4223 if (code == GT)
4225 if (rtx_equal_p (subtarget, op0))
4226 subtarget = 0;
4228 tem = expand_shift (RSHIFT_EXPR, mode, op0,
4229 size_int (GET_MODE_BITSIZE (mode) - 1),
4230 subtarget, 0);
4231 tem = expand_binop (mode, sub_optab, tem, op0, subtarget, 0,
4232 OPTAB_WIDEN);
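/* Editor's note (illustrative): with a 32-bit int and arithmetic
   shift, the sign bit of ((a >> 31) - a) is set exactly when a > 0:
     a > 0:   0 - a < 0.
     a == 0:  0 - 0 == 0.
     a < 0:   -1 - a == -(a + 1) >= 0, even for INT_MIN.  */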
4235 if (code == EQ || code == NE)
4237 /* For EQ or NE, one way to do the comparison is to apply an operation
4238 that converts the operand into a positive number if it is non-zero
4239 or zero if it was originally zero. Then, for EQ, we subtract 1 and
4240 for NE we negate. This puts the result in the sign bit. Then we
4241 normalize with a shift, if needed.
4243 Two operations that can do the above actions are ABS and FFS, so try
4244 them. If that doesn't work, and MODE is smaller than a full word,
4245 we can use zero-extension to the wider mode (an unsigned conversion)
4246 as the operation. */
4248 if (abs_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
4249 tem = expand_unop (mode, abs_optab, op0, subtarget, 1);
4250 else if (ffs_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
4251 tem = expand_unop (mode, ffs_optab, op0, subtarget, 1);
4252 else if (GET_MODE_SIZE (mode) < UNITS_PER_WORD)
4254 op0 = protect_from_queue (op0, 0);
4255 tem = convert_modes (word_mode, mode, op0, 1);
4256 mode = word_mode;
4259 if (tem != 0)
4261 if (code == EQ)
4262 tem = expand_binop (mode, sub_optab, tem, const1_rtx, subtarget,
4263 0, OPTAB_WIDEN);
4264 else
4265 tem = expand_unop (mode, neg_optab, tem, subtarget, 0);
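/* Editor's note (illustrative): with t = abs (a) or t = ffs (a),
   t == 0 iff a == 0, and t > 0 for any other a (the sign-bit tests
   even survive abs overflowing on INT_MIN), so
     sign bit of (t - 1) set  iff  a == 0	(the EQ case)
     sign bit of (-t) set     iff  a != 0	(the NE case)
   and the final shift below moves that bit into the result.  */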
4268 /* If we couldn't do it that way, for NE we can "or" the two's complement
4269 of the value with itself. For EQ, we take the one's complement of
4270 that "or", which is an extra insn, so we only handle EQ if branches
4271 are expensive. */
4273 if (tem == 0 && (code == NE || BRANCH_COST > 1))
4275 if (rtx_equal_p (subtarget, op0))
4276 subtarget = 0;
4278 tem = expand_unop (mode, neg_optab, op0, subtarget, 0);
4279 tem = expand_binop (mode, ior_optab, tem, op0, subtarget, 0,
4280 OPTAB_WIDEN);
4282 if (tem && code == EQ)
4283 tem = expand_unop (mode, one_cmpl_optab, tem, subtarget, 0);
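/* Editor's note (illustrative): (-a | a) has the sign bit set iff
   a != 0: for a > 0 the negation is negative, for a < 0 a itself is,
   and for a == 0 both operands are zero.  One's complementing that
   result then gives the EQ form.  */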
4287 if (tem && normalizep)
4288 tem = expand_shift (RSHIFT_EXPR, mode, tem,
4289 size_int (GET_MODE_BITSIZE (mode) - 1),
4290 subtarget, normalizep == 1);
4292 if (tem)
4294 if (GET_MODE (tem) != target_mode)
4296 convert_move (target, tem, 0);
4297 tem = target;
4299 else if (!subtarget)
4301 emit_move_insn (target, tem);
4302 tem = target;
4305 else
4306 delete_insns_since (last);
4308 return tem;
4311 /* Like emit_store_flag, but always succeeds. */
4313 rtx
4314 emit_store_flag_force (target, code, op0, op1, mode, unsignedp, normalizep)
4315 rtx target;
4316 enum rtx_code code;
4317 rtx op0, op1;
4318 enum machine_mode mode;
4319 int unsignedp;
4320 int normalizep;
4322 rtx tem, label;
4324 /* First see if emit_store_flag can do the job. */
4325 tem = emit_store_flag (target, code, op0, op1, mode, unsignedp, normalizep);
4326 if (tem != 0)
4327 return tem;
4329 if (normalizep == 0)
4330 normalizep = 1;
4332 /* If this failed, we have to do this with set/compare/jump/set code. */
4334 if (GET_CODE (target) != REG
4335 || reg_mentioned_p (target, op0) || reg_mentioned_p (target, op1))
4336 target = gen_reg_rtx (GET_MODE (target));
4338 emit_move_insn (target, const1_rtx);
4339 tem = compare_from_rtx (op0, op1, code, unsignedp, mode, NULL_RTX, 0);
4340 if (GET_CODE (tem) == CONST_INT)
4341 return tem;
4343 label = gen_label_rtx ();
4344 if (bcc_gen_fctn[(int) code] == 0)
4345 abort ();
4347 emit_jump_insn ((*bcc_gen_fctn[(int) code]) (label));
4348 emit_move_insn (target, const0_rtx);
4349 emit_label (label);
4351 return target;
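/* Editor's note (illustrative): the fallback above is the classic
   set/compare/jump/set sequence; with COND standing for the
   comparison, it is equivalent to this C:
     target = 1;
     if (cond) goto done;
     target = 0;
   done: ...  */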
4354 /* Perform possibly multi-word comparison and conditional jump to LABEL
4355 if ARG1 OP ARG2 is true, where ARG1 and ARG2 are of mode MODE.
4357 The algorithm is based on the code in expr.c:do_jump.
4359 Note that this does not perform a general comparison. Only variants
4360 generated within expmed.c are correctly handled, others abort (but could
4361 be handled if needed). */
4363 static void
4364 do_cmp_and_jump (arg1, arg2, op, mode, label)
4365 rtx arg1, arg2, label;
4366 enum rtx_code op;
4367 enum machine_mode mode;
4369 /* If this mode is an integer too wide to compare properly,
4370 compare word by word. Rely on cse to optimize constant cases. */
4372 if (GET_MODE_CLASS (mode) == MODE_INT && !can_compare_p (mode))
4374 rtx label2 = gen_label_rtx ();
4376 switch (op)
4378 case LTU:
4379 do_jump_by_parts_greater_rtx (mode, 1, arg2, arg1, label2, label);
4380 break;
4382 case LEU:
4383 do_jump_by_parts_greater_rtx (mode, 1, arg1, arg2, label, label2);
4384 break;
4386 case LT:
4387 do_jump_by_parts_greater_rtx (mode, 0, arg2, arg1, label2, label);
4388 break;
4390 case GT:
4391 do_jump_by_parts_greater_rtx (mode, 0, arg1, arg2, label2, label);
4392 break;
4394 case GE:
4395 do_jump_by_parts_greater_rtx (mode, 0, arg2, arg1, label, label2);
4396 break;
4398 /* do_jump_by_parts_equality_rtx compares with zero. Luckily
4399 those are the only equality operations we do. */
4400 case EQ:
4401 if (arg2 != const0_rtx || mode != GET_MODE (arg1))
4402 abort ();
4403 do_jump_by_parts_equality_rtx (arg1, label2, label);
4404 break;
4406 case NE:
4407 if (arg2 != const0_rtx || mode != GET_MODE (arg1))
4408 abort ();
4409 do_jump_by_parts_equality_rtx (arg1, label, label2);
4410 break;
4412 default:
4413 abort ();
4416 emit_label (label2);
4418 else
4420 emit_cmp_insn (arg1, arg2, op, NULL_RTX, mode, 0, 0);
4421 if (bcc_gen_fctn[(int) op] == 0)
4422 abort ();
4423 emit_jump_insn ((*bcc_gen_fctn[(int) op]) (label));