/* Medium-level subroutines: convert bit-field store and extract
   and shifts, multiplies and divides to rtl instructions.
   Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 59 Temple Place - Suite 330, Boston, MA
02111-1307, USA.  */
#include "config.h"
#include "system.h"
#include "toplev.h"
#include "rtl.h"
#include "tree.h"
#include "tm_p.h"
#include "flags.h"
#include "insn-config.h"
#include "expr.h"
#include "optabs.h"
#include "real.h"
#include "recog.h"
#include "langhooks.h"
static void store_fixed_bit_field PARAMS ((rtx, unsigned HOST_WIDE_INT,
                                           unsigned HOST_WIDE_INT,
                                           unsigned HOST_WIDE_INT, rtx));
static void store_split_bit_field PARAMS ((rtx, unsigned HOST_WIDE_INT,
                                           unsigned HOST_WIDE_INT, rtx));
static rtx extract_fixed_bit_field PARAMS ((enum machine_mode, rtx,
                                            unsigned HOST_WIDE_INT,
                                            unsigned HOST_WIDE_INT,
                                            unsigned HOST_WIDE_INT,
                                            rtx, int));
static rtx mask_rtx PARAMS ((enum machine_mode, int,
                             int, int));
static rtx lshift_value PARAMS ((enum machine_mode, rtx,
                                 int, int));
static rtx extract_split_bit_field PARAMS ((rtx, unsigned HOST_WIDE_INT,
                                            unsigned HOST_WIDE_INT, int));
static void do_cmp_and_jump PARAMS ((rtx, rtx, enum rtx_code,
                                     enum machine_mode, rtx));
/* Non-zero means divides or modulus operations are relatively cheap for
   powers of two, so don't use branches; emit the operation instead.
   Usually, this will mean that the MD file will emit non-branch
   sequences.  */

static int sdiv_pow2_cheap, smod_pow2_cheap;

#ifndef SLOW_UNALIGNED_ACCESS
#define SLOW_UNALIGNED_ACCESS(MODE, ALIGN) STRICT_ALIGNMENT
#endif

/* For compilers that support multiple targets with different word sizes,
   MAX_BITS_PER_WORD contains the biggest value of BITS_PER_WORD.  An example
   is the H8/300(H) compiler.  */

#ifndef MAX_BITS_PER_WORD
#define MAX_BITS_PER_WORD BITS_PER_WORD
#endif

/* Reduce conditional compilation elsewhere.  */
#ifndef HAVE_insv
#define HAVE_insv	0
#define CODE_FOR_insv	CODE_FOR_nothing
#define gen_insv(a,b,c,d) NULL_RTX
#endif
#ifndef HAVE_extv
#define HAVE_extv	0
#define CODE_FOR_extv	CODE_FOR_nothing
#define gen_extv(a,b,c,d) NULL_RTX
#endif
#ifndef HAVE_extzv
#define HAVE_extzv	0
#define CODE_FOR_extzv	CODE_FOR_nothing
#define gen_extzv(a,b,c,d) NULL_RTX
#endif

/* Cost of various pieces of RTL.  Note that some of these are indexed by
   shift count and some by mode.  */
static int add_cost, negate_cost, zero_cost;
static int shift_cost[MAX_BITS_PER_WORD];
static int shiftadd_cost[MAX_BITS_PER_WORD];
static int shiftsub_cost[MAX_BITS_PER_WORD];
static int mul_cost[NUM_MACHINE_MODES];
static int div_cost[NUM_MACHINE_MODES];
static int mul_widen_cost[NUM_MACHINE_MODES];
static int mul_highpart_cost[NUM_MACHINE_MODES];
void
init_expmed ()
{
  /* This is "some random pseudo register" for purposes of calling recog
     to see what insns exist.  */
  rtx reg = gen_rtx_REG (word_mode, 10000);
  rtx shift_insn, shiftadd_insn, shiftsub_insn;
  int dummy;
  int m;
  enum machine_mode mode, wider_mode;

  start_sequence ();

  reg = gen_rtx_REG (word_mode, 10000);

  zero_cost = rtx_cost (const0_rtx, 0);
  add_cost = rtx_cost (gen_rtx_PLUS (word_mode, reg, reg), SET);

  shift_insn = emit_insn (gen_rtx_SET (VOIDmode, reg,
                                       gen_rtx_ASHIFT (word_mode, reg,
                                                       const0_rtx)));

  shiftadd_insn
    = emit_insn (gen_rtx_SET (VOIDmode, reg,
                              gen_rtx_PLUS (word_mode,
                                            gen_rtx_MULT (word_mode,
                                                          reg, const0_rtx),
                                            reg)));

  shiftsub_insn
    = emit_insn (gen_rtx_SET (VOIDmode, reg,
                              gen_rtx_MINUS (word_mode,
                                             gen_rtx_MULT (word_mode,
                                                           reg, const0_rtx),
                                             reg)));

  init_recog ();

  shift_cost[0] = 0;
  shiftadd_cost[0] = shiftsub_cost[0] = add_cost;

  for (m = 1; m < MAX_BITS_PER_WORD; m++)
    {
      rtx c_int = GEN_INT ((HOST_WIDE_INT) 1 << m);
      shift_cost[m] = shiftadd_cost[m] = shiftsub_cost[m] = 32000;

      XEXP (SET_SRC (PATTERN (shift_insn)), 1) = GEN_INT (m);
      if (recog (PATTERN (shift_insn), shift_insn, &dummy) >= 0)
        shift_cost[m] = rtx_cost (SET_SRC (PATTERN (shift_insn)), SET);

      XEXP (XEXP (SET_SRC (PATTERN (shiftadd_insn)), 0), 1) = c_int;
      if (recog (PATTERN (shiftadd_insn), shiftadd_insn, &dummy) >= 0)
        shiftadd_cost[m] = rtx_cost (SET_SRC (PATTERN (shiftadd_insn)), SET);

      XEXP (XEXP (SET_SRC (PATTERN (shiftsub_insn)), 0), 1) = c_int;
      if (recog (PATTERN (shiftsub_insn), shiftsub_insn, &dummy) >= 0)
        shiftsub_cost[m] = rtx_cost (SET_SRC (PATTERN (shiftsub_insn)), SET);
    }

  negate_cost = rtx_cost (gen_rtx_NEG (word_mode, reg), SET);

  sdiv_pow2_cheap
    = (rtx_cost (gen_rtx_DIV (word_mode, reg, GEN_INT (32)), SET)
       <= 2 * add_cost);
  smod_pow2_cheap
    = (rtx_cost (gen_rtx_MOD (word_mode, reg, GEN_INT (32)), SET)
       <= 2 * add_cost);

  for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT);
       mode != VOIDmode;
       mode = GET_MODE_WIDER_MODE (mode))
    {
      reg = gen_rtx_REG (mode, 10000);
      div_cost[(int) mode] = rtx_cost (gen_rtx_UDIV (mode, reg, reg), SET);
      mul_cost[(int) mode] = rtx_cost (gen_rtx_MULT (mode, reg, reg), SET);
      wider_mode = GET_MODE_WIDER_MODE (mode);
      if (wider_mode != VOIDmode)
        {
          mul_widen_cost[(int) wider_mode]
            = rtx_cost (gen_rtx_MULT (wider_mode,
                                      gen_rtx_ZERO_EXTEND (wider_mode, reg),
                                      gen_rtx_ZERO_EXTEND (wider_mode, reg)),
                        SET);
          mul_highpart_cost[(int) mode]
            = rtx_cost (gen_rtx_TRUNCATE
                        (mode,
                         gen_rtx_LSHIFTRT (wider_mode,
                                           gen_rtx_MULT (wider_mode,
                                                         gen_rtx_ZERO_EXTEND
                                                         (wider_mode, reg),
                                                         gen_rtx_ZERO_EXTEND
                                                         (wider_mode, reg)),
                                           GEN_INT (GET_MODE_BITSIZE (mode)))),
                        SET);
        }
    }

  end_sequence ();
}
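
/* For example, with m == 3 the loop above asks recog whether the target
   accepts (set reg (ashift reg (const_int 3))) and, if so, records that
   insn's rtx_cost in shift_cost[3]; shiftadd_cost[3] likewise records the
   cost of (set reg (plus (mult reg (const_int 8)) reg)), a shift-and-add
   by 1 << 3.  Later code in this file can then compare these entries
   against mul_cost when deciding how to expand a multiplication.  */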
/* Return an rtx representing minus the value of X.
   MODE is the intended mode of the result,
   useful if X is a CONST_INT.  */

rtx
negate_rtx (mode, x)
     enum machine_mode mode;
     rtx x;
{
  rtx result = simplify_unary_operation (NEG, mode, x, mode);

  if (result == 0)
    result = expand_unop (mode, neg_optab, x, NULL_RTX, 0);

  return result;
}
/* Report on the availability of insv/extv/extzv and the desired mode
   of each of their operands.  Returns MAX_MACHINE_MODE if HAVE_foo
   is false; else the mode of the specified operand.  If OPNO is -1,
   all the caller cares about is whether the insn is available.  */

enum machine_mode
mode_for_extraction (pattern, opno)
     enum extraction_pattern pattern;
     int opno;
{
  const struct insn_data *data;

  switch (pattern)
    {
    case EP_insv:
      if (HAVE_insv)
        {
          data = &insn_data[CODE_FOR_insv];
          break;
        }
      return MAX_MACHINE_MODE;

    case EP_extv:
      if (HAVE_extv)
        {
          data = &insn_data[CODE_FOR_extv];
          break;
        }
      return MAX_MACHINE_MODE;

    case EP_extzv:
      if (HAVE_extzv)
        {
          data = &insn_data[CODE_FOR_extzv];
          break;
        }
      return MAX_MACHINE_MODE;

    default:
      abort ();
    }

  if (opno == -1)
    return VOIDmode;

  /* Everyone who uses this function used to follow it with
     if (result == VOIDmode) result = word_mode; */
  if (data->operand[opno].mode == VOIDmode)
    return word_mode;
  return data->operand[opno].mode;
}
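
/* For example, a caller that wants the mode the insv pattern expects for
   the value being inserted (operand 3) can write

	enum machine_mode op_mode = mode_for_extraction (EP_insv, 3);

   and compare the result against MAX_MACHINE_MODE to learn whether the
   pattern exists at all; store_bit_field below obtains its op_mode this
   way.  */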
/* Generate code to store value from rtx VALUE
   into a bit-field within structure STR_RTX
   containing BITSIZE bits starting at bit BITNUM.
   FIELDMODE is the machine-mode of the FIELD_DECL node for this field.
   The alignment of STR_RTX is known from STR_RTX itself (MEM_ALIGN,
   when it is a memory reference).
   TOTAL_SIZE is the size of the structure in bytes, or -1 if varying.  */
/* ??? Note that there are two different ideas here for how
   to determine the size to count bits within, for a register.
   One is BITS_PER_WORD, and the other is the size of operand 3
   of the insv pattern.

   If operand 3 of the insv pattern is VOIDmode, then we will use
   BITS_PER_WORD; else, we use the mode of operand 3.  */
rtx
store_bit_field (str_rtx, bitsize, bitnum, fieldmode, value, total_size)
     rtx str_rtx;
     unsigned HOST_WIDE_INT bitsize;
     unsigned HOST_WIDE_INT bitnum;
     enum machine_mode fieldmode;
     rtx value;
     HOST_WIDE_INT total_size;
{
  unsigned int unit
    = (GET_CODE (str_rtx) == MEM) ? BITS_PER_UNIT : BITS_PER_WORD;
  unsigned HOST_WIDE_INT offset = bitnum / unit;
  unsigned HOST_WIDE_INT bitpos = bitnum % unit;
  rtx op0 = str_rtx;
  int byte_offset;

  enum machine_mode op_mode = mode_for_extraction (EP_insv, 3);

  /* Discount the part of the structure before the desired byte.
     We need to know how many bytes are safe to reference after it.  */
  if (total_size >= 0)
    total_size -= (bitpos / BIGGEST_ALIGNMENT
                   * (BIGGEST_ALIGNMENT / BITS_PER_UNIT));

  while (GET_CODE (op0) == SUBREG)
    {
      /* The following line once was done only if WORDS_BIG_ENDIAN,
         but I think that is a mistake.  WORDS_BIG_ENDIAN is
         meaningful at a much higher level; when structures are copied
         between memory and regs, the higher-numbered regs
         always get higher addresses.  */
      offset += (SUBREG_BYTE (op0) / UNITS_PER_WORD);
      /* We used to adjust BITPOS here, but now we do the whole adjustment
         right after the loop.  */
      op0 = SUBREG_REG (op0);
    }

  value = protect_from_queue (value, 0);

  if (flag_force_mem)
    {
      int old_generating_concat_p = generating_concat_p;
      generating_concat_p = 0;
      value = force_not_mem (value);
      generating_concat_p = old_generating_concat_p;
    }

  /* If the target is a register, overwriting the entire object, or storing
     a full-word or multi-word field can be done with just a SUBREG.

     If the target is memory, storing any naturally aligned field can be
     done with a simple store.  For targets that support fast unaligned
     memory, any naturally sized, unit aligned field can be done directly.  */

  byte_offset = (bitnum % BITS_PER_WORD) / BITS_PER_UNIT
                + (offset * UNITS_PER_WORD);

  if (bitpos == 0
      && bitsize == GET_MODE_BITSIZE (fieldmode)
      && (GET_CODE (op0) != MEM
          ? ((GET_MODE_SIZE (fieldmode) >= UNITS_PER_WORD
              || GET_MODE_SIZE (GET_MODE (op0)) == GET_MODE_SIZE (fieldmode))
             && byte_offset % GET_MODE_SIZE (fieldmode) == 0)
          : (! SLOW_UNALIGNED_ACCESS (fieldmode, MEM_ALIGN (op0))
             || (offset * BITS_PER_UNIT % bitsize == 0
                 && MEM_ALIGN (op0) % GET_MODE_BITSIZE (fieldmode) == 0))))
    {
      if (GET_MODE (op0) != fieldmode)
        {
          if (GET_CODE (op0) == SUBREG)
            {
              if (GET_MODE (SUBREG_REG (op0)) == fieldmode
                  || GET_MODE_CLASS (fieldmode) == MODE_INT
                  || GET_MODE_CLASS (fieldmode) == MODE_PARTIAL_INT)
                op0 = SUBREG_REG (op0);
              else
                /* Else we've got some float mode source being extracted into
                   a different float mode destination -- this combination of
                   subregs results in Severe Tire Damage.  */
                abort ();
            }
          if (GET_CODE (op0) == REG)
            op0 = gen_rtx_SUBREG (fieldmode, op0, byte_offset);
          else
            op0 = adjust_address (op0, fieldmode, offset);
        }
      emit_move_insn (op0, value);
      return value;
    }
  /* Make sure we are playing with integral modes.  Pun with subregs
     if we aren't.  This must come after the entire register case above,
     since that case is valid for any mode.  The following cases are only
     valid for integral modes.  */
  {
    enum machine_mode imode = int_mode_for_mode (GET_MODE (op0));
    if (imode != GET_MODE (op0))
      {
        if (GET_CODE (op0) == MEM)
          op0 = adjust_address (op0, imode, 0);
        else if (imode != BLKmode)
          op0 = gen_lowpart (imode, op0);
        else
          abort ();
      }
  }

  /* We may be accessing data outside the field, which means
     we can alias adjacent data.  */
  if (GET_CODE (op0) == MEM)
    {
      op0 = shallow_copy_rtx (op0);
      set_mem_alias_set (op0, 0);
      set_mem_expr (op0, 0);
    }

  /* If OP0 is a register, BITPOS must count within a word.
     But as we have it, it counts within whatever size OP0 now has.
     On a bigendian machine, these are not the same, so convert.  */
  if (BYTES_BIG_ENDIAN
      && GET_CODE (op0) != MEM
      && unit > GET_MODE_BITSIZE (GET_MODE (op0)))
    bitpos += unit - GET_MODE_BITSIZE (GET_MODE (op0));

  /* Storing an lsb-aligned field in a register
     can be done with a movestrict instruction.  */

  if (GET_CODE (op0) != MEM
      && (BYTES_BIG_ENDIAN ? bitpos + bitsize == unit : bitpos == 0)
      && bitsize == GET_MODE_BITSIZE (fieldmode)
      && (movstrict_optab->handlers[(int) fieldmode].insn_code
          != CODE_FOR_nothing))
    {
      int icode = movstrict_optab->handlers[(int) fieldmode].insn_code;

      /* Get appropriate low part of the value being stored.  */
      if (GET_CODE (value) == CONST_INT || GET_CODE (value) == REG)
        value = gen_lowpart (fieldmode, value);
      else if (!(GET_CODE (value) == SYMBOL_REF
                 || GET_CODE (value) == LABEL_REF
                 || GET_CODE (value) == CONST))
        value = convert_to_mode (fieldmode, value, 0);

      if (! (*insn_data[icode].operand[1].predicate) (value, fieldmode))
        value = copy_to_mode_reg (fieldmode, value);

      if (GET_CODE (op0) == SUBREG)
        {
          if (GET_MODE (SUBREG_REG (op0)) == fieldmode
              || GET_MODE_CLASS (fieldmode) == MODE_INT
              || GET_MODE_CLASS (fieldmode) == MODE_PARTIAL_INT)
            op0 = SUBREG_REG (op0);
          else
            /* Else we've got some float mode source being extracted into
               a different float mode destination -- this combination of
               subregs results in Severe Tire Damage.  */
            abort ();
        }

      emit_insn (GEN_FCN (icode)
                 (gen_rtx_SUBREG (fieldmode, op0,
                                  (bitnum % BITS_PER_WORD) / BITS_PER_UNIT
                                  + (offset * UNITS_PER_WORD)),
                  value));

      return value;
    }
  /* Handle fields bigger than a word.  */

  if (bitsize > BITS_PER_WORD)
    {
      /* Here we transfer the words of the field
         in the order least significant first.
         This is because the most significant word is the one which may
         be less than full.
         However, only do that if the value is not BLKmode.  */

      unsigned int backwards = WORDS_BIG_ENDIAN && fieldmode != BLKmode;
      unsigned int nwords = (bitsize + (BITS_PER_WORD - 1)) / BITS_PER_WORD;
      unsigned int i;

      /* This is the mode we must force value to, so that there will be enough
         subwords to extract.  Note that fieldmode will often (always?) be
         VOIDmode, because that is what store_field uses to indicate that this
         is a bit field, but passing VOIDmode to operand_subword_force will
         result in an abort.  */
      fieldmode = smallest_mode_for_size (nwords * BITS_PER_WORD, MODE_INT);

      for (i = 0; i < nwords; i++)
        {
          /* If I is 0, use the low-order word in both field and target;
             if I is 1, use the next to lowest word; and so on.  */
          unsigned int wordnum = (backwards ? nwords - i - 1 : i);
          unsigned int bit_offset = (backwards
                                     ? MAX ((int) bitsize - ((int) i + 1)
                                            * BITS_PER_WORD,
                                            0)
                                     : (int) i * BITS_PER_WORD);

          store_bit_field (op0, MIN (BITS_PER_WORD,
                                     bitsize - i * BITS_PER_WORD),
                           bitnum + bit_offset, word_mode,
                           operand_subword_force (value, wordnum,
                                                  (GET_MODE (value) == VOIDmode
                                                   ? fieldmode
                                                   : GET_MODE (value))),
                           total_size);
        }
      return value;
    }
  /* From here on we can assume that the field to be stored in is not
     a full word, since it is shorter than a word.  */

  /* OFFSET is the number of words or bytes (UNIT says which)
     from STR_RTX to the first word or byte containing part of the field.  */
  if (GET_CODE (op0) != MEM)
    {
      if (offset != 0
          || GET_MODE_SIZE (GET_MODE (op0)) > UNITS_PER_WORD)
        {
          if (GET_CODE (op0) != REG)
            {
              /* Since this is a destination (lvalue), we can't copy it to a
                 pseudo.  We can trivially remove a SUBREG that does not
                 change the size of the operand.  Such a SUBREG may have been
                 added above.  Otherwise, abort.  */
              if (GET_CODE (op0) == SUBREG
                  && (GET_MODE_SIZE (GET_MODE (op0))
                      == GET_MODE_SIZE (GET_MODE (SUBREG_REG (op0)))))
                op0 = SUBREG_REG (op0);
              else
                abort ();
            }
          op0 = gen_rtx_SUBREG (mode_for_size (BITS_PER_WORD, MODE_INT, 0),
                                op0, (offset * UNITS_PER_WORD));
        }
      offset = 0;
    }
  else
    op0 = protect_from_queue (op0, 1);

  /* If VALUE is a floating-point mode, access it as an integer of the
     corresponding size.  This can occur on a machine with 64 bit registers
     that uses SFmode for float.  This can also occur for unaligned float
     structure fields.  */
  if (GET_MODE_CLASS (GET_MODE (value)) != MODE_INT
      && GET_MODE_CLASS (GET_MODE (value)) != MODE_PARTIAL_INT)
    value = gen_lowpart (word_mode, value);
  /* Now OFFSET is nonzero only if OP0 is memory
     and is therefore always measured in bytes.  */

  if (HAVE_insv
      && GET_MODE (value) != BLKmode
      && !(bitsize == 1 && GET_CODE (value) == CONST_INT)
      /* Ensure insv's size is wide enough for this field.  */
      && (GET_MODE_BITSIZE (op_mode) >= bitsize)
      && ! ((GET_CODE (op0) == REG || GET_CODE (op0) == SUBREG)
            && (bitsize + bitpos > GET_MODE_BITSIZE (op_mode))))
    {
      int xbitpos = bitpos;
      rtx value1;
      rtx xop0 = op0;
      rtx last = get_last_insn ();
      rtx pat;
      enum machine_mode maxmode = mode_for_extraction (EP_insv, 3);
      int save_volatile_ok = volatile_ok;

      volatile_ok = 1;

      /* If this machine's insv can only insert into a register, copy OP0
         into a register and save it back later.  */
      /* This used to check flag_force_mem, but that was a serious
         de-optimization now that flag_force_mem is enabled by -O2.  */
      if (GET_CODE (op0) == MEM
          && ! ((*insn_data[(int) CODE_FOR_insv].operand[0].predicate)
                (op0, VOIDmode)))
        {
          rtx tempreg;
          enum machine_mode bestmode;

          /* Get the mode to use for inserting into this field.  If OP0 is
             BLKmode, get the smallest mode consistent with the alignment.  If
             OP0 is a non-BLKmode object that is no wider than MAXMODE, use its
             mode.  Otherwise, use the smallest mode containing the field.  */

          if (GET_MODE (op0) == BLKmode
              || GET_MODE_SIZE (GET_MODE (op0)) > GET_MODE_SIZE (maxmode))
            bestmode
              = get_best_mode (bitsize, bitnum, MEM_ALIGN (op0), maxmode,
                               MEM_VOLATILE_P (op0));
          else
            bestmode = GET_MODE (op0);

          if (bestmode == VOIDmode
              || (SLOW_UNALIGNED_ACCESS (bestmode, MEM_ALIGN (op0))
                  && GET_MODE_BITSIZE (bestmode) > MEM_ALIGN (op0)))
            goto insv_loses;

          /* Adjust address to point to the containing unit of that mode.
             Compute offset as multiple of this unit, counting in bytes.  */
          unit = GET_MODE_BITSIZE (bestmode);
          offset = (bitnum / unit) * GET_MODE_SIZE (bestmode);
          bitpos = bitnum % unit;
          op0 = adjust_address (op0, bestmode, offset);

          /* Fetch that unit, store the bitfield in it, then store
             the unit.  */
          tempreg = copy_to_reg (op0);
          store_bit_field (tempreg, bitsize, bitpos, fieldmode, value,
                           total_size);
          emit_move_insn (op0, tempreg);
          return value;
        }
      volatile_ok = save_volatile_ok;

      /* Add OFFSET into OP0's address.  */
      if (GET_CODE (xop0) == MEM)
        xop0 = adjust_address (xop0, byte_mode, offset);

      /* If xop0 is a register, we need it in MAXMODE
         to make it acceptable to the format of insv.  */
      if (GET_CODE (xop0) == SUBREG)
        /* We can't just change the mode, because this might clobber op0,
           and we will need the original value of op0 if insv fails.  */
        xop0 = gen_rtx_SUBREG (maxmode, SUBREG_REG (xop0), SUBREG_BYTE (xop0));
      if (GET_CODE (xop0) == REG && GET_MODE (xop0) != maxmode)
        xop0 = gen_rtx_SUBREG (maxmode, xop0, 0);

      /* On big-endian machines, we count bits from the most significant.
         If the bit field insn does not, we must invert.  */

      if (BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
        xbitpos = unit - bitsize - xbitpos;

      /* We have been counting XBITPOS within UNIT.
         Count instead within the size of the register.  */
      if (BITS_BIG_ENDIAN && GET_CODE (xop0) != MEM)
        xbitpos += GET_MODE_BITSIZE (maxmode) - unit;

      unit = GET_MODE_BITSIZE (maxmode);

      /* Convert VALUE to maxmode (which insv insn wants) in VALUE1.  */
      value1 = value;
      if (GET_MODE (value) != maxmode)
        {
          if (GET_MODE_BITSIZE (GET_MODE (value)) >= bitsize)
            {
              /* Optimization: Don't bother really extending VALUE
                 if it has all the bits we will actually use.  However,
                 if we must narrow it, be sure we do it correctly.  */

              if (GET_MODE_SIZE (GET_MODE (value)) < GET_MODE_SIZE (maxmode))
                {
                  rtx tmp;

                  tmp = simplify_subreg (maxmode, value1, GET_MODE (value), 0);
                  if (! tmp)
                    tmp = simplify_gen_subreg (maxmode,
                                               force_reg (GET_MODE (value),
                                                          value1),
                                               GET_MODE (value), 0);
                  value1 = tmp;
                }
              else
                value1 = gen_lowpart (maxmode, value1);
            }
          else if (GET_CODE (value) == CONST_INT)
            value1 = gen_int_mode (INTVAL (value), maxmode);
          else if (!CONSTANT_P (value))
            /* Parse phase is supposed to make VALUE's data type
               match that of the component reference, which is a type
               at least as wide as the field; so VALUE should have
               a mode that corresponds to that type.  */
            abort ();
        }

      /* If this machine's insv insists on a register,
         get VALUE1 into a register.  */
      if (! ((*insn_data[(int) CODE_FOR_insv].operand[3].predicate)
             (value1, maxmode)))
        value1 = force_reg (maxmode, value1);

      pat = gen_insv (xop0, GEN_INT (bitsize), GEN_INT (xbitpos), value1);
      if (pat)
        emit_insn (pat);
      else
        {
          delete_insns_since (last);
          store_fixed_bit_field (op0, offset, bitsize, bitpos, value);
        }
    }
  else
    insv_loses:
    /* Insv is not available; store using shifts and boolean ops.  */
    store_fixed_bit_field (op0, offset, bitsize, bitpos, value);
  return value;
}
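
/* As an illustration, on a little-endian target the store

	struct { unsigned int a : 5, b : 3; } *s;
	s->b = v;

   reaches this function roughly as

	store_bit_field (s_mem, 3, 5, VOIDmode, v_rtx, 4);

   where S_MEM and V_RTX stand for whatever rtl the caller built:
   BITSIZE 3, BITNUM 5, TOTAL_SIZE 4 bytes.  Which of the strategies
   above is used then depends on whether the operand is a REG or a MEM
   and on what the target's insv pattern accepts.  */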
/* Use shifts and boolean operations to store VALUE
   into a bit field of width BITSIZE
   in a memory location specified by OP0 except offset by OFFSET bytes.
   (OFFSET must be 0 if OP0 is a register.)
   The field starts at position BITPOS within the byte.
   (If OP0 is a register, it may be a full word or a narrower mode,
   but BITPOS still counts within a full word,
   which is significant on bigendian machines.)

   Note that protect_from_queue has already been done on OP0 and VALUE.  */

static void
store_fixed_bit_field (op0, offset, bitsize, bitpos, value)
     rtx op0;
     unsigned HOST_WIDE_INT offset, bitsize, bitpos;
     rtx value;
{
  enum machine_mode mode;
  unsigned int total_bits = BITS_PER_WORD;
  rtx subtarget, temp;
  int all_zero = 0;
  int all_one = 0;

  /* There is a case not handled here:
     a structure with a known alignment of just a halfword
     and a field split across two aligned halfwords within the structure.
     Or likewise a structure with a known alignment of just a byte
     and a field split across two bytes.
     Such cases are not supposed to be able to occur.  */

  if (GET_CODE (op0) == REG || GET_CODE (op0) == SUBREG)
    {
      if (offset != 0)
        abort ();
      /* Special treatment for a bit field split across two registers.  */
      if (bitsize + bitpos > BITS_PER_WORD)
        {
          store_split_bit_field (op0, bitsize, bitpos, value);
          return;
        }
    }
  else
    {
      /* Get the proper mode to use for this field.  We want a mode that
         includes the entire field.  If such a mode would be larger than
         a word, we won't be doing the extraction the normal way.
         We don't want a mode bigger than the destination.  */

      mode = GET_MODE (op0);
      if (GET_MODE_BITSIZE (mode) == 0
          || GET_MODE_BITSIZE (mode) > GET_MODE_BITSIZE (word_mode))
        mode = word_mode;
      mode = get_best_mode (bitsize, bitpos + offset * BITS_PER_UNIT,
                            MEM_ALIGN (op0), mode, MEM_VOLATILE_P (op0));

      if (mode == VOIDmode)
        {
          /* The only way this should occur is if the field spans word
             boundaries.  */
          store_split_bit_field (op0, bitsize, bitpos + offset * BITS_PER_UNIT,
                                 value);
          return;
        }

      total_bits = GET_MODE_BITSIZE (mode);

      /* Make sure bitpos is valid for the chosen mode.  Adjust BITPOS to
         be in the range 0 to total_bits-1, and put any excess bytes in
         OFFSET.  */
      if (bitpos >= total_bits)
        {
          offset += (bitpos / total_bits) * (total_bits / BITS_PER_UNIT);
          bitpos -= ((bitpos / total_bits) * (total_bits / BITS_PER_UNIT)
                     * BITS_PER_UNIT);
        }

      /* Get ref to an aligned byte, halfword, or word containing the field.
         Adjust BITPOS to be position within a word,
         and OFFSET to be the offset of that word.
         Then alter OP0 to refer to that word.  */
      bitpos += (offset % (total_bits / BITS_PER_UNIT)) * BITS_PER_UNIT;
      offset -= (offset % (total_bits / BITS_PER_UNIT));
      op0 = adjust_address (op0, mode, offset);
    }

  mode = GET_MODE (op0);

  /* Now MODE is either some integral mode for a MEM as OP0,
     or is a full-word for a REG as OP0.  TOTAL_BITS corresponds.
     The bit field is contained entirely within OP0.
     BITPOS is the starting bit number within OP0.
     (OP0's mode may actually be narrower than MODE.)  */

  if (BYTES_BIG_ENDIAN)
    /* BITPOS is the distance between our msb
       and that of the containing datum.
       Convert it to the distance from the lsb.  */
    bitpos = total_bits - bitsize - bitpos;

  /* Now BITPOS is always the distance between our lsb
     and that of OP0.  */

  /* Shift VALUE left by BITPOS bits.  If VALUE is not constant,
     we must first convert its mode to MODE.  */

  if (GET_CODE (value) == CONST_INT)
    {
      HOST_WIDE_INT v = INTVAL (value);

      if (bitsize < HOST_BITS_PER_WIDE_INT)
        v &= ((HOST_WIDE_INT) 1 << bitsize) - 1;

      if (v == 0)
        all_zero = 1;
      else if ((bitsize < HOST_BITS_PER_WIDE_INT
                && v == ((HOST_WIDE_INT) 1 << bitsize) - 1)
               || (bitsize == HOST_BITS_PER_WIDE_INT && v == -1))
        all_one = 1;

      value = lshift_value (mode, value, bitpos, bitsize);
    }
  else
    {
      int must_and = (GET_MODE_BITSIZE (GET_MODE (value)) != bitsize
                      && bitpos + bitsize != GET_MODE_BITSIZE (mode));

      if (GET_MODE (value) != mode)
        {
          if ((GET_CODE (value) == REG || GET_CODE (value) == SUBREG)
              && GET_MODE_SIZE (mode) < GET_MODE_SIZE (GET_MODE (value)))
            value = gen_lowpart (mode, value);
          else
            value = convert_to_mode (mode, value, 1);
        }

      if (must_and)
        value = expand_binop (mode, and_optab, value,
                              mask_rtx (mode, 0, bitsize, 0),
                              NULL_RTX, 1, OPTAB_LIB_WIDEN);
      if (bitpos > 0)
        value = expand_shift (LSHIFT_EXPR, mode, value,
                              build_int_2 (bitpos, 0), NULL_RTX, 1);
    }

  /* Now clear the chosen bits in OP0,
     except that if VALUE is -1 we need not bother.  */

  subtarget = (GET_CODE (op0) == REG || ! flag_force_mem) ? op0 : 0;

  if (! all_one)
    {
      temp = expand_binop (mode, and_optab, op0,
                           mask_rtx (mode, bitpos, bitsize, 1),
                           subtarget, 1, OPTAB_LIB_WIDEN);
      subtarget = temp;
    }
  else
    temp = op0;

  /* Now logical-or VALUE into OP0, unless it is zero.  */

  if (! all_zero)
    temp = expand_binop (mode, ior_optab, temp, value,
                         subtarget, 1, OPTAB_LIB_WIDEN);
  if (op0 != temp)
    emit_move_insn (op0, temp);
}
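
/* As an illustration, storing the constant 5 in a 3-bit field at
   (little-endian) bit position 4 of a QImode MEM reduces to the
   equivalent of

	*p = (*p & ~(7 << 4)) | (5 << 4);

   where mask_rtx supplies the ~(7 << 4) clearing mask and lshift_value
   the shifted constant, so only the AND and the IOR are emitted.  */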
/* Store a bit field that is split across multiple accessible memory objects.

   OP0 is the REG, SUBREG or MEM rtx for the first of the objects.
   BITSIZE is the field width; BITPOS the position of its first bit
   (within the word).
   VALUE is the value to store.

   This does not yet handle fields wider than BITS_PER_WORD.  */

static void
store_split_bit_field (op0, bitsize, bitpos, value)
     rtx op0;
     unsigned HOST_WIDE_INT bitsize, bitpos;
     rtx value;
{
  unsigned int unit;
  unsigned int bitsdone = 0;

  /* Make sure UNIT isn't larger than BITS_PER_WORD; we can only handle
     that much at a time.  */
  if (GET_CODE (op0) == REG || GET_CODE (op0) == SUBREG)
    unit = BITS_PER_WORD;
  else
    unit = MIN (MEM_ALIGN (op0), BITS_PER_WORD);

  /* If VALUE is a constant other than a CONST_INT, get it into a register in
     WORD_MODE.  If we can do this using gen_lowpart_common, do so.  Note
     that VALUE might be a floating-point constant.  */
  if (CONSTANT_P (value) && GET_CODE (value) != CONST_INT)
    {
      rtx word = gen_lowpart_common (word_mode, value);

      if (word && (value != word))
        value = word;
      else
        value = gen_lowpart_common (word_mode,
                                    force_reg (GET_MODE (value) != VOIDmode
                                               ? GET_MODE (value)
                                               : word_mode, value));
    }
  else if (GET_CODE (value) == ADDRESSOF)
    value = copy_to_reg (value);

  while (bitsdone < bitsize)
    {
      unsigned HOST_WIDE_INT thissize;
      rtx part, word;
      unsigned HOST_WIDE_INT thispos;
      unsigned HOST_WIDE_INT offset;

      offset = (bitpos + bitsdone) / unit;
      thispos = (bitpos + bitsdone) % unit;

      /* THISSIZE must not overrun a word boundary.  Otherwise,
         store_fixed_bit_field will call us again, and we will mutually
         recurse forever.  */
      thissize = MIN (bitsize - bitsdone, BITS_PER_WORD);
      thissize = MIN (thissize, unit - thispos);

      if (BYTES_BIG_ENDIAN)
        {
          int total_bits;

          /* We must do an endian conversion exactly the same way as it is
             done in extract_bit_field, so that the two calls to
             extract_fixed_bit_field will have comparable arguments.  */
          if (GET_CODE (value) != MEM || GET_MODE (value) == BLKmode)
            total_bits = BITS_PER_WORD;
          else
            total_bits = GET_MODE_BITSIZE (GET_MODE (value));

          /* Fetch successively less significant portions.  */
          if (GET_CODE (value) == CONST_INT)
            part = GEN_INT (((unsigned HOST_WIDE_INT) (INTVAL (value))
                             >> (bitsize - bitsdone - thissize))
                            & (((HOST_WIDE_INT) 1 << thissize) - 1));
          else
            /* The args are chosen so that the last part includes the
               lsb.  Give extract_bit_field the value it needs (with
               endianness compensation) to fetch the piece we want.  */
            part = extract_fixed_bit_field (word_mode, value, 0, thissize,
                                            total_bits - bitsize + bitsdone,
                                            NULL_RTX, 1);
        }
      else
        {
          /* Fetch successively more significant portions.  */
          if (GET_CODE (value) == CONST_INT)
            part = GEN_INT (((unsigned HOST_WIDE_INT) (INTVAL (value))
                             >> bitsdone)
                            & (((HOST_WIDE_INT) 1 << thissize) - 1));
          else
            part = extract_fixed_bit_field (word_mode, value, 0, thissize,
                                            bitsdone, NULL_RTX, 1);
        }

      /* If OP0 is a register, then handle OFFSET here.

         When handling multiword bitfields, extract_bit_field may pass
         down a word_mode SUBREG of a larger REG for a bitfield that actually
         crosses a word boundary.  Thus, for a SUBREG, we must find
         the current word starting from the base register.  */
      if (GET_CODE (op0) == SUBREG)
        {
          int word_offset = (SUBREG_BYTE (op0) / UNITS_PER_WORD) + offset;
          word = operand_subword_force (SUBREG_REG (op0), word_offset,
                                        GET_MODE (SUBREG_REG (op0)));
          offset = 0;
        }
      else if (GET_CODE (op0) == REG)
        {
          word = operand_subword_force (op0, offset, GET_MODE (op0));
          offset = 0;
        }
      else
        word = op0;

      /* OFFSET is in UNITs, and UNIT is in bits.
         store_fixed_bit_field wants offset in bytes.  */
      store_fixed_bit_field (word, offset * unit / BITS_PER_UNIT, thissize,
                             thispos, part);
      bitsdone += thissize;
    }
}
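
/* As an illustration, with 32-bit words a 16-bit field starting at bit 24
   of a register pair is handled in two iterations of the loop above:
   bits 0-7 of VALUE go to bits 24-31 of the first word, and bits 8-15 of
   VALUE go to bits 0-7 of the second word (with the pieces taken in the
   opposite order when BYTES_BIG_ENDIAN).  */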
/* Generate code to extract a bit-field from STR_RTX
   containing BITSIZE bits, starting at BITNUM,
   and put it in TARGET if possible (if TARGET is nonzero).
   Regardless of TARGET, we return the rtx for where the value is placed.
   It may be a QUEUED.

   STR_RTX is the structure containing the field (a REG or MEM).
   UNSIGNEDP is nonzero if this is an unsigned bit field.
   MODE is the natural mode of the field value once extracted.
   TMODE is the mode the caller would like the value to have;
   but the value may be returned with type MODE instead.

   TOTAL_SIZE is the size in bytes of the containing structure,
   or -1 if varying.

   If a TARGET is specified and we can store in it at no extra cost,
   we do so, and return TARGET.
   Otherwise, we return a REG of mode TMODE or MODE, with TMODE preferred
   if they are equally easy.  */
rtx
extract_bit_field (str_rtx, bitsize, bitnum, unsignedp,
                   target, mode, tmode, total_size)
     rtx str_rtx;
     unsigned HOST_WIDE_INT bitsize;
     unsigned HOST_WIDE_INT bitnum;
     int unsignedp;
     rtx target;
     enum machine_mode mode, tmode;
     HOST_WIDE_INT total_size;
{
  unsigned int unit
    = (GET_CODE (str_rtx) == MEM) ? BITS_PER_UNIT : BITS_PER_WORD;
  unsigned HOST_WIDE_INT offset = bitnum / unit;
  unsigned HOST_WIDE_INT bitpos = bitnum % unit;
  rtx op0 = str_rtx;
  rtx spec_target = target;
  rtx spec_target_subreg = 0;
  enum machine_mode int_mode;
  enum machine_mode extv_mode = mode_for_extraction (EP_extv, 0);
  enum machine_mode extzv_mode = mode_for_extraction (EP_extzv, 0);
  enum machine_mode mode1;
  int byte_offset;

  /* Discount the part of the structure before the desired byte.
     We need to know how many bytes are safe to reference after it.  */
  if (total_size >= 0)
    total_size -= (bitpos / BIGGEST_ALIGNMENT
                   * (BIGGEST_ALIGNMENT / BITS_PER_UNIT));

  if (tmode == VOIDmode)
    tmode = mode;

  while (GET_CODE (op0) == SUBREG)
    {
      int outer_size = GET_MODE_BITSIZE (GET_MODE (op0));
      int inner_size = GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (op0)));

      offset += SUBREG_BYTE (op0) / UNITS_PER_WORD;

      inner_size = MIN (inner_size, BITS_PER_WORD);

      if (BYTES_BIG_ENDIAN && (outer_size < inner_size))
        {
          bitpos += inner_size - outer_size;
          if (bitpos > unit)
            {
              offset += (bitpos / unit);
              bitpos %= unit;
            }
        }

      op0 = SUBREG_REG (op0);
    }

  if (GET_CODE (op0) == REG
      && mode == GET_MODE (op0)
      && bitnum == 0
      && bitsize == GET_MODE_BITSIZE (GET_MODE (op0)))
    {
      /* We're trying to extract a full register from itself.  */
      return op0;
    }

  /* Make sure we are playing with integral modes.  Pun with subregs
     if we aren't.  */
  {
    enum machine_mode imode = int_mode_for_mode (GET_MODE (op0));
    if (imode != GET_MODE (op0))
      {
        if (GET_CODE (op0) == MEM)
          op0 = adjust_address (op0, imode, 0);
        else if (imode != BLKmode)
          op0 = gen_lowpart (imode, op0);
        else
          abort ();
      }
  }

  /* We may be accessing data outside the field, which means
     we can alias adjacent data.  */
  if (GET_CODE (op0) == MEM)
    {
      op0 = shallow_copy_rtx (op0);
      set_mem_alias_set (op0, 0);
      set_mem_expr (op0, 0);
    }

  /* ??? We currently assume TARGET is at least as big as BITSIZE.
     If that's wrong, the solution is to test for it and set TARGET to 0
     if needed.  */

  /* If OP0 is a register, BITPOS must count within a word.
     But as we have it, it counts within whatever size OP0 now has.
     On a bigendian machine, these are not the same, so convert.  */
  if (BYTES_BIG_ENDIAN
      && GET_CODE (op0) != MEM
      && unit > GET_MODE_BITSIZE (GET_MODE (op0)))
    bitpos += unit - GET_MODE_BITSIZE (GET_MODE (op0));

  /* Extracting a full-word or multi-word value
     from a structure in a register or aligned memory.
     This can be done with just SUBREG.
     So too extracting a subword value in
     the least significant part of the register.  */

  byte_offset = (bitnum % BITS_PER_WORD) / BITS_PER_UNIT
                + (offset * UNITS_PER_WORD);

  mode1 = (VECTOR_MODE_P (tmode)
           ? mode
           : mode_for_size (bitsize, GET_MODE_CLASS (tmode), 0));

  if (((GET_CODE (op0) != MEM
        && TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
                                  GET_MODE_BITSIZE (GET_MODE (op0)))
        && GET_MODE_SIZE (mode1) != 0
        && byte_offset % GET_MODE_SIZE (mode1) == 0)
       || (GET_CODE (op0) == MEM
           && (! SLOW_UNALIGNED_ACCESS (mode, MEM_ALIGN (op0))
               || (offset * BITS_PER_UNIT % bitsize == 0
                   && MEM_ALIGN (op0) % bitsize == 0))))
      && ((bitsize >= BITS_PER_WORD && bitsize == GET_MODE_BITSIZE (mode)
           && bitpos % BITS_PER_WORD == 0)
          || (mode_for_size (bitsize, GET_MODE_CLASS (tmode), 0) != BLKmode
              /* ??? The big endian test here is wrong.  This is correct
                 if the value is in a register, and if mode_for_size is not
                 the same mode as op0.  This causes us to get unnecessarily
                 inefficient code from the Thumb port when -mbig-endian.  */
              && (BYTES_BIG_ENDIAN
                  ? bitpos + bitsize == BITS_PER_WORD
                  : bitpos == 0))))
    {
      if (mode1 != GET_MODE (op0))
        {
          if (GET_CODE (op0) == SUBREG)
            {
              if (GET_MODE (SUBREG_REG (op0)) == mode1
                  || GET_MODE_CLASS (mode1) == MODE_INT
                  || GET_MODE_CLASS (mode1) == MODE_PARTIAL_INT)
                op0 = SUBREG_REG (op0);
              else
                /* Else we've got some float mode source being extracted into
                   a different float mode destination -- this combination of
                   subregs results in Severe Tire Damage.  */
                goto no_subreg_mode_swap;
            }
          if (GET_CODE (op0) == REG)
            op0 = gen_rtx_SUBREG (mode1, op0, byte_offset);
          else
            op0 = adjust_address (op0, mode1, offset);
        }
      if (mode1 != mode)
        return convert_to_mode (tmode, op0, unsignedp);
      return op0;
    }
 no_subreg_mode_swap:
  /* Handle fields bigger than a word.  */

  if (bitsize > BITS_PER_WORD)
    {
      /* Here we transfer the words of the field
         in the order least significant first.
         This is because the most significant word is the one which may
         be less than full.  */

      unsigned int nwords = (bitsize + (BITS_PER_WORD - 1)) / BITS_PER_WORD;
      unsigned int i;

      if (target == 0 || GET_CODE (target) != REG)
        target = gen_reg_rtx (mode);

      /* Indicate for flow that the entire target reg is being set.  */
      emit_insn (gen_rtx_CLOBBER (VOIDmode, target));

      for (i = 0; i < nwords; i++)
        {
          /* If I is 0, use the low-order word in both field and target;
             if I is 1, use the next to lowest word; and so on.  */
          /* Word number in TARGET to use.  */
          unsigned int wordnum
            = (WORDS_BIG_ENDIAN
               ? GET_MODE_SIZE (GET_MODE (target)) / UNITS_PER_WORD - i - 1
               : i);
          /* Offset from start of field in OP0.  */
          unsigned int bit_offset = (WORDS_BIG_ENDIAN
                                     ? MAX (0, ((int) bitsize - ((int) i + 1)
                                                * (int) BITS_PER_WORD))
                                     : (int) i * BITS_PER_WORD);
          rtx target_part = operand_subword (target, wordnum, 1, VOIDmode);
          rtx result_part
            = extract_bit_field (op0, MIN (BITS_PER_WORD,
                                           bitsize - i * BITS_PER_WORD),
                                 bitnum + bit_offset, 1, target_part, mode,
                                 word_mode, total_size);

          if (target_part == 0)
            abort ();

          if (result_part != target_part)
            emit_move_insn (target_part, result_part);
        }

      if (unsignedp)
        {
          /* Unless we've filled TARGET, the upper regs in a multi-reg value
             need to be zero'd out.  */
          if (GET_MODE_SIZE (GET_MODE (target)) > nwords * UNITS_PER_WORD)
            {
              unsigned int i, total_words;

              total_words = GET_MODE_SIZE (GET_MODE (target)) / UNITS_PER_WORD;
              for (i = nwords; i < total_words; i++)
                emit_move_insn
                  (operand_subword (target,
                                    WORDS_BIG_ENDIAN ? total_words - i - 1 : i,
                                    1, VOIDmode),
                   const0_rtx);
            }
          return target;
        }

      /* Signed bit field: sign-extend with two arithmetic shifts.  */
      target = expand_shift (LSHIFT_EXPR, mode, target,
                             build_int_2 (GET_MODE_BITSIZE (mode) - bitsize, 0),
                             NULL_RTX, 0);
      return expand_shift (RSHIFT_EXPR, mode, target,
                           build_int_2 (GET_MODE_BITSIZE (mode) - bitsize, 0),
                           NULL_RTX, 0);
    }
  /* From here on we know the desired field is smaller than a word.  */

  /* Check if there is a correspondingly-sized integer field, so we can
     safely extract it as one size of integer, if necessary; then
     truncate or extend to the size that is wanted; then use SUBREGs or
     convert_to_mode to get one of the modes we really wanted.  */

  int_mode = int_mode_for_mode (tmode);
  if (int_mode == BLKmode)
    int_mode = int_mode_for_mode (mode);
  if (int_mode == BLKmode)
    abort ();	/* Should probably push op0 out to memory and then
		   do a load.  */

  /* OFFSET is the number of words or bytes (UNIT says which)
     from STR_RTX to the first word or byte containing part of the field.  */

  if (GET_CODE (op0) != MEM)
    {
      if (offset != 0
          || GET_MODE_SIZE (GET_MODE (op0)) > UNITS_PER_WORD)
        {
          if (GET_CODE (op0) != REG)
            op0 = copy_to_reg (op0);
          op0 = gen_rtx_SUBREG (mode_for_size (BITS_PER_WORD, MODE_INT, 0),
                                op0, (offset * UNITS_PER_WORD));
        }
      offset = 0;
    }
  else
    op0 = protect_from_queue (str_rtx, 1);
  /* Now OFFSET is nonzero only for memory operands.  */

  if (unsignedp)
    {
      if (HAVE_extzv
          && (GET_MODE_BITSIZE (extzv_mode) >= bitsize)
          && ! ((GET_CODE (op0) == REG || GET_CODE (op0) == SUBREG)
                && (bitsize + bitpos > GET_MODE_BITSIZE (extzv_mode))))
        {
          unsigned HOST_WIDE_INT xbitpos = bitpos, xoffset = offset;
          rtx bitsize_rtx, bitpos_rtx;
          rtx last = get_last_insn ();
          rtx xop0 = op0;
          rtx xtarget = target;
          rtx xspec_target = spec_target;
          rtx xspec_target_subreg = spec_target_subreg;
          rtx pat;
          enum machine_mode maxmode = mode_for_extraction (EP_extzv, 0);

          if (GET_CODE (xop0) == MEM)
            {
              int save_volatile_ok = volatile_ok;
              volatile_ok = 1;

              /* Is the memory operand acceptable?  */
              if (! ((*insn_data[(int) CODE_FOR_extzv].operand[1].predicate)
                     (xop0, GET_MODE (xop0))))
                {
                  /* No, load into a reg and extract from there.  */
                  enum machine_mode bestmode;

                  /* Get the mode to use for inserting into this field.  If
                     OP0 is BLKmode, get the smallest mode consistent with the
                     alignment.  If OP0 is a non-BLKmode object that is no
                     wider than MAXMODE, use its mode.  Otherwise, use the
                     smallest mode containing the field.  */

                  if (GET_MODE (xop0) == BLKmode
                      || (GET_MODE_SIZE (GET_MODE (op0))
                          > GET_MODE_SIZE (maxmode)))
                    bestmode = get_best_mode (bitsize, bitnum,
                                              MEM_ALIGN (xop0), maxmode,
                                              MEM_VOLATILE_P (xop0));
                  else
                    bestmode = GET_MODE (xop0);

                  if (bestmode == VOIDmode
                      || (SLOW_UNALIGNED_ACCESS (bestmode, MEM_ALIGN (xop0))
                          && GET_MODE_BITSIZE (bestmode) > MEM_ALIGN (xop0)))
                    goto extzv_loses;

                  /* Compute offset as multiple of this unit,
                     counting in bytes.  */
                  unit = GET_MODE_BITSIZE (bestmode);
                  xoffset = (bitnum / unit) * GET_MODE_SIZE (bestmode);
                  xbitpos = bitnum % unit;
                  xop0 = adjust_address (xop0, bestmode, xoffset);

                  /* Fetch it to a register in that size.  */
                  xop0 = force_reg (bestmode, xop0);

                  /* XBITPOS counts within UNIT, which is what is expected.  */
                }
              else
                /* Get ref to first byte containing part of the field.  */
                xop0 = adjust_address (xop0, byte_mode, xoffset);

              volatile_ok = save_volatile_ok;
            }

          /* If op0 is a register, we need it in MAXMODE (which is usually
             SImode) to make it acceptable to the format of extzv.  */
          if (GET_CODE (xop0) == SUBREG && GET_MODE (xop0) != maxmode)
            goto extzv_loses;
          if (GET_CODE (xop0) == REG && GET_MODE (xop0) != maxmode)
            xop0 = gen_rtx_SUBREG (maxmode, xop0, 0);

          /* On big-endian machines, we count bits from the most significant.
             If the bit field insn does not, we must invert.  */
          if (BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
            xbitpos = unit - bitsize - xbitpos;

          /* Now convert from counting within UNIT to counting in MAXMODE.  */
          if (BITS_BIG_ENDIAN && GET_CODE (xop0) != MEM)
            xbitpos += GET_MODE_BITSIZE (maxmode) - unit;

          unit = GET_MODE_BITSIZE (maxmode);

          if (xtarget == 0
              || (flag_force_mem && GET_CODE (xtarget) == MEM))
            xtarget = xspec_target = gen_reg_rtx (tmode);

          if (GET_MODE (xtarget) != maxmode)
            {
              if (GET_CODE (xtarget) == REG)
                {
                  int wider = (GET_MODE_SIZE (maxmode)
                               > GET_MODE_SIZE (GET_MODE (xtarget)));
                  xtarget = gen_lowpart (maxmode, xtarget);
                  if (wider)
                    xspec_target_subreg = xtarget;
                }
              else
                xtarget = gen_reg_rtx (maxmode);
            }

          /* If this machine's extzv insists on a register target,
             make sure we have one.  */
          if (! ((*insn_data[(int) CODE_FOR_extzv].operand[0].predicate)
                 (xtarget, maxmode)))
            xtarget = gen_reg_rtx (maxmode);

          bitsize_rtx = GEN_INT (bitsize);
          bitpos_rtx = GEN_INT (xbitpos);

          pat = gen_extzv (protect_from_queue (xtarget, 1),
                           xop0, bitsize_rtx, bitpos_rtx);
          if (pat)
            {
              emit_insn (pat);
              target = xtarget;
              spec_target = xspec_target;
              spec_target_subreg = xspec_target_subreg;
            }
          else
            {
              delete_insns_since (last);
              target = extract_fixed_bit_field (int_mode, op0, offset, bitsize,
                                                bitpos, target, 1);
            }
        }
      else
        extzv_loses:
        target = extract_fixed_bit_field (int_mode, op0, offset, bitsize,
                                          bitpos, target, 1);
    }
  else
    {
      if (HAVE_extv
          && (GET_MODE_BITSIZE (extv_mode) >= bitsize)
          && ! ((GET_CODE (op0) == REG || GET_CODE (op0) == SUBREG)
                && (bitsize + bitpos > GET_MODE_BITSIZE (extv_mode))))
        {
          int xbitpos = bitpos, xoffset = offset;
          rtx bitsize_rtx, bitpos_rtx;
          rtx last = get_last_insn ();
          rtx xop0 = op0, xtarget = target;
          rtx xspec_target = spec_target;
          rtx xspec_target_subreg = spec_target_subreg;
          rtx pat;
          enum machine_mode maxmode = mode_for_extraction (EP_extv, 0);

          if (GET_CODE (xop0) == MEM)
            {
              /* Is the memory operand acceptable?  */
              if (! ((*insn_data[(int) CODE_FOR_extv].operand[1].predicate)
                     (xop0, GET_MODE (xop0))))
                {
                  /* No, load into a reg and extract from there.  */
                  enum machine_mode bestmode;

                  /* Get the mode to use for inserting into this field.  If
                     OP0 is BLKmode, get the smallest mode consistent with the
                     alignment.  If OP0 is a non-BLKmode object that is no
                     wider than MAXMODE, use its mode.  Otherwise, use the
                     smallest mode containing the field.  */

                  if (GET_MODE (xop0) == BLKmode
                      || (GET_MODE_SIZE (GET_MODE (op0))
                          > GET_MODE_SIZE (maxmode)))
                    bestmode = get_best_mode (bitsize, bitnum,
                                              MEM_ALIGN (xop0), maxmode,
                                              MEM_VOLATILE_P (xop0));
                  else
                    bestmode = GET_MODE (xop0);

                  if (bestmode == VOIDmode
                      || (SLOW_UNALIGNED_ACCESS (bestmode, MEM_ALIGN (xop0))
                          && GET_MODE_BITSIZE (bestmode) > MEM_ALIGN (xop0)))
                    goto extv_loses;

                  /* Compute offset as multiple of this unit,
                     counting in bytes.  */
                  unit = GET_MODE_BITSIZE (bestmode);
                  xoffset = (bitnum / unit) * GET_MODE_SIZE (bestmode);
                  xbitpos = bitnum % unit;
                  xop0 = adjust_address (xop0, bestmode, xoffset);

                  /* Fetch it to a register in that size.  */
                  xop0 = force_reg (bestmode, xop0);

                  /* XBITPOS counts within UNIT, which is what is expected.  */
                }
              else
                /* Get ref to first byte containing part of the field.  */
                xop0 = adjust_address (xop0, byte_mode, xoffset);
            }

          /* If op0 is a register, we need it in MAXMODE (which is usually
             SImode) to make it acceptable to the format of extv.  */
          if (GET_CODE (xop0) == SUBREG && GET_MODE (xop0) != maxmode)
            goto extv_loses;
          if (GET_CODE (xop0) == REG && GET_MODE (xop0) != maxmode)
            xop0 = gen_rtx_SUBREG (maxmode, xop0, 0);

          /* On big-endian machines, we count bits from the most significant.
             If the bit field insn does not, we must invert.  */
          if (BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
            xbitpos = unit - bitsize - xbitpos;

          /* XBITPOS counts within a size of UNIT.
             Adjust to count within a size of MAXMODE.  */
          if (BITS_BIG_ENDIAN && GET_CODE (xop0) != MEM)
            xbitpos += (GET_MODE_BITSIZE (maxmode) - unit);

          unit = GET_MODE_BITSIZE (maxmode);

          if (xtarget == 0
              || (flag_force_mem && GET_CODE (xtarget) == MEM))
            xtarget = xspec_target = gen_reg_rtx (tmode);

          if (GET_MODE (xtarget) != maxmode)
            {
              if (GET_CODE (xtarget) == REG)
                {
                  int wider = (GET_MODE_SIZE (maxmode)
                               > GET_MODE_SIZE (GET_MODE (xtarget)));
                  xtarget = gen_lowpart (maxmode, xtarget);
                  if (wider)
                    xspec_target_subreg = xtarget;
                }
              else
                xtarget = gen_reg_rtx (maxmode);
            }

          /* If this machine's extv insists on a register target,
             make sure we have one.  */
          if (! ((*insn_data[(int) CODE_FOR_extv].operand[0].predicate)
                 (xtarget, maxmode)))
            xtarget = gen_reg_rtx (maxmode);

          bitsize_rtx = GEN_INT (bitsize);
          bitpos_rtx = GEN_INT (xbitpos);

          pat = gen_extv (protect_from_queue (xtarget, 1),
                          xop0, bitsize_rtx, bitpos_rtx);
          if (pat)
            {
              emit_insn (pat);
              target = xtarget;
              spec_target = xspec_target;
              spec_target_subreg = xspec_target_subreg;
            }
          else
            {
              delete_insns_since (last);
              target = extract_fixed_bit_field (int_mode, op0, offset, bitsize,
                                                bitpos, target, 0);
            }
        }
      else
        extv_loses:
        target = extract_fixed_bit_field (int_mode, op0, offset, bitsize,
                                          bitpos, target, 0);
    }
  if (target == spec_target)
    return target;
  if (target == spec_target_subreg)
    return spec_target;
  if (GET_MODE (target) != tmode && GET_MODE (target) != mode)
    {
      /* If the target mode is floating-point, first convert to the
         integer mode of that size and then access it as a floating-point
         value via a SUBREG.  */
      if (GET_MODE_CLASS (tmode) != MODE_INT
          && GET_MODE_CLASS (tmode) != MODE_PARTIAL_INT)
        {
          target = convert_to_mode (mode_for_size (GET_MODE_BITSIZE (tmode),
                                                   MODE_INT, 0),
                                    target, unsignedp);
          return gen_lowpart (tmode, target);
        }
      else
        return convert_to_mode (tmode, target, unsignedp);
    }
  return target;
}
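
/* As an illustration, reading back the 3-bit field of the store_bit_field
   example above is a call like

	x = extract_bit_field (s_mem, 3, 5, 1, NULL_RTX, mode, tmode, 4);

   with UNSIGNEDP 1 for an unsigned field; for a signed field the helpers
   below finish with a pair of sign-extending arithmetic shifts.  */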
/* Extract a bit field using shifts and boolean operations.
   Returns an rtx to represent the value.
   OP0 addresses a register (word) or memory (byte).
   BITPOS says which bit within the word or byte the bit field starts in.
   OFFSET says how many bytes farther the bit field starts;
   it is 0 if OP0 is a register.
   BITSIZE says how many bits long the bit field is.
   (If OP0 is a register, it may be narrower than a full word,
   but BITPOS still counts within a full word,
   which is significant on bigendian machines.)

   UNSIGNEDP is nonzero for an unsigned bit field (don't sign-extend value).
   If TARGET is nonzero, attempts to store the value there
   and return TARGET, but this is not guaranteed.
   If TARGET is not used, create a pseudo-reg of mode TMODE for the value.  */
static rtx
extract_fixed_bit_field (tmode, op0, offset, bitsize, bitpos,
                         target, unsignedp)
     enum machine_mode tmode;
     rtx op0, target;
     unsigned HOST_WIDE_INT offset, bitsize, bitpos;
     int unsignedp;
{
  unsigned int total_bits = BITS_PER_WORD;
  enum machine_mode mode;

  if (GET_CODE (op0) == SUBREG || GET_CODE (op0) == REG)
    {
      /* Special treatment for a bit field split across two registers.  */
      if (bitsize + bitpos > BITS_PER_WORD)
        return extract_split_bit_field (op0, bitsize, bitpos, unsignedp);
    }
  else
    {
      /* Get the proper mode to use for this field.  We want a mode that
         includes the entire field.  If such a mode would be larger than
         a word, we won't be doing the extraction the normal way.  */

      mode = get_best_mode (bitsize, bitpos + offset * BITS_PER_UNIT,
                            MEM_ALIGN (op0), word_mode, MEM_VOLATILE_P (op0));

      if (mode == VOIDmode)
        /* The only way this should occur is if the field spans word
           boundaries.  */
        return extract_split_bit_field (op0, bitsize,
                                        bitpos + offset * BITS_PER_UNIT,
                                        unsignedp);

      total_bits = GET_MODE_BITSIZE (mode);

      /* Make sure bitpos is valid for the chosen mode.  Adjust BITPOS to
         be in the range 0 to total_bits-1, and put any excess bytes in
         OFFSET.  */
      if (bitpos >= total_bits)
        {
          offset += (bitpos / total_bits) * (total_bits / BITS_PER_UNIT);
          bitpos -= ((bitpos / total_bits) * (total_bits / BITS_PER_UNIT)
                     * BITS_PER_UNIT);
        }

      /* Get ref to an aligned byte, halfword, or word containing the field.
         Adjust BITPOS to be position within a word,
         and OFFSET to be the offset of that word.
         Then alter OP0 to refer to that word.  */
      bitpos += (offset % (total_bits / BITS_PER_UNIT)) * BITS_PER_UNIT;
      offset -= (offset % (total_bits / BITS_PER_UNIT));
      op0 = adjust_address (op0, mode, offset);
    }

  mode = GET_MODE (op0);

  if (BYTES_BIG_ENDIAN)
    /* BITPOS is the distance between our msb and that of OP0.
       Convert it to the distance from the lsb.  */
    bitpos = total_bits - bitsize - bitpos;

  /* Now BITPOS is always the distance between the field's lsb and that of OP0.
     We have reduced the big-endian case to the little-endian case.  */

  if (unsignedp)
    {
      if (bitpos)
        {
          /* If the field does not already start at the lsb,
             shift it so it does.  */
          tree amount = build_int_2 (bitpos, 0);
          /* Maybe propagate the target for the shift.  */
          /* But not if we will return it--could confuse integrate.c.  */
          rtx subtarget = (target != 0 && GET_CODE (target) == REG
                           && !REG_FUNCTION_VALUE_P (target)
                           ? target : 0);
          if (tmode != mode) subtarget = 0;
          op0 = expand_shift (RSHIFT_EXPR, mode, op0, amount, subtarget, 1);
        }
      /* Convert the value to the desired mode.  */
      if (mode != tmode)
        op0 = convert_to_mode (tmode, op0, 1);

      /* Unless the msb of the field used to be the msb when we shifted,
         mask out the upper bits.  */

      if (GET_MODE_BITSIZE (mode) != bitpos + bitsize)
        return expand_binop (GET_MODE (op0), and_optab, op0,
                             mask_rtx (GET_MODE (op0), 0, bitsize, 0),
                             target, 1, OPTAB_LIB_WIDEN);
      return op0;
    }

  /* To extract a signed bit-field, first shift its msb to the msb of the word,
     then arithmetic-shift its lsb to the lsb of the word.  */
  op0 = force_reg (mode, op0);
  if (mode != tmode)
    target = 0;

  /* Find the narrowest integer mode that contains the field.  */

  for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != VOIDmode;
       mode = GET_MODE_WIDER_MODE (mode))
    if (GET_MODE_BITSIZE (mode) >= bitsize + bitpos)
      {
        op0 = convert_to_mode (mode, op0, 0);
        break;
      }

  if (GET_MODE_BITSIZE (mode) != (bitsize + bitpos))
    {
      tree amount
        = build_int_2 (GET_MODE_BITSIZE (mode) - (bitsize + bitpos), 0);
      /* Maybe propagate the target for the shift.  */
      /* But not if we will return the result--could confuse integrate.c.  */
      rtx subtarget = (target != 0 && GET_CODE (target) == REG
                       && ! REG_FUNCTION_VALUE_P (target)
                       ? target : 0);
      op0 = expand_shift (LSHIFT_EXPR, mode, op0, amount, subtarget, 1);
    }

  return expand_shift (RSHIFT_EXPR, mode, op0,
                       build_int_2 (GET_MODE_BITSIZE (mode) - bitsize, 0),
                       target, 0);
}
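
/* As an illustration (little-endian bit numbering), an unsigned 3-bit
   field at bit 4 of a byte is fetched as the equivalent of

	(*p >> 4) & 7

   while the signed case shifts the field's msb up to the mode's msb and
   arithmetic-shifts it back down, roughly ((int) x << 25) >> 29 in a
   32-bit mode.  */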
/* Return a constant integer (CONST_INT or CONST_DOUBLE) mask value
   of mode MODE with BITSIZE ones followed by BITPOS zeros, or the
   complement of that if COMPLEMENT.  The mask is truncated if
   necessary to the width of mode MODE.  The mask is zero-extended if
   BITSIZE+BITPOS is too small for MODE.  */

static rtx
mask_rtx (mode, bitpos, bitsize, complement)
     enum machine_mode mode;
     int bitpos, bitsize, complement;
{
  HOST_WIDE_INT masklow, maskhigh;

  if (bitpos < HOST_BITS_PER_WIDE_INT)
    masklow = (HOST_WIDE_INT) -1 << bitpos;
  else
    masklow = 0;

  if (bitpos + bitsize < HOST_BITS_PER_WIDE_INT)
    masklow &= ((unsigned HOST_WIDE_INT) -1
                >> (HOST_BITS_PER_WIDE_INT - bitpos - bitsize));

  if (bitpos <= HOST_BITS_PER_WIDE_INT)
    maskhigh = -1;
  else
    maskhigh = (HOST_WIDE_INT) -1 << (bitpos - HOST_BITS_PER_WIDE_INT);

  if (bitpos + bitsize > HOST_BITS_PER_WIDE_INT)
    maskhigh &= ((unsigned HOST_WIDE_INT) -1
                 >> (2 * HOST_BITS_PER_WIDE_INT - bitpos - bitsize));
  else
    maskhigh = 0;

  if (complement)
    {
      maskhigh = ~maskhigh;
      masklow = ~masklow;
    }

  return immed_double_const (masklow, maskhigh, mode);
}
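
/* For example, with a 64-bit HOST_WIDE_INT, mask_rtx (SImode, 4, 3, 0)
   yields the CONST_INT 0x70 (three ones followed by four zeros), and
   mask_rtx (SImode, 4, 3, 1) its SImode complement 0xffffff8f;
   store_fixed_bit_field uses the complemented form to clear a field
   before IORing in the new bits.  */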
/* Return a constant integer (CONST_INT or CONST_DOUBLE) rtx with the value
   VALUE truncated to BITSIZE bits and then shifted left BITPOS bits.  */

static rtx
lshift_value (mode, value, bitpos, bitsize)
     enum machine_mode mode;
     rtx value;
     int bitpos, bitsize;
{
  unsigned HOST_WIDE_INT v = INTVAL (value);
  HOST_WIDE_INT low, high;

  if (bitsize < HOST_BITS_PER_WIDE_INT)
    v &= ~((HOST_WIDE_INT) -1 << bitsize);

  if (bitpos < HOST_BITS_PER_WIDE_INT)
    {
      low = v << bitpos;
      high = (bitpos > 0 ? (v >> (HOST_BITS_PER_WIDE_INT - bitpos)) : 0);
    }
  else
    {
      low = 0;
      high = v << (bitpos - HOST_BITS_PER_WIDE_INT);
    }

  return immed_double_const (low, high, mode);
}
1767 /* Extract a bit field that is split across two words
1768 and return an RTX for the result.
1770 OP0 is the REG, SUBREG or MEM rtx for the first of the two words.
1771 BITSIZE is the field width; BITPOS, position of its first bit, in the word.
1772 UNSIGNEDP is 1 if should zero-extend the contents; else sign-extend. */
1774 static rtx
1775 extract_split_bit_field (op0, bitsize, bitpos, unsignedp)
1776 rtx op0;
1777 unsigned HOST_WIDE_INT bitsize, bitpos;
1778 int unsignedp;
1780 unsigned int unit;
1781 unsigned int bitsdone = 0;
1782 rtx result = NULL_RTX;
1783 int first = 1;
1785 /* Make sure UNIT isn't larger than BITS_PER_WORD; we can only handle
1786 that much at a time. */
1787 if (GET_CODE (op0) == REG || GET_CODE (op0) == SUBREG)
1788 unit = BITS_PER_WORD;
1789 else
1790 unit = MIN (MEM_ALIGN (op0), BITS_PER_WORD);
1792 while (bitsdone < bitsize)
1794 unsigned HOST_WIDE_INT thissize;
1795 rtx part, word;
1796 unsigned HOST_WIDE_INT thispos;
1797 unsigned HOST_WIDE_INT offset;
1799 offset = (bitpos + bitsdone) / unit;
1800 thispos = (bitpos + bitsdone) % unit;
1802 /* THISSIZE must not overrun a word boundary. Otherwise,
1803 extract_fixed_bit_field will call us again, and we will mutually
1804 recurse forever. */
1805 thissize = MIN (bitsize - bitsdone, BITS_PER_WORD);
1806 thissize = MIN (thissize, unit - thispos);
1808 /* If OP0 is a register, then handle OFFSET here.
1810 When handling multiword bitfields, extract_bit_field may pass
1811 down a word_mode SUBREG of a larger REG for a bitfield that actually
1812 crosses a word boundary. Thus, for a SUBREG, we must find
1813 the current word starting from the base register. */
1814 if (GET_CODE (op0) == SUBREG)
1816 int word_offset = (SUBREG_BYTE (op0) / UNITS_PER_WORD) + offset;
1817 word = operand_subword_force (SUBREG_REG (op0), word_offset,
1818 GET_MODE (SUBREG_REG (op0)));
1819 offset = 0;
1821 else if (GET_CODE (op0) == REG)
1823 word = operand_subword_force (op0, offset, GET_MODE (op0));
1824 offset = 0;
1826 else
1827 word = op0;
1829 /* Extract the parts in bit-counting order,
1830 whose meaning is determined by BYTES_PER_UNIT.
1831 OFFSET is in UNITs, and UNIT is in bits.
1832 extract_fixed_bit_field wants offset in bytes. */
1833 part = extract_fixed_bit_field (word_mode, word,
1834 offset * unit / BITS_PER_UNIT,
1835 thissize, thispos, 0, 1);
1836 bitsdone += thissize;
1838 /* Shift this part into place for the result. */
1839 if (BYTES_BIG_ENDIAN)
1841 if (bitsize != bitsdone)
1842 part = expand_shift (LSHIFT_EXPR, word_mode, part,
1843 build_int_2 (bitsize - bitsdone, 0), 0, 1);
1845 else
1847 if (bitsdone != thissize)
1848 part = expand_shift (LSHIFT_EXPR, word_mode, part,
1849 build_int_2 (bitsdone - thissize, 0), 0, 1);
1852 if (first)
1853 result = part;
1854 else
1855 /* Combine the parts with bitwise or. This works
1856 because we extracted each part as an unsigned bit field. */
1857 result = expand_binop (word_mode, ior_optab, part, result, NULL_RTX, 1,
1858 OPTAB_LIB_WIDEN);
1860 first = 0;
1863 /* Unsigned bit field: we are done. */
1864 if (unsignedp)
1865 return result;
1866 /* Signed bit field: sign-extend with two arithmetic shifts. */
1867 result = expand_shift (LSHIFT_EXPR, word_mode, result,
1868 build_int_2 (BITS_PER_WORD - bitsize, 0),
1869 NULL_RTX, 0);
1870 return expand_shift (RSHIFT_EXPR, word_mode, result,
1871 build_int_2 (BITS_PER_WORD - bitsize, 0), NULL_RTX, 0);
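/* Plain-C picture of the split-field case handled above (illustrative,
   not from this file): a 10-bit unsigned field starting at bit 28 of a
   64-bit little-endian value held as two 32-bit words.  Each part is
   fetched as an unsigned field, then the parts are combined with shift
   and OR, just as the loop above does one word at a time.  */

static unsigned int
extract_split_example (unsigned int lo_word, unsigned int hi_word)
{
  unsigned int low_part = lo_word >> 28;    /* bits 28..31: 4 bits */
  unsigned int high_part = hi_word & 0x3f;  /* bits 32..37: 6 bits */
  return low_part | (high_part << 4);       /* the 10-bit field */
}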
1874 /* Add INC into TARGET. */
1876 void
1877 expand_inc (target, inc)
1878 rtx target, inc;
1880 rtx value = expand_binop (GET_MODE (target), add_optab,
1881 target, inc,
1882 target, 0, OPTAB_LIB_WIDEN);
1883 if (value != target)
1884 emit_move_insn (target, value);
1887 /* Subtract DEC from TARGET. */
1889 void
1890 expand_dec (target, dec)
1891 rtx target, dec;
1893 rtx value = expand_binop (GET_MODE (target), sub_optab,
1894 target, dec,
1895 target, 0, OPTAB_LIB_WIDEN);
1896 if (value != target)
1897 emit_move_insn (target, value);
1900 /* Output a shift instruction for expression code CODE,
1901 with SHIFTED being the rtx for the value to shift,
1902 and AMOUNT the tree for the amount to shift by.
1903 Store the result in the rtx TARGET, if that is convenient.
1904 If UNSIGNEDP is nonzero, do a logical shift; otherwise, arithmetic.
1905 Return the rtx for where the value is. */
1907 rtx
1908 expand_shift (code, mode, shifted, amount, target, unsignedp)
1909 enum tree_code code;
1910 enum machine_mode mode;
1911 rtx shifted;
1912 tree amount;
1913 rtx target;
1914 int unsignedp;
1916 rtx op1, temp = 0;
1917 int left = (code == LSHIFT_EXPR || code == LROTATE_EXPR);
1918 int rotate = (code == LROTATE_EXPR || code == RROTATE_EXPR);
1919 int try;
1921 /* We used to detect shift counts computed by NEGATE_EXPR and shift
1922 in the other direction, but that does not work
1923 on all machines. */
1925 op1 = expand_expr (amount, NULL_RTX, VOIDmode, 0);
1927 #ifdef SHIFT_COUNT_TRUNCATED
1928 if (SHIFT_COUNT_TRUNCATED)
1930 if (GET_CODE (op1) == CONST_INT
1931 && ((unsigned HOST_WIDE_INT) INTVAL (op1) >=
1932 (unsigned HOST_WIDE_INT) GET_MODE_BITSIZE (mode)))
1933 op1 = GEN_INT ((unsigned HOST_WIDE_INT) INTVAL (op1)
1934 % GET_MODE_BITSIZE (mode));
1935 else if (GET_CODE (op1) == SUBREG
1936 && subreg_lowpart_p (op1))
1937 op1 = SUBREG_REG (op1);
1939 #endif
1941 if (op1 == const0_rtx)
1942 return shifted;
1944 for (try = 0; temp == 0 && try < 3; try++)
1946 enum optab_methods methods;
1948 if (try == 0)
1949 methods = OPTAB_DIRECT;
1950 else if (try == 1)
1951 methods = OPTAB_WIDEN;
1952 else
1953 methods = OPTAB_LIB_WIDEN;
1955 if (rotate)
1957 /* Widening does not work for rotation. */
1958 if (methods == OPTAB_WIDEN)
1959 continue;
1960 else if (methods == OPTAB_LIB_WIDEN)
1962 /* If we have been unable to open-code this by a rotation,
1963 do it as the IOR of two shifts. I.e., to rotate A
1964 by N bits, compute (A << N) | ((unsigned) A >> (C - N))
1965 where C is the bitsize of A.
1967 It is theoretically possible that the target machine might
1968 not be able to perform either shift and hence we would
1969 be making two libcalls rather than just the one for the
1970 shift (similarly if IOR could not be done). We will allow
1971 this extremely unlikely lossage to avoid complicating the
1972 code below. */
1974 rtx subtarget = target == shifted ? 0 : target;
1975 rtx temp1;
1976 tree type = TREE_TYPE (amount);
1977 tree new_amount = make_tree (type, op1);
1978 tree other_amount
1979 = fold (build (MINUS_EXPR, type,
1980 convert (type,
1981 build_int_2 (GET_MODE_BITSIZE (mode),
1982 0)),
1983 amount));
1985 shifted = force_reg (mode, shifted);
1987 temp = expand_shift (left ? LSHIFT_EXPR : RSHIFT_EXPR,
1988 mode, shifted, new_amount, subtarget, 1);
1989 temp1 = expand_shift (left ? RSHIFT_EXPR : LSHIFT_EXPR,
1990 mode, shifted, other_amount, 0, 1);
1991 return expand_binop (mode, ior_optab, temp, temp1, target,
1992 unsignedp, methods);
1995 temp = expand_binop (mode,
1996 left ? rotl_optab : rotr_optab,
1997 shifted, op1, target, unsignedp, methods);
1999 /* If we don't have the rotate, but we are rotating by a constant
2000 that is in range, try a rotate in the opposite direction. */
2002 if (temp == 0 && GET_CODE (op1) == CONST_INT
2003 && INTVAL (op1) > 0
2004 && (unsigned int) INTVAL (op1) < GET_MODE_BITSIZE (mode))
2005 temp = expand_binop (mode,
2006 left ? rotr_optab : rotl_optab,
2007 shifted,
2008 GEN_INT (GET_MODE_BITSIZE (mode)
2009 - INTVAL (op1)),
2010 target, unsignedp, methods);
2012 else if (unsignedp)
2013 temp = expand_binop (mode,
2014 left ? ashl_optab : lshr_optab,
2015 shifted, op1, target, unsignedp, methods);
2017 /* Do arithmetic shifts.
2018 Also, if we are going to widen the operand, we can just as well
2019 use an arithmetic right-shift instead of a logical one. */
2020 if (temp == 0 && ! rotate
2021 && (! unsignedp || (! left && methods == OPTAB_WIDEN)))
2023 enum optab_methods methods1 = methods;
2025 /* If trying to widen a log shift to an arithmetic shift,
2026 don't accept an arithmetic shift of the same size. */
2027 if (unsignedp)
2028 methods1 = OPTAB_MUST_WIDEN;
2030 /* Arithmetic shift */
2032 temp = expand_binop (mode,
2033 left ? ashl_optab : ashr_optab,
2034 shifted, op1, target, unsignedp, methods1);
2037 /* We used to try extzv here for logical right shifts, but that was
2038 only useful for one machine, the VAX, and caused poor code
2039 generation there for lshrdi3, so the code was deleted and a
2040 define_expand for lshrsi3 was added to vax.md. */
2043 if (temp == 0)
2044 abort ();
2045 return temp;
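/* Sketch of the rotate-as-two-shifts fallback used above when no rotate
   instruction can be open-coded (an illustration, not from this file).
   Assumes a 32-bit unsigned int and 0 < n < 32; the expander itself
   returns early for a constant zero count.  */

static unsigned int
rotate_left_example (unsigned int a, unsigned int n)
{
  /* (A << N) | ((unsigned) A >> (C - N)), with C = 32.  */
  return (a << n) | (a >> (32 - n));
}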
2048 enum alg_code { alg_zero, alg_m, alg_shift,
2049 alg_add_t_m2, alg_sub_t_m2,
2050 alg_add_factor, alg_sub_factor,
2051 alg_add_t2_m, alg_sub_t2_m };
2054 /* This structure records a sequence of operations.
2055 `ops' is the number of operations recorded.
2056 `cost' is their total cost.
2057 The operations are stored in `op' and the corresponding
2058 logarithms of the integer coefficients in `log'.
2060 These are the operations:
2061 alg_zero total := 0;
2062 alg_m total := multiplicand;
2063 alg_shift total := total * coeff;
2064 alg_add_t_m2 total := total + multiplicand * coeff;
2065 alg_sub_t_m2 total := total - multiplicand * coeff;
2066 alg_add_factor total := total * coeff + total;
2067 alg_sub_factor total := total * coeff - total;
2068 alg_add_t2_m total := total * coeff + multiplicand;
2069 alg_sub_t2_m total := total * coeff - multiplicand;
2071 The first operation must be either alg_zero or alg_m. */
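/* A worked instance of the operation codes listed above (an
   illustrative sketch, not from this file): multiplying by 10.
   One plausible recording is alg_m, then alg_add_t2_m with log 2,
   then alg_shift with log 1.  */

static int
mult_by_10_example (int x)
{
  int total = x;              /* alg_m:        total := multiplicand */
  total = (total << 2) + x;   /* alg_add_t2_m: total := total * 4 + x */
  total = total << 1;         /* alg_shift:    total := total * 2 */
  return total;               /* x * 10 */
}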
2073 struct algorithm
2075 short cost;
2076 short ops;
2077 /* The size of the OP and LOG fields are not directly related to the
2078 word size, but the worst-case algorithms will be if we have few
2079 consecutive ones or zeros, i.e., a multiplicand like 10101010101...
2080 In that case we will generate shift-by-2, add, shift-by-2, add,...,
2081 in total wordsize operations. */
2082 enum alg_code op[MAX_BITS_PER_WORD];
2083 char log[MAX_BITS_PER_WORD];
2086 static void synth_mult PARAMS ((struct algorithm *,
2087 unsigned HOST_WIDE_INT,
2088 int));
2089 static unsigned HOST_WIDE_INT choose_multiplier PARAMS ((unsigned HOST_WIDE_INT,
2090 int, int,
2091 unsigned HOST_WIDE_INT *,
2092 int *, int *));
2093 static unsigned HOST_WIDE_INT invert_mod2n PARAMS ((unsigned HOST_WIDE_INT,
2094 int));
2095 /* Compute and return the best algorithm for multiplying by T.
2096 The algorithm must cost less than COST_LIMIT.
2097 If retval.cost >= COST_LIMIT, no algorithm was found and all
2098 other fields of the returned struct are undefined. */
2100 static void
2101 synth_mult (alg_out, t, cost_limit)
2102 struct algorithm *alg_out;
2103 unsigned HOST_WIDE_INT t;
2104 int cost_limit;
2106 int m;
2107 struct algorithm *alg_in, *best_alg;
2108 int cost;
2109 unsigned HOST_WIDE_INT q;
2111 /* Indicate that no algorithm is yet found. If no algorithm
2112 is found, this value will be returned and indicate failure. */
2113 alg_out->cost = cost_limit;
2115 if (cost_limit <= 0)
2116 return;
2118 /* t == 1 can be done at zero cost. */
2119 if (t == 1)
2121 alg_out->ops = 1;
2122 alg_out->cost = 0;
2123 alg_out->op[0] = alg_m;
2124 return;
2127 /* t == 0 sometimes has a cost. If it does and it exceeds our limit,
2128 fail now. */
2129 if (t == 0)
2131 if (zero_cost >= cost_limit)
2132 return;
2133 else
2135 alg_out->ops = 1;
2136 alg_out->cost = zero_cost;
2137 alg_out->op[0] = alg_zero;
2138 return;
2142 /* We'll be needing a couple extra algorithm structures now. */
2144 alg_in = (struct algorithm *)alloca (sizeof (struct algorithm));
2145 best_alg = (struct algorithm *)alloca (sizeof (struct algorithm));
2147 /* If we have a group of zero bits at the low-order part of T, try
2148 multiplying by the remaining bits and then doing a shift. */
2150 if ((t & 1) == 0)
2152 m = floor_log2 (t & -t); /* m = number of low zero bits */
2153 if (m < BITS_PER_WORD)
2155 q = t >> m;
2156 cost = shift_cost[m];
2157 synth_mult (alg_in, q, cost_limit - cost);
2159 cost += alg_in->cost;
2160 if (cost < cost_limit)
2162 struct algorithm *x;
2163 x = alg_in, alg_in = best_alg, best_alg = x;
2164 best_alg->log[best_alg->ops] = m;
2165 best_alg->op[best_alg->ops] = alg_shift;
2166 cost_limit = cost;
2171 /* If we have an odd number, add or subtract one. */
2172 if ((t & 1) != 0)
2174 unsigned HOST_WIDE_INT w;
2176 for (w = 1; (w & t) != 0; w <<= 1)
2178 /* If T was -1, then W will be zero after the loop. This is another
2179 case where T ends with ...111. Handling this by multiplying by (T + 1) and
2180 subtracting 1 produces slightly better code and results in algorithm
2181 selection much faster than treating it like the ...0111 case
2182 below. */
2183 if (w == 0
2184 || (w > 2
2185 /* Reject the case where t is 3.
2186 Thus we prefer addition in that case. */
2187 && t != 3))
2189 /* T ends with ...111. Multiply by (T + 1) and subtract 1. */
2191 cost = add_cost;
2192 synth_mult (alg_in, t + 1, cost_limit - cost);
2194 cost += alg_in->cost;
2195 if (cost < cost_limit)
2197 struct algorithm *x;
2198 x = alg_in, alg_in = best_alg, best_alg = x;
2199 best_alg->log[best_alg->ops] = 0;
2200 best_alg->op[best_alg->ops] = alg_sub_t_m2;
2201 cost_limit = cost;
2204 else
2206 /* T ends with ...01 or ...011. Multiply by (T - 1) and add 1. */
2208 cost = add_cost;
2209 synth_mult (alg_in, t - 1, cost_limit - cost);
2211 cost += alg_in->cost;
2212 if (cost < cost_limit)
2214 struct algorithm *x;
2215 x = alg_in, alg_in = best_alg, best_alg = x;
2216 best_alg->log[best_alg->ops] = 0;
2217 best_alg->op[best_alg->ops] = alg_add_t_m2;
2218 cost_limit = cost;
2223 /* Look for factors of t of the form
2224 t = q(2**m +- 1), 2 <= m <= floor(log2(t - 1)).
2225 If we find such a factor, we can multiply by t using an algorithm that
2226 multiplies by q, shift the result by m and add/subtract it to itself.
2228 We search for large factors first and loop down, even if large factors
2229 are less probable than small; if we find a large factor we will find a
2230 good sequence quickly, and therefore be able to prune (by decreasing
2231 COST_LIMIT) the search. */
2233 for (m = floor_log2 (t - 1); m >= 2; m--)
2235 unsigned HOST_WIDE_INT d;
2237 d = ((unsigned HOST_WIDE_INT) 1 << m) + 1;
2238 if (t % d == 0 && t > d && m < BITS_PER_WORD)
2240 cost = MIN (shiftadd_cost[m], add_cost + shift_cost[m]);
2241 synth_mult (alg_in, t / d, cost_limit - cost);
2243 cost += alg_in->cost;
2244 if (cost < cost_limit)
2246 struct algorithm *x;
2247 x = alg_in, alg_in = best_alg, best_alg = x;
2248 best_alg->log[best_alg->ops] = m;
2249 best_alg->op[best_alg->ops] = alg_add_factor;
2250 cost_limit = cost;
2252 /* Other factors will have been taken care of in the recursion. */
2253 break;
2256 d = ((unsigned HOST_WIDE_INT) 1 << m) - 1;
2257 if (t % d == 0 && t > d && m < BITS_PER_WORD)
2259 cost = MIN (shiftsub_cost[m], add_cost + shift_cost[m]);
2260 synth_mult (alg_in, t / d, cost_limit - cost);
2262 cost += alg_in->cost;
2263 if (cost < cost_limit)
2265 struct algorithm *x;
2266 x = alg_in, alg_in = best_alg, best_alg = x;
2267 best_alg->log[best_alg->ops] = m;
2268 best_alg->op[best_alg->ops] = alg_sub_factor;
2269 cost_limit = cost;
2271 break;
2275 /* Try shift-and-add (load effective address) instructions,
2276 i.e. do a*3, a*5, a*9. */
2277 if ((t & 1) != 0)
2279 q = t - 1;
2280 q = q & -q;
2281 m = exact_log2 (q);
2282 if (m >= 0 && m < BITS_PER_WORD)
2284 cost = shiftadd_cost[m];
2285 synth_mult (alg_in, (t - 1) >> m, cost_limit - cost);
2287 cost += alg_in->cost;
2288 if (cost < cost_limit)
2290 struct algorithm *x;
2291 x = alg_in, alg_in = best_alg, best_alg = x;
2292 best_alg->log[best_alg->ops] = m;
2293 best_alg->op[best_alg->ops] = alg_add_t2_m;
2294 cost_limit = cost;
2298 q = t + 1;
2299 q = q & -q;
2300 m = exact_log2 (q);
2301 if (m >= 0 && m < BITS_PER_WORD)
2303 cost = shiftsub_cost[m];
2304 synth_mult (alg_in, (t + 1) >> m, cost_limit - cost);
2306 cost += alg_in->cost;
2307 if (cost < cost_limit)
2309 struct algorithm *x;
2310 x = alg_in, alg_in = best_alg, best_alg = x;
2311 best_alg->log[best_alg->ops] = m;
2312 best_alg->op[best_alg->ops] = alg_sub_t2_m;
2313 cost_limit = cost;
2318 /* If cost_limit has not decreased since we stored it in alg_out->cost,
2319 we have not found any algorithm. */
2320 if (cost_limit == alg_out->cost)
2321 return;
2323 /* If the sequence is too long for `struct algorithm' to record,
2324 make this search fail. */
2325 if (best_alg->ops == MAX_BITS_PER_WORD)
2326 return;
2328 /* Copy the algorithm from temporary space to the space at alg_out.
2329 We avoid using structure assignment because the majority of
2330 best_alg is normally undefined, and this is a critical function. */
2331 alg_out->ops = best_alg->ops + 1;
2332 alg_out->cost = cost_limit;
2333 memcpy (alg_out->op, best_alg->op,
2334 alg_out->ops * sizeof *alg_out->op);
2335 memcpy (alg_out->log, best_alg->log,
2336 alg_out->ops * sizeof *alg_out->log);
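/* A worked instance of the factoring case above (an illustrative
   sketch, not from this file): t = 45 factors as
   (2^2 + 1)(2^3 + 1) = 5 * 9, so the whole multiplication reduces
   to two shift-and-add steps.  */

static int
mult_by_45_example (int x)
{
  int total = x;                 /* alg_m */
  total = (total << 2) + total;  /* alg_add_factor, log 2: total *= 5 */
  total = (total << 3) + total;  /* alg_add_factor, log 3: total *= 9 */
  return total;                  /* x * 45 */
}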
2339 /* Perform a multiplication and return an rtx for the result.
2340 MODE is mode of value; OP0 and OP1 are what to multiply (rtx's);
2341 TARGET is a suggestion for where to store the result (an rtx).
2343 We check specially for a constant integer as OP1.
2344 If you want this check for OP0 as well, then before calling
2345 you should swap the two operands if OP0 would be constant. */
2347 rtx
2348 expand_mult (mode, op0, op1, target, unsignedp)
2349 enum machine_mode mode;
2350 rtx op0, op1, target;
2351 int unsignedp;
2353 rtx const_op1 = op1;
2355 /* synth_mult does an `unsigned int' multiply. As long as the mode is
2356 less than or equal in size to `unsigned int' this doesn't matter.
2357 If the mode is larger than `unsigned int', then synth_mult works only
2358 if the constant value exactly fits in an `unsigned int' without any
2359 truncation. This means that multiplying by negative values does
2360 not work; results are off by 2^32 on a 32 bit machine. */
2362 /* If we are multiplying in DImode, it may still be a win
2363 to try to work with shifts and adds. */
2364 if (GET_CODE (op1) == CONST_DOUBLE
2365 && GET_MODE_CLASS (GET_MODE (op1)) == MODE_INT
2366 && HOST_BITS_PER_INT >= BITS_PER_WORD
2367 && CONST_DOUBLE_HIGH (op1) == 0)
2368 const_op1 = GEN_INT (CONST_DOUBLE_LOW (op1));
2369 else if (HOST_BITS_PER_INT < GET_MODE_BITSIZE (mode)
2370 && GET_CODE (op1) == CONST_INT
2371 && INTVAL (op1) < 0)
2372 const_op1 = 0;
2374 /* We used to test optimize here, on the grounds that it's better to
2375 produce a smaller program when -O is not used.
2376 But this causes such a terrible slowdown sometimes
2377 that it seems better to use synth_mult always. */
2379 if (const_op1 && GET_CODE (const_op1) == CONST_INT
2380 && (unsignedp || ! flag_trapv))
2382 struct algorithm alg;
2383 struct algorithm alg2;
2384 HOST_WIDE_INT val = INTVAL (op1);
2385 HOST_WIDE_INT val_so_far;
2386 rtx insn;
2387 int mult_cost;
2388 enum {basic_variant, negate_variant, add_variant} variant = basic_variant;
2390 /* op0 must be register to make mult_cost match the precomputed
2391 shiftadd_cost array. */
2392 op0 = force_reg (mode, op0);
2394 /* Try to do the computation three ways: multiply by the negative of OP1
2395 and then negate, do the multiplication directly, or do multiplication
2396 by OP1 - 1. */
2398 mult_cost = rtx_cost (gen_rtx_MULT (mode, op0, op1), SET);
2399 mult_cost = MIN (12 * add_cost, mult_cost);
2401 synth_mult (&alg, val, mult_cost);
2403 /* This works only if the inverted value actually fits in an
2404 `unsigned int'. */
2405 if (HOST_BITS_PER_INT >= GET_MODE_BITSIZE (mode))
2407 synth_mult (&alg2, - val,
2408 (alg.cost < mult_cost ? alg.cost : mult_cost) - negate_cost);
2409 if (alg2.cost + negate_cost < alg.cost)
2410 alg = alg2, variant = negate_variant;
2413 /* This proves very useful for division-by-constant. */
2414 synth_mult (&alg2, val - 1,
2415 (alg.cost < mult_cost ? alg.cost : mult_cost) - add_cost);
2416 if (alg2.cost + add_cost < alg.cost)
2417 alg = alg2, variant = add_variant;
2419 if (alg.cost < mult_cost)
2421 /* We found something cheaper than a multiply insn. */
2422 int opno;
2423 rtx accum, tem;
2424 enum machine_mode nmode;
2426 op0 = protect_from_queue (op0, 0);
2428 /* Avoid referencing memory over and over.
2429 For speed, but also for correctness when mem is volatile. */
2430 if (GET_CODE (op0) == MEM)
2431 op0 = force_reg (mode, op0);
2433 /* ACCUM starts out either as OP0 or as a zero, depending on
2434 the first operation. */
2436 if (alg.op[0] == alg_zero)
2438 accum = copy_to_mode_reg (mode, const0_rtx);
2439 val_so_far = 0;
2441 else if (alg.op[0] == alg_m)
2443 accum = copy_to_mode_reg (mode, op0);
2444 val_so_far = 1;
2446 else
2447 abort ();
2449 for (opno = 1; opno < alg.ops; opno++)
2451 int log = alg.log[opno];
2452 int preserve = preserve_subexpressions_p ();
2453 rtx shift_subtarget = preserve ? 0 : accum;
2454 rtx add_target
2455 = (opno == alg.ops - 1 && target != 0 && variant != add_variant
2456 && ! preserve)
2457 ? target : 0;
2458 rtx accum_target = preserve ? 0 : accum;
2460 switch (alg.op[opno])
2462 case alg_shift:
2463 accum = expand_shift (LSHIFT_EXPR, mode, accum,
2464 build_int_2 (log, 0), NULL_RTX, 0);
2465 val_so_far <<= log;
2466 break;
2468 case alg_add_t_m2:
2469 tem = expand_shift (LSHIFT_EXPR, mode, op0,
2470 build_int_2 (log, 0), NULL_RTX, 0);
2471 accum = force_operand (gen_rtx_PLUS (mode, accum, tem),
2472 add_target
2473 ? add_target : accum_target);
2474 val_so_far += (HOST_WIDE_INT) 1 << log;
2475 break;
2477 case alg_sub_t_m2:
2478 tem = expand_shift (LSHIFT_EXPR, mode, op0,
2479 build_int_2 (log, 0), NULL_RTX, 0);
2480 accum = force_operand (gen_rtx_MINUS (mode, accum, tem),
2481 add_target
2482 ? add_target : accum_target);
2483 val_so_far -= (HOST_WIDE_INT) 1 << log;
2484 break;
2486 case alg_add_t2_m:
2487 accum = expand_shift (LSHIFT_EXPR, mode, accum,
2488 build_int_2 (log, 0), shift_subtarget,
2490 accum = force_operand (gen_rtx_PLUS (mode, accum, op0),
2491 add_target
2492 ? add_target : accum_target);
2493 val_so_far = (val_so_far << log) + 1;
2494 break;
2496 case alg_sub_t2_m:
2497 accum = expand_shift (LSHIFT_EXPR, mode, accum,
2498 build_int_2 (log, 0), shift_subtarget,
2500 accum = force_operand (gen_rtx_MINUS (mode, accum, op0),
2501 add_target
2502 ? add_target : accum_target);
2503 val_so_far = (val_so_far << log) - 1;
2504 break;
2506 case alg_add_factor:
2507 tem = expand_shift (LSHIFT_EXPR, mode, accum,
2508 build_int_2 (log, 0), NULL_RTX, 0);
2509 accum = force_operand (gen_rtx_PLUS (mode, accum, tem),
2510 add_target
2511 ? add_target : accum_target);
2512 val_so_far += val_so_far << log;
2513 break;
2515 case alg_sub_factor:
2516 tem = expand_shift (LSHIFT_EXPR, mode, accum,
2517 build_int_2 (log, 0), NULL_RTX, 0);
2518 accum = force_operand (gen_rtx_MINUS (mode, tem, accum),
2519 (add_target ? add_target
2520 : preserve ? 0 : tem));
2521 val_so_far = (val_so_far << log) - val_so_far;
2522 break;
2524 default:
2525 abort ();
2528 /* Write a REG_EQUAL note on the last insn so that we can cse
2529 multiplication sequences. Note that if ACCUM is a SUBREG,
2530 we've set the inner register and must properly indicate
2531 that. */
2533 tem = op0, nmode = mode;
2534 if (GET_CODE (accum) == SUBREG)
2536 nmode = GET_MODE (SUBREG_REG (accum));
2537 tem = gen_lowpart (nmode, op0);
2540 insn = get_last_insn ();
2541 set_unique_reg_note (insn,
2542 REG_EQUAL,
2543 gen_rtx_MULT (nmode, tem,
2544 GEN_INT (val_so_far)));
2547 if (variant == negate_variant)
2549 val_so_far = - val_so_far;
2550 accum = expand_unop (mode, neg_optab, accum, target, 0);
2552 else if (variant == add_variant)
2554 val_so_far = val_so_far + 1;
2555 accum = force_operand (gen_rtx_PLUS (mode, accum, op0), target);
2558 if (val != val_so_far)
2559 abort ();
2561 return accum;
2565 /* This used to use umul_optab if unsigned, but for non-widening multiply
2566 there is no difference between signed and unsigned. */
2567 op0 = expand_binop (mode,
2568 ! unsignedp
2569 && flag_trapv && (GET_MODE_CLASS(mode) == MODE_INT)
2570 ? smulv_optab : smul_optab,
2571 op0, op1, target, unsignedp, OPTAB_LIB_WIDEN);
2572 if (op0 == 0)
2573 abort ();
2574 return op0;
2577 /* Return the smallest n such that 2**n >= X. */
2579 int
2580 ceil_log2 (x)
2581 unsigned HOST_WIDE_INT x;
2583 return floor_log2 (x - 1) + 1;
2586 /* Choose a minimal N + 1 bit approximation to 1/D that can be used to
2587 replace division by D, and put the least significant N bits of the result
2588 in *MULTIPLIER_PTR and return the most significant bit.
2590 The width of operations is N (should be <= HOST_BITS_PER_WIDE_INT), the
2591 needed precision is in PRECISION (should be <= N).
2593 PRECISION should be as small as possible so this function can choose
2594 the multiplier more freely.
2596 The rounded-up logarithm of D is placed in *lgup_ptr. A shift count that
2597 is to be used for a final right shift is placed in *POST_SHIFT_PTR.
2599 Using this function, x/D will be equal to (x * m) >> (*POST_SHIFT_PTR),
2600 where m is the full HOST_BITS_PER_WIDE_INT + 1 bit multiplier. */
2602 static
2603 unsigned HOST_WIDE_INT
2604 choose_multiplier (d, n, precision, multiplier_ptr, post_shift_ptr, lgup_ptr)
2605 unsigned HOST_WIDE_INT d;
2606 int n;
2607 int precision;
2608 unsigned HOST_WIDE_INT *multiplier_ptr;
2609 int *post_shift_ptr;
2610 int *lgup_ptr;
2612 HOST_WIDE_INT mhigh_hi, mlow_hi;
2613 unsigned HOST_WIDE_INT mhigh_lo, mlow_lo;
2614 int lgup, post_shift;
2615 int pow, pow2;
2616 unsigned HOST_WIDE_INT nl, dummy1;
2617 HOST_WIDE_INT nh, dummy2;
2619 /* lgup = ceil(log2(divisor)); */
2620 lgup = ceil_log2 (d);
2622 if (lgup > n)
2623 abort ();
2625 pow = n + lgup;
2626 pow2 = n + lgup - precision;
2628 if (pow == 2 * HOST_BITS_PER_WIDE_INT)
2630 /* We could handle this with some effort, but this case is much better
2631 handled directly with a scc insn, so rely on caller using that. */
2632 abort ();
2635 /* mlow = 2^(N + lgup)/d */
2636 if (pow >= HOST_BITS_PER_WIDE_INT)
2638 nh = (HOST_WIDE_INT) 1 << (pow - HOST_BITS_PER_WIDE_INT);
2639 nl = 0;
2641 else
2643 nh = 0;
2644 nl = (unsigned HOST_WIDE_INT) 1 << pow;
2646 div_and_round_double (TRUNC_DIV_EXPR, 1, nl, nh, d, (HOST_WIDE_INT) 0,
2647 &mlow_lo, &mlow_hi, &dummy1, &dummy2);
2649 /* mhigh = (2^(N + lgup) + 2^(N + lgup - precision))/d */
2650 if (pow2 >= HOST_BITS_PER_WIDE_INT)
2651 nh |= (HOST_WIDE_INT) 1 << (pow2 - HOST_BITS_PER_WIDE_INT);
2652 else
2653 nl |= (unsigned HOST_WIDE_INT) 1 << pow2;
2654 div_and_round_double (TRUNC_DIV_EXPR, 1, nl, nh, d, (HOST_WIDE_INT) 0,
2655 &mhigh_lo, &mhigh_hi, &dummy1, &dummy2);
2657 if (mhigh_hi && nh - d >= d)
2658 abort ();
2659 if (mhigh_hi > 1 || mlow_hi > 1)
2660 abort ();
2661 /* assert that mlow < mhigh. */
2662 if (! (mlow_hi < mhigh_hi || (mlow_hi == mhigh_hi && mlow_lo < mhigh_lo)))
2663 abort ();
2665 /* If precision == N, then mlow, mhigh exceed 2^N
2666 (but they do not exceed 2^(N+1)). */
2668 /* Reduce to lowest terms */
2669 for (post_shift = lgup; post_shift > 0; post_shift--)
2671 unsigned HOST_WIDE_INT ml_lo = (mlow_hi << (HOST_BITS_PER_WIDE_INT - 1)) | (mlow_lo >> 1);
2672 unsigned HOST_WIDE_INT mh_lo = (mhigh_hi << (HOST_BITS_PER_WIDE_INT - 1)) | (mhigh_lo >> 1);
2673 if (ml_lo >= mh_lo)
2674 break;
2676 mlow_hi = 0;
2677 mlow_lo = ml_lo;
2678 mhigh_hi = 0;
2679 mhigh_lo = mh_lo;
2682 *post_shift_ptr = post_shift;
2683 *lgup_ptr = lgup;
2684 if (n < HOST_BITS_PER_WIDE_INT)
2686 unsigned HOST_WIDE_INT mask = ((unsigned HOST_WIDE_INT) 1 << n) - 1;
2687 *multiplier_ptr = mhigh_lo & mask;
2688 return mhigh_lo >= mask;
2690 else
2692 *multiplier_ptr = mhigh_lo;
2693 return mhigh_hi;
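/* Illustration of using the chosen multiplier (a sketch, not from this
   file).  For d = 3 with n = precision = 32, choose_multiplier works
   out to multiplier 0xAAAAAAAB with post_shift 1 and a zero return
   value, so x / 3 becomes one multiply-high plus one right shift.
   Assumes a 32-bit unsigned int and a 64-bit unsigned long long.  */

static unsigned int
udiv3_example (unsigned int x)
{
  /* High half of the 32x32 -> 64 bit product, then the post shift.  */
  unsigned long long prod = (unsigned long long) x * 0xAAAAAAABULL;
  return (unsigned int) (prod >> 32) >> 1;
}

/* udiv3_example (7) == 2, udiv3_example (9) == 3.  */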
2697 /* Compute the inverse of X mod 2**n, i.e., find Y such that X * Y is
2698 congruent to 1 (mod 2**N). */
2700 static unsigned HOST_WIDE_INT
2701 invert_mod2n (x, n)
2702 unsigned HOST_WIDE_INT x;
2703 int n;
2705 /* Solve x*y == 1 (mod 2^n), where x is odd. Return y. */
2707 /* The algorithm notes that the choice y = x satisfies
2708 x*y == 1 mod 2^3, since x is assumed odd.
2709 Each iteration doubles the number of bits of significance in y. */
2711 unsigned HOST_WIDE_INT mask;
2712 unsigned HOST_WIDE_INT y = x;
2713 int nbit = 3;
2715 mask = (n == HOST_BITS_PER_WIDE_INT
2716 ? ~(unsigned HOST_WIDE_INT) 0
2717 : ((unsigned HOST_WIDE_INT) 1 << n) - 1);
2719 while (nbit < n)
2721 y = y * (2 - x*y) & mask; /* Modulo 2^N */
2722 nbit *= 2;
2724 return y;
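/* Worked example of the iteration above: invert_mod2n (3, 8).
   Starting from y = x = 3, which already satisfies 3*3 == 1 (mod 2^3):
     y = 3 * (2 - 3*3)     & 0xff == 235;  3 * 235 == 1 (mod 2^6)
     y = 235 * (2 - 3*235) & 0xff == 171;  3 * 171 == 1 (mod 2^8)
   nbit has reached 12 >= 8, so the result is 171, and indeed
   3 * 171 == 513 == 2 * 256 + 1.  */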
2727 /* Emit code to adjust ADJ_OPERAND after multiplication of wrong signedness
2728 flavor of OP0 and OP1. ADJ_OPERAND is already the high half of the
2729 product OP0 x OP1. If UNSIGNEDP is nonzero, adjust the signed product
2730 to become unsigned, if UNSIGNEDP is zero, adjust the unsigned product to
2731 become signed.
2733 The result is put in TARGET if that is convenient.
2735 MODE is the mode of operation. */
2738 expand_mult_highpart_adjust (mode, adj_operand, op0, op1, target, unsignedp)
2739 enum machine_mode mode;
2740 rtx adj_operand, op0, op1, target;
2741 int unsignedp;
2743 rtx tem;
2744 enum rtx_code adj_code = unsignedp ? PLUS : MINUS;
2746 tem = expand_shift (RSHIFT_EXPR, mode, op0,
2747 build_int_2 (GET_MODE_BITSIZE (mode) - 1, 0),
2748 NULL_RTX, 0);
2749 tem = expand_and (mode, tem, op1, NULL_RTX);
2750 adj_operand
2751 = force_operand (gen_rtx_fmt_ee (adj_code, mode, adj_operand, tem),
2752 adj_operand);
2754 tem = expand_shift (RSHIFT_EXPR, mode, op1,
2755 build_int_2 (GET_MODE_BITSIZE (mode) - 1, 0),
2756 NULL_RTX, 0);
2757 tem = expand_and (mode, tem, op0, NULL_RTX);
2758 target = force_operand (gen_rtx_fmt_ee (adj_code, mode, adj_operand, tem),
2759 target);
2761 return target;
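/* Plain-C rendering of this adjustment for 32-bit operands (a sketch,
   not from this file; assumes arithmetic right shifts on signed
   values).  Given the signed high half of a product, the unsigned high
   half is recovered by adding back each operand wherever the other
   operand's sign bit was set; the two shift-and-AND pairs above
   compute exactly those corrections.  */

static unsigned int
unsigned_highpart_example (int op0, int op1)
{
  long long prod = (long long) op0 * op1;  /* signed widening multiply */
  unsigned int high = (unsigned int) (prod >> 32);
  high += (unsigned int) ((op0 >> 31) & op1);  /* add op1 if op0 < 0 */
  high += (unsigned int) ((op1 >> 31) & op0);  /* add op0 if op1 < 0 */
  return high;  /* high half of (unsigned) op0 * (unsigned) op1 */
}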
2764 /* Emit code to multiply OP0 and CNST1, putting the high half of the result
2765 in TARGET if that is convenient, and return where the result is. If the
2766 operation can not be performed, 0 is returned.
2768 MODE is the mode of operation and result.
2770 UNSIGNEDP nonzero means unsigned multiply.
2772 MAX_COST is the total allowed cost for the expanded RTL. */
2774 rtx
2775 expand_mult_highpart (mode, op0, cnst1, target, unsignedp, max_cost)
2776 enum machine_mode mode;
2777 rtx op0, target;
2778 unsigned HOST_WIDE_INT cnst1;
2779 int unsignedp;
2780 int max_cost;
2782 enum machine_mode wider_mode = GET_MODE_WIDER_MODE (mode);
2783 optab mul_highpart_optab;
2784 optab moptab;
2785 rtx tem;
2786 int size = GET_MODE_BITSIZE (mode);
2787 rtx op1, wide_op1;
2789 /* We can't support modes wider than HOST_BITS_PER_WIDE_INT. */
2790 if (size > HOST_BITS_PER_WIDE_INT)
2791 abort ();
2793 op1 = gen_int_mode (cnst1, mode);
2795 wide_op1
2796 = immed_double_const (cnst1,
2797 (unsignedp
2798 ? (HOST_WIDE_INT) 0
2799 : -(cnst1 >> (HOST_BITS_PER_WIDE_INT - 1))),
2800 wider_mode);
2802 /* expand_mult handles constant multiplication of word_mode
2803 or narrower. It does a poor job for large modes. */
2804 if (size < BITS_PER_WORD
2805 && mul_cost[(int) wider_mode] + shift_cost[size-1] < max_cost)
2807 /* We have to do this, since expand_binop doesn't do conversion for
2808 multiply. Maybe change expand_binop to handle widening multiply? */
2809 op0 = convert_to_mode (wider_mode, op0, unsignedp);
2811 /* We know that this can't have signed overflow, so pretend this is
2812 an unsigned multiply. */
2813 tem = expand_mult (wider_mode, op0, wide_op1, NULL_RTX, 0);
2814 tem = expand_shift (RSHIFT_EXPR, wider_mode, tem,
2815 build_int_2 (size, 0), NULL_RTX, 1);
2816 return convert_modes (mode, wider_mode, tem, unsignedp);
2819 if (target == 0)
2820 target = gen_reg_rtx (mode);
2822 /* Firstly, try using a multiplication insn that only generates the needed
2823 high part of the product, and in the sign flavor of unsignedp. */
2824 if (mul_highpart_cost[(int) mode] < max_cost)
2826 mul_highpart_optab = unsignedp ? umul_highpart_optab : smul_highpart_optab;
2827 target = expand_binop (mode, mul_highpart_optab,
2828 op0, op1, target, unsignedp, OPTAB_DIRECT);
2829 if (target)
2830 return target;
2833 /* Secondly, same as above, but use sign flavor opposite of unsignedp.
2834 Need to adjust the result after the multiplication. */
2835 if (size - 1 < BITS_PER_WORD
2836 && (mul_highpart_cost[(int) mode] + 2 * shift_cost[size-1] + 4 * add_cost
2837 < max_cost))
2839 mul_highpart_optab = unsignedp ? smul_highpart_optab : umul_highpart_optab;
2840 target = expand_binop (mode, mul_highpart_optab,
2841 op0, op1, target, unsignedp, OPTAB_DIRECT);
2842 if (target)
2843 /* We used the wrong signedness. Adjust the result. */
2844 return expand_mult_highpart_adjust (mode, target, op0,
2845 op1, target, unsignedp);
2848 /* Try widening multiplication. */
2849 moptab = unsignedp ? umul_widen_optab : smul_widen_optab;
2850 if (moptab->handlers[(int) wider_mode].insn_code != CODE_FOR_nothing
2851 && mul_widen_cost[(int) wider_mode] < max_cost)
2853 op1 = force_reg (mode, op1);
2854 goto try;
2857 /* Try widening the mode and perform a non-widening multiplication. */
2858 moptab = smul_optab;
2859 if (smul_optab->handlers[(int) wider_mode].insn_code != CODE_FOR_nothing
2860 && size - 1 < BITS_PER_WORD
2861 && mul_cost[(int) wider_mode] + shift_cost[size-1] < max_cost)
2863 op1 = wide_op1;
2864 goto try;
2867 /* Try widening multiplication of opposite signedness, and adjust. */
2868 moptab = unsignedp ? smul_widen_optab : umul_widen_optab;
2869 if (moptab->handlers[(int) wider_mode].insn_code != CODE_FOR_nothing
2870 && size - 1 < BITS_PER_WORD
2871 && (mul_widen_cost[(int) wider_mode]
2872 + 2 * shift_cost[size-1] + 4 * add_cost < max_cost))
2874 rtx regop1 = force_reg (mode, op1);
2875 tem = expand_binop (wider_mode, moptab, op0, regop1,
2876 NULL_RTX, ! unsignedp, OPTAB_WIDEN);
2877 if (tem != 0)
2879 /* Extract the high half of the just generated product. */
2880 tem = expand_shift (RSHIFT_EXPR, wider_mode, tem,
2881 build_int_2 (size, 0), NULL_RTX, 1);
2882 tem = convert_modes (mode, wider_mode, tem, unsignedp);
2883 /* We used the wrong signedness. Adjust the result. */
2884 return expand_mult_highpart_adjust (mode, tem, op0, op1,
2885 target, unsignedp);
2889 return 0;
2891 try:
2892 /* Pass NULL_RTX as target since TARGET has the wrong mode. */
2893 tem = expand_binop (wider_mode, moptab, op0, op1,
2894 NULL_RTX, unsignedp, OPTAB_WIDEN);
2895 if (tem == 0)
2896 return 0;
2898 /* Extract the high half of the just generated product. */
2899 if (mode == word_mode)
2901 return gen_highpart (mode, tem);
2903 else
2905 tem = expand_shift (RSHIFT_EXPR, wider_mode, tem,
2906 build_int_2 (size, 0), NULL_RTX, 1);
2907 return convert_modes (mode, wider_mode, tem, unsignedp);
2911 /* Emit the code to divide OP0 by OP1, putting the result in TARGET
2912 if that is convenient, and returning where the result is.
2913 You may request either the quotient or the remainder as the result;
2914 specify REM_FLAG nonzero to get the remainder.
2916 CODE is the expression code for which kind of division this is;
2917 it controls how rounding is done. MODE is the machine mode to use.
2918 UNSIGNEDP nonzero means do unsigned division. */
2920 /* ??? For CEIL_MOD_EXPR, can compute incorrect remainder with ANDI
2921 and then correct it by or'ing in missing high bits
2922 if result of ANDI is nonzero.
2923 For ROUND_MOD_EXPR, can use ANDI and then sign-extend the result.
2924 This could optimize to a bfexts instruction.
2925 But C doesn't use these operations, so their optimizations are
2926 left for later. */
2927 /* ??? For modulo, we don't actually need the highpart of the first product;
2928 the low part will do nicely. And for small divisors, the second multiply
2929 can also be a low-part only multiply or even be completely left out.
2930 E.g. to calculate the remainder of a division by 3 with a 32 bit
2931 multiply, multiply with 0x55555556 and extract the upper two bits;
2932 the result is exact for inputs up to 0x1fffffff.
2933 The input range can be reduced by using cross-sum rules.
2934 For odd divisors >= 3, the following table gives right shift counts
2935 so that if a number is shifted by an integer multiple of the given
2936 amount, the remainder stays the same:
2937 2, 4, 3, 6, 10, 12, 4, 8, 18, 6, 11, 20, 18, 0, 5, 10, 12, 0, 12, 20,
2938 14, 12, 23, 21, 8, 0, 20, 18, 0, 0, 6, 12, 0, 22, 0, 18, 20, 30, 0, 0,
2939 0, 8, 0, 11, 12, 10, 36, 0, 30, 0, 0, 12, 0, 0, 0, 0, 44, 12, 24, 0,
2940 20, 0, 7, 14, 0, 18, 36, 0, 0, 46, 60, 0, 42, 0, 15, 24, 20, 0, 0, 33,
2941 0, 20, 0, 0, 18, 0, 60, 0, 0, 0, 0, 0, 40, 18, 0, 0, 12
2943 Cross-sum rules for even numbers can be derived by leaving as many bits
2944 to the right alone as the divisor has zeros to the right.
2945 E.g. if x is an unsigned 32 bit number:
2946 (x mod 12) == (((x & 1023) + ((x >> 8) & ~3)) * 0x15555558 >> 2 * 3) >> 28
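/* The remainder-by-3 trick from the comment above, in plain C (an
   illustration, not from this file).  A single truncated 32-bit
   multiply suffices; as noted, the result is exact only for inputs
   up to 0x1fffffff.  */

static unsigned int
rem3_example (unsigned int x)
{
  /* The remainder appears in the top two bits of the product.  */
  return (x * 0x55555556U) >> 30;
}

/* rem3_example (5) == 2, rem3_example (6) == 0.  */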
2949 #define EXACT_POWER_OF_2_OR_ZERO_P(x) (((x) & ((x) - 1)) == 0)
2951 rtx
2952 expand_divmod (rem_flag, code, mode, op0, op1, target, unsignedp)
2953 int rem_flag;
2954 enum tree_code code;
2955 enum machine_mode mode;
2956 rtx op0, op1, target;
2957 int unsignedp;
2959 enum machine_mode compute_mode;
2960 rtx tquotient;
2961 rtx quotient = 0, remainder = 0;
2962 rtx last;
2963 int size;
2964 rtx insn, set;
2965 optab optab1, optab2;
2966 int op1_is_constant, op1_is_pow2;
2967 int max_cost, extra_cost;
2968 static HOST_WIDE_INT last_div_const = 0;
2970 op1_is_constant = GET_CODE (op1) == CONST_INT;
2971 op1_is_pow2 = (op1_is_constant
2972 && ((EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1))
2973 || (! unsignedp && EXACT_POWER_OF_2_OR_ZERO_P (-INTVAL (op1))))));
2976 /* This is the structure of expand_divmod:
2978 First comes code to fix up the operands so we can perform the operations
2979 correctly and efficiently.
2981 Second comes a switch statement with code specific for each rounding mode.
2982 For some special operands this code emits all RTL for the desired
2983 operation, for other cases, it generates only a quotient and stores it in
2984 QUOTIENT. The case for trunc division/remainder might leave quotient = 0,
2985 to indicate that it has not done anything.
2987 Last comes code that finishes the operation. If QUOTIENT is set and
2988 REM_FLAG is set, the remainder is computed as OP0 - QUOTIENT * OP1. If
2989 QUOTIENT is not set, it is computed using trunc rounding.
2991 We try to generate special code for division and remainder when OP1 is a
2992 constant. If |OP1| = 2**n we can use shifts and some other fast
2993 operations. For other values of OP1, we compute a carefully selected
2994 fixed-point approximation m = 1/OP1, and generate code that multiplies OP0
2995 by m.
2997 In all cases but EXACT_DIV_EXPR, this multiplication requires the upper
2998 half of the product. Different strategies for generating the product are
2999 implemented in expand_mult_highpart.
3001 If what we actually want is the remainder, we generate that by another
3002 by-constant multiplication and a subtraction. */
3004 /* We shouldn't be called with OP1 == const1_rtx, but some of the
3005 code below will malfunction if we are, so check here and handle
3006 the special case if so. */
3007 if (op1 == const1_rtx)
3008 return rem_flag ? const0_rtx : op0;
3010 /* When dividing by -1, we could get an overflow.
3011 negv_optab can handle overflows. */
3012 if (! unsignedp && op1 == constm1_rtx)
3014 if (rem_flag)
3015 return const0_rtx;
3016 return expand_unop (mode, flag_trapv && GET_MODE_CLASS(mode) == MODE_INT
3017 ? negv_optab : neg_optab, op0, target, 0);
3020 if (target
3021 /* Don't use the function value register as a target
3022 since we have to read it as well as write it,
3023 and function-inlining gets confused by this. */
3024 && ((REG_P (target) && REG_FUNCTION_VALUE_P (target))
3025 /* Don't clobber an operand while doing a multi-step calculation. */
3026 || ((rem_flag || op1_is_constant)
3027 && (reg_mentioned_p (target, op0)
3028 || (GET_CODE (op0) == MEM && GET_CODE (target) == MEM)))
3029 || reg_mentioned_p (target, op1)
3030 || (GET_CODE (op1) == MEM && GET_CODE (target) == MEM)))
3031 target = 0;
3033 /* Get the mode in which to perform this computation. Normally it will
3034 be MODE, but sometimes we can't do the desired operation in MODE.
3035 If so, pick a wider mode in which we can do the operation. Convert
3036 to that mode at the start to avoid repeated conversions.
3038 First see what operations we need. These depend on the expression
3039 we are evaluating. (We assume that divxx3 insns exist under the
3040 same conditions as modxx3 insns, and that these insns don't normally
3041 fail. If these assumptions are not correct, we may generate less
3042 efficient code in some cases.)
3044 Then see if we find a mode in which we can open-code that operation
3045 (either a division, modulus, or shift). Finally, check for the smallest
3046 mode for which we can do the operation with a library call. */
3048 /* We might want to refine this now that we have division-by-constant
3049 optimization. Since expand_mult_highpart tries so many variants, it is
3050 not straightforward to generalize this. Maybe we should make an array
3051 of possible modes in init_expmed? Save this for GCC 2.7. */
3053 optab1 = (op1_is_pow2 ? (unsignedp ? lshr_optab : ashr_optab)
3054 : (unsignedp ? udiv_optab : sdiv_optab));
3055 optab2 = (op1_is_pow2 ? optab1 : (unsignedp ? udivmod_optab : sdivmod_optab));
3057 for (compute_mode = mode; compute_mode != VOIDmode;
3058 compute_mode = GET_MODE_WIDER_MODE (compute_mode))
3059 if (optab1->handlers[(int) compute_mode].insn_code != CODE_FOR_nothing
3060 || optab2->handlers[(int) compute_mode].insn_code != CODE_FOR_nothing)
3061 break;
3063 if (compute_mode == VOIDmode)
3064 for (compute_mode = mode; compute_mode != VOIDmode;
3065 compute_mode = GET_MODE_WIDER_MODE (compute_mode))
3066 if (optab1->handlers[(int) compute_mode].libfunc
3067 || optab2->handlers[(int) compute_mode].libfunc)
3068 break;
3070 /* If we still couldn't find a mode, use MODE, but we'll probably abort
3071 in expand_binop. */
3072 if (compute_mode == VOIDmode)
3073 compute_mode = mode;
3075 if (target && GET_MODE (target) == compute_mode)
3076 tquotient = target;
3077 else
3078 tquotient = gen_reg_rtx (compute_mode);
3080 size = GET_MODE_BITSIZE (compute_mode);
3081 #if 0
3082 /* It should be possible to restrict the precision to GET_MODE_BITSIZE
3083 (mode), and thereby get better code when OP1 is a constant. Do that
3084 later. It will require going over all usages of SIZE below. */
3085 size = GET_MODE_BITSIZE (mode);
3086 #endif
3088 /* Only deduct something for a REM if the last divide done was
3089 for a different constant. Then set the constant of the last
3090 divide. */
3091 max_cost = div_cost[(int) compute_mode]
3092 - (rem_flag && ! (last_div_const != 0 && op1_is_constant
3093 && INTVAL (op1) == last_div_const)
3094 ? mul_cost[(int) compute_mode] + add_cost : 0);
3096 last_div_const = ! rem_flag && op1_is_constant ? INTVAL (op1) : 0;
3098 /* Now convert to the best mode to use. */
3099 if (compute_mode != mode)
3101 op0 = convert_modes (compute_mode, mode, op0, unsignedp);
3102 op1 = convert_modes (compute_mode, mode, op1, unsignedp);
3104 /* convert_modes may have placed op1 into a register, so we
3105 must recompute the following. */
3106 op1_is_constant = GET_CODE (op1) == CONST_INT;
3107 op1_is_pow2 = (op1_is_constant
3108 && ((EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1))
3109 || (! unsignedp
3110 && EXACT_POWER_OF_2_OR_ZERO_P (-INTVAL (op1)))))) ;
3113 /* If one of the operands is a volatile MEM, copy it into a register. */
3115 if (GET_CODE (op0) == MEM && MEM_VOLATILE_P (op0))
3116 op0 = force_reg (compute_mode, op0);
3117 if (GET_CODE (op1) == MEM && MEM_VOLATILE_P (op1))
3118 op1 = force_reg (compute_mode, op1);
3120 /* If we need the remainder or if OP1 is constant, we need to
3121 put OP0 in a register in case it has any queued subexpressions. */
3122 if (rem_flag || op1_is_constant)
3123 op0 = force_reg (compute_mode, op0);
3125 last = get_last_insn ();
3127 /* Promote floor rounding to trunc rounding for unsigned operations. */
3128 if (unsignedp)
3130 if (code == FLOOR_DIV_EXPR)
3131 code = TRUNC_DIV_EXPR;
3132 if (code == FLOOR_MOD_EXPR)
3133 code = TRUNC_MOD_EXPR;
3134 if (code == EXACT_DIV_EXPR && op1_is_pow2)
3135 code = TRUNC_DIV_EXPR;
3138 if (op1 != const0_rtx)
3139 switch (code)
3141 case TRUNC_MOD_EXPR:
3142 case TRUNC_DIV_EXPR:
3143 if (op1_is_constant)
3145 if (unsignedp)
3147 unsigned HOST_WIDE_INT mh, ml;
3148 int pre_shift, post_shift;
3149 int dummy;
3150 unsigned HOST_WIDE_INT d = INTVAL (op1);
3152 if (EXACT_POWER_OF_2_OR_ZERO_P (d))
3154 pre_shift = floor_log2 (d);
3155 if (rem_flag)
3157 remainder
3158 = expand_binop (compute_mode, and_optab, op0,
3159 GEN_INT (((HOST_WIDE_INT) 1 << pre_shift) - 1),
3160 remainder, 1,
3161 OPTAB_LIB_WIDEN);
3162 if (remainder)
3163 return gen_lowpart (mode, remainder);
3165 quotient = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3166 build_int_2 (pre_shift, 0),
3167 tquotient, 1);
3169 else if (size <= HOST_BITS_PER_WIDE_INT)
3171 if (d >= ((unsigned HOST_WIDE_INT) 1 << (size - 1)))
3173 /* Most significant bit of divisor is set; emit an scc
3174 insn. */
3175 quotient = emit_store_flag (tquotient, GEU, op0, op1,
3176 compute_mode, 1, 1);
3177 if (quotient == 0)
3178 goto fail1;
3180 else
3182 /* Find a suitable multiplier and right shift count
3183 instead of multiplying with D. */
3185 mh = choose_multiplier (d, size, size,
3186 &ml, &post_shift, &dummy);
3188 /* If the suggested multiplier is more than SIZE bits,
3189 we can do better for even divisors, using an
3190 initial right shift. */
3191 if (mh != 0 && (d & 1) == 0)
3193 pre_shift = floor_log2 (d & -d);
3194 mh = choose_multiplier (d >> pre_shift, size,
3195 size - pre_shift,
3196 &ml, &post_shift, &dummy);
3197 if (mh)
3198 abort ();
3200 else
3201 pre_shift = 0;
3203 if (mh != 0)
3205 rtx t1, t2, t3, t4;
3207 if (post_shift - 1 >= BITS_PER_WORD)
3208 goto fail1;
3210 extra_cost = (shift_cost[post_shift - 1]
3211 + shift_cost[1] + 2 * add_cost);
3212 t1 = expand_mult_highpart (compute_mode, op0, ml,
3213 NULL_RTX, 1,
3214 max_cost - extra_cost);
3215 if (t1 == 0)
3216 goto fail1;
3217 t2 = force_operand (gen_rtx_MINUS (compute_mode,
3218 op0, t1),
3219 NULL_RTX);
3220 t3 = expand_shift (RSHIFT_EXPR, compute_mode, t2,
3221 build_int_2 (1, 0), NULL_RTX,1);
3222 t4 = force_operand (gen_rtx_PLUS (compute_mode,
3223 t1, t3),
3224 NULL_RTX);
3225 quotient
3226 = expand_shift (RSHIFT_EXPR, compute_mode, t4,
3227 build_int_2 (post_shift - 1, 0),
3228 tquotient, 1);
3230 else
3232 rtx t1, t2;
3234 if (pre_shift >= BITS_PER_WORD
3235 || post_shift >= BITS_PER_WORD)
3236 goto fail1;
3238 t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3239 build_int_2 (pre_shift, 0),
3240 NULL_RTX, 1);
3241 extra_cost = (shift_cost[pre_shift]
3242 + shift_cost[post_shift]);
3243 t2 = expand_mult_highpart (compute_mode, t1, ml,
3244 NULL_RTX, 1,
3245 max_cost - extra_cost);
3246 if (t2 == 0)
3247 goto fail1;
3248 quotient
3249 = expand_shift (RSHIFT_EXPR, compute_mode, t2,
3250 build_int_2 (post_shift, 0),
3251 tquotient, 1);
3255 else /* Too wide mode to use tricky code */
3256 break;
3258 insn = get_last_insn ();
3259 if (insn != last
3260 && (set = single_set (insn)) != 0
3261 && SET_DEST (set) == quotient)
3262 set_unique_reg_note (insn,
3263 REG_EQUAL,
3264 gen_rtx_UDIV (compute_mode, op0, op1));
3266 else /* TRUNC_DIV, signed */
3268 unsigned HOST_WIDE_INT ml;
3269 int lgup, post_shift;
3270 HOST_WIDE_INT d = INTVAL (op1);
3271 unsigned HOST_WIDE_INT abs_d = d >= 0 ? d : -d;
3273 /* n rem d = n rem -d */
3274 if (rem_flag && d < 0)
3276 d = abs_d;
3277 op1 = gen_int_mode (abs_d, compute_mode);
3280 if (d == 1)
3281 quotient = op0;
3282 else if (d == -1)
3283 quotient = expand_unop (compute_mode, neg_optab, op0,
3284 tquotient, 0);
3285 else if (abs_d == (unsigned HOST_WIDE_INT) 1 << (size - 1))
3287 /* This case is not handled correctly below. */
3288 quotient = emit_store_flag (tquotient, EQ, op0, op1,
3289 compute_mode, 1, 1);
3290 if (quotient == 0)
3291 goto fail1;
3293 else if (EXACT_POWER_OF_2_OR_ZERO_P (d)
3294 && (rem_flag ? smod_pow2_cheap : sdiv_pow2_cheap)
3295 /* ??? The cheap metric is computed only for
3296 word_mode. If this operation is wider, this may
3297 not be so. Assume true if the optab has an
3298 expander for this mode. */
3299 && (((rem_flag ? smod_optab : sdiv_optab)
3300 ->handlers[(int) compute_mode].insn_code
3301 != CODE_FOR_nothing)
3302 || (sdivmod_optab->handlers[(int) compute_mode]
3303 .insn_code != CODE_FOR_nothing)))
3305 else if (EXACT_POWER_OF_2_OR_ZERO_P (abs_d))
3307 lgup = floor_log2 (abs_d);
3308 if (BRANCH_COST < 1 || (abs_d != 2 && BRANCH_COST < 3))
3310 rtx label = gen_label_rtx ();
3311 rtx t1;
3313 t1 = copy_to_mode_reg (compute_mode, op0);
3314 do_cmp_and_jump (t1, const0_rtx, GE,
3315 compute_mode, label);
3316 expand_inc (t1, gen_int_mode (abs_d - 1,
3317 compute_mode));
3318 emit_label (label);
3319 quotient = expand_shift (RSHIFT_EXPR, compute_mode, t1,
3320 build_int_2 (lgup, 0),
3321 tquotient, 0);
3323 else
3325 rtx t1, t2, t3;
3326 t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3327 build_int_2 (size - 1, 0),
3328 NULL_RTX, 0);
3329 t2 = expand_shift (RSHIFT_EXPR, compute_mode, t1,
3330 build_int_2 (size - lgup, 0),
3331 NULL_RTX, 1);
3332 t3 = force_operand (gen_rtx_PLUS (compute_mode,
3333 op0, t2),
3334 NULL_RTX);
3335 quotient = expand_shift (RSHIFT_EXPR, compute_mode, t3,
3336 build_int_2 (lgup, 0),
3337 tquotient, 0);
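/* Worked instance of the branch-free sequence above, for op0 = -7
   and abs_d = 4 (lgup = 2, size = 32):
     t1 = -7 >> 31             == -1  (sign mask)
     t2 = (unsigned) -1 >> 30  ==  3  (abs_d - 1 if negative, else 0)
     t3 = -7 + 3               == -4
     t3 >> 2                   == -1 == trunc (-7 / 4).  */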
3340 /* We have computed OP0 / abs(OP1). If OP1 is negative, negate
3341 the quotient. */
3342 if (d < 0)
3344 insn = get_last_insn ();
3345 if (insn != last
3346 && (set = single_set (insn)) != 0
3347 && SET_DEST (set) == quotient
3348 && abs_d < ((unsigned HOST_WIDE_INT) 1
3349 << (HOST_BITS_PER_WIDE_INT - 1)))
3350 set_unique_reg_note (insn,
3351 REG_EQUAL,
3352 gen_rtx_DIV (compute_mode,
3353 op0,
3354 GEN_INT
3355 (trunc_int_for_mode
3356 (abs_d,
3357 compute_mode))));
3359 quotient = expand_unop (compute_mode, neg_optab,
3360 quotient, quotient, 0);
3363 else if (size <= HOST_BITS_PER_WIDE_INT)
3365 choose_multiplier (abs_d, size, size - 1,
3366 &ml, &post_shift, &lgup);
3367 if (ml < (unsigned HOST_WIDE_INT) 1 << (size - 1))
3369 rtx t1, t2, t3;
3371 if (post_shift >= BITS_PER_WORD
3372 || size - 1 >= BITS_PER_WORD)
3373 goto fail1;
3375 extra_cost = (shift_cost[post_shift]
3376 + shift_cost[size - 1] + add_cost);
3377 t1 = expand_mult_highpart (compute_mode, op0, ml,
3378 NULL_RTX, 0,
3379 max_cost - extra_cost);
3380 if (t1 == 0)
3381 goto fail1;
3382 t2 = expand_shift (RSHIFT_EXPR, compute_mode, t1,
3383 build_int_2 (post_shift, 0), NULL_RTX, 0);
3384 t3 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3385 build_int_2 (size - 1, 0), NULL_RTX, 0);
3386 if (d < 0)
3387 quotient
3388 = force_operand (gen_rtx_MINUS (compute_mode,
3389 t3, t2),
3390 tquotient);
3391 else
3392 quotient
3393 = force_operand (gen_rtx_MINUS (compute_mode,
3394 t2, t3),
3395 tquotient);
3397 else
3399 rtx t1, t2, t3, t4;
3401 if (post_shift >= BITS_PER_WORD
3402 || size - 1 >= BITS_PER_WORD)
3403 goto fail1;
3405 ml |= (~(unsigned HOST_WIDE_INT) 0) << (size - 1);
3406 extra_cost = (shift_cost[post_shift]
3407 + shift_cost[size - 1] + 2 * add_cost);
3408 t1 = expand_mult_highpart (compute_mode, op0, ml,
3409 NULL_RTX, 0,
3410 max_cost - extra_cost);
3411 if (t1 == 0)
3412 goto fail1;
3413 t2 = force_operand (gen_rtx_PLUS (compute_mode,
3414 t1, op0),
3415 NULL_RTX);
3416 t3 = expand_shift (RSHIFT_EXPR, compute_mode, t2,
3417 build_int_2 (post_shift, 0),
3418 NULL_RTX, 0);
3419 t4 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3420 build_int_2 (size - 1, 0),
3421 NULL_RTX, 0);
3422 if (d < 0)
3423 quotient
3424 = force_operand (gen_rtx_MINUS (compute_mode,
3425 t4, t3),
3426 tquotient);
3427 else
3428 quotient
3429 = force_operand (gen_rtx_MINUS (compute_mode,
3430 t3, t4),
3431 tquotient);
3434 else /* Too wide mode to use tricky code */
3435 break;
3437 insn = get_last_insn ();
3438 if (insn != last
3439 && (set = single_set (insn)) != 0
3440 && SET_DEST (set) == quotient)
3441 set_unique_reg_note (insn,
3442 REG_EQUAL,
3443 gen_rtx_DIV (compute_mode, op0, op1));
3445 break;
3447 fail1:
3448 delete_insns_since (last);
3449 break;
3451 case FLOOR_DIV_EXPR:
3452 case FLOOR_MOD_EXPR:
3453 /* We will come here only for signed operations. */
3454 if (op1_is_constant && HOST_BITS_PER_WIDE_INT >= size)
3456 unsigned HOST_WIDE_INT mh, ml;
3457 int pre_shift, lgup, post_shift;
3458 HOST_WIDE_INT d = INTVAL (op1);
3460 if (d > 0)
3462 /* We could just as easily deal with negative constants here,
3463 but it does not seem worth the trouble for GCC 2.6. */
3464 if (EXACT_POWER_OF_2_OR_ZERO_P (d))
3466 pre_shift = floor_log2 (d);
3467 if (rem_flag)
3469 remainder = expand_binop (compute_mode, and_optab, op0,
3470 GEN_INT (((HOST_WIDE_INT) 1 << pre_shift) - 1),
3471 remainder, 0, OPTAB_LIB_WIDEN);
3472 if (remainder)
3473 return gen_lowpart (mode, remainder);
3475 quotient = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3476 build_int_2 (pre_shift, 0),
3477 tquotient, 0);
3479 else
3481 rtx t1, t2, t3, t4;
3483 mh = choose_multiplier (d, size, size - 1,
3484 &ml, &post_shift, &lgup);
3485 if (mh)
3486 abort ();
3488 if (post_shift < BITS_PER_WORD
3489 && size - 1 < BITS_PER_WORD)
3491 t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3492 build_int_2 (size - 1, 0),
3493 NULL_RTX, 0);
3494 t2 = expand_binop (compute_mode, xor_optab, op0, t1,
3495 NULL_RTX, 0, OPTAB_WIDEN);
3496 extra_cost = (shift_cost[post_shift]
3497 + shift_cost[size - 1] + 2 * add_cost);
3498 t3 = expand_mult_highpart (compute_mode, t2, ml,
3499 NULL_RTX, 1,
3500 max_cost - extra_cost);
3501 if (t3 != 0)
3503 t4 = expand_shift (RSHIFT_EXPR, compute_mode, t3,
3504 build_int_2 (post_shift, 0),
3505 NULL_RTX, 1);
3506 quotient = expand_binop (compute_mode, xor_optab,
3507 t4, t1, tquotient, 0,
3508 OPTAB_WIDEN);
3513 else
3515 rtx nsign, t1, t2, t3, t4;
3516 t1 = force_operand (gen_rtx_PLUS (compute_mode,
3517 op0, constm1_rtx), NULL_RTX);
3518 t2 = expand_binop (compute_mode, ior_optab, op0, t1, NULL_RTX,
3519 0, OPTAB_WIDEN);
3520 nsign = expand_shift (RSHIFT_EXPR, compute_mode, t2,
3521 build_int_2 (size - 1, 0), NULL_RTX, 0);
3522 t3 = force_operand (gen_rtx_MINUS (compute_mode, t1, nsign),
3523 NULL_RTX);
3524 t4 = expand_divmod (0, TRUNC_DIV_EXPR, compute_mode, t3, op1,
3525 NULL_RTX, 0);
3526 if (t4)
3528 rtx t5;
3529 t5 = expand_unop (compute_mode, one_cmpl_optab, nsign,
3530 NULL_RTX, 0);
3531 quotient = force_operand (gen_rtx_PLUS (compute_mode,
3532 t4, t5),
3533 tquotient);
3538 if (quotient != 0)
3539 break;
3540 delete_insns_since (last);
3542 /* Try using an instruction that produces both the quotient and
3543 remainder, using truncation. We can easily compensate the quotient
3544 or remainder to get floor rounding, once we have the remainder.
3545 Notice that we compute also the final remainder value here,
3546 and return the result right away. */
3547 if (target == 0 || GET_MODE (target) != compute_mode)
3548 target = gen_reg_rtx (compute_mode);
3550 if (rem_flag)
3552 remainder
3553 = GET_CODE (target) == REG ? target : gen_reg_rtx (compute_mode);
3554 quotient = gen_reg_rtx (compute_mode);
3556 else
3558 quotient
3559 = GET_CODE (target) == REG ? target : gen_reg_rtx (compute_mode);
3560 remainder = gen_reg_rtx (compute_mode);
3563 if (expand_twoval_binop (sdivmod_optab, op0, op1,
3564 quotient, remainder, 0))
3566 /* This could be computed with a branch-less sequence.
3567 Save that for later. */
3568 rtx tem;
3569 rtx label = gen_label_rtx ();
3570 do_cmp_and_jump (remainder, const0_rtx, EQ, compute_mode, label);
3571 tem = expand_binop (compute_mode, xor_optab, op0, op1,
3572 NULL_RTX, 0, OPTAB_WIDEN);
3573 do_cmp_and_jump (tem, const0_rtx, GE, compute_mode, label);
3574 expand_dec (quotient, const1_rtx);
3575 expand_inc (remainder, op1);
3576 emit_label (label);
3577 return gen_lowpart (mode, rem_flag ? remainder : quotient);
3580 /* No luck with division elimination or divmod. Have to do it
3581 by conditionally adjusting op0 *and* the result. */
3583 rtx label1, label2, label3, label4, label5;
3584 rtx adjusted_op0;
3585 rtx tem;
3587 quotient = gen_reg_rtx (compute_mode);
3588 adjusted_op0 = copy_to_mode_reg (compute_mode, op0);
3589 label1 = gen_label_rtx ();
3590 label2 = gen_label_rtx ();
3591 label3 = gen_label_rtx ();
3592 label4 = gen_label_rtx ();
3593 label5 = gen_label_rtx ();
3594 do_cmp_and_jump (op1, const0_rtx, LT, compute_mode, label2);
3595 do_cmp_and_jump (adjusted_op0, const0_rtx, LT, compute_mode, label1);
3596 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
3597 quotient, 0, OPTAB_LIB_WIDEN);
3598 if (tem != quotient)
3599 emit_move_insn (quotient, tem);
3600 emit_jump_insn (gen_jump (label5));
3601 emit_barrier ();
3602 emit_label (label1);
3603 expand_inc (adjusted_op0, const1_rtx);
3604 emit_jump_insn (gen_jump (label4));
3605 emit_barrier ();
3606 emit_label (label2);
3607 do_cmp_and_jump (adjusted_op0, const0_rtx, GT, compute_mode, label3);
3608 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
3609 quotient, 0, OPTAB_LIB_WIDEN);
3610 if (tem != quotient)
3611 emit_move_insn (quotient, tem);
3612 emit_jump_insn (gen_jump (label5));
3613 emit_barrier ();
3614 emit_label (label3);
3615 expand_dec (adjusted_op0, const1_rtx);
3616 emit_label (label4);
3617 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
3618 quotient, 0, OPTAB_LIB_WIDEN);
3619 if (tem != quotient)
3620 emit_move_insn (quotient, tem);
3621 expand_dec (quotient, const1_rtx);
3622 emit_label (label5);
3624 break;
3626 case CEIL_DIV_EXPR:
3627 case CEIL_MOD_EXPR:
3628 if (unsignedp)
3630 if (op1_is_constant && EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1)))
3632 rtx t1, t2, t3;
3633 unsigned HOST_WIDE_INT d = INTVAL (op1);
3634 t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3635 build_int_2 (floor_log2 (d), 0),
3636 tquotient, 1);
3637 t2 = expand_binop (compute_mode, and_optab, op0,
3638 GEN_INT (d - 1),
3639 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3640 t3 = gen_reg_rtx (compute_mode);
3641 t3 = emit_store_flag (t3, NE, t2, const0_rtx,
3642 compute_mode, 1, 1);
3643 if (t3 == 0)
3645 rtx lab;
3646 lab = gen_label_rtx ();
3647 do_cmp_and_jump (t2, const0_rtx, EQ, compute_mode, lab);
3648 expand_inc (t1, const1_rtx);
3649 emit_label (lab);
3650 quotient = t1;
3652 else
3653 quotient = force_operand (gen_rtx_PLUS (compute_mode,
3654 t1, t3),
3655 tquotient);
3656 break;
3659 /* Try using an instruction that produces both the quotient and
3660 remainder, using truncation. We can easily compensate the
3661 quotient or remainder to get ceiling rounding, once we have the
3662 remainder. Notice that we also compute the final remainder
3663 value here, and return the result right away. */
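/* Editorial worked example (not in the original source): for the
   unsigned division 7 / 2, the truncating divmod gives quotient 3 and
   remainder 1.  The remainder is non-zero, so the code below increments
   the quotient to 4 = ceil(7/2) and subtracts op1 from the remainder,
   giving -1 = 7 - 2*4.  */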
3664 if (target == 0 || GET_MODE (target) != compute_mode)
3665 target = gen_reg_rtx (compute_mode);
3667 if (rem_flag)
3669 remainder = (GET_CODE (target) == REG
3670 ? target : gen_reg_rtx (compute_mode));
3671 quotient = gen_reg_rtx (compute_mode);
3673 else
3675 quotient = (GET_CODE (target) == REG
3676 ? target : gen_reg_rtx (compute_mode));
3677 remainder = gen_reg_rtx (compute_mode);
3680 if (expand_twoval_binop (udivmod_optab, op0, op1, quotient,
3681 remainder, 1))
3683 /* This could be computed with a branch-less sequence.
3684 Save that for later. */
3685 rtx label = gen_label_rtx ();
3686 do_cmp_and_jump (remainder, const0_rtx, EQ,
3687 compute_mode, label);
3688 expand_inc (quotient, const1_rtx);
3689 expand_dec (remainder, op1);
3690 emit_label (label);
3691 return gen_lowpart (mode, rem_flag ? remainder : quotient);
3694 /* No luck with division elimination or divmod. Have to do it
3695 by conditionally adjusting op0 *and* the result. */
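/* Editorial note (illustration only): for unsigned operands the
   adjustment below computes ceil(a/b) as a == 0 ? 0 : (a-1)/b + 1,
   e.g. ceil(7/2) = 6/2 + 1 = 4 and ceil(8/2) = 7/2 + 1 = 4.  */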
3697 rtx label1, label2;
3698 rtx adjusted_op0, tem;
3700 quotient = gen_reg_rtx (compute_mode);
3701 adjusted_op0 = copy_to_mode_reg (compute_mode, op0);
3702 label1 = gen_label_rtx ();
3703 label2 = gen_label_rtx ();
3704 do_cmp_and_jump (adjusted_op0, const0_rtx, NE,
3705 compute_mode, label1);
3706 emit_move_insn (quotient, const0_rtx);
3707 emit_jump_insn (gen_jump (label2));
3708 emit_barrier ();
3709 emit_label (label1);
3710 expand_dec (adjusted_op0, const1_rtx);
3711 tem = expand_binop (compute_mode, udiv_optab, adjusted_op0, op1,
3712 quotient, 1, OPTAB_LIB_WIDEN);
3713 if (tem != quotient)
3714 emit_move_insn (quotient, tem);
3715 expand_inc (quotient, const1_rtx);
3716 emit_label (label2);
3719 else /* signed */
3721 if (op1_is_constant && EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1))
3722 && INTVAL (op1) >= 0)
3724 /* This is extremely similar to the code for the unsigned case
3725 above. For 2.7 we should merge these variants, but for
3726 2.6.1 I don't want to touch the code for unsigned since that
3727 gets used in C. The signed case will only be used by other
3728 languages (Ada). */
3730 rtx t1, t2, t3;
3731 unsigned HOST_WIDE_INT d = INTVAL (op1);
3732 t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3733 build_int_2 (floor_log2 (d), 0),
3734 tquotient, 0);
3735 t2 = expand_binop (compute_mode, and_optab, op0,
3736 GEN_INT (d - 1),
3737 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3738 t3 = gen_reg_rtx (compute_mode);
3739 t3 = emit_store_flag (t3, NE, t2, const0_rtx,
3740 compute_mode, 1, 1);
3741 if (t3 == 0)
3743 rtx lab;
3744 lab = gen_label_rtx ();
3745 do_cmp_and_jump (t2, const0_rtx, EQ, compute_mode, lab);
3746 expand_inc (t1, const1_rtx);
3747 emit_label (lab);
3748 quotient = t1;
3750 else
3751 quotient = force_operand (gen_rtx_PLUS (compute_mode,
3752 t1, t3),
3753 tquotient);
3754 break;
3757 /* Try using an instruction that produces both the quotient and
3758 remainder, using truncation. We can easily compensate the
3759 quotient or remainder to get ceiling rounding, once we have the
3760 remainder. Notice that we also compute the final remainder
3761 value here, and return the result right away. */
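/* Editorial worked example (not in the original source): for -7 / -2
   the truncating divmod gives quotient 3, remainder -1.  The remainder
   is non-zero and op0 ^ op1 >= 0, so the code below increments the
   quotient to 4 = ceil(3.5) and subtracts op1 from the remainder:
   -1 - (-2) = 1 = -7 - (-2)*4.  */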
3762 if (target == 0 || GET_MODE (target) != compute_mode)
3763 target = gen_reg_rtx (compute_mode);
3764 if (rem_flag)
3766 remainder = (GET_CODE (target) == REG
3767 ? target : gen_reg_rtx (compute_mode));
3768 quotient = gen_reg_rtx (compute_mode);
3770 else
3772 quotient = (GET_CODE (target) == REG
3773 ? target : gen_reg_rtx (compute_mode));
3774 remainder = gen_reg_rtx (compute_mode);
3777 if (expand_twoval_binop (sdivmod_optab, op0, op1, quotient,
3778 remainder, 0))
3780 /* This could be computed with a branch-less sequence.
3781 Save that for later. */
3782 rtx tem;
3783 rtx label = gen_label_rtx ();
3784 do_cmp_and_jump (remainder, const0_rtx, EQ,
3785 compute_mode, label);
3786 tem = expand_binop (compute_mode, xor_optab, op0, op1,
3787 NULL_RTX, 0, OPTAB_WIDEN);
3788 do_cmp_and_jump (tem, const0_rtx, LT, compute_mode, label);
3789 expand_inc (quotient, const1_rtx);
3790 expand_dec (remainder, op1);
3791 emit_label (label);
3792 return gen_lowpart (mode, rem_flag ? remainder : quotient);
3795 /* No luck with division elimination or divmod. Have to do it
3796 by conditionally adjusting op0 *and* the result. */
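/* Editorial note (illustration only): when op0 and op1 have the same
   sign, ceiling division follows from truncating division by moving op0
   one step toward zero and adding 1 to the quotient, e.g.
   ceil(7/2) = (7-1)/2 + 1 = 4 and ceil(-7/-2) = (-7+1)/-2 + 1 = 4;
   when the signs differ, truncation already rounds toward the
   ceiling.  */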
3798 rtx label1, label2, label3, label4, label5;
3799 rtx adjusted_op0;
3800 rtx tem;
3802 quotient = gen_reg_rtx (compute_mode);
3803 adjusted_op0 = copy_to_mode_reg (compute_mode, op0);
3804 label1 = gen_label_rtx ();
3805 label2 = gen_label_rtx ();
3806 label3 = gen_label_rtx ();
3807 label4 = gen_label_rtx ();
3808 label5 = gen_label_rtx ();
3809 do_cmp_and_jump (op1, const0_rtx, LT, compute_mode, label2);
3810 do_cmp_and_jump (adjusted_op0, const0_rtx, GT,
3811 compute_mode, label1);
3812 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
3813 quotient, 0, OPTAB_LIB_WIDEN);
3814 if (tem != quotient)
3815 emit_move_insn (quotient, tem);
3816 emit_jump_insn (gen_jump (label5));
3817 emit_barrier ();
3818 emit_label (label1);
3819 expand_dec (adjusted_op0, const1_rtx);
3820 emit_jump_insn (gen_jump (label4));
3821 emit_barrier ();
3822 emit_label (label2);
3823 do_cmp_and_jump (adjusted_op0, const0_rtx, LT,
3824 compute_mode, label3);
3825 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
3826 quotient, 0, OPTAB_LIB_WIDEN);
3827 if (tem != quotient)
3828 emit_move_insn (quotient, tem);
3829 emit_jump_insn (gen_jump (label5));
3830 emit_barrier ();
3831 emit_label (label3);
3832 expand_inc (adjusted_op0, const1_rtx);
3833 emit_label (label4);
3834 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
3835 quotient, 0, OPTAB_LIB_WIDEN);
3836 if (tem != quotient)
3837 emit_move_insn (quotient, tem);
3838 expand_inc (quotient, const1_rtx);
3839 emit_label (label5);
3842 break;
3844 case EXACT_DIV_EXPR:
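/* Editorial note (illustration only): exact division by a constant
   d = 2^k * d' (d' odd) needs no real divide instruction.  After
   shifting out the 2^k factor, multiplying by the multiplicative
   inverse of d' modulo 2^size recovers the quotient, because the
   division is known to be exact.  E.g. with size = 4 and d = 3:
   3 * 11 = 33 == 1 (mod 16), so 9/3 = 9 * 11 mod 16 = 99 mod 16 = 3.  */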
3845 if (op1_is_constant && HOST_BITS_PER_WIDE_INT >= size)
3847 HOST_WIDE_INT d = INTVAL (op1);
3848 unsigned HOST_WIDE_INT ml;
3849 int pre_shift;
3850 rtx t1;
3852 pre_shift = floor_log2 (d & -d);
3853 ml = invert_mod2n (d >> pre_shift, size);
3854 t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3855 build_int_2 (pre_shift, 0), NULL_RTX, unsignedp);
3856 quotient = expand_mult (compute_mode, t1,
3857 gen_int_mode (ml, compute_mode),
3858 NULL_RTX, 0);
3860 insn = get_last_insn ();
3861 set_unique_reg_note (insn,
3862 REG_EQUAL,
3863 gen_rtx_fmt_ee (unsignedp ? UDIV : DIV,
3864 compute_mode,
3865 op0, op1));
3867 break;
3869 case ROUND_DIV_EXPR:
3870 case ROUND_MOD_EXPR:
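/* Editorial note (illustration only): round-to-nearest division is
   derived from a truncating quotient/remainder pair.  For unsigned
   operands the code below rounds up when remainder > (op1-1)/2, e.g.
   7/4 gives quotient 1, remainder 3 > 1, so the result is 2.  The
   signed variant compares 2*abs(remainder) against abs(op1) and
   adjusts the quotient and remainder by the sign of op0 ^ op1.  */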
3871 if (unsignedp)
3873 rtx tem;
3874 rtx label;
3875 label = gen_label_rtx ();
3876 quotient = gen_reg_rtx (compute_mode);
3877 remainder = gen_reg_rtx (compute_mode);
3878 if (expand_twoval_binop (udivmod_optab, op0, op1, quotient, remainder, 1) == 0)
3880 rtx tem;
3881 quotient = expand_binop (compute_mode, udiv_optab, op0, op1,
3882 quotient, 1, OPTAB_LIB_WIDEN);
3883 tem = expand_mult (compute_mode, quotient, op1, NULL_RTX, 1);
3884 remainder = expand_binop (compute_mode, sub_optab, op0, tem,
3885 remainder, 1, OPTAB_LIB_WIDEN);
3887 tem = plus_constant (op1, -1);
3888 tem = expand_shift (RSHIFT_EXPR, compute_mode, tem,
3889 build_int_2 (1, 0), NULL_RTX, 1);
3890 do_cmp_and_jump (remainder, tem, LEU, compute_mode, label);
3891 expand_inc (quotient, const1_rtx);
3892 expand_dec (remainder, op1);
3893 emit_label (label);
3895 else
3897 rtx abs_rem, abs_op1, tem, mask;
3898 rtx label;
3899 label = gen_label_rtx ();
3900 quotient = gen_reg_rtx (compute_mode);
3901 remainder = gen_reg_rtx (compute_mode);
3902 if (expand_twoval_binop (sdivmod_optab, op0, op1, quotient, remainder, 0) == 0)
3904 rtx tem;
3905 quotient = expand_binop (compute_mode, sdiv_optab, op0, op1,
3906 quotient, 0, OPTAB_LIB_WIDEN);
3907 tem = expand_mult (compute_mode, quotient, op1, NULL_RTX, 0);
3908 remainder = expand_binop (compute_mode, sub_optab, op0, tem,
3909 remainder, 0, OPTAB_LIB_WIDEN);
3911 abs_rem = expand_abs (compute_mode, remainder, NULL_RTX, 1, 0);
3912 abs_op1 = expand_abs (compute_mode, op1, NULL_RTX, 1, 0);
3913 tem = expand_shift (LSHIFT_EXPR, compute_mode, abs_rem,
3914 build_int_2 (1, 0), NULL_RTX, 1);
3915 do_cmp_and_jump (tem, abs_op1, LTU, compute_mode, label);
3916 tem = expand_binop (compute_mode, xor_optab, op0, op1,
3917 NULL_RTX, 0, OPTAB_WIDEN);
3918 mask = expand_shift (RSHIFT_EXPR, compute_mode, tem,
3919 build_int_2 (size - 1, 0), NULL_RTX, 0);
3920 tem = expand_binop (compute_mode, xor_optab, mask, const1_rtx,
3921 NULL_RTX, 0, OPTAB_WIDEN);
3922 tem = expand_binop (compute_mode, sub_optab, tem, mask,
3923 NULL_RTX, 0, OPTAB_WIDEN);
3924 expand_inc (quotient, tem);
3925 tem = expand_binop (compute_mode, xor_optab, mask, op1,
3926 NULL_RTX, 0, OPTAB_WIDEN);
3927 tem = expand_binop (compute_mode, sub_optab, tem, mask,
3928 NULL_RTX, 0, OPTAB_WIDEN);
3929 expand_dec (remainder, tem);
3930 emit_label (label);
3932 return gen_lowpart (mode, rem_flag ? remainder : quotient);
3934 default:
3935 abort ();
3938 if (quotient == 0)
3940 if (target && GET_MODE (target) != compute_mode)
3941 target = 0;
3943 if (rem_flag)
3945 /* Try to produce the remainder without producing the quotient.
3946 If we seem to have a divmod pattern that does not require widening,
3947 don't try widening here. We should really have a WIDEN argument
3948 to expand_twoval_binop, since what we'd really like to do here is
3949 1) try a mod insn in compute_mode
3950 2) try a divmod insn in compute_mode
3951 3) try a div insn in compute_mode and multiply-subtract to get
3952 remainder
3953 4) try the same things with widening allowed. */
3954 remainder
3955 = sign_expand_binop (compute_mode, umod_optab, smod_optab,
3956 op0, op1, target,
3957 unsignedp,
3958 ((optab2->handlers[(int) compute_mode].insn_code
3959 != CODE_FOR_nothing)
3960 ? OPTAB_DIRECT : OPTAB_WIDEN));
3961 if (remainder == 0)
3963 /* No luck there. Can we do remainder and divide at once
3964 without a library call? */
3965 remainder = gen_reg_rtx (compute_mode);
3966 if (! expand_twoval_binop ((unsignedp
3967 ? udivmod_optab
3968 : sdivmod_optab),
3969 op0, op1,
3970 NULL_RTX, remainder, unsignedp))
3971 remainder = 0;
3974 if (remainder)
3975 return gen_lowpart (mode, remainder);
3978 /* Produce the quotient. Try a quotient insn, but not a library call.
3979 If we have a divmod in this mode, use it in preference to widening
3980 the div (for this test we assume it will not fail). Note that optab2
3981 is set to whichever of the two optabs the call below will use. */
3982 quotient
3983 = sign_expand_binop (compute_mode, udiv_optab, sdiv_optab,
3984 op0, op1, rem_flag ? NULL_RTX : target,
3985 unsignedp,
3986 ((optab2->handlers[(int) compute_mode].insn_code
3987 != CODE_FOR_nothing)
3988 ? OPTAB_DIRECT : OPTAB_WIDEN));
3990 if (quotient == 0)
3992 /* No luck there. Try a quotient-and-remainder insn,
3993 keeping the quotient alone. */
3994 quotient = gen_reg_rtx (compute_mode);
3995 if (! expand_twoval_binop (unsignedp ? udivmod_optab : sdivmod_optab,
3996 op0, op1,
3997 quotient, NULL_RTX, unsignedp))
3999 quotient = 0;
4000 if (! rem_flag)
4001 /* Still no luck. If we are not computing the remainder,
4002 use a library call for the quotient. */
4003 quotient = sign_expand_binop (compute_mode,
4004 udiv_optab, sdiv_optab,
4005 op0, op1, target,
4006 unsignedp, OPTAB_LIB_WIDEN);
4011 if (rem_flag)
4013 if (target && GET_MODE (target) != compute_mode)
4014 target = 0;
4016 if (quotient == 0)
4017 /* No divide instruction either. Use library for remainder. */
4018 remainder = sign_expand_binop (compute_mode, umod_optab, smod_optab,
4019 op0, op1, target,
4020 unsignedp, OPTAB_LIB_WIDEN);
4021 else
4023 /* We divided. Now finish doing X - Y * (X / Y). */
4024 remainder = expand_mult (compute_mode, quotient, op1,
4025 NULL_RTX, unsignedp);
4026 remainder = expand_binop (compute_mode, sub_optab, op0,
4027 remainder, target, unsignedp,
4028 OPTAB_LIB_WIDEN);
4032 return gen_lowpart (mode, rem_flag ? remainder : quotient);
4035 /* Return a tree node with data type TYPE, describing the value of X.
4036 Usually this is an RTL_EXPR, if there is no obvious better choice.
4037 X may be an expression; however, we only support those expressions
4038 generated by loop.c. */
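/* An editorial example (not part of the original comment): a CONST_INT
   such as (const_int 4) becomes an INTEGER_CST of TYPE, while
   (plus:SI x y) becomes fold (build (PLUS_EXPR, type, ..., ...)) with
   the operands converted recursively; RTL with no direct tree
   equivalent is wrapped in an RTL_EXPR by the default case.  */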
4040 tree
4041 make_tree (type, x)
4042 tree type;
4043 rtx x;
4045 tree t;
4047 switch (GET_CODE (x))
4049 case CONST_INT:
4050 t = build_int_2 (INTVAL (x),
4051 (TREE_UNSIGNED (type)
4052 && (GET_MODE_BITSIZE (TYPE_MODE (type)) < HOST_BITS_PER_WIDE_INT))
4053 || INTVAL (x) >= 0 ? 0 : -1);
4054 TREE_TYPE (t) = type;
4055 return t;
4057 case CONST_DOUBLE:
4058 if (GET_MODE (x) == VOIDmode)
4060 t = build_int_2 (CONST_DOUBLE_LOW (x), CONST_DOUBLE_HIGH (x));
4061 TREE_TYPE (t) = type;
4063 else
4065 REAL_VALUE_TYPE d;
4067 REAL_VALUE_FROM_CONST_DOUBLE (d, x);
4068 t = build_real (type, d);
4071 return t;
4073 case CONST_VECTOR:
4075 int i, units;
4076 rtx elt;
4077 tree t = NULL_TREE;
4079 units = CONST_VECTOR_NUNITS (x);
4081 /* Build a tree with vector elements. */
4082 for (i = units - 1; i >= 0; --i)
4084 elt = CONST_VECTOR_ELT (x, i);
4085 t = tree_cons (NULL_TREE, make_tree (type, elt), t);
4088 return build_vector (type, t);
4091 case PLUS:
4092 return fold (build (PLUS_EXPR, type, make_tree (type, XEXP (x, 0)),
4093 make_tree (type, XEXP (x, 1))));
4095 case MINUS:
4096 return fold (build (MINUS_EXPR, type, make_tree (type, XEXP (x, 0)),
4097 make_tree (type, XEXP (x, 1))));
4099 case NEG:
4100 return fold (build1 (NEGATE_EXPR, type, make_tree (type, XEXP (x, 0))));
4102 case MULT:
4103 return fold (build (MULT_EXPR, type, make_tree (type, XEXP (x, 0)),
4104 make_tree (type, XEXP (x, 1))));
4106 case ASHIFT:
4107 return fold (build (LSHIFT_EXPR, type, make_tree (type, XEXP (x, 0)),
4108 make_tree (type, XEXP (x, 1))));
4110 case LSHIFTRT:
4111 t = (*lang_hooks.types.unsigned_type) (type);
4112 return fold (convert (type,
4113 build (RSHIFT_EXPR, t,
4114 make_tree (t, XEXP (x, 0)),
4115 make_tree (type, XEXP (x, 1)))));
4117 case ASHIFTRT:
4118 t = (*lang_hooks.types.signed_type) (type);
4119 return fold (convert (type,
4120 build (RSHIFT_EXPR, t,
4121 make_tree (t, XEXP (x, 0)),
4122 make_tree (type, XEXP (x, 1)))));
4124 case DIV:
4125 if (TREE_CODE (type) != REAL_TYPE)
4126 t = (*lang_hooks.types.signed_type) (type);
4127 else
4128 t = type;
4130 return fold (convert (type,
4131 build (TRUNC_DIV_EXPR, t,
4132 make_tree (t, XEXP (x, 0)),
4133 make_tree (t, XEXP (x, 1)))));
4134 case UDIV:
4135 t = (*lang_hooks.types.unsigned_type) (type);
4136 return fold (convert (type,
4137 build (TRUNC_DIV_EXPR, t,
4138 make_tree (t, XEXP (x, 0)),
4139 make_tree (t, XEXP (x, 1)))));
4140 default:
4141 t = make_node (RTL_EXPR);
4142 TREE_TYPE (t) = type;
4144 #ifdef POINTERS_EXTEND_UNSIGNED
4145 /* If TYPE is a POINTER_TYPE, X might be Pmode with TYPE_MODE being
4146 ptr_mode. So convert. */
4147 if (POINTER_TYPE_P (type) && GET_MODE (x) != TYPE_MODE (type))
4148 x = convert_memory_address (TYPE_MODE (type), x);
4149 #endif
4151 RTL_EXPR_RTL (t) = x;
4152 /* There are no insns to be output
4153 when this rtl_expr is used. */
4154 RTL_EXPR_SEQUENCE (t) = 0;
4155 return t;
4159 /* Check whether the multiplication X * MULT + ADD overflows.
4160 X, MULT and ADD must be CONST_*.
4161 MODE is the machine mode for the computation.
4162 X and MULT must have mode MODE. ADD may have a different mode;
4163 if ADD is VOIDmode, it is treated as having mode MODE.
4164 UNSIGNEDP is non-zero to do unsigned multiplication. */
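/* Editorial example (not in the original comment): in 32-bit SImode
   with UNSIGNEDP == 0, x = 0x40000000, mult = 2, add = 0 yields
   0x80000000, which does not fit in a signed 32-bit word, so the
   function should report an overflow.  */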
4166 bool
4167 const_mult_add_overflow_p (x, mult, add, mode, unsignedp)
4168 rtx x, mult, add;
4169 enum machine_mode mode;
4170 int unsignedp;
4172 tree type, mult_type, add_type, result;
4174 type = (*lang_hooks.types.type_for_mode) (mode, unsignedp);
4176 /* In order to get a proper overflow indication from an unsigned
4177 type, we have to pretend that it's a sizetype. */
4178 mult_type = type;
4179 if (unsignedp)
4181 mult_type = copy_node (type);
4182 TYPE_IS_SIZETYPE (mult_type) = 1;
4185 add_type = (GET_MODE (add) == VOIDmode ? mult_type
4186 : (*lang_hooks.types.type_for_mode) (GET_MODE (add), unsignedp));
4188 result = fold (build (PLUS_EXPR, mult_type,
4189 fold (build (MULT_EXPR, mult_type,
4190 make_tree (mult_type, x),
4191 make_tree (mult_type, mult))),
4192 make_tree (add_type, add)));
4194 return TREE_CONSTANT_OVERFLOW (result);
4197 /* Return an rtx representing the value of X * MULT + ADD.
4198 TARGET is a suggestion for where to store the result (an rtx).
4199 MODE is the machine mode for the computation.
4200 X and MULT must have mode MODE. ADD may have a different mode;
4201 if ADD is VOIDmode, it is treated as having mode MODE.
4202 UNSIGNEDP is non-zero to do unsigned multiplication.
4203 This may emit insns. */
4205 rtx
4206 expand_mult_add (x, target, mult, add, mode, unsignedp)
4207 rtx x, target, mult, add;
4208 enum machine_mode mode;
4209 int unsignedp;
4211 tree type = (*lang_hooks.types.type_for_mode) (mode, unsignedp);
4212 tree add_type = (GET_MODE (add) == VOIDmode
4213 ? type: (*lang_hooks.types.type_for_mode) (GET_MODE (add),
4214 unsignedp));
4215 tree result = fold (build (PLUS_EXPR, type,
4216 fold (build (MULT_EXPR, type,
4217 make_tree (type, x),
4218 make_tree (type, mult))),
4219 make_tree (add_type, add)));
4221 return expand_expr (result, target, VOIDmode, 0);
4224 /* Compute the logical-and of OP0 and OP1, storing it in TARGET
4225 and returning TARGET.
4227 If TARGET is 0, a pseudo-register or constant is returned. */
4229 rtx
4230 expand_and (mode, op0, op1, target)
4231 enum machine_mode mode;
4232 rtx op0, op1, target;
4234 rtx tem = 0;
4236 if (GET_MODE (op0) == VOIDmode && GET_MODE (op1) == VOIDmode)
4237 tem = simplify_binary_operation (AND, mode, op0, op1);
4238 if (tem == 0)
4239 tem = expand_binop (mode, and_optab, op0, op1, target, 0, OPTAB_LIB_WIDEN);
4241 if (target == 0)
4242 target = tem;
4243 else if (tem != target)
4244 emit_move_insn (target, tem);
4245 return target;
4248 /* Emit a store-flags instruction for comparison CODE on OP0 and OP1
4249 and storing in TARGET. Normally return TARGET.
4250 Return 0 if that cannot be done.
4252 MODE is the mode to use for OP0 and OP1 should they be CONST_INTs. If
4253 it is VOIDmode, they cannot both be CONST_INT.
4255 UNSIGNEDP is for the case where we have to widen the operands
4256 to perform the operation. It says to use zero-extension.
4258 NORMALIZEP is 1 if we should convert the result to be either zero
4259 or one. NORMALIZEP is -1 if we should convert the result to be
4260 either zero or -1. If NORMALIZEP is zero, the result will be left
4261 "raw" out of the scc insn. */
4263 rtx
4264 emit_store_flag (target, code, op0, op1, mode, unsignedp, normalizep)
4265 rtx target;
4266 enum rtx_code code;
4267 rtx op0, op1;
4268 enum machine_mode mode;
4269 int unsignedp;
4270 int normalizep;
4272 rtx subtarget;
4273 enum insn_code icode;
4274 enum machine_mode compare_mode;
4275 enum machine_mode target_mode = GET_MODE (target);
4276 rtx tem;
4277 rtx last = get_last_insn ();
4278 rtx pattern, comparison;
4280 /* ??? Ok to do this and then fail? */
4281 op0 = protect_from_queue (op0, 0);
4282 op1 = protect_from_queue (op1, 0);
4284 if (unsignedp)
4285 code = unsigned_condition (code);
4287 /* If one operand is constant, make it the second one. Only do this
4288 if the other operand is not constant as well. */
4290 if (swap_commutative_operands_p (op0, op1))
4292 tem = op0;
4293 op0 = op1;
4294 op1 = tem;
4295 code = swap_condition (code);
4298 if (mode == VOIDmode)
4299 mode = GET_MODE (op0);
4301 /* For some comparisons with 1 and -1, we can convert this to
4302 comparisons with zero. This will often produce more opportunities for
4303 store-flag insns. */
4305 switch (code)
4307 case LT:
4308 if (op1 == const1_rtx)
4309 op1 = const0_rtx, code = LE;
4310 break;
4311 case LE:
4312 if (op1 == constm1_rtx)
4313 op1 = const0_rtx, code = LT;
4314 break;
4315 case GE:
4316 if (op1 == const1_rtx)
4317 op1 = const0_rtx, code = GT;
4318 break;
4319 case GT:
4320 if (op1 == constm1_rtx)
4321 op1 = const0_rtx, code = GE;
4322 break;
4323 case GEU:
4324 if (op1 == const1_rtx)
4325 op1 = const0_rtx, code = NE;
4326 break;
4327 case LTU:
4328 if (op1 == const1_rtx)
4329 op1 = const0_rtx, code = EQ;
4330 break;
4331 default:
4332 break;
4335 /* If we are comparing a double-word integer with zero, we can convert
4336 the comparison into one involving a single word. */
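/* Editorial example (illustration only): with 32-bit words, a DImode
   test X == 0 becomes (highpart | lowpart) == 0 in SImode, and X < 0
   or X >= 0 only needs the sign of the high word.  */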
4337 if (GET_MODE_BITSIZE (mode) == BITS_PER_WORD * 2
4338 && GET_MODE_CLASS (mode) == MODE_INT
4339 && op1 == const0_rtx
4340 && (GET_CODE (op0) != MEM || ! MEM_VOLATILE_P (op0)))
4342 if (code == EQ || code == NE)
4344 /* Do a logical OR of the two words and compare the result. */
4345 rtx op0h = gen_highpart (word_mode, op0);
4346 rtx op0l = gen_lowpart (word_mode, op0);
4347 rtx op0both = expand_binop (word_mode, ior_optab, op0h, op0l,
4348 NULL_RTX, unsignedp, OPTAB_DIRECT);
4349 if (op0both != 0)
4350 return emit_store_flag (target, code, op0both, op1, word_mode,
4351 unsignedp, normalizep);
4353 else if (code == LT || code == GE)
4354 /* If testing the sign bit, can just test on high word. */
4355 return emit_store_flag (target, code, gen_highpart (word_mode, op0),
4356 op1, word_mode, unsignedp, normalizep);
4359 /* From now on, we won't change CODE, so set ICODE now. */
4360 icode = setcc_gen_code[(int) code];
4362 /* If this is A < 0 or A >= 0, we can do this by taking the ones
4363 complement of A (for GE) and shifting the sign bit to the low bit. */
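/* Editorial example (illustration only): in an 8-bit mode, a 0/1 value
   for A < 0 is the logical shift A >> 7, e.g. 0xF0 >> 7 = 1 and
   0x0F >> 7 = 0; for A >= 0 the same shift is applied to ~A.  */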
4364 if (op1 == const0_rtx && (code == LT || code == GE)
4365 && GET_MODE_CLASS (mode) == MODE_INT
4366 && (normalizep || STORE_FLAG_VALUE == 1
4367 || (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
4368 && ((STORE_FLAG_VALUE & GET_MODE_MASK (mode))
4369 == (HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1)))))
4371 subtarget = target;
4373 /* If the result is to be wider than OP0, it is best to convert it
4374 first. If it is to be narrower, it is *incorrect* to convert it
4375 first. */
4376 if (GET_MODE_SIZE (target_mode) > GET_MODE_SIZE (mode))
4378 op0 = protect_from_queue (op0, 0);
4379 op0 = convert_modes (target_mode, mode, op0, 0);
4380 mode = target_mode;
4383 if (target_mode != mode)
4384 subtarget = 0;
4386 if (code == GE)
4387 op0 = expand_unop (mode, one_cmpl_optab, op0,
4388 ((STORE_FLAG_VALUE == 1 || normalizep)
4389 ? 0 : subtarget), 0);
4391 if (STORE_FLAG_VALUE == 1 || normalizep)
4392 /* If we are supposed to produce a 0/1 value, we want to do
4393 a logical shift from the sign bit to the low-order bit; for
4394 a -1/0 value, we do an arithmetic shift. */
4395 op0 = expand_shift (RSHIFT_EXPR, mode, op0,
4396 size_int (GET_MODE_BITSIZE (mode) - 1),
4397 subtarget, normalizep != -1);
4399 if (mode != target_mode)
4400 op0 = convert_modes (target_mode, mode, op0, 0);
4402 return op0;
4405 if (icode != CODE_FOR_nothing)
4407 insn_operand_predicate_fn pred;
4409 /* We think we may be able to do this with a scc insn. Emit the
4410 comparison and then the scc insn.
4412 compare_from_rtx may call emit_queue, which would be deleted below
4413 if the scc insn fails. So call it ourselves before setting LAST.
4414 Likewise for do_pending_stack_adjust. */
4416 emit_queue ();
4417 do_pending_stack_adjust ();
4418 last = get_last_insn ();
4420 comparison
4421 = compare_from_rtx (op0, op1, code, unsignedp, mode, NULL_RTX);
4422 if (GET_CODE (comparison) == CONST_INT)
4423 return (comparison == const0_rtx ? const0_rtx
4424 : normalizep == 1 ? const1_rtx
4425 : normalizep == -1 ? constm1_rtx
4426 : const_true_rtx);
4428 /* The code of COMPARISON may not match CODE if compare_from_rtx
4429 decided to swap its operands and reverse the original code.
4431 We know that compare_from_rtx returns either a CONST_INT or
4432 a new comparison code, so it is safe to just extract the
4433 code from COMPARISON. */
4434 code = GET_CODE (comparison);
4436 /* Get a reference to the target in the proper mode for this insn. */
4437 compare_mode = insn_data[(int) icode].operand[0].mode;
4438 subtarget = target;
4439 pred = insn_data[(int) icode].operand[0].predicate;
4440 if (preserve_subexpressions_p ()
4441 || ! (*pred) (subtarget, compare_mode))
4442 subtarget = gen_reg_rtx (compare_mode);
4444 pattern = GEN_FCN (icode) (subtarget);
4445 if (pattern)
4447 emit_insn (pattern);
4449 /* If we are converting to a wider mode, first convert to
4450 TARGET_MODE, then normalize. This produces better combining
4451 opportunities on machines that have a SIGN_EXTRACT when we are
4452 testing a single bit. This mostly benefits the 68k.
4454 If STORE_FLAG_VALUE does not have the sign bit set when
4455 interpreted in COMPARE_MODE, we can do this conversion as
4456 unsigned, which is usually more efficient. */
4457 if (GET_MODE_SIZE (target_mode) > GET_MODE_SIZE (compare_mode))
4459 convert_move (target, subtarget,
4460 (GET_MODE_BITSIZE (compare_mode)
4461 <= HOST_BITS_PER_WIDE_INT)
4462 && 0 == (STORE_FLAG_VALUE
4463 & ((HOST_WIDE_INT) 1
4464 << (GET_MODE_BITSIZE (compare_mode) -1))));
4465 op0 = target;
4466 compare_mode = target_mode;
4468 else
4469 op0 = subtarget;
4471 /* If we want to keep subexpressions around, don't reuse our
4472 last target. */
4474 if (preserve_subexpressions_p ())
4475 subtarget = 0;
4477 /* Now normalize to the proper value in COMPARE_MODE. Sometimes
4478 we don't have to do anything. */
4479 if (normalizep == 0 || normalizep == STORE_FLAG_VALUE)
4481 /* STORE_FLAG_VALUE might be the most negative number, so write
4482 the comparison this way to avoid a compile-time warning. */
4483 else if (- normalizep == STORE_FLAG_VALUE)
4484 op0 = expand_unop (compare_mode, neg_optab, op0, subtarget, 0);
4486 /* We don't want to use STORE_FLAG_VALUE < 0 below since this
4487 makes it hard to use a value of just the sign bit due to
4488 ANSI integer constant typing rules. */
4489 else if (GET_MODE_BITSIZE (compare_mode) <= HOST_BITS_PER_WIDE_INT
4490 && (STORE_FLAG_VALUE
4491 & ((HOST_WIDE_INT) 1
4492 << (GET_MODE_BITSIZE (compare_mode) - 1))))
4493 op0 = expand_shift (RSHIFT_EXPR, compare_mode, op0,
4494 size_int (GET_MODE_BITSIZE (compare_mode) - 1),
4495 subtarget, normalizep == 1);
4496 else if (STORE_FLAG_VALUE & 1)
4498 op0 = expand_and (compare_mode, op0, const1_rtx, subtarget);
4499 if (normalizep == -1)
4500 op0 = expand_unop (compare_mode, neg_optab, op0, op0, 0);
4502 else
4503 abort ();
4505 /* If we were converting to a smaller mode, do the
4506 conversion now. */
4507 if (target_mode != compare_mode)
4509 convert_move (target, op0, 0);
4510 return target;
4512 else
4513 return op0;
4517 delete_insns_since (last);
4519 /* If expensive optimizations, use different pseudo registers for each
4520 insn, instead of reusing the same pseudo. This leads to better CSE,
4521 but slows down the compiler, since there are more pseudos. */
4522 subtarget = (!flag_expensive_optimizations
4523 && (target_mode == mode)) ? target : NULL_RTX;
4525 /* If we reached here, we can't do this with a scc insn. However, there
4526 are some comparisons that can be done directly. For example, if
4527 this is an equality comparison of integers, we can try to exclusive-or
4528 (or subtract) the two operands and use a recursive call to try the
4529 comparison with zero. Don't do any of these cases if branches are
4530 very cheap. */
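/* Editorial example (illustration only): A == B is recast as
   (A ^ B) == 0 -- e.g. 5 ^ 5 = 0 -- falling back to (A - B) == 0 when
   no xor pattern applies; the recursive emit_store_flag call then
   handles the comparison against zero.  */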
4532 if (BRANCH_COST > 0
4533 && GET_MODE_CLASS (mode) == MODE_INT && (code == EQ || code == NE)
4534 && op1 != const0_rtx)
4536 tem = expand_binop (mode, xor_optab, op0, op1, subtarget, 1,
4537 OPTAB_WIDEN);
4539 if (tem == 0)
4540 tem = expand_binop (mode, sub_optab, op0, op1, subtarget, 1,
4541 OPTAB_WIDEN);
4542 if (tem != 0)
4543 tem = emit_store_flag (target, code, tem, const0_rtx,
4544 mode, unsignedp, normalizep);
4545 if (tem == 0)
4546 delete_insns_since (last);
4547 return tem;
4550 /* Some other cases we can do are EQ, NE, LE, and GT comparisons with
4551 the constant zero. Reject all other comparisons at this point. Only
4552 do LE and GT if branches are expensive since they are expensive on
4553 2-operand machines. */
4555 if (BRANCH_COST == 0
4556 || GET_MODE_CLASS (mode) != MODE_INT || op1 != const0_rtx
4557 || (code != EQ && code != NE
4558 && (BRANCH_COST <= 1 || (code != LE && code != GT))))
4559 return 0;
4561 /* See what we need to return. We can only return a 1, -1, or the
4562 sign bit. */
4564 if (normalizep == 0)
4566 if (STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
4567 normalizep = STORE_FLAG_VALUE;
4569 else if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
4570 && ((STORE_FLAG_VALUE & GET_MODE_MASK (mode))
4571 == (unsigned HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1)))
4572 ; /* STORE_FLAG_VALUE is the sign-bit value; usable as-is. */
4573 else
4574 return 0;
4577 /* Try to put the result of the comparison in the sign bit. Assume we can't
4578 do the necessary operation below. */
4580 tem = 0;
4582 /* To see if A <= 0, compute (A | (A - 1)). A <= 0 iff that result has
4583 the sign bit set. */
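/* Editorial check (illustration only), in an 8-bit mode:
   A = 0:   0 | 0xFF = 0xFF, sign bit set (0 <= 0).
   A = 5:   5 | 4 = 0x05, sign bit clear (5 > 0).
   A = -3:  0xFD | 0xFC = 0xFD, sign bit set (-3 <= 0).  */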
4585 if (code == LE)
4587 /* This is destructive, so SUBTARGET can't be OP0. */
4588 if (rtx_equal_p (subtarget, op0))
4589 subtarget = 0;
4591 tem = expand_binop (mode, sub_optab, op0, const1_rtx, subtarget, 0,
4592 OPTAB_WIDEN);
4593 if (tem)
4594 tem = expand_binop (mode, ior_optab, op0, tem, subtarget, 0,
4595 OPTAB_WIDEN);
4598 /* To see if A > 0, compute (((signed) A) >> BITS) - A, where BITS is the
4599 number of bits in the mode of OP0, minus one. */
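/* Editorial check (illustration only), in an 8-bit mode (BITS = 7),
   using an arithmetic right shift:
   A = 5:   (5 >> 7) - 5 = 0 - 5 = -5, sign bit set (5 > 0).
   A = 0:   (0 >> 7) - 0 = 0, sign bit clear.
   A = -3:  (-3 >> 7) - (-3) = -1 + 3 = 2, sign bit clear.  */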
4601 if (code == GT)
4603 if (rtx_equal_p (subtarget, op0))
4604 subtarget = 0;
4606 tem = expand_shift (RSHIFT_EXPR, mode, op0,
4607 size_int (GET_MODE_BITSIZE (mode) - 1),
4608 subtarget, 0);
4609 tem = expand_binop (mode, sub_optab, tem, op0, subtarget, 0,
4610 OPTAB_WIDEN);
4613 if (code == EQ || code == NE)
4615 /* For EQ or NE, one way to do the comparison is to apply an operation
4616 that converts the operand into a positive number if it is non-zero
4617 or zero if it was originally zero. Then, for EQ, we subtract 1 and
4618 for NE we negate. This puts the result in the sign bit. Then we
4619 normalize with a shift, if needed.
4621 Two operations that can do the above actions are ABS and FFS, so try
4622 them. If that doesn't work, and MODE is smaller than a full word,
4623 we can use zero-extension to the wider mode (an unsigned conversion)
4624 as the operation. */
4626 /* Note that ABS doesn't yield a positive number for INT_MIN, but
4627 that is compensated by the subsequent overflow when subtracting
4628 one / negating. */
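/* Editorial check (illustration only): for EQ the sequence computes
   abs (A) - 1: A = 0 gives -1 (sign bit set), any A != 0 gives a value
   >= 0; A = INT_MIN first yields abs = INT_MIN, and the subtraction
   overflows to INT_MAX, whose sign bit is still clear, as the note
   above explains.  For NE, -abs (A) is negative exactly when A != 0
   (again INT_MIN works out via overflow).  */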
4630 if (abs_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
4631 tem = expand_unop (mode, abs_optab, op0, subtarget, 1);
4632 else if (ffs_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
4633 tem = expand_unop (mode, ffs_optab, op0, subtarget, 1);
4634 else if (GET_MODE_SIZE (mode) < UNITS_PER_WORD)
4636 op0 = protect_from_queue (op0, 0);
4637 tem = convert_modes (word_mode, mode, op0, 1);
4638 mode = word_mode;
4641 if (tem != 0)
4643 if (code == EQ)
4644 tem = expand_binop (mode, sub_optab, tem, const1_rtx, subtarget,
4645 0, OPTAB_WIDEN);
4646 else
4647 tem = expand_unop (mode, neg_optab, tem, subtarget, 0);
4650 /* If we couldn't do it that way, for NE we can "or" the two's complement
4651 of the value with itself. For EQ, we take the one's complement of
4652 that "or", which is an extra insn, so we only handle EQ if branches
4653 are expensive. */
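/* Editorial check (illustration only): (-A | A) has the sign bit set
   exactly when A != 0, e.g. A = 5 gives -5 | 5 (negative), A = 0 gives
   0, and A = INT_MIN gives INT_MIN | INT_MIN = INT_MIN.  */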
4655 if (tem == 0 && (code == NE || BRANCH_COST > 1))
4657 if (rtx_equal_p (subtarget, op0))
4658 subtarget = 0;
4660 tem = expand_unop (mode, neg_optab, op0, subtarget, 0);
4661 tem = expand_binop (mode, ior_optab, tem, op0, subtarget, 0,
4662 OPTAB_WIDEN);
4664 if (tem && code == EQ)
4665 tem = expand_unop (mode, one_cmpl_optab, tem, subtarget, 0);
4669 if (tem && normalizep)
4670 tem = expand_shift (RSHIFT_EXPR, mode, tem,
4671 size_int (GET_MODE_BITSIZE (mode) - 1),
4672 subtarget, normalizep == 1);
4674 if (tem)
4676 if (GET_MODE (tem) != target_mode)
4678 convert_move (target, tem, 0);
4679 tem = target;
4681 else if (!subtarget)
4683 emit_move_insn (target, tem);
4684 tem = target;
4687 else
4688 delete_insns_since (last);
4690 return tem;
4693 /* Like emit_store_flag, but always succeeds. */
4695 rtx
4696 emit_store_flag_force (target, code, op0, op1, mode, unsignedp, normalizep)
4697 rtx target;
4698 enum rtx_code code;
4699 rtx op0, op1;
4700 enum machine_mode mode;
4701 int unsignedp;
4702 int normalizep;
4704 rtx tem, label;
4706 /* First see if emit_store_flag can do the job. */
4707 tem = emit_store_flag (target, code, op0, op1, mode, unsignedp, normalizep);
4708 if (tem != 0)
4709 return tem;
4711 if (normalizep == 0)
4712 normalizep = 1;
4714 /* If this failed, we have to do this with set/compare/jump/set code. */
4716 if (GET_CODE (target) != REG
4717 || reg_mentioned_p (target, op0) || reg_mentioned_p (target, op1))
4718 target = gen_reg_rtx (GET_MODE (target));
4720 emit_move_insn (target, const1_rtx);
4721 label = gen_label_rtx ();
4722 do_compare_rtx_and_jump (op0, op1, code, unsignedp, mode, NULL_RTX,
4723 NULL_RTX, label);
4725 emit_move_insn (target, const0_rtx);
4726 emit_label (label);
4728 return target;
4731 /* Perform possibly multi-word comparison and conditional jump to LABEL
4732 if ARG1 OP ARG2 is true, where ARG1 and ARG2 are of mode MODE.
4734 The algorithm is based on the code in expr.c:do_jump.
4736 Note that this does not perform a general comparison. Only variants
4737 generated within expmed.c are correctly handled, others abort (but could
4738 be handled if needed). */
4740 static void
4741 do_cmp_and_jump (arg1, arg2, op, mode, label)
4742 rtx arg1, arg2, label;
4743 enum rtx_code op;
4744 enum machine_mode mode;
4746 /* If this mode is an integer too wide to compare properly,
4747 compare word by word. Rely on cse to optimize constant cases. */
4749 if (GET_MODE_CLASS (mode) == MODE_INT
4750 && ! can_compare_p (op, mode, ccp_jump))
4752 rtx label2 = gen_label_rtx ();
4754 switch (op)
4756 case LTU:
4757 do_jump_by_parts_greater_rtx (mode, 1, arg2, arg1, label2, label);
4758 break;
4760 case LEU:
4761 do_jump_by_parts_greater_rtx (mode, 1, arg1, arg2, label, label2);
4762 break;
4764 case LT:
4765 do_jump_by_parts_greater_rtx (mode, 0, arg2, arg1, label2, label);
4766 break;
4768 case GT:
4769 do_jump_by_parts_greater_rtx (mode, 0, arg1, arg2, label2, label);
4770 break;
4772 case GE:
4773 do_jump_by_parts_greater_rtx (mode, 0, arg2, arg1, label, label2);
4774 break;
4776 /* do_jump_by_parts_equality_rtx compares with zero. Luckily
4777 those are the only equality operations we do. */
4778 case EQ:
4779 if (arg2 != const0_rtx || mode != GET_MODE (arg1))
4780 abort ();
4781 do_jump_by_parts_equality_rtx (arg1, label2, label);
4782 break;
4784 case NE:
4785 if (arg2 != const0_rtx || mode != GET_MODE (arg1))
4786 abort ();
4787 do_jump_by_parts_equality_rtx (arg1, label, label2);
4788 break;
4790 default:
4791 abort ();
4794 emit_label (label2);
4796 else
4797 emit_cmp_and_jump_insns (arg1, arg2, op, NULL_RTX, mode, 0, label);