2011-06-16 Tom de Vries <tom@codesourcery.com>
[official-gcc.git] / gcc / expmed.c
blob314fac79c284c679c43765ee1f00f33654a5381a
1 /* Medium-level subroutines: convert bit-field store and extract
2 and shifts, multiplies and divides to rtl instructions.
3 Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
4 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
5 2011
6 Free Software Foundation, Inc.
8 This file is part of GCC.
10 GCC is free software; you can redistribute it and/or modify it under
11 the terms of the GNU General Public License as published by the Free
12 Software Foundation; either version 3, or (at your option) any later
13 version.
15 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
16 WARRANTY; without even the implied warranty of MERCHANTABILITY or
17 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
18 for more details.
20 You should have received a copy of the GNU General Public License
21 along with GCC; see the file COPYING3. If not see
22 <http://www.gnu.org/licenses/>. */
25 #include "config.h"
26 #include "system.h"
27 #include "coretypes.h"
28 #include "tm.h"
29 #include "diagnostic-core.h"
30 #include "rtl.h"
31 #include "tree.h"
32 #include "tm_p.h"
33 #include "flags.h"
34 #include "insn-config.h"
35 #include "expr.h"
36 #include "optabs.h"
37 #include "recog.h"
38 #include "langhooks.h"
39 #include "df.h"
40 #include "target.h"
41 #include "expmed.h"
43 struct target_expmed default_target_expmed;
44 #if SWITCHABLE_TARGET
45 struct target_expmed *this_target_expmed = &default_target_expmed;
46 #endif
48 static void store_fixed_bit_field (rtx, unsigned HOST_WIDE_INT,
49 unsigned HOST_WIDE_INT,
50 unsigned HOST_WIDE_INT, rtx);
51 static void store_split_bit_field (rtx, unsigned HOST_WIDE_INT,
52 unsigned HOST_WIDE_INT, rtx);
53 static rtx extract_fixed_bit_field (enum machine_mode, rtx,
54 unsigned HOST_WIDE_INT,
55 unsigned HOST_WIDE_INT,
56 unsigned HOST_WIDE_INT, rtx, int, bool);
57 static rtx mask_rtx (enum machine_mode, int, int, int);
58 static rtx lshift_value (enum machine_mode, rtx, int, int);
59 static rtx extract_split_bit_field (rtx, unsigned HOST_WIDE_INT,
60 unsigned HOST_WIDE_INT, int);
61 static void do_cmp_and_jump (rtx, rtx, enum rtx_code, enum machine_mode, rtx);
62 static rtx expand_smod_pow2 (enum machine_mode, rtx, HOST_WIDE_INT);
63 static rtx expand_sdiv_pow2 (enum machine_mode, rtx, HOST_WIDE_INT);
65 /* Test whether a value is zero of a power of two. */
66 #define EXACT_POWER_OF_2_OR_ZERO_P(x) (((x) & ((x) - 1)) == 0)
68 #ifndef SLOW_UNALIGNED_ACCESS
69 #define SLOW_UNALIGNED_ACCESS(MODE, ALIGN) STRICT_ALIGNMENT
70 #endif
73 /* Reduce conditional compilation elsewhere. */
74 #ifndef HAVE_insv
75 #define HAVE_insv 0
76 #define CODE_FOR_insv CODE_FOR_nothing
77 #define gen_insv(a,b,c,d) NULL_RTX
78 #endif
79 #ifndef HAVE_extv
80 #define HAVE_extv 0
81 #define CODE_FOR_extv CODE_FOR_nothing
82 #define gen_extv(a,b,c,d) NULL_RTX
83 #endif
84 #ifndef HAVE_extzv
85 #define HAVE_extzv 0
86 #define CODE_FOR_extzv CODE_FOR_nothing
87 #define gen_extzv(a,b,c,d) NULL_RTX
88 #endif
90 void
91 init_expmed (void)
93 struct
95 struct rtx_def reg; rtunion reg_fld[2];
96 struct rtx_def plus; rtunion plus_fld1;
97 struct rtx_def neg;
98 struct rtx_def mult; rtunion mult_fld1;
99 struct rtx_def sdiv; rtunion sdiv_fld1;
100 struct rtx_def udiv; rtunion udiv_fld1;
101 struct rtx_def zext;
102 struct rtx_def sdiv_32; rtunion sdiv_32_fld1;
103 struct rtx_def smod_32; rtunion smod_32_fld1;
104 struct rtx_def wide_mult; rtunion wide_mult_fld1;
105 struct rtx_def wide_lshr; rtunion wide_lshr_fld1;
106 struct rtx_def wide_trunc;
107 struct rtx_def shift; rtunion shift_fld1;
108 struct rtx_def shift_mult; rtunion shift_mult_fld1;
109 struct rtx_def shift_add; rtunion shift_add_fld1;
110 struct rtx_def shift_sub0; rtunion shift_sub0_fld1;
111 struct rtx_def shift_sub1; rtunion shift_sub1_fld1;
112 } all;
114 rtx pow2[MAX_BITS_PER_WORD];
115 rtx cint[MAX_BITS_PER_WORD];
116 int m, n;
117 enum machine_mode mode, wider_mode;
118 int speed;
121 for (m = 1; m < MAX_BITS_PER_WORD; m++)
123 pow2[m] = GEN_INT ((HOST_WIDE_INT) 1 << m);
124 cint[m] = GEN_INT (m);
126 memset (&all, 0, sizeof all);
128 PUT_CODE (&all.reg, REG);
129 /* Avoid using hard regs in ways which may be unsupported. */
130 SET_REGNO (&all.reg, LAST_VIRTUAL_REGISTER + 1);
132 PUT_CODE (&all.plus, PLUS);
133 XEXP (&all.plus, 0) = &all.reg;
134 XEXP (&all.plus, 1) = &all.reg;
136 PUT_CODE (&all.neg, NEG);
137 XEXP (&all.neg, 0) = &all.reg;
139 PUT_CODE (&all.mult, MULT);
140 XEXP (&all.mult, 0) = &all.reg;
141 XEXP (&all.mult, 1) = &all.reg;
143 PUT_CODE (&all.sdiv, DIV);
144 XEXP (&all.sdiv, 0) = &all.reg;
145 XEXP (&all.sdiv, 1) = &all.reg;
147 PUT_CODE (&all.udiv, UDIV);
148 XEXP (&all.udiv, 0) = &all.reg;
149 XEXP (&all.udiv, 1) = &all.reg;
151 PUT_CODE (&all.sdiv_32, DIV);
152 XEXP (&all.sdiv_32, 0) = &all.reg;
153 XEXP (&all.sdiv_32, 1) = 32 < MAX_BITS_PER_WORD ? cint[32] : GEN_INT (32);
155 PUT_CODE (&all.smod_32, MOD);
156 XEXP (&all.smod_32, 0) = &all.reg;
157 XEXP (&all.smod_32, 1) = XEXP (&all.sdiv_32, 1);
159 PUT_CODE (&all.zext, ZERO_EXTEND);
160 XEXP (&all.zext, 0) = &all.reg;
162 PUT_CODE (&all.wide_mult, MULT);
163 XEXP (&all.wide_mult, 0) = &all.zext;
164 XEXP (&all.wide_mult, 1) = &all.zext;
166 PUT_CODE (&all.wide_lshr, LSHIFTRT);
167 XEXP (&all.wide_lshr, 0) = &all.wide_mult;
169 PUT_CODE (&all.wide_trunc, TRUNCATE);
170 XEXP (&all.wide_trunc, 0) = &all.wide_lshr;
172 PUT_CODE (&all.shift, ASHIFT);
173 XEXP (&all.shift, 0) = &all.reg;
175 PUT_CODE (&all.shift_mult, MULT);
176 XEXP (&all.shift_mult, 0) = &all.reg;
178 PUT_CODE (&all.shift_add, PLUS);
179 XEXP (&all.shift_add, 0) = &all.shift_mult;
180 XEXP (&all.shift_add, 1) = &all.reg;
182 PUT_CODE (&all.shift_sub0, MINUS);
183 XEXP (&all.shift_sub0, 0) = &all.shift_mult;
184 XEXP (&all.shift_sub0, 1) = &all.reg;
186 PUT_CODE (&all.shift_sub1, MINUS);
187 XEXP (&all.shift_sub1, 0) = &all.reg;
188 XEXP (&all.shift_sub1, 1) = &all.shift_mult;
190 for (speed = 0; speed < 2; speed++)
192 crtl->maybe_hot_insn_p = speed;
193 zero_cost[speed] = rtx_cost (const0_rtx, SET, speed);
195 for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT);
196 mode != VOIDmode;
197 mode = GET_MODE_WIDER_MODE (mode))
199 PUT_MODE (&all.reg, mode);
200 PUT_MODE (&all.plus, mode);
201 PUT_MODE (&all.neg, mode);
202 PUT_MODE (&all.mult, mode);
203 PUT_MODE (&all.sdiv, mode);
204 PUT_MODE (&all.udiv, mode);
205 PUT_MODE (&all.sdiv_32, mode);
206 PUT_MODE (&all.smod_32, mode);
207 PUT_MODE (&all.wide_trunc, mode);
208 PUT_MODE (&all.shift, mode);
209 PUT_MODE (&all.shift_mult, mode);
210 PUT_MODE (&all.shift_add, mode);
211 PUT_MODE (&all.shift_sub0, mode);
212 PUT_MODE (&all.shift_sub1, mode);
214 add_cost[speed][mode] = rtx_cost (&all.plus, SET, speed);
215 neg_cost[speed][mode] = rtx_cost (&all.neg, SET, speed);
216 mul_cost[speed][mode] = rtx_cost (&all.mult, SET, speed);
217 sdiv_cost[speed][mode] = rtx_cost (&all.sdiv, SET, speed);
218 udiv_cost[speed][mode] = rtx_cost (&all.udiv, SET, speed);
220 sdiv_pow2_cheap[speed][mode] = (rtx_cost (&all.sdiv_32, SET, speed)
221 <= 2 * add_cost[speed][mode]);
222 smod_pow2_cheap[speed][mode] = (rtx_cost (&all.smod_32, SET, speed)
223 <= 4 * add_cost[speed][mode]);
225 wider_mode = GET_MODE_WIDER_MODE (mode);
226 if (wider_mode != VOIDmode)
228 PUT_MODE (&all.zext, wider_mode);
229 PUT_MODE (&all.wide_mult, wider_mode);
230 PUT_MODE (&all.wide_lshr, wider_mode);
231 XEXP (&all.wide_lshr, 1) = GEN_INT (GET_MODE_BITSIZE (mode));
233 mul_widen_cost[speed][wider_mode]
234 = rtx_cost (&all.wide_mult, SET, speed);
235 mul_highpart_cost[speed][mode]
236 = rtx_cost (&all.wide_trunc, SET, speed);
239 shift_cost[speed][mode][0] = 0;
240 shiftadd_cost[speed][mode][0] = shiftsub0_cost[speed][mode][0]
241 = shiftsub1_cost[speed][mode][0] = add_cost[speed][mode];
243 n = MIN (MAX_BITS_PER_WORD, GET_MODE_BITSIZE (mode));
244 for (m = 1; m < n; m++)
246 XEXP (&all.shift, 1) = cint[m];
247 XEXP (&all.shift_mult, 1) = pow2[m];
249 shift_cost[speed][mode][m] = rtx_cost (&all.shift, SET, speed);
250 shiftadd_cost[speed][mode][m] = rtx_cost (&all.shift_add, SET, speed);
251 shiftsub0_cost[speed][mode][m] = rtx_cost (&all.shift_sub0, SET, speed);
252 shiftsub1_cost[speed][mode][m] = rtx_cost (&all.shift_sub1, SET, speed);
256 if (alg_hash_used_p)
257 memset (alg_hash, 0, sizeof (alg_hash));
258 else
259 alg_hash_used_p = true;
260 default_rtl_profile ();
263 /* Return an rtx representing minus the value of X.
264 MODE is the intended mode of the result,
265 useful if X is a CONST_INT. */
268 negate_rtx (enum machine_mode mode, rtx x)
270 rtx result = simplify_unary_operation (NEG, mode, x, mode);
272 if (result == 0)
273 result = expand_unop (mode, neg_optab, x, NULL_RTX, 0);
275 return result;
278 /* Report on the availability of insv/extv/extzv and the desired mode
279 of each of their operands. Returns MAX_MACHINE_MODE if HAVE_foo
280 is false; else the mode of the specified operand. If OPNO is -1,
281 all the caller cares about is whether the insn is available. */
282 enum machine_mode
283 mode_for_extraction (enum extraction_pattern pattern, int opno)
285 const struct insn_data_d *data;
287 switch (pattern)
289 case EP_insv:
290 if (HAVE_insv)
292 data = &insn_data[CODE_FOR_insv];
293 break;
295 return MAX_MACHINE_MODE;
297 case EP_extv:
298 if (HAVE_extv)
300 data = &insn_data[CODE_FOR_extv];
301 break;
303 return MAX_MACHINE_MODE;
305 case EP_extzv:
306 if (HAVE_extzv)
308 data = &insn_data[CODE_FOR_extzv];
309 break;
311 return MAX_MACHINE_MODE;
313 default:
314 gcc_unreachable ();
317 if (opno == -1)
318 return VOIDmode;
320 /* Everyone who uses this function used to follow it with
321 if (result == VOIDmode) result = word_mode; */
322 if (data->operand[opno].mode == VOIDmode)
323 return word_mode;
324 return data->operand[opno].mode;
327 /* A subroutine of store_bit_field, with the same arguments. Return true
328 if the operation could be implemented.
330 If FALLBACK_P is true, fall back to store_fixed_bit_field if we have
331 no other way of implementing the operation. If FALLBACK_P is false,
332 return false instead. */
334 static bool
335 store_bit_field_1 (rtx str_rtx, unsigned HOST_WIDE_INT bitsize,
336 unsigned HOST_WIDE_INT bitnum, enum machine_mode fieldmode,
337 rtx value, bool fallback_p)
339 unsigned int unit
340 = (MEM_P (str_rtx)) ? BITS_PER_UNIT : BITS_PER_WORD;
341 unsigned HOST_WIDE_INT offset, bitpos;
342 rtx op0 = str_rtx;
343 int byte_offset;
344 rtx orig_value;
346 enum machine_mode op_mode = mode_for_extraction (EP_insv, 3);
348 while (GET_CODE (op0) == SUBREG)
350 /* The following line once was done only if WORDS_BIG_ENDIAN,
351 but I think that is a mistake. WORDS_BIG_ENDIAN is
352 meaningful at a much higher level; when structures are copied
353 between memory and regs, the higher-numbered regs
354 always get higher addresses. */
355 int inner_mode_size = GET_MODE_SIZE (GET_MODE (SUBREG_REG (op0)));
356 int outer_mode_size = GET_MODE_SIZE (GET_MODE (op0));
358 byte_offset = 0;
360 /* Paradoxical subregs need special handling on big endian machines. */
361 if (SUBREG_BYTE (op0) == 0 && inner_mode_size < outer_mode_size)
363 int difference = inner_mode_size - outer_mode_size;
365 if (WORDS_BIG_ENDIAN)
366 byte_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
367 if (BYTES_BIG_ENDIAN)
368 byte_offset += difference % UNITS_PER_WORD;
370 else
371 byte_offset = SUBREG_BYTE (op0);
373 bitnum += byte_offset * BITS_PER_UNIT;
374 op0 = SUBREG_REG (op0);
377 /* No action is needed if the target is a register and if the field
378 lies completely outside that register. This can occur if the source
379 code contains an out-of-bounds access to a small array. */
380 if (REG_P (op0) && bitnum >= GET_MODE_BITSIZE (GET_MODE (op0)))
381 return true;
383 /* Use vec_set patterns for inserting parts of vectors whenever
384 available. */
385 if (VECTOR_MODE_P (GET_MODE (op0))
386 && !MEM_P (op0)
387 && optab_handler (vec_set_optab, GET_MODE (op0)) != CODE_FOR_nothing
388 && fieldmode == GET_MODE_INNER (GET_MODE (op0))
389 && bitsize == GET_MODE_BITSIZE (GET_MODE_INNER (GET_MODE (op0)))
390 && !(bitnum % GET_MODE_BITSIZE (GET_MODE_INNER (GET_MODE (op0)))))
392 struct expand_operand ops[3];
393 enum machine_mode outermode = GET_MODE (op0);
394 enum machine_mode innermode = GET_MODE_INNER (outermode);
395 enum insn_code icode = optab_handler (vec_set_optab, outermode);
396 int pos = bitnum / GET_MODE_BITSIZE (innermode);
398 create_fixed_operand (&ops[0], op0);
399 create_input_operand (&ops[1], value, innermode);
400 create_integer_operand (&ops[2], pos);
401 if (maybe_expand_insn (icode, 3, ops))
402 return true;
405 /* If the target is a register, overwriting the entire object, or storing
406 a full-word or multi-word field can be done with just a SUBREG.
408 If the target is memory, storing any naturally aligned field can be
409 done with a simple store. For targets that support fast unaligned
410 memory, any naturally sized, unit aligned field can be done directly. */
412 offset = bitnum / unit;
413 bitpos = bitnum % unit;
414 byte_offset = (bitnum % BITS_PER_WORD) / BITS_PER_UNIT
415 + (offset * UNITS_PER_WORD);
417 if (bitpos == 0
418 && bitsize == GET_MODE_BITSIZE (fieldmode)
419 && (!MEM_P (op0)
420 ? ((GET_MODE_SIZE (fieldmode) >= UNITS_PER_WORD
421 || GET_MODE_SIZE (GET_MODE (op0)) == GET_MODE_SIZE (fieldmode))
422 && ((GET_MODE (op0) == fieldmode && byte_offset == 0)
423 || validate_subreg (fieldmode, GET_MODE (op0), op0,
424 byte_offset)))
425 : (! SLOW_UNALIGNED_ACCESS (fieldmode, MEM_ALIGN (op0))
426 || (offset * BITS_PER_UNIT % bitsize == 0
427 && MEM_ALIGN (op0) % GET_MODE_BITSIZE (fieldmode) == 0))))
429 if (MEM_P (op0))
430 op0 = adjust_address (op0, fieldmode, offset);
431 else if (GET_MODE (op0) != fieldmode)
432 op0 = simplify_gen_subreg (fieldmode, op0, GET_MODE (op0),
433 byte_offset);
434 emit_move_insn (op0, value);
435 return true;
438 /* Make sure we are playing with integral modes. Pun with subregs
439 if we aren't. This must come after the entire register case above,
440 since that case is valid for any mode. The following cases are only
441 valid for integral modes. */
443 enum machine_mode imode = int_mode_for_mode (GET_MODE (op0));
444 if (imode != GET_MODE (op0))
446 if (MEM_P (op0))
447 op0 = adjust_address (op0, imode, 0);
448 else
450 gcc_assert (imode != BLKmode);
451 op0 = gen_lowpart (imode, op0);
456 /* We may be accessing data outside the field, which means
457 we can alias adjacent data. */
458 if (MEM_P (op0))
460 op0 = shallow_copy_rtx (op0);
461 set_mem_alias_set (op0, 0);
462 set_mem_expr (op0, 0);
465 /* If OP0 is a register, BITPOS must count within a word.
466 But as we have it, it counts within whatever size OP0 now has.
467 On a bigendian machine, these are not the same, so convert. */
468 if (BYTES_BIG_ENDIAN
469 && !MEM_P (op0)
470 && unit > GET_MODE_BITSIZE (GET_MODE (op0)))
471 bitpos += unit - GET_MODE_BITSIZE (GET_MODE (op0));
473 /* Storing an lsb-aligned field in a register
474 can be done with a movestrict instruction. */
476 if (!MEM_P (op0)
477 && (BYTES_BIG_ENDIAN ? bitpos + bitsize == unit : bitpos == 0)
478 && bitsize == GET_MODE_BITSIZE (fieldmode)
479 && optab_handler (movstrict_optab, fieldmode) != CODE_FOR_nothing)
481 struct expand_operand ops[2];
482 enum insn_code icode = optab_handler (movstrict_optab, fieldmode);
483 rtx arg0 = op0;
484 unsigned HOST_WIDE_INT subreg_off;
486 if (GET_CODE (arg0) == SUBREG)
488 /* Else we've got some float mode source being extracted into
489 a different float mode destination -- this combination of
490 subregs results in Severe Tire Damage. */
491 gcc_assert (GET_MODE (SUBREG_REG (arg0)) == fieldmode
492 || GET_MODE_CLASS (fieldmode) == MODE_INT
493 || GET_MODE_CLASS (fieldmode) == MODE_PARTIAL_INT);
494 arg0 = SUBREG_REG (arg0);
497 subreg_off = (bitnum % BITS_PER_WORD) / BITS_PER_UNIT
498 + (offset * UNITS_PER_WORD);
499 if (validate_subreg (fieldmode, GET_MODE (arg0), arg0, subreg_off))
501 arg0 = gen_rtx_SUBREG (fieldmode, arg0, subreg_off);
503 create_fixed_operand (&ops[0], arg0);
504 /* Shrink the source operand to FIELDMODE. */
505 create_convert_operand_to (&ops[1], value, fieldmode, false);
506 if (maybe_expand_insn (icode, 2, ops))
507 return true;
511 /* Handle fields bigger than a word. */
513 if (bitsize > BITS_PER_WORD)
515 /* Here we transfer the words of the field
516 in the order least significant first.
517 This is because the most significant word is the one which may
518 be less than full.
519 However, only do that if the value is not BLKmode. */
521 unsigned int backwards = WORDS_BIG_ENDIAN && fieldmode != BLKmode;
522 unsigned int nwords = (bitsize + (BITS_PER_WORD - 1)) / BITS_PER_WORD;
523 unsigned int i;
524 rtx last;
526 /* This is the mode we must force value to, so that there will be enough
527 subwords to extract. Note that fieldmode will often (always?) be
528 VOIDmode, because that is what store_field uses to indicate that this
529 is a bit field, but passing VOIDmode to operand_subword_force
530 is not allowed. */
531 fieldmode = GET_MODE (value);
532 if (fieldmode == VOIDmode)
533 fieldmode = smallest_mode_for_size (nwords * BITS_PER_WORD, MODE_INT);
535 last = get_last_insn ();
536 for (i = 0; i < nwords; i++)
538 /* If I is 0, use the low-order word in both field and target;
539 if I is 1, use the next to lowest word; and so on. */
540 unsigned int wordnum = (backwards ? nwords - i - 1 : i);
541 unsigned int bit_offset = (backwards
542 ? MAX ((int) bitsize - ((int) i + 1)
543 * BITS_PER_WORD,
545 : (int) i * BITS_PER_WORD);
546 rtx value_word = operand_subword_force (value, wordnum, fieldmode);
548 if (!store_bit_field_1 (op0, MIN (BITS_PER_WORD,
549 bitsize - i * BITS_PER_WORD),
550 bitnum + bit_offset, word_mode,
551 value_word, fallback_p))
553 delete_insns_since (last);
554 return false;
557 return true;
560 /* From here on we can assume that the field to be stored in is
561 a full-word (whatever type that is), since it is shorter than a word. */
563 /* OFFSET is the number of words or bytes (UNIT says which)
564 from STR_RTX to the first word or byte containing part of the field. */
566 if (!MEM_P (op0))
568 if (offset != 0
569 || GET_MODE_SIZE (GET_MODE (op0)) > UNITS_PER_WORD)
571 if (!REG_P (op0))
573 /* Since this is a destination (lvalue), we can't copy
574 it to a pseudo. We can remove a SUBREG that does not
575 change the size of the operand. Such a SUBREG may
576 have been added above. */
577 gcc_assert (GET_CODE (op0) == SUBREG
578 && (GET_MODE_SIZE (GET_MODE (op0))
579 == GET_MODE_SIZE (GET_MODE (SUBREG_REG (op0)))));
580 op0 = SUBREG_REG (op0);
582 op0 = gen_rtx_SUBREG (mode_for_size (BITS_PER_WORD, MODE_INT, 0),
583 op0, (offset * UNITS_PER_WORD));
585 offset = 0;
588 /* If VALUE has a floating-point or complex mode, access it as an
589 integer of the corresponding size. This can occur on a machine
590 with 64 bit registers that uses SFmode for float. It can also
591 occur for unaligned float or complex fields. */
592 orig_value = value;
593 if (GET_MODE (value) != VOIDmode
594 && GET_MODE_CLASS (GET_MODE (value)) != MODE_INT
595 && GET_MODE_CLASS (GET_MODE (value)) != MODE_PARTIAL_INT)
597 value = gen_reg_rtx (int_mode_for_mode (GET_MODE (value)));
598 emit_move_insn (gen_lowpart (GET_MODE (orig_value), value), orig_value);
601 /* Now OFFSET is nonzero only if OP0 is memory
602 and is therefore always measured in bytes. */
604 if (HAVE_insv
605 && GET_MODE (value) != BLKmode
606 && bitsize > 0
607 && GET_MODE_BITSIZE (op_mode) >= bitsize
608 && ! ((REG_P (op0) || GET_CODE (op0) == SUBREG)
609 && (bitsize + bitpos > GET_MODE_BITSIZE (op_mode))))
611 struct expand_operand ops[4];
612 int xbitpos = bitpos;
613 rtx value1;
614 rtx xop0 = op0;
615 rtx last = get_last_insn ();
616 bool copy_back = false;
618 /* Add OFFSET into OP0's address. */
619 if (MEM_P (xop0))
620 xop0 = adjust_address (xop0, byte_mode, offset);
622 /* If xop0 is a register, we need it in OP_MODE
623 to make it acceptable to the format of insv. */
624 if (GET_CODE (xop0) == SUBREG)
625 /* We can't just change the mode, because this might clobber op0,
626 and we will need the original value of op0 if insv fails. */
627 xop0 = gen_rtx_SUBREG (op_mode, SUBREG_REG (xop0), SUBREG_BYTE (xop0));
628 if (REG_P (xop0) && GET_MODE (xop0) != op_mode)
629 xop0 = gen_lowpart_SUBREG (op_mode, xop0);
631 /* If the destination is a paradoxical subreg such that we need a
632 truncate to the inner mode, perform the insertion on a temporary and
633 truncate the result to the original destination. Note that we can't
634 just truncate the paradoxical subreg as (truncate:N (subreg:W (reg:N
635 X) 0)) is (reg:N X). */
636 if (GET_CODE (xop0) == SUBREG
637 && REG_P (SUBREG_REG (xop0))
638 && (!TRULY_NOOP_TRUNCATION
639 (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (xop0))),
640 GET_MODE_BITSIZE (op_mode))))
642 rtx tem = gen_reg_rtx (op_mode);
643 emit_move_insn (tem, xop0);
644 xop0 = tem;
645 copy_back = true;
648 /* On big-endian machines, we count bits from the most significant.
649 If the bit field insn does not, we must invert. */
651 if (BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
652 xbitpos = unit - bitsize - xbitpos;
654 /* We have been counting XBITPOS within UNIT.
655 Count instead within the size of the register. */
656 if (BITS_BIG_ENDIAN && !MEM_P (xop0))
657 xbitpos += GET_MODE_BITSIZE (op_mode) - unit;
659 unit = GET_MODE_BITSIZE (op_mode);
661 /* Convert VALUE to op_mode (which insv insn wants) in VALUE1. */
662 value1 = value;
663 if (GET_MODE (value) != op_mode)
665 if (GET_MODE_BITSIZE (GET_MODE (value)) >= bitsize)
667 /* Optimization: Don't bother really extending VALUE
668 if it has all the bits we will actually use. However,
669 if we must narrow it, be sure we do it correctly. */
671 if (GET_MODE_SIZE (GET_MODE (value)) < GET_MODE_SIZE (op_mode))
673 rtx tmp;
675 tmp = simplify_subreg (op_mode, value1, GET_MODE (value), 0);
676 if (! tmp)
677 tmp = simplify_gen_subreg (op_mode,
678 force_reg (GET_MODE (value),
679 value1),
680 GET_MODE (value), 0);
681 value1 = tmp;
683 else
684 value1 = gen_lowpart (op_mode, value1);
686 else if (CONST_INT_P (value))
687 value1 = gen_int_mode (INTVAL (value), op_mode);
688 else
689 /* Parse phase is supposed to make VALUE's data type
690 match that of the component reference, which is a type
691 at least as wide as the field; so VALUE should have
692 a mode that corresponds to that type. */
693 gcc_assert (CONSTANT_P (value));
696 create_fixed_operand (&ops[0], xop0);
697 create_integer_operand (&ops[1], bitsize);
698 create_integer_operand (&ops[2], xbitpos);
699 create_input_operand (&ops[3], value1, op_mode);
700 if (maybe_expand_insn (CODE_FOR_insv, 4, ops))
702 if (copy_back)
703 convert_move (op0, xop0, true);
704 return true;
706 delete_insns_since (last);
709 /* If OP0 is a memory, try copying it to a register and seeing if a
710 cheap register alternative is available. */
711 if (HAVE_insv && MEM_P (op0))
713 enum machine_mode bestmode;
715 /* Get the mode to use for inserting into this field. If OP0 is
716 BLKmode, get the smallest mode consistent with the alignment. If
717 OP0 is a non-BLKmode object that is no wider than OP_MODE, use its
718 mode. Otherwise, use the smallest mode containing the field. */
720 if (GET_MODE (op0) == BLKmode
721 || (op_mode != MAX_MACHINE_MODE
722 && GET_MODE_SIZE (GET_MODE (op0)) > GET_MODE_SIZE (op_mode)))
723 bestmode = get_best_mode (bitsize, bitnum, MEM_ALIGN (op0),
724 (op_mode == MAX_MACHINE_MODE
725 ? VOIDmode : op_mode),
726 MEM_VOLATILE_P (op0));
727 else
728 bestmode = GET_MODE (op0);
730 if (bestmode != VOIDmode
731 && GET_MODE_SIZE (bestmode) >= GET_MODE_SIZE (fieldmode)
732 && !(SLOW_UNALIGNED_ACCESS (bestmode, MEM_ALIGN (op0))
733 && GET_MODE_BITSIZE (bestmode) > MEM_ALIGN (op0)))
735 rtx last, tempreg, xop0;
736 unsigned HOST_WIDE_INT xoffset, xbitpos;
738 last = get_last_insn ();
740 /* Adjust address to point to the containing unit of
741 that mode. Compute the offset as a multiple of this unit,
742 counting in bytes. */
743 unit = GET_MODE_BITSIZE (bestmode);
744 xoffset = (bitnum / unit) * GET_MODE_SIZE (bestmode);
745 xbitpos = bitnum % unit;
746 xop0 = adjust_address (op0, bestmode, xoffset);
748 /* Fetch that unit, store the bitfield in it, then store
749 the unit. */
750 tempreg = copy_to_reg (xop0);
751 if (store_bit_field_1 (tempreg, bitsize, xbitpos,
752 fieldmode, orig_value, false))
754 emit_move_insn (xop0, tempreg);
755 return true;
757 delete_insns_since (last);
761 if (!fallback_p)
762 return false;
764 store_fixed_bit_field (op0, offset, bitsize, bitpos, value);
765 return true;
768 /* Generate code to store value from rtx VALUE
769 into a bit-field within structure STR_RTX
770 containing BITSIZE bits starting at bit BITNUM.
771 FIELDMODE is the machine-mode of the FIELD_DECL node for this field. */
773 void
774 store_bit_field (rtx str_rtx, unsigned HOST_WIDE_INT bitsize,
775 unsigned HOST_WIDE_INT bitnum, enum machine_mode fieldmode,
776 rtx value)
778 if (!store_bit_field_1 (str_rtx, bitsize, bitnum, fieldmode, value, true))
779 gcc_unreachable ();
782 /* Use shifts and boolean operations to store VALUE
783 into a bit field of width BITSIZE
784 in a memory location specified by OP0 except offset by OFFSET bytes.
785 (OFFSET must be 0 if OP0 is a register.)
786 The field starts at position BITPOS within the byte.
787 (If OP0 is a register, it may be a full word or a narrower mode,
788 but BITPOS still counts within a full word,
789 which is significant on bigendian machines.) */
791 static void
792 store_fixed_bit_field (rtx op0, unsigned HOST_WIDE_INT offset,
793 unsigned HOST_WIDE_INT bitsize,
794 unsigned HOST_WIDE_INT bitpos, rtx value)
796 enum machine_mode mode;
797 unsigned int total_bits = BITS_PER_WORD;
798 rtx temp;
799 int all_zero = 0;
800 int all_one = 0;
802 /* There is a case not handled here:
803 a structure with a known alignment of just a halfword
804 and a field split across two aligned halfwords within the structure.
805 Or likewise a structure with a known alignment of just a byte
806 and a field split across two bytes.
807 Such cases are not supposed to be able to occur. */
809 if (REG_P (op0) || GET_CODE (op0) == SUBREG)
811 gcc_assert (!offset);
812 /* Special treatment for a bit field split across two registers. */
813 if (bitsize + bitpos > BITS_PER_WORD)
815 store_split_bit_field (op0, bitsize, bitpos, value);
816 return;
819 else
821 /* Get the proper mode to use for this field. We want a mode that
822 includes the entire field. If such a mode would be larger than
823 a word, we won't be doing the extraction the normal way.
824 We don't want a mode bigger than the destination. */
826 mode = GET_MODE (op0);
827 if (GET_MODE_BITSIZE (mode) == 0
828 || GET_MODE_BITSIZE (mode) > GET_MODE_BITSIZE (word_mode))
829 mode = word_mode;
831 if (MEM_VOLATILE_P (op0)
832 && GET_MODE_BITSIZE (GET_MODE (op0)) > 0
833 && flag_strict_volatile_bitfields > 0)
834 mode = GET_MODE (op0);
835 else
836 mode = get_best_mode (bitsize, bitpos + offset * BITS_PER_UNIT,
837 MEM_ALIGN (op0), mode, MEM_VOLATILE_P (op0));
839 if (mode == VOIDmode)
841 /* The only way this should occur is if the field spans word
842 boundaries. */
843 store_split_bit_field (op0, bitsize, bitpos + offset * BITS_PER_UNIT,
844 value);
845 return;
848 total_bits = GET_MODE_BITSIZE (mode);
850 /* Make sure bitpos is valid for the chosen mode. Adjust BITPOS to
851 be in the range 0 to total_bits-1, and put any excess bytes in
852 OFFSET. */
853 if (bitpos >= total_bits)
855 offset += (bitpos / total_bits) * (total_bits / BITS_PER_UNIT);
856 bitpos -= ((bitpos / total_bits) * (total_bits / BITS_PER_UNIT)
857 * BITS_PER_UNIT);
860 /* Get ref to an aligned byte, halfword, or word containing the field.
861 Adjust BITPOS to be position within a word,
862 and OFFSET to be the offset of that word.
863 Then alter OP0 to refer to that word. */
864 bitpos += (offset % (total_bits / BITS_PER_UNIT)) * BITS_PER_UNIT;
865 offset -= (offset % (total_bits / BITS_PER_UNIT));
866 op0 = adjust_address (op0, mode, offset);
869 mode = GET_MODE (op0);
871 /* Now MODE is either some integral mode for a MEM as OP0,
872 or is a full-word for a REG as OP0. TOTAL_BITS corresponds.
873 The bit field is contained entirely within OP0.
874 BITPOS is the starting bit number within OP0.
875 (OP0's mode may actually be narrower than MODE.) */
877 if (BYTES_BIG_ENDIAN)
878 /* BITPOS is the distance between our msb
879 and that of the containing datum.
880 Convert it to the distance from the lsb. */
881 bitpos = total_bits - bitsize - bitpos;
883 /* Now BITPOS is always the distance between our lsb
884 and that of OP0. */
886 /* Shift VALUE left by BITPOS bits. If VALUE is not constant,
887 we must first convert its mode to MODE. */
889 if (CONST_INT_P (value))
891 HOST_WIDE_INT v = INTVAL (value);
893 if (bitsize < HOST_BITS_PER_WIDE_INT)
894 v &= ((HOST_WIDE_INT) 1 << bitsize) - 1;
896 if (v == 0)
897 all_zero = 1;
898 else if ((bitsize < HOST_BITS_PER_WIDE_INT
899 && v == ((HOST_WIDE_INT) 1 << bitsize) - 1)
900 || (bitsize == HOST_BITS_PER_WIDE_INT && v == -1))
901 all_one = 1;
903 value = lshift_value (mode, value, bitpos, bitsize);
905 else
907 int must_and = (GET_MODE_BITSIZE (GET_MODE (value)) != bitsize
908 && bitpos + bitsize != GET_MODE_BITSIZE (mode));
910 if (GET_MODE (value) != mode)
911 value = convert_to_mode (mode, value, 1);
913 if (must_and)
914 value = expand_binop (mode, and_optab, value,
915 mask_rtx (mode, 0, bitsize, 0),
916 NULL_RTX, 1, OPTAB_LIB_WIDEN);
917 if (bitpos > 0)
918 value = expand_shift (LSHIFT_EXPR, mode, value,
919 bitpos, NULL_RTX, 1);
922 /* Now clear the chosen bits in OP0,
923 except that if VALUE is -1 we need not bother. */
924 /* We keep the intermediates in registers to allow CSE to combine
925 consecutive bitfield assignments. */
927 temp = force_reg (mode, op0);
929 if (! all_one)
931 temp = expand_binop (mode, and_optab, temp,
932 mask_rtx (mode, bitpos, bitsize, 1),
933 NULL_RTX, 1, OPTAB_LIB_WIDEN);
934 temp = force_reg (mode, temp);
937 /* Now logical-or VALUE into OP0, unless it is zero. */
939 if (! all_zero)
941 temp = expand_binop (mode, ior_optab, temp, value,
942 NULL_RTX, 1, OPTAB_LIB_WIDEN);
943 temp = force_reg (mode, temp);
946 if (op0 != temp)
948 op0 = copy_rtx (op0);
949 emit_move_insn (op0, temp);
953 /* Store a bit field that is split across multiple accessible memory objects.
955 OP0 is the REG, SUBREG or MEM rtx for the first of the objects.
956 BITSIZE is the field width; BITPOS the position of its first bit
957 (within the word).
958 VALUE is the value to store.
960 This does not yet handle fields wider than BITS_PER_WORD. */
962 static void
963 store_split_bit_field (rtx op0, unsigned HOST_WIDE_INT bitsize,
964 unsigned HOST_WIDE_INT bitpos, rtx value)
966 unsigned int unit;
967 unsigned int bitsdone = 0;
969 /* Make sure UNIT isn't larger than BITS_PER_WORD, we can only handle that
970 much at a time. */
971 if (REG_P (op0) || GET_CODE (op0) == SUBREG)
972 unit = BITS_PER_WORD;
973 else
974 unit = MIN (MEM_ALIGN (op0), BITS_PER_WORD);
976 /* If VALUE is a constant other than a CONST_INT, get it into a register in
977 WORD_MODE. If we can do this using gen_lowpart_common, do so. Note
978 that VALUE might be a floating-point constant. */
979 if (CONSTANT_P (value) && !CONST_INT_P (value))
981 rtx word = gen_lowpart_common (word_mode, value);
983 if (word && (value != word))
984 value = word;
985 else
986 value = gen_lowpart_common (word_mode,
987 force_reg (GET_MODE (value) != VOIDmode
988 ? GET_MODE (value)
989 : word_mode, value));
992 while (bitsdone < bitsize)
994 unsigned HOST_WIDE_INT thissize;
995 rtx part, word;
996 unsigned HOST_WIDE_INT thispos;
997 unsigned HOST_WIDE_INT offset;
999 offset = (bitpos + bitsdone) / unit;
1000 thispos = (bitpos + bitsdone) % unit;
1002 /* THISSIZE must not overrun a word boundary. Otherwise,
1003 store_fixed_bit_field will call us again, and we will mutually
1004 recurse forever. */
1005 thissize = MIN (bitsize - bitsdone, BITS_PER_WORD);
1006 thissize = MIN (thissize, unit - thispos);
1008 if (BYTES_BIG_ENDIAN)
1010 int total_bits;
1012 /* We must do an endian conversion exactly the same way as it is
1013 done in extract_bit_field, so that the two calls to
1014 extract_fixed_bit_field will have comparable arguments. */
1015 if (!MEM_P (value) || GET_MODE (value) == BLKmode)
1016 total_bits = BITS_PER_WORD;
1017 else
1018 total_bits = GET_MODE_BITSIZE (GET_MODE (value));
1020 /* Fetch successively less significant portions. */
1021 if (CONST_INT_P (value))
1022 part = GEN_INT (((unsigned HOST_WIDE_INT) (INTVAL (value))
1023 >> (bitsize - bitsdone - thissize))
1024 & (((HOST_WIDE_INT) 1 << thissize) - 1));
1025 else
1026 /* The args are chosen so that the last part includes the
1027 lsb. Give extract_bit_field the value it needs (with
1028 endianness compensation) to fetch the piece we want. */
1029 part = extract_fixed_bit_field (word_mode, value, 0, thissize,
1030 total_bits - bitsize + bitsdone,
1031 NULL_RTX, 1, false);
1033 else
1035 /* Fetch successively more significant portions. */
1036 if (CONST_INT_P (value))
1037 part = GEN_INT (((unsigned HOST_WIDE_INT) (INTVAL (value))
1038 >> bitsdone)
1039 & (((HOST_WIDE_INT) 1 << thissize) - 1));
1040 else
1041 part = extract_fixed_bit_field (word_mode, value, 0, thissize,
1042 bitsdone, NULL_RTX, 1, false);
1045 /* If OP0 is a register, then handle OFFSET here.
1047 When handling multiword bitfields, extract_bit_field may pass
1048 down a word_mode SUBREG of a larger REG for a bitfield that actually
1049 crosses a word boundary. Thus, for a SUBREG, we must find
1050 the current word starting from the base register. */
1051 if (GET_CODE (op0) == SUBREG)
1053 int word_offset = (SUBREG_BYTE (op0) / UNITS_PER_WORD) + offset;
1054 enum machine_mode sub_mode = GET_MODE (SUBREG_REG (op0));
1055 if (sub_mode != BLKmode && GET_MODE_SIZE (sub_mode) < UNITS_PER_WORD)
1056 word = word_offset ? const0_rtx : op0;
1057 else
1058 word = operand_subword_force (SUBREG_REG (op0), word_offset,
1059 GET_MODE (SUBREG_REG (op0)));
1060 offset = 0;
1062 else if (REG_P (op0))
1064 enum machine_mode op0_mode = GET_MODE (op0);
1065 if (op0_mode != BLKmode && GET_MODE_SIZE (op0_mode) < UNITS_PER_WORD)
1066 word = offset ? const0_rtx : op0;
1067 else
1068 word = operand_subword_force (op0, offset, GET_MODE (op0));
1069 offset = 0;
1071 else
1072 word = op0;
1074 /* OFFSET is in UNITs, and UNIT is in bits.
1075 store_fixed_bit_field wants offset in bytes. If WORD is const0_rtx,
1076 it is just an out-of-bounds access. Ignore it. */
1077 if (word != const0_rtx)
1078 store_fixed_bit_field (word, offset * unit / BITS_PER_UNIT, thissize,
1079 thispos, part);
1080 bitsdone += thissize;
1084 /* A subroutine of extract_bit_field_1 that converts return value X
1085 to either MODE or TMODE. MODE, TMODE and UNSIGNEDP are arguments
1086 to extract_bit_field. */
1088 static rtx
1089 convert_extracted_bit_field (rtx x, enum machine_mode mode,
1090 enum machine_mode tmode, bool unsignedp)
1092 if (GET_MODE (x) == tmode || GET_MODE (x) == mode)
1093 return x;
1095 /* If the x mode is not a scalar integral, first convert to the
1096 integer mode of that size and then access it as a floating-point
1097 value via a SUBREG. */
1098 if (!SCALAR_INT_MODE_P (tmode))
1100 enum machine_mode smode;
1102 smode = mode_for_size (GET_MODE_BITSIZE (tmode), MODE_INT, 0);
1103 x = convert_to_mode (smode, x, unsignedp);
1104 x = force_reg (smode, x);
1105 return gen_lowpart (tmode, x);
1108 return convert_to_mode (tmode, x, unsignedp);
1111 /* A subroutine of extract_bit_field, with the same arguments.
1112 If FALLBACK_P is true, fall back to extract_fixed_bit_field
1113 if we can find no other means of implementing the operation.
1114 if FALLBACK_P is false, return NULL instead. */
1116 static rtx
1117 extract_bit_field_1 (rtx str_rtx, unsigned HOST_WIDE_INT bitsize,
1118 unsigned HOST_WIDE_INT bitnum,
1119 int unsignedp, bool packedp, rtx target,
1120 enum machine_mode mode, enum machine_mode tmode,
1121 bool fallback_p)
1123 unsigned int unit
1124 = (MEM_P (str_rtx)) ? BITS_PER_UNIT : BITS_PER_WORD;
1125 unsigned HOST_WIDE_INT offset, bitpos;
1126 rtx op0 = str_rtx;
1127 enum machine_mode int_mode;
1128 enum machine_mode ext_mode;
1129 enum machine_mode mode1;
1130 int byte_offset;
1132 if (tmode == VOIDmode)
1133 tmode = mode;
1135 while (GET_CODE (op0) == SUBREG)
1137 bitnum += SUBREG_BYTE (op0) * BITS_PER_UNIT;
1138 op0 = SUBREG_REG (op0);
1141 /* If we have an out-of-bounds access to a register, just return an
1142 uninitialized register of the required mode. This can occur if the
1143 source code contains an out-of-bounds access to a small array. */
1144 if (REG_P (op0) && bitnum >= GET_MODE_BITSIZE (GET_MODE (op0)))
1145 return gen_reg_rtx (tmode);
1147 if (REG_P (op0)
1148 && mode == GET_MODE (op0)
1149 && bitnum == 0
1150 && bitsize == GET_MODE_BITSIZE (GET_MODE (op0)))
1152 /* We're trying to extract a full register from itself. */
1153 return op0;
1156 /* See if we can get a better vector mode before extracting. */
1157 if (VECTOR_MODE_P (GET_MODE (op0))
1158 && !MEM_P (op0)
1159 && GET_MODE_INNER (GET_MODE (op0)) != tmode)
1161 enum machine_mode new_mode;
1163 if (GET_MODE_CLASS (tmode) == MODE_FLOAT)
1164 new_mode = MIN_MODE_VECTOR_FLOAT;
1165 else if (GET_MODE_CLASS (tmode) == MODE_FRACT)
1166 new_mode = MIN_MODE_VECTOR_FRACT;
1167 else if (GET_MODE_CLASS (tmode) == MODE_UFRACT)
1168 new_mode = MIN_MODE_VECTOR_UFRACT;
1169 else if (GET_MODE_CLASS (tmode) == MODE_ACCUM)
1170 new_mode = MIN_MODE_VECTOR_ACCUM;
1171 else if (GET_MODE_CLASS (tmode) == MODE_UACCUM)
1172 new_mode = MIN_MODE_VECTOR_UACCUM;
1173 else
1174 new_mode = MIN_MODE_VECTOR_INT;
1176 for (; new_mode != VOIDmode ; new_mode = GET_MODE_WIDER_MODE (new_mode))
1177 if (GET_MODE_SIZE (new_mode) == GET_MODE_SIZE (GET_MODE (op0))
1178 && targetm.vector_mode_supported_p (new_mode))
1179 break;
1180 if (new_mode != VOIDmode)
1181 op0 = gen_lowpart (new_mode, op0);
1184 /* Use vec_extract patterns for extracting parts of vectors whenever
1185 available. */
1186 if (VECTOR_MODE_P (GET_MODE (op0))
1187 && !MEM_P (op0)
1188 && optab_handler (vec_extract_optab, GET_MODE (op0)) != CODE_FOR_nothing
1189 && ((bitnum + bitsize - 1) / GET_MODE_BITSIZE (GET_MODE_INNER (GET_MODE (op0)))
1190 == bitnum / GET_MODE_BITSIZE (GET_MODE_INNER (GET_MODE (op0)))))
1192 struct expand_operand ops[3];
1193 enum machine_mode outermode = GET_MODE (op0);
1194 enum machine_mode innermode = GET_MODE_INNER (outermode);
1195 enum insn_code icode = optab_handler (vec_extract_optab, outermode);
1196 unsigned HOST_WIDE_INT pos = bitnum / GET_MODE_BITSIZE (innermode);
1198 create_output_operand (&ops[0], target, innermode);
1199 create_input_operand (&ops[1], op0, outermode);
1200 create_integer_operand (&ops[2], pos);
1201 if (maybe_expand_insn (icode, 3, ops))
1203 target = ops[0].value;
1204 if (GET_MODE (target) != mode)
1205 return gen_lowpart (tmode, target);
1206 return target;
1210 /* Make sure we are playing with integral modes. Pun with subregs
1211 if we aren't. */
1213 enum machine_mode imode = int_mode_for_mode (GET_MODE (op0));
1214 if (imode != GET_MODE (op0))
1216 if (MEM_P (op0))
1217 op0 = adjust_address (op0, imode, 0);
1218 else if (imode != BLKmode)
1220 op0 = gen_lowpart (imode, op0);
1222 /* If we got a SUBREG, force it into a register since we
1223 aren't going to be able to do another SUBREG on it. */
1224 if (GET_CODE (op0) == SUBREG)
1225 op0 = force_reg (imode, op0);
1227 else if (REG_P (op0))
1229 rtx reg, subreg;
1230 imode = smallest_mode_for_size (GET_MODE_BITSIZE (GET_MODE (op0)),
1231 MODE_INT);
1232 reg = gen_reg_rtx (imode);
1233 subreg = gen_lowpart_SUBREG (GET_MODE (op0), reg);
1234 emit_move_insn (subreg, op0);
1235 op0 = reg;
1236 bitnum += SUBREG_BYTE (subreg) * BITS_PER_UNIT;
1238 else
1240 rtx mem = assign_stack_temp (GET_MODE (op0),
1241 GET_MODE_SIZE (GET_MODE (op0)), 0);
1242 emit_move_insn (mem, op0);
1243 op0 = adjust_address (mem, BLKmode, 0);
1248 /* We may be accessing data outside the field, which means
1249 we can alias adjacent data. */
1250 if (MEM_P (op0))
1252 op0 = shallow_copy_rtx (op0);
1253 set_mem_alias_set (op0, 0);
1254 set_mem_expr (op0, 0);
1257 /* Extraction of a full-word or multi-word value from a structure
1258 in a register or aligned memory can be done with just a SUBREG.
1259 A subword value in the least significant part of a register
1260 can also be extracted with a SUBREG. For this, we need the
1261 byte offset of the value in op0. */
1263 bitpos = bitnum % unit;
1264 offset = bitnum / unit;
1265 byte_offset = bitpos / BITS_PER_UNIT + offset * UNITS_PER_WORD;
1267 /* If OP0 is a register, BITPOS must count within a word.
1268 But as we have it, it counts within whatever size OP0 now has.
1269 On a bigendian machine, these are not the same, so convert. */
1270 if (BYTES_BIG_ENDIAN
1271 && !MEM_P (op0)
1272 && unit > GET_MODE_BITSIZE (GET_MODE (op0)))
1273 bitpos += unit - GET_MODE_BITSIZE (GET_MODE (op0));
1275 /* ??? We currently assume TARGET is at least as big as BITSIZE.
1276 If that's wrong, the solution is to test for it and set TARGET to 0
1277 if needed. */
1279 /* Only scalar integer modes can be converted via subregs. There is an
1280 additional problem for FP modes here in that they can have a precision
1281 which is different from the size. mode_for_size uses precision, but
1282 we want a mode based on the size, so we must avoid calling it for FP
1283 modes. */
1284 mode1 = (SCALAR_INT_MODE_P (tmode)
1285 ? mode_for_size (bitsize, GET_MODE_CLASS (tmode), 0)
1286 : mode);
1288 /* If the bitfield is volatile, we need to make sure the access
1289 remains on a type-aligned boundary. */
1290 if (GET_CODE (op0) == MEM
1291 && MEM_VOLATILE_P (op0)
1292 && GET_MODE_BITSIZE (GET_MODE (op0)) > 0
1293 && flag_strict_volatile_bitfields > 0)
1294 goto no_subreg_mode_swap;
1296 if (((bitsize >= BITS_PER_WORD && bitsize == GET_MODE_BITSIZE (mode)
1297 && bitpos % BITS_PER_WORD == 0)
1298 || (mode1 != BLKmode
1299 /* ??? The big endian test here is wrong. This is correct
1300 if the value is in a register, and if mode_for_size is not
1301 the same mode as op0. This causes us to get unnecessarily
1302 inefficient code from the Thumb port when -mbig-endian. */
1303 && (BYTES_BIG_ENDIAN
1304 ? bitpos + bitsize == BITS_PER_WORD
1305 : bitpos == 0)))
1306 && ((!MEM_P (op0)
1307 && TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode1),
1308 GET_MODE_BITSIZE (GET_MODE (op0)))
1309 && GET_MODE_SIZE (mode1) != 0
1310 && byte_offset % GET_MODE_SIZE (mode1) == 0)
1311 || (MEM_P (op0)
1312 && (! SLOW_UNALIGNED_ACCESS (mode, MEM_ALIGN (op0))
1313 || (offset * BITS_PER_UNIT % bitsize == 0
1314 && MEM_ALIGN (op0) % bitsize == 0)))))
1316 if (MEM_P (op0))
1317 op0 = adjust_address (op0, mode1, offset);
1318 else if (mode1 != GET_MODE (op0))
1320 rtx sub = simplify_gen_subreg (mode1, op0, GET_MODE (op0),
1321 byte_offset);
1322 if (sub == NULL)
1323 goto no_subreg_mode_swap;
1324 op0 = sub;
1326 if (mode1 != mode)
1327 return convert_to_mode (tmode, op0, unsignedp);
1328 return op0;
1330 no_subreg_mode_swap:
1332 /* Handle fields bigger than a word. */
1334 if (bitsize > BITS_PER_WORD)
1336 /* Here we transfer the words of the field
1337 in the order least significant first.
1338 This is because the most significant word is the one which may
1339 be less than full. */
1341 unsigned int nwords = (bitsize + (BITS_PER_WORD - 1)) / BITS_PER_WORD;
1342 unsigned int i;
1344 if (target == 0 || !REG_P (target) || !valid_multiword_target_p (target))
1345 target = gen_reg_rtx (mode);
1347 /* Indicate for flow that the entire target reg is being set. */
1348 emit_clobber (target);
1350 for (i = 0; i < nwords; i++)
1352 /* If I is 0, use the low-order word in both field and target;
1353 if I is 1, use the next to lowest word; and so on. */
1354 /* Word number in TARGET to use. */
1355 unsigned int wordnum
1356 = (WORDS_BIG_ENDIAN
1357 ? GET_MODE_SIZE (GET_MODE (target)) / UNITS_PER_WORD - i - 1
1358 : i);
1359 /* Offset from start of field in OP0. */
1360 unsigned int bit_offset = (WORDS_BIG_ENDIAN
1361 ? MAX (0, ((int) bitsize - ((int) i + 1)
1362 * (int) BITS_PER_WORD))
1363 : (int) i * BITS_PER_WORD);
1364 rtx target_part = operand_subword (target, wordnum, 1, VOIDmode);
1365 rtx result_part
1366 = extract_bit_field (op0, MIN (BITS_PER_WORD,
1367 bitsize - i * BITS_PER_WORD),
1368 bitnum + bit_offset, 1, false, target_part, mode,
1369 word_mode);
1371 gcc_assert (target_part);
1373 if (result_part != target_part)
1374 emit_move_insn (target_part, result_part);
1377 if (unsignedp)
1379 /* Unless we've filled TARGET, the upper regs in a multi-reg value
1380 need to be zero'd out. */
1381 if (GET_MODE_SIZE (GET_MODE (target)) > nwords * UNITS_PER_WORD)
1383 unsigned int i, total_words;
1385 total_words = GET_MODE_SIZE (GET_MODE (target)) / UNITS_PER_WORD;
1386 for (i = nwords; i < total_words; i++)
1387 emit_move_insn
1388 (operand_subword (target,
1389 WORDS_BIG_ENDIAN ? total_words - i - 1 : i,
1390 1, VOIDmode),
1391 const0_rtx);
1393 return target;
1396 /* Signed bit field: sign-extend with two arithmetic shifts. */
1397 target = expand_shift (LSHIFT_EXPR, mode, target,
1398 GET_MODE_BITSIZE (mode) - bitsize, NULL_RTX, 0);
1399 return expand_shift (RSHIFT_EXPR, mode, target,
1400 GET_MODE_BITSIZE (mode) - bitsize, NULL_RTX, 0);
1403 /* From here on we know the desired field is smaller than a word. */
1405 /* Check if there is a correspondingly-sized integer field, so we can
1406 safely extract it as one size of integer, if necessary; then
1407 truncate or extend to the size that is wanted; then use SUBREGs or
1408 convert_to_mode to get one of the modes we really wanted. */
1410 int_mode = int_mode_for_mode (tmode);
1411 if (int_mode == BLKmode)
1412 int_mode = int_mode_for_mode (mode);
1413 /* Should probably push op0 out to memory and then do a load. */
1414 gcc_assert (int_mode != BLKmode);
1416 /* OFFSET is the number of words or bytes (UNIT says which)
1417 from STR_RTX to the first word or byte containing part of the field. */
1418 if (!MEM_P (op0))
1420 if (offset != 0
1421 || GET_MODE_SIZE (GET_MODE (op0)) > UNITS_PER_WORD)
1423 if (!REG_P (op0))
1424 op0 = copy_to_reg (op0);
1425 op0 = gen_rtx_SUBREG (mode_for_size (BITS_PER_WORD, MODE_INT, 0),
1426 op0, (offset * UNITS_PER_WORD));
1428 offset = 0;
1431 /* Now OFFSET is nonzero only for memory operands. */
1432 ext_mode = mode_for_extraction (unsignedp ? EP_extzv : EP_extv, 0);
1433 if (ext_mode != MAX_MACHINE_MODE
1434 && bitsize > 0
1435 && GET_MODE_BITSIZE (ext_mode) >= bitsize
1436 /* If op0 is a register, we need it in EXT_MODE to make it
1437 acceptable to the format of ext(z)v. */
1438 && !(GET_CODE (op0) == SUBREG && GET_MODE (op0) != ext_mode)
1439 && !((REG_P (op0) || GET_CODE (op0) == SUBREG)
1440 && (bitsize + bitpos > GET_MODE_BITSIZE (ext_mode))))
1442 struct expand_operand ops[4];
1443 unsigned HOST_WIDE_INT xbitpos = bitpos, xoffset = offset;
1444 rtx xop0 = op0;
1445 rtx xtarget = target;
1446 rtx xspec_target = target;
1447 rtx xspec_target_subreg = 0;
1449 /* If op0 is a register, we need it in EXT_MODE to make it
1450 acceptable to the format of ext(z)v. */
1451 if (REG_P (xop0) && GET_MODE (xop0) != ext_mode)
1452 xop0 = gen_lowpart_SUBREG (ext_mode, xop0);
1453 if (MEM_P (xop0))
1454 /* Get ref to first byte containing part of the field. */
1455 xop0 = adjust_address (xop0, byte_mode, xoffset);
1457 /* On big-endian machines, we count bits from the most significant.
1458 If the bit field insn does not, we must invert. */
1459 if (BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
1460 xbitpos = unit - bitsize - xbitpos;
1462 /* Now convert from counting within UNIT to counting in EXT_MODE. */
1463 if (BITS_BIG_ENDIAN && !MEM_P (xop0))
1464 xbitpos += GET_MODE_BITSIZE (ext_mode) - unit;
1466 unit = GET_MODE_BITSIZE (ext_mode);
1468 if (xtarget == 0)
1469 xtarget = xspec_target = gen_reg_rtx (tmode);
1471 if (GET_MODE (xtarget) != ext_mode)
1473 /* Don't use LHS paradoxical subreg if explicit truncation is needed
1474 between the mode of the extraction (word_mode) and the target
1475 mode. Instead, create a temporary and use convert_move to set
1476 the target. */
1477 if (REG_P (xtarget)
1478 && TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (GET_MODE (xtarget)),
1479 GET_MODE_BITSIZE (ext_mode)))
1481 xtarget = gen_lowpart (ext_mode, xtarget);
1482 if (GET_MODE_SIZE (ext_mode)
1483 > GET_MODE_SIZE (GET_MODE (xspec_target)))
1484 xspec_target_subreg = xtarget;
1486 else
1487 xtarget = gen_reg_rtx (ext_mode);
1490 create_output_operand (&ops[0], xtarget, ext_mode);
1491 create_fixed_operand (&ops[1], xop0);
1492 create_integer_operand (&ops[2], bitsize);
1493 create_integer_operand (&ops[3], xbitpos);
1494 if (maybe_expand_insn (unsignedp ? CODE_FOR_extzv : CODE_FOR_extv,
1495 4, ops))
1497 xtarget = ops[0].value;
1498 if (xtarget == xspec_target)
1499 return xtarget;
1500 if (xtarget == xspec_target_subreg)
1501 return xspec_target;
1502 return convert_extracted_bit_field (xtarget, mode, tmode, unsignedp);
1506 /* If OP0 is a memory, try copying it to a register and seeing if a
1507 cheap register alternative is available. */
1508 if (ext_mode != MAX_MACHINE_MODE && MEM_P (op0))
1510 enum machine_mode bestmode;
1512 /* Get the mode to use for inserting into this field. If
1513 OP0 is BLKmode, get the smallest mode consistent with the
1514 alignment. If OP0 is a non-BLKmode object that is no
1515 wider than EXT_MODE, use its mode. Otherwise, use the
1516 smallest mode containing the field. */
1518 if (GET_MODE (op0) == BLKmode
1519 || (ext_mode != MAX_MACHINE_MODE
1520 && GET_MODE_SIZE (GET_MODE (op0)) > GET_MODE_SIZE (ext_mode)))
1521 bestmode = get_best_mode (bitsize, bitnum, MEM_ALIGN (op0),
1522 (ext_mode == MAX_MACHINE_MODE
1523 ? VOIDmode : ext_mode),
1524 MEM_VOLATILE_P (op0));
1525 else
1526 bestmode = GET_MODE (op0);
1528 if (bestmode != VOIDmode
1529 && !(SLOW_UNALIGNED_ACCESS (bestmode, MEM_ALIGN (op0))
1530 && GET_MODE_BITSIZE (bestmode) > MEM_ALIGN (op0)))
1532 unsigned HOST_WIDE_INT xoffset, xbitpos;
1534 /* Compute the offset as a multiple of this unit,
1535 counting in bytes. */
1536 unit = GET_MODE_BITSIZE (bestmode);
1537 xoffset = (bitnum / unit) * GET_MODE_SIZE (bestmode);
1538 xbitpos = bitnum % unit;
1540 /* Make sure the register is big enough for the whole field. */
1541 if (xoffset * BITS_PER_UNIT + unit
1542 >= offset * BITS_PER_UNIT + bitsize)
1544 rtx last, result, xop0;
1546 last = get_last_insn ();
1548 /* Fetch it to a register in that size. */
1549 xop0 = adjust_address (op0, bestmode, xoffset);
1550 xop0 = force_reg (bestmode, xop0);
1551 result = extract_bit_field_1 (xop0, bitsize, xbitpos,
1552 unsignedp, packedp, target,
1553 mode, tmode, false);
1554 if (result)
1555 return result;
1557 delete_insns_since (last);
1562 if (!fallback_p)
1563 return NULL;
1565 target = extract_fixed_bit_field (int_mode, op0, offset, bitsize,
1566 bitpos, target, unsignedp, packedp);
1567 return convert_extracted_bit_field (target, mode, tmode, unsignedp);
1570 /* Generate code to extract a byte-field from STR_RTX
1571 containing BITSIZE bits, starting at BITNUM,
1572 and put it in TARGET if possible (if TARGET is nonzero).
1573 Regardless of TARGET, we return the rtx for where the value is placed.
1575 STR_RTX is the structure containing the byte (a REG or MEM).
1576 UNSIGNEDP is nonzero if this is an unsigned bit field.
1577 PACKEDP is nonzero if the field has the packed attribute.
1578 MODE is the natural mode of the field value once extracted.
1579 TMODE is the mode the caller would like the value to have;
1580 but the value may be returned with type MODE instead.
1582 If a TARGET is specified and we can store in it at no extra cost,
1583 we do so, and return TARGET.
1584 Otherwise, we return a REG of mode TMODE or MODE, with TMODE preferred
1585 if they are equally easy. */
1588 extract_bit_field (rtx str_rtx, unsigned HOST_WIDE_INT bitsize,
1589 unsigned HOST_WIDE_INT bitnum, int unsignedp, bool packedp,
1590 rtx target, enum machine_mode mode, enum machine_mode tmode)
1592 return extract_bit_field_1 (str_rtx, bitsize, bitnum, unsignedp, packedp,
1593 target, mode, tmode, true);
1596 /* Extract a bit field using shifts and boolean operations
1597 Returns an rtx to represent the value.
1598 OP0 addresses a register (word) or memory (byte).
1599 BITPOS says which bit within the word or byte the bit field starts in.
1600 OFFSET says how many bytes farther the bit field starts;
1601 it is 0 if OP0 is a register.
1602 BITSIZE says how many bits long the bit field is.
1603 (If OP0 is a register, it may be narrower than a full word,
1604 but BITPOS still counts within a full word,
1605 which is significant on bigendian machines.)
1607 UNSIGNEDP is nonzero for an unsigned bit field (don't sign-extend value).
1608 PACKEDP is true if the field has the packed attribute.
1610 If TARGET is nonzero, attempts to store the value there
1611 and return TARGET, but this is not guaranteed.
1612 If TARGET is not used, create a pseudo-reg of mode TMODE for the value. */
1614 static rtx
1615 extract_fixed_bit_field (enum machine_mode tmode, rtx op0,
1616 unsigned HOST_WIDE_INT offset,
1617 unsigned HOST_WIDE_INT bitsize,
1618 unsigned HOST_WIDE_INT bitpos, rtx target,
1619 int unsignedp, bool packedp)
1621 unsigned int total_bits = BITS_PER_WORD;
1622 enum machine_mode mode;
1624 if (GET_CODE (op0) == SUBREG || REG_P (op0))
1626 /* Special treatment for a bit field split across two registers. */
1627 if (bitsize + bitpos > BITS_PER_WORD)
1628 return extract_split_bit_field (op0, bitsize, bitpos, unsignedp);
1630 else
1632 /* Get the proper mode to use for this field. We want a mode that
1633 includes the entire field. If such a mode would be larger than
1634 a word, we won't be doing the extraction the normal way. */
1636 if (MEM_VOLATILE_P (op0)
1637 && flag_strict_volatile_bitfields > 0)
1639 if (GET_MODE_BITSIZE (GET_MODE (op0)) > 0)
1640 mode = GET_MODE (op0);
1641 else if (target && GET_MODE_BITSIZE (GET_MODE (target)) > 0)
1642 mode = GET_MODE (target);
1643 else
1644 mode = tmode;
1646 else
1647 mode = get_best_mode (bitsize, bitpos + offset * BITS_PER_UNIT,
1648 MEM_ALIGN (op0), word_mode, MEM_VOLATILE_P (op0));
1650 if (mode == VOIDmode)
1651 /* The only way this should occur is if the field spans word
1652 boundaries. */
1653 return extract_split_bit_field (op0, bitsize,
1654 bitpos + offset * BITS_PER_UNIT,
1655 unsignedp);
1657 total_bits = GET_MODE_BITSIZE (mode);
1659 /* Make sure bitpos is valid for the chosen mode. Adjust BITPOS to
1660 be in the range 0 to total_bits-1, and put any excess bytes in
1661 OFFSET. */
1662 if (bitpos >= total_bits)
1664 offset += (bitpos / total_bits) * (total_bits / BITS_PER_UNIT);
1665 bitpos -= ((bitpos / total_bits) * (total_bits / BITS_PER_UNIT)
1666 * BITS_PER_UNIT);
1669 /* If we're accessing a volatile MEM, we can't do the next
1670 alignment step if it results in a multi-word access where we
1671 otherwise wouldn't have one. So, check for that case
1672 here. */
1673 if (MEM_P (op0)
1674 && MEM_VOLATILE_P (op0)
1675 && flag_strict_volatile_bitfields > 0
1676 && bitpos + bitsize <= total_bits
1677 && bitpos + bitsize + (offset % (total_bits / BITS_PER_UNIT)) * BITS_PER_UNIT > total_bits)
1679 if (STRICT_ALIGNMENT)
1681 static bool informed_about_misalignment = false;
1682 bool warned;
1684 if (packedp)
1686 if (bitsize == total_bits)
1687 warned = warning_at (input_location, OPT_fstrict_volatile_bitfields,
1688 "multiple accesses to volatile structure member"
1689 " because of packed attribute");
1690 else
1691 warned = warning_at (input_location, OPT_fstrict_volatile_bitfields,
1692 "multiple accesses to volatile structure bitfield"
1693 " because of packed attribute");
1695 return extract_split_bit_field (op0, bitsize,
1696 bitpos + offset * BITS_PER_UNIT,
1697 unsignedp);
1700 if (bitsize == total_bits)
1701 warned = warning_at (input_location, OPT_fstrict_volatile_bitfields,
1702 "mis-aligned access used for structure member");
1703 else
1704 warned = warning_at (input_location, OPT_fstrict_volatile_bitfields,
1705 "mis-aligned access used for structure bitfield");
1707 if (! informed_about_misalignment && warned)
1709 informed_about_misalignment = true;
1710 inform (input_location,
1711 "when a volatile object spans multiple type-sized locations,"
1712 " the compiler must choose between using a single mis-aligned access to"
1713 " preserve the volatility, or using multiple aligned accesses to avoid"
1714 " runtime faults; this code may fail at runtime if the hardware does"
1715 " not allow this access");
1719 else
1722 /* Get ref to an aligned byte, halfword, or word containing the field.
1723 Adjust BITPOS to be position within a word,
1724 and OFFSET to be the offset of that word.
1725 Then alter OP0 to refer to that word. */
1726 bitpos += (offset % (total_bits / BITS_PER_UNIT)) * BITS_PER_UNIT;
1727 offset -= (offset % (total_bits / BITS_PER_UNIT));
1730 op0 = adjust_address (op0, mode, offset);
1733 mode = GET_MODE (op0);
1735 if (BYTES_BIG_ENDIAN)
1736 /* BITPOS is the distance between our msb and that of OP0.
1737 Convert it to the distance from the lsb. */
1738 bitpos = total_bits - bitsize - bitpos;
1740 /* Now BITPOS is always the distance between the field's lsb and that of OP0.
1741 We have reduced the big-endian case to the little-endian case. */
1743 if (unsignedp)
1745 if (bitpos)
1747 /* If the field does not already start at the lsb,
1748 shift it so it does. */
1749 /* Maybe propagate the target for the shift. */
1750 /* But not if we will return it--could confuse integrate.c. */
1751 rtx subtarget = (target != 0 && REG_P (target) ? target : 0);
1752 if (tmode != mode) subtarget = 0;
1753 op0 = expand_shift (RSHIFT_EXPR, mode, op0, bitpos, subtarget, 1);
1755 /* Convert the value to the desired mode. */
1756 if (mode != tmode)
1757 op0 = convert_to_mode (tmode, op0, 1);
1759 /* Unless the msb of the field used to be the msb when we shifted,
1760 mask out the upper bits. */
1762 if (GET_MODE_BITSIZE (mode) != bitpos + bitsize)
1763 return expand_binop (GET_MODE (op0), and_optab, op0,
1764 mask_rtx (GET_MODE (op0), 0, bitsize, 0),
1765 target, 1, OPTAB_LIB_WIDEN);
1766 return op0;
1769 /* To extract a signed bit-field, first shift its msb to the msb of the word,
1770 then arithmetic-shift its lsb to the lsb of the word. */
1771 op0 = force_reg (mode, op0);
1773 /* Find the narrowest integer mode that contains the field. */
1775 for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != VOIDmode;
1776 mode = GET_MODE_WIDER_MODE (mode))
1777 if (GET_MODE_BITSIZE (mode) >= bitsize + bitpos)
1779 op0 = convert_to_mode (mode, op0, 0);
1780 break;
1783 if (mode != tmode)
1784 target = 0;
1786 if (GET_MODE_BITSIZE (mode) != (bitsize + bitpos))
1788 int amount = GET_MODE_BITSIZE (mode) - (bitsize + bitpos);
1789 /* Maybe propagate the target for the shift. */
1790 rtx subtarget = (target != 0 && REG_P (target) ? target : 0);
1791 op0 = expand_shift (LSHIFT_EXPR, mode, op0, amount, subtarget, 1);
1794 return expand_shift (RSHIFT_EXPR, mode, op0,
1795 GET_MODE_BITSIZE (mode) - bitsize, target, 0);
1798 /* Return a constant integer (CONST_INT or CONST_DOUBLE) mask value
1799 of mode MODE with BITSIZE ones followed by BITPOS zeros, or the
1800 complement of that if COMPLEMENT. The mask is truncated if
1801 necessary to the width of mode MODE. The mask is zero-extended if
1802 BITSIZE+BITPOS is too small for MODE. */
1804 static rtx
1805 mask_rtx (enum machine_mode mode, int bitpos, int bitsize, int complement)
1807 double_int mask;
1809 mask = double_int_mask (bitsize);
1810 mask = double_int_lshift (mask, bitpos, HOST_BITS_PER_DOUBLE_INT, false);
1812 if (complement)
1813 mask = double_int_not (mask);
1815 return immed_double_int_const (mask, mode);
1818 /* Return a constant integer (CONST_INT or CONST_DOUBLE) rtx with the value
1819 VALUE truncated to BITSIZE bits and then shifted left BITPOS bits. */
1821 static rtx
1822 lshift_value (enum machine_mode mode, rtx value, int bitpos, int bitsize)
1824 double_int val;
1826 val = double_int_zext (uhwi_to_double_int (INTVAL (value)), bitsize);
1827 val = double_int_lshift (val, bitpos, HOST_BITS_PER_DOUBLE_INT, false);
1829 return immed_double_int_const (val, mode);
1832 /* Extract a bit field that is split across two words
1833 and return an RTX for the result.
1835 OP0 is the REG, SUBREG or MEM rtx for the first of the two words.
1836 BITSIZE is the field width; BITPOS, position of its first bit, in the word.
1837 UNSIGNEDP is 1 if should zero-extend the contents; else sign-extend. */
1839 static rtx
1840 extract_split_bit_field (rtx op0, unsigned HOST_WIDE_INT bitsize,
1841 unsigned HOST_WIDE_INT bitpos, int unsignedp)
1843 unsigned int unit;
1844 unsigned int bitsdone = 0;
1845 rtx result = NULL_RTX;
1846 int first = 1;
1848 /* Make sure UNIT isn't larger than BITS_PER_WORD, we can only handle that
1849 much at a time. */
1850 if (REG_P (op0) || GET_CODE (op0) == SUBREG)
1851 unit = BITS_PER_WORD;
1852 else
1853 unit = MIN (MEM_ALIGN (op0), BITS_PER_WORD);
1855 while (bitsdone < bitsize)
1857 unsigned HOST_WIDE_INT thissize;
1858 rtx part, word;
1859 unsigned HOST_WIDE_INT thispos;
1860 unsigned HOST_WIDE_INT offset;
1862 offset = (bitpos + bitsdone) / unit;
1863 thispos = (bitpos + bitsdone) % unit;
1865 /* THISSIZE must not overrun a word boundary. Otherwise,
1866 extract_fixed_bit_field will call us again, and we will mutually
1867 recurse forever. */
1868 thissize = MIN (bitsize - bitsdone, BITS_PER_WORD);
1869 thissize = MIN (thissize, unit - thispos);
1871 /* If OP0 is a register, then handle OFFSET here.
1873 When handling multiword bitfields, extract_bit_field may pass
1874 down a word_mode SUBREG of a larger REG for a bitfield that actually
1875 crosses a word boundary. Thus, for a SUBREG, we must find
1876 the current word starting from the base register. */
1877 if (GET_CODE (op0) == SUBREG)
1879 int word_offset = (SUBREG_BYTE (op0) / UNITS_PER_WORD) + offset;
1880 word = operand_subword_force (SUBREG_REG (op0), word_offset,
1881 GET_MODE (SUBREG_REG (op0)));
1882 offset = 0;
1884 else if (REG_P (op0))
1886 word = operand_subword_force (op0, offset, GET_MODE (op0));
1887 offset = 0;
1889 else
1890 word = op0;
1892 /* Extract the parts in bit-counting order,
1893 whose meaning is determined by BYTES_PER_UNIT.
1894 OFFSET is in UNITs, and UNIT is in bits.
1895 extract_fixed_bit_field wants offset in bytes. */
1896 part = extract_fixed_bit_field (word_mode, word,
1897 offset * unit / BITS_PER_UNIT,
1898 thissize, thispos, 0, 1, false);
1899 bitsdone += thissize;
1901 /* Shift this part into place for the result. */
1902 if (BYTES_BIG_ENDIAN)
1904 if (bitsize != bitsdone)
1905 part = expand_shift (LSHIFT_EXPR, word_mode, part,
1906 bitsize - bitsdone, 0, 1);
1908 else
1910 if (bitsdone != thissize)
1911 part = expand_shift (LSHIFT_EXPR, word_mode, part,
1912 bitsdone - thissize, 0, 1);
1915 if (first)
1916 result = part;
1917 else
1918 /* Combine the parts with bitwise or. This works
1919 because we extracted each part as an unsigned bit field. */
1920 result = expand_binop (word_mode, ior_optab, part, result, NULL_RTX, 1,
1921 OPTAB_LIB_WIDEN);
1923 first = 0;
1926 /* Unsigned bit field: we are done. */
1927 if (unsignedp)
1928 return result;
1929 /* Signed bit field: sign-extend with two arithmetic shifts. */
1930 result = expand_shift (LSHIFT_EXPR, word_mode, result,
1931 BITS_PER_WORD - bitsize, NULL_RTX, 0);
1932 return expand_shift (RSHIFT_EXPR, word_mode, result,
1933 BITS_PER_WORD - bitsize, NULL_RTX, 0);
1936 /* Try to read the low bits of SRC as an rvalue of mode MODE, preserving
1937 the bit pattern. SRC_MODE is the mode of SRC; if this is smaller than
1938 MODE, fill the upper bits with zeros. Fail if the layout of either
1939 mode is unknown (as for CC modes) or if the extraction would involve
1940 unprofitable mode punning. Return the value on success, otherwise
1941 return null.
1943 This is different from gen_lowpart* in these respects:
1945 - the returned value must always be considered an rvalue
1947 - when MODE is wider than SRC_MODE, the extraction involves
1948 a zero extension
1950 - when MODE is smaller than SRC_MODE, the extraction involves
1951 a truncation (and is thus subject to TRULY_NOOP_TRUNCATION).
1953 In other words, this routine performs a computation, whereas the
1954 gen_lowpart* routines are conceptually lvalue or rvalue subreg
1955 operations. */
1958 extract_low_bits (enum machine_mode mode, enum machine_mode src_mode, rtx src)
1960 enum machine_mode int_mode, src_int_mode;
1962 if (mode == src_mode)
1963 return src;
1965 if (CONSTANT_P (src))
1967 /* simplify_gen_subreg can't be used here, as if simplify_subreg
1968 fails, it will happily create (subreg (symbol_ref)) or similar
1969 invalid SUBREGs. */
1970 unsigned int byte = subreg_lowpart_offset (mode, src_mode);
1971 rtx ret = simplify_subreg (mode, src, src_mode, byte);
1972 if (ret)
1973 return ret;
1975 if (GET_MODE (src) == VOIDmode
1976 || !validate_subreg (mode, src_mode, src, byte))
1977 return NULL_RTX;
1979 src = force_reg (GET_MODE (src), src);
1980 return gen_rtx_SUBREG (mode, src, byte);
1983 if (GET_MODE_CLASS (mode) == MODE_CC || GET_MODE_CLASS (src_mode) == MODE_CC)
1984 return NULL_RTX;
1986 if (GET_MODE_BITSIZE (mode) == GET_MODE_BITSIZE (src_mode)
1987 && MODES_TIEABLE_P (mode, src_mode))
1989 rtx x = gen_lowpart_common (mode, src);
1990 if (x)
1991 return x;
1994 src_int_mode = int_mode_for_mode (src_mode);
1995 int_mode = int_mode_for_mode (mode);
1996 if (src_int_mode == BLKmode || int_mode == BLKmode)
1997 return NULL_RTX;
1999 if (!MODES_TIEABLE_P (src_int_mode, src_mode))
2000 return NULL_RTX;
2001 if (!MODES_TIEABLE_P (int_mode, mode))
2002 return NULL_RTX;
2004 src = gen_lowpart (src_int_mode, src);
2005 src = convert_modes (int_mode, src_int_mode, src, true);
2006 src = gen_lowpart (mode, src);
2007 return src;
2010 /* Add INC into TARGET. */
2012 void
2013 expand_inc (rtx target, rtx inc)
2015 rtx value = expand_binop (GET_MODE (target), add_optab,
2016 target, inc,
2017 target, 0, OPTAB_LIB_WIDEN);
2018 if (value != target)
2019 emit_move_insn (target, value);
2022 /* Subtract DEC from TARGET. */
2024 void
2025 expand_dec (rtx target, rtx dec)
2027 rtx value = expand_binop (GET_MODE (target), sub_optab,
2028 target, dec,
2029 target, 0, OPTAB_LIB_WIDEN);
2030 if (value != target)
2031 emit_move_insn (target, value);
2034 /* Output a shift instruction for expression code CODE,
2035 with SHIFTED being the rtx for the value to shift,
2036 and AMOUNT the rtx for the amount to shift by.
2037 Store the result in the rtx TARGET, if that is convenient.
2038 If UNSIGNEDP is nonzero, do a logical shift; otherwise, arithmetic.
2039 Return the rtx for where the value is. */
2041 static rtx
2042 expand_shift_1 (enum tree_code code, enum machine_mode mode, rtx shifted,
2043 rtx amount, rtx target, int unsignedp)
2045 rtx op1, temp = 0;
2046 int left = (code == LSHIFT_EXPR || code == LROTATE_EXPR);
2047 int rotate = (code == LROTATE_EXPR || code == RROTATE_EXPR);
2048 optab lshift_optab = ashl_optab;
2049 optab rshift_arith_optab = ashr_optab;
2050 optab rshift_uns_optab = lshr_optab;
2051 optab lrotate_optab = rotl_optab;
2052 optab rrotate_optab = rotr_optab;
2053 enum machine_mode op1_mode;
2054 int attempt;
2055 bool speed = optimize_insn_for_speed_p ();
2057 op1 = amount;
2058 op1_mode = GET_MODE (op1);
2060 /* Determine whether the shift/rotate amount is a vector, or scalar. If the
2061 shift amount is a vector, use the vector/vector shift patterns. */
2062 if (VECTOR_MODE_P (mode) && VECTOR_MODE_P (op1_mode))
2064 lshift_optab = vashl_optab;
2065 rshift_arith_optab = vashr_optab;
2066 rshift_uns_optab = vlshr_optab;
2067 lrotate_optab = vrotl_optab;
2068 rrotate_optab = vrotr_optab;
2071 /* Previously detected shift-counts computed by NEGATE_EXPR
2072 and shifted in the other direction; but that does not work
2073 on all machines. */
2075 if (SHIFT_COUNT_TRUNCATED)
2077 if (CONST_INT_P (op1)
2078 && ((unsigned HOST_WIDE_INT) INTVAL (op1) >=
2079 (unsigned HOST_WIDE_INT) GET_MODE_BITSIZE (mode)))
2080 op1 = GEN_INT ((unsigned HOST_WIDE_INT) INTVAL (op1)
2081 % GET_MODE_BITSIZE (mode));
2082 else if (GET_CODE (op1) == SUBREG
2083 && subreg_lowpart_p (op1)
2084 && INTEGRAL_MODE_P (GET_MODE (SUBREG_REG (op1))))
2085 op1 = SUBREG_REG (op1);
2088 if (op1 == const0_rtx)
2089 return shifted;
2091 /* Check whether its cheaper to implement a left shift by a constant
2092 bit count by a sequence of additions. */
2093 if (code == LSHIFT_EXPR
2094 && CONST_INT_P (op1)
2095 && INTVAL (op1) > 0
2096 && INTVAL (op1) < GET_MODE_BITSIZE (mode)
2097 && INTVAL (op1) < MAX_BITS_PER_WORD
2098 && shift_cost[speed][mode][INTVAL (op1)] > INTVAL (op1) * add_cost[speed][mode]
2099 && shift_cost[speed][mode][INTVAL (op1)] != MAX_COST)
2101 int i;
2102 for (i = 0; i < INTVAL (op1); i++)
2104 temp = force_reg (mode, shifted);
2105 shifted = expand_binop (mode, add_optab, temp, temp, NULL_RTX,
2106 unsignedp, OPTAB_LIB_WIDEN);
2108 return shifted;
2111 for (attempt = 0; temp == 0 && attempt < 3; attempt++)
2113 enum optab_methods methods;
2115 if (attempt == 0)
2116 methods = OPTAB_DIRECT;
2117 else if (attempt == 1)
2118 methods = OPTAB_WIDEN;
2119 else
2120 methods = OPTAB_LIB_WIDEN;
2122 if (rotate)
2124 /* Widening does not work for rotation. */
2125 if (methods == OPTAB_WIDEN)
2126 continue;
2127 else if (methods == OPTAB_LIB_WIDEN)
2129 /* If we have been unable to open-code this by a rotation,
2130 do it as the IOR of two shifts. I.e., to rotate A
2131 by N bits, compute (A << N) | ((unsigned) A >> (C - N))
2132 where C is the bitsize of A.
2134 It is theoretically possible that the target machine might
2135 not be able to perform either shift and hence we would
2136 be making two libcalls rather than just the one for the
2137 shift (similarly if IOR could not be done). We will allow
2138 this extremely unlikely lossage to avoid complicating the
2139 code below. */
2141 rtx subtarget = target == shifted ? 0 : target;
2142 rtx new_amount, other_amount;
2143 rtx temp1;
2145 new_amount = op1;
2146 if (CONST_INT_P (op1))
2147 other_amount = GEN_INT (GET_MODE_BITSIZE (mode)
2148 - INTVAL (op1));
2149 else
2150 other_amount
2151 = simplify_gen_binary (MINUS, GET_MODE (op1),
2152 GEN_INT (GET_MODE_BITSIZE (mode)),
2153 op1);
2155 shifted = force_reg (mode, shifted);
2157 temp = expand_shift_1 (left ? LSHIFT_EXPR : RSHIFT_EXPR,
2158 mode, shifted, new_amount, 0, 1);
2159 temp1 = expand_shift_1 (left ? RSHIFT_EXPR : LSHIFT_EXPR,
2160 mode, shifted, other_amount,
2161 subtarget, 1);
2162 return expand_binop (mode, ior_optab, temp, temp1, target,
2163 unsignedp, methods);
2166 temp = expand_binop (mode,
2167 left ? lrotate_optab : rrotate_optab,
2168 shifted, op1, target, unsignedp, methods);
2170 else if (unsignedp)
2171 temp = expand_binop (mode,
2172 left ? lshift_optab : rshift_uns_optab,
2173 shifted, op1, target, unsignedp, methods);
2175 /* Do arithmetic shifts.
2176 Also, if we are going to widen the operand, we can just as well
2177 use an arithmetic right-shift instead of a logical one. */
2178 if (temp == 0 && ! rotate
2179 && (! unsignedp || (! left && methods == OPTAB_WIDEN)))
2181 enum optab_methods methods1 = methods;
2183 /* If trying to widen a log shift to an arithmetic shift,
2184 don't accept an arithmetic shift of the same size. */
2185 if (unsignedp)
2186 methods1 = OPTAB_MUST_WIDEN;
2188 /* Arithmetic shift */
2190 temp = expand_binop (mode,
2191 left ? lshift_optab : rshift_arith_optab,
2192 shifted, op1, target, unsignedp, methods1);
2195 /* We used to try extzv here for logical right shifts, but that was
2196 only useful for one machine, the VAX, and caused poor code
2197 generation there for lshrdi3, so the code was deleted and a
2198 define_expand for lshrsi3 was added to vax.md. */
2201 gcc_assert (temp);
2202 return temp;
2205 /* Output a shift instruction for expression code CODE,
2206 with SHIFTED being the rtx for the value to shift,
2207 and AMOUNT the amount to shift by.
2208 Store the result in the rtx TARGET, if that is convenient.
2209 If UNSIGNEDP is nonzero, do a logical shift; otherwise, arithmetic.
2210 Return the rtx for where the value is. */
2213 expand_shift (enum tree_code code, enum machine_mode mode, rtx shifted,
2214 int amount, rtx target, int unsignedp)
2216 return expand_shift_1 (code, mode,
2217 shifted, GEN_INT (amount), target, unsignedp);
2220 /* Output a shift instruction for expression code CODE,
2221 with SHIFTED being the rtx for the value to shift,
2222 and AMOUNT the tree for the amount to shift by.
2223 Store the result in the rtx TARGET, if that is convenient.
2224 If UNSIGNEDP is nonzero, do a logical shift; otherwise, arithmetic.
2225 Return the rtx for where the value is. */
2228 expand_variable_shift (enum tree_code code, enum machine_mode mode, rtx shifted,
2229 tree amount, rtx target, int unsignedp)
2231 return expand_shift_1 (code, mode,
2232 shifted, expand_normal (amount), target, unsignedp);
2236 /* Indicates the type of fixup needed after a constant multiplication.
2237 BASIC_VARIANT means no fixup is needed, NEGATE_VARIANT means that
2238 the result should be negated, and ADD_VARIANT means that the
2239 multiplicand should be added to the result. */
2240 enum mult_variant {basic_variant, negate_variant, add_variant};
2242 static void synth_mult (struct algorithm *, unsigned HOST_WIDE_INT,
2243 const struct mult_cost *, enum machine_mode mode);
2244 static bool choose_mult_variant (enum machine_mode, HOST_WIDE_INT,
2245 struct algorithm *, enum mult_variant *, int);
2246 static rtx expand_mult_const (enum machine_mode, rtx, HOST_WIDE_INT, rtx,
2247 const struct algorithm *, enum mult_variant);
2248 static unsigned HOST_WIDE_INT choose_multiplier (unsigned HOST_WIDE_INT, int,
2249 int, rtx *, int *, int *);
2250 static unsigned HOST_WIDE_INT invert_mod2n (unsigned HOST_WIDE_INT, int);
2251 static rtx extract_high_half (enum machine_mode, rtx);
2252 static rtx expand_mult_highpart (enum machine_mode, rtx, rtx, rtx, int, int);
2253 static rtx expand_mult_highpart_optab (enum machine_mode, rtx, rtx, rtx,
2254 int, int);
2255 /* Compute and return the best algorithm for multiplying by T.
2256 The algorithm must cost less than cost_limit
2257 If retval.cost >= COST_LIMIT, no algorithm was found and all
2258 other field of the returned struct are undefined.
2259 MODE is the machine mode of the multiplication. */
2261 static void
2262 synth_mult (struct algorithm *alg_out, unsigned HOST_WIDE_INT t,
2263 const struct mult_cost *cost_limit, enum machine_mode mode)
2265 int m;
2266 struct algorithm *alg_in, *best_alg;
2267 struct mult_cost best_cost;
2268 struct mult_cost new_limit;
2269 int op_cost, op_latency;
2270 unsigned HOST_WIDE_INT orig_t = t;
2271 unsigned HOST_WIDE_INT q;
2272 int maxm = MIN (BITS_PER_WORD, GET_MODE_BITSIZE (mode));
2273 int hash_index;
2274 bool cache_hit = false;
2275 enum alg_code cache_alg = alg_zero;
2276 bool speed = optimize_insn_for_speed_p ();
2278 /* Indicate that no algorithm is yet found. If no algorithm
2279 is found, this value will be returned and indicate failure. */
2280 alg_out->cost.cost = cost_limit->cost + 1;
2281 alg_out->cost.latency = cost_limit->latency + 1;
2283 if (cost_limit->cost < 0
2284 || (cost_limit->cost == 0 && cost_limit->latency <= 0))
2285 return;
2287 /* Restrict the bits of "t" to the multiplication's mode. */
2288 t &= GET_MODE_MASK (mode);
2290 /* t == 1 can be done in zero cost. */
2291 if (t == 1)
2293 alg_out->ops = 1;
2294 alg_out->cost.cost = 0;
2295 alg_out->cost.latency = 0;
2296 alg_out->op[0] = alg_m;
2297 return;
2300 /* t == 0 sometimes has a cost. If it does and it exceeds our limit,
2301 fail now. */
2302 if (t == 0)
2304 if (MULT_COST_LESS (cost_limit, zero_cost[speed]))
2305 return;
2306 else
2308 alg_out->ops = 1;
2309 alg_out->cost.cost = zero_cost[speed];
2310 alg_out->cost.latency = zero_cost[speed];
2311 alg_out->op[0] = alg_zero;
2312 return;
2316 /* We'll be needing a couple extra algorithm structures now. */
2318 alg_in = XALLOCA (struct algorithm);
2319 best_alg = XALLOCA (struct algorithm);
2320 best_cost = *cost_limit;
2322 /* Compute the hash index. */
2323 hash_index = (t ^ (unsigned int) mode ^ (speed * 256)) % NUM_ALG_HASH_ENTRIES;
2325 /* See if we already know what to do for T. */
2326 if (alg_hash[hash_index].t == t
2327 && alg_hash[hash_index].mode == mode
2328 && alg_hash[hash_index].mode == mode
2329 && alg_hash[hash_index].speed == speed
2330 && alg_hash[hash_index].alg != alg_unknown)
2332 cache_alg = alg_hash[hash_index].alg;
2334 if (cache_alg == alg_impossible)
2336 /* The cache tells us that it's impossible to synthesize
2337 multiplication by T within alg_hash[hash_index].cost. */
2338 if (!CHEAPER_MULT_COST (&alg_hash[hash_index].cost, cost_limit))
2339 /* COST_LIMIT is at least as restrictive as the one
2340 recorded in the hash table, in which case we have no
2341 hope of synthesizing a multiplication. Just
2342 return. */
2343 return;
2345 /* If we get here, COST_LIMIT is less restrictive than the
2346 one recorded in the hash table, so we may be able to
2347 synthesize a multiplication. Proceed as if we didn't
2348 have the cache entry. */
2350 else
2352 if (CHEAPER_MULT_COST (cost_limit, &alg_hash[hash_index].cost))
2353 /* The cached algorithm shows that this multiplication
2354 requires more cost than COST_LIMIT. Just return. This
2355 way, we don't clobber this cache entry with
2356 alg_impossible but retain useful information. */
2357 return;
2359 cache_hit = true;
2361 switch (cache_alg)
2363 case alg_shift:
2364 goto do_alg_shift;
2366 case alg_add_t_m2:
2367 case alg_sub_t_m2:
2368 goto do_alg_addsub_t_m2;
2370 case alg_add_factor:
2371 case alg_sub_factor:
2372 goto do_alg_addsub_factor;
2374 case alg_add_t2_m:
2375 goto do_alg_add_t2_m;
2377 case alg_sub_t2_m:
2378 goto do_alg_sub_t2_m;
2380 default:
2381 gcc_unreachable ();
2386 /* If we have a group of zero bits at the low-order part of T, try
2387 multiplying by the remaining bits and then doing a shift. */
2389 if ((t & 1) == 0)
2391 do_alg_shift:
2392 m = floor_log2 (t & -t); /* m = number of low zero bits */
2393 if (m < maxm)
2395 q = t >> m;
2396 /* The function expand_shift will choose between a shift and
2397 a sequence of additions, so the observed cost is given as
2398 MIN (m * add_cost[speed][mode], shift_cost[speed][mode][m]). */
2399 op_cost = m * add_cost[speed][mode];
2400 if (shift_cost[speed][mode][m] < op_cost)
2401 op_cost = shift_cost[speed][mode][m];
2402 new_limit.cost = best_cost.cost - op_cost;
2403 new_limit.latency = best_cost.latency - op_cost;
2404 synth_mult (alg_in, q, &new_limit, mode);
2406 alg_in->cost.cost += op_cost;
2407 alg_in->cost.latency += op_cost;
2408 if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
2410 struct algorithm *x;
2411 best_cost = alg_in->cost;
2412 x = alg_in, alg_in = best_alg, best_alg = x;
2413 best_alg->log[best_alg->ops] = m;
2414 best_alg->op[best_alg->ops] = alg_shift;
2417 /* See if treating ORIG_T as a signed number yields a better
2418 sequence. Try this sequence only for a negative ORIG_T
2419 as it would be useless for a non-negative ORIG_T. */
2420 if ((HOST_WIDE_INT) orig_t < 0)
2422 /* Shift ORIG_T as follows because a right shift of a
2423 negative-valued signed type is implementation
2424 defined. */
2425 q = ~(~orig_t >> m);
2426 /* The function expand_shift will choose between a shift
2427 and a sequence of additions, so the observed cost is
2428 given as MIN (m * add_cost[speed][mode],
2429 shift_cost[speed][mode][m]). */
2430 op_cost = m * add_cost[speed][mode];
2431 if (shift_cost[speed][mode][m] < op_cost)
2432 op_cost = shift_cost[speed][mode][m];
2433 new_limit.cost = best_cost.cost - op_cost;
2434 new_limit.latency = best_cost.latency - op_cost;
2435 synth_mult (alg_in, q, &new_limit, mode);
2437 alg_in->cost.cost += op_cost;
2438 alg_in->cost.latency += op_cost;
2439 if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
2441 struct algorithm *x;
2442 best_cost = alg_in->cost;
2443 x = alg_in, alg_in = best_alg, best_alg = x;
2444 best_alg->log[best_alg->ops] = m;
2445 best_alg->op[best_alg->ops] = alg_shift;
2449 if (cache_hit)
2450 goto done;
2453 /* If we have an odd number, add or subtract one. */
2454 if ((t & 1) != 0)
2456 unsigned HOST_WIDE_INT w;
2458 do_alg_addsub_t_m2:
2459 for (w = 1; (w & t) != 0; w <<= 1)
2461 /* If T was -1, then W will be zero after the loop. This is another
2462 case where T ends with ...111. Handling this with (T + 1) and
2463 subtract 1 produces slightly better code and results in algorithm
2464 selection much faster than treating it like the ...0111 case
2465 below. */
2466 if (w == 0
2467 || (w > 2
2468 /* Reject the case where t is 3.
2469 Thus we prefer addition in that case. */
2470 && t != 3))
2472 /* T ends with ...111. Multiply by (T + 1) and subtract 1. */
2474 op_cost = add_cost[speed][mode];
2475 new_limit.cost = best_cost.cost - op_cost;
2476 new_limit.latency = best_cost.latency - op_cost;
2477 synth_mult (alg_in, t + 1, &new_limit, mode);
2479 alg_in->cost.cost += op_cost;
2480 alg_in->cost.latency += op_cost;
2481 if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
2483 struct algorithm *x;
2484 best_cost = alg_in->cost;
2485 x = alg_in, alg_in = best_alg, best_alg = x;
2486 best_alg->log[best_alg->ops] = 0;
2487 best_alg->op[best_alg->ops] = alg_sub_t_m2;
2490 else
2492 /* T ends with ...01 or ...011. Multiply by (T - 1) and add 1. */
2494 op_cost = add_cost[speed][mode];
2495 new_limit.cost = best_cost.cost - op_cost;
2496 new_limit.latency = best_cost.latency - op_cost;
2497 synth_mult (alg_in, t - 1, &new_limit, mode);
2499 alg_in->cost.cost += op_cost;
2500 alg_in->cost.latency += op_cost;
2501 if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
2503 struct algorithm *x;
2504 best_cost = alg_in->cost;
2505 x = alg_in, alg_in = best_alg, best_alg = x;
2506 best_alg->log[best_alg->ops] = 0;
2507 best_alg->op[best_alg->ops] = alg_add_t_m2;
2511 /* We may be able to calculate a * -7, a * -15, a * -31, etc
2512 quickly with a - a * n for some appropriate constant n. */
2513 m = exact_log2 (-orig_t + 1);
2514 if (m >= 0 && m < maxm)
2516 op_cost = shiftsub1_cost[speed][mode][m];
2517 new_limit.cost = best_cost.cost - op_cost;
2518 new_limit.latency = best_cost.latency - op_cost;
2519 synth_mult (alg_in, (unsigned HOST_WIDE_INT) (-orig_t + 1) >> m, &new_limit, mode);
2521 alg_in->cost.cost += op_cost;
2522 alg_in->cost.latency += op_cost;
2523 if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
2525 struct algorithm *x;
2526 best_cost = alg_in->cost;
2527 x = alg_in, alg_in = best_alg, best_alg = x;
2528 best_alg->log[best_alg->ops] = m;
2529 best_alg->op[best_alg->ops] = alg_sub_t_m2;
2533 if (cache_hit)
2534 goto done;
2537 /* Look for factors of t of the form
2538 t = q(2**m +- 1), 2 <= m <= floor(log2(t - 1)).
2539 If we find such a factor, we can multiply by t using an algorithm that
2540 multiplies by q, shift the result by m and add/subtract it to itself.
2542 We search for large factors first and loop down, even if large factors
2543 are less probable than small; if we find a large factor we will find a
2544 good sequence quickly, and therefore be able to prune (by decreasing
2545 COST_LIMIT) the search. */
2547 do_alg_addsub_factor:
2548 for (m = floor_log2 (t - 1); m >= 2; m--)
2550 unsigned HOST_WIDE_INT d;
2552 d = ((unsigned HOST_WIDE_INT) 1 << m) + 1;
2553 if (t % d == 0 && t > d && m < maxm
2554 && (!cache_hit || cache_alg == alg_add_factor))
2556 /* If the target has a cheap shift-and-add instruction use
2557 that in preference to a shift insn followed by an add insn.
2558 Assume that the shift-and-add is "atomic" with a latency
2559 equal to its cost, otherwise assume that on superscalar
2560 hardware the shift may be executed concurrently with the
2561 earlier steps in the algorithm. */
2562 op_cost = add_cost[speed][mode] + shift_cost[speed][mode][m];
2563 if (shiftadd_cost[speed][mode][m] < op_cost)
2565 op_cost = shiftadd_cost[speed][mode][m];
2566 op_latency = op_cost;
2568 else
2569 op_latency = add_cost[speed][mode];
2571 new_limit.cost = best_cost.cost - op_cost;
2572 new_limit.latency = best_cost.latency - op_latency;
2573 synth_mult (alg_in, t / d, &new_limit, mode);
2575 alg_in->cost.cost += op_cost;
2576 alg_in->cost.latency += op_latency;
2577 if (alg_in->cost.latency < op_cost)
2578 alg_in->cost.latency = op_cost;
2579 if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
2581 struct algorithm *x;
2582 best_cost = alg_in->cost;
2583 x = alg_in, alg_in = best_alg, best_alg = x;
2584 best_alg->log[best_alg->ops] = m;
2585 best_alg->op[best_alg->ops] = alg_add_factor;
2587 /* Other factors will have been taken care of in the recursion. */
2588 break;
2591 d = ((unsigned HOST_WIDE_INT) 1 << m) - 1;
2592 if (t % d == 0 && t > d && m < maxm
2593 && (!cache_hit || cache_alg == alg_sub_factor))
2595 /* If the target has a cheap shift-and-subtract insn use
2596 that in preference to a shift insn followed by a sub insn.
2597 Assume that the shift-and-sub is "atomic" with a latency
2598 equal to it's cost, otherwise assume that on superscalar
2599 hardware the shift may be executed concurrently with the
2600 earlier steps in the algorithm. */
2601 op_cost = add_cost[speed][mode] + shift_cost[speed][mode][m];
2602 if (shiftsub0_cost[speed][mode][m] < op_cost)
2604 op_cost = shiftsub0_cost[speed][mode][m];
2605 op_latency = op_cost;
2607 else
2608 op_latency = add_cost[speed][mode];
2610 new_limit.cost = best_cost.cost - op_cost;
2611 new_limit.latency = best_cost.latency - op_latency;
2612 synth_mult (alg_in, t / d, &new_limit, mode);
2614 alg_in->cost.cost += op_cost;
2615 alg_in->cost.latency += op_latency;
2616 if (alg_in->cost.latency < op_cost)
2617 alg_in->cost.latency = op_cost;
2618 if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
2620 struct algorithm *x;
2621 best_cost = alg_in->cost;
2622 x = alg_in, alg_in = best_alg, best_alg = x;
2623 best_alg->log[best_alg->ops] = m;
2624 best_alg->op[best_alg->ops] = alg_sub_factor;
2626 break;
2629 if (cache_hit)
2630 goto done;
2632 /* Try shift-and-add (load effective address) instructions,
2633 i.e. do a*3, a*5, a*9. */
2634 if ((t & 1) != 0)
2636 do_alg_add_t2_m:
2637 q = t - 1;
2638 q = q & -q;
2639 m = exact_log2 (q);
2640 if (m >= 0 && m < maxm)
2642 op_cost = shiftadd_cost[speed][mode][m];
2643 new_limit.cost = best_cost.cost - op_cost;
2644 new_limit.latency = best_cost.latency - op_cost;
2645 synth_mult (alg_in, (t - 1) >> m, &new_limit, mode);
2647 alg_in->cost.cost += op_cost;
2648 alg_in->cost.latency += op_cost;
2649 if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
2651 struct algorithm *x;
2652 best_cost = alg_in->cost;
2653 x = alg_in, alg_in = best_alg, best_alg = x;
2654 best_alg->log[best_alg->ops] = m;
2655 best_alg->op[best_alg->ops] = alg_add_t2_m;
2658 if (cache_hit)
2659 goto done;
2661 do_alg_sub_t2_m:
2662 q = t + 1;
2663 q = q & -q;
2664 m = exact_log2 (q);
2665 if (m >= 0 && m < maxm)
2667 op_cost = shiftsub0_cost[speed][mode][m];
2668 new_limit.cost = best_cost.cost - op_cost;
2669 new_limit.latency = best_cost.latency - op_cost;
2670 synth_mult (alg_in, (t + 1) >> m, &new_limit, mode);
2672 alg_in->cost.cost += op_cost;
2673 alg_in->cost.latency += op_cost;
2674 if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
2676 struct algorithm *x;
2677 best_cost = alg_in->cost;
2678 x = alg_in, alg_in = best_alg, best_alg = x;
2679 best_alg->log[best_alg->ops] = m;
2680 best_alg->op[best_alg->ops] = alg_sub_t2_m;
2683 if (cache_hit)
2684 goto done;
2687 done:
2688 /* If best_cost has not decreased, we have not found any algorithm. */
2689 if (!CHEAPER_MULT_COST (&best_cost, cost_limit))
2691 /* We failed to find an algorithm. Record alg_impossible for
2692 this case (that is, <T, MODE, COST_LIMIT>) so that next time
2693 we are asked to find an algorithm for T within the same or
2694 lower COST_LIMIT, we can immediately return to the
2695 caller. */
2696 alg_hash[hash_index].t = t;
2697 alg_hash[hash_index].mode = mode;
2698 alg_hash[hash_index].speed = speed;
2699 alg_hash[hash_index].alg = alg_impossible;
2700 alg_hash[hash_index].cost = *cost_limit;
2701 return;
2704 /* Cache the result. */
2705 if (!cache_hit)
2707 alg_hash[hash_index].t = t;
2708 alg_hash[hash_index].mode = mode;
2709 alg_hash[hash_index].speed = speed;
2710 alg_hash[hash_index].alg = best_alg->op[best_alg->ops];
2711 alg_hash[hash_index].cost.cost = best_cost.cost;
2712 alg_hash[hash_index].cost.latency = best_cost.latency;
2715 /* If we are getting a too long sequence for `struct algorithm'
2716 to record, make this search fail. */
2717 if (best_alg->ops == MAX_BITS_PER_WORD)
2718 return;
2720 /* Copy the algorithm from temporary space to the space at alg_out.
2721 We avoid using structure assignment because the majority of
2722 best_alg is normally undefined, and this is a critical function. */
2723 alg_out->ops = best_alg->ops + 1;
2724 alg_out->cost = best_cost;
2725 memcpy (alg_out->op, best_alg->op,
2726 alg_out->ops * sizeof *alg_out->op);
2727 memcpy (alg_out->log, best_alg->log,
2728 alg_out->ops * sizeof *alg_out->log);
2731 /* Find the cheapest way of multiplying a value of mode MODE by VAL.
2732 Try three variations:
2734 - a shift/add sequence based on VAL itself
2735 - a shift/add sequence based on -VAL, followed by a negation
2736 - a shift/add sequence based on VAL - 1, followed by an addition.
2738 Return true if the cheapest of these cost less than MULT_COST,
2739 describing the algorithm in *ALG and final fixup in *VARIANT. */
2741 static bool
2742 choose_mult_variant (enum machine_mode mode, HOST_WIDE_INT val,
2743 struct algorithm *alg, enum mult_variant *variant,
2744 int mult_cost)
2746 struct algorithm alg2;
2747 struct mult_cost limit;
2748 int op_cost;
2749 bool speed = optimize_insn_for_speed_p ();
2751 /* Fail quickly for impossible bounds. */
2752 if (mult_cost < 0)
2753 return false;
2755 /* Ensure that mult_cost provides a reasonable upper bound.
2756 Any constant multiplication can be performed with less
2757 than 2 * bits additions. */
2758 op_cost = 2 * GET_MODE_BITSIZE (mode) * add_cost[speed][mode];
2759 if (mult_cost > op_cost)
2760 mult_cost = op_cost;
2762 *variant = basic_variant;
2763 limit.cost = mult_cost;
2764 limit.latency = mult_cost;
2765 synth_mult (alg, val, &limit, mode);
2767 /* This works only if the inverted value actually fits in an
2768 `unsigned int' */
2769 if (HOST_BITS_PER_INT >= GET_MODE_BITSIZE (mode))
2771 op_cost = neg_cost[speed][mode];
2772 if (MULT_COST_LESS (&alg->cost, mult_cost))
2774 limit.cost = alg->cost.cost - op_cost;
2775 limit.latency = alg->cost.latency - op_cost;
2777 else
2779 limit.cost = mult_cost - op_cost;
2780 limit.latency = mult_cost - op_cost;
2783 synth_mult (&alg2, -val, &limit, mode);
2784 alg2.cost.cost += op_cost;
2785 alg2.cost.latency += op_cost;
2786 if (CHEAPER_MULT_COST (&alg2.cost, &alg->cost))
2787 *alg = alg2, *variant = negate_variant;
2790 /* This proves very useful for division-by-constant. */
2791 op_cost = add_cost[speed][mode];
2792 if (MULT_COST_LESS (&alg->cost, mult_cost))
2794 limit.cost = alg->cost.cost - op_cost;
2795 limit.latency = alg->cost.latency - op_cost;
2797 else
2799 limit.cost = mult_cost - op_cost;
2800 limit.latency = mult_cost - op_cost;
2803 synth_mult (&alg2, val - 1, &limit, mode);
2804 alg2.cost.cost += op_cost;
2805 alg2.cost.latency += op_cost;
2806 if (CHEAPER_MULT_COST (&alg2.cost, &alg->cost))
2807 *alg = alg2, *variant = add_variant;
2809 return MULT_COST_LESS (&alg->cost, mult_cost);
2812 /* A subroutine of expand_mult, used for constant multiplications.
2813 Multiply OP0 by VAL in mode MODE, storing the result in TARGET if
2814 convenient. Use the shift/add sequence described by ALG and apply
2815 the final fixup specified by VARIANT. */
2817 static rtx
2818 expand_mult_const (enum machine_mode mode, rtx op0, HOST_WIDE_INT val,
2819 rtx target, const struct algorithm *alg,
2820 enum mult_variant variant)
2822 HOST_WIDE_INT val_so_far;
2823 rtx insn, accum, tem;
2824 int opno;
2825 enum machine_mode nmode;
2827 /* Avoid referencing memory over and over and invalid sharing
2828 on SUBREGs. */
2829 op0 = force_reg (mode, op0);
2831 /* ACCUM starts out either as OP0 or as a zero, depending on
2832 the first operation. */
2834 if (alg->op[0] == alg_zero)
2836 accum = copy_to_mode_reg (mode, const0_rtx);
2837 val_so_far = 0;
2839 else if (alg->op[0] == alg_m)
2841 accum = copy_to_mode_reg (mode, op0);
2842 val_so_far = 1;
2844 else
2845 gcc_unreachable ();
2847 for (opno = 1; opno < alg->ops; opno++)
2849 int log = alg->log[opno];
2850 rtx shift_subtarget = optimize ? 0 : accum;
2851 rtx add_target
2852 = (opno == alg->ops - 1 && target != 0 && variant != add_variant
2853 && !optimize)
2854 ? target : 0;
2855 rtx accum_target = optimize ? 0 : accum;
2857 switch (alg->op[opno])
2859 case alg_shift:
2860 tem = expand_shift (LSHIFT_EXPR, mode, accum, log, NULL_RTX, 0);
2861 /* REG_EQUAL note will be attached to the following insn. */
2862 emit_move_insn (accum, tem);
2863 val_so_far <<= log;
2864 break;
2866 case alg_add_t_m2:
2867 tem = expand_shift (LSHIFT_EXPR, mode, op0, log, NULL_RTX, 0);
2868 accum = force_operand (gen_rtx_PLUS (mode, accum, tem),
2869 add_target ? add_target : accum_target);
2870 val_so_far += (HOST_WIDE_INT) 1 << log;
2871 break;
2873 case alg_sub_t_m2:
2874 tem = expand_shift (LSHIFT_EXPR, mode, op0, log, NULL_RTX, 0);
2875 accum = force_operand (gen_rtx_MINUS (mode, accum, tem),
2876 add_target ? add_target : accum_target);
2877 val_so_far -= (HOST_WIDE_INT) 1 << log;
2878 break;
2880 case alg_add_t2_m:
2881 accum = expand_shift (LSHIFT_EXPR, mode, accum,
2882 log, shift_subtarget, 0);
2883 accum = force_operand (gen_rtx_PLUS (mode, accum, op0),
2884 add_target ? add_target : accum_target);
2885 val_so_far = (val_so_far << log) + 1;
2886 break;
2888 case alg_sub_t2_m:
2889 accum = expand_shift (LSHIFT_EXPR, mode, accum,
2890 log, shift_subtarget, 0);
2891 accum = force_operand (gen_rtx_MINUS (mode, accum, op0),
2892 add_target ? add_target : accum_target);
2893 val_so_far = (val_so_far << log) - 1;
2894 break;
2896 case alg_add_factor:
2897 tem = expand_shift (LSHIFT_EXPR, mode, accum, log, NULL_RTX, 0);
2898 accum = force_operand (gen_rtx_PLUS (mode, accum, tem),
2899 add_target ? add_target : accum_target);
2900 val_so_far += val_so_far << log;
2901 break;
2903 case alg_sub_factor:
2904 tem = expand_shift (LSHIFT_EXPR, mode, accum, log, NULL_RTX, 0);
2905 accum = force_operand (gen_rtx_MINUS (mode, tem, accum),
2906 (add_target
2907 ? add_target : (optimize ? 0 : tem)));
2908 val_so_far = (val_so_far << log) - val_so_far;
2909 break;
2911 default:
2912 gcc_unreachable ();
2915 /* Write a REG_EQUAL note on the last insn so that we can cse
2916 multiplication sequences. Note that if ACCUM is a SUBREG,
2917 we've set the inner register and must properly indicate
2918 that. */
2920 tem = op0, nmode = mode;
2921 if (GET_CODE (accum) == SUBREG)
2923 nmode = GET_MODE (SUBREG_REG (accum));
2924 tem = gen_lowpart (nmode, op0);
2927 insn = get_last_insn ();
2928 set_unique_reg_note (insn, REG_EQUAL,
2929 gen_rtx_MULT (nmode, tem,
2930 GEN_INT (val_so_far)));
2933 if (variant == negate_variant)
2935 val_so_far = -val_so_far;
2936 accum = expand_unop (mode, neg_optab, accum, target, 0);
2938 else if (variant == add_variant)
2940 val_so_far = val_so_far + 1;
2941 accum = force_operand (gen_rtx_PLUS (mode, accum, op0), target);
2944 /* Compare only the bits of val and val_so_far that are significant
2945 in the result mode, to avoid sign-/zero-extension confusion. */
2946 val &= GET_MODE_MASK (mode);
2947 val_so_far &= GET_MODE_MASK (mode);
2948 gcc_assert (val == val_so_far);
2950 return accum;
2953 /* Perform a multiplication and return an rtx for the result.
2954 MODE is mode of value; OP0 and OP1 are what to multiply (rtx's);
2955 TARGET is a suggestion for where to store the result (an rtx).
2957 We check specially for a constant integer as OP1.
2958 If you want this check for OP0 as well, then before calling
2959 you should swap the two operands if OP0 would be constant. */
2962 expand_mult (enum machine_mode mode, rtx op0, rtx op1, rtx target,
2963 int unsignedp)
2965 enum mult_variant variant;
2966 struct algorithm algorithm;
2967 int max_cost;
2968 bool speed = optimize_insn_for_speed_p ();
2970 /* Handling const0_rtx here allows us to use zero as a rogue value for
2971 coeff below. */
2972 if (op1 == const0_rtx)
2973 return const0_rtx;
2974 if (op1 == const1_rtx)
2975 return op0;
2976 if (op1 == constm1_rtx)
2977 return expand_unop (mode,
2978 GET_MODE_CLASS (mode) == MODE_INT
2979 && !unsignedp && flag_trapv
2980 ? negv_optab : neg_optab,
2981 op0, target, 0);
2983 /* These are the operations that are potentially turned into a sequence
2984 of shifts and additions. */
2985 if (SCALAR_INT_MODE_P (mode)
2986 && (unsignedp || !flag_trapv))
2988 HOST_WIDE_INT coeff = 0;
2989 rtx fake_reg = gen_raw_REG (mode, LAST_VIRTUAL_REGISTER + 1);
2991 /* synth_mult does an `unsigned int' multiply. As long as the mode is
2992 less than or equal in size to `unsigned int' this doesn't matter.
2993 If the mode is larger than `unsigned int', then synth_mult works
2994 only if the constant value exactly fits in an `unsigned int' without
2995 any truncation. This means that multiplying by negative values does
2996 not work; results are off by 2^32 on a 32 bit machine. */
2998 if (CONST_INT_P (op1))
3000 /* Attempt to handle multiplication of DImode values by negative
3001 coefficients, by performing the multiplication by a positive
3002 multiplier and then inverting the result. */
3003 if (INTVAL (op1) < 0
3004 && GET_MODE_BITSIZE (mode) > HOST_BITS_PER_WIDE_INT)
3006 /* Its safe to use -INTVAL (op1) even for INT_MIN, as the
3007 result is interpreted as an unsigned coefficient.
3008 Exclude cost of op0 from max_cost to match the cost
3009 calculation of the synth_mult. */
3010 max_cost = rtx_cost (gen_rtx_MULT (mode, fake_reg, op1), SET, speed)
3011 - neg_cost[speed][mode];
3012 if (max_cost > 0
3013 && choose_mult_variant (mode, -INTVAL (op1), &algorithm,
3014 &variant, max_cost))
3016 rtx temp = expand_mult_const (mode, op0, -INTVAL (op1),
3017 NULL_RTX, &algorithm,
3018 variant);
3019 return expand_unop (mode, neg_optab, temp, target, 0);
3022 else coeff = INTVAL (op1);
3024 else if (GET_CODE (op1) == CONST_DOUBLE)
3026 /* If we are multiplying in DImode, it may still be a win
3027 to try to work with shifts and adds. */
3028 if (CONST_DOUBLE_HIGH (op1) == 0
3029 && CONST_DOUBLE_LOW (op1) > 0)
3030 coeff = CONST_DOUBLE_LOW (op1);
3031 else if (CONST_DOUBLE_LOW (op1) == 0
3032 && EXACT_POWER_OF_2_OR_ZERO_P (CONST_DOUBLE_HIGH (op1)))
3034 int shift = floor_log2 (CONST_DOUBLE_HIGH (op1))
3035 + HOST_BITS_PER_WIDE_INT;
3036 return expand_shift (LSHIFT_EXPR, mode, op0,
3037 shift, target, unsignedp);
3041 /* We used to test optimize here, on the grounds that it's better to
3042 produce a smaller program when -O is not used. But this causes
3043 such a terrible slowdown sometimes that it seems better to always
3044 use synth_mult. */
3045 if (coeff != 0)
3047 /* Special case powers of two. */
3048 if (EXACT_POWER_OF_2_OR_ZERO_P (coeff))
3049 return expand_shift (LSHIFT_EXPR, mode, op0,
3050 floor_log2 (coeff), target, unsignedp);
3052 /* Exclude cost of op0 from max_cost to match the cost
3053 calculation of the synth_mult. */
3054 max_cost = rtx_cost (gen_rtx_MULT (mode, fake_reg, op1), SET, speed);
3055 if (choose_mult_variant (mode, coeff, &algorithm, &variant,
3056 max_cost))
3057 return expand_mult_const (mode, op0, coeff, target,
3058 &algorithm, variant);
3062 if (GET_CODE (op0) == CONST_DOUBLE)
3064 rtx temp = op0;
3065 op0 = op1;
3066 op1 = temp;
3069 /* Expand x*2.0 as x+x. */
3070 if (GET_CODE (op1) == CONST_DOUBLE
3071 && SCALAR_FLOAT_MODE_P (mode))
3073 REAL_VALUE_TYPE d;
3074 REAL_VALUE_FROM_CONST_DOUBLE (d, op1);
3076 if (REAL_VALUES_EQUAL (d, dconst2))
3078 op0 = force_reg (GET_MODE (op0), op0);
3079 return expand_binop (mode, add_optab, op0, op0,
3080 target, unsignedp, OPTAB_LIB_WIDEN);
3084 /* This used to use umul_optab if unsigned, but for non-widening multiply
3085 there is no difference between signed and unsigned. */
3086 op0 = expand_binop (mode,
3087 ! unsignedp
3088 && flag_trapv && (GET_MODE_CLASS(mode) == MODE_INT)
3089 ? smulv_optab : smul_optab,
3090 op0, op1, target, unsignedp, OPTAB_LIB_WIDEN);
3091 gcc_assert (op0);
3092 return op0;
3095 /* Perform a widening multiplication and return an rtx for the result.
3096 MODE is mode of value; OP0 and OP1 are what to multiply (rtx's);
3097 TARGET is a suggestion for where to store the result (an rtx).
3098 THIS_OPTAB is the optab we should use, it must be either umul_widen_optab
3099 or smul_widen_optab.
3101 We check specially for a constant integer as OP1, comparing the
3102 cost of a widening multiply against the cost of a sequence of shifts
3103 and adds. */
3106 expand_widening_mult (enum machine_mode mode, rtx op0, rtx op1, rtx target,
3107 int unsignedp, optab this_optab)
3109 bool speed = optimize_insn_for_speed_p ();
3110 rtx cop1;
3112 if (CONST_INT_P (op1)
3113 && GET_MODE (op0) != VOIDmode
3114 && (cop1 = convert_modes (mode, GET_MODE (op0), op1,
3115 this_optab == umul_widen_optab))
3116 && CONST_INT_P (cop1)
3117 && (INTVAL (cop1) >= 0
3118 || GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT))
3120 HOST_WIDE_INT coeff = INTVAL (cop1);
3121 int max_cost;
3122 enum mult_variant variant;
3123 struct algorithm algorithm;
3125 /* Special case powers of two. */
3126 if (EXACT_POWER_OF_2_OR_ZERO_P (coeff))
3128 op0 = convert_to_mode (mode, op0, this_optab == umul_widen_optab);
3129 return expand_shift (LSHIFT_EXPR, mode, op0,
3130 floor_log2 (coeff), target, unsignedp);
3133 /* Exclude cost of op0 from max_cost to match the cost
3134 calculation of the synth_mult. */
3135 max_cost = mul_widen_cost[speed][mode];
3136 if (choose_mult_variant (mode, coeff, &algorithm, &variant,
3137 max_cost))
3139 op0 = convert_to_mode (mode, op0, this_optab == umul_widen_optab);
3140 return expand_mult_const (mode, op0, coeff, target,
3141 &algorithm, variant);
3144 return expand_binop (mode, this_optab, op0, op1, target,
3145 unsignedp, OPTAB_LIB_WIDEN);
3148 /* Return the smallest n such that 2**n >= X. */
3151 ceil_log2 (unsigned HOST_WIDE_INT x)
3153 return floor_log2 (x - 1) + 1;
3156 /* Choose a minimal N + 1 bit approximation to 1/D that can be used to
3157 replace division by D, and put the least significant N bits of the result
3158 in *MULTIPLIER_PTR and return the most significant bit.
3160 The width of operations is N (should be <= HOST_BITS_PER_WIDE_INT), the
3161 needed precision is in PRECISION (should be <= N).
3163 PRECISION should be as small as possible so this function can choose
3164 multiplier more freely.
3166 The rounded-up logarithm of D is placed in *lgup_ptr. A shift count that
3167 is to be used for a final right shift is placed in *POST_SHIFT_PTR.
3169 Using this function, x/D will be equal to (x * m) >> (*POST_SHIFT_PTR),
3170 where m is the full HOST_BITS_PER_WIDE_INT + 1 bit multiplier. */
3172 static
3173 unsigned HOST_WIDE_INT
3174 choose_multiplier (unsigned HOST_WIDE_INT d, int n, int precision,
3175 rtx *multiplier_ptr, int *post_shift_ptr, int *lgup_ptr)
3177 HOST_WIDE_INT mhigh_hi, mlow_hi;
3178 unsigned HOST_WIDE_INT mhigh_lo, mlow_lo;
3179 int lgup, post_shift;
3180 int pow, pow2;
3181 unsigned HOST_WIDE_INT nl, dummy1;
3182 HOST_WIDE_INT nh, dummy2;
3184 /* lgup = ceil(log2(divisor)); */
3185 lgup = ceil_log2 (d);
3187 gcc_assert (lgup <= n);
3189 pow = n + lgup;
3190 pow2 = n + lgup - precision;
3192 /* We could handle this with some effort, but this case is much
3193 better handled directly with a scc insn, so rely on caller using
3194 that. */
3195 gcc_assert (pow != 2 * HOST_BITS_PER_WIDE_INT);
3197 /* mlow = 2^(N + lgup)/d */
3198 if (pow >= HOST_BITS_PER_WIDE_INT)
3200 nh = (HOST_WIDE_INT) 1 << (pow - HOST_BITS_PER_WIDE_INT);
3201 nl = 0;
3203 else
3205 nh = 0;
3206 nl = (unsigned HOST_WIDE_INT) 1 << pow;
3208 div_and_round_double (TRUNC_DIV_EXPR, 1, nl, nh, d, (HOST_WIDE_INT) 0,
3209 &mlow_lo, &mlow_hi, &dummy1, &dummy2);
3211 /* mhigh = (2^(N + lgup) + 2^N + lgup - precision)/d */
3212 if (pow2 >= HOST_BITS_PER_WIDE_INT)
3213 nh |= (HOST_WIDE_INT) 1 << (pow2 - HOST_BITS_PER_WIDE_INT);
3214 else
3215 nl |= (unsigned HOST_WIDE_INT) 1 << pow2;
3216 div_and_round_double (TRUNC_DIV_EXPR, 1, nl, nh, d, (HOST_WIDE_INT) 0,
3217 &mhigh_lo, &mhigh_hi, &dummy1, &dummy2);
3219 gcc_assert (!mhigh_hi || nh - d < d);
3220 gcc_assert (mhigh_hi <= 1 && mlow_hi <= 1);
3221 /* Assert that mlow < mhigh. */
3222 gcc_assert (mlow_hi < mhigh_hi
3223 || (mlow_hi == mhigh_hi && mlow_lo < mhigh_lo));
3225 /* If precision == N, then mlow, mhigh exceed 2^N
3226 (but they do not exceed 2^(N+1)). */
3228 /* Reduce to lowest terms. */
3229 for (post_shift = lgup; post_shift > 0; post_shift--)
3231 unsigned HOST_WIDE_INT ml_lo = (mlow_hi << (HOST_BITS_PER_WIDE_INT - 1)) | (mlow_lo >> 1);
3232 unsigned HOST_WIDE_INT mh_lo = (mhigh_hi << (HOST_BITS_PER_WIDE_INT - 1)) | (mhigh_lo >> 1);
3233 if (ml_lo >= mh_lo)
3234 break;
3236 mlow_hi = 0;
3237 mlow_lo = ml_lo;
3238 mhigh_hi = 0;
3239 mhigh_lo = mh_lo;
3242 *post_shift_ptr = post_shift;
3243 *lgup_ptr = lgup;
3244 if (n < HOST_BITS_PER_WIDE_INT)
3246 unsigned HOST_WIDE_INT mask = ((unsigned HOST_WIDE_INT) 1 << n) - 1;
3247 *multiplier_ptr = GEN_INT (mhigh_lo & mask);
3248 return mhigh_lo >= mask;
3250 else
3252 *multiplier_ptr = GEN_INT (mhigh_lo);
3253 return mhigh_hi;
3257 /* Compute the inverse of X mod 2**n, i.e., find Y such that X * Y is
3258 congruent to 1 (mod 2**N). */
3260 static unsigned HOST_WIDE_INT
3261 invert_mod2n (unsigned HOST_WIDE_INT x, int n)
3263 /* Solve x*y == 1 (mod 2^n), where x is odd. Return y. */
3265 /* The algorithm notes that the choice y = x satisfies
3266 x*y == 1 mod 2^3, since x is assumed odd.
3267 Each iteration doubles the number of bits of significance in y. */
3269 unsigned HOST_WIDE_INT mask;
3270 unsigned HOST_WIDE_INT y = x;
3271 int nbit = 3;
3273 mask = (n == HOST_BITS_PER_WIDE_INT
3274 ? ~(unsigned HOST_WIDE_INT) 0
3275 : ((unsigned HOST_WIDE_INT) 1 << n) - 1);
3277 while (nbit < n)
3279 y = y * (2 - x*y) & mask; /* Modulo 2^N */
3280 nbit *= 2;
3282 return y;
3285 /* Emit code to adjust ADJ_OPERAND after multiplication of wrong signedness
3286 flavor of OP0 and OP1. ADJ_OPERAND is already the high half of the
3287 product OP0 x OP1. If UNSIGNEDP is nonzero, adjust the signed product
3288 to become unsigned, if UNSIGNEDP is zero, adjust the unsigned product to
3289 become signed.
3291 The result is put in TARGET if that is convenient.
3293 MODE is the mode of operation. */
3296 expand_mult_highpart_adjust (enum machine_mode mode, rtx adj_operand, rtx op0,
3297 rtx op1, rtx target, int unsignedp)
3299 rtx tem;
3300 enum rtx_code adj_code = unsignedp ? PLUS : MINUS;
3302 tem = expand_shift (RSHIFT_EXPR, mode, op0,
3303 GET_MODE_BITSIZE (mode) - 1, NULL_RTX, 0);
3304 tem = expand_and (mode, tem, op1, NULL_RTX);
3305 adj_operand
3306 = force_operand (gen_rtx_fmt_ee (adj_code, mode, adj_operand, tem),
3307 adj_operand);
3309 tem = expand_shift (RSHIFT_EXPR, mode, op1,
3310 GET_MODE_BITSIZE (mode) - 1, NULL_RTX, 0);
3311 tem = expand_and (mode, tem, op0, NULL_RTX);
3312 target = force_operand (gen_rtx_fmt_ee (adj_code, mode, adj_operand, tem),
3313 target);
3315 return target;
3318 /* Subroutine of expand_mult_highpart. Return the MODE high part of OP. */
3320 static rtx
3321 extract_high_half (enum machine_mode mode, rtx op)
3323 enum machine_mode wider_mode;
3325 if (mode == word_mode)
3326 return gen_highpart (mode, op);
3328 gcc_assert (!SCALAR_FLOAT_MODE_P (mode));
3330 wider_mode = GET_MODE_WIDER_MODE (mode);
3331 op = expand_shift (RSHIFT_EXPR, wider_mode, op,
3332 GET_MODE_BITSIZE (mode), 0, 1);
3333 return convert_modes (mode, wider_mode, op, 0);
3336 /* Like expand_mult_highpart, but only consider using a multiplication
3337 optab. OP1 is an rtx for the constant operand. */
3339 static rtx
3340 expand_mult_highpart_optab (enum machine_mode mode, rtx op0, rtx op1,
3341 rtx target, int unsignedp, int max_cost)
3343 rtx narrow_op1 = gen_int_mode (INTVAL (op1), mode);
3344 enum machine_mode wider_mode;
3345 optab moptab;
3346 rtx tem;
3347 int size;
3348 bool speed = optimize_insn_for_speed_p ();
3350 gcc_assert (!SCALAR_FLOAT_MODE_P (mode));
3352 wider_mode = GET_MODE_WIDER_MODE (mode);
3353 size = GET_MODE_BITSIZE (mode);
3355 /* Firstly, try using a multiplication insn that only generates the needed
3356 high part of the product, and in the sign flavor of unsignedp. */
3357 if (mul_highpart_cost[speed][mode] < max_cost)
3359 moptab = unsignedp ? umul_highpart_optab : smul_highpart_optab;
3360 tem = expand_binop (mode, moptab, op0, narrow_op1, target,
3361 unsignedp, OPTAB_DIRECT);
3362 if (tem)
3363 return tem;
3366 /* Secondly, same as above, but use sign flavor opposite of unsignedp.
3367 Need to adjust the result after the multiplication. */
3368 if (size - 1 < BITS_PER_WORD
3369 && (mul_highpart_cost[speed][mode] + 2 * shift_cost[speed][mode][size-1]
3370 + 4 * add_cost[speed][mode] < max_cost))
3372 moptab = unsignedp ? smul_highpart_optab : umul_highpart_optab;
3373 tem = expand_binop (mode, moptab, op0, narrow_op1, target,
3374 unsignedp, OPTAB_DIRECT);
3375 if (tem)
3376 /* We used the wrong signedness. Adjust the result. */
3377 return expand_mult_highpart_adjust (mode, tem, op0, narrow_op1,
3378 tem, unsignedp);
3381 /* Try widening multiplication. */
3382 moptab = unsignedp ? umul_widen_optab : smul_widen_optab;
3383 if (optab_handler (moptab, wider_mode) != CODE_FOR_nothing
3384 && mul_widen_cost[speed][wider_mode] < max_cost)
3386 tem = expand_binop (wider_mode, moptab, op0, narrow_op1, 0,
3387 unsignedp, OPTAB_WIDEN);
3388 if (tem)
3389 return extract_high_half (mode, tem);
3392 /* Try widening the mode and perform a non-widening multiplication. */
3393 if (optab_handler (smul_optab, wider_mode) != CODE_FOR_nothing
3394 && size - 1 < BITS_PER_WORD
3395 && mul_cost[speed][wider_mode] + shift_cost[speed][mode][size-1] < max_cost)
3397 rtx insns, wop0, wop1;
3399 /* We need to widen the operands, for example to ensure the
3400 constant multiplier is correctly sign or zero extended.
3401 Use a sequence to clean-up any instructions emitted by
3402 the conversions if things don't work out. */
3403 start_sequence ();
3404 wop0 = convert_modes (wider_mode, mode, op0, unsignedp);
3405 wop1 = convert_modes (wider_mode, mode, op1, unsignedp);
3406 tem = expand_binop (wider_mode, smul_optab, wop0, wop1, 0,
3407 unsignedp, OPTAB_WIDEN);
3408 insns = get_insns ();
3409 end_sequence ();
3411 if (tem)
3413 emit_insn (insns);
3414 return extract_high_half (mode, tem);
3418 /* Try widening multiplication of opposite signedness, and adjust. */
3419 moptab = unsignedp ? smul_widen_optab : umul_widen_optab;
3420 if (optab_handler (moptab, wider_mode) != CODE_FOR_nothing
3421 && size - 1 < BITS_PER_WORD
3422 && (mul_widen_cost[speed][wider_mode] + 2 * shift_cost[speed][mode][size-1]
3423 + 4 * add_cost[speed][mode] < max_cost))
3425 tem = expand_binop (wider_mode, moptab, op0, narrow_op1,
3426 NULL_RTX, ! unsignedp, OPTAB_WIDEN);
3427 if (tem != 0)
3429 tem = extract_high_half (mode, tem);
3430 /* We used the wrong signedness. Adjust the result. */
3431 return expand_mult_highpart_adjust (mode, tem, op0, narrow_op1,
3432 target, unsignedp);
3436 return 0;
3439 /* Emit code to multiply OP0 and OP1 (where OP1 is an integer constant),
3440 putting the high half of the result in TARGET if that is convenient,
3441 and return where the result is. If the operation can not be performed,
3442 0 is returned.
3444 MODE is the mode of operation and result.
3446 UNSIGNEDP nonzero means unsigned multiply.
3448 MAX_COST is the total allowed cost for the expanded RTL. */
3450 static rtx
3451 expand_mult_highpart (enum machine_mode mode, rtx op0, rtx op1,
3452 rtx target, int unsignedp, int max_cost)
3454 enum machine_mode wider_mode = GET_MODE_WIDER_MODE (mode);
3455 unsigned HOST_WIDE_INT cnst1;
3456 int extra_cost;
3457 bool sign_adjust = false;
3458 enum mult_variant variant;
3459 struct algorithm alg;
3460 rtx tem;
3461 bool speed = optimize_insn_for_speed_p ();
3463 gcc_assert (!SCALAR_FLOAT_MODE_P (mode));
3464 /* We can't support modes wider than HOST_BITS_PER_INT. */
3465 gcc_assert (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT);
3467 cnst1 = INTVAL (op1) & GET_MODE_MASK (mode);
3469 /* We can't optimize modes wider than BITS_PER_WORD.
3470 ??? We might be able to perform double-word arithmetic if
3471 mode == word_mode, however all the cost calculations in
3472 synth_mult etc. assume single-word operations. */
3473 if (GET_MODE_BITSIZE (wider_mode) > BITS_PER_WORD)
3474 return expand_mult_highpart_optab (mode, op0, op1, target,
3475 unsignedp, max_cost);
3477 extra_cost = shift_cost[speed][mode][GET_MODE_BITSIZE (mode) - 1];
3479 /* Check whether we try to multiply by a negative constant. */
3480 if (!unsignedp && ((cnst1 >> (GET_MODE_BITSIZE (mode) - 1)) & 1))
3482 sign_adjust = true;
3483 extra_cost += add_cost[speed][mode];
3486 /* See whether shift/add multiplication is cheap enough. */
3487 if (choose_mult_variant (wider_mode, cnst1, &alg, &variant,
3488 max_cost - extra_cost))
3490 /* See whether the specialized multiplication optabs are
3491 cheaper than the shift/add version. */
3492 tem = expand_mult_highpart_optab (mode, op0, op1, target, unsignedp,
3493 alg.cost.cost + extra_cost);
3494 if (tem)
3495 return tem;
3497 tem = convert_to_mode (wider_mode, op0, unsignedp);
3498 tem = expand_mult_const (wider_mode, tem, cnst1, 0, &alg, variant);
3499 tem = extract_high_half (mode, tem);
3501 /* Adjust result for signedness. */
3502 if (sign_adjust)
3503 tem = force_operand (gen_rtx_MINUS (mode, tem, op0), tem);
3505 return tem;
3507 return expand_mult_highpart_optab (mode, op0, op1, target,
3508 unsignedp, max_cost);
3512 /* Expand signed modulus of OP0 by a power of two D in mode MODE. */
3514 static rtx
3515 expand_smod_pow2 (enum machine_mode mode, rtx op0, HOST_WIDE_INT d)
3517 unsigned HOST_WIDE_INT masklow, maskhigh;
3518 rtx result, temp, shift, label;
3519 int logd;
3521 logd = floor_log2 (d);
3522 result = gen_reg_rtx (mode);
3524 /* Avoid conditional branches when they're expensive. */
3525 if (BRANCH_COST (optimize_insn_for_speed_p (), false) >= 2
3526 && optimize_insn_for_speed_p ())
3528 rtx signmask = emit_store_flag (result, LT, op0, const0_rtx,
3529 mode, 0, -1);
3530 if (signmask)
3532 signmask = force_reg (mode, signmask);
3533 masklow = ((HOST_WIDE_INT) 1 << logd) - 1;
3534 shift = GEN_INT (GET_MODE_BITSIZE (mode) - logd);
3536 /* Use the rtx_cost of a LSHIFTRT instruction to determine
3537 which instruction sequence to use. If logical right shifts
3538 are expensive the use 2 XORs, 2 SUBs and an AND, otherwise
3539 use a LSHIFTRT, 1 ADD, 1 SUB and an AND. */
3541 temp = gen_rtx_LSHIFTRT (mode, result, shift);
3542 if (optab_handler (lshr_optab, mode) == CODE_FOR_nothing
3543 || rtx_cost (temp, SET, optimize_insn_for_speed_p ()) > COSTS_N_INSNS (2))
3545 temp = expand_binop (mode, xor_optab, op0, signmask,
3546 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3547 temp = expand_binop (mode, sub_optab, temp, signmask,
3548 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3549 temp = expand_binop (mode, and_optab, temp, GEN_INT (masklow),
3550 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3551 temp = expand_binop (mode, xor_optab, temp, signmask,
3552 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3553 temp = expand_binop (mode, sub_optab, temp, signmask,
3554 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3556 else
3558 signmask = expand_binop (mode, lshr_optab, signmask, shift,
3559 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3560 signmask = force_reg (mode, signmask);
3562 temp = expand_binop (mode, add_optab, op0, signmask,
3563 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3564 temp = expand_binop (mode, and_optab, temp, GEN_INT (masklow),
3565 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3566 temp = expand_binop (mode, sub_optab, temp, signmask,
3567 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3569 return temp;
3573 /* Mask contains the mode's signbit and the significant bits of the
3574 modulus. By including the signbit in the operation, many targets
3575 can avoid an explicit compare operation in the following comparison
3576 against zero. */
3578 masklow = ((HOST_WIDE_INT) 1 << logd) - 1;
3579 if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
3581 masklow |= (HOST_WIDE_INT) -1 << (GET_MODE_BITSIZE (mode) - 1);
3582 maskhigh = -1;
3584 else
3585 maskhigh = (HOST_WIDE_INT) -1
3586 << (GET_MODE_BITSIZE (mode) - HOST_BITS_PER_WIDE_INT - 1);
3588 temp = expand_binop (mode, and_optab, op0,
3589 immed_double_const (masklow, maskhigh, mode),
3590 result, 1, OPTAB_LIB_WIDEN);
3591 if (temp != result)
3592 emit_move_insn (result, temp);
3594 label = gen_label_rtx ();
3595 do_cmp_and_jump (result, const0_rtx, GE, mode, label);
3597 temp = expand_binop (mode, sub_optab, result, const1_rtx, result,
3598 0, OPTAB_LIB_WIDEN);
3599 masklow = (HOST_WIDE_INT) -1 << logd;
3600 maskhigh = -1;
3601 temp = expand_binop (mode, ior_optab, temp,
3602 immed_double_const (masklow, maskhigh, mode),
3603 result, 1, OPTAB_LIB_WIDEN);
3604 temp = expand_binop (mode, add_optab, temp, const1_rtx, result,
3605 0, OPTAB_LIB_WIDEN);
3606 if (temp != result)
3607 emit_move_insn (result, temp);
3608 emit_label (label);
3609 return result;
3612 /* Expand signed division of OP0 by a power of two D in mode MODE.
3613 This routine is only called for positive values of D. */
3615 static rtx
3616 expand_sdiv_pow2 (enum machine_mode mode, rtx op0, HOST_WIDE_INT d)
3618 rtx temp, label;
3619 int logd;
3621 logd = floor_log2 (d);
3623 if (d == 2
3624 && BRANCH_COST (optimize_insn_for_speed_p (),
3625 false) >= 1)
3627 temp = gen_reg_rtx (mode);
3628 temp = emit_store_flag (temp, LT, op0, const0_rtx, mode, 0, 1);
3629 temp = expand_binop (mode, add_optab, temp, op0, NULL_RTX,
3630 0, OPTAB_LIB_WIDEN);
3631 return expand_shift (RSHIFT_EXPR, mode, temp, logd, NULL_RTX, 0);
3634 #ifdef HAVE_conditional_move
3635 if (BRANCH_COST (optimize_insn_for_speed_p (), false)
3636 >= 2)
3638 rtx temp2;
3640 /* ??? emit_conditional_move forces a stack adjustment via
3641 compare_from_rtx so, if the sequence is discarded, it will
3642 be lost. Do it now instead. */
3643 do_pending_stack_adjust ();
3645 start_sequence ();
3646 temp2 = copy_to_mode_reg (mode, op0);
3647 temp = expand_binop (mode, add_optab, temp2, GEN_INT (d-1),
3648 NULL_RTX, 0, OPTAB_LIB_WIDEN);
3649 temp = force_reg (mode, temp);
3651 /* Construct "temp2 = (temp2 < 0) ? temp : temp2". */
3652 temp2 = emit_conditional_move (temp2, LT, temp2, const0_rtx,
3653 mode, temp, temp2, mode, 0);
3654 if (temp2)
3656 rtx seq = get_insns ();
3657 end_sequence ();
3658 emit_insn (seq);
3659 return expand_shift (RSHIFT_EXPR, mode, temp2, logd, NULL_RTX, 0);
3661 end_sequence ();
3663 #endif
3665 if (BRANCH_COST (optimize_insn_for_speed_p (),
3666 false) >= 2)
3668 int ushift = GET_MODE_BITSIZE (mode) - logd;
3670 temp = gen_reg_rtx (mode);
3671 temp = emit_store_flag (temp, LT, op0, const0_rtx, mode, 0, -1);
3672 if (shift_cost[optimize_insn_for_speed_p ()][mode][ushift] > COSTS_N_INSNS (1))
3673 temp = expand_binop (mode, and_optab, temp, GEN_INT (d - 1),
3674 NULL_RTX, 0, OPTAB_LIB_WIDEN);
3675 else
3676 temp = expand_shift (RSHIFT_EXPR, mode, temp,
3677 ushift, NULL_RTX, 1);
3678 temp = expand_binop (mode, add_optab, temp, op0, NULL_RTX,
3679 0, OPTAB_LIB_WIDEN);
3680 return expand_shift (RSHIFT_EXPR, mode, temp, logd, NULL_RTX, 0);
3683 label = gen_label_rtx ();
3684 temp = copy_to_mode_reg (mode, op0);
3685 do_cmp_and_jump (temp, const0_rtx, GE, mode, label);
3686 expand_inc (temp, GEN_INT (d - 1));
3687 emit_label (label);
3688 return expand_shift (RSHIFT_EXPR, mode, temp, logd, NULL_RTX, 0);
3691 /* Emit the code to divide OP0 by OP1, putting the result in TARGET
3692 if that is convenient, and returning where the result is.
3693 You may request either the quotient or the remainder as the result;
3694 specify REM_FLAG nonzero to get the remainder.
3696 CODE is the expression code for which kind of division this is;
3697 it controls how rounding is done. MODE is the machine mode to use.
3698 UNSIGNEDP nonzero means do unsigned division. */
3700 /* ??? For CEIL_MOD_EXPR, can compute incorrect remainder with ANDI
3701 and then correct it by or'ing in missing high bits
3702 if result of ANDI is nonzero.
3703 For ROUND_MOD_EXPR, can use ANDI and then sign-extend the result.
3704 This could optimize to a bfexts instruction.
3705 But C doesn't use these operations, so their optimizations are
3706 left for later. */
3707 /* ??? For modulo, we don't actually need the highpart of the first product,
3708 the low part will do nicely. And for small divisors, the second multiply
3709 can also be a low-part only multiply or even be completely left out.
3710 E.g. to calculate the remainder of a division by 3 with a 32 bit
3711 multiply, multiply with 0x55555556 and extract the upper two bits;
3712 the result is exact for inputs up to 0x1fffffff.
3713 The input range can be reduced by using cross-sum rules.
3714 For odd divisors >= 3, the following table gives right shift counts
3715 so that if a number is shifted by an integer multiple of the given
3716 amount, the remainder stays the same:
3717 2, 4, 3, 6, 10, 12, 4, 8, 18, 6, 11, 20, 18, 0, 5, 10, 12, 0, 12, 20,
3718 14, 12, 23, 21, 8, 0, 20, 18, 0, 0, 6, 12, 0, 22, 0, 18, 20, 30, 0, 0,
3719 0, 8, 0, 11, 12, 10, 36, 0, 30, 0, 0, 12, 0, 0, 0, 0, 44, 12, 24, 0,
3720 20, 0, 7, 14, 0, 18, 36, 0, 0, 46, 60, 0, 42, 0, 15, 24, 20, 0, 0, 33,
3721 0, 20, 0, 0, 18, 0, 60, 0, 0, 0, 0, 0, 40, 18, 0, 0, 12
3723 Cross-sum rules for even numbers can be derived by leaving as many bits
3724 to the right alone as the divisor has zeros to the right.
3725 E.g. if x is an unsigned 32 bit number:
3726 (x mod 12) == (((x & 1023) + ((x >> 8) & ~3)) * 0x15555558 >> 2 * 3) >> 28
3730 expand_divmod (int rem_flag, enum tree_code code, enum machine_mode mode,
3731 rtx op0, rtx op1, rtx target, int unsignedp)
3733 enum machine_mode compute_mode;
3734 rtx tquotient;
3735 rtx quotient = 0, remainder = 0;
3736 rtx last;
3737 int size;
3738 rtx insn, set;
3739 optab optab1, optab2;
3740 int op1_is_constant, op1_is_pow2 = 0;
3741 int max_cost, extra_cost;
3742 static HOST_WIDE_INT last_div_const = 0;
3743 static HOST_WIDE_INT ext_op1;
3744 bool speed = optimize_insn_for_speed_p ();
3746 op1_is_constant = CONST_INT_P (op1);
3747 if (op1_is_constant)
3749 ext_op1 = INTVAL (op1);
3750 if (unsignedp)
3751 ext_op1 &= GET_MODE_MASK (mode);
3752 op1_is_pow2 = ((EXACT_POWER_OF_2_OR_ZERO_P (ext_op1)
3753 || (! unsignedp && EXACT_POWER_OF_2_OR_ZERO_P (-ext_op1))));
3757 This is the structure of expand_divmod:
3759 First comes code to fix up the operands so we can perform the operations
3760 correctly and efficiently.
3762 Second comes a switch statement with code specific for each rounding mode.
3763 For some special operands this code emits all RTL for the desired
3764 operation, for other cases, it generates only a quotient and stores it in
3765 QUOTIENT. The case for trunc division/remainder might leave quotient = 0,
3766 to indicate that it has not done anything.
3768 Last comes code that finishes the operation. If QUOTIENT is set and
3769 REM_FLAG is set, the remainder is computed as OP0 - QUOTIENT * OP1. If
3770 QUOTIENT is not set, it is computed using trunc rounding.
3772 We try to generate special code for division and remainder when OP1 is a
3773 constant. If |OP1| = 2**n we can use shifts and some other fast
3774 operations. For other values of OP1, we compute a carefully selected
3775 fixed-point approximation m = 1/OP1, and generate code that multiplies OP0
3776 by m.
3778 In all cases but EXACT_DIV_EXPR, this multiplication requires the upper
3779 half of the product. Different strategies for generating the product are
3780 implemented in expand_mult_highpart.
3782 If what we actually want is the remainder, we generate that by another
3783 by-constant multiplication and a subtraction. */
3785 /* We shouldn't be called with OP1 == const1_rtx, but some of the
3786 code below will malfunction if we are, so check here and handle
3787 the special case if so. */
3788 if (op1 == const1_rtx)
3789 return rem_flag ? const0_rtx : op0;
3791 /* When dividing by -1, we could get an overflow.
3792 negv_optab can handle overflows. */
3793 if (! unsignedp && op1 == constm1_rtx)
3795 if (rem_flag)
3796 return const0_rtx;
3797 return expand_unop (mode, flag_trapv && GET_MODE_CLASS(mode) == MODE_INT
3798 ? negv_optab : neg_optab, op0, target, 0);
3801 if (target
3802 /* Don't use the function value register as a target
3803 since we have to read it as well as write it,
3804 and function-inlining gets confused by this. */
3805 && ((REG_P (target) && REG_FUNCTION_VALUE_P (target))
3806 /* Don't clobber an operand while doing a multi-step calculation. */
3807 || ((rem_flag || op1_is_constant)
3808 && (reg_mentioned_p (target, op0)
3809 || (MEM_P (op0) && MEM_P (target))))
3810 || reg_mentioned_p (target, op1)
3811 || (MEM_P (op1) && MEM_P (target))))
3812 target = 0;
3814 /* Get the mode in which to perform this computation. Normally it will
3815 be MODE, but sometimes we can't do the desired operation in MODE.
3816 If so, pick a wider mode in which we can do the operation. Convert
3817 to that mode at the start to avoid repeated conversions.
3819 First see what operations we need. These depend on the expression
3820 we are evaluating. (We assume that divxx3 insns exist under the
3821 same conditions that modxx3 insns and that these insns don't normally
3822 fail. If these assumptions are not correct, we may generate less
3823 efficient code in some cases.)
3825 Then see if we find a mode in which we can open-code that operation
3826 (either a division, modulus, or shift). Finally, check for the smallest
3827 mode for which we can do the operation with a library call. */
3829 /* We might want to refine this now that we have division-by-constant
3830 optimization. Since expand_mult_highpart tries so many variants, it is
3831 not straightforward to generalize this. Maybe we should make an array
3832 of possible modes in init_expmed? Save this for GCC 2.7. */
3834 optab1 = ((op1_is_pow2 && op1 != const0_rtx)
3835 ? (unsignedp ? lshr_optab : ashr_optab)
3836 : (unsignedp ? udiv_optab : sdiv_optab));
3837 optab2 = ((op1_is_pow2 && op1 != const0_rtx)
3838 ? optab1
3839 : (unsignedp ? udivmod_optab : sdivmod_optab));
3841 for (compute_mode = mode; compute_mode != VOIDmode;
3842 compute_mode = GET_MODE_WIDER_MODE (compute_mode))
3843 if (optab_handler (optab1, compute_mode) != CODE_FOR_nothing
3844 || optab_handler (optab2, compute_mode) != CODE_FOR_nothing)
3845 break;
3847 if (compute_mode == VOIDmode)
3848 for (compute_mode = mode; compute_mode != VOIDmode;
3849 compute_mode = GET_MODE_WIDER_MODE (compute_mode))
3850 if (optab_libfunc (optab1, compute_mode)
3851 || optab_libfunc (optab2, compute_mode))
3852 break;
3854 /* If we still couldn't find a mode, use MODE, but expand_binop will
3855 probably die. */
3856 if (compute_mode == VOIDmode)
3857 compute_mode = mode;
3859 if (target && GET_MODE (target) == compute_mode)
3860 tquotient = target;
3861 else
3862 tquotient = gen_reg_rtx (compute_mode);
3864 size = GET_MODE_BITSIZE (compute_mode);
3865 #if 0
3866 /* It should be possible to restrict the precision to GET_MODE_BITSIZE
3867 (mode), and thereby get better code when OP1 is a constant. Do that
3868 later. It will require going over all usages of SIZE below. */
3869 size = GET_MODE_BITSIZE (mode);
3870 #endif
3872 /* Only deduct something for a REM if the last divide done was
3873 for a different constant. Then set the constant of the last
3874 divide. */
3875 max_cost = unsignedp ? udiv_cost[speed][compute_mode] : sdiv_cost[speed][compute_mode];
3876 if (rem_flag && ! (last_div_const != 0 && op1_is_constant
3877 && INTVAL (op1) == last_div_const))
3878 max_cost -= mul_cost[speed][compute_mode] + add_cost[speed][compute_mode];
3880 last_div_const = ! rem_flag && op1_is_constant ? INTVAL (op1) : 0;
3882 /* Now convert to the best mode to use. */
3883 if (compute_mode != mode)
3885 op0 = convert_modes (compute_mode, mode, op0, unsignedp);
3886 op1 = convert_modes (compute_mode, mode, op1, unsignedp);
3888 /* convert_modes may have placed op1 into a register, so we
3889 must recompute the following. */
3890 op1_is_constant = CONST_INT_P (op1);
3891 op1_is_pow2 = (op1_is_constant
3892 && ((EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1))
3893 || (! unsignedp
3894 && EXACT_POWER_OF_2_OR_ZERO_P (-INTVAL (op1)))))) ;
3897 /* If one of the operands is a volatile MEM, copy it into a register. */
3899 if (MEM_P (op0) && MEM_VOLATILE_P (op0))
3900 op0 = force_reg (compute_mode, op0);
3901 if (MEM_P (op1) && MEM_VOLATILE_P (op1))
3902 op1 = force_reg (compute_mode, op1);
3904 /* If we need the remainder or if OP1 is constant, we need to
3905 put OP0 in a register in case it has any queued subexpressions. */
3906 if (rem_flag || op1_is_constant)
3907 op0 = force_reg (compute_mode, op0);
3909 last = get_last_insn ();
3911 /* Promote floor rounding to trunc rounding for unsigned operations. */
3912 if (unsignedp)
3914 if (code == FLOOR_DIV_EXPR)
3915 code = TRUNC_DIV_EXPR;
3916 if (code == FLOOR_MOD_EXPR)
3917 code = TRUNC_MOD_EXPR;
3918 if (code == EXACT_DIV_EXPR && op1_is_pow2)
3919 code = TRUNC_DIV_EXPR;
3922 if (op1 != const0_rtx)
3923 switch (code)
3925 case TRUNC_MOD_EXPR:
3926 case TRUNC_DIV_EXPR:
3927 if (op1_is_constant)
3929 if (unsignedp)
3931 unsigned HOST_WIDE_INT mh;
3932 int pre_shift, post_shift;
3933 int dummy;
3934 rtx ml;
3935 unsigned HOST_WIDE_INT d = (INTVAL (op1)
3936 & GET_MODE_MASK (compute_mode));
3938 if (EXACT_POWER_OF_2_OR_ZERO_P (d))
3940 pre_shift = floor_log2 (d);
3941 if (rem_flag)
3943 remainder
3944 = expand_binop (compute_mode, and_optab, op0,
3945 GEN_INT (((HOST_WIDE_INT) 1 << pre_shift) - 1),
3946 remainder, 1,
3947 OPTAB_LIB_WIDEN);
3948 if (remainder)
3949 return gen_lowpart (mode, remainder);
3951 quotient = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3952 pre_shift, tquotient, 1);
3954 else if (size <= HOST_BITS_PER_WIDE_INT)
3956 if (d >= ((unsigned HOST_WIDE_INT) 1 << (size - 1)))
3958 /* Most significant bit of divisor is set; emit an scc
3959 insn. */
3960 quotient = emit_store_flag_force (tquotient, GEU, op0, op1,
3961 compute_mode, 1, 1);
3963 else
3965 /* Find a suitable multiplier and right shift count
3966 instead of multiplying with D. */
3968 mh = choose_multiplier (d, size, size,
3969 &ml, &post_shift, &dummy);
3971 /* If the suggested multiplier is more than SIZE bits,
3972 we can do better for even divisors, using an
3973 initial right shift. */
3974 if (mh != 0 && (d & 1) == 0)
3976 pre_shift = floor_log2 (d & -d);
3977 mh = choose_multiplier (d >> pre_shift, size,
3978 size - pre_shift,
3979 &ml, &post_shift, &dummy);
3980 gcc_assert (!mh);
3982 else
3983 pre_shift = 0;
3985 if (mh != 0)
3987 rtx t1, t2, t3, t4;
3989 if (post_shift - 1 >= BITS_PER_WORD)
3990 goto fail1;
3992 extra_cost
3993 = (shift_cost[speed][compute_mode][post_shift - 1]
3994 + shift_cost[speed][compute_mode][1]
3995 + 2 * add_cost[speed][compute_mode]);
3996 t1 = expand_mult_highpart (compute_mode, op0, ml,
3997 NULL_RTX, 1,
3998 max_cost - extra_cost);
3999 if (t1 == 0)
4000 goto fail1;
4001 t2 = force_operand (gen_rtx_MINUS (compute_mode,
4002 op0, t1),
4003 NULL_RTX);
4004 t3 = expand_shift (RSHIFT_EXPR, compute_mode,
4005 t2, 1, NULL_RTX, 1);
4006 t4 = force_operand (gen_rtx_PLUS (compute_mode,
4007 t1, t3),
4008 NULL_RTX);
4009 quotient = expand_shift
4010 (RSHIFT_EXPR, compute_mode, t4,
4011 post_shift - 1, tquotient, 1);
4013 else
4015 rtx t1, t2;
4017 if (pre_shift >= BITS_PER_WORD
4018 || post_shift >= BITS_PER_WORD)
4019 goto fail1;
4021 t1 = expand_shift
4022 (RSHIFT_EXPR, compute_mode, op0,
4023 pre_shift, NULL_RTX, 1);
4024 extra_cost
4025 = (shift_cost[speed][compute_mode][pre_shift]
4026 + shift_cost[speed][compute_mode][post_shift]);
4027 t2 = expand_mult_highpart (compute_mode, t1, ml,
4028 NULL_RTX, 1,
4029 max_cost - extra_cost);
4030 if (t2 == 0)
4031 goto fail1;
4032 quotient = expand_shift
4033 (RSHIFT_EXPR, compute_mode, t2,
4034 post_shift, tquotient, 1);
4038 else /* Too wide mode to use tricky code */
4039 break;
4041 insn = get_last_insn ();
4042 if (insn != last
4043 && (set = single_set (insn)) != 0
4044 && SET_DEST (set) == quotient)
4045 set_unique_reg_note (insn,
4046 REG_EQUAL,
4047 gen_rtx_UDIV (compute_mode, op0, op1));
4049 else /* TRUNC_DIV, signed */
4051 unsigned HOST_WIDE_INT ml;
4052 int lgup, post_shift;
4053 rtx mlr;
4054 HOST_WIDE_INT d = INTVAL (op1);
4055 unsigned HOST_WIDE_INT abs_d;
4057 /* Since d might be INT_MIN, we have to cast to
4058 unsigned HOST_WIDE_INT before negating to avoid
4059 undefined signed overflow. */
4060 abs_d = (d >= 0
4061 ? (unsigned HOST_WIDE_INT) d
4062 : - (unsigned HOST_WIDE_INT) d);
4064 /* n rem d = n rem -d */
4065 if (rem_flag && d < 0)
4067 d = abs_d;
4068 op1 = gen_int_mode (abs_d, compute_mode);
4071 if (d == 1)
4072 quotient = op0;
4073 else if (d == -1)
4074 quotient = expand_unop (compute_mode, neg_optab, op0,
4075 tquotient, 0);
4076 else if (HOST_BITS_PER_WIDE_INT >= size
4077 && abs_d == (unsigned HOST_WIDE_INT) 1 << (size - 1))
4079 /* This case is not handled correctly below. */
4080 quotient = emit_store_flag (tquotient, EQ, op0, op1,
4081 compute_mode, 1, 1);
4082 if (quotient == 0)
4083 goto fail1;
4085 else if (EXACT_POWER_OF_2_OR_ZERO_P (d)
4086 && (rem_flag ? smod_pow2_cheap[speed][compute_mode]
4087 : sdiv_pow2_cheap[speed][compute_mode])
4088 /* We assume that cheap metric is true if the
4089 optab has an expander for this mode. */
4090 && ((optab_handler ((rem_flag ? smod_optab
4091 : sdiv_optab),
4092 compute_mode)
4093 != CODE_FOR_nothing)
4094 || (optab_handler (sdivmod_optab,
4095 compute_mode)
4096 != CODE_FOR_nothing)))
4098 else if (EXACT_POWER_OF_2_OR_ZERO_P (abs_d))
4100 if (rem_flag)
4102 remainder = expand_smod_pow2 (compute_mode, op0, d);
4103 if (remainder)
4104 return gen_lowpart (mode, remainder);
4107 if (sdiv_pow2_cheap[speed][compute_mode]
4108 && ((optab_handler (sdiv_optab, compute_mode)
4109 != CODE_FOR_nothing)
4110 || (optab_handler (sdivmod_optab, compute_mode)
4111 != CODE_FOR_nothing)))
4112 quotient = expand_divmod (0, TRUNC_DIV_EXPR,
4113 compute_mode, op0,
4114 gen_int_mode (abs_d,
4115 compute_mode),
4116 NULL_RTX, 0);
4117 else
4118 quotient = expand_sdiv_pow2 (compute_mode, op0, abs_d);
4120 /* We have computed OP0 / abs(OP1). If OP1 is negative,
4121 negate the quotient. */
4122 if (d < 0)
4124 insn = get_last_insn ();
4125 if (insn != last
4126 && (set = single_set (insn)) != 0
4127 && SET_DEST (set) == quotient
4128 && abs_d < ((unsigned HOST_WIDE_INT) 1
4129 << (HOST_BITS_PER_WIDE_INT - 1)))
4130 set_unique_reg_note (insn,
4131 REG_EQUAL,
4132 gen_rtx_DIV (compute_mode,
4133 op0,
4134 GEN_INT
4135 (trunc_int_for_mode
4136 (abs_d,
4137 compute_mode))));
4139 quotient = expand_unop (compute_mode, neg_optab,
4140 quotient, quotient, 0);
4143 else if (size <= HOST_BITS_PER_WIDE_INT)
4145 choose_multiplier (abs_d, size, size - 1,
4146 &mlr, &post_shift, &lgup);
4147 ml = (unsigned HOST_WIDE_INT) INTVAL (mlr);
4148 if (ml < (unsigned HOST_WIDE_INT) 1 << (size - 1))
4150 rtx t1, t2, t3;
4152 if (post_shift >= BITS_PER_WORD
4153 || size - 1 >= BITS_PER_WORD)
4154 goto fail1;
4156 extra_cost = (shift_cost[speed][compute_mode][post_shift]
4157 + shift_cost[speed][compute_mode][size - 1]
4158 + add_cost[speed][compute_mode]);
4159 t1 = expand_mult_highpart (compute_mode, op0, mlr,
4160 NULL_RTX, 0,
4161 max_cost - extra_cost);
4162 if (t1 == 0)
4163 goto fail1;
4164 t2 = expand_shift
4165 (RSHIFT_EXPR, compute_mode, t1,
4166 post_shift, NULL_RTX, 0);
4167 t3 = expand_shift
4168 (RSHIFT_EXPR, compute_mode, op0,
4169 size - 1, NULL_RTX, 0);
4170 if (d < 0)
4171 quotient
4172 = force_operand (gen_rtx_MINUS (compute_mode,
4173 t3, t2),
4174 tquotient);
4175 else
4176 quotient
4177 = force_operand (gen_rtx_MINUS (compute_mode,
4178 t2, t3),
4179 tquotient);
4181 else
4183 rtx t1, t2, t3, t4;
4185 if (post_shift >= BITS_PER_WORD
4186 || size - 1 >= BITS_PER_WORD)
4187 goto fail1;
4189 ml |= (~(unsigned HOST_WIDE_INT) 0) << (size - 1);
4190 mlr = gen_int_mode (ml, compute_mode);
4191 extra_cost = (shift_cost[speed][compute_mode][post_shift]
4192 + shift_cost[speed][compute_mode][size - 1]
4193 + 2 * add_cost[speed][compute_mode]);
4194 t1 = expand_mult_highpart (compute_mode, op0, mlr,
4195 NULL_RTX, 0,
4196 max_cost - extra_cost);
4197 if (t1 == 0)
4198 goto fail1;
4199 t2 = force_operand (gen_rtx_PLUS (compute_mode,
4200 t1, op0),
4201 NULL_RTX);
4202 t3 = expand_shift
4203 (RSHIFT_EXPR, compute_mode, t2,
4204 post_shift, NULL_RTX, 0);
4205 t4 = expand_shift
4206 (RSHIFT_EXPR, compute_mode, op0,
4207 size - 1, NULL_RTX, 0);
4208 if (d < 0)
4209 quotient
4210 = force_operand (gen_rtx_MINUS (compute_mode,
4211 t4, t3),
4212 tquotient);
4213 else
4214 quotient
4215 = force_operand (gen_rtx_MINUS (compute_mode,
4216 t3, t4),
4217 tquotient);
4220 else /* Too wide mode to use tricky code */
4221 break;
4223 insn = get_last_insn ();
4224 if (insn != last
4225 && (set = single_set (insn)) != 0
4226 && SET_DEST (set) == quotient)
4227 set_unique_reg_note (insn,
4228 REG_EQUAL,
4229 gen_rtx_DIV (compute_mode, op0, op1));
4231 break;
4233 fail1:
4234 delete_insns_since (last);
4235 break;
4237 case FLOOR_DIV_EXPR:
4238 case FLOOR_MOD_EXPR:
4239 /* We will come here only for signed operations. */
4240 if (op1_is_constant && HOST_BITS_PER_WIDE_INT >= size)
4242 unsigned HOST_WIDE_INT mh;
4243 int pre_shift, lgup, post_shift;
4244 HOST_WIDE_INT d = INTVAL (op1);
4245 rtx ml;
4247 if (d > 0)
4249 /* We could just as easily deal with negative constants here,
4250 but it does not seem worth the trouble for GCC 2.6. */
4251 if (EXACT_POWER_OF_2_OR_ZERO_P (d))
4253 pre_shift = floor_log2 (d);
4254 if (rem_flag)
4256 remainder = expand_binop (compute_mode, and_optab, op0,
4257 GEN_INT (((HOST_WIDE_INT) 1 << pre_shift) - 1),
4258 remainder, 0, OPTAB_LIB_WIDEN);
4259 if (remainder)
4260 return gen_lowpart (mode, remainder);
4262 quotient = expand_shift
4263 (RSHIFT_EXPR, compute_mode, op0,
4264 pre_shift, tquotient, 0);
4266 else
4268 rtx t1, t2, t3, t4;
4270 mh = choose_multiplier (d, size, size - 1,
4271 &ml, &post_shift, &lgup);
4272 gcc_assert (!mh);
4274 if (post_shift < BITS_PER_WORD
4275 && size - 1 < BITS_PER_WORD)
4277 t1 = expand_shift
4278 (RSHIFT_EXPR, compute_mode, op0,
4279 size - 1, NULL_RTX, 0);
4280 t2 = expand_binop (compute_mode, xor_optab, op0, t1,
4281 NULL_RTX, 0, OPTAB_WIDEN);
4282 extra_cost = (shift_cost[speed][compute_mode][post_shift]
4283 + shift_cost[speed][compute_mode][size - 1]
4284 + 2 * add_cost[speed][compute_mode]);
4285 t3 = expand_mult_highpart (compute_mode, t2, ml,
4286 NULL_RTX, 1,
4287 max_cost - extra_cost);
4288 if (t3 != 0)
4290 t4 = expand_shift
4291 (RSHIFT_EXPR, compute_mode, t3,
4292 post_shift, NULL_RTX, 1);
4293 quotient = expand_binop (compute_mode, xor_optab,
4294 t4, t1, tquotient, 0,
4295 OPTAB_WIDEN);
4300 else
4302 rtx nsign, t1, t2, t3, t4;
4303 t1 = force_operand (gen_rtx_PLUS (compute_mode,
4304 op0, constm1_rtx), NULL_RTX);
4305 t2 = expand_binop (compute_mode, ior_optab, op0, t1, NULL_RTX,
4306 0, OPTAB_WIDEN);
4307 nsign = expand_shift
4308 (RSHIFT_EXPR, compute_mode, t2,
4309 size - 1, NULL_RTX, 0);
4310 t3 = force_operand (gen_rtx_MINUS (compute_mode, t1, nsign),
4311 NULL_RTX);
4312 t4 = expand_divmod (0, TRUNC_DIV_EXPR, compute_mode, t3, op1,
4313 NULL_RTX, 0);
4314 if (t4)
4316 rtx t5;
4317 t5 = expand_unop (compute_mode, one_cmpl_optab, nsign,
4318 NULL_RTX, 0);
4319 quotient = force_operand (gen_rtx_PLUS (compute_mode,
4320 t4, t5),
4321 tquotient);
4326 if (quotient != 0)
4327 break;
4328 delete_insns_since (last);
4330 /* Try using an instruction that produces both the quotient and
4331 remainder, using truncation. We can easily compensate the quotient
4332 or remainder to get floor rounding, once we have the remainder.
4333 Notice that we compute also the final remainder value here,
4334 and return the result right away. */
4335 if (target == 0 || GET_MODE (target) != compute_mode)
4336 target = gen_reg_rtx (compute_mode);
4338 if (rem_flag)
4340 remainder
4341 = REG_P (target) ? target : gen_reg_rtx (compute_mode);
4342 quotient = gen_reg_rtx (compute_mode);
4344 else
4346 quotient
4347 = REG_P (target) ? target : gen_reg_rtx (compute_mode);
4348 remainder = gen_reg_rtx (compute_mode);
4351 if (expand_twoval_binop (sdivmod_optab, op0, op1,
4352 quotient, remainder, 0))
4354 /* This could be computed with a branch-less sequence.
4355 Save that for later. */
4356 rtx tem;
4357 rtx label = gen_label_rtx ();
4358 do_cmp_and_jump (remainder, const0_rtx, EQ, compute_mode, label);
4359 tem = expand_binop (compute_mode, xor_optab, op0, op1,
4360 NULL_RTX, 0, OPTAB_WIDEN);
4361 do_cmp_and_jump (tem, const0_rtx, GE, compute_mode, label);
4362 expand_dec (quotient, const1_rtx);
4363 expand_inc (remainder, op1);
4364 emit_label (label);
4365 return gen_lowpart (mode, rem_flag ? remainder : quotient);
4368 /* No luck with division elimination or divmod. Have to do it
4369 by conditionally adjusting op0 *and* the result. */
4371 rtx label1, label2, label3, label4, label5;
4372 rtx adjusted_op0;
4373 rtx tem;
4375 quotient = gen_reg_rtx (compute_mode);
4376 adjusted_op0 = copy_to_mode_reg (compute_mode, op0);
4377 label1 = gen_label_rtx ();
4378 label2 = gen_label_rtx ();
4379 label3 = gen_label_rtx ();
4380 label4 = gen_label_rtx ();
4381 label5 = gen_label_rtx ();
4382 do_cmp_and_jump (op1, const0_rtx, LT, compute_mode, label2);
4383 do_cmp_and_jump (adjusted_op0, const0_rtx, LT, compute_mode, label1);
4384 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
4385 quotient, 0, OPTAB_LIB_WIDEN);
4386 if (tem != quotient)
4387 emit_move_insn (quotient, tem);
4388 emit_jump_insn (gen_jump (label5));
4389 emit_barrier ();
4390 emit_label (label1);
4391 expand_inc (adjusted_op0, const1_rtx);
4392 emit_jump_insn (gen_jump (label4));
4393 emit_barrier ();
4394 emit_label (label2);
4395 do_cmp_and_jump (adjusted_op0, const0_rtx, GT, compute_mode, label3);
4396 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
4397 quotient, 0, OPTAB_LIB_WIDEN);
4398 if (tem != quotient)
4399 emit_move_insn (quotient, tem);
4400 emit_jump_insn (gen_jump (label5));
4401 emit_barrier ();
4402 emit_label (label3);
4403 expand_dec (adjusted_op0, const1_rtx);
4404 emit_label (label4);
4405 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
4406 quotient, 0, OPTAB_LIB_WIDEN);
4407 if (tem != quotient)
4408 emit_move_insn (quotient, tem);
4409 expand_dec (quotient, const1_rtx);
4410 emit_label (label5);
4412 break;
4414 case CEIL_DIV_EXPR:
4415 case CEIL_MOD_EXPR:
4416 if (unsignedp)
4418 if (op1_is_constant && EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1)))
4420 rtx t1, t2, t3;
4421 unsigned HOST_WIDE_INT d = INTVAL (op1);
4422 t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
4423 floor_log2 (d), tquotient, 1);
4424 t2 = expand_binop (compute_mode, and_optab, op0,
4425 GEN_INT (d - 1),
4426 NULL_RTX, 1, OPTAB_LIB_WIDEN);
4427 t3 = gen_reg_rtx (compute_mode);
4428 t3 = emit_store_flag (t3, NE, t2, const0_rtx,
4429 compute_mode, 1, 1);
4430 if (t3 == 0)
4432 rtx lab;
4433 lab = gen_label_rtx ();
4434 do_cmp_and_jump (t2, const0_rtx, EQ, compute_mode, lab);
4435 expand_inc (t1, const1_rtx);
4436 emit_label (lab);
4437 quotient = t1;
4439 else
4440 quotient = force_operand (gen_rtx_PLUS (compute_mode,
4441 t1, t3),
4442 tquotient);
4443 break;
4446 /* Try using an instruction that produces both the quotient and
4447 remainder, using truncation. We can easily compensate the
4448 quotient or remainder to get ceiling rounding, once we have the
4449 remainder. Notice that we compute also the final remainder
4450 value here, and return the result right away. */
4451 if (target == 0 || GET_MODE (target) != compute_mode)
4452 target = gen_reg_rtx (compute_mode);
4454 if (rem_flag)
4456 remainder = (REG_P (target)
4457 ? target : gen_reg_rtx (compute_mode));
4458 quotient = gen_reg_rtx (compute_mode);
4460 else
4462 quotient = (REG_P (target)
4463 ? target : gen_reg_rtx (compute_mode));
4464 remainder = gen_reg_rtx (compute_mode);
4467 if (expand_twoval_binop (udivmod_optab, op0, op1, quotient,
4468 remainder, 1))
4470 /* This could be computed with a branch-less sequence.
4471 Save that for later. */
4472 rtx label = gen_label_rtx ();
4473 do_cmp_and_jump (remainder, const0_rtx, EQ,
4474 compute_mode, label);
4475 expand_inc (quotient, const1_rtx);
4476 expand_dec (remainder, op1);
4477 emit_label (label);
4478 return gen_lowpart (mode, rem_flag ? remainder : quotient);
4481 /* No luck with division elimination or divmod. Have to do it
4482 by conditionally adjusting op0 *and* the result. */
4484 rtx label1, label2;
4485 rtx adjusted_op0, tem;
4487 quotient = gen_reg_rtx (compute_mode);
4488 adjusted_op0 = copy_to_mode_reg (compute_mode, op0);
4489 label1 = gen_label_rtx ();
4490 label2 = gen_label_rtx ();
4491 do_cmp_and_jump (adjusted_op0, const0_rtx, NE,
4492 compute_mode, label1);
4493 emit_move_insn (quotient, const0_rtx);
4494 emit_jump_insn (gen_jump (label2));
4495 emit_barrier ();
4496 emit_label (label1);
4497 expand_dec (adjusted_op0, const1_rtx);
4498 tem = expand_binop (compute_mode, udiv_optab, adjusted_op0, op1,
4499 quotient, 1, OPTAB_LIB_WIDEN);
4500 if (tem != quotient)
4501 emit_move_insn (quotient, tem);
4502 expand_inc (quotient, const1_rtx);
4503 emit_label (label2);
4506 else /* signed */
4508 if (op1_is_constant && EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1))
4509 && INTVAL (op1) >= 0)
4511 /* This is extremely similar to the code for the unsigned case
4512 above. For 2.7 we should merge these variants, but for
4513 2.6.1 I don't want to touch the code for unsigned since that
4514 get used in C. The signed case will only be used by other
4515 languages (Ada). */
4517 rtx t1, t2, t3;
4518 unsigned HOST_WIDE_INT d = INTVAL (op1);
4519 t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
4520 floor_log2 (d), tquotient, 0);
4521 t2 = expand_binop (compute_mode, and_optab, op0,
4522 GEN_INT (d - 1),
4523 NULL_RTX, 1, OPTAB_LIB_WIDEN);
4524 t3 = gen_reg_rtx (compute_mode);
4525 t3 = emit_store_flag (t3, NE, t2, const0_rtx,
4526 compute_mode, 1, 1);
4527 if (t3 == 0)
4529 rtx lab;
4530 lab = gen_label_rtx ();
4531 do_cmp_and_jump (t2, const0_rtx, EQ, compute_mode, lab);
4532 expand_inc (t1, const1_rtx);
4533 emit_label (lab);
4534 quotient = t1;
4536 else
4537 quotient = force_operand (gen_rtx_PLUS (compute_mode,
4538 t1, t3),
4539 tquotient);
4540 break;
4543 /* Try using an instruction that produces both the quotient and
4544 remainder, using truncation. We can easily compensate the
4545 quotient or remainder to get ceiling rounding, once we have the
4546 remainder. Notice that we compute also the final remainder
4547 value here, and return the result right away. */
4548 if (target == 0 || GET_MODE (target) != compute_mode)
4549 target = gen_reg_rtx (compute_mode);
4550 if (rem_flag)
4552 remainder= (REG_P (target)
4553 ? target : gen_reg_rtx (compute_mode));
4554 quotient = gen_reg_rtx (compute_mode);
4556 else
4558 quotient = (REG_P (target)
4559 ? target : gen_reg_rtx (compute_mode));
4560 remainder = gen_reg_rtx (compute_mode);
4563 if (expand_twoval_binop (sdivmod_optab, op0, op1, quotient,
4564 remainder, 0))
4566 /* This could be computed with a branch-less sequence.
4567 Save that for later. */
4568 rtx tem;
4569 rtx label = gen_label_rtx ();
4570 do_cmp_and_jump (remainder, const0_rtx, EQ,
4571 compute_mode, label);
4572 tem = expand_binop (compute_mode, xor_optab, op0, op1,
4573 NULL_RTX, 0, OPTAB_WIDEN);
4574 do_cmp_and_jump (tem, const0_rtx, LT, compute_mode, label);
4575 expand_inc (quotient, const1_rtx);
4576 expand_dec (remainder, op1);
4577 emit_label (label);
4578 return gen_lowpart (mode, rem_flag ? remainder : quotient);
4581 /* No luck with division elimination or divmod. Have to do it
4582 by conditionally adjusting op0 *and* the result. */
4584 rtx label1, label2, label3, label4, label5;
4585 rtx adjusted_op0;
4586 rtx tem;
4588 quotient = gen_reg_rtx (compute_mode);
4589 adjusted_op0 = copy_to_mode_reg (compute_mode, op0);
4590 label1 = gen_label_rtx ();
4591 label2 = gen_label_rtx ();
4592 label3 = gen_label_rtx ();
4593 label4 = gen_label_rtx ();
4594 label5 = gen_label_rtx ();
4595 do_cmp_and_jump (op1, const0_rtx, LT, compute_mode, label2);
4596 do_cmp_and_jump (adjusted_op0, const0_rtx, GT,
4597 compute_mode, label1);
4598 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
4599 quotient, 0, OPTAB_LIB_WIDEN);
4600 if (tem != quotient)
4601 emit_move_insn (quotient, tem);
4602 emit_jump_insn (gen_jump (label5));
4603 emit_barrier ();
4604 emit_label (label1);
4605 expand_dec (adjusted_op0, const1_rtx);
4606 emit_jump_insn (gen_jump (label4));
4607 emit_barrier ();
4608 emit_label (label2);
4609 do_cmp_and_jump (adjusted_op0, const0_rtx, LT,
4610 compute_mode, label3);
4611 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
4612 quotient, 0, OPTAB_LIB_WIDEN);
4613 if (tem != quotient)
4614 emit_move_insn (quotient, tem);
4615 emit_jump_insn (gen_jump (label5));
4616 emit_barrier ();
4617 emit_label (label3);
4618 expand_inc (adjusted_op0, const1_rtx);
4619 emit_label (label4);
4620 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
4621 quotient, 0, OPTAB_LIB_WIDEN);
4622 if (tem != quotient)
4623 emit_move_insn (quotient, tem);
4624 expand_inc (quotient, const1_rtx);
4625 emit_label (label5);
4628 break;
4630 case EXACT_DIV_EXPR:
4631 if (op1_is_constant && HOST_BITS_PER_WIDE_INT >= size)
4633 HOST_WIDE_INT d = INTVAL (op1);
4634 unsigned HOST_WIDE_INT ml;
4635 int pre_shift;
4636 rtx t1;
4638 pre_shift = floor_log2 (d & -d);
4639 ml = invert_mod2n (d >> pre_shift, size);
4640 t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
4641 pre_shift, NULL_RTX, unsignedp);
4642 quotient = expand_mult (compute_mode, t1,
4643 gen_int_mode (ml, compute_mode),
4644 NULL_RTX, 1);
4646 insn = get_last_insn ();
4647 set_unique_reg_note (insn,
4648 REG_EQUAL,
4649 gen_rtx_fmt_ee (unsignedp ? UDIV : DIV,
4650 compute_mode,
4651 op0, op1));
4653 break;
4655 case ROUND_DIV_EXPR:
4656 case ROUND_MOD_EXPR:
4657 if (unsignedp)
4659 rtx tem;
4660 rtx label;
4661 label = gen_label_rtx ();
4662 quotient = gen_reg_rtx (compute_mode);
4663 remainder = gen_reg_rtx (compute_mode);
4664 if (expand_twoval_binop (udivmod_optab, op0, op1, quotient, remainder, 1) == 0)
4666 rtx tem;
4667 quotient = expand_binop (compute_mode, udiv_optab, op0, op1,
4668 quotient, 1, OPTAB_LIB_WIDEN);
4669 tem = expand_mult (compute_mode, quotient, op1, NULL_RTX, 1);
4670 remainder = expand_binop (compute_mode, sub_optab, op0, tem,
4671 remainder, 1, OPTAB_LIB_WIDEN);
4673 tem = plus_constant (op1, -1);
4674 tem = expand_shift (RSHIFT_EXPR, compute_mode, tem, 1, NULL_RTX, 1);
4675 do_cmp_and_jump (remainder, tem, LEU, compute_mode, label);
4676 expand_inc (quotient, const1_rtx);
4677 expand_dec (remainder, op1);
4678 emit_label (label);
4680 else
4682 rtx abs_rem, abs_op1, tem, mask;
4683 rtx label;
4684 label = gen_label_rtx ();
4685 quotient = gen_reg_rtx (compute_mode);
4686 remainder = gen_reg_rtx (compute_mode);
4687 if (expand_twoval_binop (sdivmod_optab, op0, op1, quotient, remainder, 0) == 0)
4689 rtx tem;
4690 quotient = expand_binop (compute_mode, sdiv_optab, op0, op1,
4691 quotient, 0, OPTAB_LIB_WIDEN);
4692 tem = expand_mult (compute_mode, quotient, op1, NULL_RTX, 0);
4693 remainder = expand_binop (compute_mode, sub_optab, op0, tem,
4694 remainder, 0, OPTAB_LIB_WIDEN);
4696 abs_rem = expand_abs (compute_mode, remainder, NULL_RTX, 1, 0);
4697 abs_op1 = expand_abs (compute_mode, op1, NULL_RTX, 1, 0);
4698 tem = expand_shift (LSHIFT_EXPR, compute_mode, abs_rem,
4699 1, NULL_RTX, 1);
4700 do_cmp_and_jump (tem, abs_op1, LTU, compute_mode, label);
4701 tem = expand_binop (compute_mode, xor_optab, op0, op1,
4702 NULL_RTX, 0, OPTAB_WIDEN);
4703 mask = expand_shift (RSHIFT_EXPR, compute_mode, tem,
4704 size - 1, NULL_RTX, 0);
4705 tem = expand_binop (compute_mode, xor_optab, mask, const1_rtx,
4706 NULL_RTX, 0, OPTAB_WIDEN);
4707 tem = expand_binop (compute_mode, sub_optab, tem, mask,
4708 NULL_RTX, 0, OPTAB_WIDEN);
4709 expand_inc (quotient, tem);
4710 tem = expand_binop (compute_mode, xor_optab, mask, op1,
4711 NULL_RTX, 0, OPTAB_WIDEN);
4712 tem = expand_binop (compute_mode, sub_optab, tem, mask,
4713 NULL_RTX, 0, OPTAB_WIDEN);
4714 expand_dec (remainder, tem);
4715 emit_label (label);
4717 return gen_lowpart (mode, rem_flag ? remainder : quotient);
4719 default:
4720 gcc_unreachable ();
4723 if (quotient == 0)
4725 if (target && GET_MODE (target) != compute_mode)
4726 target = 0;
4728 if (rem_flag)
4730 /* Try to produce the remainder without producing the quotient.
4731 If we seem to have a divmod pattern that does not require widening,
4732 don't try widening here. We should really have a WIDEN argument
4733 to expand_twoval_binop, since what we'd really like to do here is
4734 1) try a mod insn in compute_mode
4735 2) try a divmod insn in compute_mode
4736 3) try a div insn in compute_mode and multiply-subtract to get
4737 remainder
4738 4) try the same things with widening allowed. */
4739 remainder
4740 = sign_expand_binop (compute_mode, umod_optab, smod_optab,
4741 op0, op1, target,
4742 unsignedp,
4743 ((optab_handler (optab2, compute_mode)
4744 != CODE_FOR_nothing)
4745 ? OPTAB_DIRECT : OPTAB_WIDEN));
4746 if (remainder == 0)
4748 /* No luck there. Can we do remainder and divide at once
4749 without a library call? */
4750 remainder = gen_reg_rtx (compute_mode);
4751 if (! expand_twoval_binop ((unsignedp
4752 ? udivmod_optab
4753 : sdivmod_optab),
4754 op0, op1,
4755 NULL_RTX, remainder, unsignedp))
4756 remainder = 0;
4759 if (remainder)
4760 return gen_lowpart (mode, remainder);
4763 /* Produce the quotient. Try a quotient insn, but not a library call.
4764 If we have a divmod in this mode, use it in preference to widening
4765 the div (for this test we assume it will not fail). Note that optab2
4766 is set to the one of the two optabs that the call below will use. */
4767 quotient
4768 = sign_expand_binop (compute_mode, udiv_optab, sdiv_optab,
4769 op0, op1, rem_flag ? NULL_RTX : target,
4770 unsignedp,
4771 ((optab_handler (optab2, compute_mode)
4772 != CODE_FOR_nothing)
4773 ? OPTAB_DIRECT : OPTAB_WIDEN));
4775 if (quotient == 0)
4777 /* No luck there. Try a quotient-and-remainder insn,
4778 keeping the quotient alone. */
4779 quotient = gen_reg_rtx (compute_mode);
4780 if (! expand_twoval_binop (unsignedp ? udivmod_optab : sdivmod_optab,
4781 op0, op1,
4782 quotient, NULL_RTX, unsignedp))
4784 quotient = 0;
4785 if (! rem_flag)
4786 /* Still no luck. If we are not computing the remainder,
4787 use a library call for the quotient. */
4788 quotient = sign_expand_binop (compute_mode,
4789 udiv_optab, sdiv_optab,
4790 op0, op1, target,
4791 unsignedp, OPTAB_LIB_WIDEN);
4796 if (rem_flag)
4798 if (target && GET_MODE (target) != compute_mode)
4799 target = 0;
4801 if (quotient == 0)
4803 /* No divide instruction either. Use library for remainder. */
4804 remainder = sign_expand_binop (compute_mode, umod_optab, smod_optab,
4805 op0, op1, target,
4806 unsignedp, OPTAB_LIB_WIDEN);
4807 /* No remainder function. Try a quotient-and-remainder
4808 function, keeping the remainder. */
4809 if (!remainder)
4811 remainder = gen_reg_rtx (compute_mode);
4812 if (!expand_twoval_binop_libfunc
4813 (unsignedp ? udivmod_optab : sdivmod_optab,
4814 op0, op1,
4815 NULL_RTX, remainder,
4816 unsignedp ? UMOD : MOD))
4817 remainder = NULL_RTX;
4820 else
4822 /* We divided. Now finish doing X - Y * (X / Y). */
4823 remainder = expand_mult (compute_mode, quotient, op1,
4824 NULL_RTX, unsignedp);
4825 remainder = expand_binop (compute_mode, sub_optab, op0,
4826 remainder, target, unsignedp,
4827 OPTAB_LIB_WIDEN);
4831 return gen_lowpart (mode, rem_flag ? remainder : quotient);
4834 /* Return a tree node with data type TYPE, describing the value of X.
4835 Usually this is an VAR_DECL, if there is no obvious better choice.
4836 X may be an expression, however we only support those expressions
4837 generated by loop.c. */
4839 tree
4840 make_tree (tree type, rtx x)
4842 tree t;
4844 switch (GET_CODE (x))
4846 case CONST_INT:
4848 HOST_WIDE_INT hi = 0;
4850 if (INTVAL (x) < 0
4851 && !(TYPE_UNSIGNED (type)
4852 && (GET_MODE_BITSIZE (TYPE_MODE (type))
4853 < HOST_BITS_PER_WIDE_INT)))
4854 hi = -1;
4856 t = build_int_cst_wide (type, INTVAL (x), hi);
4858 return t;
4861 case CONST_DOUBLE:
4862 if (GET_MODE (x) == VOIDmode)
4863 t = build_int_cst_wide (type,
4864 CONST_DOUBLE_LOW (x), CONST_DOUBLE_HIGH (x));
4865 else
4867 REAL_VALUE_TYPE d;
4869 REAL_VALUE_FROM_CONST_DOUBLE (d, x);
4870 t = build_real (type, d);
4873 return t;
4875 case CONST_VECTOR:
4877 int units = CONST_VECTOR_NUNITS (x);
4878 tree itype = TREE_TYPE (type);
4879 tree t = NULL_TREE;
4880 int i;
4883 /* Build a tree with vector elements. */
4884 for (i = units - 1; i >= 0; --i)
4886 rtx elt = CONST_VECTOR_ELT (x, i);
4887 t = tree_cons (NULL_TREE, make_tree (itype, elt), t);
4890 return build_vector (type, t);
4893 case PLUS:
4894 return fold_build2 (PLUS_EXPR, type, make_tree (type, XEXP (x, 0)),
4895 make_tree (type, XEXP (x, 1)));
4897 case MINUS:
4898 return fold_build2 (MINUS_EXPR, type, make_tree (type, XEXP (x, 0)),
4899 make_tree (type, XEXP (x, 1)));
4901 case NEG:
4902 return fold_build1 (NEGATE_EXPR, type, make_tree (type, XEXP (x, 0)));
4904 case MULT:
4905 return fold_build2 (MULT_EXPR, type, make_tree (type, XEXP (x, 0)),
4906 make_tree (type, XEXP (x, 1)));
4908 case ASHIFT:
4909 return fold_build2 (LSHIFT_EXPR, type, make_tree (type, XEXP (x, 0)),
4910 make_tree (type, XEXP (x, 1)));
4912 case LSHIFTRT:
4913 t = unsigned_type_for (type);
4914 return fold_convert (type, build2 (RSHIFT_EXPR, t,
4915 make_tree (t, XEXP (x, 0)),
4916 make_tree (type, XEXP (x, 1))));
4918 case ASHIFTRT:
4919 t = signed_type_for (type);
4920 return fold_convert (type, build2 (RSHIFT_EXPR, t,
4921 make_tree (t, XEXP (x, 0)),
4922 make_tree (type, XEXP (x, 1))));
4924 case DIV:
4925 if (TREE_CODE (type) != REAL_TYPE)
4926 t = signed_type_for (type);
4927 else
4928 t = type;
4930 return fold_convert (type, build2 (TRUNC_DIV_EXPR, t,
4931 make_tree (t, XEXP (x, 0)),
4932 make_tree (t, XEXP (x, 1))));
4933 case UDIV:
4934 t = unsigned_type_for (type);
4935 return fold_convert (type, build2 (TRUNC_DIV_EXPR, t,
4936 make_tree (t, XEXP (x, 0)),
4937 make_tree (t, XEXP (x, 1))));
4939 case SIGN_EXTEND:
4940 case ZERO_EXTEND:
4941 t = lang_hooks.types.type_for_mode (GET_MODE (XEXP (x, 0)),
4942 GET_CODE (x) == ZERO_EXTEND);
4943 return fold_convert (type, make_tree (t, XEXP (x, 0)));
4945 case CONST:
4946 return make_tree (type, XEXP (x, 0));
4948 case SYMBOL_REF:
4949 t = SYMBOL_REF_DECL (x);
4950 if (t)
4951 return fold_convert (type, build_fold_addr_expr (t));
4952 /* else fall through. */
4954 default:
4955 t = build_decl (RTL_LOCATION (x), VAR_DECL, NULL_TREE, type);
4957 /* If TYPE is a POINTER_TYPE, we might need to convert X from
4958 address mode to pointer mode. */
4959 if (POINTER_TYPE_P (type))
4960 x = convert_memory_address_addr_space
4961 (TYPE_MODE (type), x, TYPE_ADDR_SPACE (TREE_TYPE (type)));
4963 /* Note that we do *not* use SET_DECL_RTL here, because we do not
4964 want set_decl_rtl to go adjusting REG_ATTRS for this temporary. */
4965 t->decl_with_rtl.rtl = x;
4967 return t;
4971 /* Compute the logical-and of OP0 and OP1, storing it in TARGET
4972 and returning TARGET.
4974 If TARGET is 0, a pseudo-register or constant is returned. */
4977 expand_and (enum machine_mode mode, rtx op0, rtx op1, rtx target)
4979 rtx tem = 0;
4981 if (GET_MODE (op0) == VOIDmode && GET_MODE (op1) == VOIDmode)
4982 tem = simplify_binary_operation (AND, mode, op0, op1);
4983 if (tem == 0)
4984 tem = expand_binop (mode, and_optab, op0, op1, target, 0, OPTAB_LIB_WIDEN);
4986 if (target == 0)
4987 target = tem;
4988 else if (tem != target)
4989 emit_move_insn (target, tem);
4990 return target;
4993 /* Helper function for emit_store_flag. */
4994 static rtx
4995 emit_cstore (rtx target, enum insn_code icode, enum rtx_code code,
4996 enum machine_mode mode, enum machine_mode compare_mode,
4997 int unsignedp, rtx x, rtx y, int normalizep,
4998 enum machine_mode target_mode)
5000 struct expand_operand ops[4];
5001 rtx op0, last, comparison, subtarget;
5002 enum machine_mode result_mode = insn_data[(int) icode].operand[0].mode;
5004 last = get_last_insn ();
5005 x = prepare_operand (icode, x, 2, mode, compare_mode, unsignedp);
5006 y = prepare_operand (icode, y, 3, mode, compare_mode, unsignedp);
5007 if (!x || !y)
5009 delete_insns_since (last);
5010 return NULL_RTX;
5013 if (target_mode == VOIDmode)
5014 target_mode = result_mode;
5015 if (!target)
5016 target = gen_reg_rtx (target_mode);
5018 comparison = gen_rtx_fmt_ee (code, result_mode, x, y);
5020 create_output_operand (&ops[0], optimize ? NULL_RTX : target, result_mode);
5021 create_fixed_operand (&ops[1], comparison);
5022 create_fixed_operand (&ops[2], x);
5023 create_fixed_operand (&ops[3], y);
5024 if (!maybe_expand_insn (icode, 4, ops))
5026 delete_insns_since (last);
5027 return NULL_RTX;
5029 subtarget = ops[0].value;
5031 /* If we are converting to a wider mode, first convert to
5032 TARGET_MODE, then normalize. This produces better combining
5033 opportunities on machines that have a SIGN_EXTRACT when we are
5034 testing a single bit. This mostly benefits the 68k.
5036 If STORE_FLAG_VALUE does not have the sign bit set when
5037 interpreted in MODE, we can do this conversion as unsigned, which
5038 is usually more efficient. */
5039 if (GET_MODE_SIZE (target_mode) > GET_MODE_SIZE (result_mode))
5041 convert_move (target, subtarget,
5042 (GET_MODE_BITSIZE (result_mode) <= HOST_BITS_PER_WIDE_INT)
5043 && 0 == (STORE_FLAG_VALUE
5044 & ((HOST_WIDE_INT) 1
5045 << (GET_MODE_BITSIZE (result_mode) -1))));
5046 op0 = target;
5047 result_mode = target_mode;
5049 else
5050 op0 = subtarget;
5052 /* If we want to keep subexpressions around, don't reuse our last
5053 target. */
5054 if (optimize)
5055 subtarget = 0;
5057 /* Now normalize to the proper value in MODE. Sometimes we don't
5058 have to do anything. */
5059 if (normalizep == 0 || normalizep == STORE_FLAG_VALUE)
5061 /* STORE_FLAG_VALUE might be the most negative number, so write
5062 the comparison this way to avoid a compiler-time warning. */
5063 else if (- normalizep == STORE_FLAG_VALUE)
5064 op0 = expand_unop (result_mode, neg_optab, op0, subtarget, 0);
5066 /* We don't want to use STORE_FLAG_VALUE < 0 below since this makes
5067 it hard to use a value of just the sign bit due to ANSI integer
5068 constant typing rules. */
5069 else if (GET_MODE_BITSIZE (result_mode) <= HOST_BITS_PER_WIDE_INT
5070 && (STORE_FLAG_VALUE
5071 & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (result_mode) - 1))))
5072 op0 = expand_shift (RSHIFT_EXPR, result_mode, op0,
5073 GET_MODE_BITSIZE (result_mode) - 1, subtarget,
5074 normalizep == 1);
5075 else
5077 gcc_assert (STORE_FLAG_VALUE & 1);
5079 op0 = expand_and (result_mode, op0, const1_rtx, subtarget);
5080 if (normalizep == -1)
5081 op0 = expand_unop (result_mode, neg_optab, op0, op0, 0);
5084 /* If we were converting to a smaller mode, do the conversion now. */
5085 if (target_mode != result_mode)
5087 convert_move (target, op0, 0);
5088 return target;
5090 else
5091 return op0;
5095 /* A subroutine of emit_store_flag only including "tricks" that do not
5096 need a recursive call. These are kept separate to avoid infinite
5097 loops. */
5099 static rtx
5100 emit_store_flag_1 (rtx target, enum rtx_code code, rtx op0, rtx op1,
5101 enum machine_mode mode, int unsignedp, int normalizep,
5102 enum machine_mode target_mode)
5104 rtx subtarget;
5105 enum insn_code icode;
5106 enum machine_mode compare_mode;
5107 enum mode_class mclass;
5108 enum rtx_code scode;
5109 rtx tem;
5111 if (unsignedp)
5112 code = unsigned_condition (code);
5113 scode = swap_condition (code);
5115 /* If one operand is constant, make it the second one. Only do this
5116 if the other operand is not constant as well. */
5118 if (swap_commutative_operands_p (op0, op1))
5120 tem = op0;
5121 op0 = op1;
5122 op1 = tem;
5123 code = swap_condition (code);
5126 if (mode == VOIDmode)
5127 mode = GET_MODE (op0);
5129 /* For some comparisons with 1 and -1, we can convert this to
5130 comparisons with zero. This will often produce more opportunities for
5131 store-flag insns. */
5133 switch (code)
5135 case LT:
5136 if (op1 == const1_rtx)
5137 op1 = const0_rtx, code = LE;
5138 break;
5139 case LE:
5140 if (op1 == constm1_rtx)
5141 op1 = const0_rtx, code = LT;
5142 break;
5143 case GE:
5144 if (op1 == const1_rtx)
5145 op1 = const0_rtx, code = GT;
5146 break;
5147 case GT:
5148 if (op1 == constm1_rtx)
5149 op1 = const0_rtx, code = GE;
5150 break;
5151 case GEU:
5152 if (op1 == const1_rtx)
5153 op1 = const0_rtx, code = NE;
5154 break;
5155 case LTU:
5156 if (op1 == const1_rtx)
5157 op1 = const0_rtx, code = EQ;
5158 break;
5159 default:
5160 break;
5163 /* If we are comparing a double-word integer with zero or -1, we can
5164 convert the comparison into one involving a single word. */
5165 if (GET_MODE_BITSIZE (mode) == BITS_PER_WORD * 2
5166 && GET_MODE_CLASS (mode) == MODE_INT
5167 && (!MEM_P (op0) || ! MEM_VOLATILE_P (op0)))
5169 if ((code == EQ || code == NE)
5170 && (op1 == const0_rtx || op1 == constm1_rtx))
5172 rtx op00, op01;
5174 /* Do a logical OR or AND of the two words and compare the
5175 result. */
5176 op00 = simplify_gen_subreg (word_mode, op0, mode, 0);
5177 op01 = simplify_gen_subreg (word_mode, op0, mode, UNITS_PER_WORD);
5178 tem = expand_binop (word_mode,
5179 op1 == const0_rtx ? ior_optab : and_optab,
5180 op00, op01, NULL_RTX, unsignedp,
5181 OPTAB_DIRECT);
5183 if (tem != 0)
5184 tem = emit_store_flag (NULL_RTX, code, tem, op1, word_mode,
5185 unsignedp, normalizep);
5187 else if ((code == LT || code == GE) && op1 == const0_rtx)
5189 rtx op0h;
5191 /* If testing the sign bit, can just test on high word. */
5192 op0h = simplify_gen_subreg (word_mode, op0, mode,
5193 subreg_highpart_offset (word_mode,
5194 mode));
5195 tem = emit_store_flag (NULL_RTX, code, op0h, op1, word_mode,
5196 unsignedp, normalizep);
5198 else
5199 tem = NULL_RTX;
5201 if (tem)
5203 if (target_mode == VOIDmode || GET_MODE (tem) == target_mode)
5204 return tem;
5205 if (!target)
5206 target = gen_reg_rtx (target_mode);
5208 convert_move (target, tem,
5209 0 == ((normalizep ? normalizep : STORE_FLAG_VALUE)
5210 & ((HOST_WIDE_INT) 1
5211 << (GET_MODE_BITSIZE (word_mode) -1))));
5212 return target;
5216 /* If this is A < 0 or A >= 0, we can do this by taking the ones
5217 complement of A (for GE) and shifting the sign bit to the low bit. */
5218 if (op1 == const0_rtx && (code == LT || code == GE)
5219 && GET_MODE_CLASS (mode) == MODE_INT
5220 && (normalizep || STORE_FLAG_VALUE == 1
5221 || (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
5222 && ((STORE_FLAG_VALUE & GET_MODE_MASK (mode))
5223 == ((unsigned HOST_WIDE_INT) 1
5224 << (GET_MODE_BITSIZE (mode) - 1))))))
5226 subtarget = target;
5228 if (!target)
5229 target_mode = mode;
5231 /* If the result is to be wider than OP0, it is best to convert it
5232 first. If it is to be narrower, it is *incorrect* to convert it
5233 first. */
5234 else if (GET_MODE_SIZE (target_mode) > GET_MODE_SIZE (mode))
5236 op0 = convert_modes (target_mode, mode, op0, 0);
5237 mode = target_mode;
5240 if (target_mode != mode)
5241 subtarget = 0;
5243 if (code == GE)
5244 op0 = expand_unop (mode, one_cmpl_optab, op0,
5245 ((STORE_FLAG_VALUE == 1 || normalizep)
5246 ? 0 : subtarget), 0);
5248 if (STORE_FLAG_VALUE == 1 || normalizep)
5249 /* If we are supposed to produce a 0/1 value, we want to do
5250 a logical shift from the sign bit to the low-order bit; for
5251 a -1/0 value, we do an arithmetic shift. */
5252 op0 = expand_shift (RSHIFT_EXPR, mode, op0,
5253 GET_MODE_BITSIZE (mode) - 1,
5254 subtarget, normalizep != -1);
5256 if (mode != target_mode)
5257 op0 = convert_modes (target_mode, mode, op0, 0);
5259 return op0;
5262 mclass = GET_MODE_CLASS (mode);
5263 for (compare_mode = mode; compare_mode != VOIDmode;
5264 compare_mode = GET_MODE_WIDER_MODE (compare_mode))
5266 enum machine_mode optab_mode = mclass == MODE_CC ? CCmode : compare_mode;
5267 icode = optab_handler (cstore_optab, optab_mode);
5268 if (icode != CODE_FOR_nothing)
5270 do_pending_stack_adjust ();
5271 tem = emit_cstore (target, icode, code, mode, compare_mode,
5272 unsignedp, op0, op1, normalizep, target_mode);
5273 if (tem)
5274 return tem;
5276 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
5278 tem = emit_cstore (target, icode, scode, mode, compare_mode,
5279 unsignedp, op1, op0, normalizep, target_mode);
5280 if (tem)
5281 return tem;
5283 break;
5287 return 0;
5290 /* Emit a store-flags instruction for comparison CODE on OP0 and OP1
5291 and storing in TARGET. Normally return TARGET.
5292 Return 0 if that cannot be done.
5294 MODE is the mode to use for OP0 and OP1 should they be CONST_INTs. If
5295 it is VOIDmode, they cannot both be CONST_INT.
5297 UNSIGNEDP is for the case where we have to widen the operands
5298 to perform the operation. It says to use zero-extension.
5300 NORMALIZEP is 1 if we should convert the result to be either zero
5301 or one. Normalize is -1 if we should convert the result to be
5302 either zero or -1. If NORMALIZEP is zero, the result will be left
5303 "raw" out of the scc insn. */
5306 emit_store_flag (rtx target, enum rtx_code code, rtx op0, rtx op1,
5307 enum machine_mode mode, int unsignedp, int normalizep)
5309 enum machine_mode target_mode = target ? GET_MODE (target) : VOIDmode;
5310 enum rtx_code rcode;
5311 rtx subtarget;
5312 rtx tem, last, trueval;
5314 tem = emit_store_flag_1 (target, code, op0, op1, mode, unsignedp, normalizep,
5315 target_mode);
5316 if (tem)
5317 return tem;
5319 /* If we reached here, we can't do this with a scc insn, however there
5320 are some comparisons that can be done in other ways. Don't do any
5321 of these cases if branches are very cheap. */
5322 if (BRANCH_COST (optimize_insn_for_speed_p (), false) == 0)
5323 return 0;
5325 /* See what we need to return. We can only return a 1, -1, or the
5326 sign bit. */
5328 if (normalizep == 0)
5330 if (STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
5331 normalizep = STORE_FLAG_VALUE;
5333 else if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
5334 && ((STORE_FLAG_VALUE & GET_MODE_MASK (mode))
5335 == (unsigned HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1)))
5337 else
5338 return 0;
5341 last = get_last_insn ();
5343 /* If optimizing, use different pseudo registers for each insn, instead
5344 of reusing the same pseudo. This leads to better CSE, but slows
5345 down the compiler, since there are more pseudos */
5346 subtarget = (!optimize
5347 && (target_mode == mode)) ? target : NULL_RTX;
5348 trueval = GEN_INT (normalizep ? normalizep : STORE_FLAG_VALUE);
5350 /* For floating-point comparisons, try the reverse comparison or try
5351 changing the "orderedness" of the comparison. */
5352 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
5354 enum rtx_code first_code;
5355 bool and_them;
5357 rcode = reverse_condition_maybe_unordered (code);
5358 if (can_compare_p (rcode, mode, ccp_store_flag)
5359 && (code == ORDERED || code == UNORDERED
5360 || (! HONOR_NANS (mode) && (code == LTGT || code == UNEQ))
5361 || (! HONOR_SNANS (mode) && (code == EQ || code == NE))))
5363 int want_add = ((STORE_FLAG_VALUE == 1 && normalizep == -1)
5364 || (STORE_FLAG_VALUE == -1 && normalizep == 1));
5366 /* For the reverse comparison, use either an addition or a XOR. */
5367 if (want_add
5368 && rtx_cost (GEN_INT (normalizep), PLUS,
5369 optimize_insn_for_speed_p ()) == 0)
5371 tem = emit_store_flag_1 (subtarget, rcode, op0, op1, mode, 0,
5372 STORE_FLAG_VALUE, target_mode);
5373 if (tem)
5374 return expand_binop (target_mode, add_optab, tem,
5375 GEN_INT (normalizep),
5376 target, 0, OPTAB_WIDEN);
5378 else if (!want_add
5379 && rtx_cost (trueval, XOR,
5380 optimize_insn_for_speed_p ()) == 0)
5382 tem = emit_store_flag_1 (subtarget, rcode, op0, op1, mode, 0,
5383 normalizep, target_mode);
5384 if (tem)
5385 return expand_binop (target_mode, xor_optab, tem, trueval,
5386 target, INTVAL (trueval) >= 0, OPTAB_WIDEN);
5390 delete_insns_since (last);
5392 /* Cannot split ORDERED and UNORDERED, only try the above trick. */
5393 if (code == ORDERED || code == UNORDERED)
5394 return 0;
5396 and_them = split_comparison (code, mode, &first_code, &code);
5398 /* If there are no NaNs, the first comparison should always fall through.
5399 Effectively change the comparison to the other one. */
5400 if (!HONOR_NANS (mode))
5402 gcc_assert (first_code == (and_them ? ORDERED : UNORDERED));
5403 return emit_store_flag_1 (target, code, op0, op1, mode, 0, normalizep,
5404 target_mode);
5407 #ifdef HAVE_conditional_move
5408 /* Try using a setcc instruction for ORDERED/UNORDERED, followed by a
5409 conditional move. */
5410 tem = emit_store_flag_1 (subtarget, first_code, op0, op1, mode, 0,
5411 normalizep, target_mode);
5412 if (tem == 0)
5413 return 0;
5415 if (and_them)
5416 tem = emit_conditional_move (target, code, op0, op1, mode,
5417 tem, const0_rtx, GET_MODE (tem), 0);
5418 else
5419 tem = emit_conditional_move (target, code, op0, op1, mode,
5420 trueval, tem, GET_MODE (tem), 0);
5422 if (tem == 0)
5423 delete_insns_since (last);
5424 return tem;
5425 #else
5426 return 0;
5427 #endif
5430 /* The remaining tricks only apply to integer comparisons. */
5432 if (GET_MODE_CLASS (mode) != MODE_INT)
5433 return 0;
5435 /* If this is an equality comparison of integers, we can try to exclusive-or
5436 (or subtract) the two operands and use a recursive call to try the
5437 comparison with zero. Don't do any of these cases if branches are
5438 very cheap. */
5440 if ((code == EQ || code == NE) && op1 != const0_rtx)
5442 tem = expand_binop (mode, xor_optab, op0, op1, subtarget, 1,
5443 OPTAB_WIDEN);
5445 if (tem == 0)
5446 tem = expand_binop (mode, sub_optab, op0, op1, subtarget, 1,
5447 OPTAB_WIDEN);
5448 if (tem != 0)
5449 tem = emit_store_flag (target, code, tem, const0_rtx,
5450 mode, unsignedp, normalizep);
5451 if (tem != 0)
5452 return tem;
5454 delete_insns_since (last);
5457 /* For integer comparisons, try the reverse comparison. However, for
5458 small X and if we'd have anyway to extend, implementing "X != 0"
5459 as "-(int)X >> 31" is still cheaper than inverting "(int)X == 0". */
5460 rcode = reverse_condition (code);
5461 if (can_compare_p (rcode, mode, ccp_store_flag)
5462 && ! (optab_handler (cstore_optab, mode) == CODE_FOR_nothing
5463 && code == NE
5464 && GET_MODE_SIZE (mode) < UNITS_PER_WORD
5465 && op1 == const0_rtx))
5467 int want_add = ((STORE_FLAG_VALUE == 1 && normalizep == -1)
5468 || (STORE_FLAG_VALUE == -1 && normalizep == 1));
5470 /* Again, for the reverse comparison, use either an addition or a XOR. */
5471 if (want_add
5472 && rtx_cost (GEN_INT (normalizep), PLUS,
5473 optimize_insn_for_speed_p ()) == 0)
5475 tem = emit_store_flag_1 (subtarget, rcode, op0, op1, mode, 0,
5476 STORE_FLAG_VALUE, target_mode);
5477 if (tem != 0)
5478 tem = expand_binop (target_mode, add_optab, tem,
5479 GEN_INT (normalizep), target, 0, OPTAB_WIDEN);
5481 else if (!want_add
5482 && rtx_cost (trueval, XOR,
5483 optimize_insn_for_speed_p ()) == 0)
5485 tem = emit_store_flag_1 (subtarget, rcode, op0, op1, mode, 0,
5486 normalizep, target_mode);
5487 if (tem != 0)
5488 tem = expand_binop (target_mode, xor_optab, tem, trueval, target,
5489 INTVAL (trueval) >= 0, OPTAB_WIDEN);
5492 if (tem != 0)
5493 return tem;
5494 delete_insns_since (last);
5497 /* Some other cases we can do are EQ, NE, LE, and GT comparisons with
5498 the constant zero. Reject all other comparisons at this point. Only
5499 do LE and GT if branches are expensive since they are expensive on
5500 2-operand machines. */
5502 if (op1 != const0_rtx
5503 || (code != EQ && code != NE
5504 && (BRANCH_COST (optimize_insn_for_speed_p (),
5505 false) <= 1 || (code != LE && code != GT))))
5506 return 0;
5508 /* Try to put the result of the comparison in the sign bit. Assume we can't
5509 do the necessary operation below. */
5511 tem = 0;
5513 /* To see if A <= 0, compute (A | (A - 1)). A <= 0 iff that result has
5514 the sign bit set. */
5516 if (code == LE)
5518 /* This is destructive, so SUBTARGET can't be OP0. */
5519 if (rtx_equal_p (subtarget, op0))
5520 subtarget = 0;
5522 tem = expand_binop (mode, sub_optab, op0, const1_rtx, subtarget, 0,
5523 OPTAB_WIDEN);
5524 if (tem)
5525 tem = expand_binop (mode, ior_optab, op0, tem, subtarget, 0,
5526 OPTAB_WIDEN);
5529 /* To see if A > 0, compute (((signed) A) << BITS) - A, where BITS is the
5530 number of bits in the mode of OP0, minus one. */
5532 if (code == GT)
5534 if (rtx_equal_p (subtarget, op0))
5535 subtarget = 0;
5537 tem = expand_shift (RSHIFT_EXPR, mode, op0,
5538 GET_MODE_BITSIZE (mode) - 1,
5539 subtarget, 0);
5540 tem = expand_binop (mode, sub_optab, tem, op0, subtarget, 0,
5541 OPTAB_WIDEN);
5544 if (code == EQ || code == NE)
5546 /* For EQ or NE, one way to do the comparison is to apply an operation
5547 that converts the operand into a positive number if it is nonzero
5548 or zero if it was originally zero. Then, for EQ, we subtract 1 and
5549 for NE we negate. This puts the result in the sign bit. Then we
5550 normalize with a shift, if needed.
5552 Two operations that can do the above actions are ABS and FFS, so try
5553 them. If that doesn't work, and MODE is smaller than a full word,
5554 we can use zero-extension to the wider mode (an unsigned conversion)
5555 as the operation. */
5557 /* Note that ABS doesn't yield a positive number for INT_MIN, but
5558 that is compensated by the subsequent overflow when subtracting
5559 one / negating. */
5561 if (optab_handler (abs_optab, mode) != CODE_FOR_nothing)
5562 tem = expand_unop (mode, abs_optab, op0, subtarget, 1);
5563 else if (optab_handler (ffs_optab, mode) != CODE_FOR_nothing)
5564 tem = expand_unop (mode, ffs_optab, op0, subtarget, 1);
5565 else if (GET_MODE_SIZE (mode) < UNITS_PER_WORD)
5567 tem = convert_modes (word_mode, mode, op0, 1);
5568 mode = word_mode;
5571 if (tem != 0)
5573 if (code == EQ)
5574 tem = expand_binop (mode, sub_optab, tem, const1_rtx, subtarget,
5575 0, OPTAB_WIDEN);
5576 else
5577 tem = expand_unop (mode, neg_optab, tem, subtarget, 0);
5580 /* If we couldn't do it that way, for NE we can "or" the two's complement
5581 of the value with itself. For EQ, we take the one's complement of
5582 that "or", which is an extra insn, so we only handle EQ if branches
5583 are expensive. */
5585 if (tem == 0
5586 && (code == NE
5587 || BRANCH_COST (optimize_insn_for_speed_p (),
5588 false) > 1))
5590 if (rtx_equal_p (subtarget, op0))
5591 subtarget = 0;
5593 tem = expand_unop (mode, neg_optab, op0, subtarget, 0);
5594 tem = expand_binop (mode, ior_optab, tem, op0, subtarget, 0,
5595 OPTAB_WIDEN);
5597 if (tem && code == EQ)
5598 tem = expand_unop (mode, one_cmpl_optab, tem, subtarget, 0);
5602 if (tem && normalizep)
5603 tem = expand_shift (RSHIFT_EXPR, mode, tem,
5604 GET_MODE_BITSIZE (mode) - 1,
5605 subtarget, normalizep == 1);
5607 if (tem)
5609 if (!target)
5611 else if (GET_MODE (tem) != target_mode)
5613 convert_move (target, tem, 0);
5614 tem = target;
5616 else if (!subtarget)
5618 emit_move_insn (target, tem);
5619 tem = target;
5622 else
5623 delete_insns_since (last);
5625 return tem;
5628 /* Like emit_store_flag, but always succeeds. */
5631 emit_store_flag_force (rtx target, enum rtx_code code, rtx op0, rtx op1,
5632 enum machine_mode mode, int unsignedp, int normalizep)
5634 rtx tem, label;
5635 rtx trueval, falseval;
5637 /* First see if emit_store_flag can do the job. */
5638 tem = emit_store_flag (target, code, op0, op1, mode, unsignedp, normalizep);
5639 if (tem != 0)
5640 return tem;
5642 if (!target)
5643 target = gen_reg_rtx (word_mode);
5645 /* If this failed, we have to do this with set/compare/jump/set code.
5646 For foo != 0, if foo is in OP0, just replace it with 1 if nonzero. */
5647 trueval = normalizep ? GEN_INT (normalizep) : const1_rtx;
5648 if (code == NE
5649 && GET_MODE_CLASS (mode) == MODE_INT
5650 && REG_P (target)
5651 && op0 == target
5652 && op1 == const0_rtx)
5654 label = gen_label_rtx ();
5655 do_compare_rtx_and_jump (target, const0_rtx, EQ, unsignedp,
5656 mode, NULL_RTX, NULL_RTX, label, -1);
5657 emit_move_insn (target, trueval);
5658 emit_label (label);
5659 return target;
5662 if (!REG_P (target)
5663 || reg_mentioned_p (target, op0) || reg_mentioned_p (target, op1))
5664 target = gen_reg_rtx (GET_MODE (target));
5666 /* Jump in the right direction if the target cannot implement CODE
5667 but can jump on its reverse condition. */
5668 falseval = const0_rtx;
5669 if (! can_compare_p (code, mode, ccp_jump)
5670 && (! FLOAT_MODE_P (mode)
5671 || code == ORDERED || code == UNORDERED
5672 || (! HONOR_NANS (mode) && (code == LTGT || code == UNEQ))
5673 || (! HONOR_SNANS (mode) && (code == EQ || code == NE))))
5675 enum rtx_code rcode;
5676 if (FLOAT_MODE_P (mode))
5677 rcode = reverse_condition_maybe_unordered (code);
5678 else
5679 rcode = reverse_condition (code);
5681 /* Canonicalize to UNORDERED for the libcall. */
5682 if (can_compare_p (rcode, mode, ccp_jump)
5683 || (code == ORDERED && ! can_compare_p (ORDERED, mode, ccp_jump)))
5685 falseval = trueval;
5686 trueval = const0_rtx;
5687 code = rcode;
5691 emit_move_insn (target, trueval);
5692 label = gen_label_rtx ();
5693 do_compare_rtx_and_jump (op0, op1, code, unsignedp, mode, NULL_RTX,
5694 NULL_RTX, label, -1);
5696 emit_move_insn (target, falseval);
5697 emit_label (label);
5699 return target;
5702 /* Perform possibly multi-word comparison and conditional jump to LABEL
5703 if ARG1 OP ARG2 true where ARG1 and ARG2 are of mode MODE. This is
5704 now a thin wrapper around do_compare_rtx_and_jump. */
5706 static void
5707 do_cmp_and_jump (rtx arg1, rtx arg2, enum rtx_code op, enum machine_mode mode,
5708 rtx label)
5710 int unsignedp = (op == LTU || op == LEU || op == GTU || op == GEU);
5711 do_compare_rtx_and_jump (arg1, arg2, op, unsignedp, mode,
5712 NULL_RTX, NULL_RTX, label, -1);