/* Medium-level subroutines: convert bit-field store and extract
   and shifts, multiplies and divides to rtl instructions.
   Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
   2011
   Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "diagnostic-core.h"
#include "rtl.h"
#include "tree.h"
#include "tm_p.h"
#include "flags.h"
#include "insn-config.h"
#include "expr.h"
#include "optabs.h"
#include "recog.h"
#include "langhooks.h"
#include "df.h"
#include "target.h"
#include "expmed.h"

struct target_expmed default_target_expmed;
#if SWITCHABLE_TARGET
struct target_expmed *this_target_expmed = &default_target_expmed;
#endif

static void store_fixed_bit_field (rtx, unsigned HOST_WIDE_INT,
                                   unsigned HOST_WIDE_INT,
                                   unsigned HOST_WIDE_INT,
                                   unsigned HOST_WIDE_INT,
                                   unsigned HOST_WIDE_INT,
                                   rtx);
static void store_split_bit_field (rtx, unsigned HOST_WIDE_INT,
                                   unsigned HOST_WIDE_INT,
                                   unsigned HOST_WIDE_INT,
                                   unsigned HOST_WIDE_INT,
                                   rtx);
static rtx extract_fixed_bit_field (enum machine_mode, rtx,
                                    unsigned HOST_WIDE_INT,
                                    unsigned HOST_WIDE_INT,
                                    unsigned HOST_WIDE_INT, rtx, int, bool);
static rtx mask_rtx (enum machine_mode, int, int, int);
static rtx lshift_value (enum machine_mode, rtx, int, int);
static rtx extract_split_bit_field (rtx, unsigned HOST_WIDE_INT,
                                    unsigned HOST_WIDE_INT, int);
static void do_cmp_and_jump (rtx, rtx, enum rtx_code, enum machine_mode, rtx);
static rtx expand_smod_pow2 (enum machine_mode, rtx, HOST_WIDE_INT);
static rtx expand_sdiv_pow2 (enum machine_mode, rtx, HOST_WIDE_INT);

/* Test whether a value is zero or a power of two.  */
#define EXACT_POWER_OF_2_OR_ZERO_P(x) (((x) & ((x) - 1)) == 0)
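/* For example, 8 & 7 == 0 and 0 & -1 == 0 satisfy the test, while
   6 & 5 == 4 does not: clearing the lowest set bit leaves zero only
   when the value was zero or an exact power of two.  */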

#ifndef SLOW_UNALIGNED_ACCESS
#define SLOW_UNALIGNED_ACCESS(MODE, ALIGN) STRICT_ALIGNMENT
#endif


/* Reduce conditional compilation elsewhere.  */
#ifndef HAVE_insv
#define HAVE_insv 0
#define CODE_FOR_insv CODE_FOR_nothing
#define gen_insv(a,b,c,d) NULL_RTX
#endif
#ifndef HAVE_extv
#define HAVE_extv 0
#define CODE_FOR_extv CODE_FOR_nothing
#define gen_extv(a,b,c,d) NULL_RTX
#endif
#ifndef HAVE_extzv
#define HAVE_extzv 0
#define CODE_FOR_extzv CODE_FOR_nothing
#define gen_extzv(a,b,c,d) NULL_RTX
#endif

void
init_expmed (void)
{
  struct
  {
    struct rtx_def reg;  rtunion reg_fld[2];
    struct rtx_def plus;  rtunion plus_fld1;
    struct rtx_def neg;
    struct rtx_def mult;  rtunion mult_fld1;
    struct rtx_def sdiv;  rtunion sdiv_fld1;
    struct rtx_def udiv;  rtunion udiv_fld1;
    struct rtx_def zext;
    struct rtx_def sdiv_32;  rtunion sdiv_32_fld1;
    struct rtx_def smod_32;  rtunion smod_32_fld1;
    struct rtx_def wide_mult;  rtunion wide_mult_fld1;
    struct rtx_def wide_lshr;  rtunion wide_lshr_fld1;
    struct rtx_def wide_trunc;
    struct rtx_def shift;  rtunion shift_fld1;
    struct rtx_def shift_mult;  rtunion shift_mult_fld1;
    struct rtx_def shift_add;  rtunion shift_add_fld1;
    struct rtx_def shift_sub0;  rtunion shift_sub0_fld1;
    struct rtx_def shift_sub1;  rtunion shift_sub1_fld1;
  } all;

  rtx pow2[MAX_BITS_PER_WORD];
  rtx cint[MAX_BITS_PER_WORD];
  int m, n;
  enum machine_mode mode, wider_mode;
  int speed;

  for (m = 1; m < MAX_BITS_PER_WORD; m++)
    {
      pow2[m] = GEN_INT ((HOST_WIDE_INT) 1 << m);
      cint[m] = GEN_INT (m);
    }
  memset (&all, 0, sizeof all);

  PUT_CODE (&all.reg, REG);
  /* Avoid using hard regs in ways which may be unsupported.  */
  SET_REGNO (&all.reg, LAST_VIRTUAL_REGISTER + 1);

  PUT_CODE (&all.plus, PLUS);
  XEXP (&all.plus, 0) = &all.reg;
  XEXP (&all.plus, 1) = &all.reg;

  PUT_CODE (&all.neg, NEG);
  XEXP (&all.neg, 0) = &all.reg;

  PUT_CODE (&all.mult, MULT);
  XEXP (&all.mult, 0) = &all.reg;
  XEXP (&all.mult, 1) = &all.reg;

  PUT_CODE (&all.sdiv, DIV);
  XEXP (&all.sdiv, 0) = &all.reg;
  XEXP (&all.sdiv, 1) = &all.reg;

  PUT_CODE (&all.udiv, UDIV);
  XEXP (&all.udiv, 0) = &all.reg;
  XEXP (&all.udiv, 1) = &all.reg;

  PUT_CODE (&all.sdiv_32, DIV);
  XEXP (&all.sdiv_32, 0) = &all.reg;
  XEXP (&all.sdiv_32, 1) = 32 < MAX_BITS_PER_WORD ? cint[32] : GEN_INT (32);

  PUT_CODE (&all.smod_32, MOD);
  XEXP (&all.smod_32, 0) = &all.reg;
  XEXP (&all.smod_32, 1) = XEXP (&all.sdiv_32, 1);

  PUT_CODE (&all.zext, ZERO_EXTEND);
  XEXP (&all.zext, 0) = &all.reg;

  PUT_CODE (&all.wide_mult, MULT);
  XEXP (&all.wide_mult, 0) = &all.zext;
  XEXP (&all.wide_mult, 1) = &all.zext;

  PUT_CODE (&all.wide_lshr, LSHIFTRT);
  XEXP (&all.wide_lshr, 0) = &all.wide_mult;

  PUT_CODE (&all.wide_trunc, TRUNCATE);
  XEXP (&all.wide_trunc, 0) = &all.wide_lshr;

  PUT_CODE (&all.shift, ASHIFT);
  XEXP (&all.shift, 0) = &all.reg;

  PUT_CODE (&all.shift_mult, MULT);
  XEXP (&all.shift_mult, 0) = &all.reg;

  PUT_CODE (&all.shift_add, PLUS);
  XEXP (&all.shift_add, 0) = &all.shift_mult;
  XEXP (&all.shift_add, 1) = &all.reg;

  PUT_CODE (&all.shift_sub0, MINUS);
  XEXP (&all.shift_sub0, 0) = &all.shift_mult;
  XEXP (&all.shift_sub0, 1) = &all.reg;

  PUT_CODE (&all.shift_sub1, MINUS);
  XEXP (&all.shift_sub1, 0) = &all.reg;
  XEXP (&all.shift_sub1, 1) = &all.shift_mult;

  for (speed = 0; speed < 2; speed++)
    {
      crtl->maybe_hot_insn_p = speed;
      zero_cost[speed] = set_src_cost (const0_rtx, speed);

      for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT);
           mode != VOIDmode;
           mode = GET_MODE_WIDER_MODE (mode))
        {
          PUT_MODE (&all.reg, mode);
          PUT_MODE (&all.plus, mode);
          PUT_MODE (&all.neg, mode);
          PUT_MODE (&all.mult, mode);
          PUT_MODE (&all.sdiv, mode);
          PUT_MODE (&all.udiv, mode);
          PUT_MODE (&all.sdiv_32, mode);
          PUT_MODE (&all.smod_32, mode);
          PUT_MODE (&all.wide_trunc, mode);
          PUT_MODE (&all.shift, mode);
          PUT_MODE (&all.shift_mult, mode);
          PUT_MODE (&all.shift_add, mode);
          PUT_MODE (&all.shift_sub0, mode);
          PUT_MODE (&all.shift_sub1, mode);

          add_cost[speed][mode] = set_src_cost (&all.plus, speed);
          neg_cost[speed][mode] = set_src_cost (&all.neg, speed);
          mul_cost[speed][mode] = set_src_cost (&all.mult, speed);
          sdiv_cost[speed][mode] = set_src_cost (&all.sdiv, speed);
          udiv_cost[speed][mode] = set_src_cost (&all.udiv, speed);

          sdiv_pow2_cheap[speed][mode] = (set_src_cost (&all.sdiv_32, speed)
                                          <= 2 * add_cost[speed][mode]);
          smod_pow2_cheap[speed][mode] = (set_src_cost (&all.smod_32, speed)
                                          <= 4 * add_cost[speed][mode]);

          wider_mode = GET_MODE_WIDER_MODE (mode);
          if (wider_mode != VOIDmode)
            {
              PUT_MODE (&all.zext, wider_mode);
              PUT_MODE (&all.wide_mult, wider_mode);
              PUT_MODE (&all.wide_lshr, wider_mode);
              XEXP (&all.wide_lshr, 1) = GEN_INT (GET_MODE_BITSIZE (mode));

              mul_widen_cost[speed][wider_mode]
                = set_src_cost (&all.wide_mult, speed);
              mul_highpart_cost[speed][mode]
                = set_src_cost (&all.wide_trunc, speed);
            }

          shift_cost[speed][mode][0] = 0;
          shiftadd_cost[speed][mode][0] = shiftsub0_cost[speed][mode][0]
            = shiftsub1_cost[speed][mode][0] = add_cost[speed][mode];

          n = MIN (MAX_BITS_PER_WORD, GET_MODE_BITSIZE (mode));
          for (m = 1; m < n; m++)
            {
              XEXP (&all.shift, 1) = cint[m];
              XEXP (&all.shift_mult, 1) = pow2[m];

              shift_cost[speed][mode][m] = set_src_cost (&all.shift, speed);
              shiftadd_cost[speed][mode][m] = set_src_cost (&all.shift_add,
                                                            speed);
              shiftsub0_cost[speed][mode][m] = set_src_cost (&all.shift_sub0,
                                                             speed);
              shiftsub1_cost[speed][mode][m] = set_src_cost (&all.shift_sub1,
                                                             speed);
            }
        }
    }
  if (alg_hash_used_p)
    memset (alg_hash, 0, sizeof (alg_hash));
  else
    alg_hash_used_p = true;
  default_rtl_profile ();
}

/* Return an rtx representing minus the value of X.
   MODE is the intended mode of the result,
   useful if X is a CONST_INT.  */

rtx
negate_rtx (enum machine_mode mode, rtx x)
{
  rtx result = simplify_unary_operation (NEG, mode, x, mode);

  if (result == 0)
    result = expand_unop (mode, neg_optab, x, NULL_RTX, 0);

  return result;
}

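/* For instance, negate_rtx (SImode, const1_rtx) folds immediately to
   constm1_rtx via simplify_unary_operation, whereas negating a pseudo
   register emits a NEG (or equivalent) insn through expand_unop.  */
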
/* Report on the availability of insv/extv/extzv and the desired mode
   of each of their operands.  Returns MAX_MACHINE_MODE if HAVE_foo
   is false; else the mode of the specified operand.  If OPNO is -1,
   all the caller cares about is whether the insn is available.  */
enum machine_mode
mode_for_extraction (enum extraction_pattern pattern, int opno)
{
  const struct insn_data_d *data;

  switch (pattern)
    {
    case EP_insv:
      if (HAVE_insv)
        {
          data = &insn_data[CODE_FOR_insv];
          break;
        }
      return MAX_MACHINE_MODE;

    case EP_extv:
      if (HAVE_extv)
        {
          data = &insn_data[CODE_FOR_extv];
          break;
        }
      return MAX_MACHINE_MODE;

    case EP_extzv:
      if (HAVE_extzv)
        {
          data = &insn_data[CODE_FOR_extzv];
          break;
        }
      return MAX_MACHINE_MODE;

    default:
      gcc_unreachable ();
    }

  if (opno == -1)
    return VOIDmode;

  /* Everyone who uses this function used to follow it with
     if (result == VOIDmode) result = word_mode; */
  if (data->operand[opno].mode == VOIDmode)
    return word_mode;
  return data->operand[opno].mode;
}

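/* For example, store_bit_field_1 below calls
   mode_for_extraction (EP_insv, 3) to learn which mode the insv pattern
   wants for its source operand; on a target that defines no insv pattern
   the call returns MAX_MACHINE_MODE and the shift-and-mask fallback in
   store_fixed_bit_field is used instead.  */
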
/* A subroutine of store_bit_field, with the same arguments.  Return true
   if the operation could be implemented.

   If FALLBACK_P is true, fall back to store_fixed_bit_field if we have
   no other way of implementing the operation.  If FALLBACK_P is false,
   return false instead.  */

static bool
store_bit_field_1 (rtx str_rtx, unsigned HOST_WIDE_INT bitsize,
                   unsigned HOST_WIDE_INT bitnum,
                   unsigned HOST_WIDE_INT bitregion_start,
                   unsigned HOST_WIDE_INT bitregion_end,
                   enum machine_mode fieldmode,
                   rtx value, bool fallback_p)
{
  unsigned int unit
    = (MEM_P (str_rtx)) ? BITS_PER_UNIT : BITS_PER_WORD;
  unsigned HOST_WIDE_INT offset, bitpos;
  rtx op0 = str_rtx;
  int byte_offset;
  rtx orig_value;

  enum machine_mode op_mode = mode_for_extraction (EP_insv, 3);

  while (GET_CODE (op0) == SUBREG)
    {
      /* The following line once was done only if WORDS_BIG_ENDIAN,
         but I think that is a mistake.  WORDS_BIG_ENDIAN is
         meaningful at a much higher level; when structures are copied
         between memory and regs, the higher-numbered regs
         always get higher addresses.  */
      int inner_mode_size = GET_MODE_SIZE (GET_MODE (SUBREG_REG (op0)));
      int outer_mode_size = GET_MODE_SIZE (GET_MODE (op0));

      byte_offset = 0;

      /* Paradoxical subregs need special handling on big endian machines.  */
      if (SUBREG_BYTE (op0) == 0 && inner_mode_size < outer_mode_size)
        {
          int difference = inner_mode_size - outer_mode_size;

          if (WORDS_BIG_ENDIAN)
            byte_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
          if (BYTES_BIG_ENDIAN)
            byte_offset += difference % UNITS_PER_WORD;
        }
      else
        byte_offset = SUBREG_BYTE (op0);

      bitnum += byte_offset * BITS_PER_UNIT;
      op0 = SUBREG_REG (op0);
    }

  /* No action is needed if the target is a register and if the field
     lies completely outside that register.  This can occur if the source
     code contains an out-of-bounds access to a small array.  */
  if (REG_P (op0) && bitnum >= GET_MODE_BITSIZE (GET_MODE (op0)))
    return true;

  /* Use vec_set patterns for inserting parts of vectors whenever
     available.  */
  if (VECTOR_MODE_P (GET_MODE (op0))
      && !MEM_P (op0)
      && optab_handler (vec_set_optab, GET_MODE (op0)) != CODE_FOR_nothing
      && fieldmode == GET_MODE_INNER (GET_MODE (op0))
      && bitsize == GET_MODE_BITSIZE (GET_MODE_INNER (GET_MODE (op0)))
      && !(bitnum % GET_MODE_BITSIZE (GET_MODE_INNER (GET_MODE (op0)))))
    {
      struct expand_operand ops[3];
      enum machine_mode outermode = GET_MODE (op0);
      enum machine_mode innermode = GET_MODE_INNER (outermode);
      enum insn_code icode = optab_handler (vec_set_optab, outermode);
      int pos = bitnum / GET_MODE_BITSIZE (innermode);

      create_fixed_operand (&ops[0], op0);
      create_input_operand (&ops[1], value, innermode);
      create_integer_operand (&ops[2], pos);
      if (maybe_expand_insn (icode, 3, ops))
        return true;
    }

  /* If the target is a register, overwriting the entire object, or storing
     a full-word or multi-word field can be done with just a SUBREG.

     If the target is memory, storing any naturally aligned field can be
     done with a simple store.  For targets that support fast unaligned
     memory, any naturally sized, unit aligned field can be done directly.  */

  offset = bitnum / unit;
  bitpos = bitnum % unit;
  byte_offset = (bitnum % BITS_PER_WORD) / BITS_PER_UNIT
                + (offset * UNITS_PER_WORD);

  if (bitpos == 0
      && bitsize == GET_MODE_BITSIZE (fieldmode)
      && (!MEM_P (op0)
          ? ((GET_MODE_SIZE (fieldmode) >= UNITS_PER_WORD
              || GET_MODE_SIZE (GET_MODE (op0)) == GET_MODE_SIZE (fieldmode))
             && ((GET_MODE (op0) == fieldmode && byte_offset == 0)
                 || validate_subreg (fieldmode, GET_MODE (op0), op0,
                                     byte_offset)))
          : (! SLOW_UNALIGNED_ACCESS (fieldmode, MEM_ALIGN (op0))
             || (offset * BITS_PER_UNIT % bitsize == 0
                 && MEM_ALIGN (op0) % GET_MODE_BITSIZE (fieldmode) == 0))))
    {
      if (MEM_P (op0))
        op0 = adjust_address (op0, fieldmode, offset);
      else if (GET_MODE (op0) != fieldmode)
        op0 = simplify_gen_subreg (fieldmode, op0, GET_MODE (op0),
                                   byte_offset);
      emit_move_insn (op0, value);
      return true;
    }

  /* Make sure we are playing with integral modes.  Pun with subregs
     if we aren't.  This must come after the entire register case above,
     since that case is valid for any mode.  The following cases are only
     valid for integral modes.  */
  {
    enum machine_mode imode = int_mode_for_mode (GET_MODE (op0));
    if (imode != GET_MODE (op0))
      {
        if (MEM_P (op0))
          op0 = adjust_address (op0, imode, 0);
        else
          {
            gcc_assert (imode != BLKmode);
            op0 = gen_lowpart (imode, op0);
          }
      }
  }

  /* We may be accessing data outside the field, which means
     we can alias adjacent data.  */
  /* ?? not always for C++0x memory model ?? */
  if (MEM_P (op0))
    {
      op0 = shallow_copy_rtx (op0);
      set_mem_alias_set (op0, 0);
      set_mem_expr (op0, 0);
    }

  /* If OP0 is a register, BITPOS must count within a word.
     But as we have it, it counts within whatever size OP0 now has.
     On a bigendian machine, these are not the same, so convert.  */
  if (BYTES_BIG_ENDIAN
      && !MEM_P (op0)
      && unit > GET_MODE_BITSIZE (GET_MODE (op0)))
    bitpos += unit - GET_MODE_BITSIZE (GET_MODE (op0));

  /* Storing an lsb-aligned field in a register
     can be done with a movestrict instruction.  */

  if (!MEM_P (op0)
      && (BYTES_BIG_ENDIAN ? bitpos + bitsize == unit : bitpos == 0)
      && bitsize == GET_MODE_BITSIZE (fieldmode)
      && optab_handler (movstrict_optab, fieldmode) != CODE_FOR_nothing)
    {
      struct expand_operand ops[2];
      enum insn_code icode = optab_handler (movstrict_optab, fieldmode);
      rtx arg0 = op0;
      unsigned HOST_WIDE_INT subreg_off;

      if (GET_CODE (arg0) == SUBREG)
        {
          /* Else we've got some float mode source being extracted into
             a different float mode destination -- this combination of
             subregs results in Severe Tire Damage.  */
          gcc_assert (GET_MODE (SUBREG_REG (arg0)) == fieldmode
                      || GET_MODE_CLASS (fieldmode) == MODE_INT
                      || GET_MODE_CLASS (fieldmode) == MODE_PARTIAL_INT);
          arg0 = SUBREG_REG (arg0);
        }

      subreg_off = (bitnum % BITS_PER_WORD) / BITS_PER_UNIT
                   + (offset * UNITS_PER_WORD);
      if (validate_subreg (fieldmode, GET_MODE (arg0), arg0, subreg_off))
        {
          arg0 = gen_rtx_SUBREG (fieldmode, arg0, subreg_off);

          create_fixed_operand (&ops[0], arg0);
          /* Shrink the source operand to FIELDMODE.  */
          create_convert_operand_to (&ops[1], value, fieldmode, false);
          if (maybe_expand_insn (icode, 2, ops))
            return true;
        }
    }

  /* Handle fields bigger than a word.  */

  if (bitsize > BITS_PER_WORD)
    {
      /* Here we transfer the words of the field
         in the order least significant first.
         This is because the most significant word is the one which may
         be less than full.
         However, only do that if the value is not BLKmode.  */

      unsigned int backwards = WORDS_BIG_ENDIAN && fieldmode != BLKmode;
      unsigned int nwords = (bitsize + (BITS_PER_WORD - 1)) / BITS_PER_WORD;
      unsigned int i;
      rtx last;

      /* This is the mode we must force value to, so that there will be enough
         subwords to extract.  Note that fieldmode will often (always?) be
         VOIDmode, because that is what store_field uses to indicate that this
         is a bit field, but passing VOIDmode to operand_subword_force
         is not allowed.  */
      fieldmode = GET_MODE (value);
      if (fieldmode == VOIDmode)
        fieldmode = smallest_mode_for_size (nwords * BITS_PER_WORD, MODE_INT);

      last = get_last_insn ();
      for (i = 0; i < nwords; i++)
        {
          /* If I is 0, use the low-order word in both field and target;
             if I is 1, use the next to lowest word; and so on.  */
          unsigned int wordnum = (backwards ? nwords - i - 1 : i);
          unsigned int bit_offset = (backwards
                                     ? MAX ((int) bitsize - ((int) i + 1)
                                            * BITS_PER_WORD,
                                            0)
                                     : (int) i * BITS_PER_WORD);
          rtx value_word = operand_subword_force (value, wordnum, fieldmode);

          if (!store_bit_field_1 (op0, MIN (BITS_PER_WORD,
                                            bitsize - i * BITS_PER_WORD),
                                  bitnum + bit_offset,
                                  bitregion_start, bitregion_end,
                                  word_mode,
                                  value_word, fallback_p))
            {
              delete_insns_since (last);
              return false;
            }
        }
      return true;
    }

  /* From here on we can assume that the field to be stored in is
     a full-word (whatever type that is), since it is shorter than a word.  */

  /* OFFSET is the number of words or bytes (UNIT says which)
     from STR_RTX to the first word or byte containing part of the field.  */

  if (!MEM_P (op0))
    {
      if (offset != 0
          || GET_MODE_SIZE (GET_MODE (op0)) > UNITS_PER_WORD)
        {
          if (!REG_P (op0))
            {
              /* Since this is a destination (lvalue), we can't copy
                 it to a pseudo.  We can remove a SUBREG that does not
                 change the size of the operand.  Such a SUBREG may
                 have been added above.  */
              gcc_assert (GET_CODE (op0) == SUBREG
                          && (GET_MODE_SIZE (GET_MODE (op0))
                              == GET_MODE_SIZE (GET_MODE (SUBREG_REG (op0)))));
              op0 = SUBREG_REG (op0);
            }
          op0 = gen_rtx_SUBREG (mode_for_size (BITS_PER_WORD, MODE_INT, 0),
                                op0, (offset * UNITS_PER_WORD));
        }
      offset = 0;
    }

  /* If VALUE has a floating-point or complex mode, access it as an
     integer of the corresponding size.  This can occur on a machine
     with 64 bit registers that uses SFmode for float.  It can also
     occur for unaligned float or complex fields.  */
  orig_value = value;
  if (GET_MODE (value) != VOIDmode
      && GET_MODE_CLASS (GET_MODE (value)) != MODE_INT
      && GET_MODE_CLASS (GET_MODE (value)) != MODE_PARTIAL_INT)
    {
      value = gen_reg_rtx (int_mode_for_mode (GET_MODE (value)));
      emit_move_insn (gen_lowpart (GET_MODE (orig_value), value), orig_value);
    }

  /* Now OFFSET is nonzero only if OP0 is memory
     and is therefore always measured in bytes.  */

  if (HAVE_insv
      && GET_MODE (value) != BLKmode
      && bitsize > 0
      && GET_MODE_BITSIZE (op_mode) >= bitsize
      /* Do not use insv for volatile bitfields when
         -fstrict-volatile-bitfields is in effect.  */
      && !(MEM_P (op0) && MEM_VOLATILE_P (op0)
           && flag_strict_volatile_bitfields > 0)
      && ! ((REG_P (op0) || GET_CODE (op0) == SUBREG)
            && (bitsize + bitpos > GET_MODE_BITSIZE (op_mode))))
    {
      struct expand_operand ops[4];
      int xbitpos = bitpos;
      rtx value1;
      rtx xop0 = op0;
      rtx last = get_last_insn ();
      bool copy_back = false;

      /* Add OFFSET into OP0's address.  */
      if (MEM_P (xop0))
        xop0 = adjust_address (xop0, byte_mode, offset);

      /* If xop0 is a register, we need it in OP_MODE
         to make it acceptable to the format of insv.  */
      if (GET_CODE (xop0) == SUBREG)
        /* We can't just change the mode, because this might clobber op0,
           and we will need the original value of op0 if insv fails.  */
        xop0 = gen_rtx_SUBREG (op_mode, SUBREG_REG (xop0), SUBREG_BYTE (xop0));
      if (REG_P (xop0) && GET_MODE (xop0) != op_mode)
        xop0 = gen_lowpart_SUBREG (op_mode, xop0);

      /* If the destination is a paradoxical subreg such that we need a
         truncate to the inner mode, perform the insertion on a temporary and
         truncate the result to the original destination.  Note that we can't
         just truncate the paradoxical subreg as (truncate:N (subreg:W (reg:N
         X) 0)) is (reg:N X).  */
      if (GET_CODE (xop0) == SUBREG
          && REG_P (SUBREG_REG (xop0))
          && (!TRULY_NOOP_TRUNCATION_MODES_P (GET_MODE (SUBREG_REG (xop0)),
                                              op_mode)))
        {
          rtx tem = gen_reg_rtx (op_mode);
          emit_move_insn (tem, xop0);
          xop0 = tem;
          copy_back = true;
        }

      /* We have been counting XBITPOS within UNIT.
         Count instead within the size of the register.  */
      if (BYTES_BIG_ENDIAN && !MEM_P (xop0))
        xbitpos += GET_MODE_BITSIZE (op_mode) - unit;

      unit = GET_MODE_BITSIZE (op_mode);

      /* If BITS_BIG_ENDIAN is zero on a BYTES_BIG_ENDIAN machine, we count
         "backwards" from the size of the unit we are inserting into.
         Otherwise, we count bits from the most significant on a
         BYTES/BITS_BIG_ENDIAN machine.  */

      if (BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
        xbitpos = unit - bitsize - xbitpos;

      /* Convert VALUE to op_mode (which insv insn wants) in VALUE1.  */
      value1 = value;
      if (GET_MODE (value) != op_mode)
        {
          if (GET_MODE_BITSIZE (GET_MODE (value)) >= bitsize)
            {
              /* Optimization: Don't bother really extending VALUE
                 if it has all the bits we will actually use.  However,
                 if we must narrow it, be sure we do it correctly.  */

              if (GET_MODE_SIZE (GET_MODE (value)) < GET_MODE_SIZE (op_mode))
                {
                  rtx tmp;

                  tmp = simplify_subreg (op_mode, value1, GET_MODE (value), 0);
                  if (! tmp)
                    tmp = simplify_gen_subreg (op_mode,
                                               force_reg (GET_MODE (value),
                                                          value1),
                                               GET_MODE (value), 0);
                  value1 = tmp;
                }
              else
                value1 = gen_lowpart (op_mode, value1);
            }
          else if (CONST_INT_P (value))
            value1 = gen_int_mode (INTVAL (value), op_mode);
          else
            /* Parse phase is supposed to make VALUE's data type
               match that of the component reference, which is a type
               at least as wide as the field; so VALUE should have
               a mode that corresponds to that type.  */
            gcc_assert (CONSTANT_P (value));
        }

      create_fixed_operand (&ops[0], xop0);
      create_integer_operand (&ops[1], bitsize);
      create_integer_operand (&ops[2], xbitpos);
      create_input_operand (&ops[3], value1, op_mode);
      if (maybe_expand_insn (CODE_FOR_insv, 4, ops))
        {
          if (copy_back)
            convert_move (op0, xop0, true);
          return true;
        }
      delete_insns_since (last);
    }

  /* If OP0 is a memory, try copying it to a register and seeing if a
     cheap register alternative is available.  */
  if (HAVE_insv && MEM_P (op0))
    {
      enum machine_mode bestmode;
      unsigned HOST_WIDE_INT maxbits = MAX_FIXED_MODE_SIZE;

      if (bitregion_end)
        maxbits = bitregion_end - bitregion_start + 1;

      /* Get the mode to use for inserting into this field.  If OP0 is
         BLKmode, get the smallest mode consistent with the alignment.  If
         OP0 is a non-BLKmode object that is no wider than OP_MODE, use its
         mode.  Otherwise, use the smallest mode containing the field.  */

      if (GET_MODE (op0) == BLKmode
          || GET_MODE_BITSIZE (GET_MODE (op0)) > maxbits
          || (op_mode != MAX_MACHINE_MODE
              && GET_MODE_SIZE (GET_MODE (op0)) > GET_MODE_SIZE (op_mode)))
        bestmode = get_best_mode (bitsize, bitnum,
                                  bitregion_start, bitregion_end,
                                  MEM_ALIGN (op0),
                                  (op_mode == MAX_MACHINE_MODE
                                   ? VOIDmode : op_mode),
                                  MEM_VOLATILE_P (op0));
      else
        bestmode = GET_MODE (op0);

      if (bestmode != VOIDmode
          && GET_MODE_SIZE (bestmode) >= GET_MODE_SIZE (fieldmode)
          && !(SLOW_UNALIGNED_ACCESS (bestmode, MEM_ALIGN (op0))
               && GET_MODE_BITSIZE (bestmode) > MEM_ALIGN (op0)))
        {
          rtx last, tempreg, xop0;
          unsigned HOST_WIDE_INT xoffset, xbitpos;

          last = get_last_insn ();

          /* Adjust address to point to the containing unit of
             that mode.  Compute the offset as a multiple of this unit,
             counting in bytes.  */
          unit = GET_MODE_BITSIZE (bestmode);
          xoffset = (bitnum / unit) * GET_MODE_SIZE (bestmode);
          xbitpos = bitnum % unit;
          xop0 = adjust_address (op0, bestmode, xoffset);

          /* Fetch that unit, store the bitfield in it, then store
             the unit.  */
          tempreg = copy_to_reg (xop0);
          if (store_bit_field_1 (tempreg, bitsize, xbitpos,
                                 bitregion_start, bitregion_end,
                                 fieldmode, orig_value, false))
            {
              emit_move_insn (xop0, tempreg);
              return true;
            }
          delete_insns_since (last);
        }
    }

  if (!fallback_p)
    return false;

  store_fixed_bit_field (op0, offset, bitsize, bitpos,
                         bitregion_start, bitregion_end, value);
  return true;
}

/* Generate code to store value from rtx VALUE
   into a bit-field within structure STR_RTX
   containing BITSIZE bits starting at bit BITNUM.

   BITREGION_START is bitpos of the first bitfield in this region.
   BITREGION_END is the bitpos of the ending bitfield in this region.
   These two fields are 0, if the C++ memory model does not apply,
   or we are not interested in keeping track of bitfield regions.

   FIELDMODE is the machine-mode of the FIELD_DECL node for this field.  */

void
store_bit_field (rtx str_rtx, unsigned HOST_WIDE_INT bitsize,
                 unsigned HOST_WIDE_INT bitnum,
                 unsigned HOST_WIDE_INT bitregion_start,
                 unsigned HOST_WIDE_INT bitregion_end,
                 enum machine_mode fieldmode,
                 rtx value)
{
  /* Under the C++0x memory model, we must not touch bits outside the
     bit region.  Adjust the address to start at the beginning of the
     bit region.  */
  if (MEM_P (str_rtx)
      && bitregion_start > 0)
    {
      enum machine_mode bestmode;
      enum machine_mode op_mode;
      unsigned HOST_WIDE_INT offset;

      op_mode = mode_for_extraction (EP_insv, 3);
      if (op_mode == MAX_MACHINE_MODE)
        op_mode = VOIDmode;

      offset = bitregion_start / BITS_PER_UNIT;
      bitnum -= bitregion_start;
      bitregion_end -= bitregion_start;
      bitregion_start = 0;
      bestmode = get_best_mode (bitsize, bitnum,
                                bitregion_start, bitregion_end,
                                MEM_ALIGN (str_rtx),
                                op_mode,
                                MEM_VOLATILE_P (str_rtx));
      str_rtx = adjust_address (str_rtx, bestmode, offset);
    }

  if (!store_bit_field_1 (str_rtx, bitsize, bitnum,
                          bitregion_start, bitregion_end,
                          fieldmode, value, true))
    gcc_unreachable ();
}

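/* As an illustration (hypothetical values, not from a specific caller):
   storing an 8-bit field that starts at bit 5 of a word-aligned MEM would
   arrive here as store_bit_field (mem, 8, 5, ...); on a target without an
   insv pattern, store_bit_field_1 falls through to store_fixed_bit_field
   below, which masks out the old bits and IORs in the shifted VALUE.  */
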
848 /* Use shifts and boolean operations to store VALUE
849 into a bit field of width BITSIZE
850 in a memory location specified by OP0 except offset by OFFSET bytes.
851 (OFFSET must be 0 if OP0 is a register.)
852 The field starts at position BITPOS within the byte.
853 (If OP0 is a register, it may be a full word or a narrower mode,
854 but BITPOS still counts within a full word,
855 which is significant on bigendian machines.) */
857 static void
858 store_fixed_bit_field (rtx op0, unsigned HOST_WIDE_INT offset,
859 unsigned HOST_WIDE_INT bitsize,
860 unsigned HOST_WIDE_INT bitpos,
861 unsigned HOST_WIDE_INT bitregion_start,
862 unsigned HOST_WIDE_INT bitregion_end,
863 rtx value)
865 enum machine_mode mode;
866 unsigned int total_bits = BITS_PER_WORD;
867 rtx temp;
868 int all_zero = 0;
869 int all_one = 0;
871 /* There is a case not handled here:
872 a structure with a known alignment of just a halfword
873 and a field split across two aligned halfwords within the structure.
874 Or likewise a structure with a known alignment of just a byte
875 and a field split across two bytes.
876 Such cases are not supposed to be able to occur. */
878 if (REG_P (op0) || GET_CODE (op0) == SUBREG)
880 gcc_assert (!offset);
881 /* Special treatment for a bit field split across two registers. */
882 if (bitsize + bitpos > BITS_PER_WORD)
884 store_split_bit_field (op0, bitsize, bitpos,
885 bitregion_start, bitregion_end,
886 value);
887 return;
890 else
892 unsigned HOST_WIDE_INT maxbits = MAX_FIXED_MODE_SIZE;
894 if (bitregion_end)
895 maxbits = bitregion_end - bitregion_start + 1;
897 /* Get the proper mode to use for this field. We want a mode that
898 includes the entire field. If such a mode would be larger than
899 a word, we won't be doing the extraction the normal way.
900 We don't want a mode bigger than the destination. */
902 mode = GET_MODE (op0);
903 if (GET_MODE_BITSIZE (mode) == 0
904 || GET_MODE_BITSIZE (mode) > GET_MODE_BITSIZE (word_mode))
905 mode = word_mode;
907 if (MEM_VOLATILE_P (op0)
908 && GET_MODE_BITSIZE (GET_MODE (op0)) > 0
909 && GET_MODE_BITSIZE (GET_MODE (op0)) <= maxbits
910 && flag_strict_volatile_bitfields > 0)
911 mode = GET_MODE (op0);
912 else
913 mode = get_best_mode (bitsize, bitpos + offset * BITS_PER_UNIT,
914 bitregion_start, bitregion_end,
915 MEM_ALIGN (op0), mode, MEM_VOLATILE_P (op0));
917 if (mode == VOIDmode)
919 /* The only way this should occur is if the field spans word
920 boundaries. */
921 store_split_bit_field (op0, bitsize, bitpos + offset * BITS_PER_UNIT,
922 bitregion_start, bitregion_end, value);
923 return;
926 total_bits = GET_MODE_BITSIZE (mode);
928 /* Make sure bitpos is valid for the chosen mode. Adjust BITPOS to
929 be in the range 0 to total_bits-1, and put any excess bytes in
930 OFFSET. */
931 if (bitpos >= total_bits)
933 offset += (bitpos / total_bits) * (total_bits / BITS_PER_UNIT);
934 bitpos -= ((bitpos / total_bits) * (total_bits / BITS_PER_UNIT)
935 * BITS_PER_UNIT);
938 /* Get ref to an aligned byte, halfword, or word containing the field.
939 Adjust BITPOS to be position within a word,
940 and OFFSET to be the offset of that word.
941 Then alter OP0 to refer to that word. */
942 bitpos += (offset % (total_bits / BITS_PER_UNIT)) * BITS_PER_UNIT;
943 offset -= (offset % (total_bits / BITS_PER_UNIT));
944 op0 = adjust_address (op0, mode, offset);
947 mode = GET_MODE (op0);
949 /* Now MODE is either some integral mode for a MEM as OP0,
950 or is a full-word for a REG as OP0. TOTAL_BITS corresponds.
951 The bit field is contained entirely within OP0.
952 BITPOS is the starting bit number within OP0.
953 (OP0's mode may actually be narrower than MODE.) */
955 if (BYTES_BIG_ENDIAN)
956 /* BITPOS is the distance between our msb
957 and that of the containing datum.
958 Convert it to the distance from the lsb. */
959 bitpos = total_bits - bitsize - bitpos;
961 /* Now BITPOS is always the distance between our lsb
962 and that of OP0. */
964 /* Shift VALUE left by BITPOS bits. If VALUE is not constant,
965 we must first convert its mode to MODE. */
967 if (CONST_INT_P (value))
969 HOST_WIDE_INT v = INTVAL (value);
971 if (bitsize < HOST_BITS_PER_WIDE_INT)
972 v &= ((HOST_WIDE_INT) 1 << bitsize) - 1;
974 if (v == 0)
975 all_zero = 1;
976 else if ((bitsize < HOST_BITS_PER_WIDE_INT
977 && v == ((HOST_WIDE_INT) 1 << bitsize) - 1)
978 || (bitsize == HOST_BITS_PER_WIDE_INT && v == -1))
979 all_one = 1;
981 value = lshift_value (mode, value, bitpos, bitsize);
983 else
985 int must_and = (GET_MODE_BITSIZE (GET_MODE (value)) != bitsize
986 && bitpos + bitsize != GET_MODE_BITSIZE (mode));
988 if (GET_MODE (value) != mode)
989 value = convert_to_mode (mode, value, 1);
991 if (must_and)
992 value = expand_binop (mode, and_optab, value,
993 mask_rtx (mode, 0, bitsize, 0),
994 NULL_RTX, 1, OPTAB_LIB_WIDEN);
995 if (bitpos > 0)
996 value = expand_shift (LSHIFT_EXPR, mode, value,
997 bitpos, NULL_RTX, 1);
1000 /* Now clear the chosen bits in OP0,
1001 except that if VALUE is -1 we need not bother. */
1002 /* We keep the intermediates in registers to allow CSE to combine
1003 consecutive bitfield assignments. */
1005 temp = force_reg (mode, op0);
1007 if (! all_one)
1009 temp = expand_binop (mode, and_optab, temp,
1010 mask_rtx (mode, bitpos, bitsize, 1),
1011 NULL_RTX, 1, OPTAB_LIB_WIDEN);
1012 temp = force_reg (mode, temp);
1015 /* Now logical-or VALUE into OP0, unless it is zero. */
1017 if (! all_zero)
1019 temp = expand_binop (mode, ior_optab, temp, value,
1020 NULL_RTX, 1, OPTAB_LIB_WIDEN);
1021 temp = force_reg (mode, temp);
1024 if (op0 != temp)
1026 op0 = copy_rtx (op0);
1027 emit_move_insn (op0, temp);
1031 /* Store a bit field that is split across multiple accessible memory objects.
1033 OP0 is the REG, SUBREG or MEM rtx for the first of the objects.
1034 BITSIZE is the field width; BITPOS the position of its first bit
1035 (within the word).
1036 VALUE is the value to store.
1038 This does not yet handle fields wider than BITS_PER_WORD. */
1040 static void
1041 store_split_bit_field (rtx op0, unsigned HOST_WIDE_INT bitsize,
1042 unsigned HOST_WIDE_INT bitpos,
1043 unsigned HOST_WIDE_INT bitregion_start,
1044 unsigned HOST_WIDE_INT bitregion_end,
1045 rtx value)
1047 unsigned int unit;
1048 unsigned int bitsdone = 0;
1050 /* Make sure UNIT isn't larger than BITS_PER_WORD, we can only handle that
1051 much at a time. */
1052 if (REG_P (op0) || GET_CODE (op0) == SUBREG)
1053 unit = BITS_PER_WORD;
1054 else
1055 unit = MIN (MEM_ALIGN (op0), BITS_PER_WORD);
1057 /* If VALUE is a constant other than a CONST_INT, get it into a register in
1058 WORD_MODE. If we can do this using gen_lowpart_common, do so. Note
1059 that VALUE might be a floating-point constant. */
1060 if (CONSTANT_P (value) && !CONST_INT_P (value))
1062 rtx word = gen_lowpart_common (word_mode, value);
1064 if (word && (value != word))
1065 value = word;
1066 else
1067 value = gen_lowpart_common (word_mode,
1068 force_reg (GET_MODE (value) != VOIDmode
1069 ? GET_MODE (value)
1070 : word_mode, value));
1073 while (bitsdone < bitsize)
1075 unsigned HOST_WIDE_INT thissize;
1076 rtx part, word;
1077 unsigned HOST_WIDE_INT thispos;
1078 unsigned HOST_WIDE_INT offset;
1080 offset = (bitpos + bitsdone) / unit;
1081 thispos = (bitpos + bitsdone) % unit;
1083 /* THISSIZE must not overrun a word boundary. Otherwise,
1084 store_fixed_bit_field will call us again, and we will mutually
1085 recurse forever. */
1086 thissize = MIN (bitsize - bitsdone, BITS_PER_WORD);
1087 thissize = MIN (thissize, unit - thispos);
1089 if (BYTES_BIG_ENDIAN)
1091 int total_bits;
1093 /* We must do an endian conversion exactly the same way as it is
1094 done in extract_bit_field, so that the two calls to
1095 extract_fixed_bit_field will have comparable arguments. */
1096 if (!MEM_P (value) || GET_MODE (value) == BLKmode)
1097 total_bits = BITS_PER_WORD;
1098 else
1099 total_bits = GET_MODE_BITSIZE (GET_MODE (value));
1101 /* Fetch successively less significant portions. */
1102 if (CONST_INT_P (value))
1103 part = GEN_INT (((unsigned HOST_WIDE_INT) (INTVAL (value))
1104 >> (bitsize - bitsdone - thissize))
1105 & (((HOST_WIDE_INT) 1 << thissize) - 1));
1106 else
1107 /* The args are chosen so that the last part includes the
1108 lsb. Give extract_bit_field the value it needs (with
1109 endianness compensation) to fetch the piece we want. */
1110 part = extract_fixed_bit_field (word_mode, value, 0, thissize,
1111 total_bits - bitsize + bitsdone,
1112 NULL_RTX, 1, false);
1114 else
1116 /* Fetch successively more significant portions. */
1117 if (CONST_INT_P (value))
1118 part = GEN_INT (((unsigned HOST_WIDE_INT) (INTVAL (value))
1119 >> bitsdone)
1120 & (((HOST_WIDE_INT) 1 << thissize) - 1));
1121 else
1122 part = extract_fixed_bit_field (word_mode, value, 0, thissize,
1123 bitsdone, NULL_RTX, 1, false);
1126 /* If OP0 is a register, then handle OFFSET here.
1128 When handling multiword bitfields, extract_bit_field may pass
1129 down a word_mode SUBREG of a larger REG for a bitfield that actually
1130 crosses a word boundary. Thus, for a SUBREG, we must find
1131 the current word starting from the base register. */
1132 if (GET_CODE (op0) == SUBREG)
1134 int word_offset = (SUBREG_BYTE (op0) / UNITS_PER_WORD) + offset;
1135 enum machine_mode sub_mode = GET_MODE (SUBREG_REG (op0));
1136 if (sub_mode != BLKmode && GET_MODE_SIZE (sub_mode) < UNITS_PER_WORD)
1137 word = word_offset ? const0_rtx : op0;
1138 else
1139 word = operand_subword_force (SUBREG_REG (op0), word_offset,
1140 GET_MODE (SUBREG_REG (op0)));
1141 offset = 0;
1143 else if (REG_P (op0))
1145 enum machine_mode op0_mode = GET_MODE (op0);
1146 if (op0_mode != BLKmode && GET_MODE_SIZE (op0_mode) < UNITS_PER_WORD)
1147 word = offset ? const0_rtx : op0;
1148 else
1149 word = operand_subword_force (op0, offset, GET_MODE (op0));
1150 offset = 0;
1152 else
1153 word = op0;
1155 /* OFFSET is in UNITs, and UNIT is in bits.
1156 store_fixed_bit_field wants offset in bytes. If WORD is const0_rtx,
1157 it is just an out-of-bounds access. Ignore it. */
1158 if (word != const0_rtx)
1159 store_fixed_bit_field (word, offset * unit / BITS_PER_UNIT, thissize,
1160 thispos, bitregion_start, bitregion_end, part);
1161 bitsdone += thissize;
1165 /* A subroutine of extract_bit_field_1 that converts return value X
1166 to either MODE or TMODE. MODE, TMODE and UNSIGNEDP are arguments
1167 to extract_bit_field. */
1169 static rtx
1170 convert_extracted_bit_field (rtx x, enum machine_mode mode,
1171 enum machine_mode tmode, bool unsignedp)
1173 if (GET_MODE (x) == tmode || GET_MODE (x) == mode)
1174 return x;
1176 /* If the x mode is not a scalar integral, first convert to the
1177 integer mode of that size and then access it as a floating-point
1178 value via a SUBREG. */
1179 if (!SCALAR_INT_MODE_P (tmode))
1181 enum machine_mode smode;
1183 smode = mode_for_size (GET_MODE_BITSIZE (tmode), MODE_INT, 0);
1184 x = convert_to_mode (smode, x, unsignedp);
1185 x = force_reg (smode, x);
1186 return gen_lowpart (tmode, x);
1189 return convert_to_mode (tmode, x, unsignedp);
1192 /* A subroutine of extract_bit_field, with the same arguments.
1193 If FALLBACK_P is true, fall back to extract_fixed_bit_field
1194 if we can find no other means of implementing the operation.
1195 if FALLBACK_P is false, return NULL instead. */
1197 static rtx
1198 extract_bit_field_1 (rtx str_rtx, unsigned HOST_WIDE_INT bitsize,
1199 unsigned HOST_WIDE_INT bitnum,
1200 int unsignedp, bool packedp, rtx target,
1201 enum machine_mode mode, enum machine_mode tmode,
1202 bool fallback_p)
1204 unsigned int unit
1205 = (MEM_P (str_rtx)) ? BITS_PER_UNIT : BITS_PER_WORD;
1206 unsigned HOST_WIDE_INT offset, bitpos;
1207 rtx op0 = str_rtx;
1208 enum machine_mode int_mode;
1209 enum machine_mode ext_mode;
1210 enum machine_mode mode1;
1211 int byte_offset;
1213 if (tmode == VOIDmode)
1214 tmode = mode;
1216 while (GET_CODE (op0) == SUBREG)
1218 bitnum += SUBREG_BYTE (op0) * BITS_PER_UNIT;
1219 op0 = SUBREG_REG (op0);
1222 /* If we have an out-of-bounds access to a register, just return an
1223 uninitialized register of the required mode. This can occur if the
1224 source code contains an out-of-bounds access to a small array. */
1225 if (REG_P (op0) && bitnum >= GET_MODE_BITSIZE (GET_MODE (op0)))
1226 return gen_reg_rtx (tmode);
1228 if (REG_P (op0)
1229 && mode == GET_MODE (op0)
1230 && bitnum == 0
1231 && bitsize == GET_MODE_BITSIZE (GET_MODE (op0)))
1233 /* We're trying to extract a full register from itself. */
1234 return op0;
1237 /* See if we can get a better vector mode before extracting. */
1238 if (VECTOR_MODE_P (GET_MODE (op0))
1239 && !MEM_P (op0)
1240 && GET_MODE_INNER (GET_MODE (op0)) != tmode)
1242 enum machine_mode new_mode;
1244 if (GET_MODE_CLASS (tmode) == MODE_FLOAT)
1245 new_mode = MIN_MODE_VECTOR_FLOAT;
1246 else if (GET_MODE_CLASS (tmode) == MODE_FRACT)
1247 new_mode = MIN_MODE_VECTOR_FRACT;
1248 else if (GET_MODE_CLASS (tmode) == MODE_UFRACT)
1249 new_mode = MIN_MODE_VECTOR_UFRACT;
1250 else if (GET_MODE_CLASS (tmode) == MODE_ACCUM)
1251 new_mode = MIN_MODE_VECTOR_ACCUM;
1252 else if (GET_MODE_CLASS (tmode) == MODE_UACCUM)
1253 new_mode = MIN_MODE_VECTOR_UACCUM;
1254 else
1255 new_mode = MIN_MODE_VECTOR_INT;
1257 for (; new_mode != VOIDmode ; new_mode = GET_MODE_WIDER_MODE (new_mode))
1258 if (GET_MODE_SIZE (new_mode) == GET_MODE_SIZE (GET_MODE (op0))
1259 && targetm.vector_mode_supported_p (new_mode))
1260 break;
1261 if (new_mode != VOIDmode)
1262 op0 = gen_lowpart (new_mode, op0);
1265 /* Use vec_extract patterns for extracting parts of vectors whenever
1266 available. */
1267 if (VECTOR_MODE_P (GET_MODE (op0))
1268 && !MEM_P (op0)
1269 && optab_handler (vec_extract_optab, GET_MODE (op0)) != CODE_FOR_nothing
1270 && ((bitnum + bitsize - 1) / GET_MODE_BITSIZE (GET_MODE_INNER (GET_MODE (op0)))
1271 == bitnum / GET_MODE_BITSIZE (GET_MODE_INNER (GET_MODE (op0)))))
1273 struct expand_operand ops[3];
1274 enum machine_mode outermode = GET_MODE (op0);
1275 enum machine_mode innermode = GET_MODE_INNER (outermode);
1276 enum insn_code icode = optab_handler (vec_extract_optab, outermode);
1277 unsigned HOST_WIDE_INT pos = bitnum / GET_MODE_BITSIZE (innermode);
1279 create_output_operand (&ops[0], target, innermode);
1280 create_input_operand (&ops[1], op0, outermode);
1281 create_integer_operand (&ops[2], pos);
1282 if (maybe_expand_insn (icode, 3, ops))
1284 target = ops[0].value;
1285 if (GET_MODE (target) != mode)
1286 return gen_lowpart (tmode, target);
1287 return target;
1291 /* Make sure we are playing with integral modes. Pun with subregs
1292 if we aren't. */
1294 enum machine_mode imode = int_mode_for_mode (GET_MODE (op0));
1295 if (imode != GET_MODE (op0))
1297 if (MEM_P (op0))
1298 op0 = adjust_address (op0, imode, 0);
1299 else if (imode != BLKmode)
1301 op0 = gen_lowpart (imode, op0);
1303 /* If we got a SUBREG, force it into a register since we
1304 aren't going to be able to do another SUBREG on it. */
1305 if (GET_CODE (op0) == SUBREG)
1306 op0 = force_reg (imode, op0);
1308 else if (REG_P (op0))
1310 rtx reg, subreg;
1311 imode = smallest_mode_for_size (GET_MODE_BITSIZE (GET_MODE (op0)),
1312 MODE_INT);
1313 reg = gen_reg_rtx (imode);
1314 subreg = gen_lowpart_SUBREG (GET_MODE (op0), reg);
1315 emit_move_insn (subreg, op0);
1316 op0 = reg;
1317 bitnum += SUBREG_BYTE (subreg) * BITS_PER_UNIT;
1319 else
1321 rtx mem = assign_stack_temp (GET_MODE (op0),
1322 GET_MODE_SIZE (GET_MODE (op0)), 0);
1323 emit_move_insn (mem, op0);
1324 op0 = adjust_address (mem, BLKmode, 0);
1329 /* We may be accessing data outside the field, which means
1330 we can alias adjacent data. */
1331 if (MEM_P (op0))
1333 op0 = shallow_copy_rtx (op0);
1334 set_mem_alias_set (op0, 0);
1335 set_mem_expr (op0, 0);
1338 /* Extraction of a full-word or multi-word value from a structure
1339 in a register or aligned memory can be done with just a SUBREG.
1340 A subword value in the least significant part of a register
1341 can also be extracted with a SUBREG. For this, we need the
1342 byte offset of the value in op0. */
1344 bitpos = bitnum % unit;
1345 offset = bitnum / unit;
1346 byte_offset = bitpos / BITS_PER_UNIT + offset * UNITS_PER_WORD;
1348 /* If OP0 is a register, BITPOS must count within a word.
1349 But as we have it, it counts within whatever size OP0 now has.
1350 On a bigendian machine, these are not the same, so convert. */
1351 if (BYTES_BIG_ENDIAN
1352 && !MEM_P (op0)
1353 && unit > GET_MODE_BITSIZE (GET_MODE (op0)))
1354 bitpos += unit - GET_MODE_BITSIZE (GET_MODE (op0));
1356 /* ??? We currently assume TARGET is at least as big as BITSIZE.
1357 If that's wrong, the solution is to test for it and set TARGET to 0
1358 if needed. */
1360 /* Only scalar integer modes can be converted via subregs. There is an
1361 additional problem for FP modes here in that they can have a precision
1362 which is different from the size. mode_for_size uses precision, but
1363 we want a mode based on the size, so we must avoid calling it for FP
1364 modes. */
1365 mode1 = (SCALAR_INT_MODE_P (tmode)
1366 ? mode_for_size (bitsize, GET_MODE_CLASS (tmode), 0)
1367 : mode);
1369 /* If the bitfield is volatile, we need to make sure the access
1370 remains on a type-aligned boundary. */
1371 if (GET_CODE (op0) == MEM
1372 && MEM_VOLATILE_P (op0)
1373 && GET_MODE_BITSIZE (GET_MODE (op0)) > 0
1374 && flag_strict_volatile_bitfields > 0)
1375 goto no_subreg_mode_swap;
1377 if (((bitsize >= BITS_PER_WORD && bitsize == GET_MODE_BITSIZE (mode)
1378 && bitpos % BITS_PER_WORD == 0)
1379 || (mode1 != BLKmode
1380 /* ??? The big endian test here is wrong. This is correct
1381 if the value is in a register, and if mode_for_size is not
1382 the same mode as op0. This causes us to get unnecessarily
1383 inefficient code from the Thumb port when -mbig-endian. */
1384 && (BYTES_BIG_ENDIAN
1385 ? bitpos + bitsize == BITS_PER_WORD
1386 : bitpos == 0)))
1387 && ((!MEM_P (op0)
1388 && TRULY_NOOP_TRUNCATION_MODES_P (mode1, GET_MODE (op0))
1389 && GET_MODE_SIZE (mode1) != 0
1390 && byte_offset % GET_MODE_SIZE (mode1) == 0)
1391 || (MEM_P (op0)
1392 && (! SLOW_UNALIGNED_ACCESS (mode, MEM_ALIGN (op0))
1393 || (offset * BITS_PER_UNIT % bitsize == 0
1394 && MEM_ALIGN (op0) % bitsize == 0)))))
1396 if (MEM_P (op0))
1397 op0 = adjust_address (op0, mode1, offset);
1398 else if (mode1 != GET_MODE (op0))
1400 rtx sub = simplify_gen_subreg (mode1, op0, GET_MODE (op0),
1401 byte_offset);
1402 if (sub == NULL)
1403 goto no_subreg_mode_swap;
1404 op0 = sub;
1406 if (mode1 != mode)
1407 return convert_to_mode (tmode, op0, unsignedp);
1408 return op0;
1410 no_subreg_mode_swap:
1412 /* Handle fields bigger than a word. */
1414 if (bitsize > BITS_PER_WORD)
1416 /* Here we transfer the words of the field
1417 in the order least significant first.
1418 This is because the most significant word is the one which may
1419 be less than full. */
1421 unsigned int nwords = (bitsize + (BITS_PER_WORD - 1)) / BITS_PER_WORD;
1422 unsigned int i;
1424 if (target == 0 || !REG_P (target) || !valid_multiword_target_p (target))
1425 target = gen_reg_rtx (mode);
1427 /* Indicate for flow that the entire target reg is being set. */
1428 emit_clobber (target);
1430 for (i = 0; i < nwords; i++)
1432 /* If I is 0, use the low-order word in both field and target;
1433 if I is 1, use the next to lowest word; and so on. */
1434 /* Word number in TARGET to use. */
1435 unsigned int wordnum
1436 = (WORDS_BIG_ENDIAN
1437 ? GET_MODE_SIZE (GET_MODE (target)) / UNITS_PER_WORD - i - 1
1438 : i);
1439 /* Offset from start of field in OP0. */
1440 unsigned int bit_offset = (WORDS_BIG_ENDIAN
1441 ? MAX (0, ((int) bitsize - ((int) i + 1)
1442 * (int) BITS_PER_WORD))
1443 : (int) i * BITS_PER_WORD);
1444 rtx target_part = operand_subword (target, wordnum, 1, VOIDmode);
1445 rtx result_part
1446 = extract_bit_field (op0, MIN (BITS_PER_WORD,
1447 bitsize - i * BITS_PER_WORD),
1448 bitnum + bit_offset, 1, false, target_part, mode,
1449 word_mode);
1451 gcc_assert (target_part);
1453 if (result_part != target_part)
1454 emit_move_insn (target_part, result_part);
1457 if (unsignedp)
1459 /* Unless we've filled TARGET, the upper regs in a multi-reg value
1460 need to be zero'd out. */
1461 if (GET_MODE_SIZE (GET_MODE (target)) > nwords * UNITS_PER_WORD)
1463 unsigned int i, total_words;
1465 total_words = GET_MODE_SIZE (GET_MODE (target)) / UNITS_PER_WORD;
1466 for (i = nwords; i < total_words; i++)
1467 emit_move_insn
1468 (operand_subword (target,
1469 WORDS_BIG_ENDIAN ? total_words - i - 1 : i,
1470 1, VOIDmode),
1471 const0_rtx);
1473 return target;
1476 /* Signed bit field: sign-extend with two arithmetic shifts. */
1477 target = expand_shift (LSHIFT_EXPR, mode, target,
1478 GET_MODE_BITSIZE (mode) - bitsize, NULL_RTX, 0);
1479 return expand_shift (RSHIFT_EXPR, mode, target,
1480 GET_MODE_BITSIZE (mode) - bitsize, NULL_RTX, 0);
1483 /* From here on we know the desired field is smaller than a word. */
1485 /* Check if there is a correspondingly-sized integer field, so we can
1486 safely extract it as one size of integer, if necessary; then
1487 truncate or extend to the size that is wanted; then use SUBREGs or
1488 convert_to_mode to get one of the modes we really wanted. */
1490 int_mode = int_mode_for_mode (tmode);
1491 if (int_mode == BLKmode)
1492 int_mode = int_mode_for_mode (mode);
1493 /* Should probably push op0 out to memory and then do a load. */
1494 gcc_assert (int_mode != BLKmode);
1496 /* OFFSET is the number of words or bytes (UNIT says which)
1497 from STR_RTX to the first word or byte containing part of the field. */
1498 if (!MEM_P (op0))
1500 if (offset != 0
1501 || GET_MODE_SIZE (GET_MODE (op0)) > UNITS_PER_WORD)
1503 if (!REG_P (op0))
1504 op0 = copy_to_reg (op0);
1505 op0 = gen_rtx_SUBREG (mode_for_size (BITS_PER_WORD, MODE_INT, 0),
1506 op0, (offset * UNITS_PER_WORD));
1508 offset = 0;
1511 /* Now OFFSET is nonzero only for memory operands. */
1512 ext_mode = mode_for_extraction (unsignedp ? EP_extzv : EP_extv, 0);
1513 if (ext_mode != MAX_MACHINE_MODE
1514 && bitsize > 0
1515 && GET_MODE_BITSIZE (ext_mode) >= bitsize
1516 /* Do not use extv/extzv for volatile bitfields when
1517 -fstrict-volatile-bitfields is in effect. */
1518 && !(MEM_P (op0) && MEM_VOLATILE_P (op0)
1519 && flag_strict_volatile_bitfields > 0)
1520 /* If op0 is a register, we need it in EXT_MODE to make it
1521 acceptable to the format of ext(z)v. */
1522 && !(GET_CODE (op0) == SUBREG && GET_MODE (op0) != ext_mode)
1523 && !((REG_P (op0) || GET_CODE (op0) == SUBREG)
1524 && (bitsize + bitpos > GET_MODE_BITSIZE (ext_mode))))
1526 struct expand_operand ops[4];
1527 unsigned HOST_WIDE_INT xbitpos = bitpos, xoffset = offset;
1528 rtx xop0 = op0;
1529 rtx xtarget = target;
1530 rtx xspec_target = target;
1531 rtx xspec_target_subreg = 0;
1533 /* If op0 is a register, we need it in EXT_MODE to make it
1534 acceptable to the format of ext(z)v. */
1535 if (REG_P (xop0) && GET_MODE (xop0) != ext_mode)
1536 xop0 = gen_lowpart_SUBREG (ext_mode, xop0);
1537 if (MEM_P (xop0))
1538 /* Get ref to first byte containing part of the field. */
1539 xop0 = adjust_address (xop0, byte_mode, xoffset);
1541 /* Now convert from counting within UNIT to counting in EXT_MODE. */
1542 if (BYTES_BIG_ENDIAN && !MEM_P (xop0))
1543 xbitpos += GET_MODE_BITSIZE (ext_mode) - unit;
1545 unit = GET_MODE_BITSIZE (ext_mode);
1547 /* If BITS_BIG_ENDIAN is zero on a BYTES_BIG_ENDIAN machine, we count
1548 "backwards" from the size of the unit we are extracting from.
1549 Otherwise, we count bits from the most significant on a
1550 BYTES/BITS_BIG_ENDIAN machine. */
1552 if (BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
1553 xbitpos = unit - bitsize - xbitpos;
1555 if (xtarget == 0)
1556 xtarget = xspec_target = gen_reg_rtx (tmode);
1558 if (GET_MODE (xtarget) != ext_mode)
1560 /* Don't use LHS paradoxical subreg if explicit truncation is needed
1561 between the mode of the extraction (word_mode) and the target
1562 mode. Instead, create a temporary and use convert_move to set
1563 the target. */
1564 if (REG_P (xtarget)
1565 && TRULY_NOOP_TRUNCATION_MODES_P (GET_MODE (xtarget), ext_mode))
1567 xtarget = gen_lowpart (ext_mode, xtarget);
1568 if (GET_MODE_PRECISION (ext_mode)
1569 > GET_MODE_PRECISION (GET_MODE (xspec_target)))
1570 xspec_target_subreg = xtarget;
1572 else
1573 xtarget = gen_reg_rtx (ext_mode);
1576 create_output_operand (&ops[0], xtarget, ext_mode);
1577 create_fixed_operand (&ops[1], xop0);
1578 create_integer_operand (&ops[2], bitsize);
1579 create_integer_operand (&ops[3], xbitpos);
1580 if (maybe_expand_insn (unsignedp ? CODE_FOR_extzv : CODE_FOR_extv,
1581 4, ops))
1583 xtarget = ops[0].value;
1584 if (xtarget == xspec_target)
1585 return xtarget;
1586 if (xtarget == xspec_target_subreg)
1587 return xspec_target;
1588 return convert_extracted_bit_field (xtarget, mode, tmode, unsignedp);
1592 /* If OP0 is a memory, try copying it to a register and seeing if a
1593 cheap register alternative is available. */
1594 if (ext_mode != MAX_MACHINE_MODE && MEM_P (op0))
1596 enum machine_mode bestmode;
1598 /* Get the mode to use for inserting into this field. If
1599 OP0 is BLKmode, get the smallest mode consistent with the
1600 alignment. If OP0 is a non-BLKmode object that is no
1601 wider than EXT_MODE, use its mode. Otherwise, use the
1602 smallest mode containing the field. */
1604 if (GET_MODE (op0) == BLKmode
1605 || (ext_mode != MAX_MACHINE_MODE
1606 && GET_MODE_SIZE (GET_MODE (op0)) > GET_MODE_SIZE (ext_mode)))
1607 bestmode = get_best_mode (bitsize, bitnum, 0, 0, MEM_ALIGN (op0),
1608 (ext_mode == MAX_MACHINE_MODE
1609 ? VOIDmode : ext_mode),
1610 MEM_VOLATILE_P (op0));
1611 else
1612 bestmode = GET_MODE (op0);
1614 if (bestmode != VOIDmode
1615 && !(SLOW_UNALIGNED_ACCESS (bestmode, MEM_ALIGN (op0))
1616 && GET_MODE_BITSIZE (bestmode) > MEM_ALIGN (op0)))
1618 unsigned HOST_WIDE_INT xoffset, xbitpos;
1620 /* Compute the offset as a multiple of this unit,
1621 counting in bytes. */
1622 unit = GET_MODE_BITSIZE (bestmode);
1623 xoffset = (bitnum / unit) * GET_MODE_SIZE (bestmode);
1624 xbitpos = bitnum % unit;
1626 /* Make sure the register is big enough for the whole field. */
1627 if (xoffset * BITS_PER_UNIT + unit
1628 >= offset * BITS_PER_UNIT + bitsize)
1630 rtx last, result, xop0;
1632 last = get_last_insn ();
1634 /* Fetch it to a register in that size. */
1635 xop0 = adjust_address (op0, bestmode, xoffset);
1636 xop0 = force_reg (bestmode, xop0);
1637 result = extract_bit_field_1 (xop0, bitsize, xbitpos,
1638 unsignedp, packedp, target,
1639 mode, tmode, false);
1640 if (result)
1641 return result;
1643 delete_insns_since (last);
1648 if (!fallback_p)
1649 return NULL;
1651 target = extract_fixed_bit_field (int_mode, op0, offset, bitsize,
1652 bitpos, target, unsignedp, packedp);
1653 return convert_extracted_bit_field (target, mode, tmode, unsignedp);
1656 /* Generate code to extract a byte-field from STR_RTX
1657 containing BITSIZE bits, starting at BITNUM,
1658 and put it in TARGET if possible (if TARGET is nonzero).
1659 Regardless of TARGET, we return the rtx for where the value is placed.
1661 STR_RTX is the structure containing the byte (a REG or MEM).
1662 UNSIGNEDP is nonzero if this is an unsigned bit field.
1663 PACKEDP is nonzero if the field has the packed attribute.
1664 MODE is the natural mode of the field value once extracted.
1665 TMODE is the mode the caller would like the value to have;
1666 but the value may be returned with type MODE instead.
1668 If a TARGET is specified and we can store in it at no extra cost,
1669 we do so, and return TARGET.
1670 Otherwise, we return a REG of mode TMODE or MODE, with TMODE preferred
1671 if they are equally easy. */
1674 extract_bit_field (rtx str_rtx, unsigned HOST_WIDE_INT bitsize,
1675 unsigned HOST_WIDE_INT bitnum, int unsignedp, bool packedp,
1676 rtx target, enum machine_mode mode, enum machine_mode tmode)
1678 return extract_bit_field_1 (str_rtx, bitsize, bitnum, unsignedp, packedp,
1679 target, mode, tmode, true);
1682 /* Extract a bit field using shifts and boolean operations
1683 Returns an rtx to represent the value.
1684 OP0 addresses a register (word) or memory (byte).
1685 BITPOS says which bit within the word or byte the bit field starts in.
1686 OFFSET says how many bytes farther the bit field starts;
1687 it is 0 if OP0 is a register.
1688 BITSIZE says how many bits long the bit field is.
1689 (If OP0 is a register, it may be narrower than a full word,
1690 but BITPOS still counts within a full word,
1691 which is significant on bigendian machines.)
1693 UNSIGNEDP is nonzero for an unsigned bit field (don't sign-extend value).
1694 PACKEDP is true if the field has the packed attribute.
1696 If TARGET is nonzero, attempts to store the value there
1697 and return TARGET, but this is not guaranteed.
1698 If TARGET is not used, create a pseudo-reg of mode TMODE for the value. */
1700 static rtx
1701 extract_fixed_bit_field (enum machine_mode tmode, rtx op0,
1702 unsigned HOST_WIDE_INT offset,
1703 unsigned HOST_WIDE_INT bitsize,
1704 unsigned HOST_WIDE_INT bitpos, rtx target,
1705 int unsignedp, bool packedp)
1707 unsigned int total_bits = BITS_PER_WORD;
1708 enum machine_mode mode;
1710 if (GET_CODE (op0) == SUBREG || REG_P (op0))
1712 /* Special treatment for a bit field split across two registers. */
1713 if (bitsize + bitpos > BITS_PER_WORD)
1714 return extract_split_bit_field (op0, bitsize, bitpos, unsignedp);
1716 else
1718 /* Get the proper mode to use for this field. We want a mode that
1719 includes the entire field. If such a mode would be larger than
1720 a word, we won't be doing the extraction the normal way. */
1722 if (MEM_VOLATILE_P (op0)
1723 && flag_strict_volatile_bitfields > 0)
1725 if (GET_MODE_BITSIZE (GET_MODE (op0)) > 0)
1726 mode = GET_MODE (op0);
1727 else if (target && GET_MODE_BITSIZE (GET_MODE (target)) > 0)
1728 mode = GET_MODE (target);
1729 else
1730 mode = tmode;
1732 else
1733 mode = get_best_mode (bitsize, bitpos + offset * BITS_PER_UNIT, 0, 0,
1734 MEM_ALIGN (op0), word_mode, MEM_VOLATILE_P (op0));
1736 if (mode == VOIDmode)
1737 /* The only way this should occur is if the field spans word
1738 boundaries. */
1739 return extract_split_bit_field (op0, bitsize,
1740 bitpos + offset * BITS_PER_UNIT,
1741 unsignedp);
1743 total_bits = GET_MODE_BITSIZE (mode);
1745 /* Make sure bitpos is valid for the chosen mode. Adjust BITPOS to
1746 be in the range 0 to total_bits-1, and put any excess bytes in
1747 OFFSET. */
1748 if (bitpos >= total_bits)
1750 offset += (bitpos / total_bits) * (total_bits / BITS_PER_UNIT);
1751 bitpos -= ((bitpos / total_bits) * (total_bits / BITS_PER_UNIT)
1752 * BITS_PER_UNIT);
1755 /* If we're accessing a volatile MEM, we can't do the next
1756 alignment step if it results in a multi-word access where we
1757 otherwise wouldn't have one. So, check for that case
1758 here. */
1759 if (MEM_P (op0)
1760 && MEM_VOLATILE_P (op0)
1761 && flag_strict_volatile_bitfields > 0
1762 && bitpos + bitsize <= total_bits
1763 && bitpos + bitsize + (offset % (total_bits / BITS_PER_UNIT)) * BITS_PER_UNIT > total_bits)
1765 if (STRICT_ALIGNMENT)
1767 static bool informed_about_misalignment = false;
1768 bool warned;
1770 if (packedp)
1772 if (bitsize == total_bits)
1773 warned = warning_at (input_location, OPT_fstrict_volatile_bitfields,
1774 "multiple accesses to volatile structure member"
1775 " because of packed attribute");
1776 else
1777 warned = warning_at (input_location, OPT_fstrict_volatile_bitfields,
1778 "multiple accesses to volatile structure bitfield"
1779 " because of packed attribute");
1781 return extract_split_bit_field (op0, bitsize,
1782 bitpos + offset * BITS_PER_UNIT,
1783 unsignedp);
1786 if (bitsize == total_bits)
1787 warned = warning_at (input_location, OPT_fstrict_volatile_bitfields,
1788 "mis-aligned access used for structure member");
1789 else
1790 warned = warning_at (input_location, OPT_fstrict_volatile_bitfields,
1791 "mis-aligned access used for structure bitfield");
1793 if (! informed_about_misalignment && warned)
1795 informed_about_misalignment = true;
1796 inform (input_location,
1797 "when a volatile object spans multiple type-sized locations,"
1798 " the compiler must choose between using a single mis-aligned access to"
1799 " preserve the volatility, or using multiple aligned accesses to avoid"
1800 " runtime faults; this code may fail at runtime if the hardware does"
1801 " not allow this access");
1805 else
1808 /* Get ref to an aligned byte, halfword, or word containing the field.
1809 Adjust BITPOS to be position within a word,
1810 and OFFSET to be the offset of that word.
1811 Then alter OP0 to refer to that word. */
1812 bitpos += (offset % (total_bits / BITS_PER_UNIT)) * BITS_PER_UNIT;
1813 offset -= (offset % (total_bits / BITS_PER_UNIT));
1816 op0 = adjust_address (op0, mode, offset);
1819 mode = GET_MODE (op0);
1821 if (BYTES_BIG_ENDIAN)
1822 /* BITPOS is the distance between our msb and that of OP0.
1823 Convert it to the distance from the lsb. */
1824 bitpos = total_bits - bitsize - bitpos;
1826 /* Now BITPOS is always the distance between the field's lsb and that of OP0.
1827 We have reduced the big-endian case to the little-endian case. */
1829 if (unsignedp)
1831 if (bitpos)
1833 /* If the field does not already start at the lsb,
1834 shift it so it does. */
1835 /* Maybe propagate the target for the shift. */
1836 /* But not if we will return it--could confuse integrate.c. */
1837 rtx subtarget = (target != 0 && REG_P (target) ? target : 0);
1838 if (tmode != mode) subtarget = 0;
1839 op0 = expand_shift (RSHIFT_EXPR, mode, op0, bitpos, subtarget, 1);
1841 /* Convert the value to the desired mode. */
1842 if (mode != tmode)
1843 op0 = convert_to_mode (tmode, op0, 1);
1845 /* Unless the msb of the field used to be the msb when we shifted,
1846 mask out the upper bits. */
1848 if (GET_MODE_BITSIZE (mode) != bitpos + bitsize)
1849 return expand_binop (GET_MODE (op0), and_optab, op0,
1850 mask_rtx (GET_MODE (op0), 0, bitsize, 0),
1851 target, 1, OPTAB_LIB_WIDEN);
1852 return op0;
1855 /* To extract a signed bit-field, first shift its msb to the msb of the word,
1856 then arithmetic-shift its lsb to the lsb of the word. */
1857 op0 = force_reg (mode, op0);
1859 /* Find the narrowest integer mode that contains the field. */
1861 for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != VOIDmode;
1862 mode = GET_MODE_WIDER_MODE (mode))
1863 if (GET_MODE_BITSIZE (mode) >= bitsize + bitpos)
1865 op0 = convert_to_mode (mode, op0, 0);
1866 break;
1869 if (mode != tmode)
1870 target = 0;
1872 if (GET_MODE_BITSIZE (mode) != (bitsize + bitpos))
1874 int amount = GET_MODE_BITSIZE (mode) - (bitsize + bitpos);
1875 /* Maybe propagate the target for the shift. */
1876 rtx subtarget = (target != 0 && REG_P (target) ? target : 0);
1877 op0 = expand_shift (LSHIFT_EXPR, mode, op0, amount, subtarget, 1);
1880 return expand_shift (RSHIFT_EXPR, mode, op0,
1881 GET_MODE_BITSIZE (mode) - bitsize, target, 0);
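/* As an illustration (values are hypothetical and assume a 32-bit word):
   extracting an unsigned 5-bit field at little-endian bit position 2 of a
   word W reduces to (W >> 2) & 0x1f, while the signed case shifts the field
   up so that its msb becomes the word's msb and then arithmetic-shifts it
   back down: ((int) (W << 25)) >> 27.  */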
1884 /* Return a constant integer (CONST_INT or CONST_DOUBLE) mask value
1885 of mode MODE with BITSIZE ones followed by BITPOS zeros, or the
1886 complement of that if COMPLEMENT. The mask is truncated if
1887 necessary to the width of mode MODE. The mask is zero-extended if
1888 BITSIZE+BITPOS is too small for MODE. */
1890 static rtx
1891 mask_rtx (enum machine_mode mode, int bitpos, int bitsize, int complement)
1893 double_int mask;
1895 mask = double_int_mask (bitsize);
1896 mask = double_int_lshift (mask, bitpos, HOST_BITS_PER_DOUBLE_INT, false);
1898 if (complement)
1899 mask = double_int_not (mask);
1901 return immed_double_int_const (mask, mode);
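/* For example (assuming 32-bit SImode), mask_rtx (SImode, 8, 4, 0) yields
   the constant 0x00000f00 (four ones shifted left by eight), and passing a
   nonzero COMPLEMENT instead yields 0xfffff0ff.  */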
1904 /* Return a constant integer (CONST_INT or CONST_DOUBLE) rtx with the value
1905 VALUE truncated to BITSIZE bits and then shifted left BITPOS bits. */
1907 static rtx
1908 lshift_value (enum machine_mode mode, rtx value, int bitpos, int bitsize)
1910 double_int val;
1912 val = double_int_zext (uhwi_to_double_int (INTVAL (value)), bitsize);
1913 val = double_int_lshift (val, bitpos, HOST_BITS_PER_DOUBLE_INT, false);
1915 return immed_double_int_const (val, mode);
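/* For example (again assuming 32-bit SImode), lshift_value (SImode,
   GEN_INT (0x2d), 8, 4) truncates 0x2d to its low four bits (0xd) and
   shifts the result left by eight, producing the constant 0x00000d00.  */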
1918 /* Extract a bit field that is split across two words
1919 and return an RTX for the result.
1921 OP0 is the REG, SUBREG or MEM rtx for the first of the two words.
1922 BITSIZE is the field width; BITPOS, position of its first bit, in the word.
1923 UNSIGNEDP is 1 if should zero-extend the contents; else sign-extend. */
1925 static rtx
1926 extract_split_bit_field (rtx op0, unsigned HOST_WIDE_INT bitsize,
1927 unsigned HOST_WIDE_INT bitpos, int unsignedp)
1929 unsigned int unit;
1930 unsigned int bitsdone = 0;
1931 rtx result = NULL_RTX;
1932 int first = 1;
1934 /* Make sure UNIT isn't larger than BITS_PER_WORD; we can only handle that
1935 much at a time. */
1936 if (REG_P (op0) || GET_CODE (op0) == SUBREG)
1937 unit = BITS_PER_WORD;
1938 else
1939 unit = MIN (MEM_ALIGN (op0), BITS_PER_WORD);
1941 while (bitsdone < bitsize)
1943 unsigned HOST_WIDE_INT thissize;
1944 rtx part, word;
1945 unsigned HOST_WIDE_INT thispos;
1946 unsigned HOST_WIDE_INT offset;
1948 offset = (bitpos + bitsdone) / unit;
1949 thispos = (bitpos + bitsdone) % unit;
1951 /* THISSIZE must not overrun a word boundary. Otherwise,
1952 extract_fixed_bit_field will call us again, and we will mutually
1953 recurse forever. */
1954 thissize = MIN (bitsize - bitsdone, BITS_PER_WORD);
1955 thissize = MIN (thissize, unit - thispos);
1957 /* If OP0 is a register, then handle OFFSET here.
1959 When handling multiword bitfields, extract_bit_field may pass
1960 down a word_mode SUBREG of a larger REG for a bitfield that actually
1961 crosses a word boundary. Thus, for a SUBREG, we must find
1962 the current word starting from the base register. */
1963 if (GET_CODE (op0) == SUBREG)
1965 int word_offset = (SUBREG_BYTE (op0) / UNITS_PER_WORD) + offset;
1966 word = operand_subword_force (SUBREG_REG (op0), word_offset,
1967 GET_MODE (SUBREG_REG (op0)));
1968 offset = 0;
1970 else if (REG_P (op0))
1972 word = operand_subword_force (op0, offset, GET_MODE (op0));
1973 offset = 0;
1975 else
1976 word = op0;
1978 /* Extract the parts in bit-counting order,
1979 whose meaning is determined by BYTES_PER_UNIT.
1980 OFFSET is in UNITs, and UNIT is in bits.
1981 extract_fixed_bit_field wants offset in bytes. */
1982 part = extract_fixed_bit_field (word_mode, word,
1983 offset * unit / BITS_PER_UNIT,
1984 thissize, thispos, 0, 1, false);
1985 bitsdone += thissize;
1987 /* Shift this part into place for the result. */
1988 if (BYTES_BIG_ENDIAN)
1990 if (bitsize != bitsdone)
1991 part = expand_shift (LSHIFT_EXPR, word_mode, part,
1992 bitsize - bitsdone, 0, 1);
1994 else
1996 if (bitsdone != thissize)
1997 part = expand_shift (LSHIFT_EXPR, word_mode, part,
1998 bitsdone - thissize, 0, 1);
2001 if (first)
2002 result = part;
2003 else
2004 /* Combine the parts with bitwise or. This works
2005 because we extracted each part as an unsigned bit field. */
2006 result = expand_binop (word_mode, ior_optab, part, result, NULL_RTX, 1,
2007 OPTAB_LIB_WIDEN);
2009 first = 0;
2012 /* Unsigned bit field: we are done. */
2013 if (unsignedp)
2014 return result;
2015 /* Signed bit field: sign-extend with two arithmetic shifts. */
2016 result = expand_shift (LSHIFT_EXPR, word_mode, result,
2017 BITS_PER_WORD - bitsize, NULL_RTX, 0);
2018 return expand_shift (RSHIFT_EXPR, word_mode, result,
2019 BITS_PER_WORD - bitsize, NULL_RTX, 0);
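/* A sketch of the splitting, for a hypothetical 16-bit field starting at
   bit 24 with 32-bit words: the first iteration extracts the low 8 bits of
   the field from bits 24..31 of word 0, the second extracts the remaining
   8 bits from bits 0..7 of word 1; the parts are shifted into place and
   IORed together, and a signed field is then sign-extended by the final
   pair of shifts by BITS_PER_WORD - 16.  */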
2022 /* Try to read the low bits of SRC as an rvalue of mode MODE, preserving
2023 the bit pattern. SRC_MODE is the mode of SRC; if this is smaller than
2024 MODE, fill the upper bits with zeros. Fail if the layout of either
2025 mode is unknown (as for CC modes) or if the extraction would involve
2026 unprofitable mode punning. Return the value on success, otherwise
2027 return null.
2029 This is different from gen_lowpart* in these respects:
2031 - the returned value must always be considered an rvalue
2033 - when MODE is wider than SRC_MODE, the extraction involves
2034 a zero extension
2036 - when MODE is smaller than SRC_MODE, the extraction involves
2037 a truncation (and is thus subject to TRULY_NOOP_TRUNCATION).
2039 In other words, this routine performs a computation, whereas the
2040 gen_lowpart* routines are conceptually lvalue or rvalue subreg
2041 operations. */
2043 rtx
2044 extract_low_bits (enum machine_mode mode, enum machine_mode src_mode, rtx src)
2046 enum machine_mode int_mode, src_int_mode;
2048 if (mode == src_mode)
2049 return src;
2051 if (CONSTANT_P (src))
2053 /* simplify_gen_subreg can't be used here, as if simplify_subreg
2054 fails, it will happily create (subreg (symbol_ref)) or similar
2055 invalid SUBREGs. */
2056 unsigned int byte = subreg_lowpart_offset (mode, src_mode);
2057 rtx ret = simplify_subreg (mode, src, src_mode, byte);
2058 if (ret)
2059 return ret;
2061 if (GET_MODE (src) == VOIDmode
2062 || !validate_subreg (mode, src_mode, src, byte))
2063 return NULL_RTX;
2065 src = force_reg (GET_MODE (src), src);
2066 return gen_rtx_SUBREG (mode, src, byte);
2069 if (GET_MODE_CLASS (mode) == MODE_CC || GET_MODE_CLASS (src_mode) == MODE_CC)
2070 return NULL_RTX;
2072 if (GET_MODE_BITSIZE (mode) == GET_MODE_BITSIZE (src_mode)
2073 && MODES_TIEABLE_P (mode, src_mode))
2075 rtx x = gen_lowpart_common (mode, src);
2076 if (x)
2077 return x;
2080 src_int_mode = int_mode_for_mode (src_mode);
2081 int_mode = int_mode_for_mode (mode);
2082 if (src_int_mode == BLKmode || int_mode == BLKmode)
2083 return NULL_RTX;
2085 if (!MODES_TIEABLE_P (src_int_mode, src_mode))
2086 return NULL_RTX;
2087 if (!MODES_TIEABLE_P (int_mode, mode))
2088 return NULL_RTX;
2090 src = gen_lowpart (src_int_mode, src);
2091 src = convert_modes (int_mode, src_int_mode, src, true);
2092 src = gen_lowpart (mode, src);
2093 return src;
2096 /* Add INC into TARGET. */
2098 void
2099 expand_inc (rtx target, rtx inc)
2101 rtx value = expand_binop (GET_MODE (target), add_optab,
2102 target, inc,
2103 target, 0, OPTAB_LIB_WIDEN);
2104 if (value != target)
2105 emit_move_insn (target, value);
2108 /* Subtract DEC from TARGET. */
2110 void
2111 expand_dec (rtx target, rtx dec)
2113 rtx value = expand_binop (GET_MODE (target), sub_optab,
2114 target, dec,
2115 target, 0, OPTAB_LIB_WIDEN);
2116 if (value != target)
2117 emit_move_insn (target, value);
2120 /* Output a shift instruction for expression code CODE,
2121 with SHIFTED being the rtx for the value to shift,
2122 and AMOUNT the rtx for the amount to shift by.
2123 Store the result in the rtx TARGET, if that is convenient.
2124 If UNSIGNEDP is nonzero, do a logical shift; otherwise, arithmetic.
2125 Return the rtx for where the value is. */
2127 static rtx
2128 expand_shift_1 (enum tree_code code, enum machine_mode mode, rtx shifted,
2129 rtx amount, rtx target, int unsignedp)
2131 rtx op1, temp = 0;
2132 int left = (code == LSHIFT_EXPR || code == LROTATE_EXPR);
2133 int rotate = (code == LROTATE_EXPR || code == RROTATE_EXPR);
2134 optab lshift_optab = ashl_optab;
2135 optab rshift_arith_optab = ashr_optab;
2136 optab rshift_uns_optab = lshr_optab;
2137 optab lrotate_optab = rotl_optab;
2138 optab rrotate_optab = rotr_optab;
2139 enum machine_mode op1_mode;
2140 int attempt;
2141 bool speed = optimize_insn_for_speed_p ();
2143 op1 = amount;
2144 op1_mode = GET_MODE (op1);
2146 /* Determine whether the shift/rotate amount is a vector, or scalar. If the
2147 shift amount is a vector, use the vector/vector shift patterns. */
2148 if (VECTOR_MODE_P (mode) && VECTOR_MODE_P (op1_mode))
2150 lshift_optab = vashl_optab;
2151 rshift_arith_optab = vashr_optab;
2152 rshift_uns_optab = vlshr_optab;
2153 lrotate_optab = vrotl_optab;
2154 rrotate_optab = vrotr_optab;
2157 /* Previously detected shift-counts computed by NEGATE_EXPR
2158 and shifted in the other direction; but that does not work
2159 on all machines. */
2161 if (SHIFT_COUNT_TRUNCATED)
2163 if (CONST_INT_P (op1)
2164 && ((unsigned HOST_WIDE_INT) INTVAL (op1) >=
2165 (unsigned HOST_WIDE_INT) GET_MODE_BITSIZE (mode)))
2166 op1 = GEN_INT ((unsigned HOST_WIDE_INT) INTVAL (op1)
2167 % GET_MODE_BITSIZE (mode));
2168 else if (GET_CODE (op1) == SUBREG
2169 && subreg_lowpart_p (op1)
2170 && INTEGRAL_MODE_P (GET_MODE (SUBREG_REG (op1))))
2171 op1 = SUBREG_REG (op1);
2174 if (op1 == const0_rtx)
2175 return shifted;
2177 /* Check whether it's cheaper to implement a left shift by a constant
2178 bit count by a sequence of additions. */
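  /* For instance, on a target where additions are cheaper than shifts,
     X << 2 may be emitted as T = X + X; T = T + T.  */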
2179 if (code == LSHIFT_EXPR
2180 && CONST_INT_P (op1)
2181 && INTVAL (op1) > 0
2182 && INTVAL (op1) < GET_MODE_PRECISION (mode)
2183 && INTVAL (op1) < MAX_BITS_PER_WORD
2184 && shift_cost[speed][mode][INTVAL (op1)] > INTVAL (op1) * add_cost[speed][mode]
2185 && shift_cost[speed][mode][INTVAL (op1)] != MAX_COST)
2187 int i;
2188 for (i = 0; i < INTVAL (op1); i++)
2190 temp = force_reg (mode, shifted);
2191 shifted = expand_binop (mode, add_optab, temp, temp, NULL_RTX,
2192 unsignedp, OPTAB_LIB_WIDEN);
2194 return shifted;
2197 for (attempt = 0; temp == 0 && attempt < 3; attempt++)
2199 enum optab_methods methods;
2201 if (attempt == 0)
2202 methods = OPTAB_DIRECT;
2203 else if (attempt == 1)
2204 methods = OPTAB_WIDEN;
2205 else
2206 methods = OPTAB_LIB_WIDEN;
2208 if (rotate)
2210 /* Widening does not work for rotation. */
2211 if (methods == OPTAB_WIDEN)
2212 continue;
2213 else if (methods == OPTAB_LIB_WIDEN)
2215 /* If we have been unable to open-code this by a rotation,
2216 do it as the IOR of two shifts. I.e., to rotate A
2217 by N bits, compute (A << N) | ((unsigned) A >> (C - N))
2218 where C is the bitsize of A.
2220 It is theoretically possible that the target machine might
2221 not be able to perform either shift and hence we would
2222 be making two libcalls rather than just the one for the
2223 shift (similarly if IOR could not be done). We will allow
2224 this extremely unlikely lossage to avoid complicating the
2225 code below. */
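	  /* For a 32-bit mode and a rotate-left by 8, for example, this path
	     emits (A << 8) | ((unsigned) A >> 24); with a variable count N
	     the second shift amount is the MINUS expression built below.  */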
2227 rtx subtarget = target == shifted ? 0 : target;
2228 rtx new_amount, other_amount;
2229 rtx temp1;
2231 new_amount = op1;
2232 if (CONST_INT_P (op1))
2233 other_amount = GEN_INT (GET_MODE_BITSIZE (mode)
2234 - INTVAL (op1));
2235 else
2236 other_amount
2237 = simplify_gen_binary (MINUS, GET_MODE (op1),
2238 GEN_INT (GET_MODE_PRECISION (mode)),
2239 op1);
2241 shifted = force_reg (mode, shifted);
2243 temp = expand_shift_1 (left ? LSHIFT_EXPR : RSHIFT_EXPR,
2244 mode, shifted, new_amount, 0, 1);
2245 temp1 = expand_shift_1 (left ? RSHIFT_EXPR : LSHIFT_EXPR,
2246 mode, shifted, other_amount,
2247 subtarget, 1);
2248 return expand_binop (mode, ior_optab, temp, temp1, target,
2249 unsignedp, methods);
2252 temp = expand_binop (mode,
2253 left ? lrotate_optab : rrotate_optab,
2254 shifted, op1, target, unsignedp, methods);
2256 else if (unsignedp)
2257 temp = expand_binop (mode,
2258 left ? lshift_optab : rshift_uns_optab,
2259 shifted, op1, target, unsignedp, methods);
2261 /* Do arithmetic shifts.
2262 Also, if we are going to widen the operand, we can just as well
2263 use an arithmetic right-shift instead of a logical one. */
2264 if (temp == 0 && ! rotate
2265 && (! unsignedp || (! left && methods == OPTAB_WIDEN)))
2267 enum optab_methods methods1 = methods;
2269 /* If trying to widen a log shift to an arithmetic shift,
2270 don't accept an arithmetic shift of the same size. */
2271 if (unsignedp)
2272 methods1 = OPTAB_MUST_WIDEN;
2274 /* Arithmetic shift */
2276 temp = expand_binop (mode,
2277 left ? lshift_optab : rshift_arith_optab,
2278 shifted, op1, target, unsignedp, methods1);
2281 /* We used to try extzv here for logical right shifts, but that was
2282 only useful for one machine, the VAX, and caused poor code
2283 generation there for lshrdi3, so the code was deleted and a
2284 define_expand for lshrsi3 was added to vax.md. */
2287 gcc_assert (temp);
2288 return temp;
2291 /* Output a shift instruction for expression code CODE,
2292 with SHIFTED being the rtx for the value to shift,
2293 and AMOUNT the amount to shift by.
2294 Store the result in the rtx TARGET, if that is convenient.
2295 If UNSIGNEDP is nonzero, do a logical shift; otherwise, arithmetic.
2296 Return the rtx for where the value is. */
2298 rtx
2299 expand_shift (enum tree_code code, enum machine_mode mode, rtx shifted,
2300 int amount, rtx target, int unsignedp)
2302 return expand_shift_1 (code, mode,
2303 shifted, GEN_INT (amount), target, unsignedp);
2306 /* Output a shift instruction for expression code CODE,
2307 with SHIFTED being the rtx for the value to shift,
2308 and AMOUNT the tree for the amount to shift by.
2309 Store the result in the rtx TARGET, if that is convenient.
2310 If UNSIGNEDP is nonzero, do a logical shift; otherwise, arithmetic.
2311 Return the rtx for where the value is. */
2313 rtx
2314 expand_variable_shift (enum tree_code code, enum machine_mode mode, rtx shifted,
2315 tree amount, rtx target, int unsignedp)
2317 return expand_shift_1 (code, mode,
2318 shifted, expand_normal (amount), target, unsignedp);
2322 /* Indicates the type of fixup needed after a constant multiplication.
2323 BASIC_VARIANT means no fixup is needed, NEGATE_VARIANT means that
2324 the result should be negated, and ADD_VARIANT means that the
2325 multiplicand should be added to the result. */
2326 enum mult_variant {basic_variant, negate_variant, add_variant};
2328 static void synth_mult (struct algorithm *, unsigned HOST_WIDE_INT,
2329 const struct mult_cost *, enum machine_mode mode);
2330 static bool choose_mult_variant (enum machine_mode, HOST_WIDE_INT,
2331 struct algorithm *, enum mult_variant *, int);
2332 static rtx expand_mult_const (enum machine_mode, rtx, HOST_WIDE_INT, rtx,
2333 const struct algorithm *, enum mult_variant);
2334 static unsigned HOST_WIDE_INT choose_multiplier (unsigned HOST_WIDE_INT, int,
2335 int, rtx *, int *, int *);
2336 static unsigned HOST_WIDE_INT invert_mod2n (unsigned HOST_WIDE_INT, int);
2337 static rtx extract_high_half (enum machine_mode, rtx);
2338 static rtx expand_mult_highpart (enum machine_mode, rtx, rtx, rtx, int, int);
2339 static rtx expand_mult_highpart_optab (enum machine_mode, rtx, rtx, rtx,
2340 int, int);
2341 /* Compute and return the best algorithm for multiplying by T.
2342 The algorithm must cost less than cost_limit.
2343 If retval.cost >= COST_LIMIT, no algorithm was found and all
2344 other fields of the returned struct are undefined.
2345 MODE is the machine mode of the multiplication. */
2347 static void
2348 synth_mult (struct algorithm *alg_out, unsigned HOST_WIDE_INT t,
2349 const struct mult_cost *cost_limit, enum machine_mode mode)
2351 int m;
2352 struct algorithm *alg_in, *best_alg;
2353 struct mult_cost best_cost;
2354 struct mult_cost new_limit;
2355 int op_cost, op_latency;
2356 unsigned HOST_WIDE_INT orig_t = t;
2357 unsigned HOST_WIDE_INT q;
2358 int maxm = MIN (BITS_PER_WORD, GET_MODE_BITSIZE (mode));
2359 int hash_index;
2360 bool cache_hit = false;
2361 enum alg_code cache_alg = alg_zero;
2362 bool speed = optimize_insn_for_speed_p ();
2364 /* Indicate that no algorithm is yet found. If no algorithm
2365 is found, this value will be returned and indicate failure. */
2366 alg_out->cost.cost = cost_limit->cost + 1;
2367 alg_out->cost.latency = cost_limit->latency + 1;
2369 if (cost_limit->cost < 0
2370 || (cost_limit->cost == 0 && cost_limit->latency <= 0))
2371 return;
2373 /* Restrict the bits of "t" to the multiplication's mode. */
2374 t &= GET_MODE_MASK (mode);
2376 /* t == 1 can be done in zero cost. */
2377 if (t == 1)
2379 alg_out->ops = 1;
2380 alg_out->cost.cost = 0;
2381 alg_out->cost.latency = 0;
2382 alg_out->op[0] = alg_m;
2383 return;
2386 /* t == 0 sometimes has a cost. If it does and it exceeds our limit,
2387 fail now. */
2388 if (t == 0)
2390 if (MULT_COST_LESS (cost_limit, zero_cost[speed]))
2391 return;
2392 else
2394 alg_out->ops = 1;
2395 alg_out->cost.cost = zero_cost[speed];
2396 alg_out->cost.latency = zero_cost[speed];
2397 alg_out->op[0] = alg_zero;
2398 return;
2402 /* We'll be needing a couple extra algorithm structures now. */
2404 alg_in = XALLOCA (struct algorithm);
2405 best_alg = XALLOCA (struct algorithm);
2406 best_cost = *cost_limit;
2408 /* Compute the hash index. */
2409 hash_index = (t ^ (unsigned int) mode ^ (speed * 256)) % NUM_ALG_HASH_ENTRIES;
2411 /* See if we already know what to do for T. */
2412 if (alg_hash[hash_index].t == t
2413 && alg_hash[hash_index].mode == mode
2415 && alg_hash[hash_index].speed == speed
2416 && alg_hash[hash_index].alg != alg_unknown)
2418 cache_alg = alg_hash[hash_index].alg;
2420 if (cache_alg == alg_impossible)
2422 /* The cache tells us that it's impossible to synthesize
2423 multiplication by T within alg_hash[hash_index].cost. */
2424 if (!CHEAPER_MULT_COST (&alg_hash[hash_index].cost, cost_limit))
2425 /* COST_LIMIT is at least as restrictive as the one
2426 recorded in the hash table, in which case we have no
2427 hope of synthesizing a multiplication. Just
2428 return. */
2429 return;
2431 /* If we get here, COST_LIMIT is less restrictive than the
2432 one recorded in the hash table, so we may be able to
2433 synthesize a multiplication. Proceed as if we didn't
2434 have the cache entry. */
2436 else
2438 if (CHEAPER_MULT_COST (cost_limit, &alg_hash[hash_index].cost))
2439 /* The cached algorithm shows that this multiplication
2440 requires more cost than COST_LIMIT. Just return. This
2441 way, we don't clobber this cache entry with
2442 alg_impossible but retain useful information. */
2443 return;
2445 cache_hit = true;
2447 switch (cache_alg)
2449 case alg_shift:
2450 goto do_alg_shift;
2452 case alg_add_t_m2:
2453 case alg_sub_t_m2:
2454 goto do_alg_addsub_t_m2;
2456 case alg_add_factor:
2457 case alg_sub_factor:
2458 goto do_alg_addsub_factor;
2460 case alg_add_t2_m:
2461 goto do_alg_add_t2_m;
2463 case alg_sub_t2_m:
2464 goto do_alg_sub_t2_m;
2466 default:
2467 gcc_unreachable ();
2472 /* If we have a group of zero bits at the low-order part of T, try
2473 multiplying by the remaining bits and then doing a shift. */
2475 if ((t & 1) == 0)
2477 do_alg_shift:
2478 m = floor_log2 (t & -t); /* m = number of low zero bits */
2479 if (m < maxm)
2481 q = t >> m;
2482 /* The function expand_shift will choose between a shift and
2483 a sequence of additions, so the observed cost is given as
2484 MIN (m * add_cost[speed][mode], shift_cost[speed][mode][m]). */
2485 op_cost = m * add_cost[speed][mode];
2486 if (shift_cost[speed][mode][m] < op_cost)
2487 op_cost = shift_cost[speed][mode][m];
2488 new_limit.cost = best_cost.cost - op_cost;
2489 new_limit.latency = best_cost.latency - op_cost;
2490 synth_mult (alg_in, q, &new_limit, mode);
2492 alg_in->cost.cost += op_cost;
2493 alg_in->cost.latency += op_cost;
2494 if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
2496 struct algorithm *x;
2497 best_cost = alg_in->cost;
2498 x = alg_in, alg_in = best_alg, best_alg = x;
2499 best_alg->log[best_alg->ops] = m;
2500 best_alg->op[best_alg->ops] = alg_shift;
2503 /* See if treating ORIG_T as a signed number yields a better
2504 sequence. Try this sequence only for a negative ORIG_T
2505 as it would be useless for a non-negative ORIG_T. */
2506 if ((HOST_WIDE_INT) orig_t < 0)
2508 /* Shift ORIG_T as follows because a right shift of a
2509 negative-valued signed type is implementation
2510 defined. */
2511 q = ~(~orig_t >> m);
2512 /* The function expand_shift will choose between a shift
2513 and a sequence of additions, so the observed cost is
2514 given as MIN (m * add_cost[speed][mode],
2515 shift_cost[speed][mode][m]). */
2516 op_cost = m * add_cost[speed][mode];
2517 if (shift_cost[speed][mode][m] < op_cost)
2518 op_cost = shift_cost[speed][mode][m];
2519 new_limit.cost = best_cost.cost - op_cost;
2520 new_limit.latency = best_cost.latency - op_cost;
2521 synth_mult (alg_in, q, &new_limit, mode);
2523 alg_in->cost.cost += op_cost;
2524 alg_in->cost.latency += op_cost;
2525 if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
2527 struct algorithm *x;
2528 best_cost = alg_in->cost;
2529 x = alg_in, alg_in = best_alg, best_alg = x;
2530 best_alg->log[best_alg->ops] = m;
2531 best_alg->op[best_alg->ops] = alg_shift;
2535 if (cache_hit)
2536 goto done;
2539 /* If we have an odd number, add or subtract one. */
2540 if ((t & 1) != 0)
2542 unsigned HOST_WIDE_INT w;
2544 do_alg_addsub_t_m2:
2545 for (w = 1; (w & t) != 0; w <<= 1)
2547 /* If T was -1, then W will be zero after the loop. This is another
2548 case where T ends with ...111. Handling this by multiplying by (T + 1) and
2549 subtracting 1 produces slightly better code and results in algorithm
2550 selection much faster than treating it like the ...0111 case
2551 below. */
2552 if (w == 0
2553 || (w > 2
2554 /* Reject the case where t is 3.
2555 Thus we prefer addition in that case. */
2556 && t != 3))
2558 /* T ends with ...111. Multiply by (T + 1) and subtract 1. */
2560 op_cost = add_cost[speed][mode];
2561 new_limit.cost = best_cost.cost - op_cost;
2562 new_limit.latency = best_cost.latency - op_cost;
2563 synth_mult (alg_in, t + 1, &new_limit, mode);
2565 alg_in->cost.cost += op_cost;
2566 alg_in->cost.latency += op_cost;
2567 if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
2569 struct algorithm *x;
2570 best_cost = alg_in->cost;
2571 x = alg_in, alg_in = best_alg, best_alg = x;
2572 best_alg->log[best_alg->ops] = 0;
2573 best_alg->op[best_alg->ops] = alg_sub_t_m2;
2576 else
2578 /* T ends with ...01 or ...011. Multiply by (T - 1) and add 1. */
2580 op_cost = add_cost[speed][mode];
2581 new_limit.cost = best_cost.cost - op_cost;
2582 new_limit.latency = best_cost.latency - op_cost;
2583 synth_mult (alg_in, t - 1, &new_limit, mode);
2585 alg_in->cost.cost += op_cost;
2586 alg_in->cost.latency += op_cost;
2587 if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
2589 struct algorithm *x;
2590 best_cost = alg_in->cost;
2591 x = alg_in, alg_in = best_alg, best_alg = x;
2592 best_alg->log[best_alg->ops] = 0;
2593 best_alg->op[best_alg->ops] = alg_add_t_m2;
2597 /* We may be able to calculate a * -7, a * -15, a * -31, etc.
2598 quickly with a - a * n for some appropriate constant n. */
2599 m = exact_log2 (-orig_t + 1);
2600 if (m >= 0 && m < maxm)
2602 op_cost = shiftsub1_cost[speed][mode][m];
2603 new_limit.cost = best_cost.cost - op_cost;
2604 new_limit.latency = best_cost.latency - op_cost;
2605 synth_mult (alg_in, (unsigned HOST_WIDE_INT) (-orig_t + 1) >> m, &new_limit, mode);
2607 alg_in->cost.cost += op_cost;
2608 alg_in->cost.latency += op_cost;
2609 if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
2611 struct algorithm *x;
2612 best_cost = alg_in->cost;
2613 x = alg_in, alg_in = best_alg, best_alg = x;
2614 best_alg->log[best_alg->ops] = m;
2615 best_alg->op[best_alg->ops] = alg_sub_t_m2;
2619 if (cache_hit)
2620 goto done;
2623 /* Look for factors of t of the form
2624 t = q(2**m +- 1), 2 <= m <= floor(log2(t - 1)).
2625 If we find such a factor, we can multiply by t using an algorithm that
2626 multiplies by q, shift the result by m and add/subtract it to itself.
2628 We search for large factors first and loop down, even if large factors
2629 are less probable than small; if we find a large factor we will find a
2630 good sequence quickly, and therefore be able to prune (by decreasing
2631 COST_LIMIT) the search. */
2633 do_alg_addsub_factor:
2634 for (m = floor_log2 (t - 1); m >= 2; m--)
2636 unsigned HOST_WIDE_INT d;
2638 d = ((unsigned HOST_WIDE_INT) 1 << m) + 1;
2639 if (t % d == 0 && t > d && m < maxm
2640 && (!cache_hit || cache_alg == alg_add_factor))
2642 /* If the target has a cheap shift-and-add instruction use
2643 that in preference to a shift insn followed by an add insn.
2644 Assume that the shift-and-add is "atomic" with a latency
2645 equal to its cost, otherwise assume that on superscalar
2646 hardware the shift may be executed concurrently with the
2647 earlier steps in the algorithm. */
2648 op_cost = add_cost[speed][mode] + shift_cost[speed][mode][m];
2649 if (shiftadd_cost[speed][mode][m] < op_cost)
2651 op_cost = shiftadd_cost[speed][mode][m];
2652 op_latency = op_cost;
2654 else
2655 op_latency = add_cost[speed][mode];
2657 new_limit.cost = best_cost.cost - op_cost;
2658 new_limit.latency = best_cost.latency - op_latency;
2659 synth_mult (alg_in, t / d, &new_limit, mode);
2661 alg_in->cost.cost += op_cost;
2662 alg_in->cost.latency += op_latency;
2663 if (alg_in->cost.latency < op_cost)
2664 alg_in->cost.latency = op_cost;
2665 if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
2667 struct algorithm *x;
2668 best_cost = alg_in->cost;
2669 x = alg_in, alg_in = best_alg, best_alg = x;
2670 best_alg->log[best_alg->ops] = m;
2671 best_alg->op[best_alg->ops] = alg_add_factor;
2673 /* Other factors will have been taken care of in the recursion. */
2674 break;
2677 d = ((unsigned HOST_WIDE_INT) 1 << m) - 1;
2678 if (t % d == 0 && t > d && m < maxm
2679 && (!cache_hit || cache_alg == alg_sub_factor))
2681 /* If the target has a cheap shift-and-subtract insn use
2682 that in preference to a shift insn followed by a sub insn.
2683 Assume that the shift-and-sub is "atomic" with a latency
2684 equal to its cost, otherwise assume that on superscalar
2685 hardware the shift may be executed concurrently with the
2686 earlier steps in the algorithm. */
2687 op_cost = add_cost[speed][mode] + shift_cost[speed][mode][m];
2688 if (shiftsub0_cost[speed][mode][m] < op_cost)
2690 op_cost = shiftsub0_cost[speed][mode][m];
2691 op_latency = op_cost;
2693 else
2694 op_latency = add_cost[speed][mode];
2696 new_limit.cost = best_cost.cost - op_cost;
2697 new_limit.latency = best_cost.latency - op_latency;
2698 synth_mult (alg_in, t / d, &new_limit, mode);
2700 alg_in->cost.cost += op_cost;
2701 alg_in->cost.latency += op_latency;
2702 if (alg_in->cost.latency < op_cost)
2703 alg_in->cost.latency = op_cost;
2704 if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
2706 struct algorithm *x;
2707 best_cost = alg_in->cost;
2708 x = alg_in, alg_in = best_alg, best_alg = x;
2709 best_alg->log[best_alg->ops] = m;
2710 best_alg->op[best_alg->ops] = alg_sub_factor;
2712 break;
2715 if (cache_hit)
2716 goto done;
2718 /* Try shift-and-add (load effective address) instructions,
2719 i.e. do a*3, a*5, a*9. */
2720 if ((t & 1) != 0)
2722 do_alg_add_t2_m:
2723 q = t - 1;
2724 q = q & -q;
2725 m = exact_log2 (q);
2726 if (m >= 0 && m < maxm)
2728 op_cost = shiftadd_cost[speed][mode][m];
2729 new_limit.cost = best_cost.cost - op_cost;
2730 new_limit.latency = best_cost.latency - op_cost;
2731 synth_mult (alg_in, (t - 1) >> m, &new_limit, mode);
2733 alg_in->cost.cost += op_cost;
2734 alg_in->cost.latency += op_cost;
2735 if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
2737 struct algorithm *x;
2738 best_cost = alg_in->cost;
2739 x = alg_in, alg_in = best_alg, best_alg = x;
2740 best_alg->log[best_alg->ops] = m;
2741 best_alg->op[best_alg->ops] = alg_add_t2_m;
2744 if (cache_hit)
2745 goto done;
2747 do_alg_sub_t2_m:
2748 q = t + 1;
2749 q = q & -q;
2750 m = exact_log2 (q);
2751 if (m >= 0 && m < maxm)
2753 op_cost = shiftsub0_cost[speed][mode][m];
2754 new_limit.cost = best_cost.cost - op_cost;
2755 new_limit.latency = best_cost.latency - op_cost;
2756 synth_mult (alg_in, (t + 1) >> m, &new_limit, mode);
2758 alg_in->cost.cost += op_cost;
2759 alg_in->cost.latency += op_cost;
2760 if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
2762 struct algorithm *x;
2763 best_cost = alg_in->cost;
2764 x = alg_in, alg_in = best_alg, best_alg = x;
2765 best_alg->log[best_alg->ops] = m;
2766 best_alg->op[best_alg->ops] = alg_sub_t2_m;
2769 if (cache_hit)
2770 goto done;
2773 done:
2774 /* If best_cost has not decreased, we have not found any algorithm. */
2775 if (!CHEAPER_MULT_COST (&best_cost, cost_limit))
2777 /* We failed to find an algorithm. Record alg_impossible for
2778 this case (that is, <T, MODE, COST_LIMIT>) so that next time
2779 we are asked to find an algorithm for T within the same or
2780 lower COST_LIMIT, we can immediately return to the
2781 caller. */
2782 alg_hash[hash_index].t = t;
2783 alg_hash[hash_index].mode = mode;
2784 alg_hash[hash_index].speed = speed;
2785 alg_hash[hash_index].alg = alg_impossible;
2786 alg_hash[hash_index].cost = *cost_limit;
2787 return;
2790 /* Cache the result. */
2791 if (!cache_hit)
2793 alg_hash[hash_index].t = t;
2794 alg_hash[hash_index].mode = mode;
2795 alg_hash[hash_index].speed = speed;
2796 alg_hash[hash_index].alg = best_alg->op[best_alg->ops];
2797 alg_hash[hash_index].cost.cost = best_cost.cost;
2798 alg_hash[hash_index].cost.latency = best_cost.latency;
2801 /* If we are getting too long a sequence for `struct algorithm'
2802 to record, make this search fail. */
2803 if (best_alg->ops == MAX_BITS_PER_WORD)
2804 return;
2806 /* Copy the algorithm from temporary space to the space at alg_out.
2807 We avoid using structure assignment because the majority of
2808 best_alg is normally undefined, and this is a critical function. */
2809 alg_out->ops = best_alg->ops + 1;
2810 alg_out->cost = best_cost;
2811 memcpy (alg_out->op, best_alg->op,
2812 alg_out->ops * sizeof *alg_out->op);
2813 memcpy (alg_out->log, best_alg->log,
2814 alg_out->ops * sizeof *alg_out->log);
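/* As a worked example (the actual choice depends on the target's add and
   shift costs), multiplying by 45 can be decomposed as 45 = 5 * 9, giving
   the two shift-and-add steps T = (X << 2) + X; RESULT = (T << 3) + T,
   while multiplying by 7 is typically the single alg_sub_t_m2 step
   (X << 3) - X.  */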
2817 /* Find the cheapest way of multiplying a value of mode MODE by VAL.
2818 Try three variations:
2820 - a shift/add sequence based on VAL itself
2821 - a shift/add sequence based on -VAL, followed by a negation
2822 - a shift/add sequence based on VAL - 1, followed by an addition.
2824 Return true if the cheapest of these cost less than MULT_COST,
2825 describing the algorithm in *ALG and final fixup in *VARIANT. */
2827 static bool
2828 choose_mult_variant (enum machine_mode mode, HOST_WIDE_INT val,
2829 struct algorithm *alg, enum mult_variant *variant,
2830 int mult_cost)
2832 struct algorithm alg2;
2833 struct mult_cost limit;
2834 int op_cost;
2835 bool speed = optimize_insn_for_speed_p ();
2837 /* Fail quickly for impossible bounds. */
2838 if (mult_cost < 0)
2839 return false;
2841 /* Ensure that mult_cost provides a reasonable upper bound.
2842 Any constant multiplication can be performed with less
2843 than 2 * bits additions. */
2844 op_cost = 2 * GET_MODE_BITSIZE (mode) * add_cost[speed][mode];
2845 if (mult_cost > op_cost)
2846 mult_cost = op_cost;
2848 *variant = basic_variant;
2849 limit.cost = mult_cost;
2850 limit.latency = mult_cost;
2851 synth_mult (alg, val, &limit, mode);
2853 /* This works only if the negated value actually fits in an
2854 `unsigned int'. */
2855 if (HOST_BITS_PER_INT >= GET_MODE_BITSIZE (mode))
2857 op_cost = neg_cost[speed][mode];
2858 if (MULT_COST_LESS (&alg->cost, mult_cost))
2860 limit.cost = alg->cost.cost - op_cost;
2861 limit.latency = alg->cost.latency - op_cost;
2863 else
2865 limit.cost = mult_cost - op_cost;
2866 limit.latency = mult_cost - op_cost;
2869 synth_mult (&alg2, -val, &limit, mode);
2870 alg2.cost.cost += op_cost;
2871 alg2.cost.latency += op_cost;
2872 if (CHEAPER_MULT_COST (&alg2.cost, &alg->cost))
2873 *alg = alg2, *variant = negate_variant;
2876 /* This proves very useful for division-by-constant. */
2877 op_cost = add_cost[speed][mode];
2878 if (MULT_COST_LESS (&alg->cost, mult_cost))
2880 limit.cost = alg->cost.cost - op_cost;
2881 limit.latency = alg->cost.latency - op_cost;
2883 else
2885 limit.cost = mult_cost - op_cost;
2886 limit.latency = mult_cost - op_cost;
2889 synth_mult (&alg2, val - 1, &limit, mode);
2890 alg2.cost.cost += op_cost;
2891 alg2.cost.latency += op_cost;
2892 if (CHEAPER_MULT_COST (&alg2.cost, &alg->cost))
2893 *alg = alg2, *variant = add_variant;
2895 return MULT_COST_LESS (&alg->cost, mult_cost);
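/* For example, a multiplication by -5 may come out cheapest as the
   sequence for 5 ((X << 2) + X) followed by a negation (negate_variant),
   and a multiplication by 6 considers the sequence for 5 plus a final
   addition of X (add_variant) against the basic ((X << 1) + X) << 1.  */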
2898 /* A subroutine of expand_mult, used for constant multiplications.
2899 Multiply OP0 by VAL in mode MODE, storing the result in TARGET if
2900 convenient. Use the shift/add sequence described by ALG and apply
2901 the final fixup specified by VARIANT. */
2903 static rtx
2904 expand_mult_const (enum machine_mode mode, rtx op0, HOST_WIDE_INT val,
2905 rtx target, const struct algorithm *alg,
2906 enum mult_variant variant)
2908 HOST_WIDE_INT val_so_far;
2909 rtx insn, accum, tem;
2910 int opno;
2911 enum machine_mode nmode;
2913 /* Avoid referencing memory over and over and invalid sharing
2914 on SUBREGs. */
2915 op0 = force_reg (mode, op0);
2917 /* ACCUM starts out either as OP0 or as a zero, depending on
2918 the first operation. */
2920 if (alg->op[0] == alg_zero)
2922 accum = copy_to_mode_reg (mode, const0_rtx);
2923 val_so_far = 0;
2925 else if (alg->op[0] == alg_m)
2927 accum = copy_to_mode_reg (mode, op0);
2928 val_so_far = 1;
2930 else
2931 gcc_unreachable ();
2933 for (opno = 1; opno < alg->ops; opno++)
2935 int log = alg->log[opno];
2936 rtx shift_subtarget = optimize ? 0 : accum;
2937 rtx add_target
2938 = (opno == alg->ops - 1 && target != 0 && variant != add_variant
2939 && !optimize)
2940 ? target : 0;
2941 rtx accum_target = optimize ? 0 : accum;
2942 rtx accum_inner;
2944 switch (alg->op[opno])
2946 case alg_shift:
2947 tem = expand_shift (LSHIFT_EXPR, mode, accum, log, NULL_RTX, 0);
2948 /* REG_EQUAL note will be attached to the following insn. */
2949 emit_move_insn (accum, tem);
2950 val_so_far <<= log;
2951 break;
2953 case alg_add_t_m2:
2954 tem = expand_shift (LSHIFT_EXPR, mode, op0, log, NULL_RTX, 0);
2955 accum = force_operand (gen_rtx_PLUS (mode, accum, tem),
2956 add_target ? add_target : accum_target);
2957 val_so_far += (HOST_WIDE_INT) 1 << log;
2958 break;
2960 case alg_sub_t_m2:
2961 tem = expand_shift (LSHIFT_EXPR, mode, op0, log, NULL_RTX, 0);
2962 accum = force_operand (gen_rtx_MINUS (mode, accum, tem),
2963 add_target ? add_target : accum_target);
2964 val_so_far -= (HOST_WIDE_INT) 1 << log;
2965 break;
2967 case alg_add_t2_m:
2968 accum = expand_shift (LSHIFT_EXPR, mode, accum,
2969 log, shift_subtarget, 0);
2970 accum = force_operand (gen_rtx_PLUS (mode, accum, op0),
2971 add_target ? add_target : accum_target);
2972 val_so_far = (val_so_far << log) + 1;
2973 break;
2975 case alg_sub_t2_m:
2976 accum = expand_shift (LSHIFT_EXPR, mode, accum,
2977 log, shift_subtarget, 0);
2978 accum = force_operand (gen_rtx_MINUS (mode, accum, op0),
2979 add_target ? add_target : accum_target);
2980 val_so_far = (val_so_far << log) - 1;
2981 break;
2983 case alg_add_factor:
2984 tem = expand_shift (LSHIFT_EXPR, mode, accum, log, NULL_RTX, 0);
2985 accum = force_operand (gen_rtx_PLUS (mode, accum, tem),
2986 add_target ? add_target : accum_target);
2987 val_so_far += val_so_far << log;
2988 break;
2990 case alg_sub_factor:
2991 tem = expand_shift (LSHIFT_EXPR, mode, accum, log, NULL_RTX, 0);
2992 accum = force_operand (gen_rtx_MINUS (mode, tem, accum),
2993 (add_target
2994 ? add_target : (optimize ? 0 : tem)));
2995 val_so_far = (val_so_far << log) - val_so_far;
2996 break;
2998 default:
2999 gcc_unreachable ();
3002 /* Write a REG_EQUAL note on the last insn so that we can cse
3003 multiplication sequences. Note that if ACCUM is a SUBREG,
3004 we've set the inner register and must properly indicate
3005 that. */
3007 tem = op0, nmode = mode;
3008 accum_inner = accum;
3009 if (GET_CODE (accum) == SUBREG)
3011 accum_inner = SUBREG_REG (accum);
3012 nmode = GET_MODE (accum_inner);
3013 tem = gen_lowpart (nmode, op0);
3016 insn = get_last_insn ();
3017 set_dst_reg_note (insn, REG_EQUAL,
3018 gen_rtx_MULT (nmode, tem, GEN_INT (val_so_far)),
3019 accum_inner);
3022 if (variant == negate_variant)
3024 val_so_far = -val_so_far;
3025 accum = expand_unop (mode, neg_optab, accum, target, 0);
3027 else if (variant == add_variant)
3029 val_so_far = val_so_far + 1;
3030 accum = force_operand (gen_rtx_PLUS (mode, accum, op0), target);
3033 /* Compare only the bits of val and val_so_far that are significant
3034 in the result mode, to avoid sign-/zero-extension confusion. */
3035 val &= GET_MODE_MASK (mode);
3036 val_so_far &= GET_MODE_MASK (mode);
3037 gcc_assert (val == val_so_far);
3039 return accum;
3042 /* Perform a multiplication and return an rtx for the result.
3043 MODE is mode of value; OP0 and OP1 are what to multiply (rtx's);
3044 TARGET is a suggestion for where to store the result (an rtx).
3046 We check specially for a constant integer as OP1.
3047 If you want this check for OP0 as well, then before calling
3048 you should swap the two operands if OP0 would be constant. */
3050 rtx
3051 expand_mult (enum machine_mode mode, rtx op0, rtx op1, rtx target,
3052 int unsignedp)
3054 enum mult_variant variant;
3055 struct algorithm algorithm;
3056 int max_cost;
3057 bool speed = optimize_insn_for_speed_p ();
3059 /* Handling const0_rtx here allows us to use zero as a rogue value for
3060 coeff below. */
3061 if (op1 == const0_rtx)
3062 return const0_rtx;
3063 if (op1 == const1_rtx)
3064 return op0;
3065 if (op1 == constm1_rtx)
3066 return expand_unop (mode,
3067 GET_MODE_CLASS (mode) == MODE_INT
3068 && !unsignedp && flag_trapv
3069 ? negv_optab : neg_optab,
3070 op0, target, 0);
3072 /* These are the operations that are potentially turned into a sequence
3073 of shifts and additions. */
3074 if (SCALAR_INT_MODE_P (mode)
3075 && (unsignedp || !flag_trapv))
3077 HOST_WIDE_INT coeff = 0;
3078 rtx fake_reg = gen_raw_REG (mode, LAST_VIRTUAL_REGISTER + 1);
3080 /* synth_mult does an `unsigned int' multiply. As long as the mode is
3081 less than or equal in size to `unsigned int' this doesn't matter.
3082 If the mode is larger than `unsigned int', then synth_mult works
3083 only if the constant value exactly fits in an `unsigned int' without
3084 any truncation. This means that multiplying by negative values does
3085 not work; results are off by 2^32 on a 32 bit machine. */
3087 if (CONST_INT_P (op1))
3089 /* Attempt to handle multiplication of DImode values by negative
3090 coefficients, by performing the multiplication by a positive
3091 multiplier and then negating the result.
3092 if (INTVAL (op1) < 0
3093 && GET_MODE_BITSIZE (mode) > HOST_BITS_PER_WIDE_INT)
3095 /* It's safe to use -INTVAL (op1) even for INT_MIN, as the
3096 result is interpreted as an unsigned coefficient.
3097 Exclude cost of op0 from max_cost to match the cost
3098 calculation of the synth_mult. */
3099 max_cost = (set_src_cost (gen_rtx_MULT (mode, fake_reg, op1),
3100 speed)
3101 - neg_cost[speed][mode]);
3102 if (max_cost > 0
3103 && choose_mult_variant (mode, -INTVAL (op1), &algorithm,
3104 &variant, max_cost))
3106 rtx temp = expand_mult_const (mode, op0, -INTVAL (op1),
3107 NULL_RTX, &algorithm,
3108 variant);
3109 return expand_unop (mode, neg_optab, temp, target, 0);
3112 else coeff = INTVAL (op1);
3114 else if (GET_CODE (op1) == CONST_DOUBLE)
3116 /* If we are multiplying in DImode, it may still be a win
3117 to try to work with shifts and adds. */
3118 if (CONST_DOUBLE_HIGH (op1) == 0
3119 && CONST_DOUBLE_LOW (op1) > 0)
3120 coeff = CONST_DOUBLE_LOW (op1);
3121 else if (CONST_DOUBLE_LOW (op1) == 0
3122 && EXACT_POWER_OF_2_OR_ZERO_P (CONST_DOUBLE_HIGH (op1)))
3124 int shift = floor_log2 (CONST_DOUBLE_HIGH (op1))
3125 + HOST_BITS_PER_WIDE_INT;
3126 return expand_shift (LSHIFT_EXPR, mode, op0,
3127 shift, target, unsignedp);
3131 /* We used to test optimize here, on the grounds that it's better to
3132 produce a smaller program when -O is not used. But this causes
3133 such a terrible slowdown sometimes that it seems better to always
3134 use synth_mult. */
3135 if (coeff != 0)
3137 /* Special case powers of two. */
3138 if (EXACT_POWER_OF_2_OR_ZERO_P (coeff))
3139 return expand_shift (LSHIFT_EXPR, mode, op0,
3140 floor_log2 (coeff), target, unsignedp);
3142 /* Exclude cost of op0 from max_cost to match the cost
3143 calculation of the synth_mult. */
3144 max_cost = set_src_cost (gen_rtx_MULT (mode, fake_reg, op1), speed);
3145 if (choose_mult_variant (mode, coeff, &algorithm, &variant,
3146 max_cost))
3147 return expand_mult_const (mode, op0, coeff, target,
3148 &algorithm, variant);
3152 if (GET_CODE (op0) == CONST_DOUBLE)
3154 rtx temp = op0;
3155 op0 = op1;
3156 op1 = temp;
3159 /* Expand x*2.0 as x+x. */
3160 if (GET_CODE (op1) == CONST_DOUBLE
3161 && SCALAR_FLOAT_MODE_P (mode))
3163 REAL_VALUE_TYPE d;
3164 REAL_VALUE_FROM_CONST_DOUBLE (d, op1);
3166 if (REAL_VALUES_EQUAL (d, dconst2))
3168 op0 = force_reg (GET_MODE (op0), op0);
3169 return expand_binop (mode, add_optab, op0, op0,
3170 target, unsignedp, OPTAB_LIB_WIDEN);
3174 /* This used to use umul_optab if unsigned, but for non-widening multiply
3175 there is no difference between signed and unsigned. */
3176 op0 = expand_binop (mode,
3177 ! unsignedp
3178 && flag_trapv && (GET_MODE_CLASS(mode) == MODE_INT)
3179 ? smulv_optab : smul_optab,
3180 op0, op1, target, unsignedp, OPTAB_LIB_WIDEN);
3181 gcc_assert (op0);
3182 return op0;
3185 /* Perform a widening multiplication and return an rtx for the result.
3186 MODE is mode of value; OP0 and OP1 are what to multiply (rtx's);
3187 TARGET is a suggestion for where to store the result (an rtx).
3188 THIS_OPTAB is the optab we should use, it must be either umul_widen_optab
3189 or smul_widen_optab.
3191 We check specially for a constant integer as OP1, comparing the
3192 cost of a widening multiply against the cost of a sequence of shifts
3193 and adds. */
3195 rtx
3196 expand_widening_mult (enum machine_mode mode, rtx op0, rtx op1, rtx target,
3197 int unsignedp, optab this_optab)
3199 bool speed = optimize_insn_for_speed_p ();
3200 rtx cop1;
3202 if (CONST_INT_P (op1)
3203 && GET_MODE (op0) != VOIDmode
3204 && (cop1 = convert_modes (mode, GET_MODE (op0), op1,
3205 this_optab == umul_widen_optab))
3206 && CONST_INT_P (cop1)
3207 && (INTVAL (cop1) >= 0
3208 || HWI_COMPUTABLE_MODE_P (mode)))
3210 HOST_WIDE_INT coeff = INTVAL (cop1);
3211 int max_cost;
3212 enum mult_variant variant;
3213 struct algorithm algorithm;
3215 /* Special case powers of two. */
3216 if (EXACT_POWER_OF_2_OR_ZERO_P (coeff))
3218 op0 = convert_to_mode (mode, op0, this_optab == umul_widen_optab);
3219 return expand_shift (LSHIFT_EXPR, mode, op0,
3220 floor_log2 (coeff), target, unsignedp);
3223 /* Exclude cost of op0 from max_cost to match the cost
3224 calculation of the synth_mult. */
3225 max_cost = mul_widen_cost[speed][mode];
3226 if (choose_mult_variant (mode, coeff, &algorithm, &variant,
3227 max_cost))
3229 op0 = convert_to_mode (mode, op0, this_optab == umul_widen_optab);
3230 return expand_mult_const (mode, op0, coeff, target,
3231 &algorithm, variant);
3234 return expand_binop (mode, this_optab, op0, op1, target,
3235 unsignedp, OPTAB_LIB_WIDEN);
3238 /* Return the smallest n such that 2**n >= X. */
3240 int
3241 ceil_log2 (unsigned HOST_WIDE_INT x)
3243 return floor_log2 (x - 1) + 1;
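/* E.g. ceil_log2 (5) == 3, ceil_log2 (8) == 3 and ceil_log2 (9) == 4
   (floor_log2 (0) is -1, so ceil_log2 (1) == 0).  */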
3246 /* Choose a minimal N + 1 bit approximation to 1/D that can be used to
3247 replace division by D, and put the least significant N bits of the result
3248 in *MULTIPLIER_PTR and return the most significant bit.
3250 The width of operations is N (should be <= HOST_BITS_PER_WIDE_INT), the
3251 needed precision is in PRECISION (should be <= N).
3253 PRECISION should be as small as possible so this function can choose
3254 a multiplier more freely.
3256 The rounded-up logarithm of D is placed in *lgup_ptr. A shift count that
3257 is to be used for a final right shift is placed in *POST_SHIFT_PTR.
3259 Using this function, x/D will be equal to (x * m) >> (*POST_SHIFT_PTR),
3260 where m is the full HOST_BITS_PER_WIDE_INT + 1 bit multiplier. */
3262 static
3263 unsigned HOST_WIDE_INT
3264 choose_multiplier (unsigned HOST_WIDE_INT d, int n, int precision,
3265 rtx *multiplier_ptr, int *post_shift_ptr, int *lgup_ptr)
3267 HOST_WIDE_INT mhigh_hi, mlow_hi;
3268 unsigned HOST_WIDE_INT mhigh_lo, mlow_lo;
3269 int lgup, post_shift;
3270 int pow, pow2;
3271 unsigned HOST_WIDE_INT nl, dummy1;
3272 HOST_WIDE_INT nh, dummy2;
3274 /* lgup = ceil(log2(divisor)); */
3275 lgup = ceil_log2 (d);
3277 gcc_assert (lgup <= n);
3279 pow = n + lgup;
3280 pow2 = n + lgup - precision;
3282 /* We could handle this with some effort, but this case is much
3283 better handled directly with a scc insn, so rely on caller using
3284 that. */
3285 gcc_assert (pow != 2 * HOST_BITS_PER_WIDE_INT);
3287 /* mlow = 2^(N + lgup)/d */
3288 if (pow >= HOST_BITS_PER_WIDE_INT)
3290 nh = (HOST_WIDE_INT) 1 << (pow - HOST_BITS_PER_WIDE_INT);
3291 nl = 0;
3293 else
3295 nh = 0;
3296 nl = (unsigned HOST_WIDE_INT) 1 << pow;
3298 div_and_round_double (TRUNC_DIV_EXPR, 1, nl, nh, d, (HOST_WIDE_INT) 0,
3299 &mlow_lo, &mlow_hi, &dummy1, &dummy2);
3301 /* mhigh = (2^(N + lgup) + 2^(N + lgup - precision))/d */
3302 if (pow2 >= HOST_BITS_PER_WIDE_INT)
3303 nh |= (HOST_WIDE_INT) 1 << (pow2 - HOST_BITS_PER_WIDE_INT);
3304 else
3305 nl |= (unsigned HOST_WIDE_INT) 1 << pow2;
3306 div_and_round_double (TRUNC_DIV_EXPR, 1, nl, nh, d, (HOST_WIDE_INT) 0,
3307 &mhigh_lo, &mhigh_hi, &dummy1, &dummy2);
3309 gcc_assert (!mhigh_hi || nh - d < d);
3310 gcc_assert (mhigh_hi <= 1 && mlow_hi <= 1);
3311 /* Assert that mlow < mhigh. */
3312 gcc_assert (mlow_hi < mhigh_hi
3313 || (mlow_hi == mhigh_hi && mlow_lo < mhigh_lo));
3315 /* If precision == N, then mlow, mhigh exceed 2^N
3316 (but they do not exceed 2^(N+1)). */
3318 /* Reduce to lowest terms. */
3319 for (post_shift = lgup; post_shift > 0; post_shift--)
3321 unsigned HOST_WIDE_INT ml_lo = (mlow_hi << (HOST_BITS_PER_WIDE_INT - 1)) | (mlow_lo >> 1);
3322 unsigned HOST_WIDE_INT mh_lo = (mhigh_hi << (HOST_BITS_PER_WIDE_INT - 1)) | (mhigh_lo >> 1);
3323 if (ml_lo >= mh_lo)
3324 break;
3326 mlow_hi = 0;
3327 mlow_lo = ml_lo;
3328 mhigh_hi = 0;
3329 mhigh_lo = mh_lo;
3332 *post_shift_ptr = post_shift;
3333 *lgup_ptr = lgup;
3334 if (n < HOST_BITS_PER_WIDE_INT)
3336 unsigned HOST_WIDE_INT mask = ((unsigned HOST_WIDE_INT) 1 << n) - 1;
3337 *multiplier_ptr = GEN_INT (mhigh_lo & mask);
3338 return mhigh_lo >= mask;
3340 else
3342 *multiplier_ptr = GEN_INT (mhigh_lo);
3343 return mhigh_hi;
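/* A worked example, assuming a 64-bit HOST_WIDE_INT: for D == 7 with
   N == PRECISION == 32, lgup is 3 and mhigh is 0x124924925, so the low 32
   bits 0x24924925 are stored in *MULTIPLIER_PTR, the returned msb is 1 and
   *POST_SHIFT_PTR is 3; in full-width arithmetic an unsigned 32-bit X
   then satisfies X/7 == (X * 0x124924925) >> 35.  */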
3347 /* Compute the inverse of X mod 2**n, i.e., find Y such that X * Y is
3348 congruent to 1 (mod 2**N). */
3350 static unsigned HOST_WIDE_INT
3351 invert_mod2n (unsigned HOST_WIDE_INT x, int n)
3353 /* Solve x*y == 1 (mod 2^n), where x is odd. Return y. */
3355 /* The algorithm notes that the choice y = x satisfies
3356 x*y == 1 mod 2^3, since x is assumed odd.
3357 Each iteration doubles the number of bits of significance in y. */
3359 unsigned HOST_WIDE_INT mask;
3360 unsigned HOST_WIDE_INT y = x;
3361 int nbit = 3;
3363 mask = (n == HOST_BITS_PER_WIDE_INT
3364 ? ~(unsigned HOST_WIDE_INT) 0
3365 : ((unsigned HOST_WIDE_INT) 1 << n) - 1);
3367 while (nbit < n)
3369 y = y * (2 - x*y) & mask; /* Modulo 2^N */
3370 nbit *= 2;
3372 return y;
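/* For example, invert_mod2n (7, 8) == 183, since 7 * 183 == 1281
   == 5 * 256 + 1; the start value y = x is already correct mod 2^3
   because the square of any odd number is 1 mod 8, and each iteration
   doubles the number of correct low-order bits.  */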
3375 /* Emit code to adjust ADJ_OPERAND after a multiplication of OP0 and OP1 with
3376 the wrong signedness flavor. ADJ_OPERAND is already the high half of the
3377 product OP0 x OP1. If UNSIGNEDP is nonzero, adjust the signed product
3378 to become unsigned, if UNSIGNEDP is zero, adjust the unsigned product to
3379 become signed.
3381 The result is put in TARGET if that is convenient.
3383 MODE is the mode of operation. */
3385 rtx
3386 expand_mult_highpart_adjust (enum machine_mode mode, rtx adj_operand, rtx op0,
3387 rtx op1, rtx target, int unsignedp)
3389 rtx tem;
3390 enum rtx_code adj_code = unsignedp ? PLUS : MINUS;
3392 tem = expand_shift (RSHIFT_EXPR, mode, op0,
3393 GET_MODE_BITSIZE (mode) - 1, NULL_RTX, 0);
3394 tem = expand_and (mode, tem, op1, NULL_RTX);
3395 adj_operand
3396 = force_operand (gen_rtx_fmt_ee (adj_code, mode, adj_operand, tem),
3397 adj_operand);
3399 tem = expand_shift (RSHIFT_EXPR, mode, op1,
3400 GET_MODE_BITSIZE (mode) - 1, NULL_RTX, 0);
3401 tem = expand_and (mode, tem, op0, NULL_RTX);
3402 target = force_operand (gen_rtx_fmt_ee (adj_code, mode, adj_operand, tem),
3403 target);
3405 return target;
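/* In effect this applies the identity
   high_unsigned (X, Y) = high_signed (X, Y) + (X < 0 ? Y : 0)
                                             + (Y < 0 ? X : 0)  (mod 2^N),
   or its inverse when UNSIGNEDP is zero; the arithmetic shift by
   GET_MODE_BITSIZE (mode) - 1 broadcasts the sign bit, so the AND selects
   the other operand exactly when the shifted operand is negative.  */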
3408 /* Subroutine of expand_mult_highpart. Return the MODE high part of OP. */
3410 static rtx
3411 extract_high_half (enum machine_mode mode, rtx op)
3413 enum machine_mode wider_mode;
3415 if (mode == word_mode)
3416 return gen_highpart (mode, op);
3418 gcc_assert (!SCALAR_FLOAT_MODE_P (mode));
3420 wider_mode = GET_MODE_WIDER_MODE (mode);
3421 op = expand_shift (RSHIFT_EXPR, wider_mode, op,
3422 GET_MODE_BITSIZE (mode), 0, 1);
3423 return convert_modes (mode, wider_mode, op, 0);
3426 /* Like expand_mult_highpart, but only consider using a multiplication
3427 optab. OP1 is an rtx for the constant operand. */
3429 static rtx
3430 expand_mult_highpart_optab (enum machine_mode mode, rtx op0, rtx op1,
3431 rtx target, int unsignedp, int max_cost)
3433 rtx narrow_op1 = gen_int_mode (INTVAL (op1), mode);
3434 enum machine_mode wider_mode;
3435 optab moptab;
3436 rtx tem;
3437 int size;
3438 bool speed = optimize_insn_for_speed_p ();
3440 gcc_assert (!SCALAR_FLOAT_MODE_P (mode));
3442 wider_mode = GET_MODE_WIDER_MODE (mode);
3443 size = GET_MODE_BITSIZE (mode);
3445 /* Firstly, try using a multiplication insn that only generates the needed
3446 high part of the product, and in the sign flavor of unsignedp. */
3447 if (mul_highpart_cost[speed][mode] < max_cost)
3449 moptab = unsignedp ? umul_highpart_optab : smul_highpart_optab;
3450 tem = expand_binop (mode, moptab, op0, narrow_op1, target,
3451 unsignedp, OPTAB_DIRECT);
3452 if (tem)
3453 return tem;
3456 /* Secondly, same as above, but use sign flavor opposite of unsignedp.
3457 Need to adjust the result after the multiplication. */
3458 if (size - 1 < BITS_PER_WORD
3459 && (mul_highpart_cost[speed][mode] + 2 * shift_cost[speed][mode][size-1]
3460 + 4 * add_cost[speed][mode] < max_cost))
3462 moptab = unsignedp ? smul_highpart_optab : umul_highpart_optab;
3463 tem = expand_binop (mode, moptab, op0, narrow_op1, target,
3464 unsignedp, OPTAB_DIRECT);
3465 if (tem)
3466 /* We used the wrong signedness. Adjust the result. */
3467 return expand_mult_highpart_adjust (mode, tem, op0, narrow_op1,
3468 tem, unsignedp);
3471 /* Try widening multiplication. */
3472 moptab = unsignedp ? umul_widen_optab : smul_widen_optab;
3473 if (widening_optab_handler (moptab, wider_mode, mode) != CODE_FOR_nothing
3474 && mul_widen_cost[speed][wider_mode] < max_cost)
3476 tem = expand_binop (wider_mode, moptab, op0, narrow_op1, 0,
3477 unsignedp, OPTAB_WIDEN);
3478 if (tem)
3479 return extract_high_half (mode, tem);
3482 /* Try widening the mode and performing a non-widening multiplication. */
3483 if (optab_handler (smul_optab, wider_mode) != CODE_FOR_nothing
3484 && size - 1 < BITS_PER_WORD
3485 && mul_cost[speed][wider_mode] + shift_cost[speed][mode][size-1] < max_cost)
3487 rtx insns, wop0, wop1;
3489 /* We need to widen the operands, for example to ensure the
3490 constant multiplier is correctly sign or zero extended.
3491 Use a sequence to clean up any instructions emitted by
3492 the conversions if things don't work out. */
3493 start_sequence ();
3494 wop0 = convert_modes (wider_mode, mode, op0, unsignedp);
3495 wop1 = convert_modes (wider_mode, mode, op1, unsignedp);
3496 tem = expand_binop (wider_mode, smul_optab, wop0, wop1, 0,
3497 unsignedp, OPTAB_WIDEN);
3498 insns = get_insns ();
3499 end_sequence ();
3501 if (tem)
3503 emit_insn (insns);
3504 return extract_high_half (mode, tem);
3508 /* Try widening multiplication of opposite signedness, and adjust. */
3509 moptab = unsignedp ? smul_widen_optab : umul_widen_optab;
3510 if (widening_optab_handler (moptab, wider_mode, mode) != CODE_FOR_nothing
3511 && size - 1 < BITS_PER_WORD
3512 && (mul_widen_cost[speed][wider_mode] + 2 * shift_cost[speed][mode][size-1]
3513 + 4 * add_cost[speed][mode] < max_cost))
3515 tem = expand_binop (wider_mode, moptab, op0, narrow_op1,
3516 NULL_RTX, ! unsignedp, OPTAB_WIDEN);
3517 if (tem != 0)
3519 tem = extract_high_half (mode, tem);
3520 /* We used the wrong signedness. Adjust the result. */
3521 return expand_mult_highpart_adjust (mode, tem, op0, narrow_op1,
3522 target, unsignedp);
3526 return 0;
3529 /* Emit code to multiply OP0 and OP1 (where OP1 is an integer constant),
3530 putting the high half of the result in TARGET if that is convenient,
3531 and return where the result is. If the operation cannot be performed,
3532 0 is returned.
3534 MODE is the mode of operation and result.
3536 UNSIGNEDP nonzero means unsigned multiply.
3538 MAX_COST is the total allowed cost for the expanded RTL. */
3540 static rtx
3541 expand_mult_highpart (enum machine_mode mode, rtx op0, rtx op1,
3542 rtx target, int unsignedp, int max_cost)
3544 enum machine_mode wider_mode = GET_MODE_WIDER_MODE (mode);
3545 unsigned HOST_WIDE_INT cnst1;
3546 int extra_cost;
3547 bool sign_adjust = false;
3548 enum mult_variant variant;
3549 struct algorithm alg;
3550 rtx tem;
3551 bool speed = optimize_insn_for_speed_p ();
3553 gcc_assert (!SCALAR_FLOAT_MODE_P (mode));
3554 /* We can't support modes wider than HOST_BITS_PER_WIDE_INT. */
3555 gcc_assert (HWI_COMPUTABLE_MODE_P (mode));
3557 cnst1 = INTVAL (op1) & GET_MODE_MASK (mode);
3559 /* We can't optimize modes wider than BITS_PER_WORD.
3560 ??? We might be able to perform double-word arithmetic if
3561 mode == word_mode, however all the cost calculations in
3562 synth_mult etc. assume single-word operations. */
3563 if (GET_MODE_BITSIZE (wider_mode) > BITS_PER_WORD)
3564 return expand_mult_highpart_optab (mode, op0, op1, target,
3565 unsignedp, max_cost);
3567 extra_cost = shift_cost[speed][mode][GET_MODE_BITSIZE (mode) - 1];
3569 /* Check whether we try to multiply by a negative constant. */
3570 if (!unsignedp && ((cnst1 >> (GET_MODE_BITSIZE (mode) - 1)) & 1))
3572 sign_adjust = true;
3573 extra_cost += add_cost[speed][mode];
3576 /* See whether shift/add multiplication is cheap enough. */
3577 if (choose_mult_variant (wider_mode, cnst1, &alg, &variant,
3578 max_cost - extra_cost))
3580 /* See whether the specialized multiplication optabs are
3581 cheaper than the shift/add version. */
3582 tem = expand_mult_highpart_optab (mode, op0, op1, target, unsignedp,
3583 alg.cost.cost + extra_cost);
3584 if (tem)
3585 return tem;
3587 tem = convert_to_mode (wider_mode, op0, unsignedp);
3588 tem = expand_mult_const (wider_mode, tem, cnst1, 0, &alg, variant);
3589 tem = extract_high_half (mode, tem);
3591 /* Adjust result for signedness. */
3592 if (sign_adjust)
3593 tem = force_operand (gen_rtx_MINUS (mode, tem, op0), tem);
3595 return tem;
3597 return expand_mult_highpart_optab (mode, op0, op1, target,
3598 unsignedp, max_cost);
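/* This high-part multiply is what makes division by a compile-time
   constant cheap in expand_divmod below.  A standalone sketch (plain C,
   not GCC's actual output) of unsigned 32-bit division by 3: the magic
   constant 0xAAAAAAAB is ceil(2^33 / 3), so the quotient is the high
   half of the product shifted right once more.  */

#include <stdint.h>

static uint32_t udiv3 (uint32_t x)
{
  uint32_t hi = (uint32_t) (((uint64_t) x * 0xAAAAAAABu) >> 32);
  return hi >> 1;               /* exact for every uint32_t x */
}

/* e.g. udiv3 (100) == 33 and udiv3 (0xFFFFFFFF) == 0x55555555.  */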
3602 /* Expand signed modulus of OP0 by a power of two D in mode MODE. */
3604 static rtx
3605 expand_smod_pow2 (enum machine_mode mode, rtx op0, HOST_WIDE_INT d)
3607 unsigned HOST_WIDE_INT masklow, maskhigh;
3608 rtx result, temp, shift, label;
3609 int logd;
3611 logd = floor_log2 (d);
3612 result = gen_reg_rtx (mode);
3614 /* Avoid conditional branches when they're expensive. */
3615 if (BRANCH_COST (optimize_insn_for_speed_p (), false) >= 2
3616 && optimize_insn_for_speed_p ())
3618 rtx signmask = emit_store_flag (result, LT, op0, const0_rtx,
3619 mode, 0, -1);
3620 if (signmask)
3622 signmask = force_reg (mode, signmask);
3623 masklow = ((HOST_WIDE_INT) 1 << logd) - 1;
3624 shift = GEN_INT (GET_MODE_BITSIZE (mode) - logd);
3626 /* Use the rtx_cost of a LSHIFTRT instruction to determine
3627 which instruction sequence to use. If logical right shifts
3628 are expensive then use 2 XORs, 2 SUBs and an AND, otherwise
3629 use a LSHIFTRT, 1 ADD, 1 SUB and an AND. */
3631 temp = gen_rtx_LSHIFTRT (mode, result, shift);
3632 if (optab_handler (lshr_optab, mode) == CODE_FOR_nothing
3633 || (set_src_cost (temp, optimize_insn_for_speed_p ())
3634 > COSTS_N_INSNS (2)))
3636 temp = expand_binop (mode, xor_optab, op0, signmask,
3637 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3638 temp = expand_binop (mode, sub_optab, temp, signmask,
3639 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3640 temp = expand_binop (mode, and_optab, temp, GEN_INT (masklow),
3641 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3642 temp = expand_binop (mode, xor_optab, temp, signmask,
3643 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3644 temp = expand_binop (mode, sub_optab, temp, signmask,
3645 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3647 else
3649 signmask = expand_binop (mode, lshr_optab, signmask, shift,
3650 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3651 signmask = force_reg (mode, signmask);
3653 temp = expand_binop (mode, add_optab, op0, signmask,
3654 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3655 temp = expand_binop (mode, and_optab, temp, GEN_INT (masklow),
3656 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3657 temp = expand_binop (mode, sub_optab, temp, signmask,
3658 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3660 return temp;
3664 /* Mask contains the mode's signbit and the significant bits of the
3665 modulus. By including the signbit in the operation, many targets
3666 can avoid an explicit compare operation in the following comparison
3667 against zero. */
3669 masklow = ((HOST_WIDE_INT) 1 << logd) - 1;
3670 if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
3672 masklow |= (HOST_WIDE_INT) -1 << (GET_MODE_BITSIZE (mode) - 1);
3673 maskhigh = -1;
3675 else
3676 maskhigh = (HOST_WIDE_INT) -1
3677 << (GET_MODE_BITSIZE (mode) - HOST_BITS_PER_WIDE_INT - 1);
3679 temp = expand_binop (mode, and_optab, op0,
3680 immed_double_const (masklow, maskhigh, mode),
3681 result, 1, OPTAB_LIB_WIDEN);
3682 if (temp != result)
3683 emit_move_insn (result, temp);
3685 label = gen_label_rtx ();
3686 do_cmp_and_jump (result, const0_rtx, GE, mode, label);
3688 temp = expand_binop (mode, sub_optab, result, const1_rtx, result,
3689 0, OPTAB_LIB_WIDEN);
3690 masklow = (HOST_WIDE_INT) -1 << logd;
3691 maskhigh = -1;
3692 temp = expand_binop (mode, ior_optab, temp,
3693 immed_double_const (masklow, maskhigh, mode),
3694 result, 1, OPTAB_LIB_WIDEN);
3695 temp = expand_binop (mode, add_optab, temp, const1_rtx, result,
3696 0, OPTAB_LIB_WIDEN);
3697 if (temp != result)
3698 emit_move_insn (result, temp);
3699 emit_label (label);
3700 return result;
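/* A standalone sketch of the two branch-free sequences built above, for
   32-bit x % 8 (logd == 3, masklow == 7).  Both give the C-style
   remainder that truncates toward zero; the sketch assumes ordinary
   two's-complement int32_t and ignores the INT_MIN corner case, which
   the RTL-level code handles by working modulo the mode size.  */

#include <stdint.h>

static int32_t smod8_xor_form (int32_t x)       /* xor/sub/and/xor/sub */
{
  int32_t signmask = -(x < 0);                  /* 0 or -1 */
  return ((((x ^ signmask) - signmask) & 7) ^ signmask) - signmask;
}

static int32_t smod8_shift_form (int32_t x)     /* shift/add/and/sub */
{
  int32_t signmask
    = (int32_t) (-(uint32_t) (x < 0) >> (32 - 3));   /* 0 or 7 */
  return ((x + signmask) & 7) - signmask;
}

/* e.g. smod8_xor_form (-13) == -5 and smod8_shift_form (-13) == -5.  */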
3703 /* Expand signed division of OP0 by a power of two D in mode MODE.
3704 This routine is only called for positive values of D. */
3706 static rtx
3707 expand_sdiv_pow2 (enum machine_mode mode, rtx op0, HOST_WIDE_INT d)
3709 rtx temp, label;
3710 int logd;
3712 logd = floor_log2 (d);
3714 if (d == 2
3715 && BRANCH_COST (optimize_insn_for_speed_p (),
3716 false) >= 1)
3718 temp = gen_reg_rtx (mode);
3719 temp = emit_store_flag (temp, LT, op0, const0_rtx, mode, 0, 1);
3720 temp = expand_binop (mode, add_optab, temp, op0, NULL_RTX,
3721 0, OPTAB_LIB_WIDEN);
3722 return expand_shift (RSHIFT_EXPR, mode, temp, logd, NULL_RTX, 0);
3725 #ifdef HAVE_conditional_move
3726 if (BRANCH_COST (optimize_insn_for_speed_p (), false)
3727 >= 2)
3729 rtx temp2;
3731 /* ??? emit_conditional_move forces a stack adjustment via
3732 compare_from_rtx so, if the sequence is discarded, it will
3733 be lost. Do it now instead. */
3734 do_pending_stack_adjust ();
3736 start_sequence ();
3737 temp2 = copy_to_mode_reg (mode, op0);
3738 temp = expand_binop (mode, add_optab, temp2, GEN_INT (d-1),
3739 NULL_RTX, 0, OPTAB_LIB_WIDEN);
3740 temp = force_reg (mode, temp);
3742 /* Construct "temp2 = (temp2 < 0) ? temp : temp2". */
3743 temp2 = emit_conditional_move (temp2, LT, temp2, const0_rtx,
3744 mode, temp, temp2, mode, 0);
3745 if (temp2)
3747 rtx seq = get_insns ();
3748 end_sequence ();
3749 emit_insn (seq);
3750 return expand_shift (RSHIFT_EXPR, mode, temp2, logd, NULL_RTX, 0);
3752 end_sequence ();
3754 #endif
3756 if (BRANCH_COST (optimize_insn_for_speed_p (),
3757 false) >= 2)
3759 int ushift = GET_MODE_BITSIZE (mode) - logd;
3761 temp = gen_reg_rtx (mode);
3762 temp = emit_store_flag (temp, LT, op0, const0_rtx, mode, 0, -1);
3763 if (shift_cost[optimize_insn_for_speed_p ()][mode][ushift] > COSTS_N_INSNS (1))
3764 temp = expand_binop (mode, and_optab, temp, GEN_INT (d - 1),
3765 NULL_RTX, 0, OPTAB_LIB_WIDEN);
3766 else
3767 temp = expand_shift (RSHIFT_EXPR, mode, temp,
3768 ushift, NULL_RTX, 1);
3769 temp = expand_binop (mode, add_optab, temp, op0, NULL_RTX,
3770 0, OPTAB_LIB_WIDEN);
3771 return expand_shift (RSHIFT_EXPR, mode, temp, logd, NULL_RTX, 0);
3774 label = gen_label_rtx ();
3775 temp = copy_to_mode_reg (mode, op0);
3776 do_cmp_and_jump (temp, const0_rtx, GE, mode, label);
3777 expand_inc (temp, GEN_INT (d - 1));
3778 emit_label (label);
3779 return expand_shift (RSHIFT_EXPR, mode, temp, logd, NULL_RTX, 0);
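/* A standalone sketch of the branch-free paths above (plain C, assuming
   the usual arithmetic right shift on signed values, which is what the
   RTL relies on): negative operands are biased by d - 1 before the
   arithmetic shift so the quotient truncates toward zero.  */

#include <stdint.h>

static int32_t sdiv2 (int32_t x)        /* the d == 2 special case */
{
  return (x + (x < 0)) >> 1;
}

static int32_t sdiv8 (int32_t x)        /* general power of two, d == 8 */
{
  int32_t bias = (x < 0) ? 7 : 0;       /* (x < 0) * (d - 1) */
  return (x + bias) >> 3;
}

/* e.g. sdiv8 (-9) == -1 and sdiv8 (9) == 1, both truncating toward 0.  */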
3782 /* Emit the code to divide OP0 by OP1, putting the result in TARGET
3783 if that is convenient, and returning where the result is.
3784 You may request either the quotient or the remainder as the result;
3785 specify REM_FLAG nonzero to get the remainder.
3787 CODE is the expression code for which kind of division this is;
3788 it controls how rounding is done. MODE is the machine mode to use.
3789 UNSIGNEDP nonzero means do unsigned division. */
3791 /* ??? For CEIL_MOD_EXPR, can compute incorrect remainder with ANDI
3792 and then correct it by or'ing in missing high bits
3793 if result of ANDI is nonzero.
3794 For ROUND_MOD_EXPR, can use ANDI and then sign-extend the result.
3795 This could optimize to a bfexts instruction.
3796 But C doesn't use these operations, so their optimizations are
3797 left for later. */
3798 /* ??? For modulo, we don't actually need the highpart of the first product,
3799 the low part will do nicely. And for small divisors, the second multiply
3800 can also be a low-part only multiply or even be completely left out.
3801 E.g. to calculate the remainder of a division by 3 with a 32 bit
3802 multiply, multiply with 0x55555556 and extract the upper two bits;
3803 the result is exact for inputs up to 0x1fffffff.
3804 The input range can be reduced by using cross-sum rules.
3805 For odd divisors >= 3, the following table gives right shift counts
3806 so that if a number is shifted by an integer multiple of the given
3807 amount, the remainder stays the same:
3808 2, 4, 3, 6, 10, 12, 4, 8, 18, 6, 11, 20, 18, 0, 5, 10, 12, 0, 12, 20,
3809 14, 12, 23, 21, 8, 0, 20, 18, 0, 0, 6, 12, 0, 22, 0, 18, 20, 30, 0, 0,
3810 0, 8, 0, 11, 12, 10, 36, 0, 30, 0, 0, 12, 0, 0, 0, 0, 44, 12, 24, 0,
3811 20, 0, 7, 14, 0, 18, 36, 0, 0, 46, 60, 0, 42, 0, 15, 24, 20, 0, 0, 33,
3812 0, 20, 0, 0, 18, 0, 60, 0, 0, 0, 0, 0, 40, 18, 0, 0, 12
3814 Cross-sum rules for even numbers can be derived by leaving as many bits
3815 to the right alone as the divisor has zeros to the right.
3816 E.g. if x is an unsigned 32 bit number:
3817 (x mod 12) == (((x & 1023) + ((x >> 8) & ~3)) * 0x15555558 >> 2 * 3) >> 28
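/* A standalone check of the remainder-by-3 trick described above
   (0x55555556 == (2^32 + 2) / 3); as the note says, it is exact for
   inputs up to 0x1fffffff.  */

#include <stdint.h>

static uint32_t urem3 (uint32_t x)      /* valid for x <= 0x1fffffff */
{
  uint32_t low = x * 0x55555556u;       /* low 32 bits of the product */
  return low >> 30;                     /* top two bits == x % 3 */
}

/* e.g. 100 * 0x55555556 == 0x21_55555598; the low word's top two bits
   are 01, and indeed 100 % 3 == 1.  */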
3821 expand_divmod (int rem_flag, enum tree_code code, enum machine_mode mode,
3822 rtx op0, rtx op1, rtx target, int unsignedp)
3824 enum machine_mode compute_mode;
3825 rtx tquotient;
3826 rtx quotient = 0, remainder = 0;
3827 rtx last;
3828 int size;
3829 rtx insn;
3830 optab optab1, optab2;
3831 int op1_is_constant, op1_is_pow2 = 0;
3832 int max_cost, extra_cost;
3833 static HOST_WIDE_INT last_div_const = 0;
3834 static HOST_WIDE_INT ext_op1;
3835 bool speed = optimize_insn_for_speed_p ();
3837 op1_is_constant = CONST_INT_P (op1);
3838 if (op1_is_constant)
3840 ext_op1 = INTVAL (op1);
3841 if (unsignedp)
3842 ext_op1 &= GET_MODE_MASK (mode);
3843 op1_is_pow2 = ((EXACT_POWER_OF_2_OR_ZERO_P (ext_op1)
3844 || (! unsignedp && EXACT_POWER_OF_2_OR_ZERO_P (-ext_op1))));
3848 This is the structure of expand_divmod:
3850 First comes code to fix up the operands so we can perform the operations
3851 correctly and efficiently.
3853 Second comes a switch statement with code specific for each rounding mode.
3854 For some special operands this code emits all RTL for the desired
3855 operation, for other cases, it generates only a quotient and stores it in
3856 QUOTIENT. The case for trunc division/remainder might leave quotient = 0,
3857 to indicate that it has not done anything.
3859 Last comes code that finishes the operation. If QUOTIENT is set and
3860 REM_FLAG is set, the remainder is computed as OP0 - QUOTIENT * OP1. If
3861 QUOTIENT is not set, it is computed using trunc rounding.
3863 We try to generate special code for division and remainder when OP1 is a
3864 constant. If |OP1| = 2**n we can use shifts and some other fast
3865 operations. For other values of OP1, we compute a carefully selected
3866 fixed-point approximation m = 1/OP1, and generate code that multiplies OP0
3867 by m.
3869 In all cases but EXACT_DIV_EXPR, this multiplication requires the upper
3870 half of the product. Different strategies for generating the product are
3871 implemented in expand_mult_highpart.
3873 If what we actually want is the remainder, we generate that by another
3874 by-constant multiplication and a subtraction. */
3876 /* We shouldn't be called with OP1 == const1_rtx, but some of the
3877 code below will malfunction if we are, so check here and handle
3878 the special case if so. */
3879 if (op1 == const1_rtx)
3880 return rem_flag ? const0_rtx : op0;
3882 /* When dividing by -1, we could get an overflow.
3883 negv_optab can handle overflows. */
3884 if (! unsignedp && op1 == constm1_rtx)
3886 if (rem_flag)
3887 return const0_rtx;
3888 return expand_unop (mode, flag_trapv && GET_MODE_CLASS(mode) == MODE_INT
3889 ? negv_optab : neg_optab, op0, target, 0);
3892 if (target
3893 /* Don't use the function value register as a target
3894 since we have to read it as well as write it,
3895 and function-inlining gets confused by this. */
3896 && ((REG_P (target) && REG_FUNCTION_VALUE_P (target))
3897 /* Don't clobber an operand while doing a multi-step calculation. */
3898 || ((rem_flag || op1_is_constant)
3899 && (reg_mentioned_p (target, op0)
3900 || (MEM_P (op0) && MEM_P (target))))
3901 || reg_mentioned_p (target, op1)
3902 || (MEM_P (op1) && MEM_P (target))))
3903 target = 0;
3905 /* Get the mode in which to perform this computation. Normally it will
3906 be MODE, but sometimes we can't do the desired operation in MODE.
3907 If so, pick a wider mode in which we can do the operation. Convert
3908 to that mode at the start to avoid repeated conversions.
3910 First see what operations we need. These depend on the expression
3911 we are evaluating. (We assume that divxx3 insns exist under the
3912 same conditions that modxx3 insns and that these insns don't normally
3913 fail. If these assumptions are not correct, we may generate less
3914 efficient code in some cases.)
3916 Then see if we find a mode in which we can open-code that operation
3917 (either a division, modulus, or shift). Finally, check for the smallest
3918 mode for which we can do the operation with a library call. */
3920 /* We might want to refine this now that we have division-by-constant
3921 optimization. Since expand_mult_highpart tries so many variants, it is
3922 not straightforward to generalize this. Maybe we should make an array
3923 of possible modes in init_expmed? Save this for GCC 2.7. */
3925 optab1 = ((op1_is_pow2 && op1 != const0_rtx)
3926 ? (unsignedp ? lshr_optab : ashr_optab)
3927 : (unsignedp ? udiv_optab : sdiv_optab));
3928 optab2 = ((op1_is_pow2 && op1 != const0_rtx)
3929 ? optab1
3930 : (unsignedp ? udivmod_optab : sdivmod_optab));
3932 for (compute_mode = mode; compute_mode != VOIDmode;
3933 compute_mode = GET_MODE_WIDER_MODE (compute_mode))
3934 if (optab_handler (optab1, compute_mode) != CODE_FOR_nothing
3935 || optab_handler (optab2, compute_mode) != CODE_FOR_nothing)
3936 break;
3938 if (compute_mode == VOIDmode)
3939 for (compute_mode = mode; compute_mode != VOIDmode;
3940 compute_mode = GET_MODE_WIDER_MODE (compute_mode))
3941 if (optab_libfunc (optab1, compute_mode)
3942 || optab_libfunc (optab2, compute_mode))
3943 break;
3945 /* If we still couldn't find a mode, use MODE, but expand_binop will
3946 probably die. */
3947 if (compute_mode == VOIDmode)
3948 compute_mode = mode;
3950 if (target && GET_MODE (target) == compute_mode)
3951 tquotient = target;
3952 else
3953 tquotient = gen_reg_rtx (compute_mode);
3955 size = GET_MODE_BITSIZE (compute_mode);
3956 #if 0
3957 /* It should be possible to restrict the precision to GET_MODE_BITSIZE
3958 (mode), and thereby get better code when OP1 is a constant. Do that
3959 later. It will require going over all usages of SIZE below. */
3960 size = GET_MODE_BITSIZE (mode);
3961 #endif
3963 /* Only deduct something for a REM if the last divide done was
3964 for a different constant. Then set the constant of the last
3965 divide. */
3966 max_cost = unsignedp ? udiv_cost[speed][compute_mode] : sdiv_cost[speed][compute_mode];
3967 if (rem_flag && ! (last_div_const != 0 && op1_is_constant
3968 && INTVAL (op1) == last_div_const))
3969 max_cost -= mul_cost[speed][compute_mode] + add_cost[speed][compute_mode];
3971 last_div_const = ! rem_flag && op1_is_constant ? INTVAL (op1) : 0;
3973 /* Now convert to the best mode to use. */
3974 if (compute_mode != mode)
3976 op0 = convert_modes (compute_mode, mode, op0, unsignedp);
3977 op1 = convert_modes (compute_mode, mode, op1, unsignedp);
3979 /* convert_modes may have placed op1 into a register, so we
3980 must recompute the following. */
3981 op1_is_constant = CONST_INT_P (op1);
3982 op1_is_pow2 = (op1_is_constant
3983 && ((EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1))
3984 || (! unsignedp
3985 && EXACT_POWER_OF_2_OR_ZERO_P (-INTVAL (op1))))));
3988 /* If one of the operands is a volatile MEM, copy it into a register. */
3990 if (MEM_P (op0) && MEM_VOLATILE_P (op0))
3991 op0 = force_reg (compute_mode, op0);
3992 if (MEM_P (op1) && MEM_VOLATILE_P (op1))
3993 op1 = force_reg (compute_mode, op1);
3995 /* If we need the remainder or if OP1 is constant, we need to
3996 put OP0 in a register in case it has any queued subexpressions. */
3997 if (rem_flag || op1_is_constant)
3998 op0 = force_reg (compute_mode, op0);
4000 last = get_last_insn ();
4002 /* Promote floor rounding to trunc rounding for unsigned operations. */
4003 if (unsignedp)
4005 if (code == FLOOR_DIV_EXPR)
4006 code = TRUNC_DIV_EXPR;
4007 if (code == FLOOR_MOD_EXPR)
4008 code = TRUNC_MOD_EXPR;
4009 if (code == EXACT_DIV_EXPR && op1_is_pow2)
4010 code = TRUNC_DIV_EXPR;
4013 if (op1 != const0_rtx)
4014 switch (code)
4016 case TRUNC_MOD_EXPR:
4017 case TRUNC_DIV_EXPR:
4018 if (op1_is_constant)
4020 if (unsignedp)
4022 unsigned HOST_WIDE_INT mh;
4023 int pre_shift, post_shift;
4024 int dummy;
4025 rtx ml;
4026 unsigned HOST_WIDE_INT d = (INTVAL (op1)
4027 & GET_MODE_MASK (compute_mode));
4029 if (EXACT_POWER_OF_2_OR_ZERO_P (d))
4031 pre_shift = floor_log2 (d);
4032 if (rem_flag)
4034 remainder
4035 = expand_binop (compute_mode, and_optab, op0,
4036 GEN_INT (((HOST_WIDE_INT) 1 << pre_shift) - 1),
4037 remainder, 1,
4038 OPTAB_LIB_WIDEN);
4039 if (remainder)
4040 return gen_lowpart (mode, remainder);
4042 quotient = expand_shift (RSHIFT_EXPR, compute_mode, op0,
4043 pre_shift, tquotient, 1);
4045 else if (size <= HOST_BITS_PER_WIDE_INT)
4047 if (d >= ((unsigned HOST_WIDE_INT) 1 << (size - 1)))
4049 /* Most significant bit of divisor is set; emit an scc
4050 insn. */
4051 quotient = emit_store_flag_force (tquotient, GEU, op0, op1,
4052 compute_mode, 1, 1);
4054 else
4056 /* Find a suitable multiplier and right shift count
4057 instead of multiplying with D. */
4059 mh = choose_multiplier (d, size, size,
4060 &ml, &post_shift, &dummy);
4062 /* If the suggested multiplier is more than SIZE bits,
4063 we can do better for even divisors, using an
4064 initial right shift. */
4065 if (mh != 0 && (d & 1) == 0)
4067 pre_shift = floor_log2 (d & -d);
4068 mh = choose_multiplier (d >> pre_shift, size,
4069 size - pre_shift,
4070 &ml, &post_shift, &dummy);
4071 gcc_assert (!mh);
4073 else
4074 pre_shift = 0;
4076 if (mh != 0)
4078 rtx t1, t2, t3, t4;
4080 if (post_shift - 1 >= BITS_PER_WORD)
4081 goto fail1;
4083 extra_cost
4084 = (shift_cost[speed][compute_mode][post_shift - 1]
4085 + shift_cost[speed][compute_mode][1]
4086 + 2 * add_cost[speed][compute_mode]);
4087 t1 = expand_mult_highpart (compute_mode, op0, ml,
4088 NULL_RTX, 1,
4089 max_cost - extra_cost);
4090 if (t1 == 0)
4091 goto fail1;
4092 t2 = force_operand (gen_rtx_MINUS (compute_mode,
4093 op0, t1),
4094 NULL_RTX);
4095 t3 = expand_shift (RSHIFT_EXPR, compute_mode,
4096 t2, 1, NULL_RTX, 1);
4097 t4 = force_operand (gen_rtx_PLUS (compute_mode,
4098 t1, t3),
4099 NULL_RTX);
4100 quotient = expand_shift
4101 (RSHIFT_EXPR, compute_mode, t4,
4102 post_shift - 1, tquotient, 1);
4104 else
4106 rtx t1, t2;
4108 if (pre_shift >= BITS_PER_WORD
4109 || post_shift >= BITS_PER_WORD)
4110 goto fail1;
4112 t1 = expand_shift
4113 (RSHIFT_EXPR, compute_mode, op0,
4114 pre_shift, NULL_RTX, 1);
4115 extra_cost
4116 = (shift_cost[speed][compute_mode][pre_shift]
4117 + shift_cost[speed][compute_mode][post_shift]);
4118 t2 = expand_mult_highpart (compute_mode, t1, ml,
4119 NULL_RTX, 1,
4120 max_cost - extra_cost);
4121 if (t2 == 0)
4122 goto fail1;
4123 quotient = expand_shift
4124 (RSHIFT_EXPR, compute_mode, t2,
4125 post_shift, tquotient, 1);
4129 else /* Too wide mode to use tricky code */
4130 break;
4132 insn = get_last_insn ();
4133 if (insn != last)
4134 set_dst_reg_note (insn, REG_EQUAL,
4135 gen_rtx_UDIV (compute_mode, op0, op1),
4136 quotient);
4138 else /* TRUNC_DIV, signed */
4140 unsigned HOST_WIDE_INT ml;
4141 int lgup, post_shift;
4142 rtx mlr;
4143 HOST_WIDE_INT d = INTVAL (op1);
4144 unsigned HOST_WIDE_INT abs_d;
4146 /* Since d might be INT_MIN, we have to cast to
4147 unsigned HOST_WIDE_INT before negating to avoid
4148 undefined signed overflow. */
4149 abs_d = (d >= 0
4150 ? (unsigned HOST_WIDE_INT) d
4151 : - (unsigned HOST_WIDE_INT) d);
4153 /* n rem d = n rem -d */
4154 if (rem_flag && d < 0)
4156 d = abs_d;
4157 op1 = gen_int_mode (abs_d, compute_mode);
4160 if (d == 1)
4161 quotient = op0;
4162 else if (d == -1)
4163 quotient = expand_unop (compute_mode, neg_optab, op0,
4164 tquotient, 0);
4165 else if (HOST_BITS_PER_WIDE_INT >= size
4166 && abs_d == (unsigned HOST_WIDE_INT) 1 << (size - 1))
4168 /* This case is not handled correctly below. */
4169 quotient = emit_store_flag (tquotient, EQ, op0, op1,
4170 compute_mode, 1, 1);
4171 if (quotient == 0)
4172 goto fail1;
4174 else if (EXACT_POWER_OF_2_OR_ZERO_P (d)
4175 && (rem_flag ? smod_pow2_cheap[speed][compute_mode]
4176 : sdiv_pow2_cheap[speed][compute_mode])
4177 /* We assume that the cheap metric is true if the
4178 optab has an expander for this mode. */
4179 && ((optab_handler ((rem_flag ? smod_optab
4180 : sdiv_optab),
4181 compute_mode)
4182 != CODE_FOR_nothing)
4183 || (optab_handler (sdivmod_optab,
4184 compute_mode)
4185 != CODE_FOR_nothing)))
4187 else if (EXACT_POWER_OF_2_OR_ZERO_P (abs_d))
4189 if (rem_flag)
4191 remainder = expand_smod_pow2 (compute_mode, op0, d);
4192 if (remainder)
4193 return gen_lowpart (mode, remainder);
4196 if (sdiv_pow2_cheap[speed][compute_mode]
4197 && ((optab_handler (sdiv_optab, compute_mode)
4198 != CODE_FOR_nothing)
4199 || (optab_handler (sdivmod_optab, compute_mode)
4200 != CODE_FOR_nothing)))
4201 quotient = expand_divmod (0, TRUNC_DIV_EXPR,
4202 compute_mode, op0,
4203 gen_int_mode (abs_d,
4204 compute_mode),
4205 NULL_RTX, 0);
4206 else
4207 quotient = expand_sdiv_pow2 (compute_mode, op0, abs_d);
4209 /* We have computed OP0 / abs(OP1). If OP1 is negative,
4210 negate the quotient. */
4211 if (d < 0)
4213 insn = get_last_insn ();
4214 if (insn != last
4215 && abs_d < ((unsigned HOST_WIDE_INT) 1
4216 << (HOST_BITS_PER_WIDE_INT - 1)))
4217 set_dst_reg_note (insn, REG_EQUAL,
4218 gen_rtx_DIV (compute_mode, op0,
4219 gen_int_mode
4220 (abs_d,
4221 compute_mode)),
4222 quotient);
4224 quotient = expand_unop (compute_mode, neg_optab,
4225 quotient, quotient, 0);
4228 else if (size <= HOST_BITS_PER_WIDE_INT)
4230 choose_multiplier (abs_d, size, size - 1,
4231 &mlr, &post_shift, &lgup);
4232 ml = (unsigned HOST_WIDE_INT) INTVAL (mlr);
4233 if (ml < (unsigned HOST_WIDE_INT) 1 << (size - 1))
4235 rtx t1, t2, t3;
4237 if (post_shift >= BITS_PER_WORD
4238 || size - 1 >= BITS_PER_WORD)
4239 goto fail1;
4241 extra_cost = (shift_cost[speed][compute_mode][post_shift]
4242 + shift_cost[speed][compute_mode][size - 1]
4243 + add_cost[speed][compute_mode]);
4244 t1 = expand_mult_highpart (compute_mode, op0, mlr,
4245 NULL_RTX, 0,
4246 max_cost - extra_cost);
4247 if (t1 == 0)
4248 goto fail1;
4249 t2 = expand_shift
4250 (RSHIFT_EXPR, compute_mode, t1,
4251 post_shift, NULL_RTX, 0);
4252 t3 = expand_shift
4253 (RSHIFT_EXPR, compute_mode, op0,
4254 size - 1, NULL_RTX, 0);
4255 if (d < 0)
4256 quotient
4257 = force_operand (gen_rtx_MINUS (compute_mode,
4258 t3, t2),
4259 tquotient);
4260 else
4261 quotient
4262 = force_operand (gen_rtx_MINUS (compute_mode,
4263 t2, t3),
4264 tquotient);
4266 else
4268 rtx t1, t2, t3, t4;
4270 if (post_shift >= BITS_PER_WORD
4271 || size - 1 >= BITS_PER_WORD)
4272 goto fail1;
4274 ml |= (~(unsigned HOST_WIDE_INT) 0) << (size - 1);
4275 mlr = gen_int_mode (ml, compute_mode);
4276 extra_cost = (shift_cost[speed][compute_mode][post_shift]
4277 + shift_cost[speed][compute_mode][size - 1]
4278 + 2 * add_cost[speed][compute_mode]);
4279 t1 = expand_mult_highpart (compute_mode, op0, mlr,
4280 NULL_RTX, 0,
4281 max_cost - extra_cost);
4282 if (t1 == 0)
4283 goto fail1;
4284 t2 = force_operand (gen_rtx_PLUS (compute_mode,
4285 t1, op0),
4286 NULL_RTX);
4287 t3 = expand_shift
4288 (RSHIFT_EXPR, compute_mode, t2,
4289 post_shift, NULL_RTX, 0);
4290 t4 = expand_shift
4291 (RSHIFT_EXPR, compute_mode, op0,
4292 size - 1, NULL_RTX, 0);
4293 if (d < 0)
4294 quotient
4295 = force_operand (gen_rtx_MINUS (compute_mode,
4296 t4, t3),
4297 tquotient);
4298 else
4299 quotient
4300 = force_operand (gen_rtx_MINUS (compute_mode,
4301 t3, t4),
4302 tquotient);
4305 else /* Too wide mode to use tricky code */
4306 break;
4308 insn = get_last_insn ();
4309 if (insn != last)
4310 set_dst_reg_note (insn, REG_EQUAL,
4311 gen_rtx_DIV (compute_mode, op0, op1),
4312 quotient);
4314 break;
4316 fail1:
4317 delete_insns_since (last);
4318 break;
4320 case FLOOR_DIV_EXPR:
4321 case FLOOR_MOD_EXPR:
4322 /* We will come here only for signed operations. */
4323 if (op1_is_constant && HOST_BITS_PER_WIDE_INT >= size)
4325 unsigned HOST_WIDE_INT mh;
4326 int pre_shift, lgup, post_shift;
4327 HOST_WIDE_INT d = INTVAL (op1);
4328 rtx ml;
4330 if (d > 0)
4332 /* We could just as easily deal with negative constants here,
4333 but it does not seem worth the trouble for GCC 2.6. */
4334 if (EXACT_POWER_OF_2_OR_ZERO_P (d))
4336 pre_shift = floor_log2 (d);
4337 if (rem_flag)
4339 remainder = expand_binop (compute_mode, and_optab, op0,
4340 GEN_INT (((HOST_WIDE_INT) 1 << pre_shift) - 1),
4341 remainder, 0, OPTAB_LIB_WIDEN);
4342 if (remainder)
4343 return gen_lowpart (mode, remainder);
4345 quotient = expand_shift
4346 (RSHIFT_EXPR, compute_mode, op0,
4347 pre_shift, tquotient, 0);
4349 else
4351 rtx t1, t2, t3, t4;
4353 mh = choose_multiplier (d, size, size - 1,
4354 &ml, &post_shift, &lgup);
4355 gcc_assert (!mh);
4357 if (post_shift < BITS_PER_WORD
4358 && size - 1 < BITS_PER_WORD)
4360 t1 = expand_shift
4361 (RSHIFT_EXPR, compute_mode, op0,
4362 size - 1, NULL_RTX, 0);
4363 t2 = expand_binop (compute_mode, xor_optab, op0, t1,
4364 NULL_RTX, 0, OPTAB_WIDEN);
4365 extra_cost = (shift_cost[speed][compute_mode][post_shift]
4366 + shift_cost[speed][compute_mode][size - 1]
4367 + 2 * add_cost[speed][compute_mode]);
4368 t3 = expand_mult_highpart (compute_mode, t2, ml,
4369 NULL_RTX, 1,
4370 max_cost - extra_cost);
4371 if (t3 != 0)
4373 t4 = expand_shift
4374 (RSHIFT_EXPR, compute_mode, t3,
4375 post_shift, NULL_RTX, 1);
4376 quotient = expand_binop (compute_mode, xor_optab,
4377 t4, t1, tquotient, 0,
4378 OPTAB_WIDEN);
4383 else
4385 rtx nsign, t1, t2, t3, t4;
4386 t1 = force_operand (gen_rtx_PLUS (compute_mode,
4387 op0, constm1_rtx), NULL_RTX);
4388 t2 = expand_binop (compute_mode, ior_optab, op0, t1, NULL_RTX,
4389 0, OPTAB_WIDEN);
4390 nsign = expand_shift
4391 (RSHIFT_EXPR, compute_mode, t2,
4392 size - 1, NULL_RTX, 0);
4393 t3 = force_operand (gen_rtx_MINUS (compute_mode, t1, nsign),
4394 NULL_RTX);
4395 t4 = expand_divmod (0, TRUNC_DIV_EXPR, compute_mode, t3, op1,
4396 NULL_RTX, 0);
4397 if (t4)
4399 rtx t5;
4400 t5 = expand_unop (compute_mode, one_cmpl_optab, nsign,
4401 NULL_RTX, 0);
4402 quotient = force_operand (gen_rtx_PLUS (compute_mode,
4403 t4, t5),
4404 tquotient);
4409 if (quotient != 0)
4410 break;
4411 delete_insns_since (last);
4413 /* Try using an instruction that produces both the quotient and
4414 remainder, using truncation. We can easily compensate the quotient
4415 or remainder to get floor rounding, once we have the remainder.
4416 Notice that we compute also the final remainder value here,
4417 and return the result right away. */
4418 if (target == 0 || GET_MODE (target) != compute_mode)
4419 target = gen_reg_rtx (compute_mode);
4421 if (rem_flag)
4423 remainder
4424 = REG_P (target) ? target : gen_reg_rtx (compute_mode);
4425 quotient = gen_reg_rtx (compute_mode);
4427 else
4429 quotient
4430 = REG_P (target) ? target : gen_reg_rtx (compute_mode);
4431 remainder = gen_reg_rtx (compute_mode);
4434 if (expand_twoval_binop (sdivmod_optab, op0, op1,
4435 quotient, remainder, 0))
4437 /* This could be computed with a branch-less sequence.
4438 Save that for later. */
4439 rtx tem;
4440 rtx label = gen_label_rtx ();
4441 do_cmp_and_jump (remainder, const0_rtx, EQ, compute_mode, label);
4442 tem = expand_binop (compute_mode, xor_optab, op0, op1,
4443 NULL_RTX, 0, OPTAB_WIDEN);
4444 do_cmp_and_jump (tem, const0_rtx, GE, compute_mode, label);
4445 expand_dec (quotient, const1_rtx);
4446 expand_inc (remainder, op1);
4447 emit_label (label);
4448 return gen_lowpart (mode, rem_flag ? remainder : quotient);
4451 /* No luck with division elimination or divmod. Have to do it
4452 by conditionally adjusting op0 *and* the result. */
4454 rtx label1, label2, label3, label4, label5;
4455 rtx adjusted_op0;
4456 rtx tem;
4458 quotient = gen_reg_rtx (compute_mode);
4459 adjusted_op0 = copy_to_mode_reg (compute_mode, op0);
4460 label1 = gen_label_rtx ();
4461 label2 = gen_label_rtx ();
4462 label3 = gen_label_rtx ();
4463 label4 = gen_label_rtx ();
4464 label5 = gen_label_rtx ();
4465 do_cmp_and_jump (op1, const0_rtx, LT, compute_mode, label2);
4466 do_cmp_and_jump (adjusted_op0, const0_rtx, LT, compute_mode, label1);
4467 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
4468 quotient, 0, OPTAB_LIB_WIDEN);
4469 if (tem != quotient)
4470 emit_move_insn (quotient, tem);
4471 emit_jump_insn (gen_jump (label5));
4472 emit_barrier ();
4473 emit_label (label1);
4474 expand_inc (adjusted_op0, const1_rtx);
4475 emit_jump_insn (gen_jump (label4));
4476 emit_barrier ();
4477 emit_label (label2);
4478 do_cmp_and_jump (adjusted_op0, const0_rtx, GT, compute_mode, label3);
4479 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
4480 quotient, 0, OPTAB_LIB_WIDEN);
4481 if (tem != quotient)
4482 emit_move_insn (quotient, tem);
4483 emit_jump_insn (gen_jump (label5));
4484 emit_barrier ();
4485 emit_label (label3);
4486 expand_dec (adjusted_op0, const1_rtx);
4487 emit_label (label4);
4488 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
4489 quotient, 0, OPTAB_LIB_WIDEN);
4490 if (tem != quotient)
4491 emit_move_insn (quotient, tem);
4492 expand_dec (quotient, const1_rtx);
4493 emit_label (label5);
4495 break;
4497 case CEIL_DIV_EXPR:
4498 case CEIL_MOD_EXPR:
4499 if (unsignedp)
4501 if (op1_is_constant && EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1)))
4503 rtx t1, t2, t3;
4504 unsigned HOST_WIDE_INT d = INTVAL (op1);
4505 t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
4506 floor_log2 (d), tquotient, 1);
4507 t2 = expand_binop (compute_mode, and_optab, op0,
4508 GEN_INT (d - 1),
4509 NULL_RTX, 1, OPTAB_LIB_WIDEN);
4510 t3 = gen_reg_rtx (compute_mode);
4511 t3 = emit_store_flag (t3, NE, t2, const0_rtx,
4512 compute_mode, 1, 1);
4513 if (t3 == 0)
4515 rtx lab;
4516 lab = gen_label_rtx ();
4517 do_cmp_and_jump (t2, const0_rtx, EQ, compute_mode, lab);
4518 expand_inc (t1, const1_rtx);
4519 emit_label (lab);
4520 quotient = t1;
4522 else
4523 quotient = force_operand (gen_rtx_PLUS (compute_mode,
4524 t1, t3),
4525 tquotient);
4526 break;
4529 /* Try using an instruction that produces both the quotient and
4530 remainder, using truncation. We can easily compensate the
4531 quotient or remainder to get ceiling rounding, once we have the
4532 remainder. Notice that we compute also the final remainder
4533 value here, and return the result right away. */
4534 if (target == 0 || GET_MODE (target) != compute_mode)
4535 target = gen_reg_rtx (compute_mode);
4537 if (rem_flag)
4539 remainder = (REG_P (target)
4540 ? target : gen_reg_rtx (compute_mode));
4541 quotient = gen_reg_rtx (compute_mode);
4543 else
4545 quotient = (REG_P (target)
4546 ? target : gen_reg_rtx (compute_mode));
4547 remainder = gen_reg_rtx (compute_mode);
4550 if (expand_twoval_binop (udivmod_optab, op0, op1, quotient,
4551 remainder, 1))
4553 /* This could be computed with a branch-less sequence.
4554 Save that for later. */
4555 rtx label = gen_label_rtx ();
4556 do_cmp_and_jump (remainder, const0_rtx, EQ,
4557 compute_mode, label);
4558 expand_inc (quotient, const1_rtx);
4559 expand_dec (remainder, op1);
4560 emit_label (label);
4561 return gen_lowpart (mode, rem_flag ? remainder : quotient);
4564 /* No luck with division elimination or divmod. Have to do it
4565 by conditionally adjusting op0 *and* the result. */
4567 rtx label1, label2;
4568 rtx adjusted_op0, tem;
4570 quotient = gen_reg_rtx (compute_mode);
4571 adjusted_op0 = copy_to_mode_reg (compute_mode, op0);
4572 label1 = gen_label_rtx ();
4573 label2 = gen_label_rtx ();
4574 do_cmp_and_jump (adjusted_op0, const0_rtx, NE,
4575 compute_mode, label1);
4576 emit_move_insn (quotient, const0_rtx);
4577 emit_jump_insn (gen_jump (label2));
4578 emit_barrier ();
4579 emit_label (label1);
4580 expand_dec (adjusted_op0, const1_rtx);
4581 tem = expand_binop (compute_mode, udiv_optab, adjusted_op0, op1,
4582 quotient, 1, OPTAB_LIB_WIDEN);
4583 if (tem != quotient)
4584 emit_move_insn (quotient, tem);
4585 expand_inc (quotient, const1_rtx);
4586 emit_label (label2);
4589 else /* signed */
4591 if (op1_is_constant && EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1))
4592 && INTVAL (op1) >= 0)
4594 /* This is extremely similar to the code for the unsigned case
4595 above. For 2.7 we should merge these variants, but for
4596 2.6.1 I don't want to touch the code for unsigned since that
4597 gets used in C. The signed case will only be used by other
4598 languages (Ada). */
4600 rtx t1, t2, t3;
4601 unsigned HOST_WIDE_INT d = INTVAL (op1);
4602 t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
4603 floor_log2 (d), tquotient, 0);
4604 t2 = expand_binop (compute_mode, and_optab, op0,
4605 GEN_INT (d - 1),
4606 NULL_RTX, 1, OPTAB_LIB_WIDEN);
4607 t3 = gen_reg_rtx (compute_mode);
4608 t3 = emit_store_flag (t3, NE, t2, const0_rtx,
4609 compute_mode, 1, 1);
4610 if (t3 == 0)
4612 rtx lab;
4613 lab = gen_label_rtx ();
4614 do_cmp_and_jump (t2, const0_rtx, EQ, compute_mode, lab);
4615 expand_inc (t1, const1_rtx);
4616 emit_label (lab);
4617 quotient = t1;
4619 else
4620 quotient = force_operand (gen_rtx_PLUS (compute_mode,
4621 t1, t3),
4622 tquotient);
4623 break;
4626 /* Try using an instruction that produces both the quotient and
4627 remainder, using truncation. We can easily compensate the
4628 quotient or remainder to get ceiling rounding, once we have the
4629 remainder. Notice that we compute also the final remainder
4630 value here, and return the result right away. */
4631 if (target == 0 || GET_MODE (target) != compute_mode)
4632 target = gen_reg_rtx (compute_mode);
4633 if (rem_flag)
4635 remainder = (REG_P (target)
4636 ? target : gen_reg_rtx (compute_mode));
4637 quotient = gen_reg_rtx (compute_mode);
4639 else
4641 quotient = (REG_P (target)
4642 ? target : gen_reg_rtx (compute_mode));
4643 remainder = gen_reg_rtx (compute_mode);
4646 if (expand_twoval_binop (sdivmod_optab, op0, op1, quotient,
4647 remainder, 0))
4649 /* This could be computed with a branch-less sequence.
4650 Save that for later. */
4651 rtx tem;
4652 rtx label = gen_label_rtx ();
4653 do_cmp_and_jump (remainder, const0_rtx, EQ,
4654 compute_mode, label);
4655 tem = expand_binop (compute_mode, xor_optab, op0, op1,
4656 NULL_RTX, 0, OPTAB_WIDEN);
4657 do_cmp_and_jump (tem, const0_rtx, LT, compute_mode, label);
4658 expand_inc (quotient, const1_rtx);
4659 expand_dec (remainder, op1);
4660 emit_label (label);
4661 return gen_lowpart (mode, rem_flag ? remainder : quotient);
4664 /* No luck with division elimination or divmod. Have to do it
4665 by conditionally adjusting op0 *and* the result. */
4667 rtx label1, label2, label3, label4, label5;
4668 rtx adjusted_op0;
4669 rtx tem;
4671 quotient = gen_reg_rtx (compute_mode);
4672 adjusted_op0 = copy_to_mode_reg (compute_mode, op0);
4673 label1 = gen_label_rtx ();
4674 label2 = gen_label_rtx ();
4675 label3 = gen_label_rtx ();
4676 label4 = gen_label_rtx ();
4677 label5 = gen_label_rtx ();
4678 do_cmp_and_jump (op1, const0_rtx, LT, compute_mode, label2);
4679 do_cmp_and_jump (adjusted_op0, const0_rtx, GT,
4680 compute_mode, label1);
4681 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
4682 quotient, 0, OPTAB_LIB_WIDEN);
4683 if (tem != quotient)
4684 emit_move_insn (quotient, tem);
4685 emit_jump_insn (gen_jump (label5));
4686 emit_barrier ();
4687 emit_label (label1);
4688 expand_dec (adjusted_op0, const1_rtx);
4689 emit_jump_insn (gen_jump (label4));
4690 emit_barrier ();
4691 emit_label (label2);
4692 do_cmp_and_jump (adjusted_op0, const0_rtx, LT,
4693 compute_mode, label3);
4694 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
4695 quotient, 0, OPTAB_LIB_WIDEN);
4696 if (tem != quotient)
4697 emit_move_insn (quotient, tem);
4698 emit_jump_insn (gen_jump (label5));
4699 emit_barrier ();
4700 emit_label (label3);
4701 expand_inc (adjusted_op0, const1_rtx);
4702 emit_label (label4);
4703 tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
4704 quotient, 0, OPTAB_LIB_WIDEN);
4705 if (tem != quotient)
4706 emit_move_insn (quotient, tem);
4707 expand_inc (quotient, const1_rtx);
4708 emit_label (label5);
4711 break;
4713 case EXACT_DIV_EXPR:
4714 if (op1_is_constant && HOST_BITS_PER_WIDE_INT >= size)
4716 HOST_WIDE_INT d = INTVAL (op1);
4717 unsigned HOST_WIDE_INT ml;
4718 int pre_shift;
4719 rtx t1;
4721 pre_shift = floor_log2 (d & -d);
4722 ml = invert_mod2n (d >> pre_shift, size);
4723 t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
4724 pre_shift, NULL_RTX, unsignedp);
4725 quotient = expand_mult (compute_mode, t1,
4726 gen_int_mode (ml, compute_mode),
4727 NULL_RTX, 1);
4729 insn = get_last_insn ();
4730 set_dst_reg_note (insn, REG_EQUAL,
4731 gen_rtx_fmt_ee (unsignedp ? UDIV : DIV,
4732 compute_mode, op0, op1),
4733 quotient);
4735 break;
4737 case ROUND_DIV_EXPR:
4738 case ROUND_MOD_EXPR:
4739 if (unsignedp)
4741 rtx tem;
4742 rtx label;
4743 label = gen_label_rtx ();
4744 quotient = gen_reg_rtx (compute_mode);
4745 remainder = gen_reg_rtx (compute_mode);
4746 if (expand_twoval_binop (udivmod_optab, op0, op1, quotient, remainder, 1) == 0)
4748 rtx tem;
4749 quotient = expand_binop (compute_mode, udiv_optab, op0, op1,
4750 quotient, 1, OPTAB_LIB_WIDEN);
4751 tem = expand_mult (compute_mode, quotient, op1, NULL_RTX, 1);
4752 remainder = expand_binop (compute_mode, sub_optab, op0, tem,
4753 remainder, 1, OPTAB_LIB_WIDEN);
4755 tem = plus_constant (op1, -1);
4756 tem = expand_shift (RSHIFT_EXPR, compute_mode, tem, 1, NULL_RTX, 1);
4757 do_cmp_and_jump (remainder, tem, LEU, compute_mode, label);
4758 expand_inc (quotient, const1_rtx);
4759 expand_dec (remainder, op1);
4760 emit_label (label);
4762 else
4764 rtx abs_rem, abs_op1, tem, mask;
4765 rtx label;
4766 label = gen_label_rtx ();
4767 quotient = gen_reg_rtx (compute_mode);
4768 remainder = gen_reg_rtx (compute_mode);
4769 if (expand_twoval_binop (sdivmod_optab, op0, op1, quotient, remainder, 0) == 0)
4771 rtx tem;
4772 quotient = expand_binop (compute_mode, sdiv_optab, op0, op1,
4773 quotient, 0, OPTAB_LIB_WIDEN);
4774 tem = expand_mult (compute_mode, quotient, op1, NULL_RTX, 0);
4775 remainder = expand_binop (compute_mode, sub_optab, op0, tem,
4776 remainder, 0, OPTAB_LIB_WIDEN);
4778 abs_rem = expand_abs (compute_mode, remainder, NULL_RTX, 1, 0);
4779 abs_op1 = expand_abs (compute_mode, op1, NULL_RTX, 1, 0);
4780 tem = expand_shift (LSHIFT_EXPR, compute_mode, abs_rem,
4781 1, NULL_RTX, 1);
4782 do_cmp_and_jump (tem, abs_op1, LTU, compute_mode, label);
4783 tem = expand_binop (compute_mode, xor_optab, op0, op1,
4784 NULL_RTX, 0, OPTAB_WIDEN);
4785 mask = expand_shift (RSHIFT_EXPR, compute_mode, tem,
4786 size - 1, NULL_RTX, 0);
4787 tem = expand_binop (compute_mode, xor_optab, mask, const1_rtx,
4788 NULL_RTX, 0, OPTAB_WIDEN);
4789 tem = expand_binop (compute_mode, sub_optab, tem, mask,
4790 NULL_RTX, 0, OPTAB_WIDEN);
4791 expand_inc (quotient, tem);
4792 tem = expand_binop (compute_mode, xor_optab, mask, op1,
4793 NULL_RTX, 0, OPTAB_WIDEN);
4794 tem = expand_binop (compute_mode, sub_optab, tem, mask,
4795 NULL_RTX, 0, OPTAB_WIDEN);
4796 expand_dec (remainder, tem);
4797 emit_label (label);
4799 return gen_lowpart (mode, rem_flag ? remainder : quotient);
4801 default:
4802 gcc_unreachable ();
4805 if (quotient == 0)
4807 if (target && GET_MODE (target) != compute_mode)
4808 target = 0;
4810 if (rem_flag)
4812 /* Try to produce the remainder without producing the quotient.
4813 If we seem to have a divmod pattern that does not require widening,
4814 don't try widening here. We should really have a WIDEN argument
4815 to expand_twoval_binop, since what we'd really like to do here is
4816 1) try a mod insn in compute_mode
4817 2) try a divmod insn in compute_mode
4818 3) try a div insn in compute_mode and multiply-subtract to get
4819 remainder
4820 4) try the same things with widening allowed. */
4821 remainder
4822 = sign_expand_binop (compute_mode, umod_optab, smod_optab,
4823 op0, op1, target,
4824 unsignedp,
4825 ((optab_handler (optab2, compute_mode)
4826 != CODE_FOR_nothing)
4827 ? OPTAB_DIRECT : OPTAB_WIDEN));
4828 if (remainder == 0)
4830 /* No luck there. Can we do remainder and divide at once
4831 without a library call? */
4832 remainder = gen_reg_rtx (compute_mode);
4833 if (! expand_twoval_binop ((unsignedp
4834 ? udivmod_optab
4835 : sdivmod_optab),
4836 op0, op1,
4837 NULL_RTX, remainder, unsignedp))
4838 remainder = 0;
4841 if (remainder)
4842 return gen_lowpart (mode, remainder);
4845 /* Produce the quotient. Try a quotient insn, but not a library call.
4846 If we have a divmod in this mode, use it in preference to widening
4847 the div (for this test we assume it will not fail). Note that optab2
4848 is set to the one of the two optabs that the call below will use. */
4849 quotient
4850 = sign_expand_binop (compute_mode, udiv_optab, sdiv_optab,
4851 op0, op1, rem_flag ? NULL_RTX : target,
4852 unsignedp,
4853 ((optab_handler (optab2, compute_mode)
4854 != CODE_FOR_nothing)
4855 ? OPTAB_DIRECT : OPTAB_WIDEN));
4857 if (quotient == 0)
4859 /* No luck there. Try a quotient-and-remainder insn,
4860 keeping the quotient alone. */
4861 quotient = gen_reg_rtx (compute_mode);
4862 if (! expand_twoval_binop (unsignedp ? udivmod_optab : sdivmod_optab,
4863 op0, op1,
4864 quotient, NULL_RTX, unsignedp))
4866 quotient = 0;
4867 if (! rem_flag)
4868 /* Still no luck. If we are not computing the remainder,
4869 use a library call for the quotient. */
4870 quotient = sign_expand_binop (compute_mode,
4871 udiv_optab, sdiv_optab,
4872 op0, op1, target,
4873 unsignedp, OPTAB_LIB_WIDEN);
4878 if (rem_flag)
4880 if (target && GET_MODE (target) != compute_mode)
4881 target = 0;
4883 if (quotient == 0)
4885 /* No divide instruction either. Use library for remainder. */
4886 remainder = sign_expand_binop (compute_mode, umod_optab, smod_optab,
4887 op0, op1, target,
4888 unsignedp, OPTAB_LIB_WIDEN);
4889 /* No remainder function. Try a quotient-and-remainder
4890 function, keeping the remainder. */
4891 if (!remainder)
4893 remainder = gen_reg_rtx (compute_mode);
4894 if (!expand_twoval_binop_libfunc
4895 (unsignedp ? udivmod_optab : sdivmod_optab,
4896 op0, op1,
4897 NULL_RTX, remainder,
4898 unsignedp ? UMOD : MOD))
4899 remainder = NULL_RTX;
4902 else
4904 /* We divided. Now finish doing X - Y * (X / Y). */
4905 remainder = expand_mult (compute_mode, quotient, op1,
4906 NULL_RTX, unsignedp);
4907 remainder = expand_binop (compute_mode, sub_optab, op0,
4908 remainder, target, unsignedp,
4909 OPTAB_LIB_WIDEN);
4913 return gen_lowpart (mode, rem_flag ? remainder : quotient);
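/* The FLOOR_ and CEIL_ cases above compensate a truncating division to
   get the other rounding modes.  The same compensation written in plain
   C (a sketch, not the generated RTL):  */

#include <stdint.h>

static int32_t floor_div32 (int32_t n, int32_t d)
{
  int32_t q = n / d, r = n % d;
  /* The truncated quotient is one too high when the remainder is
     nonzero and the operands have opposite signs.  */
  if (r != 0 && (n ^ d) < 0)
    q--;
  return q;
}

static int32_t ceil_div32 (int32_t n, int32_t d)
{
  int32_t q = n / d, r = n % d;
  /* The truncated quotient is one too low when the remainder is
     nonzero and the operands have the same sign.  */
  if (r != 0 && (n ^ d) >= 0)
    q++;
  return q;
}

/* e.g. floor_div32 (-7, 2) == -4 and ceil_div32 (7, 2) == 4.  */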
4916 /* Return a tree node with data type TYPE, describing the value of X.
4917 Usually this is a VAR_DECL, if there is no obvious better choice.
4918 X may be an expression, however we only support those expressions
4919 generated by loop.c. */
4921 tree
4922 make_tree (tree type, rtx x)
4924 tree t;
4926 switch (GET_CODE (x))
4928 case CONST_INT:
4930 HOST_WIDE_INT hi = 0;
4932 if (INTVAL (x) < 0
4933 && !(TYPE_UNSIGNED (type)
4934 && (GET_MODE_BITSIZE (TYPE_MODE (type))
4935 < HOST_BITS_PER_WIDE_INT)))
4936 hi = -1;
4938 t = build_int_cst_wide (type, INTVAL (x), hi);
4940 return t;
4943 case CONST_DOUBLE:
4944 if (GET_MODE (x) == VOIDmode)
4945 t = build_int_cst_wide (type,
4946 CONST_DOUBLE_LOW (x), CONST_DOUBLE_HIGH (x));
4947 else
4949 REAL_VALUE_TYPE d;
4951 REAL_VALUE_FROM_CONST_DOUBLE (d, x);
4952 t = build_real (type, d);
4955 return t;
4957 case CONST_VECTOR:
4959 int units = CONST_VECTOR_NUNITS (x);
4960 tree itype = TREE_TYPE (type);
4961 tree t = NULL_TREE;
4962 int i;
4965 /* Build a tree with vector elements. */
4966 for (i = units - 1; i >= 0; --i)
4968 rtx elt = CONST_VECTOR_ELT (x, i);
4969 t = tree_cons (NULL_TREE, make_tree (itype, elt), t);
4972 return build_vector (type, t);
4975 case PLUS:
4976 return fold_build2 (PLUS_EXPR, type, make_tree (type, XEXP (x, 0)),
4977 make_tree (type, XEXP (x, 1)));
4979 case MINUS:
4980 return fold_build2 (MINUS_EXPR, type, make_tree (type, XEXP (x, 0)),
4981 make_tree (type, XEXP (x, 1)));
4983 case NEG:
4984 return fold_build1 (NEGATE_EXPR, type, make_tree (type, XEXP (x, 0)));
4986 case MULT:
4987 return fold_build2 (MULT_EXPR, type, make_tree (type, XEXP (x, 0)),
4988 make_tree (type, XEXP (x, 1)));
4990 case ASHIFT:
4991 return fold_build2 (LSHIFT_EXPR, type, make_tree (type, XEXP (x, 0)),
4992 make_tree (type, XEXP (x, 1)));
4994 case LSHIFTRT:
4995 t = unsigned_type_for (type);
4996 return fold_convert (type, build2 (RSHIFT_EXPR, t,
4997 make_tree (t, XEXP (x, 0)),
4998 make_tree (type, XEXP (x, 1))));
5000 case ASHIFTRT:
5001 t = signed_type_for (type);
5002 return fold_convert (type, build2 (RSHIFT_EXPR, t,
5003 make_tree (t, XEXP (x, 0)),
5004 make_tree (type, XEXP (x, 1))));
5006 case DIV:
5007 if (TREE_CODE (type) != REAL_TYPE)
5008 t = signed_type_for (type);
5009 else
5010 t = type;
5012 return fold_convert (type, build2 (TRUNC_DIV_EXPR, t,
5013 make_tree (t, XEXP (x, 0)),
5014 make_tree (t, XEXP (x, 1))));
5015 case UDIV:
5016 t = unsigned_type_for (type);
5017 return fold_convert (type, build2 (TRUNC_DIV_EXPR, t,
5018 make_tree (t, XEXP (x, 0)),
5019 make_tree (t, XEXP (x, 1))));
5021 case SIGN_EXTEND:
5022 case ZERO_EXTEND:
5023 t = lang_hooks.types.type_for_mode (GET_MODE (XEXP (x, 0)),
5024 GET_CODE (x) == ZERO_EXTEND);
5025 return fold_convert (type, make_tree (t, XEXP (x, 0)));
5027 case CONST:
5028 return make_tree (type, XEXP (x, 0));
5030 case SYMBOL_REF:
5031 t = SYMBOL_REF_DECL (x);
5032 if (t)
5033 return fold_convert (type, build_fold_addr_expr (t));
5034 /* else fall through. */
5036 default:
5037 t = build_decl (RTL_LOCATION (x), VAR_DECL, NULL_TREE, type);
5039 /* If TYPE is a POINTER_TYPE, we might need to convert X from
5040 address mode to pointer mode. */
5041 if (POINTER_TYPE_P (type))
5042 x = convert_memory_address_addr_space
5043 (TYPE_MODE (type), x, TYPE_ADDR_SPACE (TREE_TYPE (type)));
5045 /* Note that we do *not* use SET_DECL_RTL here, because we do not
5046 want set_decl_rtl to go adjusting REG_ATTRS for this temporary. */
5047 t->decl_with_rtl.rtl = x;
5049 return t;
5053 /* Compute the logical-and of OP0 and OP1, storing it in TARGET
5054 and returning TARGET.
5056 If TARGET is 0, a pseudo-register or constant is returned. */
5059 expand_and (enum machine_mode mode, rtx op0, rtx op1, rtx target)
5061 rtx tem = 0;
5063 if (GET_MODE (op0) == VOIDmode && GET_MODE (op1) == VOIDmode)
5064 tem = simplify_binary_operation (AND, mode, op0, op1);
5065 if (tem == 0)
5066 tem = expand_binop (mode, and_optab, op0, op1, target, 0, OPTAB_LIB_WIDEN);
5068 if (target == 0)
5069 target = tem;
5070 else if (tem != target)
5071 emit_move_insn (target, tem);
5072 return target;
5075 /* Helper function for emit_store_flag. */
5076 static rtx
5077 emit_cstore (rtx target, enum insn_code icode, enum rtx_code code,
5078 enum machine_mode mode, enum machine_mode compare_mode,
5079 int unsignedp, rtx x, rtx y, int normalizep,
5080 enum machine_mode target_mode)
5082 struct expand_operand ops[4];
5083 rtx op0, last, comparison, subtarget;
5084 enum machine_mode result_mode = insn_data[(int) icode].operand[0].mode;
5086 last = get_last_insn ();
5087 x = prepare_operand (icode, x, 2, mode, compare_mode, unsignedp);
5088 y = prepare_operand (icode, y, 3, mode, compare_mode, unsignedp);
5089 if (!x || !y)
5091 delete_insns_since (last);
5092 return NULL_RTX;
5095 if (target_mode == VOIDmode)
5096 target_mode = result_mode;
5097 if (!target)
5098 target = gen_reg_rtx (target_mode);
5100 comparison = gen_rtx_fmt_ee (code, result_mode, x, y);
5102 create_output_operand (&ops[0], optimize ? NULL_RTX : target, result_mode);
5103 create_fixed_operand (&ops[1], comparison);
5104 create_fixed_operand (&ops[2], x);
5105 create_fixed_operand (&ops[3], y);
5106 if (!maybe_expand_insn (icode, 4, ops))
5108 delete_insns_since (last);
5109 return NULL_RTX;
5111 subtarget = ops[0].value;
5113 /* If we are converting to a wider mode, first convert to
5114 TARGET_MODE, then normalize. This produces better combining
5115 opportunities on machines that have a SIGN_EXTRACT when we are
5116 testing a single bit. This mostly benefits the 68k.
5118 If STORE_FLAG_VALUE does not have the sign bit set when
5119 interpreted in MODE, we can do this conversion as unsigned, which
5120 is usually more efficient. */
5121 if (GET_MODE_SIZE (target_mode) > GET_MODE_SIZE (result_mode))
5123 convert_move (target, subtarget,
5124 val_signbit_known_clear_p (result_mode,
5125 STORE_FLAG_VALUE));
5126 op0 = target;
5127 result_mode = target_mode;
5129 else
5130 op0 = subtarget;
5132 /* If we want to keep subexpressions around, don't reuse our last
5133 target. */
5134 if (optimize)
5135 subtarget = 0;
5137 /* Now normalize to the proper value in MODE. Sometimes we don't
5138 have to do anything. */
5139 if (normalizep == 0 || normalizep == STORE_FLAG_VALUE)
5141 /* STORE_FLAG_VALUE might be the most negative number, so write
5142 the comparison this way to avoid a compile-time warning. */
5143 else if (- normalizep == STORE_FLAG_VALUE)
5144 op0 = expand_unop (result_mode, neg_optab, op0, subtarget, 0);
5146 /* We don't want to use STORE_FLAG_VALUE < 0 below since this makes
5147 it hard to use a value of just the sign bit due to ANSI integer
5148 constant typing rules. */
5149 else if (val_signbit_known_set_p (result_mode, STORE_FLAG_VALUE))
5150 op0 = expand_shift (RSHIFT_EXPR, result_mode, op0,
5151 GET_MODE_BITSIZE (result_mode) - 1, subtarget,
5152 normalizep == 1);
5153 else
5155 gcc_assert (STORE_FLAG_VALUE & 1);
5157 op0 = expand_and (result_mode, op0, const1_rtx, subtarget);
5158 if (normalizep == -1)
5159 op0 = expand_unop (result_mode, neg_optab, op0, op0, 0);
5162 /* If we were converting to a smaller mode, do the conversion now. */
5163 if (target_mode != result_mode)
5165 convert_move (target, op0, 0);
5166 return target;
5168 else
5169 return op0;
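/* A standalone sketch of the normalization above, for a machine whose
   scc pattern leaves STORE_FLAG_VALUE == -1 in the register for "true"
   and 0 for "false" (assumes the common arithmetic right shift on
   signed values): a logical shift of the sign bit gives a 0/1 value,
   an arithmetic shift gives a 0/-1 value.  */

#include <stdint.h>

static int32_t scc_to_0_1 (int32_t raw)       /* raw is 0 or -1 */
{
  return (int32_t) ((uint32_t) raw >> 31);    /* logical shift -> 0 or 1 */
}

static int32_t scc_to_0_m1 (int32_t raw)      /* raw is 0 or -1 */
{
  return raw >> 31;                           /* arithmetic shift -> 0 or -1 */
}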
5173 /* A subroutine of emit_store_flag only including "tricks" that do not
5174 need a recursive call. These are kept separate to avoid infinite
5175 loops. */
5177 static rtx
5178 emit_store_flag_1 (rtx target, enum rtx_code code, rtx op0, rtx op1,
5179 enum machine_mode mode, int unsignedp, int normalizep,
5180 enum machine_mode target_mode)
5182 rtx subtarget;
5183 enum insn_code icode;
5184 enum machine_mode compare_mode;
5185 enum mode_class mclass;
5186 enum rtx_code scode;
5187 rtx tem;
5189 if (unsignedp)
5190 code = unsigned_condition (code);
5191 scode = swap_condition (code);
5193 /* If one operand is constant, make it the second one. Only do this
5194 if the other operand is not constant as well. */
5196 if (swap_commutative_operands_p (op0, op1))
5198 tem = op0;
5199 op0 = op1;
5200 op1 = tem;
5201 code = swap_condition (code);
5204 if (mode == VOIDmode)
5205 mode = GET_MODE (op0);
5207 /* For some comparisons with 1 and -1, we can convert this to
5208 comparisons with zero. This will often produce more opportunities for
5209 store-flag insns. */
5211 switch (code)
5213 case LT:
5214 if (op1 == const1_rtx)
5215 op1 = const0_rtx, code = LE;
5216 break;
5217 case LE:
5218 if (op1 == constm1_rtx)
5219 op1 = const0_rtx, code = LT;
5220 break;
5221 case GE:
5222 if (op1 == const1_rtx)
5223 op1 = const0_rtx, code = GT;
5224 break;
5225 case GT:
5226 if (op1 == constm1_rtx)
5227 op1 = const0_rtx, code = GE;
5228 break;
5229 case GEU:
5230 if (op1 == const1_rtx)
5231 op1 = const0_rtx, code = NE;
5232 break;
5233 case LTU:
5234 if (op1 == const1_rtx)
5235 op1 = const0_rtx, code = EQ;
5236 break;
5237 default:
5238 break;
5241 /* If we are comparing a double-word integer with zero or -1, we can
5242 convert the comparison into one involving a single word. */
5243 if (GET_MODE_BITSIZE (mode) == BITS_PER_WORD * 2
5244 && GET_MODE_CLASS (mode) == MODE_INT
5245 && (!MEM_P (op0) || ! MEM_VOLATILE_P (op0)))
5247 if ((code == EQ || code == NE)
5248 && (op1 == const0_rtx || op1 == constm1_rtx))
5250 rtx op00, op01;
5252 /* Do a logical OR or AND of the two words and compare the
5253 result. */
5254 op00 = simplify_gen_subreg (word_mode, op0, mode, 0);
5255 op01 = simplify_gen_subreg (word_mode, op0, mode, UNITS_PER_WORD);
5256 tem = expand_binop (word_mode,
5257 op1 == const0_rtx ? ior_optab : and_optab,
5258 op00, op01, NULL_RTX, unsignedp,
5259 OPTAB_DIRECT);
5261 if (tem != 0)
5262 tem = emit_store_flag (NULL_RTX, code, tem, op1, word_mode,
5263 unsignedp, normalizep);
5265 else if ((code == LT || code == GE) && op1 == const0_rtx)
5267 rtx op0h;
5269 /* If testing the sign bit, can just test on high word. */
5270 op0h = simplify_gen_subreg (word_mode, op0, mode,
5271 subreg_highpart_offset (word_mode,
5272 mode));
5273 tem = emit_store_flag (NULL_RTX, code, op0h, op1, word_mode,
5274 unsignedp, normalizep);
5276 else
5277 tem = NULL_RTX;
5279 if (tem)
5281 if (target_mode == VOIDmode || GET_MODE (tem) == target_mode)
5282 return tem;
5283 if (!target)
5284 target = gen_reg_rtx (target_mode);
5286 convert_move (target, tem,
5287 !val_signbit_known_set_p (word_mode,
5288 (normalizep ? normalizep
5289 : STORE_FLAG_VALUE)));
5290 return target;
5294 /* If this is A < 0 or A >= 0, we can do this by taking the ones
5295 complement of A (for GE) and shifting the sign bit to the low bit. */
5296 if (op1 == const0_rtx && (code == LT || code == GE)
5297 && GET_MODE_CLASS (mode) == MODE_INT
5298 && (normalizep || STORE_FLAG_VALUE == 1
5299 || val_signbit_p (mode, STORE_FLAG_VALUE)))
5301 subtarget = target;
5303 if (!target)
5304 target_mode = mode;
5306 /* If the result is to be wider than OP0, it is best to convert it
5307 first. If it is to be narrower, it is *incorrect* to convert it
5308 first. */
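/* Converting to a narrower TARGET_MODE first would throw away the sign bit
   of the original mode, which is exactly the bit the shift below extracts.  */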
5309 else if (GET_MODE_SIZE (target_mode) > GET_MODE_SIZE (mode))
5311 op0 = convert_modes (target_mode, mode, op0, 0);
5312 mode = target_mode;
5315 if (target_mode != mode)
5316 subtarget = 0;
5318 if (code == GE)
5319 op0 = expand_unop (mode, one_cmpl_optab, op0,
5320 ((STORE_FLAG_VALUE == 1 || normalizep)
5321 ? 0 : subtarget), 0);
5323 if (STORE_FLAG_VALUE == 1 || normalizep)
5324 /* If we are supposed to produce a 0/1 value, we want to do
5325 a logical shift from the sign bit to the low-order bit; for
5326 a -1/0 value, we do an arithmetic shift. */
5327 op0 = expand_shift (RSHIFT_EXPR, mode, op0,
5328 GET_MODE_BITSIZE (mode) - 1,
5329 subtarget, normalizep != -1);
5331 if (mode != target_mode)
5332 op0 = convert_modes (target_mode, mode, op0, 0);
5334 return op0;
5337 mclass = GET_MODE_CLASS (mode);
5338 for (compare_mode = mode; compare_mode != VOIDmode;
5339 compare_mode = GET_MODE_WIDER_MODE (compare_mode))
5341 enum machine_mode optab_mode = mclass == MODE_CC ? CCmode : compare_mode;
5342 icode = optab_handler (cstore_optab, optab_mode);
5343 if (icode != CODE_FOR_nothing)
5345 do_pending_stack_adjust ();
5346 tem = emit_cstore (target, icode, code, mode, compare_mode,
5347 unsignedp, op0, op1, normalizep, target_mode);
5348 if (tem)
5349 return tem;
5351 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
5353 tem = emit_cstore (target, icode, scode, mode, compare_mode,
5354 unsignedp, op1, op0, normalizep, target_mode);
5355 if (tem)
5356 return tem;
5358 break;
5362 return 0;
5365 /* Emit a store-flags instruction for comparison CODE on OP0 and OP1
5366 and store the result in TARGET. Normally return TARGET.
5367 Return 0 if that cannot be done.
5369 MODE is the mode to use for OP0 and OP1 should they be CONST_INTs. If
5370 it is VOIDmode, they cannot both be CONST_INT.
5372 UNSIGNEDP is for the case where we have to widen the operands
5373 to perform the operation. It says to use zero-extension.
5375 NORMALIZEP is 1 if we should convert the result to be either zero
5376 or one. NORMALIZEP is -1 if we should convert the result to be
5377 either zero or -1. If NORMALIZEP is zero, the result will be left
5378 "raw" out of the scc insn. */
5380 rtx
5381 emit_store_flag (rtx target, enum rtx_code code, rtx op0, rtx op1,
5382 enum machine_mode mode, int unsignedp, int normalizep)
5384 enum machine_mode target_mode = target ? GET_MODE (target) : VOIDmode;
5385 enum rtx_code rcode;
5386 rtx subtarget;
5387 rtx tem, last, trueval;
5389 tem = emit_store_flag_1 (target, code, op0, op1, mode, unsignedp, normalizep,
5390 target_mode);
5391 if (tem)
5392 return tem;
5394 /* If we reached here, we can't do this with a scc insn, however there
5395 are some comparisons that can be done in other ways. Don't do any
5396 of these cases if branches are very cheap. */
5397 if (BRANCH_COST (optimize_insn_for_speed_p (), false) == 0)
5398 return 0;
5400 /* See what we need to return. We can only return a 1, -1, or the
5401 sign bit. */
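/* The branch-free sequences below can only produce 0/1, 0/-1, or a value
   whose only set bit is the sign bit, so if the caller asked for the raw
   STORE_FLAG_VALUE form and that value has any other shape, give up.  */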
5403 if (normalizep == 0)
5405 if (STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
5406 normalizep = STORE_FLAG_VALUE;
5408 else if (val_signbit_p (mode, STORE_FLAG_VALUE))
5409 ;
5410 else
5411 return 0;
5414 last = get_last_insn ();
5416 /* If optimizing, use different pseudo registers for each insn, instead
5417 of reusing the same pseudo. This leads to better CSE, but slows
5418 down the compiler, since there are more pseudos. */
5419 subtarget = (!optimize
5420 && (target_mode == mode)) ? target : NULL_RTX;
5421 trueval = GEN_INT (normalizep ? normalizep : STORE_FLAG_VALUE);
5423 /* For floating-point comparisons, try the reverse comparison or try
5424 changing the "orderedness" of the comparison. */
5425 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
5427 enum rtx_code first_code;
5428 bool and_them;
5430 rcode = reverse_condition_maybe_unordered (code);
5431 if (can_compare_p (rcode, mode, ccp_store_flag)
5432 && (code == ORDERED || code == UNORDERED
5433 || (! HONOR_NANS (mode) && (code == LTGT || code == UNEQ))
5434 || (! HONOR_SNANS (mode) && (code == EQ || code == NE))))
5436 int want_add = ((STORE_FLAG_VALUE == 1 && normalizep == -1)
5437 || (STORE_FLAG_VALUE == -1 && normalizep == 1));
5439 /* For the reverse comparison, use either an addition or a XOR. */
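/* E.g. with STORE_FLAG_VALUE == 1 and NORMALIZEP == -1: the reversed
   comparison, normalized to 0/1, is 0 exactly when the original comparison
   holds, so adding -1 yields the wanted -1/0.  Otherwise the reversed
   result is XORed with TRUEVAL, which flips it back to the original
   sense.  */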
5440 if (want_add
5441 && rtx_cost (GEN_INT (normalizep), PLUS, 1,
5442 optimize_insn_for_speed_p ()) == 0)
5444 tem = emit_store_flag_1 (subtarget, rcode, op0, op1, mode, 0,
5445 STORE_FLAG_VALUE, target_mode);
5446 if (tem)
5447 return expand_binop (target_mode, add_optab, tem,
5448 GEN_INT (normalizep),
5449 target, 0, OPTAB_WIDEN);
5451 else if (!want_add
5452 && rtx_cost (trueval, XOR, 1,
5453 optimize_insn_for_speed_p ()) == 0)
5455 tem = emit_store_flag_1 (subtarget, rcode, op0, op1, mode, 0,
5456 normalizep, target_mode);
5457 if (tem)
5458 return expand_binop (target_mode, xor_optab, tem, trueval,
5459 target, INTVAL (trueval) >= 0, OPTAB_WIDEN);
5463 delete_insns_since (last);
5465 /* Cannot split ORDERED and UNORDERED, only try the above trick. */
5466 if (code == ORDERED || code == UNORDERED)
5467 return 0;
5469 and_them = split_comparison (code, mode, &first_code, &code);
5471 /* If there are no NaNs, the first comparison should always fall through.
5472 Effectively change the comparison to the other one. */
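/* For instance UNLT splits into UNORDERED || LT (AND_THEM is false); when
   NaNs cannot occur, e.g. under -ffinite-math-only, the UNORDERED half can
   never be true, so the whole test reduces to a plain LT.  */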
5473 if (!HONOR_NANS (mode))
5475 gcc_assert (first_code == (and_them ? ORDERED : UNORDERED));
5476 return emit_store_flag_1 (target, code, op0, op1, mode, 0, normalizep,
5477 target_mode);
5480 #ifdef HAVE_conditional_move
5481 /* Try using a setcc instruction for ORDERED/UNORDERED, followed by a
5482 conditional move. */
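/* For an OR split such as UNLT (UNORDERED || LT): TEM holds the UNORDERED
   half as a store-flag value, and the conditional move selects TRUEVAL when
   "op0 < op1" holds and TEM otherwise, which is the OR of the two halves.
   For an AND split the move instead selects TEM when the second comparison
   holds and 0 when it does not.  */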
5483 tem = emit_store_flag_1 (subtarget, first_code, op0, op1, mode, 0,
5484 normalizep, target_mode);
5485 if (tem == 0)
5486 return 0;
5488 if (and_them)
5489 tem = emit_conditional_move (target, code, op0, op1, mode,
5490 tem, const0_rtx, GET_MODE (tem), 0);
5491 else
5492 tem = emit_conditional_move (target, code, op0, op1, mode,
5493 trueval, tem, GET_MODE (tem), 0);
5495 if (tem == 0)
5496 delete_insns_since (last);
5497 return tem;
5498 #else
5499 return 0;
5500 #endif
5503 /* The remaining tricks only apply to integer comparisons. */
5505 if (GET_MODE_CLASS (mode) != MODE_INT)
5506 return 0;
5508 /* If this is an equality comparison of integers, we can try to exclusive-or
5509 (or subtract) the two operands and use a recursive call to try the
5510 comparison with zero. Don't do any of these cases if branches are
5511 very cheap. */
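/* For example "a == b" is equivalent to "(a ^ b) == 0" and to "(a - b) == 0",
   and a comparison against zero is far more likely to match one of the
   special cases handled elsewhere in this function.  */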
5513 if ((code == EQ || code == NE) && op1 != const0_rtx)
5515 tem = expand_binop (mode, xor_optab, op0, op1, subtarget, 1,
5516 OPTAB_WIDEN);
5518 if (tem == 0)
5519 tem = expand_binop (mode, sub_optab, op0, op1, subtarget, 1,
5520 OPTAB_WIDEN);
5521 if (tem != 0)
5522 tem = emit_store_flag (target, code, tem, const0_rtx,
5523 mode, unsignedp, normalizep);
5524 if (tem != 0)
5525 return tem;
5527 delete_insns_since (last);
5530 /* For integer comparisons, try the reverse comparison. However, for
5531 small X and if we'd have anyway to extend, implementing "X != 0"
5532 as "-(int)X >> 31" is still cheaper than inverting "(int)X == 0". */
5533 rcode = reverse_condition (code);
5534 if (can_compare_p (rcode, mode, ccp_store_flag)
5535 && ! (optab_handler (cstore_optab, mode) == CODE_FOR_nothing
5536 && code == NE
5537 && GET_MODE_SIZE (mode) < UNITS_PER_WORD
5538 && op1 == const0_rtx))
5540 int want_add = ((STORE_FLAG_VALUE == 1 && normalizep == -1)
5541 || (STORE_FLAG_VALUE == -1 && normalizep == 1));
5543 /* Again, for the reverse comparison, use either an addition or a XOR. */
5544 if (want_add
5545 && rtx_cost (GEN_INT (normalizep), PLUS, 1,
5546 optimize_insn_for_speed_p ()) == 0)
5548 tem = emit_store_flag_1 (subtarget, rcode, op0, op1, mode, 0,
5549 STORE_FLAG_VALUE, target_mode);
5550 if (tem != 0)
5551 tem = expand_binop (target_mode, add_optab, tem,
5552 GEN_INT (normalizep), target, 0, OPTAB_WIDEN);
5554 else if (!want_add
5555 && rtx_cost (trueval, XOR, 1,
5556 optimize_insn_for_speed_p ()) == 0)
5558 tem = emit_store_flag_1 (subtarget, rcode, op0, op1, mode, 0,
5559 normalizep, target_mode);
5560 if (tem != 0)
5561 tem = expand_binop (target_mode, xor_optab, tem, trueval, target,
5562 INTVAL (trueval) >= 0, OPTAB_WIDEN);
5565 if (tem != 0)
5566 return tem;
5567 delete_insns_since (last);
5570 /* Some other cases we can do are EQ, NE, LE, and GT comparisons with
5571 the constant zero. Reject all other comparisons at this point. Only
5572 do LE and GT if branches are expensive since they are expensive on
5573 2-operand machines. */
5575 if (op1 != const0_rtx
5576 || (code != EQ && code != NE
5577 && (BRANCH_COST (optimize_insn_for_speed_p (),
5578 false) <= 1 || (code != LE && code != GT))))
5579 return 0;
5581 /* Try to put the result of the comparison in the sign bit. Assume we can't
5582 do the necessary operation below. */
5584 tem = 0;
5586 /* To see if A <= 0, compute (A | (A - 1)). A <= 0 iff that result has
5587 the sign bit set. */
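/* Worked example in 32 bits: A = 5 gives 5 | 4 = 5 (sign clear, A > 0);
   A = 0 gives 0 | -1 = -1 (sign set); A = -3 gives -3 | -4 = -3 (sign set).
   A = INT_MIN also works: INT_MIN | INT_MAX = -1, sign set.  */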
5589 if (code == LE)
5591 /* This is destructive, so SUBTARGET can't be OP0. */
5592 if (rtx_equal_p (subtarget, op0))
5593 subtarget = 0;
5595 tem = expand_binop (mode, sub_optab, op0, const1_rtx, subtarget, 0,
5596 OPTAB_WIDEN);
5597 if (tem)
5598 tem = expand_binop (mode, ior_optab, op0, tem, subtarget, 0,
5599 OPTAB_WIDEN);
5602 /* To see if A > 0, compute (((signed) A) >> BITS) - A, where BITS is the
5603 number of bits in the mode of OP0, minus one. */
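/* Worked example in 32 bits: A = 5 gives (5 >> 31) - 5 = -5 (sign set, so
   A > 0); A = 0 gives 0 - 0 = 0 (sign clear); A = -3 gives (-1) - (-3) = 2
   (sign clear); A = INT_MIN gives (-1) - INT_MIN = INT_MAX, sign clear.  */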
5605 if (code == GT)
5607 if (rtx_equal_p (subtarget, op0))
5608 subtarget = 0;
5610 tem = expand_shift (RSHIFT_EXPR, mode, op0,
5611 GET_MODE_BITSIZE (mode) - 1,
5612 subtarget, 0);
5613 tem = expand_binop (mode, sub_optab, tem, op0, subtarget, 0,
5614 OPTAB_WIDEN);
5617 if (code == EQ || code == NE)
5619 /* For EQ or NE, one way to do the comparison is to apply an operation
5620 that converts the operand into a positive number if it is nonzero
5621 or zero if it was originally zero. Then, for EQ, we subtract 1 and
5622 for NE we negate. This puts the result in the sign bit. Then we
5623 normalize with a shift, if needed.
5625 Two operations that can do the above actions are ABS and FFS, so try
5626 them. If that doesn't work, and MODE is smaller than a full word,
5627 we can use zero-extension to the wider mode (an unsigned conversion)
5628 as the operation. */
5630 /* Note that ABS doesn't yield a positive number for INT_MIN, but
5631 that is compensated by the subsequent overflow when subtracting
5632 one / negating. */
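/* For NE: abs (x) is positive for every nonzero x, so -abs (x) has the sign
   bit set exactly when x != 0; for x == INT_MIN both abs and the negation
   wrap but the sign bit still ends up set.  For EQ: abs (x) - 1 is -1 only
   when x == 0, and wraps to INT_MAX for x == INT_MIN, so the sign bit is
   set exactly when x == 0.  */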
5634 if (optab_handler (abs_optab, mode) != CODE_FOR_nothing)
5635 tem = expand_unop (mode, abs_optab, op0, subtarget, 1);
5636 else if (optab_handler (ffs_optab, mode) != CODE_FOR_nothing)
5637 tem = expand_unop (mode, ffs_optab, op0, subtarget, 1);
5638 else if (GET_MODE_SIZE (mode) < UNITS_PER_WORD)
5640 tem = convert_modes (word_mode, mode, op0, 1);
5641 mode = word_mode;
5644 if (tem != 0)
5646 if (code == EQ)
5647 tem = expand_binop (mode, sub_optab, tem, const1_rtx, subtarget,
5648 0, OPTAB_WIDEN);
5649 else
5650 tem = expand_unop (mode, neg_optab, tem, subtarget, 0);
5653 /* If we couldn't do it that way, for NE we can "or" the two's complement
5654 of the value with itself. For EQ, we take the one's complement of
5655 that "or", which is an extra insn, so we only handle EQ if branches
5656 are expensive. */
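/* For example x = 5: -5 | 5 = -1, sign bit set; x = 0: 0 | 0 = 0, sign bit
   clear; x = INT_MIN: INT_MIN | INT_MIN = INT_MIN, sign bit set.  So the
   sign bit of (-x | x) is exactly "x != 0", and complementing first flips
   it to "x == 0".  */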
5658 if (tem == 0
5659 && (code == NE
5660 || BRANCH_COST (optimize_insn_for_speed_p (),
5661 false) > 1))
5663 if (rtx_equal_p (subtarget, op0))
5664 subtarget = 0;
5666 tem = expand_unop (mode, neg_optab, op0, subtarget, 0);
5667 tem = expand_binop (mode, ior_optab, tem, op0, subtarget, 0,
5668 OPTAB_WIDEN);
5670 if (tem && code == EQ)
5671 tem = expand_unop (mode, one_cmpl_optab, tem, subtarget, 0);
5675 if (tem && normalizep)
5676 tem = expand_shift (RSHIFT_EXPR, mode, tem,
5677 GET_MODE_BITSIZE (mode) - 1,
5678 subtarget, normalizep == 1);
5680 if (tem)
5682 if (!target)
5683 ;
5684 else if (GET_MODE (tem) != target_mode)
5686 convert_move (target, tem, 0);
5687 tem = target;
5689 else if (!subtarget)
5691 emit_move_insn (target, tem);
5692 tem = target;
5695 else
5696 delete_insns_since (last);
5698 return tem;
5701 /* Like emit_store_flag, but always succeeds. */
5703 rtx
5704 emit_store_flag_force (rtx target, enum rtx_code code, rtx op0, rtx op1,
5705 enum machine_mode mode, int unsignedp, int normalizep)
5707 rtx tem, label;
5708 rtx trueval, falseval;
5710 /* First see if emit_store_flag can do the job. */
5711 tem = emit_store_flag (target, code, op0, op1, mode, unsignedp, normalizep);
5712 if (tem != 0)
5713 return tem;
5715 if (!target)
5716 target = gen_reg_rtx (word_mode);
5718 /* If this failed, we have to do this with set/compare/jump/set code.
5719 For foo != 0, if foo is in OP0, just replace it with 1 if nonzero. */
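/* Sketch of the fallback emitted below, written as pseudo-C for the RTL:
     target = TRUEVAL; if (!(op0 CODE op1)) target = FALSEVAL;
   i.e. a move, a conditional jump around a second move, and a label.  The
   special case just below instead leaves TARGET alone when it is zero and
   overwrites it with TRUEVAL only when it is nonzero.  */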
5720 trueval = normalizep ? GEN_INT (normalizep) : const1_rtx;
5721 if (code == NE
5722 && GET_MODE_CLASS (mode) == MODE_INT
5723 && REG_P (target)
5724 && op0 == target
5725 && op1 == const0_rtx)
5727 label = gen_label_rtx ();
5728 do_compare_rtx_and_jump (target, const0_rtx, EQ, unsignedp,
5729 mode, NULL_RTX, NULL_RTX, label, -1);
5730 emit_move_insn (target, trueval);
5731 emit_label (label);
5732 return target;
5735 if (!REG_P (target)
5736 || reg_mentioned_p (target, op0) || reg_mentioned_p (target, op1))
5737 target = gen_reg_rtx (GET_MODE (target));
5739 /* Jump in the right direction if the target cannot implement CODE
5740 but can jump on its reverse condition. */
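/* For example, if the target cannot branch on ORDERED but can branch on
   UNORDERED, branch on UNORDERED instead and swap TRUEVAL and FALSEVAL so
   the value finally stored still reflects the original condition.  */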
5741 falseval = const0_rtx;
5742 if (! can_compare_p (code, mode, ccp_jump)
5743 && (! FLOAT_MODE_P (mode)
5744 || code == ORDERED || code == UNORDERED
5745 || (! HONOR_NANS (mode) && (code == LTGT || code == UNEQ))
5746 || (! HONOR_SNANS (mode) && (code == EQ || code == NE))))
5748 enum rtx_code rcode;
5749 if (FLOAT_MODE_P (mode))
5750 rcode = reverse_condition_maybe_unordered (code);
5751 else
5752 rcode = reverse_condition (code);
5754 /* Canonicalize to UNORDERED for the libcall. */
5755 if (can_compare_p (rcode, mode, ccp_jump)
5756 || (code == ORDERED && ! can_compare_p (ORDERED, mode, ccp_jump)))
5758 falseval = trueval;
5759 trueval = const0_rtx;
5760 code = rcode;
5764 emit_move_insn (target, trueval);
5765 label = gen_label_rtx ();
5766 do_compare_rtx_and_jump (op0, op1, code, unsignedp, mode, NULL_RTX,
5767 NULL_RTX, label, -1);
5769 emit_move_insn (target, falseval);
5770 emit_label (label);
5772 return target;
5775 /* Perform possibly multi-word comparison and conditional jump to LABEL
5776 if ARG1 OP ARG2 is true, where ARG1 and ARG2 are of mode MODE. This is
5777 now a thin wrapper around do_compare_rtx_and_jump. */
5779 static void
5780 do_cmp_and_jump (rtx arg1, rtx arg2, enum rtx_code op, enum machine_mode mode,
5781 rtx label)
5783 int unsignedp = (op == LTU || op == LEU || op == GTU || op == GEU);
5784 do_compare_rtx_and_jump (arg1, arg2, op, unsignedp, mode,
5785 NULL_RTX, NULL_RTX, label, -1);