/* GIMPLE store merging and byte swapping passes.
   Copyright (C) 2009-2020 Free Software Foundation, Inc.
   Contributed by ARM Ltd.

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3, or (at your option)
   any later version.

   GCC is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING3.  If not see
   <http://www.gnu.org/licenses/>.  */
/* The purpose of the store merging pass is to combine multiple memory stores
   of constant values, values loaded from memory, bitwise operations on those,
   or bit-field values, to consecutive locations, into fewer wider stores.

   For example, if we have a sequence performing four byte stores to
   consecutive memory locations:
   [p     ] := imm1;
   [p + 1B] := imm2;
   [p + 2B] := imm3;
   [p + 3B] := imm4;
   we can transform this into a single 4-byte store if the target supports it:
   [p] := imm1:imm2:imm3:imm4 concatenated according to endianness.

   Likewise, a sequence copying four bytes from one location to another:
   [p     ] := [q     ];
   [p + 1B] := [q + 1B];
   [p + 2B] := [q + 2B];
   [p + 3B] := [q + 3B];
   if there is no overlap can be transformed into a single 4-byte
   load followed by a single 4-byte store.

   Similarly, a sequence combining loaded values with constants:
   [p     ] := [q     ] ^ imm1;
   [p + 1B] := [q + 1B] ^ imm2;
   [p + 2B] := [q + 2B] ^ imm3;
   [p + 3B] := [q + 3B] ^ imm4;
   if there is no overlap can be transformed into a single 4-byte
   load, xored with imm1:imm2:imm3:imm4 and stored using a single 4-byte store.

   The same applies to a bit-field store followed by a store to the rest of
   the word:
   [p:1 ] := imm;
   [p:31] := val & 0x7FFFFFFF;
   we can transform this into a single 4-byte store if the target supports it:
   [p] := imm:(val & 0x7FFFFFFF) concatenated according to endianness.
   The algorithm is applied to each basic block in three phases:

   1) Scan through the basic block and record assignments to destinations
   that can be expressed as a store to memory of a certain size at a certain
   bit offset from base expressions we can handle.  For bit-fields we also
   record the surrounding bit region, i.e. bits that could be stored in
   a read-modify-write operation when storing the bit-field.  Record store
   chains to different bases in a hash_map (m_stores) and make sure to
   terminate such chains when appropriate (for example when the stored
   values get used subsequently).
   These stores can be a result of structure element initializers, array stores
   etc.  A store_immediate_info object is recorded for every such store.
   Record as many such assignments to a single base as possible until a
   statement that interferes with the store sequence is encountered.
   Each store has up to 2 operands, which can be either a constant, a memory
   load or an SSA name, from which the value to be stored can be computed.
   At most one of the operands can be a constant.  The operands are recorded
   in a store_operand_info struct.
   2) Analyze the chains of stores recorded in phase 1) (i.e. the vector of
   store_immediate_info objects) and coalesce contiguous stores into
   merged_store_group objects.  For bit-field stores the stores themselves
   don't need to be contiguous, just their surrounding bit regions have to be.
   If the expression being stored is different between adjacent stores, such
   as one store storing a constant and the following one storing a value
   loaded from memory, or if the loaded memory objects are not adjacent, a new
   merged_store_group is created as well.

   For example, given stores to [p], [p + 1B], [p + 3B], [p + 4B], [p + 5B]
   and [p + 6B], this phase would produce two merged_store_group objects, one
   recording the two bytes stored in the memory region [p : p + 1] and another
   recording the four bytes stored in the memory region [p + 3 : p + 6].
   3) The merged_store_group objects produced in phase 2) are processed
   to generate the sequence of wider stores that set the contiguous memory
   regions to the sequence of bytes that correspond to them.  This may emit
   multiple stores per store group to handle contiguous stores that are not
   of a size that is a power of 2.  For example it can try to emit a 40-bit
   store as a 32-bit store followed by an 8-bit store.
   We try to emit as wide stores as we can while respecting STRICT_ALIGNMENT
   or TARGET_SLOW_UNALIGNED_ACCESS settings.
   Note on endianness and example:
   Consider 2 contiguous 16-bit stores followed by 2 contiguous 8-bit stores:
   [p     ] := 0x1234;
   [p + 2B] := 0x5678;
   [p + 4B] := 0xab;
   [p + 5B] := 0xcd;

   The memory layout for little-endian (LE) and big-endian (BE) must be:
   p |LE|BE|
   ---------
   0 |34|12|
   1 |12|34|
   2 |78|56|
   3 |56|78|
   4 |ab|ab|
   5 |cd|cd|

   To merge these into a single 48-bit merged value 'val' in phase 2)
   on little-endian we insert stores to higher (consecutive) bitpositions
   into the most significant bits of the merged value.
   The final merged value would be: 0xcdab56781234

   For big-endian we insert stores to higher bitpositions into the least
   significant bits of the merged value.
   The final merged value would be: 0x12345678abcd

   Then, in phase 3), we want to emit this 48-bit value as a 32-bit store
   followed by a 16-bit store.  Again, we must consider endianness when
   breaking down the 48-bit value 'val' computed above.
   For little endian we emit:
   [p]      (32-bit) := 0x56781234; //    val & 0x0000ffffffff;
   [p + 4B] (16-bit) := 0xcdab;     //   (val & 0xffff00000000) >> 32;

   Whereas for big-endian we emit:
   [p]      (32-bit) := 0x12345678; //   (val & 0xffffffff0000) >> 16;
   [p + 4B] (16-bit) := 0xabcd;     //    val & 0x00000000ffff;  */
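
/* An illustrative, self-contained sketch (not part of the pass, kept under
   #if 0 so it does not take part in the build): it mimics the example above
   on a little-endian host by merging the four narrow stores into a single
   48-bit value and then splitting it into a 32-bit and a 16-bit store,
   checking that the resulting bytes match the per-store sequence.  */
#if 0
#include <assert.h>
#include <stdint.h>
#include <string.h>

int
main (void)
{
  unsigned char expected[6], merged[6];

  /* The original narrow stores: 2 x 16-bit followed by 2 x 8-bit.  */
  uint16_t a = 0x1234, b = 0x5678;
  memcpy (expected + 0, &a, 2);
  memcpy (expected + 2, &b, 2);
  expected[4] = 0xab;
  expected[5] = 0xcd;

  /* Phase 2 (little-endian): stores at higher bit positions go into the
     most significant bits of the merged value.  */
  uint64_t val = 0xcdab56781234ULL;

  /* Phase 3 (little-endian): emit a 32-bit store followed by a 16-bit one.  */
  uint32_t lo32 = (uint32_t) (val & 0xffffffffULL);	  /* 0x56781234 */
  uint16_t hi16 = (uint16_t) ((val >> 32) & 0xffffULL);  /* 0xcdab */
  memcpy (merged + 0, &lo32, 4);
  memcpy (merged + 4, &hi16, 2);

  assert (memcmp (expected, merged, 6) == 0);
  return 0;
}
#endif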
#include "coretypes.h"
#include "builtins.h"
#include "fold-const.h"
#include "tree-pass.h"
#include "gimple-pretty-print.h"
#include "print-tree.h"
#include "tree-hash-traits.h"
#include "gimple-iterator.h"
#include "gimplify.h"
#include "gimple-fold.h"
#include "stor-layout.h"
#include "cfgcleanup.h"
#include "tree-cfg.h"
#include "gimplify-me.h"
#include "expr.h"	/* For get_bit_range.  */
#include "optabs-tree.h"
#include "selftest.h"
/* The maximum size (in bits) of the stores this pass should generate.  */
#define MAX_STORE_BITSIZE (BITS_PER_WORD)
#define MAX_STORE_BYTES (MAX_STORE_BITSIZE / BITS_PER_UNIT)

/* Limit to bound the number of aliasing checks for loads with the same
   vuse as the corresponding store.  */
#define MAX_STORE_ALIAS_CHECKS 64

static struct
{
  /* Number of hand-written 16-bit nop / bswaps found.  */
  int found_16bit;

  /* Number of hand-written 32-bit nop / bswaps found.  */
  int found_32bit;

  /* Number of hand-written 64-bit nop / bswaps found.  */
  int found_64bit;
} nop_stats, bswap_stats;
/* A symbolic number structure is used to detect byte permutation and selection
   patterns of a source.  To achieve that, its field N contains an artificial
   number consisting of BITS_PER_MARKER sized markers tracking where each
   byte comes from in the source:

   0	   - target byte has the value 0
   FF	   - target byte has an unknown value (e.g. due to sign extension)
   1..size - marker value is the byte index in the source (0 for lsb).

   To detect permutations on memory sources (arrays and structures), a symbolic
   number is also associated with:
   - a base address BASE_ADDR and an OFFSET giving the address of the source;
   - a range which gives the difference between the highest and lowest accessed
     memory location to make such a symbolic number;
   - the address SRC of the source element of lowest address as a convenience
     to easily get BASE_ADDR + offset + lowest bytepos;
   - the number of expressions N_OPS bitwise ored together to represent the
     approximate cost of the computation.

   Note 1: the range is different from size as size reflects the size of the
   type of the current expression.  For instance, for an array char a[],
   (short) a[0] | (short) a[3] would have a size of 2 but a range of 4 while
   (short) a[0] | ((short) a[0] << 1) would still have a size of 2 but this
   time a range of 1.

   Note 2: for non-memory sources, range holds the same value as size.

   Note 3: SRC points to the SSA_NAME in case of non-memory source.  */
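
/* An illustrative, self-contained sketch (not part of the pass, kept under
   #if 0): how the marker encoding above describes a value.  For a 4-byte
   source the identity is 0x04030201 (source byte 1, counted from the lsb,
   ends up in the lowest target byte and so on), a full byte swap is
   0x01020304, and zero or unknown target bytes use the 0 / 0xff markers.  */
#if 0
#include <assert.h>
#include <stdint.h>

int
main (void)
{
  /* One marker byte per target byte, least significant marker first.  */
  uint64_t identity_4byte = 0x04030201;   /* src bytes 1,2,3,4 in order.  */
  uint64_t bswapped_4byte = 0x01020304;   /* src bytes 4,3,2,1: a bswap.  */
  uint64_t zext_2_to_4    = 0x00000201;   /* 2-byte src zero-extended.  */
  uint64_t sext_2_to_4    = 0xffff0201;   /* sign extension: top unknown.  */

  assert (identity_4byte != bswapped_4byte);
  assert ((zext_2_to_4 >> 16) == 0 && (sext_2_to_4 >> 16) == 0xffff);
  return 0;
}
#endif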
struct symbolic_number {
  uint64_t n;
  tree type;
  tree base_addr;
  tree offset;
  poly_int64_pod bytepos;
  tree src;
  tree alias_set;
  tree vuse;
  unsigned HOST_WIDE_INT range;
  int n_ops;
};
#define BITS_PER_MARKER 8
#define MARKER_MASK ((1 << BITS_PER_MARKER) - 1)
#define MARKER_BYTE_UNKNOWN MARKER_MASK
#define HEAD_MARKER(n, size) \
  ((n) & ((uint64_t) MARKER_MASK << (((size) - 1) * BITS_PER_MARKER)))

/* The number which the find_bswap_or_nop_1 result should match in
   order to have a nop.  The number is masked according to the size of
   the symbolic number before using it.  */
#define CMPNOP (sizeof (int64_t) < 8 ? 0 : \
  (uint64_t)0x08070605 << 32 | 0x04030201)

/* The number which the find_bswap_or_nop_1 result should match in
   order to have a byte swap.  The number is masked according to the
   size of the symbolic number before using it.  */
#define CMPXCHG (sizeof (int64_t) < 8 ? 0 : \
  (uint64_t)0x01020304 << 32 | 0x05060708)
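
/* An illustrative, self-contained sketch (not part of the pass, kept under
   #if 0): it reproduces the CMPNOP / CMPXCHG constants and the masking
   described above for a narrower symbolic number.  For a 4-byte value the
   nop pattern is 0x04030201 and the byte-swap pattern is 0x01020304.  */
#if 0
#include <assert.h>
#include <stdint.h>

int
main (void)
{
  const uint64_t cmpnop  = (uint64_t) 0x08070605 << 32 | 0x04030201;
  const uint64_t cmpxchg = (uint64_t) 0x01020304 << 32 | 0x05060708;

  /* Mask / shift the 8-byte patterns down to a 4-byte symbolic number,
     mirroring what find_bswap_or_nop_finalize does for n->range == 4.  */
  uint64_t mask = ((uint64_t) 1 << (4 * 8)) - 1;
  uint64_t cmpnop_4  = cmpnop & mask;
  uint64_t cmpxchg_4 = cmpxchg >> ((8 - 4) * 8);

  assert (cmpnop_4 == 0x04030201);
  assert (cmpxchg_4 == 0x01020304);
  return 0;
}
#endif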
/* Perform a SHIFT or ROTATE operation by COUNT bits on symbolic
   number N.  Return false if the requested operation is not permitted
   on a symbolic number.  */

static bool
do_shift_rotate (enum tree_code code,
		 struct symbolic_number *n,
		 int count)
{
  int i, size = TYPE_PRECISION (n->type) / BITS_PER_UNIT;
  unsigned head_marker;

  if (count < 0
      || count >= TYPE_PRECISION (n->type)
      || count % BITS_PER_UNIT != 0)
    return false;
  count = (count / BITS_PER_UNIT) * BITS_PER_MARKER;

  /* Zero out the extra bits of N in order to avoid them being shifted
     into the significant bits.  */
  if (size < 64 / BITS_PER_MARKER)
    n->n &= ((uint64_t) 1 << (size * BITS_PER_MARKER)) - 1;

  switch (code)
    {
    case LSHIFT_EXPR:
      n->n <<= count;
      break;
    case RSHIFT_EXPR:
      head_marker = HEAD_MARKER (n->n, size);
      n->n >>= count;
      /* Arithmetic shift of signed type: result is dependent on the value.  */
      if (!TYPE_UNSIGNED (n->type) && head_marker)
	for (i = 0; i < count / BITS_PER_MARKER; i++)
	  n->n |= (uint64_t) MARKER_BYTE_UNKNOWN
		  << ((size - 1 - i) * BITS_PER_MARKER);
      break;
    case LROTATE_EXPR:
      n->n = (n->n << count) | (n->n >> ((size * BITS_PER_MARKER) - count));
      break;
    case RROTATE_EXPR:
      n->n = (n->n >> count) | (n->n << ((size * BITS_PER_MARKER) - count));
      break;
    default:
      return false;
    }

  /* Zero unused bits for size.  */
  if (size < 64 / BITS_PER_MARKER)
    n->n &= ((uint64_t) 1 << (size * BITS_PER_MARKER)) - 1;

  return true;
}
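
/* An illustrative, self-contained sketch (not part of the pass, kept under
   #if 0): it mimics the marker arithmetic do_shift_rotate performs for a
   logical and an arithmetic right shift by 8 bits of a 4-byte value.  */
#if 0
#include <assert.h>
#include <stdint.h>

int
main (void)
{
  const uint64_t identity_4byte = 0x04030201;

  /* Logical shift right by one byte: the top target byte becomes zero.  */
  uint64_t logical = identity_4byte >> 8;		/* 0x00040302 */

  /* Arithmetic shift right of a signed value: the top byte depends on the
     sign bit, so it is marked unknown (0xff).  */
  uint64_t arithmetic = (identity_4byte >> 8) | ((uint64_t) 0xff << 24);

  assert (logical == 0x00040302);
  assert (arithmetic == 0xff040302);
  return 0;
}
#endif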
/* Perform sanity checking for the symbolic number N and the gimple
   statement STMT.  */

static bool
verify_symbolic_number_p (struct symbolic_number *n, gimple *stmt)
{
  tree lhs_type;

  lhs_type = gimple_expr_type (stmt);

  if (TREE_CODE (lhs_type) != INTEGER_TYPE
      && TREE_CODE (lhs_type) != ENUMERAL_TYPE)
    return false;

  if (TYPE_PRECISION (lhs_type) != TYPE_PRECISION (n->type))
    return false;

  return true;
}
/* Initialize the symbolic number N for the bswap pass from the base element
   SRC manipulated by the bitwise OR expression.  */

static bool
init_symbolic_number (struct symbolic_number *n, tree src)
{
  int size;

  if (! INTEGRAL_TYPE_P (TREE_TYPE (src)))
    return false;

  n->base_addr = n->offset = n->alias_set = n->vuse = NULL_TREE;
  n->src = src;

  /* Set up the symbolic number N by setting each byte to a value between 1 and
     the byte size of rhs1.  The highest order byte is set to n->size and the
     lowest order byte to 1.  */
  n->type = TREE_TYPE (src);
  size = TYPE_PRECISION (n->type);
  if (size % BITS_PER_UNIT != 0)
    return false;
  size /= BITS_PER_UNIT;
  if (size > 64 / BITS_PER_MARKER)
    return false;
  n->range = size;
  n->n = CMPNOP;
  n->n_ops = 1;

  if (size < 64 / BITS_PER_MARKER)
    n->n &= ((uint64_t) 1 << (size * BITS_PER_MARKER)) - 1;

  return true;
}
362 /* Check if STMT might be a byte swap or a nop from a memory source and returns
363 the answer. If so, REF is that memory source and the base of the memory area
364 accessed and the offset of the access from that base are recorded in N. */
367 find_bswap_or_nop_load (gimple
*stmt
, tree ref
, struct symbolic_number
*n
)
369 /* Leaf node is an array or component ref. Memorize its base and
370 offset from base to compare to other such leaf node. */
371 poly_int64 bitsize
, bitpos
, bytepos
;
373 int unsignedp
, reversep
, volatilep
;
374 tree offset
, base_addr
;
376 /* Not prepared to handle PDP endian. */
377 if (BYTES_BIG_ENDIAN
!= WORDS_BIG_ENDIAN
)
380 if (!gimple_assign_load_p (stmt
) || gimple_has_volatile_ops (stmt
))
383 base_addr
= get_inner_reference (ref
, &bitsize
, &bitpos
, &offset
, &mode
,
384 &unsignedp
, &reversep
, &volatilep
);
386 if (TREE_CODE (base_addr
) == TARGET_MEM_REF
)
387 /* Do not rewrite TARGET_MEM_REF. */
389 else if (TREE_CODE (base_addr
) == MEM_REF
)
391 poly_offset_int bit_offset
= 0;
392 tree off
= TREE_OPERAND (base_addr
, 1);
394 if (!integer_zerop (off
))
396 poly_offset_int boff
= mem_ref_offset (base_addr
);
397 boff
<<= LOG2_BITS_PER_UNIT
;
401 base_addr
= TREE_OPERAND (base_addr
, 0);
403 /* Avoid returning a negative bitpos as this may wreak havoc later. */
404 if (maybe_lt (bit_offset
, 0))
406 tree byte_offset
= wide_int_to_tree
407 (sizetype
, bits_to_bytes_round_down (bit_offset
));
408 bit_offset
= num_trailing_bits (bit_offset
);
410 offset
= size_binop (PLUS_EXPR
, offset
, byte_offset
);
412 offset
= byte_offset
;
415 bitpos
+= bit_offset
.force_shwi ();
418 base_addr
= build_fold_addr_expr (base_addr
);
420 if (!multiple_p (bitpos
, BITS_PER_UNIT
, &bytepos
))
422 if (!multiple_p (bitsize
, BITS_PER_UNIT
))
427 if (!init_symbolic_number (n
, ref
))
429 n
->base_addr
= base_addr
;
431 n
->bytepos
= bytepos
;
432 n
->alias_set
= reference_alias_ptr_type (ref
);
433 n
->vuse
= gimple_vuse (stmt
);
/* Compute the symbolic number N representing the result of a bitwise OR on 2
   symbolic numbers N1 and N2 whose source statements are respectively
   SOURCE_STMT1 and SOURCE_STMT2.  */
442 perform_symbolic_merge (gimple
*source_stmt1
, struct symbolic_number
*n1
,
443 gimple
*source_stmt2
, struct symbolic_number
*n2
,
444 struct symbolic_number
*n
)
449 struct symbolic_number
*n_start
;
451 tree rhs1
= gimple_assign_rhs1 (source_stmt1
);
452 if (TREE_CODE (rhs1
) == BIT_FIELD_REF
453 && TREE_CODE (TREE_OPERAND (rhs1
, 0)) == SSA_NAME
)
454 rhs1
= TREE_OPERAND (rhs1
, 0);
455 tree rhs2
= gimple_assign_rhs1 (source_stmt2
);
456 if (TREE_CODE (rhs2
) == BIT_FIELD_REF
457 && TREE_CODE (TREE_OPERAND (rhs2
, 0)) == SSA_NAME
)
458 rhs2
= TREE_OPERAND (rhs2
, 0);
  /* Sources are different, cancel bswap if they are not memory locations
     with the same base (array, structure, ...).  */
465 HOST_WIDE_INT start1
, start2
, start_sub
, end_sub
, end1
, end2
, end
;
466 struct symbolic_number
*toinc_n_ptr
, *n_end
;
467 basic_block bb1
, bb2
;
469 if (!n1
->base_addr
|| !n2
->base_addr
470 || !operand_equal_p (n1
->base_addr
, n2
->base_addr
, 0))
473 if (!n1
->offset
!= !n2
->offset
474 || (n1
->offset
&& !operand_equal_p (n1
->offset
, n2
->offset
, 0)))
478 if (!(n2
->bytepos
- n1
->bytepos
).is_constant (&start2
))
484 start_sub
= start2
- start1
;
489 start_sub
= start1
- start2
;
492 bb1
= gimple_bb (source_stmt1
);
493 bb2
= gimple_bb (source_stmt2
);
494 if (dominated_by_p (CDI_DOMINATORS
, bb1
, bb2
))
495 source_stmt
= source_stmt1
;
497 source_stmt
= source_stmt2
;
499 /* Find the highest address at which a load is performed and
500 compute related info. */
501 end1
= start1
+ (n1
->range
- 1);
502 end2
= start2
+ (n2
->range
- 1);
506 end_sub
= end2
- end1
;
511 end_sub
= end1
- end2
;
513 n_end
= (end2
> end1
) ? n2
: n1
;
515 /* Find symbolic number whose lsb is the most significant. */
516 if (BYTES_BIG_ENDIAN
)
517 toinc_n_ptr
= (n_end
== n1
) ? n2
: n1
;
519 toinc_n_ptr
= (n_start
== n1
) ? n2
: n1
;
521 n
->range
= end
- MIN (start1
, start2
) + 1;
523 /* Check that the range of memory covered can be represented by
524 a symbolic number. */
525 if (n
->range
> 64 / BITS_PER_MARKER
)
528 /* Reinterpret byte marks in symbolic number holding the value of
529 bigger weight according to target endianness. */
530 inc
= BYTES_BIG_ENDIAN
? end_sub
: start_sub
;
531 size
= TYPE_PRECISION (n1
->type
) / BITS_PER_UNIT
;
532 for (i
= 0; i
< size
; i
++, inc
<<= BITS_PER_MARKER
)
535 = (toinc_n_ptr
->n
>> (i
* BITS_PER_MARKER
)) & MARKER_MASK
;
536 if (marker
&& marker
!= MARKER_BYTE_UNKNOWN
)
537 toinc_n_ptr
->n
+= inc
;
542 n
->range
= n1
->range
;
544 source_stmt
= source_stmt1
;
548 || alias_ptr_types_compatible_p (n1
->alias_set
, n2
->alias_set
))
549 n
->alias_set
= n1
->alias_set
;
551 n
->alias_set
= ptr_type_node
;
552 n
->vuse
= n_start
->vuse
;
553 n
->base_addr
= n_start
->base_addr
;
554 n
->offset
= n_start
->offset
;
555 n
->src
= n_start
->src
;
556 n
->bytepos
= n_start
->bytepos
;
557 n
->type
= n_start
->type
;
558 size
= TYPE_PRECISION (n
->type
) / BITS_PER_UNIT
;
560 for (i
= 0, mask
= MARKER_MASK
; i
< size
; i
++, mask
<<= BITS_PER_MARKER
)
562 uint64_t masked1
, masked2
;
564 masked1
= n1
->n
& mask
;
565 masked2
= n2
->n
& mask
;
566 if (masked1
&& masked2
&& masked1
!= masked2
)
569 n
->n
= n1
->n
| n2
->n
;
570 n
->n_ops
= n1
->n_ops
+ n2
->n_ops
;
/* find_bswap_or_nop_1 invokes itself recursively with N and tries to perform
   the operation given by the rhs of STMT on the result.  If the operation
   could successfully be executed the function returns a gimple stmt whose
   rhs's first tree is the expression of the source operand and NULL
   otherwise.  */
582 find_bswap_or_nop_1 (gimple
*stmt
, struct symbolic_number
*n
, int limit
)
585 tree rhs1
, rhs2
= NULL
;
586 gimple
*rhs1_stmt
, *rhs2_stmt
, *source_stmt1
;
587 enum gimple_rhs_class rhs_class
;
589 if (!limit
|| !is_gimple_assign (stmt
))
592 rhs1
= gimple_assign_rhs1 (stmt
);
594 if (find_bswap_or_nop_load (stmt
, rhs1
, n
))
597 /* Handle BIT_FIELD_REF. */
598 if (TREE_CODE (rhs1
) == BIT_FIELD_REF
599 && TREE_CODE (TREE_OPERAND (rhs1
, 0)) == SSA_NAME
)
601 unsigned HOST_WIDE_INT bitsize
= tree_to_uhwi (TREE_OPERAND (rhs1
, 1));
602 unsigned HOST_WIDE_INT bitpos
= tree_to_uhwi (TREE_OPERAND (rhs1
, 2));
603 if (bitpos
% BITS_PER_UNIT
== 0
604 && bitsize
% BITS_PER_UNIT
== 0
605 && init_symbolic_number (n
, TREE_OPERAND (rhs1
, 0)))
607 /* Handle big-endian bit numbering in BIT_FIELD_REF. */
608 if (BYTES_BIG_ENDIAN
)
609 bitpos
= TYPE_PRECISION (n
->type
) - bitpos
- bitsize
;
612 if (!do_shift_rotate (RSHIFT_EXPR
, n
, bitpos
))
617 uint64_t tmp
= (1 << BITS_PER_UNIT
) - 1;
618 for (unsigned i
= 0; i
< bitsize
/ BITS_PER_UNIT
;
619 i
++, tmp
<<= BITS_PER_UNIT
)
620 mask
|= (uint64_t) MARKER_MASK
<< (i
* BITS_PER_MARKER
);
624 n
->type
= TREE_TYPE (rhs1
);
626 n
->range
= TYPE_PRECISION (n
->type
) / BITS_PER_UNIT
;
628 return verify_symbolic_number_p (n
, stmt
) ? stmt
: NULL
;
634 if (TREE_CODE (rhs1
) != SSA_NAME
)
637 code
= gimple_assign_rhs_code (stmt
);
638 rhs_class
= gimple_assign_rhs_class (stmt
);
639 rhs1_stmt
= SSA_NAME_DEF_STMT (rhs1
);
641 if (rhs_class
== GIMPLE_BINARY_RHS
)
642 rhs2
= gimple_assign_rhs2 (stmt
);
  /* Handle unary rhs and binary rhs with integer constants as second
     operand.  */
647 if (rhs_class
== GIMPLE_UNARY_RHS
648 || (rhs_class
== GIMPLE_BINARY_RHS
649 && TREE_CODE (rhs2
) == INTEGER_CST
))
651 if (code
!= BIT_AND_EXPR
652 && code
!= LSHIFT_EXPR
653 && code
!= RSHIFT_EXPR
654 && code
!= LROTATE_EXPR
655 && code
!= RROTATE_EXPR
656 && !CONVERT_EXPR_CODE_P (code
))
659 source_stmt1
= find_bswap_or_nop_1 (rhs1_stmt
, n
, limit
- 1);
661 /* If find_bswap_or_nop_1 returned NULL, STMT is a leaf node and
662 we have to initialize the symbolic number. */
665 if (gimple_assign_load_p (stmt
)
666 || !init_symbolic_number (n
, rhs1
))
675 int i
, size
= TYPE_PRECISION (n
->type
) / BITS_PER_UNIT
;
676 uint64_t val
= int_cst_value (rhs2
), mask
= 0;
677 uint64_t tmp
= (1 << BITS_PER_UNIT
) - 1;
679 /* Only constants masking full bytes are allowed. */
680 for (i
= 0; i
< size
; i
++, tmp
<<= BITS_PER_UNIT
)
681 if ((val
& tmp
) != 0 && (val
& tmp
) != tmp
)
684 mask
|= (uint64_t) MARKER_MASK
<< (i
* BITS_PER_MARKER
);
693 if (!do_shift_rotate (code
, n
, (int) TREE_INT_CST_LOW (rhs2
)))
698 int i
, type_size
, old_type_size
;
701 type
= gimple_expr_type (stmt
);
702 type_size
= TYPE_PRECISION (type
);
703 if (type_size
% BITS_PER_UNIT
!= 0)
705 type_size
/= BITS_PER_UNIT
;
706 if (type_size
> 64 / BITS_PER_MARKER
)
709 /* Sign extension: result is dependent on the value. */
710 old_type_size
= TYPE_PRECISION (n
->type
) / BITS_PER_UNIT
;
711 if (!TYPE_UNSIGNED (n
->type
) && type_size
> old_type_size
712 && HEAD_MARKER (n
->n
, old_type_size
))
713 for (i
= 0; i
< type_size
- old_type_size
; i
++)
714 n
->n
|= (uint64_t) MARKER_BYTE_UNKNOWN
715 << ((type_size
- 1 - i
) * BITS_PER_MARKER
);
717 if (type_size
< 64 / BITS_PER_MARKER
)
719 /* If STMT casts to a smaller type mask out the bits not
720 belonging to the target type. */
721 n
->n
&= ((uint64_t) 1 << (type_size
* BITS_PER_MARKER
)) - 1;
725 n
->range
= type_size
;
731 return verify_symbolic_number_p (n
, stmt
) ? source_stmt1
: NULL
;
734 /* Handle binary rhs. */
736 if (rhs_class
== GIMPLE_BINARY_RHS
)
738 struct symbolic_number n1
, n2
;
739 gimple
*source_stmt
, *source_stmt2
;
741 if (code
!= BIT_IOR_EXPR
)
744 if (TREE_CODE (rhs2
) != SSA_NAME
)
747 rhs2_stmt
= SSA_NAME_DEF_STMT (rhs2
);
752 source_stmt1
= find_bswap_or_nop_1 (rhs1_stmt
, &n1
, limit
- 1);
757 source_stmt2
= find_bswap_or_nop_1 (rhs2_stmt
, &n2
, limit
- 1);
762 if (TYPE_PRECISION (n1
.type
) != TYPE_PRECISION (n2
.type
))
765 if (n1
.vuse
!= n2
.vuse
)
769 = perform_symbolic_merge (source_stmt1
, &n1
, source_stmt2
, &n2
, n
);
774 if (!verify_symbolic_number_p (n
, stmt
))
786 /* Helper for find_bswap_or_nop and try_coalesce_bswap to compute
787 *CMPXCHG, *CMPNOP and adjust *N. */
790 find_bswap_or_nop_finalize (struct symbolic_number
*n
, uint64_t *cmpxchg
,
796 /* The number which the find_bswap_or_nop_1 result should match in order
797 to have a full byte swap. The number is shifted to the right
798 according to the size of the symbolic number before using it. */
802 /* Find real size of result (highest non-zero byte). */
804 for (tmpn
= n
->n
, rsize
= 0; tmpn
; tmpn
>>= BITS_PER_MARKER
, rsize
++);
808 /* Zero out the bits corresponding to untouched bytes in original gimple
810 if (n
->range
< (int) sizeof (int64_t))
812 mask
= ((uint64_t) 1 << (n
->range
* BITS_PER_MARKER
)) - 1;
813 *cmpxchg
>>= (64 / BITS_PER_MARKER
- n
->range
) * BITS_PER_MARKER
;
817 /* Zero out the bits corresponding to unused bytes in the result of the
818 gimple expression. */
819 if (rsize
< n
->range
)
821 if (BYTES_BIG_ENDIAN
)
823 mask
= ((uint64_t) 1 << (rsize
* BITS_PER_MARKER
)) - 1;
825 *cmpnop
>>= (n
->range
- rsize
) * BITS_PER_MARKER
;
829 mask
= ((uint64_t) 1 << (rsize
* BITS_PER_MARKER
)) - 1;
830 *cmpxchg
>>= (n
->range
- rsize
) * BITS_PER_MARKER
;
836 n
->range
*= BITS_PER_UNIT
;
/* Check if STMT completes a bswap implementation or a read in a given
   endianness consisting of ORs, SHIFTs and ANDs and sets *BSWAP
   accordingly.  It also sets N to represent the kind of operations
   performed: size of the resulting expression and whether it works on
   a memory source, and if so alias-set and vuse.  At last, the
   function returns a stmt whose rhs's first tree is the source
   expression.  */
848 find_bswap_or_nop (gimple
*stmt
, struct symbolic_number
*n
, bool *bswap
)
850 /* The last parameter determines the depth search limit. It usually
851 correlates directly to the number n of bytes to be touched. We
852 increase that number by 2 * (log2(n) + 1) here in order to also
853 cover signed -> unsigned conversions of the src operand as can be seen
854 in libgcc, and for initial shift/and operation of the src operand. */
855 int limit
= TREE_INT_CST_LOW (TYPE_SIZE_UNIT (gimple_expr_type (stmt
)));
856 limit
+= 2 * (1 + (int) ceil_log2 ((unsigned HOST_WIDE_INT
) limit
));
857 gimple
*ins_stmt
= find_bswap_or_nop_1 (stmt
, n
, limit
);
862 uint64_t cmpxchg
, cmpnop
;
863 find_bswap_or_nop_finalize (n
, &cmpxchg
, &cmpnop
);
  /* A complete byte swap should make the symbolic number start with
     the largest digit in the highest order byte.  An unchanged symbolic
     number indicates a read with same endianness as target architecture.  */
870 else if (n
->n
== cmpxchg
)
875 /* Useless bit manipulation performed by code. */
876 if (!n
->base_addr
&& n
->n
== cmpnop
&& n
->n_ops
== 1)
882 const pass_data pass_data_optimize_bswap
=
884 GIMPLE_PASS
, /* type */
886 OPTGROUP_NONE
, /* optinfo_flags */
888 PROP_ssa
, /* properties_required */
889 0, /* properties_provided */
890 0, /* properties_destroyed */
891 0, /* todo_flags_start */
892 0, /* todo_flags_finish */
895 class pass_optimize_bswap
: public gimple_opt_pass
898 pass_optimize_bswap (gcc::context
*ctxt
)
899 : gimple_opt_pass (pass_data_optimize_bswap
, ctxt
)
902 /* opt_pass methods: */
903 virtual bool gate (function
*)
905 return flag_expensive_optimizations
&& optimize
&& BITS_PER_UNIT
== 8;
908 virtual unsigned int execute (function
*);
910 }; // class pass_optimize_bswap
/* Perform the bswap optimization: replace the expression computed in the rhs
   of gsi_stmt (GSI) (or if NULL add instead of replace) by an equivalent
   bswap, load or load + bswap expression.
   Which of these alternatives replaces the rhs is given by N->base_addr (non
   null if a load is needed) and BSWAP.  The type, VUSE and alias-set of the
   load to perform are also given in N while the builtin bswap invocation is
   given in FNDECL.  Finally, if a load is involved, INS_STMT refers to one of
   the load statements involved to construct the rhs in gsi_stmt (GSI) and
   N->range gives the size of the rhs expression for maintaining some
   statistics.

   Note that if the replacement involves a load and if gsi_stmt (GSI) is
   non-NULL, that stmt is moved just after INS_STMT to do the load with the
   same VUSE which can lead to gsi_stmt (GSI) changing basic block.  */
928 bswap_replace (gimple_stmt_iterator gsi
, gimple
*ins_stmt
, tree fndecl
,
929 tree bswap_type
, tree load_type
, struct symbolic_number
*n
,
932 tree src
, tmp
, tgt
= NULL_TREE
;
935 gimple
*cur_stmt
= gsi_stmt (gsi
);
938 tgt
= gimple_assign_lhs (cur_stmt
);
940 /* Need to load the value from memory first. */
943 gimple_stmt_iterator gsi_ins
= gsi
;
945 gsi_ins
= gsi_for_stmt (ins_stmt
);
946 tree addr_expr
, addr_tmp
, val_expr
, val_tmp
;
947 tree load_offset_ptr
, aligned_load_type
;
949 unsigned align
= get_object_alignment (src
);
950 poly_int64 load_offset
= 0;
954 basic_block ins_bb
= gimple_bb (ins_stmt
);
955 basic_block cur_bb
= gimple_bb (cur_stmt
);
956 if (!dominated_by_p (CDI_DOMINATORS
, cur_bb
, ins_bb
))
	  /* Move cur_stmt just before one of the loads of the original
	     sequence to ensure it has the same VUSE.  See PR61517 for what
	     could go wrong.  */
962 if (gimple_bb (cur_stmt
) != gimple_bb (ins_stmt
))
963 reset_flow_sensitive_info (gimple_assign_lhs (cur_stmt
));
964 gsi_move_before (&gsi
, &gsi_ins
);
965 gsi
= gsi_for_stmt (cur_stmt
);
      /* Compute address to load from and cast according to the size
	 of the load.  */
972 addr_expr
= build_fold_addr_expr (src
);
973 if (is_gimple_mem_ref_addr (addr_expr
))
974 addr_tmp
= unshare_expr (addr_expr
);
977 addr_tmp
= unshare_expr (n
->base_addr
);
978 if (!is_gimple_mem_ref_addr (addr_tmp
))
979 addr_tmp
= force_gimple_operand_gsi_1 (&gsi
, addr_tmp
,
980 is_gimple_mem_ref_addr
,
983 load_offset
= n
->bytepos
;
987 = force_gimple_operand_gsi (&gsi
, unshare_expr (n
->offset
),
988 true, NULL_TREE
, true,
991 = gimple_build_assign (make_ssa_name (TREE_TYPE (addr_tmp
)),
992 POINTER_PLUS_EXPR
, addr_tmp
, off
);
993 gsi_insert_before (&gsi
, stmt
, GSI_SAME_STMT
);
994 addr_tmp
= gimple_assign_lhs (stmt
);
998 /* Perform the load. */
999 aligned_load_type
= load_type
;
1000 if (align
< TYPE_ALIGN (load_type
))
1001 aligned_load_type
= build_aligned_type (load_type
, align
);
1002 load_offset_ptr
= build_int_cst (n
->alias_set
, load_offset
);
1003 val_expr
= fold_build2 (MEM_REF
, aligned_load_type
, addr_tmp
,
1009 nop_stats
.found_16bit
++;
1010 else if (n
->range
== 32)
1011 nop_stats
.found_32bit
++;
1014 gcc_assert (n
->range
== 64);
1015 nop_stats
.found_64bit
++;
1018 /* Convert the result of load if necessary. */
1019 if (tgt
&& !useless_type_conversion_p (TREE_TYPE (tgt
), load_type
))
1021 val_tmp
= make_temp_ssa_name (aligned_load_type
, NULL
,
1023 load_stmt
= gimple_build_assign (val_tmp
, val_expr
);
1024 gimple_set_vuse (load_stmt
, n
->vuse
);
1025 gsi_insert_before (&gsi
, load_stmt
, GSI_SAME_STMT
);
1026 gimple_assign_set_rhs_with_ops (&gsi
, NOP_EXPR
, val_tmp
);
1027 update_stmt (cur_stmt
);
1031 gimple_assign_set_rhs_with_ops (&gsi
, MEM_REF
, val_expr
);
1032 gimple_set_vuse (cur_stmt
, n
->vuse
);
1033 update_stmt (cur_stmt
);
1037 tgt
= make_ssa_name (load_type
);
1038 cur_stmt
= gimple_build_assign (tgt
, MEM_REF
, val_expr
);
1039 gimple_set_vuse (cur_stmt
, n
->vuse
);
1040 gsi_insert_before (&gsi
, cur_stmt
, GSI_SAME_STMT
);
1046 "%d bit load in target endianness found at: ",
1048 print_gimple_stmt (dump_file
, cur_stmt
, 0);
1054 val_tmp
= make_temp_ssa_name (aligned_load_type
, NULL
, "load_dst");
1055 load_stmt
= gimple_build_assign (val_tmp
, val_expr
);
1056 gimple_set_vuse (load_stmt
, n
->vuse
);
1057 gsi_insert_before (&gsi
, load_stmt
, GSI_SAME_STMT
);
1064 if (tgt
&& !useless_type_conversion_p (TREE_TYPE (tgt
), TREE_TYPE (src
)))
1066 if (!is_gimple_val (src
))
1068 g
= gimple_build_assign (tgt
, NOP_EXPR
, src
);
1071 g
= gimple_build_assign (tgt
, src
);
1075 nop_stats
.found_16bit
++;
1076 else if (n
->range
== 32)
1077 nop_stats
.found_32bit
++;
1080 gcc_assert (n
->range
== 64);
1081 nop_stats
.found_64bit
++;
1086 "%d bit reshuffle in target endianness found at: ",
1089 print_gimple_stmt (dump_file
, cur_stmt
, 0);
1092 print_generic_expr (dump_file
, tgt
, TDF_NONE
);
1093 fprintf (dump_file
, "\n");
1097 gsi_replace (&gsi
, g
, true);
1100 else if (TREE_CODE (src
) == BIT_FIELD_REF
)
1101 src
= TREE_OPERAND (src
, 0);
1104 bswap_stats
.found_16bit
++;
1105 else if (n
->range
== 32)
1106 bswap_stats
.found_32bit
++;
1109 gcc_assert (n
->range
== 64);
1110 bswap_stats
.found_64bit
++;
1115 /* Convert the src expression if necessary. */
1116 if (!useless_type_conversion_p (TREE_TYPE (tmp
), bswap_type
))
1118 gimple
*convert_stmt
;
1120 tmp
= make_temp_ssa_name (bswap_type
, NULL
, "bswapsrc");
1121 convert_stmt
= gimple_build_assign (tmp
, NOP_EXPR
, src
);
1122 gsi_insert_before (&gsi
, convert_stmt
, GSI_SAME_STMT
);
  /* Canonical form for 16 bit bswap is a rotate expression.  Only 16-bit
     values are handled this way, as a rotation of a 2N-bit value by N bits
     is generally not equivalent to a bswap.  Consider for instance
     0x01020304 r>> 16 which gives 0x03040102 while a bswap for that value
     is 0x04030201.  */
1129 if (bswap
&& n
->range
== 16)
1131 tree count
= build_int_cst (NULL
, BITS_PER_UNIT
);
1132 src
= fold_build2 (LROTATE_EXPR
, bswap_type
, tmp
, count
);
1133 bswap_stmt
= gimple_build_assign (NULL
, src
);
1136 bswap_stmt
= gimple_build_call (fndecl
, 1, tmp
);
1138 if (tgt
== NULL_TREE
)
1139 tgt
= make_ssa_name (bswap_type
);
1142 /* Convert the result if necessary. */
1143 if (!useless_type_conversion_p (TREE_TYPE (tgt
), bswap_type
))
1145 gimple
*convert_stmt
;
1147 tmp
= make_temp_ssa_name (bswap_type
, NULL
, "bswapdst");
1148 convert_stmt
= gimple_build_assign (tgt
, NOP_EXPR
, tmp
);
1149 gsi_insert_after (&gsi
, convert_stmt
, GSI_SAME_STMT
);
1152 gimple_set_lhs (bswap_stmt
, tmp
);
1156 fprintf (dump_file
, "%d bit bswap implementation found at: ",
1159 print_gimple_stmt (dump_file
, cur_stmt
, 0);
1162 print_generic_expr (dump_file
, tgt
, TDF_NONE
);
1163 fprintf (dump_file
, "\n");
1169 gsi_insert_after (&gsi
, bswap_stmt
, GSI_SAME_STMT
);
1170 gsi_remove (&gsi
, true);
1173 gsi_insert_before (&gsi
, bswap_stmt
, GSI_SAME_STMT
);
/* Find manual byte swap implementations as well as load in a given
   endianness.  Byte swaps are turned into a bswap builtin invocation
   while endian loads are converted to a bswap builtin invocation or
   a simple load according to the target endianness.  */
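
/* An illustrative, self-contained sketch (not part of the pass, kept under
   #if 0): the kind of hand-written byte swaps and endian loads this pass is
   meant to recognize.  On a little-endian target load_be32 below can be
   turned into a 4-byte load followed by a __builtin_bswap32 call, while
   load_le32 becomes a plain 4-byte load; a 16-bit swap such as swap16 is
   instead canonicalized to a rotate by 8 as explained in bswap_replace.  */
#if 0
#include <stdint.h>

uint32_t
load_be32 (const unsigned char *p)
{
  return ((uint32_t) p[0] << 24) | ((uint32_t) p[1] << 16)
	 | ((uint32_t) p[2] << 8) | (uint32_t) p[3];
}

uint32_t
load_le32 (const unsigned char *p)
{
  return ((uint32_t) p[3] << 24) | ((uint32_t) p[2] << 16)
	 | ((uint32_t) p[1] << 8) | (uint32_t) p[0];
}

uint16_t
swap16 (uint16_t x)
{
  return (uint16_t) ((x >> 8) | (x << 8));
}
#endif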
1183 pass_optimize_bswap::execute (function
*fun
)
1186 bool bswap32_p
, bswap64_p
;
1187 bool changed
= false;
1188 tree bswap32_type
= NULL_TREE
, bswap64_type
= NULL_TREE
;
1190 bswap32_p
= (builtin_decl_explicit_p (BUILT_IN_BSWAP32
)
1191 && optab_handler (bswap_optab
, SImode
) != CODE_FOR_nothing
);
1192 bswap64_p
= (builtin_decl_explicit_p (BUILT_IN_BSWAP64
)
1193 && (optab_handler (bswap_optab
, DImode
) != CODE_FOR_nothing
1194 || (bswap32_p
&& word_mode
== SImode
)));
1196 /* Determine the argument type of the builtins. The code later on
1197 assumes that the return and argument type are the same. */
1200 tree fndecl
= builtin_decl_explicit (BUILT_IN_BSWAP32
);
1201 bswap32_type
= TREE_VALUE (TYPE_ARG_TYPES (TREE_TYPE (fndecl
)));
1206 tree fndecl
= builtin_decl_explicit (BUILT_IN_BSWAP64
);
1207 bswap64_type
= TREE_VALUE (TYPE_ARG_TYPES (TREE_TYPE (fndecl
)));
1210 memset (&nop_stats
, 0, sizeof (nop_stats
));
1211 memset (&bswap_stats
, 0, sizeof (bswap_stats
));
1212 calculate_dominance_info (CDI_DOMINATORS
);
1214 FOR_EACH_BB_FN (bb
, fun
)
1216 gimple_stmt_iterator gsi
;
1218 /* We do a reverse scan for bswap patterns to make sure we get the
1219 widest match. As bswap pattern matching doesn't handle previously
1220 inserted smaller bswap replacements as sub-patterns, the wider
1221 variant wouldn't be detected. */
1222 for (gsi
= gsi_last_bb (bb
); !gsi_end_p (gsi
);)
1224 gimple
*ins_stmt
, *cur_stmt
= gsi_stmt (gsi
);
1225 tree fndecl
= NULL_TREE
, bswap_type
= NULL_TREE
, load_type
;
1226 enum tree_code code
;
1227 struct symbolic_number n
;
	  /* This gsi_prev (&gsi) is not part of the for loop because cur_stmt
	     might be moved to a different basic block by bswap_replace and gsi
	     must not point to it if that's the case.  Moving the gsi_prev
	     there makes sure that gsi points to the statement previous to
	     cur_stmt while still making sure that all statements are
	     considered in this basic block.  */
1238 if (!is_gimple_assign (cur_stmt
))
1241 code
= gimple_assign_rhs_code (cur_stmt
);
1246 if (!tree_fits_uhwi_p (gimple_assign_rhs2 (cur_stmt
))
1247 || tree_to_uhwi (gimple_assign_rhs2 (cur_stmt
))
1257 ins_stmt
= find_bswap_or_nop (cur_stmt
, &n
, &bswap
);
1265 /* Already in canonical form, nothing to do. */
1266 if (code
== LROTATE_EXPR
|| code
== RROTATE_EXPR
)
1268 load_type
= bswap_type
= uint16_type_node
;
1271 load_type
= uint32_type_node
;
1274 fndecl
= builtin_decl_explicit (BUILT_IN_BSWAP32
);
1275 bswap_type
= bswap32_type
;
1279 load_type
= uint64_type_node
;
1282 fndecl
= builtin_decl_explicit (BUILT_IN_BSWAP64
);
1283 bswap_type
= bswap64_type
;
1290 if (bswap
&& !fndecl
&& n
.range
!= 16)
1293 if (bswap_replace (gsi_for_stmt (cur_stmt
), ins_stmt
, fndecl
,
1294 bswap_type
, load_type
, &n
, bswap
))
1299 statistics_counter_event (fun
, "16-bit nop implementations found",
1300 nop_stats
.found_16bit
);
1301 statistics_counter_event (fun
, "32-bit nop implementations found",
1302 nop_stats
.found_32bit
);
1303 statistics_counter_event (fun
, "64-bit nop implementations found",
1304 nop_stats
.found_64bit
);
1305 statistics_counter_event (fun
, "16-bit bswap implementations found",
1306 bswap_stats
.found_16bit
);
1307 statistics_counter_event (fun
, "32-bit bswap implementations found",
1308 bswap_stats
.found_32bit
);
1309 statistics_counter_event (fun
, "64-bit bswap implementations found",
1310 bswap_stats
.found_64bit
);
1312 return (changed
? TODO_update_ssa
: 0);
1318 make_pass_optimize_bswap (gcc::context
*ctxt
)
1320 return new pass_optimize_bswap (ctxt
);
/* Struct recording one operand for the store, which is either a constant,
   then VAL represents the constant and all the other fields are zero, or
   a memory load, then VAL represents the reference, BASE_ADDR is non-NULL
   and the other fields also reflect the memory load, or an SSA name, then
   VAL represents the SSA name and all the other fields are zero.  */
1331 class store_operand_info
1336 poly_uint64 bitsize
;
1338 poly_uint64 bitregion_start
;
1339 poly_uint64 bitregion_end
;
1342 store_operand_info ();
1345 store_operand_info::store_operand_info ()
1346 : val (NULL_TREE
), base_addr (NULL_TREE
), bitsize (0), bitpos (0),
1347 bitregion_start (0), bitregion_end (0), stmt (NULL
), bit_not_p (false)
1351 /* Struct recording the information about a single store of an immediate
1352 to memory. These are created in the first phase and coalesced into
1353 merged_store_group objects in the second phase. */
1355 class store_immediate_info
1358 unsigned HOST_WIDE_INT bitsize
;
1359 unsigned HOST_WIDE_INT bitpos
;
1360 unsigned HOST_WIDE_INT bitregion_start
;
1361 /* This is one past the last bit of the bit region. */
1362 unsigned HOST_WIDE_INT bitregion_end
;
  /* INTEGER_CST for constant stores, MEM_REF for memory copy,
     BIT_*_EXPR for logical bitwise operation, BIT_INSERT_EXPR
     on the RHS of a bit insertion,
     LROTATE_EXPR if it can be only bswap optimized and
     ops are not really meaningful.
     NOP_EXPR if bswap optimization detected identity, ops
     are not meaningful.  */
  enum tree_code rhs_code;
1373 /* Two fields for bswap optimization purposes. */
1374 struct symbolic_number n
;
1376 /* True if BIT_{AND,IOR,XOR}_EXPR result is inverted before storing. */
1378 /* True if ops have been swapped and thus ops[1] represents
1379 rhs1 of BIT_{AND,IOR,XOR}_EXPR and ops[0] represents rhs2. */
1381 /* The index number of the landing pad, or 0 if there is none. */
1383 /* Operands. For BIT_*_EXPR rhs_code both operands are used, otherwise
1384 just the first one. */
1385 store_operand_info ops
[2];
1386 store_immediate_info (unsigned HOST_WIDE_INT
, unsigned HOST_WIDE_INT
,
1387 unsigned HOST_WIDE_INT
, unsigned HOST_WIDE_INT
,
1388 gimple
*, unsigned int, enum tree_code
,
1389 struct symbolic_number
&, gimple
*, bool, int,
1390 const store_operand_info
&,
1391 const store_operand_info
&);
1394 store_immediate_info::store_immediate_info (unsigned HOST_WIDE_INT bs
,
1395 unsigned HOST_WIDE_INT bp
,
1396 unsigned HOST_WIDE_INT brs
,
1397 unsigned HOST_WIDE_INT bre
,
1400 enum tree_code rhscode
,
1401 struct symbolic_number
&nr
,
1405 const store_operand_info
&op0r
,
1406 const store_operand_info
&op1r
)
1407 : bitsize (bs
), bitpos (bp
), bitregion_start (brs
), bitregion_end (bre
),
1408 stmt (st
), order (ord
), rhs_code (rhscode
), n (nr
),
1409 ins_stmt (ins_stmtp
), bit_not_p (bitnotp
), ops_swapped_p (false),
1411 #if __cplusplus >= 201103L
1412 , ops
{ op0r
, op1r
}
1422 /* Struct representing a group of stores to contiguous memory locations.
1423 These are produced by the second phase (coalescing) and consumed in the
1424 third phase that outputs the widened stores. */
1426 class merged_store_group
1429 unsigned HOST_WIDE_INT start
;
1430 unsigned HOST_WIDE_INT width
;
1431 unsigned HOST_WIDE_INT bitregion_start
;
1432 unsigned HOST_WIDE_INT bitregion_end
;
1433 /* The size of the allocated memory for val and mask. */
1434 unsigned HOST_WIDE_INT buf_size
;
1435 unsigned HOST_WIDE_INT align_base
;
1436 poly_uint64 load_align_base
[2];
1439 unsigned int load_align
[2];
1440 unsigned int first_order
;
1441 unsigned int last_order
;
1443 bool only_constants
;
1444 unsigned int first_nonmergeable_order
;
1447 auto_vec
<store_immediate_info
*> stores
;
1448 /* We record the first and last original statements in the sequence because
1449 we'll need their vuse/vdef and replacement position. It's easier to keep
1450 track of them separately as 'stores' is reordered by apply_stores. */
1454 unsigned char *mask
;
1456 merged_store_group (store_immediate_info
*);
1457 ~merged_store_group ();
1458 bool can_be_merged_into (store_immediate_info
*);
1459 void merge_into (store_immediate_info
*);
1460 void merge_overlapping (store_immediate_info
*);
1461 bool apply_stores ();
1463 void do_merge (store_immediate_info
*);
1466 /* Debug helper. Dump LEN elements of byte array PTR to FD in hex. */
1469 dump_char_array (FILE *fd
, unsigned char *ptr
, unsigned int len
)
1474 for (unsigned int i
= 0; i
< len
; i
++)
1475 fprintf (fd
, "%02x ", ptr
[i
]);
1479 /* Clear out LEN bits starting from bit START in the byte array
1480 PTR. This clears the bits to the *right* from START.
1481 START must be within [0, BITS_PER_UNIT) and counts starting from
1482 the least significant bit. */
1485 clear_bit_region_be (unsigned char *ptr
, unsigned int start
,
1490 /* Clear len bits to the right of start. */
1491 else if (len
<= start
+ 1)
1493 unsigned char mask
= (~(~0U << len
));
1494 mask
= mask
<< (start
+ 1U - len
);
1497 else if (start
!= BITS_PER_UNIT
- 1)
1499 clear_bit_region_be (ptr
, start
, (start
% BITS_PER_UNIT
) + 1);
1500 clear_bit_region_be (ptr
+ 1, BITS_PER_UNIT
- 1,
1501 len
- (start
% BITS_PER_UNIT
) - 1);
1503 else if (start
== BITS_PER_UNIT
- 1
1504 && len
> BITS_PER_UNIT
)
1506 unsigned int nbytes
= len
/ BITS_PER_UNIT
;
1507 memset (ptr
, 0, nbytes
);
1508 if (len
% BITS_PER_UNIT
!= 0)
1509 clear_bit_region_be (ptr
+ nbytes
, BITS_PER_UNIT
- 1,
1510 len
% BITS_PER_UNIT
);
/* In the byte array PTR clear the bit region starting at bit
   START which is LEN bits wide.
   For regions spanning multiple bytes do this recursively until we reach
   zero LEN or a region contained within a single byte.  */
1522 clear_bit_region (unsigned char *ptr
, unsigned int start
,
1525 /* Degenerate base case. */
1528 else if (start
>= BITS_PER_UNIT
)
1529 clear_bit_region (ptr
+ 1, start
- BITS_PER_UNIT
, len
);
1530 /* Second base case. */
1531 else if ((start
+ len
) <= BITS_PER_UNIT
)
1533 unsigned char mask
= (~0U) << (unsigned char) (BITS_PER_UNIT
- len
);
1534 mask
>>= BITS_PER_UNIT
- (start
+ len
);
1540 /* Clear most significant bits in a byte and proceed with the next byte. */
1541 else if (start
!= 0)
1543 clear_bit_region (ptr
, start
, BITS_PER_UNIT
- start
);
1544 clear_bit_region (ptr
+ 1, 0, len
- (BITS_PER_UNIT
- start
));
1546 /* Whole bytes need to be cleared. */
1547 else if (start
== 0 && len
> BITS_PER_UNIT
)
1549 unsigned int nbytes
= len
/ BITS_PER_UNIT
;
      /* We could recurse on each byte but we clear whole bytes, so a simple
	 memset will do.  */
1552 memset (ptr
, '\0', nbytes
);
1553 /* Clear the remaining sub-byte region if there is one. */
1554 if (len
% BITS_PER_UNIT
!= 0)
1555 clear_bit_region (ptr
+ nbytes
, 0, len
% BITS_PER_UNIT
);
1561 /* Write BITLEN bits of EXPR to the byte array PTR at
1562 bit position BITPOS. PTR should contain TOTAL_BYTES elements.
1563 Return true if the operation succeeded. */
1566 encode_tree_to_bitpos (tree expr
, unsigned char *ptr
, int bitlen
, int bitpos
,
1567 unsigned int total_bytes
)
1569 unsigned int first_byte
= bitpos
/ BITS_PER_UNIT
;
1570 bool sub_byte_op_p
= ((bitlen
% BITS_PER_UNIT
)
1571 || (bitpos
% BITS_PER_UNIT
)
1572 || !int_mode_for_size (bitlen
, 0).exists ());
1574 = (TREE_CODE (expr
) == CONSTRUCTOR
1575 && CONSTRUCTOR_NELTS (expr
) == 0
1576 && TYPE_SIZE_UNIT (TREE_TYPE (expr
))
1577 && tree_fits_uhwi_p (TYPE_SIZE_UNIT (TREE_TYPE (expr
))));
1581 if (first_byte
>= total_bytes
)
1583 total_bytes
-= first_byte
;
1586 unsigned HOST_WIDE_INT rhs_bytes
1587 = tree_to_uhwi (TYPE_SIZE_UNIT (TREE_TYPE (expr
)));
1588 if (rhs_bytes
> total_bytes
)
1590 memset (ptr
+ first_byte
, '\0', rhs_bytes
);
1593 return native_encode_expr (expr
, ptr
+ first_byte
, total_bytes
) != 0;
  /* LITTLE-ENDIAN
     We are writing a non byte-sized quantity or at a position that is not
     at a byte boundary.
       |--------|--------|--------| ptr + first_byte
             ^              ^
             xxx xxxxxxxx xxx< bp>
             |______EXPR____|

     First native_encode_expr EXPR into a temporary buffer and shift each
     byte in the buffer by 'bp' (carrying the bits over as necessary).
     |00000000|00xxxxxx|xxxxxxxx| << bp = |000xxxxx|xxxxxxxx|xxx00000|
                                              <------bitlen---->< bp>
     Then we clear the destination bits:
     |---00000|00000000|000-----| ptr + first_byte
         <-------bitlen--->< bp>

     Finally we ORR the bytes of the shifted EXPR into the cleared region:
     |---xxxxx|xxxxxxxx|xxx-----| ptr + first_byte.

     BIG-ENDIAN
     We are writing a non byte-sized quantity or at a position that is not
     at a byte boundary.
       ptr + first_byte |--------|--------|--------|
                              ^              ^
                        <bp >xxx xxxxxxxx xxx
                             |_____EXPR_____|

     First native_encode_expr EXPR into a temporary buffer and shift each
     byte in the buffer to the right by 'bp' (carrying the bits over as
     necessary).  We shift by as much as needed to align the most significant
     bit of EXPR with bitpos:
     |00xxxxxx|xxxxxxxx| >> 3 = |00000xxx|xxxxxxxx|xxxxx000|
        <---bitlen---->         <bp ><-----bitlen----->

     Then we clear the destination bits:
     ptr + first_byte |-----000|00000000|00000---|
                      <bp ><-------bitlen----->

     Finally we ORR the bytes of the shifted EXPR into the cleared region:
     ptr + first_byte |---xxxxx|xxxxxxxx|xxx-----|.
     The awkwardness comes from the fact that bitpos is counted from the
     most significant bit of a byte.  */
1638 /* We must be dealing with fixed-size data at this point, since the
1639 total size is also fixed. */
1640 unsigned int byte_size
;
1643 unsigned HOST_WIDE_INT rhs_bytes
1644 = tree_to_uhwi (TYPE_SIZE_UNIT (TREE_TYPE (expr
)));
1645 if (rhs_bytes
> total_bytes
)
1647 byte_size
= rhs_bytes
;
1651 fixed_size_mode mode
1652 = as_a
<fixed_size_mode
> (TYPE_MODE (TREE_TYPE (expr
)));
1653 byte_size
= GET_MODE_SIZE (mode
);
1655 /* Allocate an extra byte so that we have space to shift into. */
1657 unsigned char *tmpbuf
= XALLOCAVEC (unsigned char, byte_size
);
1658 memset (tmpbuf
, '\0', byte_size
);
1659 /* The store detection code should only have allowed constants that are
1660 accepted by native_encode_expr or empty ctors. */
1662 && native_encode_expr (expr
, tmpbuf
, byte_size
- 1) == 0)
1665 /* The native_encode_expr machinery uses TYPE_MODE to determine how many
1666 bytes to write. This means it can write more than
1667 ROUND_UP (bitlen, BITS_PER_UNIT) / BITS_PER_UNIT bytes (for example
1668 write 8 bytes for a bitlen of 40). Skip the bytes that are not within
1669 bitlen and zero out the bits that are not relevant as well (that may
1670 contain a sign bit due to sign-extension). */
1671 unsigned int padding
1672 = byte_size
- ROUND_UP (bitlen
, BITS_PER_UNIT
) / BITS_PER_UNIT
- 1;
1673 /* On big-endian the padding is at the 'front' so just skip the initial
1675 if (BYTES_BIG_ENDIAN
)
1678 byte_size
-= padding
;
1680 if (bitlen
% BITS_PER_UNIT
!= 0)
1682 if (BYTES_BIG_ENDIAN
)
1683 clear_bit_region_be (tmpbuf
, BITS_PER_UNIT
- 1,
1684 BITS_PER_UNIT
- (bitlen
% BITS_PER_UNIT
));
1686 clear_bit_region (tmpbuf
, bitlen
,
1687 byte_size
* BITS_PER_UNIT
- bitlen
);
1689 /* Left shifting relies on the last byte being clear if bitlen is
1690 a multiple of BITS_PER_UNIT, which might not be clear if
1691 there are padding bytes. */
1692 else if (!BYTES_BIG_ENDIAN
)
1693 tmpbuf
[byte_size
- 1] = '\0';
1695 /* Clear the bit region in PTR where the bits from TMPBUF will be
1697 if (BYTES_BIG_ENDIAN
)
1698 clear_bit_region_be (ptr
+ first_byte
,
1699 BITS_PER_UNIT
- 1 - (bitpos
% BITS_PER_UNIT
), bitlen
);
1701 clear_bit_region (ptr
+ first_byte
, bitpos
% BITS_PER_UNIT
, bitlen
);
1704 int bitlen_mod
= bitlen
% BITS_PER_UNIT
;
1705 int bitpos_mod
= bitpos
% BITS_PER_UNIT
;
1707 bool skip_byte
= false;
1708 if (BYTES_BIG_ENDIAN
)
1710 /* BITPOS and BITLEN are exactly aligned and no shifting
1712 if (bitpos_mod
+ bitlen_mod
== BITS_PER_UNIT
1713 || (bitpos_mod
== 0 && bitlen_mod
== 0))
1715 /* |. . . . . . . .|
1717 We always shift right for BYTES_BIG_ENDIAN so shift the beginning
1718 of the value until it aligns with 'bp' in the next byte over. */
1719 else if (bitpos_mod
+ bitlen_mod
< BITS_PER_UNIT
)
1721 shift_amnt
= bitlen_mod
+ bitpos_mod
;
1722 skip_byte
= bitlen_mod
!= 0;
1724 /* |. . . . . . . .|
1727 Shift the value right within the same byte so it aligns with 'bp'. */
1729 shift_amnt
= bitlen_mod
+ bitpos_mod
- BITS_PER_UNIT
;
1732 shift_amnt
= bitpos
% BITS_PER_UNIT
;
1734 /* Create the shifted version of EXPR. */
1735 if (!BYTES_BIG_ENDIAN
)
1737 shift_bytes_in_array_left (tmpbuf
, byte_size
, shift_amnt
);
1738 if (shift_amnt
== 0)
1743 gcc_assert (BYTES_BIG_ENDIAN
);
1744 shift_bytes_in_array_right (tmpbuf
, byte_size
, shift_amnt
);
1745 /* If shifting right forced us to move into the next byte skip the now
1754 /* Insert the bits from TMPBUF. */
1755 for (unsigned int i
= 0; i
< byte_size
; i
++)
1756 ptr
[first_byte
+ i
] |= tmpbuf
[i
];
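
/* An illustrative, self-contained sketch (not part of the pass, kept under
   #if 0): a naive per-bit routine with the same observable effect as the
   little-endian path of encode_tree_to_bitpos / clear_bit_region above -
   it writes BITLEN bits at an arbitrary bit position, clearing only the
   destination region and leaving surrounding bits untouched.  The pass
   itself works on whole bytes with shift_bytes_in_array_left for speed.  */
#if 0
#include <assert.h>
#include <stdint.h>
#include <string.h>

/* Write the low BITLEN bits of VAL into BUF starting at bit position BITPOS
   (bits counted from the least significant bit of buf[0]).  */
static void
write_bits_le (unsigned char *buf, uint64_t val, int bitpos, int bitlen)
{
  for (int i = 0; i < bitlen; i++)
    {
      int bit = bitpos + i;
      unsigned char mask = (unsigned char) (1u << (bit % 8));
      buf[bit / 8] &= (unsigned char) ~mask;	/* Clear destination bit.  */
      if ((val >> i) & 1)
	buf[bit / 8] |= mask;			/* OR the source bit in.  */
    }
}

int
main (void)
{
  unsigned char buf[3];
  memset (buf, 0xff, sizeof buf);

  /* Store a 13-bit value at bit position 3.  */
  write_bits_le (buf, 0x155f, 3, 13);

  assert (buf[0] == (unsigned char) ((0x155f << 3) | 0x07));
  assert (buf[1] == (unsigned char) (0x155f >> 5));
  assert (buf[2] == 0xff);
  return 0;
}
#endif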
1761 /* Sorting function for store_immediate_info objects.
1762 Sorts them by bitposition. */
1765 sort_by_bitpos (const void *x
, const void *y
)
1767 store_immediate_info
*const *tmp
= (store_immediate_info
* const *) x
;
1768 store_immediate_info
*const *tmp2
= (store_immediate_info
* const *) y
;
1770 if ((*tmp
)->bitpos
< (*tmp2
)->bitpos
)
1772 else if ((*tmp
)->bitpos
> (*tmp2
)->bitpos
)
  /* If they are the same let's use the order which is guaranteed to
     be different.  */
1777 return (*tmp
)->order
- (*tmp2
)->order
;
1780 /* Sorting function for store_immediate_info objects.
1781 Sorts them by the order field. */
1784 sort_by_order (const void *x
, const void *y
)
1786 store_immediate_info
*const *tmp
= (store_immediate_info
* const *) x
;
1787 store_immediate_info
*const *tmp2
= (store_immediate_info
* const *) y
;
1789 if ((*tmp
)->order
< (*tmp2
)->order
)
1791 else if ((*tmp
)->order
> (*tmp2
)->order
)
/* Initialize a merged_store_group object from a store_immediate_info
   object.  */
1800 merged_store_group::merged_store_group (store_immediate_info
*info
)
1802 start
= info
->bitpos
;
1803 width
= info
->bitsize
;
1804 bitregion_start
= info
->bitregion_start
;
1805 bitregion_end
= info
->bitregion_end
;
1806 /* VAL has memory allocated for it in apply_stores once the group
1807 width has been finalized. */
1810 bit_insertion
= false;
1811 only_constants
= info
->rhs_code
== INTEGER_CST
;
1812 first_nonmergeable_order
= ~0U;
1813 lp_nr
= info
->lp_nr
;
1814 unsigned HOST_WIDE_INT align_bitpos
= 0;
1815 get_object_alignment_1 (gimple_assign_lhs (info
->stmt
),
1816 &align
, &align_bitpos
);
1817 align_base
= start
- align_bitpos
;
1818 for (int i
= 0; i
< 2; ++i
)
1820 store_operand_info
&op
= info
->ops
[i
];
1821 if (op
.base_addr
== NULL_TREE
)
1824 load_align_base
[i
] = 0;
1828 get_object_alignment_1 (op
.val
, &load_align
[i
], &align_bitpos
);
1829 load_align_base
[i
] = op
.bitpos
- align_bitpos
;
1833 stores
.safe_push (info
);
1834 last_stmt
= info
->stmt
;
1835 last_order
= info
->order
;
1836 first_stmt
= last_stmt
;
1837 first_order
= last_order
;
1841 merged_store_group::~merged_store_group ()
1847 /* Return true if the store described by INFO can be merged into the group. */
1850 merged_store_group::can_be_merged_into (store_immediate_info
*info
)
1852 /* Do not merge bswap patterns. */
1853 if (info
->rhs_code
== LROTATE_EXPR
)
1856 if (info
->lp_nr
!= lp_nr
)
1859 /* The canonical case. */
1860 if (info
->rhs_code
== stores
[0]->rhs_code
)
1863 /* BIT_INSERT_EXPR is compatible with INTEGER_CST. */
1864 if (info
->rhs_code
== BIT_INSERT_EXPR
&& stores
[0]->rhs_code
== INTEGER_CST
)
1867 if (stores
[0]->rhs_code
== BIT_INSERT_EXPR
&& info
->rhs_code
== INTEGER_CST
)
1870 /* We can turn MEM_REF into BIT_INSERT_EXPR for bit-field stores. */
1871 if (info
->rhs_code
== MEM_REF
1872 && (stores
[0]->rhs_code
== INTEGER_CST
1873 || stores
[0]->rhs_code
== BIT_INSERT_EXPR
)
1874 && info
->bitregion_start
== stores
[0]->bitregion_start
1875 && info
->bitregion_end
== stores
[0]->bitregion_end
)
1878 if (stores
[0]->rhs_code
== MEM_REF
1879 && (info
->rhs_code
== INTEGER_CST
1880 || info
->rhs_code
== BIT_INSERT_EXPR
)
1881 && info
->bitregion_start
== stores
[0]->bitregion_start
1882 && info
->bitregion_end
== stores
[0]->bitregion_end
)
/* Helper method for merge_into and merge_overlapping to do
   the common part.  */
1892 merged_store_group::do_merge (store_immediate_info
*info
)
1894 bitregion_start
= MIN (bitregion_start
, info
->bitregion_start
);
1895 bitregion_end
= MAX (bitregion_end
, info
->bitregion_end
);
1897 unsigned int this_align
;
1898 unsigned HOST_WIDE_INT align_bitpos
= 0;
1899 get_object_alignment_1 (gimple_assign_lhs (info
->stmt
),
1900 &this_align
, &align_bitpos
);
1901 if (this_align
> align
)
1904 align_base
= info
->bitpos
- align_bitpos
;
1906 for (int i
= 0; i
< 2; ++i
)
1908 store_operand_info
&op
= info
->ops
[i
];
1912 get_object_alignment_1 (op
.val
, &this_align
, &align_bitpos
);
1913 if (this_align
> load_align
[i
])
1915 load_align
[i
] = this_align
;
1916 load_align_base
[i
] = op
.bitpos
- align_bitpos
;
1920 gimple
*stmt
= info
->stmt
;
1921 stores
.safe_push (info
);
1922 if (info
->order
> last_order
)
1924 last_order
= info
->order
;
1927 else if (info
->order
< first_order
)
1929 first_order
= info
->order
;
1932 if (info
->rhs_code
!= INTEGER_CST
)
1933 only_constants
= false;
/* Merge a store recorded by INFO into this merged store.
   The store is not overlapping with the existing recorded
   stores already.  */
1941 merged_store_group::merge_into (store_immediate_info
*info
)
1943 /* Make sure we're inserting in the position we think we're inserting. */
1944 gcc_assert (info
->bitpos
>= start
+ width
1945 && info
->bitregion_start
<= bitregion_end
);
1947 width
= info
->bitpos
+ info
->bitsize
- start
;
1951 /* Merge a store described by INFO into this merged store.
1952 INFO overlaps in some way with the current store (i.e. it's not contiguous
1953 which is handled by merged_store_group::merge_into). */
1956 merged_store_group::merge_overlapping (store_immediate_info
*info
)
1958 /* If the store extends the size of the group, extend the width. */
1959 if (info
->bitpos
+ info
->bitsize
> start
+ width
)
1960 width
= info
->bitpos
+ info
->bitsize
- start
;
1965 /* Go through all the recorded stores in this group in program order and
1966 apply their values to the VAL byte array to create the final merged
1967 value. Return true if the operation succeeded. */
1970 merged_store_group::apply_stores ()
  /* Make sure we have more than one store in the group, otherwise we cannot
     merge anything.  */
1974 if (bitregion_start
% BITS_PER_UNIT
!= 0
1975 || bitregion_end
% BITS_PER_UNIT
!= 0
1976 || stores
.length () == 1)
1979 stores
.qsort (sort_by_order
);
1980 store_immediate_info
*info
;
1982 /* Create a power-of-2-sized buffer for native_encode_expr. */
1983 buf_size
= 1 << ceil_log2 ((bitregion_end
- bitregion_start
) / BITS_PER_UNIT
);
1984 val
= XNEWVEC (unsigned char, 2 * buf_size
);
1985 mask
= val
+ buf_size
;
1986 memset (val
, 0, buf_size
);
1987 memset (mask
, ~0U, buf_size
);
1989 FOR_EACH_VEC_ELT (stores
, i
, info
)
1991 unsigned int pos_in_buffer
= info
->bitpos
- bitregion_start
;
1993 if (info
->ops
[0].val
&& info
->ops
[0].base_addr
== NULL_TREE
)
1994 cst
= info
->ops
[0].val
;
1995 else if (info
->ops
[1].val
&& info
->ops
[1].base_addr
== NULL_TREE
)
1996 cst
= info
->ops
[1].val
;
2002 if (info
->rhs_code
== BIT_INSERT_EXPR
)
2003 bit_insertion
= true;
2005 ret
= encode_tree_to_bitpos (cst
, val
, info
->bitsize
,
2006 pos_in_buffer
, buf_size
);
2008 unsigned char *m
= mask
+ (pos_in_buffer
/ BITS_PER_UNIT
);
2009 if (BYTES_BIG_ENDIAN
)
2010 clear_bit_region_be (m
, (BITS_PER_UNIT
- 1
2011 - (pos_in_buffer
% BITS_PER_UNIT
)),
2014 clear_bit_region (m
, pos_in_buffer
% BITS_PER_UNIT
, info
->bitsize
);
2015 if (cst
&& dump_file
&& (dump_flags
& TDF_DETAILS
))
2019 fputs ("After writing ", dump_file
);
2020 print_generic_expr (dump_file
, cst
, TDF_NONE
);
2021 fprintf (dump_file
, " of size " HOST_WIDE_INT_PRINT_DEC
2022 " at position %d\n", info
->bitsize
, pos_in_buffer
);
2023 fputs (" the merged value contains ", dump_file
);
2024 dump_char_array (dump_file
, val
, buf_size
);
2025 fputs (" the merged mask contains ", dump_file
);
2026 dump_char_array (dump_file
, mask
, buf_size
);
2028 fputs (" bit insertion is required\n", dump_file
);
2031 fprintf (dump_file
, "Failed to merge stores\n");
2036 stores
.qsort (sort_by_bitpos
);
2040 /* Structure describing the store chain. */
2042 class imm_store_chain_info
2045 /* Doubly-linked list that imposes an order on chain processing.
2046 PNXP (prev's next pointer) points to the head of a list, or to
2047 the next field in the previous chain in the list.
2048 See pass_store_merging::m_stores_head for more rationale. */
2049 imm_store_chain_info
*next
, **pnxp
;
2051 auto_vec
<store_immediate_info
*> m_store_info
;
2052 auto_vec
<merged_store_group
*> m_merged_store_groups
;
2054 imm_store_chain_info (imm_store_chain_info
*&inspt
, tree b_a
)
2055 : next (inspt
), pnxp (&inspt
), base_addr (b_a
)
2060 gcc_checking_assert (pnxp
== next
->pnxp
);
2064 ~imm_store_chain_info ()
2069 gcc_checking_assert (&next
== next
->pnxp
);
2073 bool terminate_and_process_chain ();
2074 bool try_coalesce_bswap (merged_store_group
*, unsigned int, unsigned int);
2075 bool coalesce_immediate_stores ();
2076 bool output_merged_store (merged_store_group
*);
2077 bool output_merged_stores ();

const pass_data pass_data_tree_store_merging = {
  GIMPLE_PASS,     /* type */
  "store-merging", /* name */
  OPTGROUP_NONE,   /* optinfo_flags */
  TV_GIMPLE_STORE_MERGING, /* tv_id */
  PROP_ssa,        /* properties_required */
  0,               /* properties_provided */
  0,               /* properties_destroyed */
  0,               /* todo_flags_start */
  TODO_update_ssa, /* todo_flags_finish */
};

class pass_store_merging : public gimple_opt_pass
{
public:
  pass_store_merging (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_tree_store_merging, ctxt), m_stores_head ()
  {
  }

  /* Pass not supported for PDP-endian, nor for insane hosts or
     target character sizes where native_{encode,interpret}_expr
     doesn't work properly.  */
  virtual bool
  gate (function *)
  {
    return flag_store_merging
           && BYTES_BIG_ENDIAN == WORDS_BIG_ENDIAN
           && CHAR_BIT == 8
           && BITS_PER_UNIT == 8;
  }

  virtual unsigned int execute (function *);

private:
  hash_map<tree_operand_hash, class imm_store_chain_info *> m_stores;

  /* Form a doubly-linked stack of the elements of m_stores, so that
     we can iterate over them in a predictable way.  Using this order
     avoids extraneous differences in the compiler output just because
     of tree pointer variations (e.g. different chains end up in
     different positions of m_stores, so they are handled in different
     orders, so they allocate or release SSA names in different
     orders, and when they get reused, subsequent passes end up
     getting different SSA names, which may ultimately change
     decisions when going out of SSA).  */
  imm_store_chain_info *m_stores_head;

  bool process_store (gimple *);
  bool terminate_and_process_chain (imm_store_chain_info *);
  bool terminate_all_aliasing_chains (imm_store_chain_info **, gimple *);
  bool terminate_and_process_all_chains ();
}; // class pass_store_merging

/* Terminate and process all recorded chains.  Return true if any changes
   were made.  */

bool
pass_store_merging::terminate_and_process_all_chains ()
{
  bool ret = false;
  while (m_stores_head)
    ret |= terminate_and_process_chain (m_stores_head);
  gcc_assert (m_stores.is_empty ());
  return ret;
}

/* Terminate all chains that are affected by the statement STMT.
   CHAIN_INFO is the chain we should ignore from the checks if
   non-NULL.  Return true if any changes were made.  */

bool
pass_store_merging::terminate_all_aliasing_chains (imm_store_chain_info
                                                     **chain_info,
                                                   gimple *stmt)
{
  bool ret = false;

  /* If the statement doesn't touch memory it can't alias.  */
  if (!gimple_vuse (stmt))
    return false;

  tree store_lhs = gimple_store_p (stmt) ? gimple_get_lhs (stmt) : NULL_TREE;
  ao_ref store_lhs_ref;
  ao_ref_init (&store_lhs_ref, store_lhs);
  for (imm_store_chain_info *next = m_stores_head, *cur = next; cur; cur = next)
    {
      next = cur->next;

      /* We already checked all the stores in chain_info and terminated the
         chain if necessary.  Skip it here.  */
      if (chain_info && *chain_info == cur)
        continue;

      store_immediate_info *info;
      unsigned int i;
      FOR_EACH_VEC_ELT (cur->m_store_info, i, info)
        {
          tree lhs = gimple_assign_lhs (info->stmt);
          ao_ref lhs_ref;
          ao_ref_init (&lhs_ref, lhs);
          if (ref_maybe_used_by_stmt_p (stmt, &lhs_ref)
              || stmt_may_clobber_ref_p_1 (stmt, &lhs_ref)
              || (store_lhs && refs_may_alias_p_1 (&store_lhs_ref,
                                                   &lhs_ref, false)))
            {
              if (dump_file && (dump_flags & TDF_DETAILS))
                {
                  fprintf (dump_file, "stmt causes chain termination:\n");
                  print_gimple_stmt (dump_file, stmt, 0);
                }
              ret |= terminate_and_process_chain (cur);
              break;
            }
        }
    }

  return ret;
}

/* Helper function.  Terminate the recorded chain storing to base object
   BASE.  Return true if the merging and output was successful.  The m_stores
   entry is removed after the processing in any case.  */

bool
pass_store_merging::terminate_and_process_chain (imm_store_chain_info *chain_info)
{
  bool ret = chain_info->terminate_and_process_chain ();
  m_stores.remove (chain_info->base_addr);
  delete chain_info;
  return ret;
}

/* Return true if stmts in between FIRST (inclusive) and LAST (exclusive)
   may clobber REF.  FIRST and LAST must have non-NULL vdef.  We want to
   be able to sink load of REF across stores between FIRST and LAST, up
   to right before LAST.  */

static bool
stmts_may_clobber_ref_p (gimple *first, gimple *last, tree ref)
{
  ao_ref r;
  ao_ref_init (&r, ref);
  unsigned int count = 0;
  tree vop = gimple_vdef (last);
  gimple *stmt;

  /* Return true conservatively if the basic blocks are different.  */
  if (gimple_bb (first) != gimple_bb (last))
    return true;

  do
    {
      stmt = SSA_NAME_DEF_STMT (vop);
      if (stmt_may_clobber_ref_p_1 (stmt, &r))
        return true;
      if (gimple_store_p (stmt)
          && refs_anti_dependent_p (ref, gimple_get_lhs (stmt)))
        return true;
      /* Avoid quadratic compile time by bounding the number of checks
         we perform.  */
      if (++count > MAX_STORE_ALIAS_CHECKS)
        return true;
      vop = gimple_vuse (stmt);
    }
  while (stmt != first);

  return false;
}
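
/* Illustrative sketch (not part of the upstream sources): for a block such
   as
     # .MEM_2 = VDEF <.MEM_1>
     t.a = _1;          <-- FIRST
     # .MEM_3 = VDEF <.MEM_2>
     t.b = _2;
     # .MEM_4 = VDEF <.MEM_3>
     t.c = _3;          <-- LAST
   the loop above starts from gimple_vdef (LAST) and walks the virtual
   use-def chain backwards, checking each store it passes against REF until
   it reaches FIRST, so a load of REF may only be sunk below the range if
   none of those stores can clobber or anti-depend on it.  */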

/* Return true if INFO->ops[IDX] is mergeable with the
   corresponding loads already in MERGED_STORE group.
   BASE_ADDR is the base address of the whole store group.  */

static bool
compatible_load_p (merged_store_group *merged_store,
                   store_immediate_info *info,
                   tree base_addr, int idx)
{
  store_immediate_info *infof = merged_store->stores[0];
  if (!info->ops[idx].base_addr
      || maybe_ne (info->ops[idx].bitpos - infof->ops[idx].bitpos,
                   info->bitpos - infof->bitpos)
      || !operand_equal_p (info->ops[idx].base_addr,
                           infof->ops[idx].base_addr, 0))
    return false;

  store_immediate_info *infol = merged_store->stores.last ();
  tree load_vuse = gimple_vuse (info->ops[idx].stmt);
  /* In this case all vuses should be the same, e.g.
     _1 = s.a; _2 = s.b; _3 = _1 | 1; t.a = _3; _4 = _2 | 2; t.b = _4;
     or
     _1 = s.a; _2 = s.b; t.a = _1; t.b = _2;
     and we can emit the coalesced load next to any of those loads.  */
  if (gimple_vuse (infof->ops[idx].stmt) == load_vuse
      && gimple_vuse (infol->ops[idx].stmt) == load_vuse)
    return true;

  /* Otherwise, at least for now require that the load has the same
     vuse as the store.  See following examples.  */
  if (gimple_vuse (info->stmt) != load_vuse)
    return false;

  if (gimple_vuse (infof->stmt) != gimple_vuse (infof->ops[idx].stmt)
      || (infof != infol
          && gimple_vuse (infol->stmt) != gimple_vuse (infol->ops[idx].stmt)))
    return false;

  /* If the load is from the same location as the store, already
     the construction of the immediate chain info guarantees no intervening
     stores, so no further checks are needed.  Example:
     _1 = s.a; _2 = _1 & -7; s.a = _2; _3 = s.b; _4 = _3 & -7; s.b = _4;  */
  if (known_eq (info->ops[idx].bitpos, info->bitpos)
      && operand_equal_p (info->ops[idx].base_addr, base_addr, 0))
    return true;

  /* Otherwise, we need to punt if any of the loads can be clobbered by any
     of the stores in the group, or any other stores in between those.
     Previous calls to compatible_load_p ensured that for all the
     merged_store->stores IDX loads, no stmts starting with
     merged_store->first_stmt and ending right before merged_store->last_stmt
     clobbers those loads.  */
  gimple *first = merged_store->first_stmt;
  gimple *last = merged_store->last_stmt;
  unsigned int i;
  store_immediate_info *infoc;
  /* The stores are sorted by increasing store bitpos, so if info->stmt store
     comes before the so far first load, we'll be changing
     merged_store->first_stmt.  In that case we need to give up if
     any of the earlier processed loads clobber with the stmts in the new
     range.  */
  if (info->order < merged_store->first_order)
    {
      FOR_EACH_VEC_ELT (merged_store->stores, i, infoc)
        if (stmts_may_clobber_ref_p (info->stmt, first, infoc->ops[idx].val))
          return false;
      first = info->stmt;
    }
  /* Similarly, we could change merged_store->last_stmt, so ensure
     in that case no stmts in the new range clobber any of the earlier
     processed loads.  */
  else if (info->order > merged_store->last_order)
    {
      FOR_EACH_VEC_ELT (merged_store->stores, i, infoc)
        if (stmts_may_clobber_ref_p (last, info->stmt, infoc->ops[idx].val))
          return false;
      last = info->stmt;
    }
  /* And finally, we'd be adding a new load to the set, ensure it isn't
     clobbered in the new range.  */
  if (stmts_may_clobber_ref_p (first, last, info->ops[idx].val))
    return false;

  /* Otherwise, we are looking for:
     _1 = s.a; _2 = _1 ^ 15; t.a = _2; _3 = s.b; _4 = _3 ^ 15; t.b = _4;
     or
     _1 = s.a; t.a = _1; _2 = s.b; t.b = _2;  */
  return true;
}

/* Add all refs loaded to compute VAL to REFS vector.  */

static void
gather_bswap_load_refs (vec<tree> *refs, tree val)
{
  if (TREE_CODE (val) != SSA_NAME)
    return;

  gimple *stmt = SSA_NAME_DEF_STMT (val);
  if (!is_gimple_assign (stmt))
    return;

  if (gimple_assign_load_p (stmt))
    {
      refs->safe_push (gimple_assign_rhs1 (stmt));
      return;
    }

  switch (gimple_assign_rhs_class (stmt))
    {
    case GIMPLE_BINARY_RHS:
      gather_bswap_load_refs (refs, gimple_assign_rhs2 (stmt));
      /* FALLTHRU */
    case GIMPLE_UNARY_RHS:
      gather_bswap_load_refs (refs, gimple_assign_rhs1 (stmt));
      break;
    default:
      gcc_unreachable ();
    }
}

/* Check if there are any stores in M_STORE_INFO after index I
   (where M_STORE_INFO must be sorted by sort_by_bitpos) that overlap
   a potential group ending with END that have their order
   smaller than LAST_ORDER.  ALL_INTEGER_CST_P is true if
   all the stores already merged and the one under consideration
   have rhs_code of INTEGER_CST.  Return true if there are no such stores.
   Consider:
     MEM[(long long int *)p_28] = 0;
     MEM[(long long int *)p_28 + 8B] = 0;
     MEM[(long long int *)p_28 + 16B] = 0;
     MEM[(long long int *)p_28 + 24B] = 0;
     MEM[(int *)p_28 + 8B] = _129;
     MEM[(int *)p_28].a = -1;
   We already have
     MEM[(long long int *)p_28] = 0;
     MEM[(int *)p_28].a = -1;
   stmts in the current group and need to consider if it is safe to
   add MEM[(long long int *)p_28 + 8B] = 0; store into the same group.
   There is an overlap between that store and the MEM[(int *)p_28 + 8B] = _129;
   store though, so if we add the MEM[(long long int *)p_28 + 8B] = 0;
   into the group and merging of those 3 stores is successful, merged
   stmts will be emitted at the latest store from that group, i.e.
   LAST_ORDER, which is the MEM[(int *)p_28].a = -1; store.
   The MEM[(int *)p_28 + 8B] = _129; store that originally follows
   the MEM[(long long int *)p_28 + 8B] = 0; would now be before it,
   so we need to refuse merging MEM[(long long int *)p_28 + 8B] = 0;
   into the group.  That way it will be its own store group and will
   not be touched.  If ALL_INTEGER_CST_P and there are overlapping
   INTEGER_CST stores, those are mergeable using merge_overlapping,
   so don't return false for those.  */

static bool
check_no_overlap (vec<store_immediate_info *> m_store_info, unsigned int i,
                  bool all_integer_cst_p, unsigned int last_order,
                  unsigned HOST_WIDE_INT end)
{
  unsigned int len = m_store_info.length ();
  for (++i; i < len; ++i)
    {
      store_immediate_info *info = m_store_info[i];
      if (info->bitpos >= end)
        break;
      if (info->order < last_order
          && (!all_integer_cst_p || info->rhs_code != INTEGER_CST))
        return false;
    }
  return true;
}

/* Return true if m_store_info[first] and at least one following store
   form a group which store try_size bitsize value which is byte swapped
   from a memory load or some value, or identity from some value.
   This uses the bswap pass APIs.  */

bool
imm_store_chain_info::try_coalesce_bswap (merged_store_group *merged_store,
                                          unsigned int first,
                                          unsigned int try_size)
{
  unsigned int len = m_store_info.length (), last = first;
  unsigned HOST_WIDE_INT width = m_store_info[first]->bitsize;
  if (width >= try_size)
    return false;
  for (unsigned int i = first + 1; i < len; ++i)
    {
      if (m_store_info[i]->bitpos != m_store_info[first]->bitpos + width
          || m_store_info[i]->lp_nr != merged_store->lp_nr
          || m_store_info[i]->ins_stmt == NULL)
        return false;
      width += m_store_info[i]->bitsize;
      if (width >= try_size)
        {
          last = i;
          break;
        }
    }
  if (width != try_size)
    return false;

  bool allow_unaligned
    = !STRICT_ALIGNMENT && param_store_merging_allow_unaligned;
  /* Punt if the combined store would not be aligned and we need alignment.  */
  if (!allow_unaligned)
    {
      unsigned int align = merged_store->align;
      unsigned HOST_WIDE_INT align_base = merged_store->align_base;
      for (unsigned int i = first + 1; i <= last; ++i)
        {
          unsigned int this_align;
          unsigned HOST_WIDE_INT align_bitpos = 0;
          get_object_alignment_1 (gimple_assign_lhs (m_store_info[i]->stmt),
                                  &this_align, &align_bitpos);
          if (this_align > align)
            {
              align = this_align;
              align_base = m_store_info[i]->bitpos - align_bitpos;
            }
        }
      unsigned HOST_WIDE_INT align_bitpos
        = (m_store_info[first]->bitpos - align_base) & (align - 1);
      if (align_bitpos)
        align = least_bit_hwi (align_bitpos);
      if (align < try_size)
        return false;
    }

  tree type;
  switch (try_size)
    {
    case 16: type = uint16_type_node; break;
    case 32: type = uint32_type_node; break;
    case 64: type = uint64_type_node; break;
    default: gcc_unreachable ();
    }
  struct symbolic_number n;
  gimple *ins_stmt = NULL;
  int vuse_store = -1;
  unsigned int first_order = merged_store->first_order;
  unsigned int last_order = merged_store->last_order;
  gimple *first_stmt = merged_store->first_stmt;
  gimple *last_stmt = merged_store->last_stmt;
  unsigned HOST_WIDE_INT end = merged_store->start + merged_store->width;
  store_immediate_info *infof = m_store_info[first];

  for (unsigned int i = first; i <= last; ++i)
    {
      store_immediate_info *info = m_store_info[i];
      struct symbolic_number this_n = info->n;
      this_n.type = type;
      if (!this_n.base_addr)
        this_n.range = try_size / BITS_PER_UNIT;
      else
        /* Update vuse in case it has changed by output_merged_stores.  */
        this_n.vuse = gimple_vuse (info->ins_stmt);
      unsigned int bitpos = info->bitpos - infof->bitpos;
      if (!do_shift_rotate (LSHIFT_EXPR, &this_n,
                            BYTES_BIG_ENDIAN
                            ? try_size - info->bitsize - bitpos
                            : bitpos))
        return false;
      if (this_n.base_addr && vuse_store)
        {
          unsigned int j;
          for (j = first; j <= last; ++j)
            if (this_n.vuse == gimple_vuse (m_store_info[j]->stmt))
              break;
          if (j > last)
            {
              if (vuse_store == 1)
                return false;
              vuse_store = 0;
            }
        }
      if (i == first)
        {
          n = this_n;
          ins_stmt = info->ins_stmt;
        }
      else
        {
          if (n.base_addr && n.vuse != this_n.vuse)
            {
              if (vuse_store == 0)
                return false;
              vuse_store = 1;
            }
          if (info->order > last_order)
            {
              last_order = info->order;
              last_stmt = info->stmt;
            }
          else if (info->order < first_order)
            {
              first_order = info->order;
              first_stmt = info->stmt;
            }
          end = MAX (end, info->bitpos + info->bitsize);

          ins_stmt = perform_symbolic_merge (ins_stmt, &n, info->ins_stmt,
                                             &this_n, &n);
          if (ins_stmt == NULL)
            return false;
        }
    }

  uint64_t cmpxchg, cmpnop;
  find_bswap_or_nop_finalize (&n, &cmpxchg, &cmpnop);

  /* A complete byte swap should make the symbolic number to start with
     the largest digit in the highest order byte.  Unchanged symbolic
     number indicates a read with same endianness as target architecture.  */
  if (n.n != cmpnop && n.n != cmpxchg)
    return false;

  if (n.base_addr == NULL_TREE && !is_gimple_val (n.src))
    return false;

  if (!check_no_overlap (m_store_info, last, false, last_order, end))
    return false;

  /* Don't handle memory copy this way if normal non-bswap processing
     would handle it too.  */
  if (n.n == cmpnop && (unsigned) n.n_ops == last - first + 1)
    {
      unsigned int i;
      for (i = first; i <= last; ++i)
        if (m_store_info[i]->rhs_code != MEM_REF)
          break;
      if (i == last + 1)
        return false;
    }

  if (n.n == cmpxchg)
    switch (try_size)
      {
      case 16:
        /* Will emit LROTATE_EXPR.  */
        break;
      case 32:
        if (builtin_decl_explicit_p (BUILT_IN_BSWAP32)
            && optab_handler (bswap_optab, SImode) != CODE_FOR_nothing)
          break;
        return false;
      case 64:
        if (builtin_decl_explicit_p (BUILT_IN_BSWAP64)
            && optab_handler (bswap_optab, DImode) != CODE_FOR_nothing)
          break;
        return false;
      default:
        gcc_unreachable ();
      }

  if (!allow_unaligned && n.base_addr)
    {
      unsigned int align = get_object_alignment (n.src);
      if (align < try_size)
        return false;
    }

  /* If each load has vuse of the corresponding store, need to verify
     the loads can be sunk right before the last store.  */
  if (vuse_store == 1)
    {
      auto_vec<tree, 64> refs;
      for (unsigned int i = first; i <= last; ++i)
        gather_bswap_load_refs (&refs,
                                gimple_assign_rhs1 (m_store_info[i]->stmt));

      unsigned int i;
      tree ref;
      FOR_EACH_VEC_ELT (refs, i, ref)
        if (stmts_may_clobber_ref_p (first_stmt, last_stmt, ref))
          return false;
      n.vuse = NULL_TREE;
    }

  infof->n = n;
  infof->ins_stmt = ins_stmt;
  for (unsigned int i = first; i <= last; ++i)
    {
      m_store_info[i]->rhs_code = n.n == cmpxchg ? LROTATE_EXPR : NOP_EXPR;
      m_store_info[i]->ops[0].base_addr = NULL_TREE;
      m_store_info[i]->ops[1].base_addr = NULL_TREE;
      if (i != first)
        merged_store->merge_into (m_store_info[i]);
    }

  return true;
}
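
/* Illustrative note (not part of the upstream sources): the 16-bit case
   above needs no bswap builtin or optab because the merged value can be
   emitted as a rotate by 8 bits, hence the LROTATE_EXPR rhs_code.  E.g.
   with a 16-bit unsigned X on a little-endian target,
     p[0] = x >> 8;
     p[1] = x;
   stores the same two bytes as a single 2-byte store of X rotated by 8.  */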

/* Go through the candidate stores recorded in m_store_info and merge them
   into merged_store_group objects recorded into m_merged_store_groups
   representing the widened stores.  Return true if coalescing was successful
   and the number of widened stores is fewer than the original number
   of stores.  */

bool
imm_store_chain_info::coalesce_immediate_stores ()
{
  /* Anything less can't be processed.  */
  if (m_store_info.length () < 2)
    return false;

  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, "Attempting to coalesce %u stores in chain\n",
             m_store_info.length ());

  store_immediate_info *info;
  unsigned int i, ignore = 0;

  /* Order the stores by the bitposition they write to.  */
  m_store_info.qsort (sort_by_bitpos);

  info = m_store_info[0];
  merged_store_group *merged_store = new merged_store_group (info);
  if (dump_file && (dump_flags & TDF_DETAILS))
    fputs ("New store group\n", dump_file);

  FOR_EACH_VEC_ELT (m_store_info, i, info)
    {
      unsigned HOST_WIDE_INT new_bitregion_start, new_bitregion_end;

      if (i <= ignore)
        goto done;

      /* First try to handle group of stores like:
         p[0] = data >> 24;
         p[1] = data >> 16;
         p[2] = data >> 8;
         p[3] = data;
         using the bswap framework.  */
      if (info->bitpos == merged_store->start + merged_store->width
          && merged_store->stores.length () == 1
          && merged_store->stores[0]->ins_stmt != NULL
          && info->lp_nr == merged_store->lp_nr
          && info->ins_stmt != NULL)
        {
          unsigned int try_size;
          for (try_size = 64; try_size >= 16; try_size >>= 1)
            if (try_coalesce_bswap (merged_store, i - 1, try_size))
              break;

          if (try_size >= 16)
            {
              ignore = i + merged_store->stores.length () - 1;
              m_merged_store_groups.safe_push (merged_store);
              if (ignore < m_store_info.length ())
                merged_store = new merged_store_group (m_store_info[ignore]);
              else
                merged_store = NULL;
              goto done;
            }
        }

      new_bitregion_start
        = MIN (merged_store->bitregion_start, info->bitregion_start);
      new_bitregion_end
        = MAX (merged_store->bitregion_end, info->bitregion_end);

      if (info->order >= merged_store->first_nonmergeable_order
          || (((new_bitregion_end - new_bitregion_start + 1) / BITS_PER_UNIT)
              > (unsigned) param_store_merging_max_size))
        ;

      /* |---store 1---|
               |---store 2---|
         Overlapping stores.  */
      else if (IN_RANGE (info->bitpos, merged_store->start,
                         merged_store->start + merged_store->width - 1)
               /* |---store 1---||---store 2---|
                  Handle also the consecutive INTEGER_CST stores case here,
                  as we have here the code to deal with overlaps.  */
               || (info->bitregion_start <= merged_store->bitregion_end
                   && info->rhs_code == INTEGER_CST
                   && merged_store->only_constants
                   && merged_store->can_be_merged_into (info)))
        {
          /* Only allow overlapping stores of constants.  */
          if (info->rhs_code == INTEGER_CST
              && merged_store->only_constants
              && info->lp_nr == merged_store->lp_nr)
            {
              unsigned int last_order
                = MAX (merged_store->last_order, info->order);
              unsigned HOST_WIDE_INT end
                = MAX (merged_store->start + merged_store->width,
                       info->bitpos + info->bitsize);
              if (check_no_overlap (m_store_info, i, true, last_order, end))
                {
                  /* check_no_overlap call above made sure there are no
                     overlapping stores with non-INTEGER_CST rhs_code
                     in between the first and last of the stores we've
                     just merged.  If there are any INTEGER_CST rhs_code
                     stores in between, we need to merge_overlapping them
                     even if in the sort_by_bitpos order there are other
                     overlapping stores in between.  Keep those stores as is.
                     Example:
                        MEM[(int *)p_28] = 0;
                        MEM[(char *)p_28 + 3B] = 1;
                        MEM[(char *)p_28 + 1B] = 2;
                        MEM[(char *)p_28 + 2B] = MEM[(char *)p_28 + 6B];
                     We can't merge the zero store with the store of two and
                     not merge anything else, because the store of one is
                     in the original order in between those two, but in
                     store_by_bitpos order it comes after the last store that
                     we can't merge with them.  We can merge the first 3 stores
                     and keep the last store as is though.  */
                  unsigned int len = m_store_info.length ();
                  unsigned int try_order = last_order;
                  unsigned int first_nonmergeable_order;
                  unsigned int k;
                  bool last_iter = false;
                  int attempts = 0;
                  do
                    {
                      unsigned int max_order = 0;
                      unsigned first_nonmergeable_int_order = ~0U;
                      unsigned HOST_WIDE_INT this_end = end;
                      k = i;
                      first_nonmergeable_order = ~0U;
                      for (unsigned int j = i + 1; j < len; ++j)
                        {
                          store_immediate_info *info2 = m_store_info[j];
                          if (info2->bitpos >= this_end)
                            break;
                          if (info2->order < try_order)
                            {
                              if (info2->rhs_code != INTEGER_CST
                                  || info2->lp_nr != merged_store->lp_nr)
                                {
                                  /* Normally check_no_overlap makes sure this
                                     doesn't happen, but if end grows below,
                                     then we need to process more stores than
                                     check_no_overlap verified.  Example:
                                      MEM[(int *)p_5] = 0;
                                      MEM[(short *)p_5 + 3B] = 1;
                                      MEM[(char *)p_5 + 4B] = _9;
                                      MEM[(char *)p_5 + 2B] = 2;  */
                                  k = 0;
                                  break;
                                }
                              k = j;
                              this_end = MAX (this_end,
                                              info2->bitpos + info2->bitsize);
                            }
                          else if (info2->rhs_code == INTEGER_CST
                                   && info2->lp_nr == merged_store->lp_nr
                                   && !last_iter)
                            {
                              max_order = MAX (max_order, info2->order + 1);
                              first_nonmergeable_int_order
                                = MIN (first_nonmergeable_int_order,
                                       info2->order);
                            }
                          else
                            first_nonmergeable_order
                              = MIN (first_nonmergeable_order, info2->order);
                        }
                      if (k == 0)
                        {
                          if (last_order == try_order)
                            break;
                          /* If this failed, but only because we grew
                             try_order, retry with the last working one,
                             so that we merge at least something.  */
                          try_order = last_order;
                          last_iter = true;
                          continue;
                        }
                      last_order = try_order;
                      /* Retry with a larger try_order to see if we could
                         merge some further INTEGER_CST stores.  */
                      if (max_order
                          && (first_nonmergeable_int_order
                              < first_nonmergeable_order))
                        {
                          try_order = MIN (max_order,
                                           first_nonmergeable_order);
                          try_order
                            = MIN (try_order,
                                   merged_store->first_nonmergeable_order);
                          if (try_order > last_order && ++attempts < 16)
                            continue;
                        }
                      first_nonmergeable_order
                        = MIN (first_nonmergeable_order,
                               first_nonmergeable_int_order);
                      end = this_end;
                      break;
                    }
                  while (1);

                  if (k != 0)
                    {
                      merged_store->merge_overlapping (info);

                      merged_store->first_nonmergeable_order
                        = MIN (merged_store->first_nonmergeable_order,
                               first_nonmergeable_order);

                      for (unsigned int j = i + 1; j <= k; j++)
                        {
                          store_immediate_info *info2 = m_store_info[j];
                          gcc_assert (info2->bitpos < end);
                          if (info2->order < last_order)
                            {
                              gcc_assert (info2->rhs_code == INTEGER_CST);
                              if (info != info2)
                                merged_store->merge_overlapping (info2);
                            }
                          /* Other stores are kept and not merged in any
                             way.  */
                        }
                      ignore = k;
                      goto done;
                    }
                }
            }
        }
      /* |---store 1---||---store 2---|
         This store is consecutive to the previous one.
         Merge it into the current store group.  There can be gaps in between
         the stores, but there can't be gaps in between bitregions.  */
      else if (info->bitregion_start <= merged_store->bitregion_end
               && merged_store->can_be_merged_into (info))
        {
          store_immediate_info *infof = merged_store->stores[0];

          /* All the rhs_code ops that take 2 operands are commutative,
             swap the operands if it could make the operands compatible.  */
          if (infof->ops[0].base_addr
              && infof->ops[1].base_addr
              && info->ops[0].base_addr
              && info->ops[1].base_addr
              && known_eq (info->ops[1].bitpos - infof->ops[0].bitpos,
                           info->bitpos - infof->bitpos)
              && operand_equal_p (info->ops[1].base_addr,
                                  infof->ops[0].base_addr, 0))
            {
              std::swap (info->ops[0], info->ops[1]);
              info->ops_swapped_p = true;
            }
          if (check_no_overlap (m_store_info, i, false,
                                MAX (merged_store->last_order, info->order),
                                MAX (merged_store->start + merged_store->width,
                                     info->bitpos + info->bitsize)))
            {
              /* Turn MEM_REF into BIT_INSERT_EXPR for bit-field stores.  */
              if (info->rhs_code == MEM_REF && infof->rhs_code != MEM_REF)
                {
                  info->rhs_code = BIT_INSERT_EXPR;
                  info->ops[0].val = gimple_assign_rhs1 (info->stmt);
                  info->ops[0].base_addr = NULL_TREE;
                }
              else if (infof->rhs_code == MEM_REF && info->rhs_code != MEM_REF)
                {
                  store_immediate_info *infoj;
                  unsigned int j;
                  FOR_EACH_VEC_ELT (merged_store->stores, j, infoj)
                    {
                      infoj->rhs_code = BIT_INSERT_EXPR;
                      infoj->ops[0].val = gimple_assign_rhs1 (infoj->stmt);
                      infoj->ops[0].base_addr = NULL_TREE;
                    }
                }
              if ((infof->ops[0].base_addr
                   ? compatible_load_p (merged_store, info, base_addr, 0)
                   : !info->ops[0].base_addr)
                  && (infof->ops[1].base_addr
                      ? compatible_load_p (merged_store, info, base_addr, 1)
                      : !info->ops[1].base_addr))
                {
                  merged_store->merge_into (info);
                  goto done;
                }
            }
        }

      /* |---store 1---| <gap> |---store 2---|.
         Gap between stores or the rhs not compatible.  Start a new group.  */

      /* Try to apply all the stores recorded for the group to determine
         the bitpattern they write and discard it if that fails.
         This will also reject single-store groups.  */
      if (merged_store->apply_stores ())
        m_merged_store_groups.safe_push (merged_store);
      else
        delete merged_store;

      merged_store = new merged_store_group (info);
      if (dump_file && (dump_flags & TDF_DETAILS))
        fputs ("New store group\n", dump_file);

    done:
      if (dump_file && (dump_flags & TDF_DETAILS))
        {
          fprintf (dump_file, "Store %u:\nbitsize:" HOST_WIDE_INT_PRINT_DEC
                   " bitpos:" HOST_WIDE_INT_PRINT_DEC " val:",
                   i, info->bitsize, info->bitpos);
          print_generic_expr (dump_file, gimple_assign_rhs1 (info->stmt));
          fputc ('\n', dump_file);
        }
    }

  /* Record or discard the last store group.  */
  if (merged_store)
    {
      if (merged_store->apply_stores ())
        m_merged_store_groups.safe_push (merged_store);
      else
        delete merged_store;
    }

  gcc_assert (m_merged_store_groups.length () <= m_store_info.length ());

  bool success
    = !m_merged_store_groups.is_empty ()
      && m_merged_store_groups.length () < m_store_info.length ();

  if (success && dump_file)
    fprintf (dump_file, "Coalescing successful!\nMerged into %u stores\n",
             m_merged_store_groups.length ());

  return success;
}

/* Return the type to use for the merged stores or loads described by STMTS.
   This is needed to get the alias sets right.  If IS_LOAD, look for rhs,
   otherwise lhs.  Additionally set *CLIQUEP and *BASEP to MR_DEPENDENCE_*
   of the MEM_REFs if any.  */

static tree
get_alias_type_for_stmts (vec<gimple *> &stmts, bool is_load,
                          unsigned short *cliquep, unsigned short *basep)
{
  gimple *stmt;
  unsigned int i;
  tree type = NULL_TREE;
  tree ret = NULL_TREE;
  *cliquep = 0;
  *basep = 0;

  FOR_EACH_VEC_ELT (stmts, i, stmt)
    {
      tree ref = is_load ? gimple_assign_rhs1 (stmt)
                         : gimple_assign_lhs (stmt);
      tree type1 = reference_alias_ptr_type (ref);
      tree base = get_base_address (ref);

      if (i == 0)
        {
          if (TREE_CODE (base) == MEM_REF)
            {
              *cliquep = MR_DEPENDENCE_CLIQUE (base);
              *basep = MR_DEPENDENCE_BASE (base);
            }
          ret = type = type1;
          continue;
        }
      if (!alias_ptr_types_compatible_p (type, type1))
        ret = ptr_type_node;
      if (TREE_CODE (base) != MEM_REF
          || *cliquep != MR_DEPENDENCE_CLIQUE (base)
          || *basep != MR_DEPENDENCE_BASE (base))
        {
          *cliquep = 0;
          *basep = 0;
        }
    }
  return ret;
}
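
/* Illustrative note (not part of the upstream sources): if one constituent
   store is done through a "short *" access and another through an "int *"
   access, their alias pointer types are not compatible, so the merged
   access falls back to ptr_type_node (alias set 0); likewise the
   MR_DEPENDENCE_CLIQUE/BASE pair is only kept when every MEM_REF in STMTS
   agrees on it.  */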

/* Return the location_t information we can find among the statements
   in STMTS.  */

static location_t
get_location_for_stmts (vec<gimple *> &stmts)
{
  gimple *stmt;
  unsigned int i;

  FOR_EACH_VEC_ELT (stmts, i, stmt)
    if (gimple_has_location (stmt))
      return gimple_location (stmt);

  return UNKNOWN_LOCATION;
}

/* Used to describe a store resulting from splitting a wide store into smaller
   regularly-sized stores in split_group.  */

class split_store
{
public:
  unsigned HOST_WIDE_INT bytepos;
  unsigned HOST_WIDE_INT size;
  unsigned HOST_WIDE_INT align;
  auto_vec<store_immediate_info *> orig_stores;
  /* True if there is a single orig stmt covering the whole split store.  */
  bool orig;
  split_store (unsigned HOST_WIDE_INT, unsigned HOST_WIDE_INT,
               unsigned HOST_WIDE_INT);
};

/* Simple constructor.  */

split_store::split_store (unsigned HOST_WIDE_INT bp,
                          unsigned HOST_WIDE_INT sz,
                          unsigned HOST_WIDE_INT al)
                          : bytepos (bp), size (sz), align (al), orig (false)
{
  orig_stores.create (0);
}

/* Record all stores in GROUP that write to the region starting at BITPOS and
   is of size BITSIZE.  Record infos for such statements in STORES if
   non-NULL.  The stores in GROUP must be sorted by bitposition.  Return INFO
   if there is exactly one original store in the range (in that case ignore
   clobber stmts, unless there are only clobber stmts).  */

static store_immediate_info *
find_constituent_stores (class merged_store_group *group,
                         vec<store_immediate_info *> *stores,
                         unsigned int *first,
                         unsigned HOST_WIDE_INT bitpos,
                         unsigned HOST_WIDE_INT bitsize)
{
  store_immediate_info *info, *ret = NULL;
  unsigned int i;
  bool second = false;
  bool update_first = true;
  unsigned HOST_WIDE_INT end = bitpos + bitsize;
  for (i = *first; group->stores.iterate (i, &info); ++i)
    {
      unsigned HOST_WIDE_INT stmt_start = info->bitpos;
      unsigned HOST_WIDE_INT stmt_end = stmt_start + info->bitsize;
      if (stmt_end <= bitpos)
        {
          /* BITPOS passed to this function never decreases from within the
             same split_group call, so optimize and don't scan info records
             which are known to end before or at BITPOS next time.
             Only do it if all stores before this one also pass this.  */
          if (update_first)
            *first = i + 1;
          continue;
        }
      else
        update_first = false;

      /* The stores in GROUP are ordered by bitposition so if we're past
         the region for this group return early.  */
      if (stmt_start >= end)
        return ret;

      if (gimple_clobber_p (info->stmt))
        {
          if (stores)
            stores->safe_push (info);
          if (ret == NULL)
            ret = info;
          continue;
        }
      if (stores)
        {
          stores->safe_push (info);
          if (ret && !gimple_clobber_p (ret->stmt))
            {
              ret = NULL;
              second = true;
            }
        }
      else if (ret && !gimple_clobber_p (ret->stmt))
        return NULL;
      if (!second)
        ret = info;
    }
  return second ? NULL : ret;
}
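
/* Illustrative sketch (not part of the upstream sources): for a group whose
   stores cover bit ranges [0, 32), [32, 40) and [40, 64), a query with
   BITPOS == 32, BITSIZE == 8 returns the single [32, 40) store, while a
   query with BITPOS == 32, BITSIZE == 32 records the last two stores in
   STORES (when non-NULL) but returns NULL, because more than one original
   statement writes into that range.  */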

/* Return how many SSA_NAMEs used to compute value to store in the INFO
   store have multiple uses.  If any SSA_NAME has multiple uses, also
   count statements needed to compute it.  */

static unsigned
count_multiple_uses (store_immediate_info *info)
{
  gimple *stmt = info->stmt;
  unsigned ret = 0;
  switch (info->rhs_code)
    {
    case INTEGER_CST:
      return 0;
    case BIT_AND_EXPR:
    case BIT_IOR_EXPR:
    case BIT_XOR_EXPR:
      if (info->bit_not_p)
        {
          if (!has_single_use (gimple_assign_rhs1 (stmt)))
            ret = 1; /* Fall through below to return
                        the BIT_NOT_EXPR stmt and then
                        BIT_{AND,IOR,XOR}_EXPR and anything it
                        uses.  */
          else
            /* stmt is after this the BIT_NOT_EXPR.  */
            stmt = SSA_NAME_DEF_STMT (gimple_assign_rhs1 (stmt));
        }
      if (!has_single_use (gimple_assign_rhs1 (stmt)))
        {
          ret += 1 + info->ops[0].bit_not_p;
          if (info->ops[1].base_addr)
            ret += 1 + info->ops[1].bit_not_p;
          return ret + 1;
        }
      stmt = SSA_NAME_DEF_STMT (gimple_assign_rhs1 (stmt));
      /* stmt is now the BIT_*_EXPR.  */
      if (!has_single_use (gimple_assign_rhs1 (stmt)))
        ret += 1 + info->ops[info->ops_swapped_p].bit_not_p;
      else if (info->ops[info->ops_swapped_p].bit_not_p)
        {
          gimple *stmt2 = SSA_NAME_DEF_STMT (gimple_assign_rhs1 (stmt));
          if (!has_single_use (gimple_assign_rhs1 (stmt2)))
            ++ret;
        }
      if (info->ops[1].base_addr == NULL_TREE)
        {
          gcc_checking_assert (!info->ops_swapped_p);
          return ret;
        }
      if (!has_single_use (gimple_assign_rhs2 (stmt)))
        ret += 1 + info->ops[1 - info->ops_swapped_p].bit_not_p;
      else if (info->ops[1 - info->ops_swapped_p].bit_not_p)
        {
          gimple *stmt2 = SSA_NAME_DEF_STMT (gimple_assign_rhs2 (stmt));
          if (!has_single_use (gimple_assign_rhs1 (stmt2)))
            ++ret;
        }
      return ret;
    case MEM_REF:
      if (!has_single_use (gimple_assign_rhs1 (stmt)))
        return 1 + info->ops[0].bit_not_p;
      else if (info->ops[0].bit_not_p)
        {
          stmt = SSA_NAME_DEF_STMT (gimple_assign_rhs1 (stmt));
          if (!has_single_use (gimple_assign_rhs1 (stmt)))
            return 1;
        }
      return 0;
    case BIT_INSERT_EXPR:
      return has_single_use (gimple_assign_rhs1 (stmt)) ? 0 : 1;
    default:
      gcc_unreachable ();
    }
}
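
/* Illustrative sketch (not part of the upstream sources): for a candidate
   store like
     _1 = q->x;          (single use)
     _2 = _1 & 0xf0f0;   (_2 also has an unrelated use)
     p->x = _2;
   with rhs_code BIT_AND_EXPR, ops[0] a load and ops[1] a constant, the
   rhs1 of the store (_2) has multiple uses, so the function returns 2: the
   BIT_AND_EXPR statement and the load feeding it have to be kept even after
   the store itself is merged away.  */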

/* Split a merged store described by GROUP by populating the SPLIT_STORES
   vector (if non-NULL) with split_store structs describing the byte offset
   (from the base), the bit size and alignment of each store as well as the
   original statements involved in each such split group.
   This is to separate the splitting strategy from the statement
   building/emission/linking done in output_merged_store.
   Return number of new stores.
   If ALLOW_UNALIGNED_STORE is false, then all stores must be aligned.
   If ALLOW_UNALIGNED_LOAD is false, then all loads must be aligned.
   BZERO_FIRST may be true only when the first store covers the whole group
   and clears it; if BZERO_FIRST is true, keep that first store in the set
   unmodified and emit further stores for the overrides only.
   If SPLIT_STORES is NULL, it is just a dry run to count number of
   new stores.  */

static unsigned int
split_group (merged_store_group *group, bool allow_unaligned_store,
             bool allow_unaligned_load, bool bzero_first,
             vec<split_store *> *split_stores,
             unsigned *total_orig,
             unsigned *total_new)
{
  unsigned HOST_WIDE_INT pos = group->bitregion_start;
  unsigned HOST_WIDE_INT size = group->bitregion_end - pos;
  unsigned HOST_WIDE_INT bytepos = pos / BITS_PER_UNIT;
  unsigned HOST_WIDE_INT group_align = group->align;
  unsigned HOST_WIDE_INT align_base = group->align_base;
  unsigned HOST_WIDE_INT group_load_align = group_align;
  bool any_orig = false;

  gcc_assert ((size % BITS_PER_UNIT == 0) && (pos % BITS_PER_UNIT == 0));

  if (group->stores[0]->rhs_code == LROTATE_EXPR
      || group->stores[0]->rhs_code == NOP_EXPR)
    {
      gcc_assert (!bzero_first);
      /* For bswap framework using sets of stores, all the checking
         has been done earlier in try_coalesce_bswap and needs to be
         emitted as a single store.  */
      if (total_orig)
        {
          /* Avoid the old/new stmt count heuristics.  It should be
             always beneficial.  */
          total_new[0] = 1;
          total_orig[0] = 2;
        }

      if (split_stores)
        {
          unsigned HOST_WIDE_INT align_bitpos
            = (group->start - align_base) & (group_align - 1);
          unsigned HOST_WIDE_INT align = group_align;
          if (align_bitpos)
            align = least_bit_hwi (align_bitpos);
          bytepos = group->start / BITS_PER_UNIT;
          split_store *store
            = new split_store (bytepos, group->width, align);
          unsigned int first = 0;
          find_constituent_stores (group, &store->orig_stores,
                                   &first, group->start, group->width);
          split_stores->safe_push (store);
        }

      return 1;
    }

  unsigned int ret = 0, first = 0;
  unsigned HOST_WIDE_INT try_pos = bytepos;

  if (total_orig)
    {
      unsigned int i;
      store_immediate_info *info = group->stores[0];

      total_new[0] = 0;
      total_orig[0] = 1; /* The orig store.  */
      info = group->stores[0];
      if (info->ops[0].base_addr)
        total_orig[0]++;
      if (info->ops[1].base_addr)
        total_orig[0]++;
      switch (info->rhs_code)
        {
        case BIT_AND_EXPR:
        case BIT_IOR_EXPR:
        case BIT_XOR_EXPR:
          total_orig[0]++; /* The orig BIT_*_EXPR stmt.  */
          break;
        default:
          break;
        }
      total_orig[0] *= group->stores.length ();

      FOR_EACH_VEC_ELT (group->stores, i, info)
        {
          total_new[0] += count_multiple_uses (info);
          total_orig[0] += (info->bit_not_p
                            + info->ops[0].bit_not_p
                            + info->ops[1].bit_not_p);
        }
    }

  if (!allow_unaligned_load)
    for (int i = 0; i < 2; ++i)
      if (group->load_align[i])
        group_load_align = MIN (group_load_align, group->load_align[i]);

  if (bzero_first)
    {
      store_immediate_info *gstore;
      FOR_EACH_VEC_ELT (group->stores, first, gstore)
        if (!gimple_clobber_p (gstore->stmt))
          break;
      ++first;
      ret = 1;
      if (split_stores)
        {
          split_store *store
            = new split_store (bytepos, gstore->bitsize, align_base);
          store->orig_stores.safe_push (gstore);
          store->orig = true;
          any_orig = true;
          split_stores->safe_push (store);
        }
    }

  while (size > 0)
    {
      if ((allow_unaligned_store || group_align <= BITS_PER_UNIT)
          && (group->mask[try_pos - bytepos] == (unsigned char) ~0U
              || (bzero_first && group->val[try_pos - bytepos] == 0)))
        {
          /* Skip padding bytes.  */
          ++try_pos;
          size -= BITS_PER_UNIT;
          continue;
        }

      unsigned HOST_WIDE_INT try_bitpos = try_pos * BITS_PER_UNIT;
      unsigned int try_size = MAX_STORE_BITSIZE, nonmasked;
      unsigned HOST_WIDE_INT align_bitpos
        = (try_bitpos - align_base) & (group_align - 1);
      unsigned HOST_WIDE_INT align = group_align;
      bool found_orig = false;
      if (align_bitpos)
        align = least_bit_hwi (align_bitpos);
      if (!allow_unaligned_store)
        try_size = MIN (try_size, align);
      if (!allow_unaligned_load)
        {
          /* If we can't do or don't want to do unaligned stores
             as well as loads, we need to take the loads into account
             as well.  */
          unsigned HOST_WIDE_INT load_align = group_load_align;
          align_bitpos = (try_bitpos - align_base) & (load_align - 1);
          if (align_bitpos)
            load_align = least_bit_hwi (align_bitpos);
          for (int i = 0; i < 2; ++i)
            if (group->load_align[i])
              {
                align_bitpos
                  = known_alignment (try_bitpos
                                     - group->stores[0]->bitpos
                                     + group->stores[0]->ops[i].bitpos
                                     - group->load_align_base[i]);
                if (align_bitpos & (group_load_align - 1))
                  {
                    unsigned HOST_WIDE_INT a = least_bit_hwi (align_bitpos);
                    load_align = MIN (load_align, a);
                  }
              }
          try_size = MIN (try_size, load_align);
        }
      store_immediate_info *info
        = find_constituent_stores (group, NULL, &first, try_bitpos, try_size);
      if (info && !gimple_clobber_p (info->stmt))
        {
          /* If there is just one original statement for the range, see if
             we can just reuse the original store which could be even larger
             than try_size.  */
          unsigned HOST_WIDE_INT stmt_end
            = ROUND_UP (info->bitpos + info->bitsize, BITS_PER_UNIT);
          info = find_constituent_stores (group, NULL, &first, try_bitpos,
                                          stmt_end - try_bitpos);
          if (info && info->bitpos >= try_bitpos)
            {
              store_immediate_info *info2 = NULL;
              unsigned int first_copy = first;
              if (info->bitpos > try_bitpos
                  && stmt_end - try_bitpos <= try_size)
                {
                  info2 = find_constituent_stores (group, NULL, &first_copy,
                                                   try_bitpos,
                                                   info->bitpos - try_bitpos);
                  gcc_assert (info2 == NULL || gimple_clobber_p (info2->stmt));
                }
              if (info2 == NULL && stmt_end - try_bitpos < try_size)
                {
                  info2 = find_constituent_stores (group, NULL, &first_copy,
                                                   stmt_end,
                                                   (try_bitpos + try_size)
                                                   - stmt_end);
                  gcc_assert (info2 == NULL || gimple_clobber_p (info2->stmt));
                }
              if (info2 == NULL)
                {
                  try_size = stmt_end - try_bitpos;
                  found_orig = true;
                  goto found;
                }
            }
        }

      /* Approximate store bitsize for the case when there are no padding
         bits.  */
      while (try_size > size)
        try_size /= 2;
      /* Now look for whole padding bytes at the end of that bitsize.  */
      for (nonmasked = try_size / BITS_PER_UNIT; nonmasked > 0; --nonmasked)
        if (group->mask[try_pos - bytepos + nonmasked - 1]
            != (unsigned char) ~0U
            && (!bzero_first
                || group->val[try_pos - bytepos + nonmasked - 1] != 0))
          break;
      if (nonmasked == 0 || (info && gimple_clobber_p (info->stmt)))
        {
          /* If entire try_size range is padding, skip it.  */
          try_pos += try_size / BITS_PER_UNIT;
          size -= try_size;
          continue;
        }
      /* Otherwise try to decrease try_size if second half, last 3 quarters
         etc. are padding.  */
      nonmasked *= BITS_PER_UNIT;
      while (nonmasked <= try_size / 2)
        try_size /= 2;
      if (!allow_unaligned_store && group_align > BITS_PER_UNIT)
        {
          /* Now look for whole padding bytes at the start of that bitsize.  */
          unsigned int try_bytesize = try_size / BITS_PER_UNIT, masked;
          for (masked = 0; masked < try_bytesize; ++masked)
            if (group->mask[try_pos - bytepos + masked] != (unsigned char) ~0U
                && (!bzero_first
                    || group->val[try_pos - bytepos + masked] != 0))
              break;
          masked *= BITS_PER_UNIT;
          gcc_assert (masked < try_size);
          if (masked >= try_size / 2)
            {
              while (masked >= try_size / 2)
                {
                  try_size /= 2;
                  try_pos += try_size / BITS_PER_UNIT;
                  size -= try_size;
                  masked -= try_size;
                }
              /* Need to recompute the alignment, so just retry at the new
                 position.  */
              continue;
            }
        }

    found:
      ++ret;

      if (split_stores)
        {
          split_store *store
            = new split_store (try_pos, try_size, align);
          info = find_constituent_stores (group, &store->orig_stores,
                                          &first, try_bitpos, try_size);
          if (info
              && !gimple_clobber_p (info->stmt)
              && info->bitpos >= try_bitpos
              && info->bitpos + info->bitsize <= try_bitpos + try_size
              && (store->orig_stores.length () == 1
                  || found_orig
                  || (info->bitpos == try_bitpos
                      && (info->bitpos + info->bitsize
                          == try_bitpos + try_size))))
            {
              store->orig = true;
              any_orig = true;
            }
          split_stores->safe_push (store);
        }

      try_pos += try_size / BITS_PER_UNIT;
      size -= try_size;
    }

  if (total_orig)
    {
      unsigned int i;
      split_store *store;
      /* If we are reusing some original stores and any of the
         original SSA_NAMEs had multiple uses, we need to subtract
         those now before we add the new ones.  */
      if (total_new[0] && any_orig)
        {
          FOR_EACH_VEC_ELT (*split_stores, i, store)
            if (store->orig)
              total_new[0] -= count_multiple_uses (store->orig_stores[0]);
        }
      total_new[0] += ret; /* The new store.  */
      store_immediate_info *info = group->stores[0];
      if (info->ops[0].base_addr)
        total_new[0] += ret;
      if (info->ops[1].base_addr)
        total_new[0] += ret;
      switch (info->rhs_code)
        {
        case BIT_AND_EXPR:
        case BIT_IOR_EXPR:
        case BIT_XOR_EXPR:
          total_new[0] += ret; /* The new BIT_*_EXPR stmt.  */
          break;
        default:
          break;
        }
      FOR_EACH_VEC_ELT (*split_stores, i, store)
        {
          unsigned int j;
          bool bit_not_p[3] = { false, false, false };
          /* If all orig_stores have certain bit_not_p set, then
             we'd use a BIT_NOT_EXPR stmt and need to account for it.
             If some orig_stores have certain bit_not_p set, then
             we'd use a BIT_XOR_EXPR with a mask and need to account for
             it.  */
          FOR_EACH_VEC_ELT (store->orig_stores, j, info)
            {
              if (info->ops[0].bit_not_p)
                bit_not_p[0] = true;
              if (info->ops[1].bit_not_p)
                bit_not_p[1] = true;
              if (info->bit_not_p)
                bit_not_p[2] = true;
            }
          total_new[0] += bit_not_p[0] + bit_not_p[1] + bit_not_p[2];
        }
    }

  return ret;
}
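
/* Illustrative sketch (not part of the upstream sources): with
   MAX_STORE_BITSIZE == 64, a group whose bit region covers bytes [0, 6) of
   a 4-byte aligned base and whose mask contains no padding bytes is split,
   when unaligned stores are not allowed, into a 4-byte store at offset 0
   followed by a 2-byte store at offset 4: at try_pos 0 the alignment caps
   try_size at 32 bits, and at try_pos 4 the remaining 16 bits of SIZE cap
   it at 16 bits, so the function returns 2.  */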

/* Return the operation through which the operand IDX (if < 2) or
   result (IDX == 2) should be inverted.  If NOP_EXPR, no inversion
   is done, if BIT_NOT_EXPR, all bits are inverted, if BIT_XOR_EXPR,
   the bits should be xored with mask.  */

static enum tree_code
invert_op (split_store *split_store, int idx, tree int_type, tree &mask)
{
  unsigned int i;
  store_immediate_info *info;
  unsigned int cnt = 0;
  bool any_paddings = false;
  FOR_EACH_VEC_ELT (split_store->orig_stores, i, info)
    {
      bool bit_not_p = idx < 2 ? info->ops[idx].bit_not_p : info->bit_not_p;
      if (bit_not_p)
        {
          ++cnt;
          tree lhs = gimple_assign_lhs (info->stmt);
          if (INTEGRAL_TYPE_P (TREE_TYPE (lhs))
              && TYPE_PRECISION (TREE_TYPE (lhs)) < info->bitsize)
            any_paddings = true;
        }
    }
  mask = NULL_TREE;
  if (cnt == 0)
    return NOP_EXPR;
  if (cnt == split_store->orig_stores.length () && !any_paddings)
    return BIT_NOT_EXPR;

  unsigned HOST_WIDE_INT try_bitpos = split_store->bytepos * BITS_PER_UNIT;
  unsigned buf_size = split_store->size / BITS_PER_UNIT;
  unsigned char *buf
    = XALLOCAVEC (unsigned char, buf_size);
  memset (buf, ~0U, buf_size);
  FOR_EACH_VEC_ELT (split_store->orig_stores, i, info)
    {
      bool bit_not_p = idx < 2 ? info->ops[idx].bit_not_p : info->bit_not_p;
      if (!bit_not_p)
        continue;
      /* Clear regions with bit_not_p and invert afterwards, rather than
         clear regions with !bit_not_p, so that gaps in between stores aren't
         set in the mask.  */
      unsigned HOST_WIDE_INT bitsize = info->bitsize;
      unsigned HOST_WIDE_INT prec = bitsize;
      unsigned int pos_in_buffer = 0;
      if (any_paddings)
        {
          tree lhs = gimple_assign_lhs (info->stmt);
          if (INTEGRAL_TYPE_P (TREE_TYPE (lhs))
              && TYPE_PRECISION (TREE_TYPE (lhs)) < bitsize)
            prec = TYPE_PRECISION (TREE_TYPE (lhs));
        }
      if (info->bitpos < try_bitpos)
        {
          gcc_assert (info->bitpos + bitsize > try_bitpos);
          if (!BYTES_BIG_ENDIAN)
            {
              if (prec <= try_bitpos - info->bitpos)
                continue;
              prec -= try_bitpos - info->bitpos;
            }
          bitsize -= try_bitpos - info->bitpos;
          if (BYTES_BIG_ENDIAN && prec > bitsize)
            prec = bitsize;
        }
      else
        pos_in_buffer = info->bitpos - try_bitpos;
      if (prec < bitsize)
        {
          /* If this is a bool inversion, invert just the least significant
             prec bits rather than all bits of it.  */
          if (BYTES_BIG_ENDIAN)
            {
              pos_in_buffer += bitsize - prec;
              if (pos_in_buffer >= split_store->size)
                continue;
            }
          bitsize = prec;
        }
      if (pos_in_buffer + bitsize > split_store->size)
        bitsize = split_store->size - pos_in_buffer;
      unsigned char *p = buf + (pos_in_buffer / BITS_PER_UNIT);
      if (BYTES_BIG_ENDIAN)
        clear_bit_region_be (p, (BITS_PER_UNIT - 1
                                 - (pos_in_buffer % BITS_PER_UNIT)), bitsize);
      else
        clear_bit_region (p, pos_in_buffer % BITS_PER_UNIT, bitsize);
    }
  for (unsigned int i = 0; i < buf_size; ++i)
    buf[i] = ~buf[i];
  mask = native_interpret_expr (int_type, buf, buf_size);
  return BIT_XOR_EXPR;
}
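
/* Illustrative sketch (not part of the upstream sources): if a 4-byte split
   store is made up of two 16-bit constituents and only the second one has
   the relevant bit_not_p flag set, the buffer ends up as 00 00 ff ff after
   the final inversion, so the function returns BIT_XOR_EXPR with that value
   as MASK and only the half coming from the inverted constituent gets
   flipped by the emitted XOR.  */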

/* Given a merged store group GROUP output the widened version of it.
   The store chain is against the base object BASE.
   Try store sizes of at most MAX_STORE_BITSIZE bits wide and don't output
   unaligned stores for STRICT_ALIGNMENT targets or if it's too expensive.
   Make sure that the number of statements output is less than the number of
   original statements.  If a better sequence is possible emit it and
   return true.  */

bool
imm_store_chain_info::output_merged_store (merged_store_group *group)
{
  split_store *split_store;
  unsigned int i;
  unsigned HOST_WIDE_INT start_byte_pos
    = group->bitregion_start / BITS_PER_UNIT;

  unsigned int orig_num_stmts = group->stores.length ();
  if (orig_num_stmts < 2)
    return false;

  auto_vec<class split_store *, 32> split_stores;
  bool allow_unaligned_store
    = !STRICT_ALIGNMENT && param_store_merging_allow_unaligned;
  bool allow_unaligned_load = allow_unaligned_store;
  bool bzero_first = false;
  store_immediate_info *store;
  unsigned int num_clobber_stmts = 0;
  if (group->stores[0]->rhs_code == INTEGER_CST)
    {
      FOR_EACH_VEC_ELT (group->stores, i, store)
        if (gimple_clobber_p (store->stmt))
          num_clobber_stmts++;
        else if (TREE_CODE (gimple_assign_rhs1 (store->stmt)) == CONSTRUCTOR
                 && CONSTRUCTOR_NELTS (gimple_assign_rhs1 (store->stmt)) == 0
                 && group->start == store->bitpos
                 && group->width == store->bitsize
                 && (group->start % BITS_PER_UNIT) == 0
                 && (group->width % BITS_PER_UNIT) == 0)
          {
            bzero_first = true;
            break;
          }
        else
          break;
      FOR_EACH_VEC_ELT_FROM (group->stores, i, store, i)
        if (gimple_clobber_p (store->stmt))
          num_clobber_stmts++;
      if (num_clobber_stmts == orig_num_stmts)
        return false;
      orig_num_stmts -= num_clobber_stmts;
    }
  if (allow_unaligned_store || bzero_first)
    {
      /* If unaligned stores are allowed, see how many stores we'd emit
         for unaligned and how many stores we'd emit for aligned stores.
         Only use unaligned stores if it allows fewer stores than aligned.
         Similarly, if there is a whole region clear first, prefer expanding
         it together compared to expanding clear first followed by merged
         further stores.  */
      unsigned cnt[4] = { ~0, ~0, ~0, ~0 };
      int pass_min = 0;
      for (int pass = 0; pass < 4; ++pass)
        {
          if (!allow_unaligned_store && (pass & 1) != 0)
            continue;
          if (!bzero_first && (pass & 2) != 0)
            continue;
          cnt[pass] = split_group (group, (pass & 1) != 0,
                                   allow_unaligned_load, (pass & 2) != 0,
                                   NULL, NULL, NULL);
          if (cnt[pass] < cnt[pass_min])
            pass_min = pass;
        }
      if ((pass_min & 1) == 0)
        allow_unaligned_store = false;
      if ((pass_min & 2) == 0)
        bzero_first = false;
    }

  unsigned total_orig, total_new;
  split_group (group, allow_unaligned_store, allow_unaligned_load, bzero_first,
               &split_stores, &total_orig, &total_new);
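
  /* Illustrative note (not part of the upstream sources): in the dry-run
     loop above, bit 0 of PASS selects unaligned stores and bit 1 selects
     keeping the initial bzero, so e.g. cnt[3] is the split-store count with
     both enabled; the cheapest surviving combination is then used for the
     real split_group call just above, which fills SPLIT_STORES.  */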

  /* Determine if there is a clobber covering the whole group at the start,
     followed by proposed split stores that cover the whole group.  In that
     case, prefer the transformation even if
     split_stores.length () == orig_num_stmts.  */
  bool clobber_first = false;
  if (num_clobber_stmts
      && gimple_clobber_p (group->stores[0]->stmt)
      && group->start == group->stores[0]->bitpos
      && group->width == group->stores[0]->bitsize
      && (group->start % BITS_PER_UNIT) == 0
      && (group->width % BITS_PER_UNIT) == 0)
    {
      clobber_first = true;
      unsigned HOST_WIDE_INT pos = group->start / BITS_PER_UNIT;
      FOR_EACH_VEC_ELT (split_stores, i, split_store)
        if (split_store->bytepos != pos)
          {
            clobber_first = false;
            break;
          }
        else
          pos += split_store->size / BITS_PER_UNIT;
      if (pos != (group->start + group->width) / BITS_PER_UNIT)
        clobber_first = false;
    }

  if (split_stores.length () >= orig_num_stmts + clobber_first)
    {
      /* We didn't manage to reduce the number of statements.  Bail out.  */
      if (dump_file && (dump_flags & TDF_DETAILS))
        fprintf (dump_file, "Exceeded original number of stmts (%u)."
                 " Not profitable to emit new sequence.\n",
                 orig_num_stmts);
      FOR_EACH_VEC_ELT (split_stores, i, split_store)
        delete split_store;
      return false;
    }
  if (total_orig <= total_new)
    {
      /* If number of estimated new statements is above estimated original
         statements, bail out too.  */
      if (dump_file && (dump_flags & TDF_DETAILS))
        fprintf (dump_file, "Estimated number of original stmts (%u)"
                 " not larger than estimated number of new"
                 " stmts (%u).\n",
                 total_orig, total_new);
      FOR_EACH_VEC_ELT (split_stores, i, split_store)
        delete split_store;
      return false;
    }
  if (group->stores[0]->rhs_code == INTEGER_CST)
    {
      bool all_orig = true;
      FOR_EACH_VEC_ELT (split_stores, i, split_store)
        if (!split_store->orig)
          {
            all_orig = false;
            break;
          }
      if (all_orig)
        {
          unsigned int cnt = split_stores.length ();
          store_immediate_info *store;
          FOR_EACH_VEC_ELT (group->stores, i, store)
            if (gimple_clobber_p (store->stmt))
              ++cnt;
          /* Punt if we wouldn't make any real changes, i.e. keep all
             orig stmts + all clobbers.  */
          if (cnt == group->stores.length ())
            {
              if (dump_file && (dump_flags & TDF_DETAILS))
                fprintf (dump_file, "Exceeded original number of stmts (%u)."
                         " Not profitable to emit new sequence.\n",
                         orig_num_stmts);
              FOR_EACH_VEC_ELT (split_stores, i, split_store)
                delete split_store;
              return false;
            }
        }
    }

  gimple_stmt_iterator last_gsi = gsi_for_stmt (group->last_stmt);
  gimple_seq seq = NULL;
  tree last_vdef, new_vuse;
  last_vdef = gimple_vdef (group->last_stmt);
  new_vuse = gimple_vuse (group->last_stmt);
  tree bswap_res = NULL_TREE;

  /* Clobbers are not removed.  */
  if (gimple_clobber_p (group->last_stmt))
    {
      new_vuse = make_ssa_name (gimple_vop (cfun), group->last_stmt);
      gimple_set_vdef (group->last_stmt, new_vuse);
    }

  if (group->stores[0]->rhs_code == LROTATE_EXPR
      || group->stores[0]->rhs_code == NOP_EXPR)
    {
      tree fndecl = NULL_TREE, bswap_type = NULL_TREE, load_type;
      gimple *ins_stmt = group->stores[0]->ins_stmt;
      struct symbolic_number *n = &group->stores[0]->n;
      bool bswap = group->stores[0]->rhs_code == LROTATE_EXPR;

      switch (n->range)
        {
        case 16:
          load_type = bswap_type = uint16_type_node;
          break;
        case 32:
          load_type = uint32_type_node;
          if (bswap)
            {
              fndecl = builtin_decl_explicit (BUILT_IN_BSWAP32);
              bswap_type = TREE_VALUE (TYPE_ARG_TYPES (TREE_TYPE (fndecl)));
            }
          break;
        case 64:
          load_type = uint64_type_node;
          if (bswap)
            {
              fndecl = builtin_decl_explicit (BUILT_IN_BSWAP64);
              bswap_type = TREE_VALUE (TYPE_ARG_TYPES (TREE_TYPE (fndecl)));
            }
          break;
        default:
          gcc_unreachable ();
        }

      /* If the loads have each vuse of the corresponding store,
         we've checked the aliasing already in try_coalesce_bswap and
         we want to sink the need load into seq.  So need to use new_vuse
         on the load.  */
      if (n->base_addr)
        {
          if (n->vuse == NULL)
            {
              n->vuse = new_vuse;
              ins_stmt = NULL;
            }
          else
            /* Update vuse in case it has changed by output_merged_stores.  */
            n->vuse = gimple_vuse (ins_stmt);
        }
      bswap_res = bswap_replace (gsi_start (seq), ins_stmt, fndecl,
                                 bswap_type, load_type, n, bswap);
      gcc_assert (bswap_res);
    }

  gimple *stmt = NULL;
  auto_vec<gimple *, 32> orig_stmts;
  gimple_seq this_seq;
  tree addr = force_gimple_operand_1 (unshare_expr (base_addr), &this_seq,
                                      is_gimple_mem_ref_addr, NULL_TREE);
  gimple_seq_add_seq_without_update (&seq, this_seq);

  tree load_addr[2] = { NULL_TREE, NULL_TREE };
  gimple_seq load_seq[2] = { NULL, NULL };
  gimple_stmt_iterator load_gsi[2] = { gsi_none (), gsi_none () };
  for (int j = 0; j < 2; ++j)
    {
      store_operand_info &op = group->stores[0]->ops[j];
      if (op.base_addr == NULL_TREE)
        continue;

      store_immediate_info *infol = group->stores.last ();
      if (gimple_vuse (op.stmt) == gimple_vuse (infol->ops[j].stmt))
        {
          /* We can't pick the location randomly; while we've verified
             all the loads have the same vuse, they can be still in different
             basic blocks and we need to pick the one from the last bb:
               if (...)
                 q[0] = ...;
               q[1] = ...;
             otherwise if we put the wider load at the q[0] load, we might
             segfault if q[1] is not mapped.  */
          basic_block bb = gimple_bb (op.stmt);
          gimple *ostmt = op.stmt;
          store_immediate_info *info;
          FOR_EACH_VEC_ELT (group->stores, i, info)
            {
              gimple *tstmt = info->ops[j].stmt;
              basic_block tbb = gimple_bb (tstmt);
              if (dominated_by_p (CDI_DOMINATORS, tbb, bb))
                {
                  ostmt = tstmt;
                  bb = tbb;
                }
            }
          load_gsi[j] = gsi_for_stmt (ostmt);
          load_addr[j]
            = force_gimple_operand_1 (unshare_expr (op.base_addr),
                                      &load_seq[j], is_gimple_mem_ref_addr,
                                      NULL_TREE);
        }
      else if (operand_equal_p (base_addr, op.base_addr, 0))
        load_addr[j] = addr;
      else
        {
          load_addr[j]
            = force_gimple_operand_1 (unshare_expr (op.base_addr),
                                      &this_seq, is_gimple_mem_ref_addr,
                                      NULL_TREE);
          gimple_seq_add_seq_without_update (&seq, this_seq);
        }
    }

  FOR_EACH_VEC_ELT (split_stores, i, split_store)
    {
      unsigned HOST_WIDE_INT try_size = split_store->size;
      unsigned HOST_WIDE_INT try_pos = split_store->bytepos;
      unsigned HOST_WIDE_INT try_bitpos = try_pos * BITS_PER_UNIT;
      unsigned HOST_WIDE_INT align = split_store->align;
      tree dest, src;
      location_t loc;

      if (split_store->orig)
        {
          /* If there is just a single non-clobber constituent store
             which covers the whole area, just reuse the lhs and rhs.  */
          gimple *orig_stmt = NULL;
          store_immediate_info *store;
          unsigned int j;
          FOR_EACH_VEC_ELT (split_store->orig_stores, j, store)
            if (!gimple_clobber_p (store->stmt))
              {
                orig_stmt = store->stmt;
                break;
              }
          dest = gimple_assign_lhs (orig_stmt);
          src = gimple_assign_rhs1 (orig_stmt);
          loc = gimple_location (orig_stmt);
        }
      else
        {
          store_immediate_info *info;
          unsigned short clique, base;
          unsigned int k;
          FOR_EACH_VEC_ELT (split_store->orig_stores, k, info)
            orig_stmts.safe_push (info->stmt);
          tree offset_type
            = get_alias_type_for_stmts (orig_stmts, false, &clique, &base);
          loc = get_location_for_stmts (orig_stmts);
          orig_stmts.truncate (0);

          tree int_type = build_nonstandard_integer_type (try_size, UNSIGNED);
          int_type = build_aligned_type (int_type, align);
          dest = fold_build2 (MEM_REF, int_type, addr,
                              build_int_cst (offset_type, try_pos));
          if (TREE_CODE (dest) == MEM_REF)
            {
              MR_DEPENDENCE_CLIQUE (dest) = clique;
              MR_DEPENDENCE_BASE (dest) = base;
            }

          tree mask;
          if (bswap_res)
            mask = integer_zero_node;
          else
            mask = native_interpret_expr (int_type,
                                          group->mask + try_pos
                                          - start_byte_pos,
                                          group->buf_size);

          tree ops[2];
          for (int j = 0;
               j < 1 + (split_store->orig_stores[0]->ops[1].val != NULL_TREE);
               ++j)
            {
              store_operand_info &op = split_store->orig_stores[0]->ops[j];
              if (bswap_res)
                ops[j] = bswap_res;
              else if (op.base_addr)
                {
                  FOR_EACH_VEC_ELT (split_store->orig_stores, k, info)
                    orig_stmts.safe_push (info->ops[j].stmt);

                  offset_type = get_alias_type_for_stmts (orig_stmts, true,
                                                          &clique, &base);
                  location_t load_loc = get_location_for_stmts (orig_stmts);
                  orig_stmts.truncate (0);

                  unsigned HOST_WIDE_INT load_align = group->load_align[j];
                  unsigned HOST_WIDE_INT align_bitpos
                    = known_alignment (try_bitpos
                                       - split_store->orig_stores[0]->bitpos
                                       + op.bitpos);
                  if (align_bitpos & (load_align - 1))
                    load_align = least_bit_hwi (align_bitpos);

                  tree load_int_type
                    = build_nonstandard_integer_type (try_size, UNSIGNED);
                  load_int_type
                    = build_aligned_type (load_int_type, load_align);

                  poly_uint64 load_pos
                    = exact_div (try_bitpos
                                 - split_store->orig_stores[0]->bitpos
                                 + op.bitpos,
                                 BITS_PER_UNIT);
                  ops[j] = fold_build2 (MEM_REF, load_int_type, load_addr[j],
                                        build_int_cst (offset_type, load_pos));
                  if (TREE_CODE (ops[j]) == MEM_REF)
                    {
                      MR_DEPENDENCE_CLIQUE (ops[j]) = clique;
                      MR_DEPENDENCE_BASE (ops[j]) = base;
                    }
                  if (!integer_zerop (mask))
                    /* The load might load some bits (that will be masked off
                       later on) uninitialized, avoid -W*uninitialized
                       warnings in that case.  */
                    TREE_NO_WARNING (ops[j]) = 1;

                  stmt = gimple_build_assign (make_ssa_name (int_type),
                                              ops[j]);
                  gimple_set_location (stmt, load_loc);
                  if (gsi_bb (load_gsi[j]))
                    {
                      gimple_set_vuse (stmt, gimple_vuse (op.stmt));
                      gimple_seq_add_stmt_without_update (&load_seq[j], stmt);
                    }
                  else
                    {
                      gimple_set_vuse (stmt, new_vuse);
                      gimple_seq_add_stmt_without_update (&seq, stmt);
                    }
                  ops[j] = gimple_assign_lhs (stmt);
                  tree xor_mask;
                  enum tree_code inv_op
                    = invert_op (split_store, j, int_type, xor_mask);
                  if (inv_op != NOP_EXPR)
                    {
                      stmt = gimple_build_assign (make_ssa_name (int_type),
                                                  inv_op, ops[j], xor_mask);
                      gimple_set_location (stmt, load_loc);
                      ops[j] = gimple_assign_lhs (stmt);

                      if (gsi_bb (load_gsi[j]))
                        gimple_seq_add_stmt_without_update (&load_seq[j],
                                                            stmt);
                      else
                        gimple_seq_add_stmt_without_update (&seq, stmt);
                    }
                }
              else
                ops[j] = native_interpret_expr (int_type,
                                                group->val + try_pos
                                                - start_byte_pos,
                                                group->buf_size);
            }

          switch (split_store->orig_stores[0]->rhs_code)
            {
            case BIT_AND_EXPR:
            case BIT_IOR_EXPR:
            case BIT_XOR_EXPR:
              FOR_EACH_VEC_ELT (split_store->orig_stores, k, info)
                {
                  tree rhs1 = gimple_assign_rhs1 (info->stmt);
                  orig_stmts.safe_push (SSA_NAME_DEF_STMT (rhs1));
                }
              location_t bit_loc;
              bit_loc = get_location_for_stmts (orig_stmts);
              orig_stmts.truncate (0);

              stmt
                = gimple_build_assign (make_ssa_name (int_type),
                                       split_store->orig_stores[0]->rhs_code,
                                       ops[0], ops[1]);
              gimple_set_location (stmt, bit_loc);
              /* If there is just one load and there is a separate
                 load_seq[0], emit the bitwise op right after it.  */
              if (load_addr[1] == NULL_TREE && gsi_bb (load_gsi[0]))
                gimple_seq_add_stmt_without_update (&load_seq[0], stmt);
              /* Otherwise, if at least one load is in seq, we need to
                 emit the bitwise op right before the store.  If there
                 are two loads and are emitted somewhere else, it would
                 be better to emit the bitwise op as early as possible;
                 we don't track where that would be possible right now
                 though.  */
              else
                gimple_seq_add_stmt_without_update (&seq, stmt);
              src = gimple_assign_lhs (stmt);
              tree xor_mask;
              enum tree_code inv_op;
              inv_op = invert_op (split_store, 2, int_type, xor_mask);
              if (inv_op != NOP_EXPR)
                {
                  stmt = gimple_build_assign (make_ssa_name (int_type),
                                              inv_op, src, xor_mask);
                  gimple_set_location (stmt, bit_loc);
                  if (load_addr[1] == NULL_TREE && gsi_bb (load_gsi[0]))
                    gimple_seq_add_stmt_without_update (&load_seq[0], stmt);
                  else
                    gimple_seq_add_stmt_without_update (&seq, stmt);
                  src = gimple_assign_lhs (stmt);
                }
              break;
            case LROTATE_EXPR:
            case NOP_EXPR:
              src = ops[0];
              if (!is_gimple_val (src))
                {
                  stmt = gimple_build_assign (make_ssa_name (TREE_TYPE (src)),
                                              src);
                  gimple_seq_add_stmt_without_update (&seq, stmt);
                  src = gimple_assign_lhs (stmt);
                }
              if (!useless_type_conversion_p (int_type, TREE_TYPE (src)))
                {
                  stmt = gimple_build_assign (make_ssa_name (int_type),
                                              NOP_EXPR, src);
                  gimple_seq_add_stmt_without_update (&seq, stmt);
                  src = gimple_assign_lhs (stmt);
                }
              inv_op = invert_op (split_store, 2, int_type, xor_mask);
              if (inv_op != NOP_EXPR)
                {
                  stmt = gimple_build_assign (make_ssa_name (int_type),
                                              inv_op, src, xor_mask);
                  gimple_set_location (stmt, loc);
                  gimple_seq_add_stmt_without_update (&seq, stmt);
                  src = gimple_assign_lhs (stmt);
                }
              break;
            default:
              src = ops[0];
              break;
            }
4160 /* If bit insertion is required, we use the source as an accumulator
4161 into which the successive bit-field values are manually inserted.
4162 FIXME: perhaps use BIT_INSERT_EXPR instead in some cases? */
4163 if (group
->bit_insertion
)
4164 FOR_EACH_VEC_ELT (split_store
->orig_stores
, k
, info
)
4165 if (info
->rhs_code
== BIT_INSERT_EXPR
4166 && info
->bitpos
< try_bitpos
+ try_size
4167 && info
->bitpos
+ info
->bitsize
> try_bitpos
)
4169 /* Mask, truncate, convert to final type, shift and ior into
4170 the accumulator. Note that every step can be a no-op. */
4171 const HOST_WIDE_INT start_gap
= info
->bitpos
- try_bitpos
;
4172 const HOST_WIDE_INT end_gap
4173 = (try_bitpos
+ try_size
) - (info
->bitpos
+ info
->bitsize
);
4174 tree tem
= info
->ops
[0].val
;
4175 if (TYPE_PRECISION (TREE_TYPE (tem
)) <= info
->bitsize
)
4178 = build_nonstandard_integer_type (info
->bitsize
,
4180 tem
= gimple_convert (&seq
, loc
, bitfield_type
, tem
);
4182 else if ((BYTES_BIG_ENDIAN
? start_gap
: end_gap
) > 0)
4184 const unsigned HOST_WIDE_INT imask
4185 = (HOST_WIDE_INT_1U
<< info
->bitsize
) - 1;
4186 tem
= gimple_build (&seq
, loc
,
4187 BIT_AND_EXPR
, TREE_TYPE (tem
), tem
,
4188 build_int_cst (TREE_TYPE (tem
),
4191 const HOST_WIDE_INT shift
4192 = (BYTES_BIG_ENDIAN
? end_gap
: start_gap
);
4194 tem
= gimple_build (&seq
, loc
,
4195 RSHIFT_EXPR
, TREE_TYPE (tem
), tem
,
4196 build_int_cst (NULL_TREE
, -shift
));
4197 tem
= gimple_convert (&seq
, loc
, int_type
, tem
);
4199 tem
= gimple_build (&seq
, loc
,
4200 LSHIFT_EXPR
, int_type
, tem
,
4201 build_int_cst (NULL_TREE
, shift
));
4202 src
= gimple_build (&seq
, loc
,
4203 BIT_IOR_EXPR
, int_type
, tem
, src
);
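              /* Illustrative example (not from the original source): on a
                 little-endian target, a 5-bit field at bitpos 8 merged into a
                 32-bit chunk starting at try_bitpos 0 gives start_gap = 8 and
                 end_gap = 19, so the value is masked with (1 << 5) - 1,
                 shifted left by 8 and IORed into SRC.  */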
4206       if (!integer_zerop (mask))
4208           tree tem = make_ssa_name (int_type);
4209           tree load_src = unshare_expr (dest);
4210           /* The load might load some or all bits uninitialized,
4211              avoid -W*uninitialized warnings in that case.
4212              As an optimization, it would be nice if all the bits are
4213              provably uninitialized (no stores at all yet or previous
4214              store a CLOBBER) we'd optimize away the load and replace
4216           TREE_NO_WARNING (load_src) = 1;
4217           stmt = gimple_build_assign (tem, load_src);
4218           gimple_set_location (stmt, loc);
4219           gimple_set_vuse (stmt, new_vuse);
4220           gimple_seq_add_stmt_without_update (&seq, stmt);
4222           /* FIXME: If there is a single chunk of zero bits in mask,
4223              perhaps use BIT_INSERT_EXPR instead?  */
4224           stmt = gimple_build_assign (make_ssa_name (int_type),
4225                                       BIT_AND_EXPR, tem, mask);
4226           gimple_set_location (stmt, loc);
4227           gimple_seq_add_stmt_without_update (&seq, stmt);
4228           tem = gimple_assign_lhs (stmt);
4230           if (TREE_CODE (src) == INTEGER_CST)
4231             src = wide_int_to_tree (int_type,
4232                                     wi::bit_and_not (wi::to_wide (src),
4233                                                      wi::to_wide (mask)));
4237                 = wide_int_to_tree (int_type,
4238                                     wi::bit_not (wi::to_wide (mask)));
4239               stmt = gimple_build_assign (make_ssa_name (int_type),
4240                                           BIT_AND_EXPR, src, nmask);
4241               gimple_set_location (stmt, loc);
4242               gimple_seq_add_stmt_without_update (&seq, stmt);
4243               src = gimple_assign_lhs (stmt);
4245           stmt = gimple_build_assign (make_ssa_name (int_type),
4246                                       BIT_IOR_EXPR, tem, src);
4247           gimple_set_location (stmt, loc);
4248           gimple_seq_add_stmt_without_update (&seq, stmt);
4249           src = gimple_assign_lhs (stmt);
4253       stmt = gimple_build_assign (dest, src);
4254       gimple_set_location (stmt, loc);
4255       gimple_set_vuse (stmt, new_vuse);
4256       gimple_seq_add_stmt_without_update (&seq, stmt);
4258       if (group->lp_nr && stmt_could_throw_p (cfun, stmt))
4259         add_stmt_to_eh_lp (stmt, group->lp_nr);
4262       if (i < split_stores.length () - 1)
4263         new_vdef = make_ssa_name (gimple_vop (cfun), stmt);
4265         new_vdef = last_vdef;
4267       gimple_set_vdef (stmt, new_vdef);
4268       SSA_NAME_DEF_STMT (new_vdef) = stmt;
4269       new_vuse = new_vdef;
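      /* Each widened store defines a fresh virtual operand and the next one
         uses it as its VUSE; the last store in the sequence reuses LAST_VDEF
         so the virtual SSA chain of the statements being replaced is
         preserved.  */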
4272   FOR_EACH_VEC_ELT (split_stores, i, split_store)
4279              "New sequence of %u stores to replace old one of %u stores\n",
4280              split_stores.length (), orig_num_stmts);
4281       if (dump_flags & TDF_DETAILS)
4282         print_gimple_seq (dump_file, seq, 0, TDF_VOPS | TDF_MEMSYMS);
4285   if (gimple_clobber_p (group->last_stmt))
4286     update_stmt (group->last_stmt);
4288   if (group->lp_nr > 0)
4290       /* We're going to insert a sequence of (potentially) throwing stores
4291          into an active EH region.  This means that we're going to create
4292          new basic blocks with EH edges pointing to the post landing pad
4293          and, therefore, to have to update its PHI nodes, if any.  For the
4294          virtual PHI node, we're going to use the VDEFs created above, but
4295          for the other nodes, we need to record the original reaching defs.  */
4296       eh_landing_pad lp = get_eh_landing_pad_from_number (group->lp_nr);
4297       basic_block lp_bb = label_to_block (cfun, lp->post_landing_pad);
4298       basic_block last_bb = gimple_bb (group->last_stmt);
4299       edge last_edge = find_edge (last_bb, lp_bb);
4300       auto_vec<tree, 16> last_defs;
4302       for (gpi = gsi_start_phis (lp_bb); !gsi_end_p (gpi); gsi_next (&gpi))
4304           gphi *phi = gpi.phi ();
4306           if (virtual_operand_p (gimple_phi_result (phi)))
4307             last_def = NULL_TREE;
4309             last_def = gimple_phi_arg_def (phi, last_edge->dest_idx);
4310           last_defs.safe_push (last_def);
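      /* For the virtual PHI a NULL_TREE placeholder is recorded here; its
         argument on each new EH edge will be the VDEF live at that point and
         is filled in by the walk below, while the non-virtual PHIs get the
         reaching definitions recorded from LAST_EDGE.  */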
4313   /* Do the insertion.  Then, if new basic blocks have been created in the
4314      process, rewind the chain of VDEFs created above to walk the new basic
4315      blocks and update the corresponding arguments of the PHI nodes.  */
4316   update_modified_stmts (seq);
4317   if (gimple_find_sub_bbs (seq, &last_gsi))
4318     while (last_vdef != gimple_vuse (group->last_stmt))
4320         gimple *stmt = SSA_NAME_DEF_STMT (last_vdef);
4321         if (stmt_could_throw_p (cfun, stmt))
4323             edge new_edge = find_edge (gimple_bb (stmt), lp_bb);
4325             for (gpi = gsi_start_phis (lp_bb), i = 0;
4327                  gsi_next (&gpi), i++)
4329                 gphi *phi = gpi.phi ();
4331                 if (virtual_operand_p (gimple_phi_result (phi)))
4332                   new_def = last_vdef;
4334                   new_def = last_defs[i];
4335                 add_phi_arg (phi, new_def, new_edge, UNKNOWN_LOCATION);
4338         last_vdef = gimple_vuse (stmt);
4342   gsi_insert_seq_after (&last_gsi, seq, GSI_SAME_STMT);
4344   for (int j = 0; j < 2; ++j)
4346     gsi_insert_seq_after (&load_gsi[j], load_seq[j], GSI_SAME_STMT);
4351 /* Process the merged_store_group objects created in the coalescing phase.
4352    The stores are all against the base object BASE.
4353    Try to output the widened stores and delete the original statements if
4354    successful.  Return true iff any changes were made.  */
4357 imm_store_chain_info::output_merged_stores ()
4360   merged_store_group *merged_store;
4362   FOR_EACH_VEC_ELT (m_merged_store_groups, i, merged_store)
4364       if (dbg_cnt (store_merging)
4365           && output_merged_store (merged_store))
4368           store_immediate_info *store;
4369           FOR_EACH_VEC_ELT (merged_store->stores, j, store)
4371               gimple *stmt = store->stmt;
4372               gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
4373               /* Don't remove clobbers, they are still useful even if
4374                  everything is overwritten afterwards.  */
4375               if (gimple_clobber_p (stmt))
4377               gsi_remove (&gsi, true);
4379                 remove_stmt_from_eh_lp (stmt);
4380               if (stmt != merged_store->last_stmt)
4382                   unlink_stmt_vdef (stmt);
4383                   release_defs (stmt);
4389   if (ret && dump_file)
4390     fprintf (dump_file, "Merging successful!\n");
4395 /* Coalesce the store_immediate_info objects recorded against the base object
4396    BASE in the first phase and output them.
4397    Delete the allocated structures.
4398    Return true if any changes were made.  */
4401 imm_store_chain_info::terminate_and_process_chain ()
4403   /* Process store chain.  */
4405   if (m_store_info.length () > 1)
4407       ret = coalesce_immediate_stores ();
4409         ret = output_merged_stores ();
4412   /* Delete all the entries we allocated ourselves.  */
4413   store_immediate_info *info;
4415   FOR_EACH_VEC_ELT (m_store_info, i, info)
4418   merged_store_group *merged_info;
4419   FOR_EACH_VEC_ELT (m_merged_store_groups, i, merged_info)
4425 /* Return true iff LHS is a destination potentially interesting for
4426    store merging.  In practice these are the codes that get_inner_reference
4430 lhs_valid_for_store_merging_p (tree lhs)
4435   switch (TREE_CODE (lhs))
4438     case ARRAY_RANGE_REF:
4450 /* Return true if the tree RHS is a constant we want to consider
4451    during store merging.  In practice accept all codes that
4452    native_encode_expr accepts.  */
4455 rhs_valid_for_store_merging_p (tree rhs)
4457   unsigned HOST_WIDE_INT size;
4458   if (TREE_CODE (rhs) == CONSTRUCTOR
4459       && CONSTRUCTOR_NELTS (rhs) == 0
4460       && TYPE_SIZE_UNIT (TREE_TYPE (rhs))
4461       && tree_fits_uhwi_p (TYPE_SIZE_UNIT (TREE_TYPE (rhs))))
4463   return (GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (rhs))).is_constant (&size)
4464           && native_encode_expr (rhs, NULL, size) != 0);
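/* For instance, an empty CONSTRUCTOR used to zero-initialize a whole
   aggregate is accepted above, as is any scalar constant whose byte
   representation native_encode_expr can produce.  */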
4467 /* Adjust *PBITPOS, *PBITREGION_START and *PBITREGION_END by BYTE_OFF bytes
4468    and return true on success or false on failure.  */
4471 adjust_bit_pos (poly_offset_int byte_off,
4472                 poly_int64 *pbitpos,
4473                 poly_uint64 *pbitregion_start,
4474                 poly_uint64 *pbitregion_end)
4476   poly_offset_int bit_off = byte_off << LOG2_BITS_PER_UNIT;
4477   bit_off += *pbitpos;
4479   if (known_ge (bit_off, 0) && bit_off.to_shwi (pbitpos))
4481       if (maybe_ne (*pbitregion_end, 0U))
4483           bit_off = byte_off << LOG2_BITS_PER_UNIT;
4484           bit_off += *pbitregion_start;
4485           if (bit_off.to_uhwi (pbitregion_start))
4487               bit_off = byte_off << LOG2_BITS_PER_UNIT;
4488               bit_off += *pbitregion_end;
4489               if (!bit_off.to_uhwi (pbitregion_end))
4490                 *pbitregion_end = 0;
4493             *pbitregion_end = 0;
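  /* E.g. a byte offset of 2 adds 2 << LOG2_BITS_PER_UNIT (16 bits on the
     usual 8-bit-byte targets) to the bit position; the adjustment fails when
     the new bit position is negative or does not fit a signed HOST_WIDE_INT,
     and the bit region is dropped (its end set to 0) if its adjusted bounds
     do not fit an unsigned HOST_WIDE_INT.  */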
4501 /* If MEM is a memory reference usable for store merging (either as
4502    store destination or for loads), return the non-NULL base_addr
4503    and set *PBITSIZE, *PBITPOS, *PBITREGION_START and *PBITREGION_END.
4504    Otherwise return NULL; *PBITPOS should still be valid even for that
4508 mem_valid_for_store_merging (tree mem, poly_uint64 *pbitsize,
4509                              poly_uint64 *pbitpos,
4510                              poly_uint64 *pbitregion_start,
4511                              poly_uint64 *pbitregion_end)
4513   poly_int64 bitsize, bitpos;
4514   poly_uint64 bitregion_start = 0, bitregion_end = 0;
4516   int unsignedp = 0, reversep = 0, volatilep = 0;
4518   tree base_addr = get_inner_reference (mem, &bitsize, &bitpos, &offset, &mode,
4519                                         &unsignedp, &reversep, &volatilep);
4520   *pbitsize = bitsize;
4521   if (known_eq (bitsize, 0))
4524   if (TREE_CODE (mem) == COMPONENT_REF
4525       && DECL_BIT_FIELD_TYPE (TREE_OPERAND (mem, 1)))
4527       get_bit_range (&bitregion_start, &bitregion_end, mem, &bitpos, &offset);
4528       if (maybe_ne (bitregion_end, 0U))
4535   /* We do not want to rewrite TARGET_MEM_REFs.  */
4536   if (TREE_CODE (base_addr) == TARGET_MEM_REF)
4538   /* In some cases get_inner_reference may return a
4539      MEM_REF [ptr + byteoffset].  For the purposes of this pass
4540      canonicalize the base_addr to MEM_REF [ptr] and take
4541      byteoffset into account in the bitpos.  This occurs in
4542      PR 23684 and this way we can catch more chains.  */
4543   else if (TREE_CODE (base_addr) == MEM_REF)
4545       if (!adjust_bit_pos (mem_ref_offset (base_addr), &bitpos,
4546                            &bitregion_start, &bitregion_end))
4548       base_addr = TREE_OPERAND (base_addr, 0);
4550   /* get_inner_reference returns the base object, get at its
4554   if (maybe_lt (bitpos, 0))
4556   base_addr = build_fold_addr_expr (base_addr);
4561   /* If the access is variable offset then a base decl has to be
4562      address-taken to be able to emit pointer-based stores to it.
4563      ??? We might be able to get away with re-using the original
4564      base up to the first variable part and then wrapping that inside
4566   tree base = get_base_address (base_addr);
4567   if (!base || (DECL_P (base) && !TREE_ADDRESSABLE (base)))
4570   /* Similarly to above for the base, remove constant from the offset.  */
4571   if (TREE_CODE (offset) == PLUS_EXPR
4572       && TREE_CODE (TREE_OPERAND (offset, 1)) == INTEGER_CST
4573       && adjust_bit_pos (wi::to_poly_offset (TREE_OPERAND (offset, 1)),
4574                          &bitpos, &bitregion_start, &bitregion_end))
4575     offset = TREE_OPERAND (offset, 0);
4577   base_addr = build2 (POINTER_PLUS_EXPR, TREE_TYPE (base_addr),
4581   if (known_eq (bitregion_end, 0U))
4583       bitregion_start = round_down_to_byte_boundary (bitpos);
4584       bitregion_end = round_up_to_byte_boundary (bitpos + bitsize);
4587   *pbitsize = bitsize;
4589   *pbitregion_start = bitregion_start;
4590   *pbitregion_end = bitregion_end;
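  /* E.g. a store whose base comes back as MEM_REF [ptr + 4] is canonicalized
     so that the returned base is ptr itself and the 4-byte offset is folded
     into *PBITPOS as 32 bits (assuming 8-bit bytes), letting stores through
     the same pointer join the same chain.  */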
4594 /* Return true if STMT is a load that can be used for store merging.
4595    In that case fill in *OP.  BITSIZE, BITPOS, BITREGION_START and
4596    BITREGION_END are properties of the corresponding store.  */
4599 handled_load (gimple *stmt, store_operand_info *op,
4600               poly_uint64 bitsize, poly_uint64 bitpos,
4601               poly_uint64 bitregion_start, poly_uint64 bitregion_end)
4603   if (!is_gimple_assign (stmt))
4605   if (gimple_assign_rhs_code (stmt) == BIT_NOT_EXPR)
4607       tree rhs1 = gimple_assign_rhs1 (stmt);
4608       if (TREE_CODE (rhs1) == SSA_NAME
4609           && handled_load (SSA_NAME_DEF_STMT (rhs1), op, bitsize, bitpos,
4610                            bitregion_start, bitregion_end))
4612           /* Don't allow _1 = load; _2 = ~_1; _3 = ~_2; which should have
4613              been optimized earlier, but if allowed here, would confuse the
4614              multiple uses counting.  */
4617           op->bit_not_p = !op->bit_not_p;
4622   if (gimple_vuse (stmt)
4623       && gimple_assign_load_p (stmt)
4624       && !stmt_can_throw_internal (cfun, stmt)
4625       && !gimple_has_volatile_ops (stmt))
4627       tree mem = gimple_assign_rhs1 (stmt);
4629         = mem_valid_for_store_merging (mem, &op->bitsize, &op->bitpos,
4630                                        &op->bitregion_start,
4631                                        &op->bitregion_end);
4632       if (op->base_addr != NULL_TREE
4633           && known_eq (op->bitsize, bitsize)
4634           && multiple_p (op->bitpos - bitpos, BITS_PER_UNIT)
4635           && known_ge (op->bitpos - op->bitregion_start,
4636                        bitpos - bitregion_start)
4637           && known_ge (op->bitregion_end - op->bitpos,
4638                        bitregion_end - bitpos))
4642           op->bit_not_p = false;
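      /* A single negation of a load, e.g. _1 = *q; _2 = ~_1; *p = _2;, is
         thus represented by recording the underlying load in *OP with
         op->bit_not_p set; the inversion is re-materialized later via
         invert_op when the widened store is assembled.  */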
4649 /* Return the index number of the landing pad for STMT, if any.  */
4652 lp_nr_for_store (gimple *stmt)
4654   if (!cfun->can_throw_non_call_exceptions || !cfun->eh)
4657   if (!stmt_could_throw_p (cfun, stmt))
4660   return lookup_stmt_eh_lp (stmt);
4663 /* Record the store STMT for store merging optimization if it can be
4664    optimized.  Return true if any changes were made.  */
4667 pass_store_merging::process_store (gimple *stmt)
4669   tree lhs = gimple_assign_lhs (stmt);
4670   tree rhs = gimple_assign_rhs1 (stmt);
4671   poly_uint64 bitsize, bitpos;
4672   poly_uint64 bitregion_start, bitregion_end;
4674     = mem_valid_for_store_merging (lhs, &bitsize, &bitpos,
4675                                    &bitregion_start, &bitregion_end);
4676   if (known_eq (bitsize, 0U))
4679   bool invalid = (base_addr == NULL_TREE
4680                   || (maybe_gt (bitsize,
4681                                 (unsigned int) MAX_BITSIZE_MODE_ANY_INT)
4682                       && TREE_CODE (rhs) != INTEGER_CST
4683                       && (TREE_CODE (rhs) != CONSTRUCTOR
4684                           || CONSTRUCTOR_NELTS (rhs) != 0)));
4685   enum tree_code rhs_code = ERROR_MARK;
4686   bool bit_not_p = false;
4687   struct symbolic_number n;
4688   gimple *ins_stmt = NULL;
4689   store_operand_info ops[2];
4692   else if (rhs_valid_for_store_merging_p (rhs))
4694       rhs_code = INTEGER_CST;
4697   else if (TREE_CODE (rhs) != SSA_NAME)
4701       gimple *def_stmt = SSA_NAME_DEF_STMT (rhs), *def_stmt1, *def_stmt2;
4702       if (!is_gimple_assign (def_stmt))
4704       else if (handled_load (def_stmt, &ops[0], bitsize, bitpos,
4705                              bitregion_start, bitregion_end))
4707       else if (gimple_assign_rhs_code (def_stmt) == BIT_NOT_EXPR)
4709           tree rhs1 = gimple_assign_rhs1 (def_stmt);
4710           if (TREE_CODE (rhs1) == SSA_NAME
4711               && is_gimple_assign (SSA_NAME_DEF_STMT (rhs1)))
4714               def_stmt = SSA_NAME_DEF_STMT (rhs1);
4718       if (rhs_code == ERROR_MARK && !invalid)
4719         switch ((rhs_code = gimple_assign_rhs_code (def_stmt)))
4725             rhs1 = gimple_assign_rhs1 (def_stmt);
4726             rhs2 = gimple_assign_rhs2 (def_stmt);
4728             if (TREE_CODE (rhs1) != SSA_NAME)
4730             def_stmt1 = SSA_NAME_DEF_STMT (rhs1);
4731             if (!is_gimple_assign (def_stmt1)
4732                 || !handled_load (def_stmt1, &ops[0], bitsize, bitpos,
4733                                   bitregion_start, bitregion_end))
4735             if (rhs_valid_for_store_merging_p (rhs2))
4737             else if (TREE_CODE (rhs2) != SSA_NAME)
4741                 def_stmt2 = SSA_NAME_DEF_STMT (rhs2);
4742                 if (!is_gimple_assign (def_stmt2))
4744                 else if (!handled_load (def_stmt2, &ops[1], bitsize, bitpos,
4745                                         bitregion_start, bitregion_end))
4755       unsigned HOST_WIDE_INT const_bitsize;
4756       if (bitsize.is_constant (&const_bitsize)
4757           && (const_bitsize % BITS_PER_UNIT) == 0
4758           && const_bitsize <= 64
4759           && multiple_p (bitpos, BITS_PER_UNIT))
4761           ins_stmt = find_bswap_or_nop_1 (def_stmt, &n, 12);
4765               for (unsigned HOST_WIDE_INT i = 0;
4767                    i += BITS_PER_UNIT, nn >>= BITS_PER_MARKER)
4768                 if ((nn & MARKER_MASK) == 0
4769                     || (nn & MARKER_MASK) == MARKER_BYTE_UNKNOWN)
4778                   rhs_code = LROTATE_EXPR;
4779                   ops[0].base_addr = NULL_TREE;
4780                   ops[1].base_addr = NULL_TREE;
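          /* LROTATE_EXPR here serves only as an internal marker meaning
             "value recognized by the bswap machinery" (find_bswap_or_nop_1);
             the symbolic number N holds one marker per byte, and a marker of
             0 or MARKER_BYTE_UNKNOWN means that byte's origin is unknown.  */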
4788       && bitsize.is_constant (&const_bitsize)
4789       && ((const_bitsize % BITS_PER_UNIT) != 0
4790           || !multiple_p (bitpos, BITS_PER_UNIT))
4791       && const_bitsize <= 64)
4793       /* Bypass a conversion to the bit-field type.  */
4795           && is_gimple_assign (def_stmt)
4796           && CONVERT_EXPR_CODE_P (rhs_code))
4798           tree rhs1 = gimple_assign_rhs1 (def_stmt);
4799           if (TREE_CODE (rhs1) == SSA_NAME
4800               && INTEGRAL_TYPE_P (TREE_TYPE (rhs1)))
4803       rhs_code = BIT_INSERT_EXPR;
4806       ops[0].base_addr = NULL_TREE;
4807       ops[1].base_addr = NULL_TREE;
4812   unsigned HOST_WIDE_INT const_bitsize, const_bitpos;
4813   unsigned HOST_WIDE_INT const_bitregion_start, const_bitregion_end;
4815       || !bitsize.is_constant (&const_bitsize)
4816       || !bitpos.is_constant (&const_bitpos)
4817       || !bitregion_start.is_constant (&const_bitregion_start)
4818       || !bitregion_end.is_constant (&const_bitregion_end))
4819     return terminate_all_aliasing_chains (NULL, stmt);
4822     memset (&n, 0, sizeof (n));
4824   class imm_store_chain_info **chain_info = NULL;
4827     chain_info = m_stores.get (base_addr);
4829   store_immediate_info *info;
4832       unsigned int ord = (*chain_info)->m_store_info.length ();
4833       info = new store_immediate_info (const_bitsize, const_bitpos,
4834                                        const_bitregion_start,
4835                                        const_bitregion_end,
4836                                        stmt, ord, rhs_code, n, ins_stmt,
4837                                        bit_not_p, lp_nr_for_store (stmt),
4839       if (dump_file && (dump_flags & TDF_DETAILS))
4841           fprintf (dump_file, "Recording immediate store from stmt:\n");
4842           print_gimple_stmt (dump_file, stmt, 0);
4844       (*chain_info)->m_store_info.safe_push (info);
4845       ret |= terminate_all_aliasing_chains (chain_info, stmt);
4846       /* If we reach the limit of stores to merge in a chain terminate and
4847          process the chain now.  */
4848       if ((*chain_info)->m_store_info.length ()
4849           == (unsigned int) param_max_stores_to_merge)
4851           if (dump_file && (dump_flags & TDF_DETAILS))
4853                      "Reached maximum number of statements to merge:\n");
4854           ret |= terminate_and_process_chain (*chain_info);
4859   /* Store aliases any existing chain?  */
4860   ret |= terminate_all_aliasing_chains (NULL, stmt);
4861   /* Start a new chain.  */
4862   class imm_store_chain_info *new_chain
4863     = new imm_store_chain_info (m_stores_head, base_addr);
4864   info = new store_immediate_info (const_bitsize, const_bitpos,
4865                                    const_bitregion_start,
4866                                    const_bitregion_end,
4867                                    stmt, 0, rhs_code, n, ins_stmt,
4868                                    bit_not_p, lp_nr_for_store (stmt),
4870   new_chain->m_store_info.safe_push (info);
4871   m_stores.put (base_addr, new_chain);
4872   if (dump_file && (dump_flags & TDF_DETAILS))
4874       fprintf (dump_file, "Starting new chain with statement:\n");
4875       print_gimple_stmt (dump_file, stmt, 0);
4876       fprintf (dump_file, "The base object is:\n");
4877       print_generic_expr (dump_file, base_addr);
4878       fprintf (dump_file, "\n");
4883 /* Return true if STMT is a store valid for store merging.  */
4886 store_valid_for_store_merging_p (gimple *stmt)
4888   return gimple_assign_single_p (stmt)
4889          && gimple_vdef (stmt)
4890          && lhs_valid_for_store_merging_p (gimple_assign_lhs (stmt))
4891          && (!gimple_has_volatile_ops (stmt) || gimple_clobber_p (stmt));
4894 enum basic_block_status { BB_INVALID, BB_VALID, BB_EXTENDED_VALID };
4896 /* Return the status of basic block BB wrt store merging.  */
4898 static enum basic_block_status
4899 get_status_for_store_merging (basic_block bb)
4901   unsigned int num_statements = 0;
4902   gimple_stmt_iterator gsi;
4905   for (gsi = gsi_after_labels (bb); !gsi_end_p (gsi); gsi_next (&gsi))
4907       gimple *stmt = gsi_stmt (gsi);
4909       if (is_gimple_debug (stmt))
4912       if (store_valid_for_store_merging_p (stmt) && ++num_statements >= 2)
4916   if (num_statements == 0)
4919   if (cfun->can_throw_non_call_exceptions && cfun->eh
4920       && store_valid_for_store_merging_p (gimple_seq_last_stmt (bb_seq (bb)))
4921       && (e = find_fallthru_edge (bb->succs))
4922       && e->dest == bb->next_bb)
4923     return BB_EXTENDED_VALID;
4925   return num_statements >= 2 ? BB_VALID : BB_INVALID;
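/* In other words: a block with at least two mergeable stores is BB_VALID; a
   block whose last statement is a mergeable store falling through to the
   next block may instead be BB_EXTENDED_VALID when non-call exceptions are
   enabled, letting an open chain continue into the successor; anything else
   is BB_INVALID.  */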
4928 /* Entry point for the pass.  Go over each basic block recording chains of
4929    immediate stores.  Upon encountering a terminating statement (as defined
4930    by stmt_terminates_chain_p) process the recorded stores and emit the widened
4934 pass_store_merging::execute (function *fun)
4937   hash_set<gimple *> orig_stmts;
4938   bool changed = false, open_chains = false;
4940   /* If the function can throw and catch non-call exceptions, we'll be trying
4941      to merge stores across different basic blocks so we need to first unsplit
4942      the EH edges in order to streamline the CFG of the function.  */
4943   if (cfun->can_throw_non_call_exceptions && cfun->eh)
4944     unsplit_eh_edges ();
4946   calculate_dominance_info (CDI_DOMINATORS);
4948   FOR_EACH_BB_FN (bb, fun)
4950       const basic_block_status bb_status = get_status_for_store_merging (bb);
4951       gimple_stmt_iterator gsi;
4953       if (open_chains && (bb_status == BB_INVALID || !single_pred_p (bb)))
4955           changed |= terminate_and_process_all_chains ();
4956           open_chains = false;
4959       if (bb_status == BB_INVALID)
4962       if (dump_file && (dump_flags & TDF_DETAILS))
4963         fprintf (dump_file, "Processing basic block <%d>:\n", bb->index);
4965       for (gsi = gsi_after_labels (bb); !gsi_end_p (gsi); gsi_next (&gsi))
4967           gimple *stmt = gsi_stmt (gsi);
4969           if (is_gimple_debug (stmt))
4972           if (gimple_has_volatile_ops (stmt) && !gimple_clobber_p (stmt))
4974               /* Terminate all chains.  */
4975               if (dump_file && (dump_flags & TDF_DETAILS))
4976                 fprintf (dump_file, "Volatile access terminates "
4978               changed |= terminate_and_process_all_chains ();
4979               open_chains = false;
4983           if (store_valid_for_store_merging_p (stmt))
4984             changed |= process_store (stmt);
4986             changed |= terminate_all_aliasing_chains (NULL, stmt);
4989       if (bb_status == BB_EXTENDED_VALID)
4993           changed |= terminate_and_process_all_chains ();
4994           open_chains = false;
4999   changed |= terminate_and_process_all_chains ();
5001   /* If the function can throw and catch non-call exceptions and something
5002      changed during the pass, then the CFG has (very likely) changed too.  */
5003   if (cfun->can_throw_non_call_exceptions && cfun->eh && changed)
5005       free_dominance_info (CDI_DOMINATORS);
5006       return TODO_cleanup_cfg;
5014 /* Construct and return a store merging pass object.  */
5017 make_pass_store_merging (gcc::context *ctxt)
5019   return new pass_store_merging (ctxt);
5024 namespace selftest {
5026 /* Selftests for store merging helpers.  */
5028 /* Assert that all elements of the byte arrays X and Y, both of length N
5032 verify_array_eq (unsigned char *x, unsigned char *y, unsigned int n)
5034   for (unsigned int i = 0; i < n; i++)
5038           fprintf (stderr, "Arrays do not match.  X:\n");
5039           dump_char_array (stderr, x, n);
5040           fprintf (stderr, "Y:\n");
5041           dump_char_array (stderr, y, n);
5043       ASSERT_EQ (x[i], y[i]);
5047 /* Test shift_bytes_in_array_left and that it carries bits across between
5051 verify_shift_bytes_in_array_left (void)
5054      00011111 | 11100000.  */
5055   unsigned char orig[2] = { 0xe0, 0x1f };
5056   unsigned char in[2];
5057   memcpy (in, orig, sizeof orig);
5059   unsigned char expected[2] = { 0x80, 0x7f };
5060   shift_bytes_in_array_left (in, sizeof (in), 2);
5061   verify_array_eq (in, expected, sizeof (in));
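  /* The carry here moves from byte 0 into byte 1: 0xe0 << 2 = 0x380, whose
     low byte is 0x80 and whose two carried bits turn 0x1f << 2 = 0x7c into
     0x7f.  */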
5063   memcpy (in, orig, sizeof orig);
5064   memcpy (expected, orig, sizeof orig);
5065   /* Check that shifting by zero doesn't change anything.  */
5066   shift_bytes_in_array_left (in, sizeof (in), 0);
5067   verify_array_eq (in, expected, sizeof (in));
5071 /* Test shift_bytes_in_array_right and that it carries bits across between
5075 verify_shift_bytes_in_array_right (void)
5078      00011111 | 11100000.  */
5079   unsigned char orig[2] = { 0x1f, 0xe0 };
5080   unsigned char in[2];
5081   memcpy (in, orig, sizeof orig);
5082   unsigned char expected[2] = { 0x07, 0xf8 };
5083   shift_bytes_in_array_right (in, sizeof (in), 2);
5084   verify_array_eq (in, expected, sizeof (in));
5086   memcpy (in, orig, sizeof orig);
5087   memcpy (expected, orig, sizeof orig);
5088   /* Check that shifting by zero doesn't change anything.  */
5089   shift_bytes_in_array_right (in, sizeof (in), 0);
5090   verify_array_eq (in, expected, sizeof (in));
5093 /* Test clear_bit_region that it clears exactly the bits asked and
5097 verify_clear_bit_region (void)
5099   /* Start with all bits set and test clearing various patterns in them.  */
5100   unsigned char orig[3] = { 0xff, 0xff, 0xff };
5101   unsigned char in[3];
5102   unsigned char expected[3];
5103   memcpy (in, orig, sizeof in);
5105   /* Check zeroing out all the bits.  */
5106   clear_bit_region (in, 0, 3 * BITS_PER_UNIT);
5107   expected[0] = expected[1] = expected[2] = 0;
5108   verify_array_eq (in, expected, sizeof in);
5110   memcpy (in, orig, sizeof in);
5111   /* Leave the first and last bits intact.  */
5112   clear_bit_region (in, 1, 3 * BITS_PER_UNIT - 2);
5116   verify_array_eq (in, expected, sizeof in);
5119 /* Test clear_bit_region_be that it clears exactly the bits asked and
5123 verify_clear_bit_region_be (void)
5125   /* Start with all bits set and test clearing various patterns in them.  */
5126   unsigned char orig[3] = { 0xff, 0xff, 0xff };
5127   unsigned char in[3];
5128   unsigned char expected[3];
5129   memcpy (in, orig, sizeof in);
5131   /* Check zeroing out all the bits.  */
5132   clear_bit_region_be (in, BITS_PER_UNIT - 1, 3 * BITS_PER_UNIT);
5133   expected[0] = expected[1] = expected[2] = 0;
5134   verify_array_eq (in, expected, sizeof in);
5136   memcpy (in, orig, sizeof in);
5137   /* Leave the first and last bits intact.  */
5138   clear_bit_region_be (in, BITS_PER_UNIT - 2, 3 * BITS_PER_UNIT - 2);
5142   verify_array_eq (in, expected, sizeof in);
5146 /* Run all of the selftests within this file.  */
5149 store_merging_c_tests (void)
5151   verify_shift_bytes_in_array_left ();
5152   verify_shift_bytes_in_array_right ();
5153   verify_clear_bit_region ();
5154   verify_clear_bit_region_be ();
5157 } // namespace selftest
5158 #endif /* CHECKING_P.  */