1 /* GIMPLE store merging and byte swapping passes.
2 Copyright (C) 2009-2019 Free Software Foundation, Inc.
3 Contributed by ARM Ltd.
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify it
8 under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
12 GCC is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 General Public License for more details.
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
21 /* The purpose of the store merging pass is to combine multiple memory stores
22 of constant values, values loaded from memory, bitwise operations on those,
23 or bit-field values, to consecutive locations, into fewer wider stores.
25 For example, if we have a sequence performing four byte stores to
26 consecutive memory locations:
31 we can transform this into a single 4-byte store if the target supports it:
32 [p] := imm1:imm2:imm3:imm4 concatenated according to endianness.
39 if there is no overlap can be transformed into a single 4-byte
40 load followed by single 4-byte store.
44 [p + 1B] := [q + 1B] ^ imm2;
45 [p + 2B] := [q + 2B] ^ imm3;
46 [p + 3B] := [q + 3B] ^ imm4;
47 if there is no overlap can be transformed into a single 4-byte
48 load, xored with imm1:imm2:imm3:imm4 and stored using a single 4-byte store.
52 [p:31] := val & 0x7FFFFFFF;
53 we can transform this into a single 4-byte store if the target supports it:
54 [p] := imm:(val & 0x7FFFFFFF) concatenated according to endianness.
56 The algorithm is applied to each basic block in three phases:
58 1) Scan through the basic block and record assignments to destinations
59 that can be expressed as a store to memory of a certain size at a certain
60 bit offset from base expressions we can handle. For bit-fields we also
61 record the surrounding bit region, i.e. bits that could be stored in
62 a read-modify-write operation when storing the bit-field. Record store
63 chains to different bases in a hash_map (m_stores) and make sure to
64 terminate such chains when appropriate (for example when the stored
65 values get used subsequently).
66 These stores can be a result of structure element initializers, array stores
67 etc. A store_immediate_info object is recorded for every such store.
68 Record as many such assignments to a single base as possible until a
69 statement that interferes with the store sequence is encountered.
70 Each store has up to 2 operands, which can be either a constant, a memory
71 load or an SSA name, from which the value to be stored can be computed.
72 At most one of the operands can be a constant. The operands are recorded
73 in store_operand_info struct.
75 2) Analyze the chains of stores recorded in phase 1) (i.e. the vector of
76 store_immediate_info objects) and coalesce contiguous stores into
77 merged_store_group objects. For bit-field stores, we don't need to
78 require the stores to be contiguous, just their surrounding bit regions
79 have to be contiguous. If the expression being stored is different
80 between adjacent stores, such as one store storing a constant and
81 the following one storing a value loaded from memory, or if the loaded memory
82 objects are not adjacent, a new merged_store_group is created as well.
84 For example, given the stores:
91 This phase would produce two merged_store_group objects, one recording the
92 two bytes stored in the memory region [p : p + 1] and another
93 recording the four bytes stored in the memory region [p + 3 : p + 6].
95 3) The merged_store_group objects produced in phase 2) are processed
96 to generate the sequence of wider stores that set the contiguous memory
97 regions to the sequence of bytes that correspond to it. This may emit
98 multiple stores per store group to handle contiguous stores that are not
99 of a size that is a power of 2. For example it can try to emit a 40-bit
100 store as a 32-bit store followed by an 8-bit store.
101 We try to emit stores as wide as we can while respecting STRICT_ALIGNMENT
102 or TARGET_SLOW_UNALIGNED_ACCESS settings.
104 Note on endianness and example:
105 Consider 2 contiguous 16-bit stores followed by 2 contiguous 8-bit stores:
111 The memory layout for little-endian (LE) and big-endian (BE) must be:
121 To merge these into a single 48-bit merged value 'val' in phase 2)
122 on little-endian we insert stores to higher (consecutive) bitpositions
123 into the most significant bits of the merged value.
124 The final merged value would be: 0xcdab56781234
126 For big-endian we insert stores to higher bitpositions into the least
127 significant bits of the merged value.
128 The final merged value would be: 0x12345678abcd
130 Then, in phase 3), we want to emit this 48-bit value as a 32-bit store
131 followed by a 16-bit store. Again, we must consider endianness when
132 breaking down the 48-bit value 'val' computed above.
133 For little endian we emit:
134 [p] (32-bit) := 0x56781234; // val & 0x0000ffffffff;
135 [p + 4B] (16-bit) := 0xcdab; // (val & 0xffff00000000) >> 32;
137 Whereas for big-endian we emit:
138 [p] (32-bit) := 0x12345678; // (val & 0xffffffff0000) >> 16;
139 [p + 4B] (16-bit) := 0xabcd; // val & 0x00000000ffff; */
143 #include "coretypes.h"
147 #include "builtins.h"
148 #include "fold-const.h"
149 #include "tree-pass.h"
151 #include "gimple-pretty-print.h"
153 #include "fold-const.h"
155 #include "print-tree.h"
156 #include "tree-hash-traits.h"
157 #include "gimple-iterator.h"
158 #include "gimplify.h"
159 #include "gimple-fold.h"
160 #include "stor-layout.h"
162 #include "tree-cfg.h"
165 #include "gimplify-me.h"
167 #include "expr.h" /* For get_bit_range. */
168 #include "optabs-tree.h"
169 #include "selftest.h"
171 /* The maximum size (in bits) of the stores this pass should generate. */
172 #define MAX_STORE_BITSIZE (BITS_PER_WORD)
173 #define MAX_STORE_BYTES (MAX_STORE_BITSIZE / BITS_PER_UNIT)
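/* For example, on a typical 64-bit target where BITS_PER_WORD is 64 and
   BITS_PER_UNIT is 8, the widest store this pass emits is 64 bits, i.e.
   MAX_STORE_BYTES == 8.  */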
175 /* Limit to bound the number of aliasing checks for loads with the same
176 vuse as the corresponding store. */
177 #define MAX_STORE_ALIAS_CHECKS 64
183 /* Number of hand-written 16-bit nop / bswaps found. */
186 /* Number of hand-written 32-bit nop / bswaps found. */
189 /* Number of hand-written 64-bit nop / bswaps found. */
191 } nop_stats, bswap_stats;
193 /* A symbolic number structure is used to detect byte permutation and selection
194 patterns of a source. To achieve that, its field N contains an artificial
195 number consisting of BITS_PER_MARKER sized markers tracking where each
196 byte comes from in the source:
198 0 - target byte has the value 0
199 FF - target byte has an unknown value (eg. due to sign extension)
200 1..size - marker value is the byte index in the source (0 for lsb).
202 To detect permutations on memory sources (arrays and structures), a symbolic
203 number is also associated:
204 - a base address BASE_ADDR and an OFFSET giving the address of the source;
205 - a range which gives the difference between the highest and lowest accessed
206 memory location to make such a symbolic number;
207 - the address SRC of the source element of lowest address as a convenience
208 to easily get BASE_ADDR + offset + lowest bytepos;
209 - number of expressions N_OPS bitwise ored together to represent
210 approximate cost of the computation.
212 Note 1: the range is different from size as size reflects the size of the
213 type of the current expression. For instance, for an array char a[],
214 (short) a[0] | (short) a[3] would have a size of 2 but a range of 4 while
215 (short) a[0] | ((short) a[0] << 1) would still have a size of 2 but this time a range of 1.
218 Note 2: for non-memory sources, range holds the same value as size.
220 Note 3: SRC points to the SSA_NAME in case of non-memory source. */
222 struct symbolic_number {
227   poly_int64_pod bytepos;
231   unsigned HOST_WIDE_INT range;
235 #define BITS_PER_MARKER 8
236 #define MARKER_MASK ((1 << BITS_PER_MARKER) - 1)
237 #define MARKER_BYTE_UNKNOWN MARKER_MASK
238 #define HEAD_MARKER(n, size) \
239 ((n) & ((uint64_t) MARKER_MASK << (((size) - 1) * BITS_PER_MARKER)))
241 /* The number which the find_bswap_or_nop_1 result should match in
242 order to have a nop. The number is masked according to the size of
243 the symbolic number before using it. */
244 #define CMPNOP (sizeof (int64_t) < 8 ? 0 : \
245 (uint64_t)0x08070605 << 32 | 0x04030201)
247 /* The number which the find_bswap_or_nop_1 result should match in
248 order to have a byte swap. The number is masked according to the
249 size of the symbolic number before using it. */
250 #define CMPXCHG (sizeof (int64_t) < 8 ? 0 : \
251 (uint64_t)0x01020304 << 32 | 0x05060708)
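/* Illustrative example (not part of the original comments): for a 4-byte
   value assembled byte by byte, a resulting symbolic number of 0x04030201
   equals CMPNOP masked to four markers and thus denotes an identity (a load
   in target byte order), whereas 0x01020304 equals the correspondingly
   shifted CMPXCHG and denotes a full byte swap.  */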
253 /* Perform a SHIFT or ROTATE operation by COUNT bits on symbolic
254 number N. Return false if the requested operation is not permitted
255 on a symbolic number. */
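/* Worked example (illustrative, not from the original comments): with a
   4-byte unsigned type and n->n == 0x04030201, an RSHIFT_EXPR by 8 bits
   drops the lowest marker and yields 0x00040302, while an LROTATE_EXPR by
   8 bits yields 0x03020104.  */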
258 do_shift_rotate (enum tree_code code,
259 		 struct symbolic_number *n,
262   int i, size = TYPE_PRECISION (n->type) / BITS_PER_UNIT;
263   unsigned head_marker;
266       || count >= TYPE_PRECISION (n->type)
267       || count % BITS_PER_UNIT != 0)
269   count = (count / BITS_PER_UNIT) * BITS_PER_MARKER;
271 /* Zero out the extra bits of N in order to avoid them being shifted
272 into the significant bits. */
273   if (size < 64 / BITS_PER_MARKER)
274     n->n &= ((uint64_t) 1 << (size * BITS_PER_MARKER)) - 1;
282   head_marker = HEAD_MARKER (n->n, size);
284 /* Arithmetic shift of signed type: result is dependent on the value. */
285   if (!TYPE_UNSIGNED (n->type) && head_marker)
286     for (i = 0; i < count / BITS_PER_MARKER; i++)
287       n->n |= (uint64_t) MARKER_BYTE_UNKNOWN
288 	      << ((size - 1 - i) * BITS_PER_MARKER);
291     n->n = (n->n << count) | (n->n >> ((size * BITS_PER_MARKER) - count));
294     n->n = (n->n >> count) | (n->n << ((size * BITS_PER_MARKER) - count));
299 /* Zero unused bits for size. */
300   if (size < 64 / BITS_PER_MARKER)
301     n->n &= ((uint64_t) 1 << (size * BITS_PER_MARKER)) - 1;
305 /* Perform sanity checking for the symbolic number N and the gimple statement STMT.  */
309 verify_symbolic_number_p (struct symbolic_number *n, gimple *stmt)
313   lhs_type = gimple_expr_type (stmt);
315   if (TREE_CODE (lhs_type) != INTEGER_TYPE)
318   if (TYPE_PRECISION (lhs_type) != TYPE_PRECISION (n->type))
324 /* Initialize the symbolic number N for the bswap pass from the base element
325 SRC manipulated by the bitwise OR expression. */
328 init_symbolic_number (struct symbolic_number
*n
, tree src
)
332 if (! INTEGRAL_TYPE_P (TREE_TYPE (src
)))
335 n
->base_addr
= n
->offset
= n
->alias_set
= n
->vuse
= NULL_TREE
;
338 /* Set up the symbolic number N by setting each byte to a value between 1 and
339 the byte size of rhs1. The highest order byte is set to n->size and the
340 lowest order byte to 1. */
341 n
->type
= TREE_TYPE (src
);
342 size
= TYPE_PRECISION (n
->type
);
343 if (size
% BITS_PER_UNIT
!= 0)
345 size
/= BITS_PER_UNIT
;
346 if (size
> 64 / BITS_PER_MARKER
)
352 if (size
< 64 / BITS_PER_MARKER
)
353 n
->n
&= ((uint64_t) 1 << (size
* BITS_PER_MARKER
)) - 1;
358 /* Check if STMT might be a byte swap or a nop from a memory source and return
359 the answer. If so, REF is that memory source and the base of the memory area
360 accessed and the offset of the access from that base are recorded in N. */
363 find_bswap_or_nop_load (gimple
*stmt
, tree ref
, struct symbolic_number
*n
)
365 /* Leaf node is an array or component ref. Memorize its base and
366 offset from base to compare to other such leaf node. */
367 poly_int64 bitsize
, bitpos
, bytepos
;
369 int unsignedp
, reversep
, volatilep
;
370 tree offset
, base_addr
;
372 /* Not prepared to handle PDP endian. */
373 if (BYTES_BIG_ENDIAN
!= WORDS_BIG_ENDIAN
)
376 if (!gimple_assign_load_p (stmt
) || gimple_has_volatile_ops (stmt
))
379 base_addr
= get_inner_reference (ref
, &bitsize
, &bitpos
, &offset
, &mode
,
380 &unsignedp
, &reversep
, &volatilep
);
382 if (TREE_CODE (base_addr
) == TARGET_MEM_REF
)
383 /* Do not rewrite TARGET_MEM_REF. */
385 else if (TREE_CODE (base_addr
) == MEM_REF
)
387 poly_offset_int bit_offset
= 0;
388 tree off
= TREE_OPERAND (base_addr
, 1);
390 if (!integer_zerop (off
))
392 poly_offset_int boff
= mem_ref_offset (base_addr
);
393 boff
<<= LOG2_BITS_PER_UNIT
;
397 base_addr
= TREE_OPERAND (base_addr
, 0);
399 /* Avoid returning a negative bitpos as this may wreak havoc later. */
400 if (maybe_lt (bit_offset
, 0))
402 tree byte_offset
= wide_int_to_tree
403 (sizetype
, bits_to_bytes_round_down (bit_offset
));
404 bit_offset
= num_trailing_bits (bit_offset
);
406 offset
= size_binop (PLUS_EXPR
, offset
, byte_offset
);
408 offset
= byte_offset
;
411 bitpos
+= bit_offset
.force_shwi ();
414 base_addr
= build_fold_addr_expr (base_addr
);
416 if (!multiple_p (bitpos
, BITS_PER_UNIT
, &bytepos
))
418 if (!multiple_p (bitsize
, BITS_PER_UNIT
))
423 if (!init_symbolic_number (n
, ref
))
425 n
->base_addr
= base_addr
;
427 n
->bytepos
= bytepos
;
428 n
->alias_set
= reference_alias_ptr_type (ref
);
429 n
->vuse
= gimple_vuse (stmt
);
433 /* Compute the symbolic number N representing the result of a bitwise OR on 2
434 symbolic numbers N1 and N2 whose source statements are respectively
435 SOURCE_STMT1 and SOURCE_STMT2. */
438 perform_symbolic_merge (gimple
*source_stmt1
, struct symbolic_number
*n1
,
439 gimple
*source_stmt2
, struct symbolic_number
*n2
,
440 struct symbolic_number
*n
)
445 struct symbolic_number
*n_start
;
447 tree rhs1
= gimple_assign_rhs1 (source_stmt1
);
448 if (TREE_CODE (rhs1
) == BIT_FIELD_REF
449 && TREE_CODE (TREE_OPERAND (rhs1
, 0)) == SSA_NAME
)
450 rhs1
= TREE_OPERAND (rhs1
, 0);
451 tree rhs2
= gimple_assign_rhs1 (source_stmt2
);
452 if (TREE_CODE (rhs2
) == BIT_FIELD_REF
453 && TREE_CODE (TREE_OPERAND (rhs2
, 0)) == SSA_NAME
)
454 rhs2
= TREE_OPERAND (rhs2
, 0);
456 /* Sources are different, cancel bswap if they are not memory locations with
457 the same base (array, structure, ...). */
461 HOST_WIDE_INT start1
, start2
, start_sub
, end_sub
, end1
, end2
, end
;
462 struct symbolic_number
*toinc_n_ptr
, *n_end
;
463 basic_block bb1
, bb2
;
465 if (!n1
->base_addr
|| !n2
->base_addr
466 || !operand_equal_p (n1
->base_addr
, n2
->base_addr
, 0))
469 if (!n1
->offset
!= !n2
->offset
470 || (n1
->offset
&& !operand_equal_p (n1
->offset
, n2
->offset
, 0)))
474 if (!(n2
->bytepos
- n1
->bytepos
).is_constant (&start2
))
480 start_sub
= start2
- start1
;
485 start_sub
= start1
- start2
;
488 bb1
= gimple_bb (source_stmt1
);
489 bb2
= gimple_bb (source_stmt2
);
490 if (dominated_by_p (CDI_DOMINATORS
, bb1
, bb2
))
491 source_stmt
= source_stmt1
;
493 source_stmt
= source_stmt2
;
495 /* Find the highest address at which a load is performed and
496 compute related info. */
497 end1
= start1
+ (n1
->range
- 1);
498 end2
= start2
+ (n2
->range
- 1);
502 end_sub
= end2
- end1
;
507 end_sub
= end1
- end2
;
509 n_end
= (end2
> end1
) ? n2
: n1
;
511 /* Find symbolic number whose lsb is the most significant. */
512 if (BYTES_BIG_ENDIAN
)
513 toinc_n_ptr
= (n_end
== n1
) ? n2
: n1
;
515 toinc_n_ptr
= (n_start
== n1
) ? n2
: n1
;
517 n
->range
= end
- MIN (start1
, start2
) + 1;
519 /* Check that the range of memory covered can be represented by
520 a symbolic number. */
521 if (n
->range
> 64 / BITS_PER_MARKER
)
524 /* Reinterpret byte marks in symbolic number holding the value of
525 bigger weight according to target endianness. */
526 inc
= BYTES_BIG_ENDIAN
? end_sub
: start_sub
;
527 size
= TYPE_PRECISION (n1
->type
) / BITS_PER_UNIT
;
528 for (i
= 0; i
< size
; i
++, inc
<<= BITS_PER_MARKER
)
531 = (toinc_n_ptr
->n
>> (i
* BITS_PER_MARKER
)) & MARKER_MASK
;
532 if (marker
&& marker
!= MARKER_BYTE_UNKNOWN
)
533 toinc_n_ptr
->n
+= inc
;
538 n
->range
= n1
->range
;
540 source_stmt
= source_stmt1
;
544 || alias_ptr_types_compatible_p (n1
->alias_set
, n2
->alias_set
))
545 n
->alias_set
= n1
->alias_set
;
547 n
->alias_set
= ptr_type_node
;
548 n
->vuse
= n_start
->vuse
;
549 n
->base_addr
= n_start
->base_addr
;
550 n
->offset
= n_start
->offset
;
551 n
->src
= n_start
->src
;
552 n
->bytepos
= n_start
->bytepos
;
553 n
->type
= n_start
->type
;
554 size
= TYPE_PRECISION (n
->type
) / BITS_PER_UNIT
;
556 for (i
= 0, mask
= MARKER_MASK
; i
< size
; i
++, mask
<<= BITS_PER_MARKER
)
558 uint64_t masked1
, masked2
;
560 masked1
= n1
->n
& mask
;
561 masked2
= n2
->n
& mask
;
562 if (masked1
&& masked2
&& masked1
!= masked2
)
565 n
->n
= n1
->n
| n2
->n
;
566 n
->n_ops
= n1
->n_ops
+ n2
->n_ops
;
571 /* find_bswap_or_nop_1 invokes itself recursively with N and tries to perform
572 the operation given by the rhs of STMT on the result. If the operation
573 could successfully be executed the function returns a gimple stmt whose
574 rhs's first tree is the expression of the source operand, and NULL otherwise.
578 find_bswap_or_nop_1 (gimple
*stmt
, struct symbolic_number
*n
, int limit
)
581 tree rhs1
, rhs2
= NULL
;
582 gimple
*rhs1_stmt
, *rhs2_stmt
, *source_stmt1
;
583 enum gimple_rhs_class rhs_class
;
585 if (!limit
|| !is_gimple_assign (stmt
))
588 rhs1
= gimple_assign_rhs1 (stmt
);
590 if (find_bswap_or_nop_load (stmt
, rhs1
, n
))
593 /* Handle BIT_FIELD_REF. */
594 if (TREE_CODE (rhs1
) == BIT_FIELD_REF
595 && TREE_CODE (TREE_OPERAND (rhs1
, 0)) == SSA_NAME
)
597 unsigned HOST_WIDE_INT bitsize
= tree_to_uhwi (TREE_OPERAND (rhs1
, 1));
598 unsigned HOST_WIDE_INT bitpos
= tree_to_uhwi (TREE_OPERAND (rhs1
, 2));
599 if (bitpos
% BITS_PER_UNIT
== 0
600 && bitsize
% BITS_PER_UNIT
== 0
601 && init_symbolic_number (n
, TREE_OPERAND (rhs1
, 0)))
603 /* Handle big-endian bit numbering in BIT_FIELD_REF. */
604 if (BYTES_BIG_ENDIAN
)
605 bitpos
= TYPE_PRECISION (n
->type
) - bitpos
- bitsize
;
608 if (!do_shift_rotate (RSHIFT_EXPR
, n
, bitpos
))
613 uint64_t tmp
= (1 << BITS_PER_UNIT
) - 1;
614 for (unsigned i
= 0; i
< bitsize
/ BITS_PER_UNIT
;
615 i
++, tmp
<<= BITS_PER_UNIT
)
616 mask
|= (uint64_t) MARKER_MASK
<< (i
* BITS_PER_MARKER
);
620 n
->type
= TREE_TYPE (rhs1
);
622 n
->range
= TYPE_PRECISION (n
->type
) / BITS_PER_UNIT
;
624 return verify_symbolic_number_p (n
, stmt
) ? stmt
: NULL
;
630 if (TREE_CODE (rhs1
) != SSA_NAME
)
633 code
= gimple_assign_rhs_code (stmt
);
634 rhs_class
= gimple_assign_rhs_class (stmt
);
635 rhs1_stmt
= SSA_NAME_DEF_STMT (rhs1
);
637 if (rhs_class
== GIMPLE_BINARY_RHS
)
638 rhs2
= gimple_assign_rhs2 (stmt
);
640 /* Handle unary rhs and binary rhs with integer constants as second operand.  */
643 if (rhs_class
== GIMPLE_UNARY_RHS
644 || (rhs_class
== GIMPLE_BINARY_RHS
645 && TREE_CODE (rhs2
) == INTEGER_CST
))
647 if (code
!= BIT_AND_EXPR
648 && code
!= LSHIFT_EXPR
649 && code
!= RSHIFT_EXPR
650 && code
!= LROTATE_EXPR
651 && code
!= RROTATE_EXPR
652 && !CONVERT_EXPR_CODE_P (code
))
655 source_stmt1
= find_bswap_or_nop_1 (rhs1_stmt
, n
, limit
- 1);
657 /* If find_bswap_or_nop_1 returned NULL, STMT is a leaf node and
658 we have to initialize the symbolic number. */
661 if (gimple_assign_load_p (stmt
)
662 || !init_symbolic_number (n
, rhs1
))
671 int i
, size
= TYPE_PRECISION (n
->type
) / BITS_PER_UNIT
;
672 uint64_t val
= int_cst_value (rhs2
), mask
= 0;
673 uint64_t tmp
= (1 << BITS_PER_UNIT
) - 1;
675 /* Only constants masking full bytes are allowed. */
676 for (i
= 0; i
< size
; i
++, tmp
<<= BITS_PER_UNIT
)
677 if ((val
& tmp
) != 0 && (val
& tmp
) != tmp
)
680 mask
|= (uint64_t) MARKER_MASK
<< (i
* BITS_PER_MARKER
);
689 if (!do_shift_rotate (code
, n
, (int) TREE_INT_CST_LOW (rhs2
)))
694 int i
, type_size
, old_type_size
;
697 type
= gimple_expr_type (stmt
);
698 type_size
= TYPE_PRECISION (type
);
699 if (type_size
% BITS_PER_UNIT
!= 0)
701 type_size
/= BITS_PER_UNIT
;
702 if (type_size
> 64 / BITS_PER_MARKER
)
705 /* Sign extension: result is dependent on the value. */
706 old_type_size
= TYPE_PRECISION (n
->type
) / BITS_PER_UNIT
;
707 if (!TYPE_UNSIGNED (n
->type
) && type_size
> old_type_size
708 && HEAD_MARKER (n
->n
, old_type_size
))
709 for (i
= 0; i
< type_size
- old_type_size
; i
++)
710 n
->n
|= (uint64_t) MARKER_BYTE_UNKNOWN
711 << ((type_size
- 1 - i
) * BITS_PER_MARKER
);
713 if (type_size
< 64 / BITS_PER_MARKER
)
715 /* If STMT casts to a smaller type mask out the bits not
716 belonging to the target type. */
717 n
->n
&= ((uint64_t) 1 << (type_size
* BITS_PER_MARKER
)) - 1;
721 n
->range
= type_size
;
727 return verify_symbolic_number_p (n
, stmt
) ? source_stmt1
: NULL
;
730 /* Handle binary rhs. */
732 if (rhs_class
== GIMPLE_BINARY_RHS
)
734 struct symbolic_number n1
, n2
;
735 gimple
*source_stmt
, *source_stmt2
;
737 if (code
!= BIT_IOR_EXPR
)
740 if (TREE_CODE (rhs2
) != SSA_NAME
)
743 rhs2_stmt
= SSA_NAME_DEF_STMT (rhs2
);
748 source_stmt1
= find_bswap_or_nop_1 (rhs1_stmt
, &n1
, limit
- 1);
753 source_stmt2
= find_bswap_or_nop_1 (rhs2_stmt
, &n2
, limit
- 1);
758 if (TYPE_PRECISION (n1
.type
) != TYPE_PRECISION (n2
.type
))
761 if (n1
.vuse
!= n2
.vuse
)
765 = perform_symbolic_merge (source_stmt1
, &n1
, source_stmt2
, &n2
, n
);
770 if (!verify_symbolic_number_p (n
, stmt
))
782 /* Helper for find_bswap_or_nop and try_coalesce_bswap to compute
783 *CMPXCHG, *CMPNOP and adjust *N. */
786 find_bswap_or_nop_finalize (struct symbolic_number
*n
, uint64_t *cmpxchg
,
792 /* The number which the find_bswap_or_nop_1 result should match in order
793 to have a full byte swap. The number is shifted to the right
794 according to the size of the symbolic number before using it. */
798 /* Find real size of result (highest non-zero byte). */
800 for (tmpn
= n
->n
, rsize
= 0; tmpn
; tmpn
>>= BITS_PER_MARKER
, rsize
++);
804 /* Zero out the bits corresponding to untouched bytes in the original gimple expression.  */
806 if (n
->range
< (int) sizeof (int64_t))
808 mask
= ((uint64_t) 1 << (n
->range
* BITS_PER_MARKER
)) - 1;
809 *cmpxchg
>>= (64 / BITS_PER_MARKER
- n
->range
) * BITS_PER_MARKER
;
813 /* Zero out the bits corresponding to unused bytes in the result of the
814 gimple expression. */
815 if (rsize
< n
->range
)
817 if (BYTES_BIG_ENDIAN
)
819 mask
= ((uint64_t) 1 << (rsize
* BITS_PER_MARKER
)) - 1;
821 *cmpnop
>>= (n
->range
- rsize
) * BITS_PER_MARKER
;
825 mask
= ((uint64_t) 1 << (rsize
* BITS_PER_MARKER
)) - 1;
826 *cmpxchg
>>= (n
->range
- rsize
) * BITS_PER_MARKER
;
832 n
->range
*= BITS_PER_UNIT
;
835 /* Check if STMT completes a bswap implementation or a read in a given
836 endianness consisting of ORs, SHIFTs and ANDs and sets *BSWAP
837 accordingly. It also sets N to represent the kind of operations
838 performed: size of the resulting expression and whether it works on
839 a memory source, and if so alias-set and vuse. At last, the
840 function returns a stmt whose rhs's first tree is the source expression.
844 find_bswap_or_nop (gimple
*stmt
, struct symbolic_number
*n
, bool *bswap
)
846 /* The last parameter determines the depth search limit. It usually
847 correlates directly to the number n of bytes to be touched. We
848 increase that number by log2(n) + 1 here in order to also
849 cover signed -> unsigned conversions of the src operand as can be seen
850 in libgcc, and for initial shift/and operation of the src operand. */
851   int limit = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (gimple_expr_type (stmt)));
852   limit += 1 + (int) ceil_log2 ((unsigned HOST_WIDE_INT) limit);
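  /* For instance (illustrative): for a 4-byte expression the search depth
     is limit = 4 + 1 + ceil_log2 (4) = 7 statements.  */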
853 gimple
*ins_stmt
= find_bswap_or_nop_1 (stmt
, n
, limit
);
858 uint64_t cmpxchg
, cmpnop
;
859 find_bswap_or_nop_finalize (n
, &cmpxchg
, &cmpnop
);
861 /* A complete byte swap should make the symbolic number start with
862 the largest digit in the highest order byte. Unchanged symbolic
863 number indicates a read with same endianness as target architecture. */
866 else if (n
->n
== cmpxchg
)
871 /* Useless bit manipulation performed by code. */
872 if (!n
->base_addr
&& n
->n
== cmpnop
&& n
->n_ops
== 1)
878 const pass_data pass_data_optimize_bswap
=
880 GIMPLE_PASS
, /* type */
882 OPTGROUP_NONE
, /* optinfo_flags */
884 PROP_ssa
, /* properties_required */
885 0, /* properties_provided */
886 0, /* properties_destroyed */
887 0, /* todo_flags_start */
888 0, /* todo_flags_finish */
891 class pass_optimize_bswap
: public gimple_opt_pass
894 pass_optimize_bswap (gcc::context
*ctxt
)
895 : gimple_opt_pass (pass_data_optimize_bswap
, ctxt
)
898 /* opt_pass methods: */
899 virtual bool gate (function
*)
901 return flag_expensive_optimizations
&& optimize
&& BITS_PER_UNIT
== 8;
904 virtual unsigned int execute (function
*);
906 }; // class pass_optimize_bswap
908 /* Perform the bswap optimization: replace the expression computed in the rhs
909 of gsi_stmt (GSI) (or if NULL add instead of replace) by an equivalent
910 bswap, load or load + bswap expression.
911 Which of these alternatives replaces the rhs is given by N->base_addr (non
912 null if a load is needed) and BSWAP. The type, VUSE and alias-set of the
913 load to perform are also given in N while the builtin bswap call is given
914 in FNDECL. Finally, if a load is involved, INS_STMT refers to one of the
915 load statements involved to construct the rhs in gsi_stmt (GSI) and
916 N->range gives the size of the rhs expression for maintaining some statistics.
919 Note that if the replacement involves a load and if gsi_stmt (GSI) is
920 non-NULL, that stmt is moved just after INS_STMT to do the load with the
921 same VUSE, which can lead to gsi_stmt (GSI) changing basic block. */
924 bswap_replace (gimple_stmt_iterator gsi
, gimple
*ins_stmt
, tree fndecl
,
925 tree bswap_type
, tree load_type
, struct symbolic_number
*n
,
928 tree src
, tmp
, tgt
= NULL_TREE
;
931 gimple
*cur_stmt
= gsi_stmt (gsi
);
934 tgt
= gimple_assign_lhs (cur_stmt
);
936 /* Need to load the value from memory first. */
939 gimple_stmt_iterator gsi_ins
= gsi
;
941 gsi_ins
= gsi_for_stmt (ins_stmt
);
942 tree addr_expr
, addr_tmp
, val_expr
, val_tmp
;
943 tree load_offset_ptr
, aligned_load_type
;
945 unsigned align
= get_object_alignment (src
);
946 poly_int64 load_offset
= 0;
950 basic_block ins_bb
= gimple_bb (ins_stmt
);
951 basic_block cur_bb
= gimple_bb (cur_stmt
);
952 if (!dominated_by_p (CDI_DOMINATORS
, cur_bb
, ins_bb
))
955 /* Move cur_stmt just before one of the loads of the original
956 to ensure it has the same VUSE. See PR61517 for what could go wrong.  */
958 if (gimple_bb (cur_stmt
) != gimple_bb (ins_stmt
))
959 reset_flow_sensitive_info (gimple_assign_lhs (cur_stmt
));
960 gsi_move_before (&gsi
, &gsi_ins
);
961 gsi
= gsi_for_stmt (cur_stmt
);
966 /* Compute address to load from and cast according to the size of the load type.  */
968 addr_expr
= build_fold_addr_expr (src
);
969 if (is_gimple_mem_ref_addr (addr_expr
))
970 addr_tmp
= unshare_expr (addr_expr
);
973 addr_tmp
= unshare_expr (n
->base_addr
);
974 if (!is_gimple_mem_ref_addr (addr_tmp
))
975 addr_tmp
= force_gimple_operand_gsi_1 (&gsi
, addr_tmp
,
976 is_gimple_mem_ref_addr
,
979 load_offset
= n
->bytepos
;
983 = force_gimple_operand_gsi (&gsi
, unshare_expr (n
->offset
),
984 true, NULL_TREE
, true,
987 = gimple_build_assign (make_ssa_name (TREE_TYPE (addr_tmp
)),
988 POINTER_PLUS_EXPR
, addr_tmp
, off
);
989 gsi_insert_before (&gsi
, stmt
, GSI_SAME_STMT
);
990 addr_tmp
= gimple_assign_lhs (stmt
);
994 /* Perform the load. */
995 aligned_load_type
= load_type
;
996 if (align
< TYPE_ALIGN (load_type
))
997 aligned_load_type
= build_aligned_type (load_type
, align
);
998 load_offset_ptr
= build_int_cst (n
->alias_set
, load_offset
);
999 val_expr
= fold_build2 (MEM_REF
, aligned_load_type
, addr_tmp
,
1005 nop_stats
.found_16bit
++;
1006 else if (n
->range
== 32)
1007 nop_stats
.found_32bit
++;
1010 gcc_assert (n
->range
== 64);
1011 nop_stats
.found_64bit
++;
1014 /* Convert the result of load if necessary. */
1015 if (tgt
&& !useless_type_conversion_p (TREE_TYPE (tgt
), load_type
))
1017 val_tmp
= make_temp_ssa_name (aligned_load_type
, NULL
,
1019 load_stmt
= gimple_build_assign (val_tmp
, val_expr
);
1020 gimple_set_vuse (load_stmt
, n
->vuse
);
1021 gsi_insert_before (&gsi
, load_stmt
, GSI_SAME_STMT
);
1022 gimple_assign_set_rhs_with_ops (&gsi
, NOP_EXPR
, val_tmp
);
1023 update_stmt (cur_stmt
);
1027 gimple_assign_set_rhs_with_ops (&gsi
, MEM_REF
, val_expr
);
1028 gimple_set_vuse (cur_stmt
, n
->vuse
);
1029 update_stmt (cur_stmt
);
1033 tgt
= make_ssa_name (load_type
);
1034 cur_stmt
= gimple_build_assign (tgt
, MEM_REF
, val_expr
);
1035 gimple_set_vuse (cur_stmt
, n
->vuse
);
1036 gsi_insert_before (&gsi
, cur_stmt
, GSI_SAME_STMT
);
1042 "%d bit load in target endianness found at: ",
1044 print_gimple_stmt (dump_file
, cur_stmt
, 0);
1050 val_tmp
= make_temp_ssa_name (aligned_load_type
, NULL
, "load_dst");
1051 load_stmt
= gimple_build_assign (val_tmp
, val_expr
);
1052 gimple_set_vuse (load_stmt
, n
->vuse
);
1053 gsi_insert_before (&gsi
, load_stmt
, GSI_SAME_STMT
);
1060 if (tgt
&& !useless_type_conversion_p (TREE_TYPE (tgt
), TREE_TYPE (src
)))
1062 if (!is_gimple_val (src
))
1064 g
= gimple_build_assign (tgt
, NOP_EXPR
, src
);
1067 g
= gimple_build_assign (tgt
, src
);
1071 nop_stats
.found_16bit
++;
1072 else if (n
->range
== 32)
1073 nop_stats
.found_32bit
++;
1076 gcc_assert (n
->range
== 64);
1077 nop_stats
.found_64bit
++;
1082 "%d bit reshuffle in target endianness found at: ",
1085 print_gimple_stmt (dump_file
, cur_stmt
, 0);
1088 print_generic_expr (dump_file
, tgt
, TDF_NONE
);
1089 fprintf (dump_file
, "\n");
1093 gsi_replace (&gsi
, g
, true);
1096 else if (TREE_CODE (src
) == BIT_FIELD_REF
)
1097 src
= TREE_OPERAND (src
, 0);
1100 bswap_stats
.found_16bit
++;
1101 else if (n
->range
== 32)
1102 bswap_stats
.found_32bit
++;
1105 gcc_assert (n
->range
== 64);
1106 bswap_stats
.found_64bit
++;
1111 /* Convert the src expression if necessary. */
1112 if (!useless_type_conversion_p (TREE_TYPE (tmp
), bswap_type
))
1114 gimple
*convert_stmt
;
1116 tmp
= make_temp_ssa_name (bswap_type
, NULL
, "bswapsrc");
1117 convert_stmt
= gimple_build_assign (tmp
, NOP_EXPR
, src
);
1118 gsi_insert_before (&gsi
, convert_stmt
, GSI_SAME_STMT
);
1121 /* Canonical form for 16 bit bswap is a rotate expression. Only 16bit values
1122 are considered as rotation of 2N bit values by N bits is generally not
1123 equivalent to a bswap. Consider for instance 0x01020304 r>> 16 which
1124 gives 0x03040102 while a bswap for that value is 0x04030201. */
1125 if (bswap
&& n
->range
== 16)
1127 tree count
= build_int_cst (NULL
, BITS_PER_UNIT
);
1128 src
= fold_build2 (LROTATE_EXPR
, bswap_type
, tmp
, count
);
1129 bswap_stmt
= gimple_build_assign (NULL
, src
);
1132 bswap_stmt
= gimple_build_call (fndecl
, 1, tmp
);
1134 if (tgt
== NULL_TREE
)
1135 tgt
= make_ssa_name (bswap_type
);
1138 /* Convert the result if necessary. */
1139 if (!useless_type_conversion_p (TREE_TYPE (tgt
), bswap_type
))
1141 gimple
*convert_stmt
;
1143 tmp
= make_temp_ssa_name (bswap_type
, NULL
, "bswapdst");
1144 convert_stmt
= gimple_build_assign (tgt
, NOP_EXPR
, tmp
);
1145 gsi_insert_after (&gsi
, convert_stmt
, GSI_SAME_STMT
);
1148 gimple_set_lhs (bswap_stmt
, tmp
);
1152 fprintf (dump_file
, "%d bit bswap implementation found at: ",
1155 print_gimple_stmt (dump_file
, cur_stmt
, 0);
1158 print_generic_expr (dump_file
, tgt
, TDF_NONE
);
1159 fprintf (dump_file
, "\n");
1165 gsi_insert_after (&gsi
, bswap_stmt
, GSI_SAME_STMT
);
1166 gsi_remove (&gsi
, true);
1169 gsi_insert_before (&gsi
, bswap_stmt
, GSI_SAME_STMT
);
1173 /* Find manual byte swap implementations as well as loads in a given
1174 endianness. Byte swaps are turned into a bswap builtin invocation,
1175 while endian loads are converted to a bswap builtin invocation or a
1176 simple load according to the target endianness. */
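/* An illustrative sketch (not part of this file; the function name is made
   up) of a pattern recognized here, assuming a 32-bit unsigned int:

     uint32_t
     my_bswap32 (uint32_t x)
     {
       return ((x & 0x000000ff) << 24)
	      | ((x & 0x0000ff00) << 8)
	      | ((x & 0x00ff0000) >> 8)
	      | ((x & 0xff000000) >> 24);
     }

   When the target provides a bswap instruction (and BUILT_IN_BSWAP32 is
   available), this is replaced by a single __builtin_bswap32 (x) call.  */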
1179 pass_optimize_bswap::execute (function
*fun
)
1182 bool bswap32_p
, bswap64_p
;
1183 bool changed
= false;
1184 tree bswap32_type
= NULL_TREE
, bswap64_type
= NULL_TREE
;
1186 bswap32_p
= (builtin_decl_explicit_p (BUILT_IN_BSWAP32
)
1187 && optab_handler (bswap_optab
, SImode
) != CODE_FOR_nothing
);
1188 bswap64_p
= (builtin_decl_explicit_p (BUILT_IN_BSWAP64
)
1189 && (optab_handler (bswap_optab
, DImode
) != CODE_FOR_nothing
1190 || (bswap32_p
&& word_mode
== SImode
)));
1192 /* Determine the argument type of the builtins. The code later on
1193 assumes that the return and argument type are the same. */
1196 tree fndecl
= builtin_decl_explicit (BUILT_IN_BSWAP32
);
1197 bswap32_type
= TREE_VALUE (TYPE_ARG_TYPES (TREE_TYPE (fndecl
)));
1202 tree fndecl
= builtin_decl_explicit (BUILT_IN_BSWAP64
);
1203 bswap64_type
= TREE_VALUE (TYPE_ARG_TYPES (TREE_TYPE (fndecl
)));
1206 memset (&nop_stats
, 0, sizeof (nop_stats
));
1207 memset (&bswap_stats
, 0, sizeof (bswap_stats
));
1208 calculate_dominance_info (CDI_DOMINATORS
);
1210 FOR_EACH_BB_FN (bb
, fun
)
1212 gimple_stmt_iterator gsi
;
1214 /* We do a reverse scan for bswap patterns to make sure we get the
1215 widest match. As bswap pattern matching doesn't handle previously
1216 inserted smaller bswap replacements as sub-patterns, the wider
1217 variant wouldn't be detected. */
1218 for (gsi
= gsi_last_bb (bb
); !gsi_end_p (gsi
);)
1220 gimple
*ins_stmt
, *cur_stmt
= gsi_stmt (gsi
);
1221 tree fndecl
= NULL_TREE
, bswap_type
= NULL_TREE
, load_type
;
1222 enum tree_code code
;
1223 struct symbolic_number n
;
1226 /* This gsi_prev (&gsi) is not part of the for loop because cur_stmt
1227 might be moved to a different basic block by bswap_replace and gsi
1228 must not point to it if that's the case. Moving the gsi_prev
1229 there makes sure that gsi points to the statement previous to
1230 cur_stmt while still making sure that all statements are
1231 considered in this basic block. */
1234 if (!is_gimple_assign (cur_stmt
))
1237 code
= gimple_assign_rhs_code (cur_stmt
);
1242 if (!tree_fits_uhwi_p (gimple_assign_rhs2 (cur_stmt
))
1243 || tree_to_uhwi (gimple_assign_rhs2 (cur_stmt
))
1253 ins_stmt
= find_bswap_or_nop (cur_stmt
, &n
, &bswap
);
1261 /* Already in canonical form, nothing to do. */
1262 if (code
== LROTATE_EXPR
|| code
== RROTATE_EXPR
)
1264 load_type
= bswap_type
= uint16_type_node
;
1267 load_type
= uint32_type_node
;
1270 fndecl
= builtin_decl_explicit (BUILT_IN_BSWAP32
);
1271 bswap_type
= bswap32_type
;
1275 load_type
= uint64_type_node
;
1278 fndecl
= builtin_decl_explicit (BUILT_IN_BSWAP64
);
1279 bswap_type
= bswap64_type
;
1286 if (bswap
&& !fndecl
&& n
.range
!= 16)
1289 if (bswap_replace (gsi_for_stmt (cur_stmt
), ins_stmt
, fndecl
,
1290 bswap_type
, load_type
, &n
, bswap
))
1295 statistics_counter_event (fun
, "16-bit nop implementations found",
1296 nop_stats
.found_16bit
);
1297 statistics_counter_event (fun
, "32-bit nop implementations found",
1298 nop_stats
.found_32bit
);
1299 statistics_counter_event (fun
, "64-bit nop implementations found",
1300 nop_stats
.found_64bit
);
1301 statistics_counter_event (fun
, "16-bit bswap implementations found",
1302 bswap_stats
.found_16bit
);
1303 statistics_counter_event (fun
, "32-bit bswap implementations found",
1304 bswap_stats
.found_32bit
);
1305 statistics_counter_event (fun
, "64-bit bswap implementations found",
1306 bswap_stats
.found_64bit
);
1308 return (changed
? TODO_update_ssa
: 0);
1314 make_pass_optimize_bswap (gcc::context
*ctxt
)
1316 return new pass_optimize_bswap (ctxt
);
1321 /* Struct recording one operand for the store, which is either a constant,
1322 then VAL represents the constant and all the other fields are zero, or
1323 a memory load, then VAL represents the reference, BASE_ADDR is non-NULL
1324 and the other fields also reflect the memory load, or an SSA name, then
1325 VAL represents the SSA name and all the other fields are zero.  */
1327 struct store_operand_info
1331 poly_uint64 bitsize
;
1333 poly_uint64 bitregion_start
;
1334 poly_uint64 bitregion_end
;
1337 store_operand_info ();
1340 store_operand_info::store_operand_info ()
1341 : val (NULL_TREE
), base_addr (NULL_TREE
), bitsize (0), bitpos (0),
1342 bitregion_start (0), bitregion_end (0), stmt (NULL
), bit_not_p (false)
1346 /* Struct recording the information about a single store of an immediate
1347 to memory. These are created in the first phase and coalesced into
1348 merged_store_group objects in the second phase. */
1350 struct store_immediate_info
1352 unsigned HOST_WIDE_INT bitsize
;
1353 unsigned HOST_WIDE_INT bitpos
;
1354 unsigned HOST_WIDE_INT bitregion_start
;
1355 /* This is one past the last bit of the bit region. */
1356 unsigned HOST_WIDE_INT bitregion_end
;
1359 /* INTEGER_CST for constant stores, MEM_REF for memory copy,
1360 BIT_*_EXPR for logical bitwise operation, BIT_INSERT_EXPR
1362 LROTATE_EXPR if it can be only bswap optimized and
1363 ops are not really meaningful.
1364 NOP_EXPR if bswap optimization detected identity, ops
1365 are not meaningful. */
1366 enum tree_code rhs_code
;
1367 /* Two fields for bswap optimization purposes. */
1368 struct symbolic_number n
;
1370 /* True if BIT_{AND,IOR,XOR}_EXPR result is inverted before storing. */
1372 /* True if ops have been swapped and thus ops[1] represents
1373 rhs1 of BIT_{AND,IOR,XOR}_EXPR and ops[0] represents rhs2. */
1375 /* Operands. For BIT_*_EXPR rhs_code both operands are used, otherwise
1376 just the first one. */
1377 store_operand_info ops
[2];
1378 store_immediate_info (unsigned HOST_WIDE_INT
, unsigned HOST_WIDE_INT
,
1379 unsigned HOST_WIDE_INT
, unsigned HOST_WIDE_INT
,
1380 gimple
*, unsigned int, enum tree_code
,
1381 struct symbolic_number
&, gimple
*, bool,
1382 const store_operand_info
&,
1383 const store_operand_info
&);
1386 store_immediate_info::store_immediate_info (unsigned HOST_WIDE_INT bs
,
1387 unsigned HOST_WIDE_INT bp
,
1388 unsigned HOST_WIDE_INT brs
,
1389 unsigned HOST_WIDE_INT bre
,
1392 enum tree_code rhscode
,
1393 struct symbolic_number
&nr
,
1396 const store_operand_info
&op0r
,
1397 const store_operand_info
&op1r
)
1398 : bitsize (bs
), bitpos (bp
), bitregion_start (brs
), bitregion_end (bre
),
1399 stmt (st
), order (ord
), rhs_code (rhscode
), n (nr
),
1400 ins_stmt (ins_stmtp
), bit_not_p (bitnotp
), ops_swapped_p (false)
1401 #if __cplusplus >= 201103L
1402 , ops
{ op0r
, op1r
}
1412 /* Struct representing a group of stores to contiguous memory locations.
1413 These are produced by the second phase (coalescing) and consumed in the
1414 third phase that outputs the widened stores. */
1416 struct merged_store_group
1418 unsigned HOST_WIDE_INT start
;
1419 unsigned HOST_WIDE_INT width
;
1420 unsigned HOST_WIDE_INT bitregion_start
;
1421 unsigned HOST_WIDE_INT bitregion_end
;
1422 /* The size of the allocated memory for val and mask. */
1423 unsigned HOST_WIDE_INT buf_size
;
1424 unsigned HOST_WIDE_INT align_base
;
1425 poly_uint64 load_align_base
[2];
1428 unsigned int load_align
[2];
1429 unsigned int first_order
;
1430 unsigned int last_order
;
1432 bool only_constants
;
1433 unsigned int first_nonmergeable_order
;
1435 auto_vec
<store_immediate_info
*> stores
;
1436 /* We record the first and last original statements in the sequence because
1437 we'll need their vuse/vdef and replacement position. It's easier to keep
1438 track of them separately as 'stores' is reordered by apply_stores. */
1442 unsigned char *mask
;
1444 merged_store_group (store_immediate_info
*);
1445 ~merged_store_group ();
1446 bool can_be_merged_into (store_immediate_info
*);
1447 void merge_into (store_immediate_info
*);
1448 void merge_overlapping (store_immediate_info
*);
1449 bool apply_stores ();
1451 void do_merge (store_immediate_info
*);
1454 /* Debug helper. Dump LEN elements of byte array PTR to FD in hex. */
1457 dump_char_array (FILE *fd
, unsigned char *ptr
, unsigned int len
)
1462 for (unsigned int i
= 0; i
< len
; i
++)
1463 fprintf (fd
, "%02x ", ptr
[i
]);
1467 /* Shift left the bytes in PTR of SZ elements by AMNT bits, carrying over the
1468 bits between adjacent elements. AMNT should be within [0, BITS_PER_UNIT). Example, AMNT = 2:
1471 00011111|11100000 << 2 = 01111111|10000000
1472 PTR[1] | PTR[0] PTR[1] | PTR[0]. */
1475 shift_bytes_in_array (unsigned char *ptr
, unsigned int sz
, unsigned int amnt
)
1480 unsigned char carry_over
= 0U;
1481 unsigned char carry_mask
= (~0U) << (unsigned char) (BITS_PER_UNIT
- amnt
);
1482 unsigned char clear_mask
= (~0U) << amnt
;
1484 for (unsigned int i
= 0; i
< sz
; i
++)
1486 unsigned prev_carry_over
= carry_over
;
1487 carry_over
= (ptr
[i
] & carry_mask
) >> (BITS_PER_UNIT
- amnt
);
1492 ptr
[i
] &= clear_mask
;
1493 ptr
[i
] |= prev_carry_over
;
1498 /* Like shift_bytes_in_array but for big-endian.
1499 Shift right the bytes in PTR of SZ elements by AMNT bits, carrying over the
1500 bits between adjacent elements. AMNT should be within [0, BITS_PER_UNIT). Example, AMNT = 2:
1503 00011111|11100000 >> 2 = 00000111|11111000
1504 PTR[0] | PTR[1] PTR[0] | PTR[1]. */
1507 shift_bytes_in_array_right (unsigned char *ptr
, unsigned int sz
,
1513 unsigned char carry_over
= 0U;
1514 unsigned char carry_mask
= ~(~0U << amnt
);
1516 for (unsigned int i
= 0; i
< sz
; i
++)
1518 unsigned prev_carry_over
= carry_over
;
1519 carry_over
= ptr
[i
] & carry_mask
;
1521 carry_over
<<= (unsigned char) BITS_PER_UNIT
- amnt
;
1523 ptr
[i
] |= prev_carry_over
;
1527 /* Clear out LEN bits starting from bit START in the byte array
1528 PTR. This clears the bits to the *right* from START.
1529 START must be within [0, BITS_PER_UNIT) and counts starting from
1530 the least significant bit. */
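/* Worked example (illustrative, not from the original comments):
   clear_bit_region_be (ptr, 5, 3) on a byte holding 0xff clears bits 5, 4
   and 3, leaving 0xc7 (binary 11000111).  */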
1533 clear_bit_region_be (unsigned char *ptr
, unsigned int start
,
1538 /* Clear len bits to the right of start. */
1539 else if (len
<= start
+ 1)
1541 unsigned char mask
= (~(~0U << len
));
1542 mask
= mask
<< (start
+ 1U - len
);
1545 else if (start
!= BITS_PER_UNIT
- 1)
1547 clear_bit_region_be (ptr
, start
, (start
% BITS_PER_UNIT
) + 1);
1548 clear_bit_region_be (ptr
+ 1, BITS_PER_UNIT
- 1,
1549 len
- (start
% BITS_PER_UNIT
) - 1);
1551 else if (start
== BITS_PER_UNIT
- 1
1552 && len
> BITS_PER_UNIT
)
1554 unsigned int nbytes
= len
/ BITS_PER_UNIT
;
1555 memset (ptr
, 0, nbytes
);
1556 if (len
% BITS_PER_UNIT
!= 0)
1557 clear_bit_region_be (ptr
+ nbytes
, BITS_PER_UNIT
- 1,
1558 len
% BITS_PER_UNIT
);
1564 /* In the byte array PTR clear the bit region starting at bit
1565 START that is LEN bits wide.
1566 For regions spanning multiple bytes do this recursively until we reach
1567 zero LEN or a region contained within a single byte. */
1570 clear_bit_region (unsigned char *ptr
, unsigned int start
,
1573 /* Degenerate base case. */
1576 else if (start
>= BITS_PER_UNIT
)
1577 clear_bit_region (ptr
+ 1, start
- BITS_PER_UNIT
, len
);
1578 /* Second base case. */
1579 else if ((start
+ len
) <= BITS_PER_UNIT
)
1581 unsigned char mask
= (~0U) << (unsigned char) (BITS_PER_UNIT
- len
);
1582 mask
>>= BITS_PER_UNIT
- (start
+ len
);
1588 /* Clear most significant bits in a byte and proceed with the next byte. */
1589 else if (start
!= 0)
1591 clear_bit_region (ptr
, start
, BITS_PER_UNIT
- start
);
1592 clear_bit_region (ptr
+ 1, 0, len
- (BITS_PER_UNIT
- start
));
1594 /* Whole bytes need to be cleared. */
1595 else if (start
== 0 && len
> BITS_PER_UNIT
)
1597 unsigned int nbytes
= len
/ BITS_PER_UNIT
;
1598 /* We could recurse on each byte but we clear whole bytes, so a simple memset will do.  */
1600 memset (ptr
, '\0', nbytes
);
1601 /* Clear the remaining sub-byte region if there is one. */
1602 if (len
% BITS_PER_UNIT
!= 0)
1603 clear_bit_region (ptr
+ nbytes
, 0, len
% BITS_PER_UNIT
);
1609 /* Write BITLEN bits of EXPR to the byte array PTR at
1610 bit position BITPOS. PTR should contain TOTAL_BYTES elements.
1611 Return true if the operation succeeded. */
1614 encode_tree_to_bitpos (tree expr
, unsigned char *ptr
, int bitlen
, int bitpos
,
1615 unsigned int total_bytes
)
1617 unsigned int first_byte
= bitpos
/ BITS_PER_UNIT
;
1618 bool sub_byte_op_p
= ((bitlen
% BITS_PER_UNIT
)
1619 || (bitpos
% BITS_PER_UNIT
)
1620 || !int_mode_for_size (bitlen
, 0).exists ());
1622 = (TREE_CODE (expr
) == CONSTRUCTOR
1623 && CONSTRUCTOR_NELTS (expr
) == 0
1624 && TYPE_SIZE_UNIT (TREE_TYPE (expr
))
1625 && tree_fits_uhwi_p (TYPE_SIZE_UNIT (TREE_TYPE (expr
))));
1629 if (first_byte
>= total_bytes
)
1631 total_bytes
-= first_byte
;
1634 unsigned HOST_WIDE_INT rhs_bytes
1635 = tree_to_uhwi (TYPE_SIZE_UNIT (TREE_TYPE (expr
)));
1636 if (rhs_bytes
> total_bytes
)
1638 memset (ptr
+ first_byte
, '\0', rhs_bytes
);
1641 return native_encode_expr (expr
, ptr
+ first_byte
, total_bytes
) != 0;
1645 We are writing a non byte-sized quantity or at a position that is not
1647 |--------|--------|--------| ptr + first_byte
1649 xxx xxxxxxxx xxx< bp>
1652 First native_encode_expr EXPR into a temporary buffer and shift each
1653 byte in the buffer by 'bp' (carrying the bits over as necessary).
1654 |00000000|00xxxxxx|xxxxxxxx| << bp = |000xxxxx|xxxxxxxx|xxx00000|
1655 <------bitlen---->< bp>
1656 Then we clear the destination bits:
1657 |---00000|00000000|000-----| ptr + first_byte
1658 <-------bitlen--->< bp>
1660 Finally we ORR the bytes of the shifted EXPR into the cleared region:
1661 |---xxxxx||xxxxxxxx||xxx-----| ptr + first_byte.
1664 We are writing a non byte-sized quantity or at a position that is not
1666 ptr + first_byte |--------|--------|--------|
1668 <bp >xxx xxxxxxxx xxx
1671 First native_encode_expr EXPR into a temporary buffer and shift each
1672 byte in the buffer to the right by (carrying the bits over as necessary).
1673 We shift by as much as needed to align the most significant bit of EXPR
1675 |00xxxxxx|xxxxxxxx| >> 3 = |00000xxx|xxxxxxxx|xxxxx000|
1676 <---bitlen----> <bp ><-----bitlen----->
1677 Then we clear the destination bits:
1678 ptr + first_byte |-----000||00000000||00000---|
1679 <bp ><-------bitlen----->
1681 Finally we ORR the bytes of the shifted EXPR into the cleared region:
1682 ptr + first_byte |---xxxxx||xxxxxxxx||xxx-----|.
1683 The awkwardness comes from the fact that bitpos is counted from the
1684 most significant bit of a byte. */
1686 /* We must be dealing with fixed-size data at this point, since the
1687 total size is also fixed. */
1688 unsigned int byte_size
;
1691 unsigned HOST_WIDE_INT rhs_bytes
1692 = tree_to_uhwi (TYPE_SIZE_UNIT (TREE_TYPE (expr
)));
1693 if (rhs_bytes
> total_bytes
)
1695 byte_size
= rhs_bytes
;
1699 fixed_size_mode mode
1700 = as_a
<fixed_size_mode
> (TYPE_MODE (TREE_TYPE (expr
)));
1701 byte_size
= GET_MODE_SIZE (mode
);
1703 /* Allocate an extra byte so that we have space to shift into. */
1705 unsigned char *tmpbuf
= XALLOCAVEC (unsigned char, byte_size
);
1706 memset (tmpbuf
, '\0', byte_size
);
1707 /* The store detection code should only have allowed constants that are
1708 accepted by native_encode_expr or empty ctors. */
1710 && native_encode_expr (expr
, tmpbuf
, byte_size
- 1) == 0)
1713 /* The native_encode_expr machinery uses TYPE_MODE to determine how many
1714 bytes to write. This means it can write more than
1715 ROUND_UP (bitlen, BITS_PER_UNIT) / BITS_PER_UNIT bytes (for example
1716 write 8 bytes for a bitlen of 40). Skip the bytes that are not within
1717 bitlen and zero out the bits that are not relevant as well (that may
1718 contain a sign bit due to sign-extension). */
1719 unsigned int padding
1720 = byte_size
- ROUND_UP (bitlen
, BITS_PER_UNIT
) / BITS_PER_UNIT
- 1;
1721 /* On big-endian the padding is at the 'front' so just skip the initial bytes.  */
1723 if (BYTES_BIG_ENDIAN
)
1726 byte_size
-= padding
;
1728 if (bitlen
% BITS_PER_UNIT
!= 0)
1730 if (BYTES_BIG_ENDIAN
)
1731 clear_bit_region_be (tmpbuf
, BITS_PER_UNIT
- 1,
1732 BITS_PER_UNIT
- (bitlen
% BITS_PER_UNIT
));
1734 clear_bit_region (tmpbuf
, bitlen
,
1735 byte_size
* BITS_PER_UNIT
- bitlen
);
1737 /* Left shifting relies on the last byte being clear if bitlen is
1738 a multiple of BITS_PER_UNIT, which might not be clear if
1739 there are padding bytes. */
1740 else if (!BYTES_BIG_ENDIAN
)
1741 tmpbuf
[byte_size
- 1] = '\0';
1743 /* Clear the bit region in PTR where the bits from TMPBUF will be inserted.  */
1745 if (BYTES_BIG_ENDIAN
)
1746 clear_bit_region_be (ptr
+ first_byte
,
1747 BITS_PER_UNIT
- 1 - (bitpos
% BITS_PER_UNIT
), bitlen
);
1749 clear_bit_region (ptr
+ first_byte
, bitpos
% BITS_PER_UNIT
, bitlen
);
1752 int bitlen_mod
= bitlen
% BITS_PER_UNIT
;
1753 int bitpos_mod
= bitpos
% BITS_PER_UNIT
;
1755 bool skip_byte
= false;
1756 if (BYTES_BIG_ENDIAN
)
1758 /* BITPOS and BITLEN are exactly aligned and no shifting is necessary.  */
1760 if (bitpos_mod
+ bitlen_mod
== BITS_PER_UNIT
1761 || (bitpos_mod
== 0 && bitlen_mod
== 0))
1763 /* |. . . . . . . .|
1765 We always shift right for BYTES_BIG_ENDIAN so shift the beginning
1766 of the value until it aligns with 'bp' in the next byte over. */
1767 else if (bitpos_mod
+ bitlen_mod
< BITS_PER_UNIT
)
1769 shift_amnt
= bitlen_mod
+ bitpos_mod
;
1770 skip_byte
= bitlen_mod
!= 0;
1772 /* |. . . . . . . .|
1775 Shift the value right within the same byte so it aligns with 'bp'. */
1777 shift_amnt
= bitlen_mod
+ bitpos_mod
- BITS_PER_UNIT
;
1780 shift_amnt
= bitpos
% BITS_PER_UNIT
;
1782 /* Create the shifted version of EXPR. */
1783 if (!BYTES_BIG_ENDIAN
)
1785 shift_bytes_in_array (tmpbuf
, byte_size
, shift_amnt
);
1786 if (shift_amnt
== 0)
1791 gcc_assert (BYTES_BIG_ENDIAN
);
1792 shift_bytes_in_array_right (tmpbuf
, byte_size
, shift_amnt
);
1793 /* If shifting right forced us to move into the next byte skip the now empty byte.  */
1802 /* Insert the bits from TMPBUF. */
1803 for (unsigned int i
= 0; i
< byte_size
; i
++)
1804 ptr
[first_byte
+ i
] |= tmpbuf
[i
];
1809 /* Sorting function for store_immediate_info objects.
1810 Sorts them by bitposition. */
1813 sort_by_bitpos (const void *x
, const void *y
)
1815 store_immediate_info
*const *tmp
= (store_immediate_info
* const *) x
;
1816 store_immediate_info
*const *tmp2
= (store_immediate_info
* const *) y
;
1818 if ((*tmp
)->bitpos
< (*tmp2
)->bitpos
)
1820 else if ((*tmp
)->bitpos
> (*tmp2
)->bitpos
)
1823 /* If they are the same let's use the order which is guaranteed to be different.  */
1825 return (*tmp
)->order
- (*tmp2
)->order
;
1828 /* Sorting function for store_immediate_info objects.
1829 Sorts them by the order field. */
1832 sort_by_order (const void *x
, const void *y
)
1834 store_immediate_info
*const *tmp
= (store_immediate_info
* const *) x
;
1835 store_immediate_info
*const *tmp2
= (store_immediate_info
* const *) y
;
1837 if ((*tmp
)->order
< (*tmp2
)->order
)
1839 else if ((*tmp
)->order
> (*tmp2
)->order
)
1845 /* Initialize a merged_store_group object from a store_immediate_info object.  */
1848 merged_store_group::merged_store_group (store_immediate_info
*info
)
1850 start
= info
->bitpos
;
1851 width
= info
->bitsize
;
1852 bitregion_start
= info
->bitregion_start
;
1853 bitregion_end
= info
->bitregion_end
;
1854 /* VAL has memory allocated for it in apply_stores once the group
1855 width has been finalized. */
1858 bit_insertion
= false;
1859 only_constants
= info
->rhs_code
== INTEGER_CST
;
1860 first_nonmergeable_order
= ~0U;
1861 unsigned HOST_WIDE_INT align_bitpos
= 0;
1862 get_object_alignment_1 (gimple_assign_lhs (info
->stmt
),
1863 &align
, &align_bitpos
);
1864 align_base
= start
- align_bitpos
;
1865 for (int i
= 0; i
< 2; ++i
)
1867 store_operand_info
&op
= info
->ops
[i
];
1868 if (op
.base_addr
== NULL_TREE
)
1871 load_align_base
[i
] = 0;
1875 get_object_alignment_1 (op
.val
, &load_align
[i
], &align_bitpos
);
1876 load_align_base
[i
] = op
.bitpos
- align_bitpos
;
1880 stores
.safe_push (info
);
1881 last_stmt
= info
->stmt
;
1882 last_order
= info
->order
;
1883 first_stmt
= last_stmt
;
1884 first_order
= last_order
;
1888 merged_store_group::~merged_store_group ()
1894 /* Return true if the store described by INFO can be merged into the group. */
1897 merged_store_group::can_be_merged_into (store_immediate_info
*info
)
1899 /* Do not merge bswap patterns. */
1900 if (info
->rhs_code
== LROTATE_EXPR
)
1903 /* The canonical case. */
1904 if (info
->rhs_code
== stores
[0]->rhs_code
)
1907 /* BIT_INSERT_EXPR is compatible with INTEGER_CST. */
1908 if (info
->rhs_code
== BIT_INSERT_EXPR
&& stores
[0]->rhs_code
== INTEGER_CST
)
1911 if (stores
[0]->rhs_code
== BIT_INSERT_EXPR
&& info
->rhs_code
== INTEGER_CST
)
1914 /* We can turn MEM_REF into BIT_INSERT_EXPR for bit-field stores. */
1915 if (info
->rhs_code
== MEM_REF
1916 && (stores
[0]->rhs_code
== INTEGER_CST
1917 || stores
[0]->rhs_code
== BIT_INSERT_EXPR
)
1918 && info
->bitregion_start
== stores
[0]->bitregion_start
1919 && info
->bitregion_end
== stores
[0]->bitregion_end
)
1922 if (stores
[0]->rhs_code
== MEM_REF
1923 && (info
->rhs_code
== INTEGER_CST
1924 || info
->rhs_code
== BIT_INSERT_EXPR
)
1925 && info
->bitregion_start
== stores
[0]->bitregion_start
1926 && info
->bitregion_end
== stores
[0]->bitregion_end
)
1932 /* Helper method for merge_into and merge_overlapping to do the common part.  */
1936 merged_store_group::do_merge (store_immediate_info
*info
)
1938 bitregion_start
= MIN (bitregion_start
, info
->bitregion_start
);
1939 bitregion_end
= MAX (bitregion_end
, info
->bitregion_end
);
1941 unsigned int this_align
;
1942 unsigned HOST_WIDE_INT align_bitpos
= 0;
1943 get_object_alignment_1 (gimple_assign_lhs (info
->stmt
),
1944 &this_align
, &align_bitpos
);
1945 if (this_align
> align
)
1948 align_base
= info
->bitpos
- align_bitpos
;
1950 for (int i
= 0; i
< 2; ++i
)
1952 store_operand_info
&op
= info
->ops
[i
];
1956 get_object_alignment_1 (op
.val
, &this_align
, &align_bitpos
);
1957 if (this_align
> load_align
[i
])
1959 load_align
[i
] = this_align
;
1960 load_align_base
[i
] = op
.bitpos
- align_bitpos
;
1964 gimple
*stmt
= info
->stmt
;
1965 stores
.safe_push (info
);
1966 if (info
->order
> last_order
)
1968 last_order
= info
->order
;
1971 else if (info
->order
< first_order
)
1973 first_order
= info
->order
;
1976 if (info
->rhs_code
!= INTEGER_CST
)
1977 only_constants
= false;
1980 /* Merge a store recorded by INFO into this merged store.
1981 The store is not overlapping with the existing recorded stores.
1985 merged_store_group::merge_into (store_immediate_info
*info
)
1987 /* Make sure we're inserting in the position we think we're inserting. */
1988 gcc_assert (info
->bitpos
>= start
+ width
1989 && info
->bitregion_start
<= bitregion_end
);
1991 width
= info
->bitpos
+ info
->bitsize
- start
;
1995 /* Merge a store described by INFO into this merged store.
1996 INFO overlaps in some way with the current store (i.e. it's not contiguous
1997 which is handled by merged_store_group::merge_into). */
2000 merged_store_group::merge_overlapping (store_immediate_info
*info
)
2002 /* If the store extends the size of the group, extend the width. */
2003 if (info
->bitpos
+ info
->bitsize
> start
+ width
)
2004 width
= info
->bitpos
+ info
->bitsize
- start
;
2009 /* Go through all the recorded stores in this group in program order and
2010 apply their values to the VAL byte array to create the final merged
2011 value. Return true if the operation succeeded. */
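/* Illustrative example (not from the original comments): two adjacent
   one-byte constant stores of 0x12 and 0x34 at the start of a 4-byte bit
   region end up as val = { 0x12, 0x34, 0x00, 0x00 } and
   mask = { 0x00, 0x00, 0xff, 0xff }; the bits still set in MASK mark
   positions that no recorded store has written.  */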
2014 merged_store_group::apply_stores ()
2016 /* Make sure we have more than one store in the group, otherwise we cannot merge anything.  */
2018 if (bitregion_start
% BITS_PER_UNIT
!= 0
2019 || bitregion_end
% BITS_PER_UNIT
!= 0
2020 || stores
.length () == 1)
2023 stores
.qsort (sort_by_order
);
2024 store_immediate_info
*info
;
2026 /* Create a power-of-2-sized buffer for native_encode_expr. */
2027 buf_size
= 1 << ceil_log2 ((bitregion_end
- bitregion_start
) / BITS_PER_UNIT
);
2028 val
= XNEWVEC (unsigned char, 2 * buf_size
);
2029 mask
= val
+ buf_size
;
2030 memset (val
, 0, buf_size
);
2031 memset (mask
, ~0U, buf_size
);
2033 FOR_EACH_VEC_ELT (stores
, i
, info
)
2035 unsigned int pos_in_buffer
= info
->bitpos
- bitregion_start
;
2037 if (info
->ops
[0].val
&& info
->ops
[0].base_addr
== NULL_TREE
)
2038 cst
= info
->ops
[0].val
;
2039 else if (info
->ops
[1].val
&& info
->ops
[1].base_addr
== NULL_TREE
)
2040 cst
= info
->ops
[1].val
;
2046 if (info
->rhs_code
== BIT_INSERT_EXPR
)
2047 bit_insertion
= true;
2049 ret
= encode_tree_to_bitpos (cst
, val
, info
->bitsize
,
2050 pos_in_buffer
, buf_size
);
2052 unsigned char *m
= mask
+ (pos_in_buffer
/ BITS_PER_UNIT
);
2053 if (BYTES_BIG_ENDIAN
)
2054 clear_bit_region_be (m
, (BITS_PER_UNIT
- 1
2055 - (pos_in_buffer
% BITS_PER_UNIT
)),
2058 clear_bit_region (m
, pos_in_buffer
% BITS_PER_UNIT
, info
->bitsize
);
2059 if (cst
&& dump_file
&& (dump_flags
& TDF_DETAILS
))
2063 fputs ("After writing ", dump_file
);
2064 print_generic_expr (dump_file
, cst
, TDF_NONE
);
2065 fprintf (dump_file
, " of size " HOST_WIDE_INT_PRINT_DEC
2066 " at position %d\n", info
->bitsize
, pos_in_buffer
);
2067 fputs (" the merged value contains ", dump_file
);
2068 dump_char_array (dump_file
, val
, buf_size
);
2069 fputs (" the merged mask contains ", dump_file
);
2070 dump_char_array (dump_file
, mask
, buf_size
);
2072 fputs (" bit insertion is required\n", dump_file
);
2075 fprintf (dump_file
, "Failed to merge stores\n");
2080 stores
.qsort (sort_by_bitpos
);
/* Structure describing the store chain.  */

struct imm_store_chain_info
{
  /* Doubly-linked list that imposes an order on chain processing.
     PNXP (prev's next pointer) points to the head of a list, or to
     the next field in the previous chain in the list.
     See pass_store_merging::m_stores_head for more rationale.  */
  imm_store_chain_info *next, **pnxp;
  tree base_addr;
  auto_vec<store_immediate_info *> m_store_info;
  auto_vec<merged_store_group *> m_merged_store_groups;

  imm_store_chain_info (imm_store_chain_info *&inspt, tree b_a)
  : next (inspt), pnxp (&inspt), base_addr (b_a)
  {
    inspt = this;
    if (next)
      {
	gcc_checking_assert (pnxp == next->pnxp);
	next->pnxp = &next;
      }
  }
  ~imm_store_chain_info ()
  {
    *pnxp = next;
    if (next)
      {
	gcc_checking_assert (&next == next->pnxp);
	next->pnxp = pnxp;
      }
  }
  bool terminate_and_process_chain ();
  bool try_coalesce_bswap (merged_store_group *, unsigned int, unsigned int);
  bool coalesce_immediate_stores ();
  bool output_merged_store (merged_store_group *);
  bool output_merged_stores ();
};
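
/* For instance (a made-up situation, just to spell out the invariant the
   asserts above check): if chain A is created first and chain B second, both
   linked through pass_store_merging::m_stores_head, then
     m_stores_head == B, B->pnxp == &m_stores_head, B->next == A,
     A->pnxp == &B->next, A->next == NULL,
   so *chain->pnxp always points back at the chain itself and unlinking any
   chain in the destructor is a constant-time pointer update.  */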
const pass_data pass_data_tree_store_merging = {
  GIMPLE_PASS,     /* type */
  "store-merging", /* name */
  OPTGROUP_NONE,   /* optinfo_flags */
  TV_GIMPLE_STORE_MERGING, /* tv_id */
  PROP_ssa,	/* properties_required */
  0,		/* properties_provided */
  0,		/* properties_destroyed */
  0,		/* todo_flags_start */
  TODO_update_ssa, /* todo_flags_finish */
};
class pass_store_merging : public gimple_opt_pass
{
public:
  pass_store_merging (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_tree_store_merging, ctxt), m_stores_head ()
  {
  }

  /* Pass not supported for PDP-endian, nor for insane hosts or
     target character sizes where native_{encode,interpret}_expr
     doesn't work properly.  */
  virtual bool
  gate (function *)
  {
    return flag_store_merging
	   && BYTES_BIG_ENDIAN == WORDS_BIG_ENDIAN
	   && CHAR_BIT == 8
	   && BITS_PER_UNIT == 8;
  }

  virtual unsigned int execute (function *);

private:
  hash_map<tree_operand_hash, struct imm_store_chain_info *> m_stores;

  /* Form a doubly-linked stack of the elements of m_stores, so that
     we can iterate over them in a predictable way.  Using this order
     avoids extraneous differences in the compiler output just because
     of tree pointer variations (e.g. different chains end up in
     different positions of m_stores, so they are handled in different
     orders, so they allocate or release SSA names in different
     orders, and when they get reused, subsequent passes end up
     getting different SSA names, which may ultimately change
     decisions when going out of SSA).  */
  imm_store_chain_info *m_stores_head;

  void process_store (gimple *);
  bool terminate_and_process_all_chains ();
  bool terminate_all_aliasing_chains (imm_store_chain_info **, gimple *);
  bool terminate_and_release_chain (imm_store_chain_info *);
}; // class pass_store_merging
/* Terminate and process all recorded chains.  Return true if any changes
   were made.  */

bool
pass_store_merging::terminate_and_process_all_chains ()
{
  bool ret = false;
  while (m_stores_head)
    ret |= terminate_and_release_chain (m_stores_head);
  gcc_assert (m_stores.is_empty ());
  gcc_assert (m_stores_head == NULL);

  return ret;
}
/* Terminate all chains that are affected by the statement STMT.
   CHAIN_INFO is the chain we should ignore from the checks if
   non-NULL.  */

bool
pass_store_merging::terminate_all_aliasing_chains (imm_store_chain_info
						     **chain_info,
						   gimple *stmt)
{
  bool ret = false;

  /* If the statement doesn't touch memory it can't alias.  */
  if (!gimple_vuse (stmt))
    return false;

  tree store_lhs = gimple_store_p (stmt) ? gimple_get_lhs (stmt) : NULL_TREE;
  for (imm_store_chain_info *next = m_stores_head, *cur = next; cur; cur = next)
    {
      next = cur->next;

      /* We already checked all the stores in chain_info and terminated the
	 chain if necessary.  Skip it here.  */
      if (chain_info && *chain_info == cur)
	continue;

      store_immediate_info *info;
      unsigned int i;
      FOR_EACH_VEC_ELT (cur->m_store_info, i, info)
	{
	  tree lhs = gimple_assign_lhs (info->stmt);
	  if (ref_maybe_used_by_stmt_p (stmt, lhs)
	      || stmt_may_clobber_ref_p (stmt, lhs)
	      || (store_lhs && refs_output_dependent_p (store_lhs, lhs)))
	    {
	      if (dump_file && (dump_flags & TDF_DETAILS))
		{
		  fprintf (dump_file, "stmt causes chain termination:\n");
		  print_gimple_stmt (dump_file, stmt, 0);
		}
	      terminate_and_release_chain (cur);
	      ret = true;
	      break;
	    }
	}
    }

  return ret;
}
/* Helper function.  Terminate the recorded chain storing to base object
   BASE.  Return true if the merging and output was successful.  The m_stores
   entry is removed after the processing in any case.  */

bool
pass_store_merging::terminate_and_release_chain (imm_store_chain_info *chain_info)
{
  bool ret = chain_info->terminate_and_process_chain ();
  m_stores.remove (chain_info->base_addr);
  delete chain_info;
  return ret;
}
/* Return true if stmts in between FIRST (inclusive) and LAST (exclusive)
   may clobber REF.  FIRST and LAST must be in the same basic block and
   have non-NULL vdef.  We want to be able to sink load of REF across
   stores between FIRST and LAST, up to right before LAST.  */

static bool
stmts_may_clobber_ref_p (gimple *first, gimple *last, tree ref)
{
  ao_ref r;
  ao_ref_init (&r, ref);
  unsigned int count = 0;
  tree vop = gimple_vdef (last);
  gimple *stmt;

  gcc_checking_assert (gimple_bb (first) == gimple_bb (last));
  do
    {
      stmt = SSA_NAME_DEF_STMT (vop);
      if (stmt_may_clobber_ref_p_1 (stmt, &r))
	return true;
      if (gimple_store_p (stmt)
	  && refs_anti_dependent_p (ref, gimple_get_lhs (stmt)))
	return true;
      /* Avoid quadratic compile time by bounding the number of checks
	 we perform.  */
      if (++count > MAX_STORE_ALIAS_CHECKS)
	return true;
      vop = gimple_vuse (stmt);
    }
  while (stmt != first);
  return false;
}
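
/* Sketch of the walk above on a hypothetical block (virtual operands shown
   roughly the way a -vops dump would print them):
     first:  s.a = _1;   # .MEM_2 = VDEF <.MEM_1>
             s.b = _3;   # .MEM_4 = VDEF <.MEM_2>
     last:   t.c = _5;   # .MEM_6 = VDEF <.MEM_4>
   Each iteration maps the current virtual operand to its defining store via
   SSA_NAME_DEF_STMT and then steps to that store's gimple_vuse, so the chain
   .MEM_6 -> .MEM_4 -> .MEM_2 is followed backwards until FIRST is reached,
   with MAX_STORE_ALIAS_CHECKS bounding the number of statements examined.  */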
/* Return true if INFO->ops[IDX] is mergeable with the
   corresponding loads already in MERGED_STORE group.
   BASE_ADDR is the base address of the whole store group.  */

static bool
compatible_load_p (merged_store_group *merged_store,
		   store_immediate_info *info,
		   tree base_addr, int idx)
{
  store_immediate_info *infof = merged_store->stores[0];
  if (!info->ops[idx].base_addr
      || maybe_ne (info->ops[idx].bitpos - infof->ops[idx].bitpos,
		   info->bitpos - infof->bitpos)
      || !operand_equal_p (info->ops[idx].base_addr,
			   infof->ops[idx].base_addr, 0))
    return false;

  store_immediate_info *infol = merged_store->stores.last ();
  tree load_vuse = gimple_vuse (info->ops[idx].stmt);
  /* In this case all vuses should be the same, e.g.
     _1 = s.a; _2 = s.b; _3 = _1 | 1; t.a = _3; _4 = _2 | 2; t.b = _4;
     or
     _1 = s.a; _2 = s.b; t.a = _1; t.b = _2;
     and we can emit the coalesced load next to any of those loads.  */
  if (gimple_vuse (infof->ops[idx].stmt) == load_vuse
      && gimple_vuse (infol->ops[idx].stmt) == load_vuse)
    return true;

  /* Otherwise, at least for now require that the load has the same
     vuse as the store.  See following examples.  */
  if (gimple_vuse (info->stmt) != load_vuse)
    return false;

  if (gimple_vuse (infof->stmt) != gimple_vuse (infof->ops[idx].stmt)
      || (infof != infol
	  && gimple_vuse (infol->stmt) != gimple_vuse (infol->ops[idx].stmt)))
    return false;

  /* If the load is from the same location as the store, already
     the construction of the immediate chain info guarantees no intervening
     stores, so no further checks are needed.  Example:
     _1 = s.a; _2 = _1 & -7; s.a = _2; _3 = s.b; _4 = _3 & -7; s.b = _4;  */
  if (known_eq (info->ops[idx].bitpos, info->bitpos)
      && operand_equal_p (info->ops[idx].base_addr, base_addr, 0))
    return true;

  /* Otherwise, we need to punt if any of the loads can be clobbered by any
     of the stores in the group, or any other stores in between those.
     Previous calls to compatible_load_p ensured that for all the
     merged_store->stores IDX loads, no stmts starting with
     merged_store->first_stmt and ending right before merged_store->last_stmt
     clobbers those loads.  */
  gimple *first = merged_store->first_stmt;
  gimple *last = merged_store->last_stmt;
  unsigned int i;
  store_immediate_info *infoc;
  /* The stores are sorted by increasing store bitpos, so if info->stmt store
     comes before the so far first load, we'll be changing
     merged_store->first_stmt.  In that case we need to give up if
     any of the earlier processed loads are clobbered by the stmts in the new
     range.  */
  if (info->order < merged_store->first_order)
    {
      FOR_EACH_VEC_ELT (merged_store->stores, i, infoc)
	if (stmts_may_clobber_ref_p (info->stmt, first, infoc->ops[idx].val))
	  return false;
      first = info->stmt;
    }
  /* Similarly, we could change merged_store->last_stmt, so ensure
     in that case no stmts in the new range clobber any of the earlier
     processed loads.  */
  else if (info->order > merged_store->last_order)
    {
      FOR_EACH_VEC_ELT (merged_store->stores, i, infoc)
	if (stmts_may_clobber_ref_p (last, info->stmt, infoc->ops[idx].val))
	  return false;
      last = info->stmt;
    }
  /* And finally, we'd be adding a new load to the set, ensure it isn't
     clobbered in the new range.  */
  if (stmts_may_clobber_ref_p (first, last, info->ops[idx].val))
    return false;

  /* Otherwise, we are looking for:
     _1 = s.a; _2 = _1 ^ 15; t.a = _2; _3 = s.b; _4 = _3 ^ 15; t.b = _4;
     or
     _1 = s.a; t.a = _1; _2 = s.b; t.b = _2;  */
  return true;
}
/* Add all refs loaded to compute VAL to REFS vector.  */

static void
gather_bswap_load_refs (vec<tree> *refs, tree val)
{
  if (TREE_CODE (val) != SSA_NAME)
    return;

  gimple *stmt = SSA_NAME_DEF_STMT (val);
  if (!is_gimple_assign (stmt))
    return;

  if (gimple_assign_load_p (stmt))
    {
      refs->safe_push (gimple_assign_rhs1 (stmt));
      return;
    }

  switch (gimple_assign_rhs_class (stmt))
    {
    case GIMPLE_BINARY_RHS:
      gather_bswap_load_refs (refs, gimple_assign_rhs2 (stmt));
      /* FALLTHRU */
    case GIMPLE_UNARY_RHS:
      gather_bswap_load_refs (refs, gimple_assign_rhs1 (stmt));
      break;
    default:
      gcc_unreachable ();
    }
}
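
/* For example (hypothetical SSA names), given
     _1 = s.lo;  _2 = s.hi;  _3 = _2 << 8;  _4 = _1 | _3;
   gather_bswap_load_refs (&refs, _4) walks the defining statements of _4 and
   of its operands and ends up pushing the two memory references s.lo and
   s.hi into REFS; constants and other non-SSA operands are simply skipped.  */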
/* Check if there are any stores in M_STORE_INFO after index I
   (where M_STORE_INFO must be sorted by sort_by_bitpos) that overlap
   a potential group ending with END that have their order
   smaller than LAST_ORDER.  RHS_CODE is the kind of store in the
   group.  Return true if there are no such stores.
   Consider:
     MEM[(long long int *)p_28] = 0;
     MEM[(long long int *)p_28 + 8B] = 0;
     MEM[(long long int *)p_28 + 16B] = 0;
     MEM[(long long int *)p_28 + 24B] = 0;
     MEM[(int *)p_28 + 8B] = _129;
     MEM[(int *)p_28].a = -1;
   We already have
     MEM[(long long int *)p_28] = 0;
     MEM[(int *)p_28].a = -1;
   stmts in the current group and need to consider if it is safe to
   add MEM[(long long int *)p_28 + 8B] = 0; store into the same group.
   There is an overlap between that store and the MEM[(int *)p_28 + 8B] = _129;
   store though, so if we add the MEM[(long long int *)p_28 + 8B] = 0;
   into the group and merging of those 3 stores is successful, merged
   stmts will be emitted at the latest store from that group, i.e.
   LAST_ORDER, which is the MEM[(int *)p_28].a = -1; store.
   The MEM[(int *)p_28 + 8B] = _129; store that originally follows
   the MEM[(long long int *)p_28 + 8B] = 0; would now be before it,
   so we need to refuse merging MEM[(long long int *)p_28 + 8B] = 0;
   into the group.  That way it will be its own store group and will
   not be touched.  If RHS_CODE is INTEGER_CST and there are overlapping
   INTEGER_CST stores, those are mergeable using merge_overlapping,
   so don't return false for those.  */

static bool
check_no_overlap (vec<store_immediate_info *> m_store_info, unsigned int i,
		  enum tree_code rhs_code, unsigned int last_order,
		  unsigned HOST_WIDE_INT end)
{
  unsigned int len = m_store_info.length ();
  for (++i; i < len; ++i)
    {
      store_immediate_info *info = m_store_info[i];
      if (info->bitpos >= end)
	break;
      if (info->order < last_order
	  && (rhs_code != INTEGER_CST || info->rhs_code != INTEGER_CST))
	return false;
    }
  return true;
}
/* Return true if m_store_info[first] and at least one following store
   form a group which stores a try_size bitsize value which is byte swapped
   from a memory load or some value, or identity from some value.
   This uses the bswap pass APIs.  */

bool
imm_store_chain_info::try_coalesce_bswap (merged_store_group *merged_store,
					  unsigned int first,
					  unsigned int try_size)
{
  unsigned int len = m_store_info.length (), last = first;
  unsigned HOST_WIDE_INT width = m_store_info[first]->bitsize;
  if (width >= try_size)
    return false;
2471 for (unsigned int i
= first
+ 1; i
< len
; ++i
)
2473 if (m_store_info
[i
]->bitpos
!= m_store_info
[first
]->bitpos
+ width
2474 || m_store_info
[i
]->ins_stmt
== NULL
)
2476 width
+= m_store_info
[i
]->bitsize
;
2477 if (width
>= try_size
)
2483 if (width
!= try_size
)
2486 bool allow_unaligned
2487 = !STRICT_ALIGNMENT
&& PARAM_VALUE (PARAM_STORE_MERGING_ALLOW_UNALIGNED
);
2488 /* Punt if the combined store would not be aligned and we need alignment. */
2489 if (!allow_unaligned
)
2491 unsigned int align
= merged_store
->align
;
2492 unsigned HOST_WIDE_INT align_base
= merged_store
->align_base
;
2493 for (unsigned int i
= first
+ 1; i
<= last
; ++i
)
2495 unsigned int this_align
;
2496 unsigned HOST_WIDE_INT align_bitpos
= 0;
2497 get_object_alignment_1 (gimple_assign_lhs (m_store_info
[i
]->stmt
),
2498 &this_align
, &align_bitpos
);
2499 if (this_align
> align
)
2502 align_base
= m_store_info
[i
]->bitpos
- align_bitpos
;
2505 unsigned HOST_WIDE_INT align_bitpos
2506 = (m_store_info
[first
]->bitpos
- align_base
) & (align
- 1);
2508 align
= least_bit_hwi (align_bitpos
);
2509 if (align
< try_size
)
2516 case 16: type
= uint16_type_node
; break;
2517 case 32: type
= uint32_type_node
; break;
2518 case 64: type
= uint64_type_node
; break;
2519 default: gcc_unreachable ();
2521 struct symbolic_number n
;
2522 gimple
*ins_stmt
= NULL
;
2523 int vuse_store
= -1;
2524 unsigned int first_order
= merged_store
->first_order
;
2525 unsigned int last_order
= merged_store
->last_order
;
2526 gimple
*first_stmt
= merged_store
->first_stmt
;
2527 gimple
*last_stmt
= merged_store
->last_stmt
;
2528 unsigned HOST_WIDE_INT end
= merged_store
->start
+ merged_store
->width
;
2529 store_immediate_info
*infof
= m_store_info
[first
];
2531 for (unsigned int i
= first
; i
<= last
; ++i
)
2533 store_immediate_info
*info
= m_store_info
[i
];
2534 struct symbolic_number this_n
= info
->n
;
2536 if (!this_n
.base_addr
)
2537 this_n
.range
= try_size
/ BITS_PER_UNIT
;
2539 /* Update vuse in case it has changed by output_merged_stores. */
2540 this_n
.vuse
= gimple_vuse (info
->ins_stmt
);
2541 unsigned int bitpos
= info
->bitpos
- infof
->bitpos
;
2542 if (!do_shift_rotate (LSHIFT_EXPR
, &this_n
,
2544 ? try_size
- info
->bitsize
- bitpos
2547 if (this_n
.base_addr
&& vuse_store
)
2550 for (j
= first
; j
<= last
; ++j
)
2551 if (this_n
.vuse
== gimple_vuse (m_store_info
[j
]->stmt
))
2555 if (vuse_store
== 1)
2563 ins_stmt
= info
->ins_stmt
;
2567 if (n
.base_addr
&& n
.vuse
!= this_n
.vuse
)
2569 if (vuse_store
== 0)
2573 if (info
->order
> last_order
)
2575 last_order
= info
->order
;
2576 last_stmt
= info
->stmt
;
2578 else if (info
->order
< first_order
)
2580 first_order
= info
->order
;
2581 first_stmt
= info
->stmt
;
2583 end
= MAX (end
, info
->bitpos
+ info
->bitsize
);
2585 ins_stmt
= perform_symbolic_merge (ins_stmt
, &n
, info
->ins_stmt
,
2587 if (ins_stmt
== NULL
)
2592 uint64_t cmpxchg
, cmpnop
;
2593 find_bswap_or_nop_finalize (&n
, &cmpxchg
, &cmpnop
);
2595 /* A complete byte swap should make the symbolic number to start with
2596 the largest digit in the highest order byte. Unchanged symbolic
2597 number indicates a read with same endianness as target architecture. */
2598 if (n
.n
!= cmpnop
&& n
.n
!= cmpxchg
)
2601 if (n
.base_addr
== NULL_TREE
&& !is_gimple_val (n
.src
))
2604 if (!check_no_overlap (m_store_info
, last
, LROTATE_EXPR
, last_order
, end
))
2607 /* Don't handle memory copy this way if normal non-bswap processing
2608 would handle it too. */
2609 if (n
.n
== cmpnop
&& (unsigned) n
.n_ops
== last
- first
+ 1)
2612 for (i
= first
; i
<= last
; ++i
)
2613 if (m_store_info
[i
]->rhs_code
!= MEM_REF
)
2623 /* Will emit LROTATE_EXPR. */
2626 if (builtin_decl_explicit_p (BUILT_IN_BSWAP32
)
2627 && optab_handler (bswap_optab
, SImode
) != CODE_FOR_nothing
)
2631 if (builtin_decl_explicit_p (BUILT_IN_BSWAP64
)
2632 && optab_handler (bswap_optab
, DImode
) != CODE_FOR_nothing
)
2639 if (!allow_unaligned
&& n
.base_addr
)
2641 unsigned int align
= get_object_alignment (n
.src
);
2642 if (align
< try_size
)
2646 /* If each load has vuse of the corresponding store, need to verify
2647 the loads can be sunk right before the last store. */
2648 if (vuse_store
== 1)
2650 auto_vec
<tree
, 64> refs
;
2651 for (unsigned int i
= first
; i
<= last
; ++i
)
2652 gather_bswap_load_refs (&refs
,
2653 gimple_assign_rhs1 (m_store_info
[i
]->stmt
));
2657 FOR_EACH_VEC_ELT (refs
, i
, ref
)
2658 if (stmts_may_clobber_ref_p (first_stmt
, last_stmt
, ref
))
2664 infof
->ins_stmt
= ins_stmt
;
2665 for (unsigned int i
= first
; i
<= last
; ++i
)
2667 m_store_info
[i
]->rhs_code
= n
.n
== cmpxchg
? LROTATE_EXPR
: NOP_EXPR
;
2668 m_store_info
[i
]->ops
[0].base_addr
= NULL_TREE
;
2669 m_store_info
[i
]->ops
[1].base_addr
= NULL_TREE
;
2671 merged_store
->merge_into (m_store_info
[i
]);
/* Go through the candidate stores recorded in m_store_info and merge them
   into merged_store_group objects recorded into m_merged_store_groups
   representing the widened stores.  Return true if coalescing was successful
   and the number of widened stores is fewer than the original number
   of statements.  */

bool
imm_store_chain_info::coalesce_immediate_stores ()
{
  /* Anything less can't be processed.  */
  if (m_store_info.length () < 2)
    return false;

  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, "Attempting to coalesce %u stores in chain\n",
	     m_store_info.length ());

  store_immediate_info *info;
  unsigned int i, ignore = 0;

  /* Order the stores by the bitposition they write to.  */
  m_store_info.qsort (sort_by_bitpos);

  info = m_store_info[0];
  merged_store_group *merged_store = new merged_store_group (info);
  if (dump_file && (dump_flags & TDF_DETAILS))
    fputs ("New store group\n", dump_file);

  FOR_EACH_VEC_ELT (m_store_info, i, info)
    {
2707 unsigned HOST_WIDE_INT new_bitregion_start
, new_bitregion_end
;
2712 /* First try to handle group of stores like:
2717 using the bswap framework. */
2718 if (info
->bitpos
== merged_store
->start
+ merged_store
->width
2719 && merged_store
->stores
.length () == 1
2720 && merged_store
->stores
[0]->ins_stmt
!= NULL
2721 && info
->ins_stmt
!= NULL
)
2723 unsigned int try_size
;
2724 for (try_size
= 64; try_size
>= 16; try_size
>>= 1)
2725 if (try_coalesce_bswap (merged_store
, i
- 1, try_size
))
2730 ignore
= i
+ merged_store
->stores
.length () - 1;
2731 m_merged_store_groups
.safe_push (merged_store
);
2732 if (ignore
< m_store_info
.length ())
2733 merged_store
= new merged_store_group (m_store_info
[ignore
]);
2735 merged_store
= NULL
;
2741 = MIN (merged_store
->bitregion_start
, info
->bitregion_start
);
2743 = MAX (merged_store
->bitregion_end
, info
->bitregion_end
);
2745 if (info
->order
>= merged_store
->first_nonmergeable_order
2746 || (((new_bitregion_end
- new_bitregion_start
+ 1) / BITS_PER_UNIT
)
2747 > (unsigned) PARAM_VALUE (PARAM_STORE_MERGING_MAX_SIZE
)))
2752 Overlapping stores. */
2753 else if (IN_RANGE (info
->bitpos
, merged_store
->start
,
2754 merged_store
->start
+ merged_store
->width
- 1))
2756 /* Only allow overlapping stores of constants. */
2757 if (info
->rhs_code
== INTEGER_CST
&& merged_store
->only_constants
)
2759 unsigned int last_order
2760 = MAX (merged_store
->last_order
, info
->order
);
2761 unsigned HOST_WIDE_INT end
2762 = MAX (merged_store
->start
+ merged_store
->width
,
2763 info
->bitpos
+ info
->bitsize
);
2764 if (check_no_overlap (m_store_info
, i
, INTEGER_CST
,
2767 /* check_no_overlap call above made sure there are no
2768 overlapping stores with non-INTEGER_CST rhs_code
2769 in between the first and last of the stores we've
2770 just merged. If there are any INTEGER_CST rhs_code
2771 stores in between, we need to merge_overlapping them
2772 even if in the sort_by_bitpos order there are other
2773 overlapping stores in between. Keep those stores as is.
2775 MEM[(int *)p_28] = 0;
2776 MEM[(char *)p_28 + 3B] = 1;
2777 MEM[(char *)p_28 + 1B] = 2;
2778 MEM[(char *)p_28 + 2B] = MEM[(char *)p_28 + 6B];
2779 We can't merge the zero store with the store of two and
2780 not merge anything else, because the store of one is
2781 in the original order in between those two, but in
2782 store_by_bitpos order it comes after the last store that
2783 we can't merge with them. We can merge the first 3 stores
2784 and keep the last store as is though. */
2785 unsigned int len
= m_store_info
.length ();
2786 unsigned int try_order
= last_order
;
2787 unsigned int first_nonmergeable_order
;
2789 bool last_iter
= false;
2793 unsigned int max_order
= 0;
2794 unsigned first_nonmergeable_int_order
= ~0U;
2795 unsigned HOST_WIDE_INT this_end
= end
;
2797 first_nonmergeable_order
= ~0U;
2798 for (unsigned int j
= i
+ 1; j
< len
; ++j
)
2800 store_immediate_info
*info2
= m_store_info
[j
];
2801 if (info2
->bitpos
>= this_end
)
2803 if (info2
->order
< try_order
)
2805 if (info2
->rhs_code
!= INTEGER_CST
)
2807 /* Normally check_no_overlap makes sure this
2808 doesn't happen, but if end grows below,
2809 then we need to process more stores than
2810 check_no_overlap verified. Example:
2811 MEM[(int *)p_5] = 0;
2812 MEM[(short *)p_5 + 3B] = 1;
2813 MEM[(char *)p_5 + 4B] = _9;
2814 MEM[(char *)p_5 + 2B] = 2; */
2819 this_end
= MAX (this_end
,
2820 info2
->bitpos
+ info2
->bitsize
);
2822 else if (info2
->rhs_code
== INTEGER_CST
2825 max_order
= MAX (max_order
, info2
->order
+ 1);
2826 first_nonmergeable_int_order
2827 = MIN (first_nonmergeable_int_order
,
2831 first_nonmergeable_order
2832 = MIN (first_nonmergeable_order
, info2
->order
);
2836 if (last_order
== try_order
)
2838 /* If this failed, but only because we grew
2839 try_order, retry with the last working one,
2840 so that we merge at least something. */
2841 try_order
= last_order
;
2845 last_order
= try_order
;
2846 /* Retry with a larger try_order to see if we could
2847 merge some further INTEGER_CST stores. */
2849 && (first_nonmergeable_int_order
2850 < first_nonmergeable_order
))
2852 try_order
= MIN (max_order
,
2853 first_nonmergeable_order
);
2856 merged_store
->first_nonmergeable_order
);
2857 if (try_order
> last_order
&& ++attempts
< 16)
2860 first_nonmergeable_order
2861 = MIN (first_nonmergeable_order
,
2862 first_nonmergeable_int_order
);
2870 merged_store
->merge_overlapping (info
);
2872 merged_store
->first_nonmergeable_order
2873 = MIN (merged_store
->first_nonmergeable_order
,
2874 first_nonmergeable_order
);
2876 for (unsigned int j
= i
+ 1; j
<= k
; j
++)
2878 store_immediate_info
*info2
= m_store_info
[j
];
2879 gcc_assert (info2
->bitpos
< end
);
2880 if (info2
->order
< last_order
)
2882 gcc_assert (info2
->rhs_code
== INTEGER_CST
);
2884 merged_store
->merge_overlapping (info2
);
2886 /* Other stores are kept and not merged in any
2895 /* |---store 1---||---store 2---|
2896 This store is consecutive to the previous one.
2897 Merge it into the current store group. There can be gaps in between
2898 the stores, but there can't be gaps in between bitregions. */
2899 else if (info
->bitregion_start
<= merged_store
->bitregion_end
2900 && merged_store
->can_be_merged_into (info
))
2902 store_immediate_info
*infof
= merged_store
->stores
[0];
2904 /* All the rhs_code ops that take 2 operands are commutative,
2905 swap the operands if it could make the operands compatible. */
2906 if (infof
->ops
[0].base_addr
2907 && infof
->ops
[1].base_addr
2908 && info
->ops
[0].base_addr
2909 && info
->ops
[1].base_addr
2910 && known_eq (info
->ops
[1].bitpos
- infof
->ops
[0].bitpos
,
2911 info
->bitpos
- infof
->bitpos
)
2912 && operand_equal_p (info
->ops
[1].base_addr
,
2913 infof
->ops
[0].base_addr
, 0))
2915 std::swap (info
->ops
[0], info
->ops
[1]);
2916 info
->ops_swapped_p
= true;
2918 if (check_no_overlap (m_store_info
, i
, info
->rhs_code
,
2919 MAX (merged_store
->last_order
, info
->order
),
2920 MAX (merged_store
->start
+ merged_store
->width
,
2921 info
->bitpos
+ info
->bitsize
)))
2923 /* Turn MEM_REF into BIT_INSERT_EXPR for bit-field stores. */
2924 if (info
->rhs_code
== MEM_REF
&& infof
->rhs_code
!= MEM_REF
)
2926 info
->rhs_code
= BIT_INSERT_EXPR
;
2927 info
->ops
[0].val
= gimple_assign_rhs1 (info
->stmt
);
2928 info
->ops
[0].base_addr
= NULL_TREE
;
2930 else if (infof
->rhs_code
== MEM_REF
&& info
->rhs_code
!= MEM_REF
)
2932 store_immediate_info
*infoj
;
2934 FOR_EACH_VEC_ELT (merged_store
->stores
, j
, infoj
)
2936 infoj
->rhs_code
= BIT_INSERT_EXPR
;
2937 infoj
->ops
[0].val
= gimple_assign_rhs1 (infoj
->stmt
);
2938 infoj
->ops
[0].base_addr
= NULL_TREE
;
2941 if ((infof
->ops
[0].base_addr
2942 ? compatible_load_p (merged_store
, info
, base_addr
, 0)
2943 : !info
->ops
[0].base_addr
)
2944 && (infof
->ops
[1].base_addr
2945 ? compatible_load_p (merged_store
, info
, base_addr
, 1)
2946 : !info
->ops
[1].base_addr
))
2948 merged_store
->merge_into (info
);
2954 /* |---store 1---| <gap> |---store 2---|.
2955 Gap between stores or the rhs not compatible. Start a new group. */
2957 /* Try to apply all the stores recorded for the group to determine
2958 the bitpattern they write and discard it if that fails.
2959 This will also reject single-store groups. */
2960 if (merged_store
->apply_stores ())
2961 m_merged_store_groups
.safe_push (merged_store
);
2963 delete merged_store
;
2965 merged_store
= new merged_store_group (info
);
2966 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
2967 fputs ("New store group\n", dump_file
);
2970 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
2972 fprintf (dump_file
, "Store %u:\nbitsize:" HOST_WIDE_INT_PRINT_DEC
2973 " bitpos:" HOST_WIDE_INT_PRINT_DEC
" val:",
2974 i
, info
->bitsize
, info
->bitpos
);
2975 print_generic_expr (dump_file
, gimple_assign_rhs1 (info
->stmt
));
2976 fputc ('\n', dump_file
);
  /* Record or discard the last store group.  */
  if (merged_store)
    {
      if (merged_store->apply_stores ())
	m_merged_store_groups.safe_push (merged_store);
      else
	delete merged_store;
    }

  gcc_assert (m_merged_store_groups.length () <= m_store_info.length ());

  bool success
    = !m_merged_store_groups.is_empty ()
      && m_merged_store_groups.length () < m_store_info.length ();

  if (success && dump_file)
    fprintf (dump_file, "Coalescing successful!\nMerged into %u stores\n",
	     m_merged_store_groups.length ());

  return success;
}
/* Return the type to use for the merged stores or loads described by STMTS.
   This is needed to get the alias sets right.  If IS_LOAD, look for rhs,
   otherwise lhs.  Additionally set *CLIQUEP and *BASEP to MR_DEPENDENCE_*
   of the MEM_REFs if any.  */

static tree
get_alias_type_for_stmts (vec<gimple *> &stmts, bool is_load,
			  unsigned short *cliquep, unsigned short *basep)
{
  gimple *stmt;
  unsigned int i;
  tree type = NULL_TREE;
  tree ret = NULL_TREE;
  *cliquep = 0;
  *basep = 0;

  FOR_EACH_VEC_ELT (stmts, i, stmt)
    {
      tree ref = is_load ? gimple_assign_rhs1 (stmt)
			 : gimple_assign_lhs (stmt);
      tree type1 = reference_alias_ptr_type (ref);
      tree base = get_base_address (ref);

      if (i == 0)
	{
	  if (TREE_CODE (base) == MEM_REF)
	    {
	      *cliquep = MR_DEPENDENCE_CLIQUE (base);
	      *basep = MR_DEPENDENCE_BASE (base);
	    }
	  ret = type = type1;
	  continue;
	}
      if (!alias_ptr_types_compatible_p (type, type1))
	ret = ptr_type_node;
      if (TREE_CODE (base) != MEM_REF
	  || *cliquep != MR_DEPENDENCE_CLIQUE (base)
	  || *basep != MR_DEPENDENCE_BASE (base))
	{
	  *cliquep = 0;
	  *basep = 0;
	}
    }
  return ret;
}
/* Return the location_t information we can find among the statements
   in STMTS.  */

static location_t
get_location_for_stmts (vec<gimple *> &stmts)
{
  gimple *stmt;
  unsigned int i;

  FOR_EACH_VEC_ELT (stmts, i, stmt)
    if (gimple_has_location (stmt))
      return gimple_location (stmt);

  return UNKNOWN_LOCATION;
}
/* Used to describe a store resulting from splitting a wide store in smaller
   regularly-sized stores in split_group.  */

struct split_store
{
  unsigned HOST_WIDE_INT bytepos;
  unsigned HOST_WIDE_INT size;
  unsigned HOST_WIDE_INT align;
  auto_vec<store_immediate_info *> orig_stores;
  /* True if there is a single orig stmt covering the whole split store.  */
  bool orig;
  split_store (unsigned HOST_WIDE_INT, unsigned HOST_WIDE_INT,
	       unsigned HOST_WIDE_INT);
};

/* Simple constructor.  */

split_store::split_store (unsigned HOST_WIDE_INT bp,
			  unsigned HOST_WIDE_INT sz,
			  unsigned HOST_WIDE_INT al)
  : bytepos (bp), size (sz), align (al), orig (false)
{
  orig_stores.create (0);
}
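
/* For illustration only (the numbers are invented): a group whose value
   covers bytes 1..6 of the base, with only 32-bit alignment available from
   byte 2 on, could be described by three split_store objects
     split_store (1, 8, 8)     - one byte at offset 1
     split_store (2, 32, 32)   - one aligned word at offsets 2..5
     split_store (6, 8, 8)     - one byte at offset 6
   each with the store_immediate_infos overlapping its byte range recorded in
   orig_stores by find_constituent_stores below.  */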
/* Record all stores in GROUP that write to the region starting at BITPOS and
   is of size BITSIZE.  Record infos for such statements in STORES if
   non-NULL.  The stores in GROUP must be sorted by bitposition.  Return INFO
   if there is exactly one original store in the range.  */

static store_immediate_info *
find_constituent_stores (struct merged_store_group *group,
			 vec<store_immediate_info *> *stores,
			 unsigned int *first,
			 unsigned HOST_WIDE_INT bitpos,
			 unsigned HOST_WIDE_INT bitsize)
{
  store_immediate_info *info, *ret = NULL;
  unsigned int i;
  bool second = false;
  bool update_first = true;
  unsigned HOST_WIDE_INT end = bitpos + bitsize;
  for (i = *first; group->stores.iterate (i, &info); ++i)
    {
      unsigned HOST_WIDE_INT stmt_start = info->bitpos;
      unsigned HOST_WIDE_INT stmt_end = stmt_start + info->bitsize;
      if (stmt_end <= bitpos)
	{
	  /* BITPOS passed to this function never decreases from within the
	     same split_group call, so optimize and don't scan info records
	     which are known to end before or at BITPOS next time.
	     Only do it if all stores before this one also pass this.  */
	  if (update_first)
	    *first = i + 1;
	  continue;
	}
      else
	update_first = false;

      /* The stores in GROUP are ordered by bitposition so if we're past
	 the region for this group return early.  */
      if (stmt_start >= end)
	return ret;

      if (stores)
	{
	  stores->safe_push (info);
	  if (ret)
	    {
	      ret = NULL;
	      second = true;
	    }
	}
      else if (ret)
	return NULL;
      if (!second)
	ret = info;
    }
  return ret;
}
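
/* Example (made-up bit positions): if GROUP holds byte stores at bitpos 0, 8,
   16 and 24, then find_constituent_stores (group, NULL, &first, 8, 8) returns
   the single store at bitpos 8, whereas asking for the range [0, 16) returns
   NULL because two original stores fall into that range.  */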
/* Return how many SSA_NAMEs used to compute value to store in the INFO
   store have multiple uses.  If any SSA_NAME has multiple uses, also
   count statements needed to compute it.  */

static unsigned
count_multiple_uses (store_immediate_info *info)
{
  gimple *stmt = info->stmt;
  unsigned ret = 0;
  switch (info->rhs_code)
    {
3161 if (info
->bit_not_p
)
3163 if (!has_single_use (gimple_assign_rhs1 (stmt
)))
3164 ret
= 1; /* Fall through below to return
3165 the BIT_NOT_EXPR stmt and then
3166 BIT_{AND,IOR,XOR}_EXPR and anything it
3169 /* stmt is after this the BIT_NOT_EXPR. */
3170 stmt
= SSA_NAME_DEF_STMT (gimple_assign_rhs1 (stmt
));
3172 if (!has_single_use (gimple_assign_rhs1 (stmt
)))
3174 ret
+= 1 + info
->ops
[0].bit_not_p
;
3175 if (info
->ops
[1].base_addr
)
3176 ret
+= 1 + info
->ops
[1].bit_not_p
;
3179 stmt
= SSA_NAME_DEF_STMT (gimple_assign_rhs1 (stmt
));
3180 /* stmt is now the BIT_*_EXPR. */
3181 if (!has_single_use (gimple_assign_rhs1 (stmt
)))
3182 ret
+= 1 + info
->ops
[info
->ops_swapped_p
].bit_not_p
;
3183 else if (info
->ops
[info
->ops_swapped_p
].bit_not_p
)
3185 gimple
*stmt2
= SSA_NAME_DEF_STMT (gimple_assign_rhs1 (stmt
));
3186 if (!has_single_use (gimple_assign_rhs1 (stmt2
)))
3189 if (info
->ops
[1].base_addr
== NULL_TREE
)
3191 gcc_checking_assert (!info
->ops_swapped_p
);
3194 if (!has_single_use (gimple_assign_rhs2 (stmt
)))
3195 ret
+= 1 + info
->ops
[1 - info
->ops_swapped_p
].bit_not_p
;
3196 else if (info
->ops
[1 - info
->ops_swapped_p
].bit_not_p
)
3198 gimple
*stmt2
= SSA_NAME_DEF_STMT (gimple_assign_rhs2 (stmt
));
3199 if (!has_single_use (gimple_assign_rhs1 (stmt2
)))
3204 if (!has_single_use (gimple_assign_rhs1 (stmt
)))
3205 return 1 + info
->ops
[0].bit_not_p
;
3206 else if (info
->ops
[0].bit_not_p
)
3208 stmt
= SSA_NAME_DEF_STMT (gimple_assign_rhs1 (stmt
));
3209 if (!has_single_use (gimple_assign_rhs1 (stmt
)))
3213 case BIT_INSERT_EXPR
:
3214 return has_single_use (gimple_assign_rhs1 (stmt
)) ? 0 : 1;
/* Split a merged store described by GROUP by populating the SPLIT_STORES
   vector (if non-NULL) with split_store structs describing the byte offset
   (from the base), the bit size and alignment of each store as well as the
   original statements involved in each such split group.
   This is to separate the splitting strategy from the statement
   building/emission/linking done in output_merged_store.
   Return number of new stores.
   If ALLOW_UNALIGNED_STORE is false, then all stores must be aligned.
   If ALLOW_UNALIGNED_LOAD is false, then all loads must be aligned.
   BZERO_FIRST may be true only when the first store covers the whole group
   and clears it; if BZERO_FIRST is true, keep that first store in the set
   unmodified and emit further stores for the overrides only.
   If SPLIT_STORES is NULL, it is just a dry run to count number of
   new stores.  */

static unsigned int
split_group (merged_store_group *group, bool allow_unaligned_store,
	     bool allow_unaligned_load, bool bzero_first,
	     vec<struct split_store *> *split_stores,
	     unsigned *total_orig,
	     unsigned *total_new)
{
  unsigned HOST_WIDE_INT pos = group->bitregion_start;
  unsigned HOST_WIDE_INT size = group->bitregion_end - pos;
  unsigned HOST_WIDE_INT bytepos = pos / BITS_PER_UNIT;
  unsigned HOST_WIDE_INT group_align = group->align;
  unsigned HOST_WIDE_INT align_base = group->align_base;
  unsigned HOST_WIDE_INT group_load_align = group_align;
  bool any_orig = false;

  gcc_assert ((size % BITS_PER_UNIT == 0) && (pos % BITS_PER_UNIT == 0));
3252 if (group
->stores
[0]->rhs_code
== LROTATE_EXPR
3253 || group
->stores
[0]->rhs_code
== NOP_EXPR
)
3255 gcc_assert (!bzero_first
);
3256 /* For bswap framework using sets of stores, all the checking
3257 has been done earlier in try_coalesce_bswap and needs to be
3258 emitted as a single store. */
3261 /* Avoid the old/new stmt count heuristics. It should be
3262 always beneficial. */
3269 unsigned HOST_WIDE_INT align_bitpos
3270 = (group
->start
- align_base
) & (group_align
- 1);
3271 unsigned HOST_WIDE_INT align
= group_align
;
3273 align
= least_bit_hwi (align_bitpos
);
3274 bytepos
= group
->start
/ BITS_PER_UNIT
;
3275 struct split_store
*store
3276 = new split_store (bytepos
, group
->width
, align
);
3277 unsigned int first
= 0;
3278 find_constituent_stores (group
, &store
->orig_stores
,
3279 &first
, group
->start
, group
->width
);
3280 split_stores
->safe_push (store
);
3286 unsigned int ret
= 0, first
= 0;
3287 unsigned HOST_WIDE_INT try_pos
= bytepos
;
3292 store_immediate_info
*info
= group
->stores
[0];
3295 total_orig
[0] = 1; /* The orig store. */
3296 info
= group
->stores
[0];
3297 if (info
->ops
[0].base_addr
)
3299 if (info
->ops
[1].base_addr
)
3301 switch (info
->rhs_code
)
3306 total_orig
[0]++; /* The orig BIT_*_EXPR stmt. */
3311 total_orig
[0] *= group
->stores
.length ();
3313 FOR_EACH_VEC_ELT (group
->stores
, i
, info
)
3315 total_new
[0] += count_multiple_uses (info
);
3316 total_orig
[0] += (info
->bit_not_p
3317 + info
->ops
[0].bit_not_p
3318 + info
->ops
[1].bit_not_p
);
3322 if (!allow_unaligned_load
)
3323 for (int i
= 0; i
< 2; ++i
)
3324 if (group
->load_align
[i
])
3325 group_load_align
= MIN (group_load_align
, group
->load_align
[i
]);
3333 struct split_store
*store
3334 = new split_store (bytepos
, group
->stores
[0]->bitsize
, align_base
);
3335 store
->orig_stores
.safe_push (group
->stores
[0]);
3338 split_stores
->safe_push (store
);
3344 if ((allow_unaligned_store
|| group_align
<= BITS_PER_UNIT
)
3345 && (group
->mask
[try_pos
- bytepos
] == (unsigned char) ~0U
3346 || (bzero_first
&& group
->val
[try_pos
- bytepos
] == 0)))
3348 /* Skip padding bytes. */
3350 size
-= BITS_PER_UNIT
;
3354 unsigned HOST_WIDE_INT try_bitpos
= try_pos
* BITS_PER_UNIT
;
3355 unsigned int try_size
= MAX_STORE_BITSIZE
, nonmasked
;
3356 unsigned HOST_WIDE_INT align_bitpos
3357 = (try_bitpos
- align_base
) & (group_align
- 1);
3358 unsigned HOST_WIDE_INT align
= group_align
;
3360 align
= least_bit_hwi (align_bitpos
);
3361 if (!allow_unaligned_store
)
3362 try_size
= MIN (try_size
, align
);
3363 if (!allow_unaligned_load
)
3365 /* If we can't do or don't want to do unaligned stores
3366 as well as loads, we need to take the loads into account
3368 unsigned HOST_WIDE_INT load_align
= group_load_align
;
3369 align_bitpos
= (try_bitpos
- align_base
) & (load_align
- 1);
3371 load_align
= least_bit_hwi (align_bitpos
);
3372 for (int i
= 0; i
< 2; ++i
)
3373 if (group
->load_align
[i
])
3376 = known_alignment (try_bitpos
3377 - group
->stores
[0]->bitpos
3378 + group
->stores
[0]->ops
[i
].bitpos
3379 - group
->load_align_base
[i
]);
3380 if (align_bitpos
& (group_load_align
- 1))
3382 unsigned HOST_WIDE_INT a
= least_bit_hwi (align_bitpos
);
3383 load_align
= MIN (load_align
, a
);
3386 try_size
= MIN (try_size
, load_align
);
3388 store_immediate_info
*info
3389 = find_constituent_stores (group
, NULL
, &first
, try_bitpos
, try_size
);
3392 /* If there is just one original statement for the range, see if
3393 we can just reuse the original store which could be even larger
3395 unsigned HOST_WIDE_INT stmt_end
3396 = ROUND_UP (info
->bitpos
+ info
->bitsize
, BITS_PER_UNIT
);
3397 info
= find_constituent_stores (group
, NULL
, &first
, try_bitpos
,
3398 stmt_end
- try_bitpos
);
3399 if (info
&& info
->bitpos
>= try_bitpos
)
3401 try_size
= stmt_end
- try_bitpos
;
3406 /* Approximate store bitsize for the case when there are no padding
3408 while (try_size
> size
)
3410 /* Now look for whole padding bytes at the end of that bitsize. */
3411 for (nonmasked
= try_size
/ BITS_PER_UNIT
; nonmasked
> 0; --nonmasked
)
3412 if (group
->mask
[try_pos
- bytepos
+ nonmasked
- 1]
3413 != (unsigned char) ~0U
3415 || group
->val
[try_pos
- bytepos
+ nonmasked
- 1] != 0))
3419 /* If entire try_size range is padding, skip it. */
3420 try_pos
+= try_size
/ BITS_PER_UNIT
;
3424 /* Otherwise try to decrease try_size if second half, last 3 quarters
3425 etc. are padding. */
3426 nonmasked
*= BITS_PER_UNIT
;
3427 while (nonmasked
<= try_size
/ 2)
3429 if (!allow_unaligned_store
&& group_align
> BITS_PER_UNIT
)
3431 /* Now look for whole padding bytes at the start of that bitsize. */
3432 unsigned int try_bytesize
= try_size
/ BITS_PER_UNIT
, masked
;
3433 for (masked
= 0; masked
< try_bytesize
; ++masked
)
3434 if (group
->mask
[try_pos
- bytepos
+ masked
] != (unsigned char) ~0U
3436 || group
->val
[try_pos
- bytepos
+ masked
] != 0))
3438 masked
*= BITS_PER_UNIT
;
3439 gcc_assert (masked
< try_size
);
3440 if (masked
>= try_size
/ 2)
3442 while (masked
>= try_size
/ 2)
3445 try_pos
+= try_size
/ BITS_PER_UNIT
;
3449 /* Need to recompute the alignment, so just retry at the new
3460 struct split_store
*store
3461 = new split_store (try_pos
, try_size
, align
);
3462 info
= find_constituent_stores (group
, &store
->orig_stores
,
3463 &first
, try_bitpos
, try_size
);
3465 && info
->bitpos
>= try_bitpos
3466 && info
->bitpos
+ info
->bitsize
<= try_bitpos
+ try_size
)
3471 split_stores
->safe_push (store
);
3474 try_pos
+= try_size
/ BITS_PER_UNIT
;
3481 struct split_store
*store
;
3482 /* If we are reusing some original stores and any of the
3483 original SSA_NAMEs had multiple uses, we need to subtract
3484 those now before we add the new ones. */
3485 if (total_new
[0] && any_orig
)
3487 FOR_EACH_VEC_ELT (*split_stores
, i
, store
)
3489 total_new
[0] -= count_multiple_uses (store
->orig_stores
[0]);
3491 total_new
[0] += ret
; /* The new store. */
3492 store_immediate_info
*info
= group
->stores
[0];
3493 if (info
->ops
[0].base_addr
)
3494 total_new
[0] += ret
;
3495 if (info
->ops
[1].base_addr
)
3496 total_new
[0] += ret
;
3497 switch (info
->rhs_code
)
3502 total_new
[0] += ret
; /* The new BIT_*_EXPR stmt. */
3507 FOR_EACH_VEC_ELT (*split_stores
, i
, store
)
3510 bool bit_not_p
[3] = { false, false, false };
3511 /* If all orig_stores have certain bit_not_p set, then
3512 we'd use a BIT_NOT_EXPR stmt and need to account for it.
3513 If some orig_stores have certain bit_not_p set, then
3514 we'd use a BIT_XOR_EXPR with a mask and need to account for
3516 FOR_EACH_VEC_ELT (store
->orig_stores
, j
, info
)
3518 if (info
->ops
[0].bit_not_p
)
3519 bit_not_p
[0] = true;
3520 if (info
->ops
[1].bit_not_p
)
3521 bit_not_p
[1] = true;
3522 if (info
->bit_not_p
)
3523 bit_not_p
[2] = true;
3525 total_new
[0] += bit_not_p
[0] + bit_not_p
[1] + bit_not_p
[2];
/* Return the operation through which the operand IDX (if < 2) or
   result (IDX == 2) should be inverted.  If NOP_EXPR, no inversion
   is done, if BIT_NOT_EXPR, all bits are inverted, if BIT_XOR_EXPR,
   the bits should be xored with mask.  */

static enum tree_code
invert_op (split_store *split_store, int idx, tree int_type, tree &mask)
{
  unsigned int i;
  store_immediate_info *info;
  unsigned int cnt = 0;
  bool any_paddings = false;
  FOR_EACH_VEC_ELT (split_store->orig_stores, i, info)
    {
3547 bool bit_not_p
= idx
< 2 ? info
->ops
[idx
].bit_not_p
: info
->bit_not_p
;
3551 tree lhs
= gimple_assign_lhs (info
->stmt
);
3552 if (INTEGRAL_TYPE_P (TREE_TYPE (lhs
))
3553 && TYPE_PRECISION (TREE_TYPE (lhs
)) < info
->bitsize
)
3554 any_paddings
= true;
3560 if (cnt
== split_store
->orig_stores
.length () && !any_paddings
)
3561 return BIT_NOT_EXPR
;
3563 unsigned HOST_WIDE_INT try_bitpos
= split_store
->bytepos
* BITS_PER_UNIT
;
3564 unsigned buf_size
= split_store
->size
/ BITS_PER_UNIT
;
3566 = XALLOCAVEC (unsigned char, buf_size
);
3567 memset (buf
, ~0U, buf_size
);
3568 FOR_EACH_VEC_ELT (split_store
->orig_stores
, i
, info
)
3570 bool bit_not_p
= idx
< 2 ? info
->ops
[idx
].bit_not_p
: info
->bit_not_p
;
3573 /* Clear regions with bit_not_p and invert afterwards, rather than
3574 clear regions with !bit_not_p, so that gaps in between stores aren't
3576 unsigned HOST_WIDE_INT bitsize
= info
->bitsize
;
3577 unsigned HOST_WIDE_INT prec
= bitsize
;
3578 unsigned int pos_in_buffer
= 0;
3581 tree lhs
= gimple_assign_lhs (info
->stmt
);
3582 if (INTEGRAL_TYPE_P (TREE_TYPE (lhs
))
3583 && TYPE_PRECISION (TREE_TYPE (lhs
)) < bitsize
)
3584 prec
= TYPE_PRECISION (TREE_TYPE (lhs
));
3586 if (info
->bitpos
< try_bitpos
)
3588 gcc_assert (info
->bitpos
+ bitsize
> try_bitpos
);
3589 if (!BYTES_BIG_ENDIAN
)
3591 if (prec
<= try_bitpos
- info
->bitpos
)
3593 prec
-= try_bitpos
- info
->bitpos
;
3595 bitsize
-= try_bitpos
- info
->bitpos
;
3596 if (BYTES_BIG_ENDIAN
&& prec
> bitsize
)
3600 pos_in_buffer
= info
->bitpos
- try_bitpos
;
3603 /* If this is a bool inversion, invert just the least significant
3604 prec bits rather than all bits of it. */
3605 if (BYTES_BIG_ENDIAN
)
3607 pos_in_buffer
+= bitsize
- prec
;
3608 if (pos_in_buffer
>= split_store
->size
)
3613 if (pos_in_buffer
+ bitsize
> split_store
->size
)
3614 bitsize
= split_store
->size
- pos_in_buffer
;
3615 unsigned char *p
= buf
+ (pos_in_buffer
/ BITS_PER_UNIT
);
3616 if (BYTES_BIG_ENDIAN
)
3617 clear_bit_region_be (p
, (BITS_PER_UNIT
- 1
3618 - (pos_in_buffer
% BITS_PER_UNIT
)), bitsize
);
3620 clear_bit_region (p
, pos_in_buffer
% BITS_PER_UNIT
, bitsize
);
3622 for (unsigned int i
= 0; i
< buf_size
; ++i
)
3624 mask
= native_interpret_expr (int_type
, buf
, buf_size
);
3625 return BIT_XOR_EXPR
;
/* Given a merged store group GROUP output the widened version of it.
   The store chain is against the base object BASE.
   Try store sizes of at most MAX_STORE_BITSIZE bits wide and don't output
   unaligned stores for STRICT_ALIGNMENT targets or if it's too expensive.
   Make sure that the number of statements output is less than the number of
   original statements.  If a better sequence is possible emit it and
   return true.  */

bool
imm_store_chain_info::output_merged_store (merged_store_group *group)
{
  split_store *split_store;
  unsigned int i;
  unsigned HOST_WIDE_INT start_byte_pos
    = group->bitregion_start / BITS_PER_UNIT;

  unsigned int orig_num_stmts = group->stores.length ();
  if (orig_num_stmts < 2)
    return false;
3648 auto_vec
<struct split_store
*, 32> split_stores
;
3649 bool allow_unaligned_store
3650 = !STRICT_ALIGNMENT
&& PARAM_VALUE (PARAM_STORE_MERGING_ALLOW_UNALIGNED
);
3651 bool allow_unaligned_load
= allow_unaligned_store
;
3652 bool bzero_first
= false;
3653 if (group
->stores
[0]->rhs_code
== INTEGER_CST
3654 && TREE_CODE (gimple_assign_rhs1 (group
->stores
[0]->stmt
)) == CONSTRUCTOR
3655 && CONSTRUCTOR_NELTS (gimple_assign_rhs1 (group
->stores
[0]->stmt
)) == 0
3656 && group
->start
== group
->stores
[0]->bitpos
3657 && group
->width
== group
->stores
[0]->bitsize
3658 && (group
->start
% BITS_PER_UNIT
) == 0
3659 && (group
->width
% BITS_PER_UNIT
) == 0)
3661 if (allow_unaligned_store
|| bzero_first
)
3663 /* If unaligned stores are allowed, see how many stores we'd emit
3664 for unaligned and how many stores we'd emit for aligned stores.
3665 Only use unaligned stores if it allows fewer stores than aligned.
3666 Similarly, if there is a whole region clear first, prefer expanding
3667 it together compared to expanding clear first followed by merged
3669 unsigned cnt
[4] = { ~0, ~0, ~0, ~0 };
3671 for (int pass
= 0; pass
< 4; ++pass
)
3673 if (!allow_unaligned_store
&& (pass
& 1) != 0)
3675 if (!bzero_first
&& (pass
& 2) != 0)
3677 cnt
[pass
] = split_group (group
, (pass
& 1) != 0,
3678 allow_unaligned_load
, (pass
& 2) != 0,
3680 if (cnt
[pass
] < cnt
[pass_min
])
3683 if ((pass_min
& 1) == 0)
3684 allow_unaligned_store
= false;
3685 if ((pass_min
& 2) == 0)
3686 bzero_first
= false;
3688 unsigned total_orig
, total_new
;
3689 split_group (group
, allow_unaligned_store
, allow_unaligned_load
, bzero_first
,
3690 &split_stores
, &total_orig
, &total_new
);
3692 if (split_stores
.length () >= orig_num_stmts
)
3694 /* We didn't manage to reduce the number of statements. Bail out. */
3695 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
3696 fprintf (dump_file
, "Exceeded original number of stmts (%u)."
3697 " Not profitable to emit new sequence.\n",
3699 FOR_EACH_VEC_ELT (split_stores
, i
, split_store
)
3703 if (total_orig
<= total_new
)
3705 /* If number of estimated new statements is above estimated original
3706 statements, bail out too. */
3707 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
3708 fprintf (dump_file
, "Estimated number of original stmts (%u)"
3709 " not larger than estimated number of new"
3711 total_orig
, total_new
);
3712 FOR_EACH_VEC_ELT (split_stores
, i
, split_store
)
3717 gimple_stmt_iterator last_gsi
= gsi_for_stmt (group
->last_stmt
);
3718 gimple_seq seq
= NULL
;
3719 tree last_vdef
, new_vuse
;
3720 last_vdef
= gimple_vdef (group
->last_stmt
);
3721 new_vuse
= gimple_vuse (group
->last_stmt
);
3722 tree bswap_res
= NULL_TREE
;
3724 if (group
->stores
[0]->rhs_code
== LROTATE_EXPR
3725 || group
->stores
[0]->rhs_code
== NOP_EXPR
)
3727 tree fndecl
= NULL_TREE
, bswap_type
= NULL_TREE
, load_type
;
3728 gimple
*ins_stmt
= group
->stores
[0]->ins_stmt
;
3729 struct symbolic_number
*n
= &group
->stores
[0]->n
;
3730 bool bswap
= group
->stores
[0]->rhs_code
== LROTATE_EXPR
;
3735 load_type
= bswap_type
= uint16_type_node
;
3738 load_type
= uint32_type_node
;
3741 fndecl
= builtin_decl_explicit (BUILT_IN_BSWAP32
);
3742 bswap_type
= TREE_VALUE (TYPE_ARG_TYPES (TREE_TYPE (fndecl
)));
3746 load_type
= uint64_type_node
;
3749 fndecl
= builtin_decl_explicit (BUILT_IN_BSWAP64
);
3750 bswap_type
= TREE_VALUE (TYPE_ARG_TYPES (TREE_TYPE (fndecl
)));
3757 /* If the loads have each vuse of the corresponding store,
3758 we've checked the aliasing already in try_coalesce_bswap and
3759 we want to sink the need load into seq. So need to use new_vuse
3763 if (n
->vuse
== NULL
)
3769 /* Update vuse in case it has changed by output_merged_stores. */
3770 n
->vuse
= gimple_vuse (ins_stmt
);
3772 bswap_res
= bswap_replace (gsi_start (seq
), ins_stmt
, fndecl
,
3773 bswap_type
, load_type
, n
, bswap
);
3774 gcc_assert (bswap_res
);
3777 gimple
*stmt
= NULL
;
3778 auto_vec
<gimple
*, 32> orig_stmts
;
3779 gimple_seq this_seq
;
3780 tree addr
= force_gimple_operand_1 (unshare_expr (base_addr
), &this_seq
,
3781 is_gimple_mem_ref_addr
, NULL_TREE
);
3782 gimple_seq_add_seq_without_update (&seq
, this_seq
);
3784 tree load_addr
[2] = { NULL_TREE
, NULL_TREE
};
3785 gimple_seq load_seq
[2] = { NULL
, NULL
};
3786 gimple_stmt_iterator load_gsi
[2] = { gsi_none (), gsi_none () };
3787 for (int j
= 0; j
< 2; ++j
)
3789 store_operand_info
&op
= group
->stores
[0]->ops
[j
];
3790 if (op
.base_addr
== NULL_TREE
)
3793 store_immediate_info
*infol
= group
->stores
.last ();
3794 if (gimple_vuse (op
.stmt
) == gimple_vuse (infol
->ops
[j
].stmt
))
3796 /* We can't pick the location randomly; while we've verified
3797 all the loads have the same vuse, they can be still in different
3798 basic blocks and we need to pick the one from the last bb:
3804 otherwise if we put the wider load at the q[0] load, we might
3805 segfault if q[1] is not mapped. */
3806 basic_block bb
= gimple_bb (op
.stmt
);
3807 gimple
*ostmt
= op
.stmt
;
3808 store_immediate_info
*info
;
3809 FOR_EACH_VEC_ELT (group
->stores
, i
, info
)
3811 gimple
*tstmt
= info
->ops
[j
].stmt
;
3812 basic_block tbb
= gimple_bb (tstmt
);
3813 if (dominated_by_p (CDI_DOMINATORS
, tbb
, bb
))
3819 load_gsi
[j
] = gsi_for_stmt (ostmt
);
3821 = force_gimple_operand_1 (unshare_expr (op
.base_addr
),
3822 &load_seq
[j
], is_gimple_mem_ref_addr
,
3825 else if (operand_equal_p (base_addr
, op
.base_addr
, 0))
3826 load_addr
[j
] = addr
;
3830 = force_gimple_operand_1 (unshare_expr (op
.base_addr
),
3831 &this_seq
, is_gimple_mem_ref_addr
,
3833 gimple_seq_add_seq_without_update (&seq
, this_seq
);
3837 FOR_EACH_VEC_ELT (split_stores
, i
, split_store
)
3839 unsigned HOST_WIDE_INT try_size
= split_store
->size
;
3840 unsigned HOST_WIDE_INT try_pos
= split_store
->bytepos
;
3841 unsigned HOST_WIDE_INT try_bitpos
= try_pos
* BITS_PER_UNIT
;
3842 unsigned HOST_WIDE_INT align
= split_store
->align
;
3845 if (split_store
->orig
)
3847 /* If there is just a single constituent store which covers
3848 the whole area, just reuse the lhs and rhs. */
3849 gimple
*orig_stmt
= split_store
->orig_stores
[0]->stmt
;
3850 dest
= gimple_assign_lhs (orig_stmt
);
3851 src
= gimple_assign_rhs1 (orig_stmt
);
3852 loc
= gimple_location (orig_stmt
);
3856 store_immediate_info
*info
;
3857 unsigned short clique
, base
;
3859 FOR_EACH_VEC_ELT (split_store
->orig_stores
, k
, info
)
3860 orig_stmts
.safe_push (info
->stmt
);
3862 = get_alias_type_for_stmts (orig_stmts
, false, &clique
, &base
);
3863 loc
= get_location_for_stmts (orig_stmts
);
3864 orig_stmts
.truncate (0);
3866 tree int_type
= build_nonstandard_integer_type (try_size
, UNSIGNED
);
3867 int_type
= build_aligned_type (int_type
, align
);
3868 dest
= fold_build2 (MEM_REF
, int_type
, addr
,
3869 build_int_cst (offset_type
, try_pos
));
3870 if (TREE_CODE (dest
) == MEM_REF
)
3872 MR_DEPENDENCE_CLIQUE (dest
) = clique
;
3873 MR_DEPENDENCE_BASE (dest
) = base
;
3878 mask
= integer_zero_node
;
3880 mask
= native_interpret_expr (int_type
,
3881 group
->mask
+ try_pos
3887 j
< 1 + (split_store
->orig_stores
[0]->ops
[1].val
!= NULL_TREE
);
3890 store_operand_info
&op
= split_store
->orig_stores
[0]->ops
[j
];
3893 else if (op
.base_addr
)
3895 FOR_EACH_VEC_ELT (split_store
->orig_stores
, k
, info
)
3896 orig_stmts
.safe_push (info
->ops
[j
].stmt
);
3898 offset_type
= get_alias_type_for_stmts (orig_stmts
, true,
3900 location_t load_loc
= get_location_for_stmts (orig_stmts
);
3901 orig_stmts
.truncate (0);
3903 unsigned HOST_WIDE_INT load_align
= group
->load_align
[j
];
3904 unsigned HOST_WIDE_INT align_bitpos
3905 = known_alignment (try_bitpos
3906 - split_store
->orig_stores
[0]->bitpos
3908 if (align_bitpos
& (load_align
- 1))
3909 load_align
= least_bit_hwi (align_bitpos
);
3912 = build_nonstandard_integer_type (try_size
, UNSIGNED
);
3914 = build_aligned_type (load_int_type
, load_align
);
3916 poly_uint64 load_pos
3917 = exact_div (try_bitpos
3918 - split_store
->orig_stores
[0]->bitpos
3921 ops
[j
] = fold_build2 (MEM_REF
, load_int_type
, load_addr
[j
],
3922 build_int_cst (offset_type
, load_pos
));
3923 if (TREE_CODE (ops
[j
]) == MEM_REF
)
3925 MR_DEPENDENCE_CLIQUE (ops
[j
]) = clique
;
3926 MR_DEPENDENCE_BASE (ops
[j
]) = base
;
3928 if (!integer_zerop (mask
))
3929 /* The load might load some bits (that will be masked off
3930 later on) uninitialized, avoid -W*uninitialized
3931 warnings in that case. */
3932 TREE_NO_WARNING (ops
[j
]) = 1;
3934 stmt
= gimple_build_assign (make_ssa_name (int_type
),
3936 gimple_set_location (stmt
, load_loc
);
3937 if (gsi_bb (load_gsi
[j
]))
3939 gimple_set_vuse (stmt
, gimple_vuse (op
.stmt
));
3940 gimple_seq_add_stmt_without_update (&load_seq
[j
], stmt
);
3944 gimple_set_vuse (stmt
, new_vuse
);
3945 gimple_seq_add_stmt_without_update (&seq
, stmt
);
3947 ops
[j
] = gimple_assign_lhs (stmt
);
3949 enum tree_code inv_op
3950 = invert_op (split_store
, j
, int_type
, xor_mask
);
3951 if (inv_op
!= NOP_EXPR
)
3953 stmt
= gimple_build_assign (make_ssa_name (int_type
),
3954 inv_op
, ops
[j
], xor_mask
);
3955 gimple_set_location (stmt
, load_loc
);
3956 ops
[j
] = gimple_assign_lhs (stmt
);
3958 if (gsi_bb (load_gsi
[j
]))
3959 gimple_seq_add_stmt_without_update (&load_seq
[j
],
3962 gimple_seq_add_stmt_without_update (&seq
, stmt
);
3966 ops
[j
] = native_interpret_expr (int_type
,
3967 group
->val
+ try_pos
3972 switch (split_store
->orig_stores
[0]->rhs_code
)
3977 FOR_EACH_VEC_ELT (split_store
->orig_stores
, k
, info
)
3979 tree rhs1
= gimple_assign_rhs1 (info
->stmt
);
3980 orig_stmts
.safe_push (SSA_NAME_DEF_STMT (rhs1
));
3983 bit_loc
= get_location_for_stmts (orig_stmts
);
3984 orig_stmts
.truncate (0);
3987 = gimple_build_assign (make_ssa_name (int_type
),
3988 split_store
->orig_stores
[0]->rhs_code
,
3990 gimple_set_location (stmt
, bit_loc
);
3991 /* If there is just one load and there is a separate
3992 load_seq[0], emit the bitwise op right after it. */
3993 if (load_addr
[1] == NULL_TREE
&& gsi_bb (load_gsi
[0]))
3994 gimple_seq_add_stmt_without_update (&load_seq
[0], stmt
);
3995 /* Otherwise, if at least one load is in seq, we need to
3996 emit the bitwise op right before the store. If there
3997 are two loads and are emitted somewhere else, it would
3998 be better to emit the bitwise op as early as possible;
3999 we don't track where that would be possible right now
4002 gimple_seq_add_stmt_without_update (&seq
, stmt
);
4003 src
= gimple_assign_lhs (stmt
);
4005 enum tree_code inv_op
;
4006 inv_op
= invert_op (split_store
, 2, int_type
, xor_mask
);
4007 if (inv_op
!= NOP_EXPR
)
4009 stmt
= gimple_build_assign (make_ssa_name (int_type
),
4010 inv_op
, src
, xor_mask
);
4011 gimple_set_location (stmt
, bit_loc
);
4012 if (load_addr
[1] == NULL_TREE
&& gsi_bb (load_gsi
[0]))
4013 gimple_seq_add_stmt_without_update (&load_seq
[0], stmt
);
4015 gimple_seq_add_stmt_without_update (&seq
, stmt
);
4016 src
= gimple_assign_lhs (stmt
);
4022 if (!is_gimple_val (src
))
4024 stmt
= gimple_build_assign (make_ssa_name (TREE_TYPE (src
)),
4026 gimple_seq_add_stmt_without_update (&seq
, stmt
);
4027 src
= gimple_assign_lhs (stmt
);
4029 if (!useless_type_conversion_p (int_type
, TREE_TYPE (src
)))
4031 stmt
= gimple_build_assign (make_ssa_name (int_type
),
4033 gimple_seq_add_stmt_without_update (&seq
, stmt
);
4034 src
= gimple_assign_lhs (stmt
);
4036 inv_op
= invert_op (split_store
, 2, int_type
, xor_mask
);
4037 if (inv_op
!= NOP_EXPR
)
4039 stmt
= gimple_build_assign (make_ssa_name (int_type
),
4040 inv_op
, src
, xor_mask
);
4041 gimple_set_location (stmt
, loc
);
4042 gimple_seq_add_stmt_without_update (&seq
, stmt
);
4043 src
= gimple_assign_lhs (stmt
);
      /* If bit insertion is required, we use the source as an accumulator
	 into which the successive bit-field values are manually inserted.
	 FIXME: perhaps use BIT_INSERT_EXPR instead in some cases?  */
      if (group->bit_insertion)
	FOR_EACH_VEC_ELT (split_store->orig_stores, k, info)
	  if (info->rhs_code == BIT_INSERT_EXPR
	      && info->bitpos < try_bitpos + try_size
	      && info->bitpos + info->bitsize > try_bitpos)
	    {
	      /* Mask, truncate, convert to final type, shift and ior into
		 the accumulator.  Note that every step can be a no-op.  */
	      const HOST_WIDE_INT start_gap = info->bitpos - try_bitpos;
	      const HOST_WIDE_INT end_gap
		= (try_bitpos + try_size) - (info->bitpos + info->bitsize);
	      tree tem = info->ops[0].val;
	      if (TYPE_PRECISION (TREE_TYPE (tem)) <= info->bitsize)
		{
		  tree bitfield_type
		    = build_nonstandard_integer_type (info->bitsize,
						      UNSIGNED);
		  tem = gimple_convert (&seq, loc, bitfield_type, tem);
		}
	      else if ((BYTES_BIG_ENDIAN ? start_gap : end_gap) > 0)
		{
		  const unsigned HOST_WIDE_INT imask
		    = (HOST_WIDE_INT_1U << info->bitsize) - 1;
		  tem = gimple_build (&seq, loc,
				      BIT_AND_EXPR, TREE_TYPE (tem), tem,
				      build_int_cst (TREE_TYPE (tem),
						     imask));
		}
	      const HOST_WIDE_INT shift
		= (BYTES_BIG_ENDIAN ? end_gap : start_gap);
	      if (shift < 0)
		tem = gimple_build (&seq, loc,
				    RSHIFT_EXPR, TREE_TYPE (tem), tem,
				    build_int_cst (NULL_TREE, -shift));
	      tem = gimple_convert (&seq, loc, int_type, tem);
	      if (shift > 0)
		tem = gimple_build (&seq, loc,
				    LSHIFT_EXPR, int_type, tem,
				    build_int_cst (NULL_TREE, shift));
	      src = gimple_build (&seq, loc,
				  BIT_IOR_EXPR, int_type, tem, src);
	    }
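      /* For instance, on a little-endian target, inserting a 3-bit value V
	 whose bit position is 5 bits past TRY_BITPOS works out to roughly
	   tem = V & 7;                  masked to the bit-field width
	   tem = (int_type) tem << 5;    moved to its place in the group
	   src = src | tem;              accumulated
	 with each individual step omitted when it is a no-op.  */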
      if (!integer_zerop (mask))
	{
	  tree tem = make_ssa_name (int_type);
	  tree load_src = unshare_expr (dest);
	  /* The load might load some or all bits uninitialized,
	     avoid -W*uninitialized warnings in that case.
	     As optimization, it would be nice if all the bits are
	     provably uninitialized (no stores at all yet or previous
	     store a CLOBBER) we'd optimize away the load and replace
	     it e.g. with 0.  */
	  TREE_NO_WARNING (load_src) = 1;
	  stmt = gimple_build_assign (tem, load_src);
	  gimple_set_location (stmt, loc);
	  gimple_set_vuse (stmt, new_vuse);
	  gimple_seq_add_stmt_without_update (&seq, stmt);

	  /* FIXME: If there is a single chunk of zero bits in mask,
	     perhaps use BIT_INSERT_EXPR instead?  */
	  stmt = gimple_build_assign (make_ssa_name (int_type),
				      BIT_AND_EXPR, tem, mask);
	  gimple_set_location (stmt, loc);
	  gimple_seq_add_stmt_without_update (&seq, stmt);
	  tem = gimple_assign_lhs (stmt);

	  if (TREE_CODE (src) == INTEGER_CST)
	    src = wide_int_to_tree (int_type,
				    wi::bit_and_not (wi::to_wide (src),
						     wi::to_wide (mask)));
	  else
	    {
	      tree nmask
		= wide_int_to_tree (int_type,
				    wi::bit_not (wi::to_wide (mask)));
	      stmt = gimple_build_assign (make_ssa_name (int_type),
					  BIT_AND_EXPR, src, nmask);
	      gimple_set_location (stmt, loc);
	      gimple_seq_add_stmt_without_update (&seq, stmt);
	      src = gimple_assign_lhs (stmt);
	    }
	  stmt = gimple_build_assign (make_ssa_name (int_type),
				      BIT_IOR_EXPR, tem, src);
	  gimple_set_location (stmt, loc);
	  gimple_seq_add_stmt_without_update (&seq, stmt);
	  src = gimple_assign_lhs (stmt);
	}

      stmt = gimple_build_assign (dest, src);
      gimple_set_location (stmt, loc);
      gimple_set_vuse (stmt, new_vuse);
      gimple_seq_add_stmt_without_update (&seq, stmt);
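      /* When MASK is not all-zeros, the sequence built above amounts to a
	 read-modify-write of the destination, roughly
	   DEST = (DEST & MASK) | (SRC & ~MASK);
	 i.e. bits that no original store covers (the one-bits of MASK) keep
	 their previous value and only the merged bits are overwritten.  */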
      tree new_vdef;
      if (i < split_stores.length () - 1)
	new_vdef = make_ssa_name (gimple_vop (cfun), stmt);
      else
	new_vdef = last_vdef;

      gimple_set_vdef (stmt, new_vdef);
      SSA_NAME_DEF_STMT (new_vdef) = stmt;
      new_vuse = new_vdef;
    }

  FOR_EACH_VEC_ELT (split_stores, i, split_store)
    delete split_store;

  gcc_assert (seq);
  if (dump_file)
    {
      fprintf (dump_file,
	       "New sequence of %u stores to replace old one of %u stores\n",
	       split_stores.length (), orig_num_stmts);
      if (dump_flags & TDF_DETAILS)
	print_gimple_seq (dump_file, seq, 0, TDF_VOPS | TDF_MEMSYMS);
    }

  gsi_insert_seq_after (&last_gsi, seq, GSI_SAME_STMT);
  for (int j = 0; j < 2; ++j)
    if (load_seq[j])
      gsi_insert_seq_after (&load_gsi[j], load_seq[j], GSI_SAME_STMT);

  return true;
}
/* Process the merged_store_group objects created in the coalescing phase.
   The stores are all against the base object BASE.
   Try to output the widened stores and delete the original statements if
   successful.  Return true iff any changes were made.  */

bool
imm_store_chain_info::output_merged_stores ()
{
  unsigned int i;
  merged_store_group *merged_store;
  bool ret = false;
  FOR_EACH_VEC_ELT (m_merged_store_groups, i, merged_store)
    {
      if (output_merged_store (merged_store))
	{
	  unsigned int j;
	  store_immediate_info *store;
	  FOR_EACH_VEC_ELT (merged_store->stores, j, store)
	    {
	      gimple *stmt = store->stmt;
	      gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
	      gsi_remove (&gsi, true);
	      if (stmt != merged_store->last_stmt)
		{
		  unlink_stmt_vdef (stmt);
		  release_defs (stmt);
		}
	    }
	  ret = true;
	}
    }
  if (ret && dump_file)
    fprintf (dump_file, "Merging successful!\n");

  return ret;
}
/* Coalesce the store_immediate_info objects recorded against the base object
   BASE in the first phase and output them.
   Delete the allocated structures.
   Return true if any changes were made.  */

bool
imm_store_chain_info::terminate_and_process_chain ()
{
  /* Process store chain.  */
  bool ret = false;
  if (m_store_info.length () > 1)
    {
      ret = coalesce_immediate_stores ();
      if (ret)
	ret = output_merged_stores ();
    }

  /* Delete all the entries we allocated ourselves.  */
  store_immediate_info *info;
  unsigned int i;
  FOR_EACH_VEC_ELT (m_store_info, i, info)
    delete info;

  merged_store_group *merged_info;
  FOR_EACH_VEC_ELT (m_merged_store_groups, i, merged_info)
    delete merged_info;

  return ret;
}
/* Return true iff LHS is a destination potentially interesting for
   store merging.  In practice these are the codes that get_inner_reference
   can process.  */

static bool
lhs_valid_for_store_merging_p (tree lhs)
{
  tree_code code = TREE_CODE (lhs);

  if (code == ARRAY_REF || code == ARRAY_RANGE_REF || code == MEM_REF
      || code == COMPONENT_REF || code == BIT_FIELD_REF)
    return true;

  return false;
}
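/* For example, destinations such as x.f (COMPONENT_REF), a[i] (ARRAY_REF),
   *p (MEM_REF) and explicit BIT_FIELD_REFs are accepted above, while a
   store to a bare decl or SSA name is not considered for merging.  */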
/* Return true if the tree RHS is a constant we want to consider
   during store merging.  In practice accept all codes that
   native_encode_expr accepts.  */

static bool
rhs_valid_for_store_merging_p (tree rhs)
{
  unsigned HOST_WIDE_INT size;
  if (TREE_CODE (rhs) == CONSTRUCTOR
      && !TREE_CLOBBER_P (rhs)
      && CONSTRUCTOR_NELTS (rhs) == 0
      && TYPE_SIZE_UNIT (TREE_TYPE (rhs))
      && tree_fits_uhwi_p (TYPE_SIZE_UNIT (TREE_TYPE (rhs))))
    return true;
  return (GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (rhs))).is_constant (&size)
	  && native_encode_expr (rhs, NULL, size) != 0);
}
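/* For example, integer and floating-point literals are accepted above
   (native_encode_expr can produce their byte representation), and so is an
   empty, non-clobber CONSTRUCTOR such as an "= {}" aggregate initialization
   of known size, which simply means "all bytes zero".  */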
/* If MEM is a memory reference usable for store merging (either as
   store destination or for loads), return the non-NULL base_addr
   and set *PBITSIZE, *PBITPOS, *PBITREGION_START and *PBITREGION_END.
   Otherwise return NULL_TREE; *PBITPOS should still be valid even in
   that case.  */

static tree
mem_valid_for_store_merging (tree mem, poly_uint64 *pbitsize,
			     poly_uint64 *pbitpos,
			     poly_uint64 *pbitregion_start,
			     poly_uint64 *pbitregion_end)
{
  poly_int64 bitsize, bitpos;
  poly_uint64 bitregion_start = 0, bitregion_end = 0;
  machine_mode mode;
  int unsignedp = 0, reversep = 0, volatilep = 0;
  tree offset;
  tree base_addr = get_inner_reference (mem, &bitsize, &bitpos, &offset, &mode,
					&unsignedp, &reversep, &volatilep);
  *pbitsize = bitsize;
  if (known_eq (bitsize, 0))
    return NULL_TREE;

  if (TREE_CODE (mem) == COMPONENT_REF
      && DECL_BIT_FIELD_TYPE (TREE_OPERAND (mem, 1)))
    {
      get_bit_range (&bitregion_start, &bitregion_end, mem, &bitpos, &offset);
      if (maybe_ne (bitregion_end, 0U))
	bitregion_end += 1;
    }

  if (reversep)
    return NULL_TREE;

  /* We do not want to rewrite TARGET_MEM_REFs.  */
  if (TREE_CODE (base_addr) == TARGET_MEM_REF)
    return NULL_TREE;
  /* In some cases get_inner_reference may return a
     MEM_REF [ptr + byteoffset].  For the purposes of this pass
     canonicalize the base_addr to MEM_REF [ptr] and take
     byteoffset into account in the bitpos.  This occurs in
     PR 23684 and this way we can catch more chains.  */
  else if (TREE_CODE (base_addr) == MEM_REF)
    {
      poly_offset_int byte_off = mem_ref_offset (base_addr);
      poly_offset_int bit_off = byte_off << LOG2_BITS_PER_UNIT;
      bit_off += bitpos;
      if (known_ge (bit_off, 0) && bit_off.to_shwi (&bitpos))
	{
	  if (maybe_ne (bitregion_end, 0U))
	    {
	      bit_off = byte_off << LOG2_BITS_PER_UNIT;
	      bit_off += bitregion_start;
	      if (bit_off.to_uhwi (&bitregion_start))
		{
		  bit_off = byte_off << LOG2_BITS_PER_UNIT;
		  bit_off += bitregion_end;
		  if (!bit_off.to_uhwi (&bitregion_end))
		    bitregion_end = 0;
		}
	      else
		bitregion_end = 0;
	    }
	}
      else
	return NULL_TREE;
      base_addr = TREE_OPERAND (base_addr, 0);
    }
  /* get_inner_reference returns the base object, get at its
     address now.  */
  else
    {
      if (maybe_lt (bitpos, 0))
	return NULL_TREE;
      base_addr = build_fold_addr_expr (base_addr);
    }

  if (known_eq (bitregion_end, 0U))
    {
      bitregion_start = round_down_to_byte_boundary (bitpos);
      bitregion_end = bitpos;
      bitregion_end = round_up_to_byte_boundary (bitregion_end + bitsize);
    }

  if (offset != NULL_TREE)
    {
      /* If the access is variable offset then a base decl has to be
	 address-taken to be able to emit pointer-based stores to it.
	 ??? We might be able to get away with re-using the original
	 base up to the first variable part and then wrapping that inside
	 a BIT_FIELD_REF.  */
      tree base = get_base_address (base_addr);
      if (!base
	  || (DECL_P (base) && !TREE_ADDRESSABLE (base)))
	return NULL_TREE;

      base_addr = build2 (POINTER_PLUS_EXPR, TREE_TYPE (base_addr),
			  base_addr, offset);
    }

  *pbitsize = bitsize;
  *pbitpos = bitpos;
  *pbitregion_start = bitregion_start;
  *pbitregion_end = bitregion_end;
  return base_addr;
}
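/* As an example of the MEM_REF canonicalization above, a store to
   MEM[p_1 + 4B] is returned with base p_1 and *PBITPOS increased by 32 bits,
   so that it can join a chain with other stores based on p_1.  */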
/* Return true if STMT is a load that can be used for store merging.
   In that case fill in *OP.  BITSIZE, BITPOS, BITREGION_START and
   BITREGION_END are properties of the corresponding store.  */

static bool
handled_load (gimple *stmt, store_operand_info *op,
	      poly_uint64 bitsize, poly_uint64 bitpos,
	      poly_uint64 bitregion_start, poly_uint64 bitregion_end)
{
  if (!is_gimple_assign (stmt))
    return false;
  if (gimple_assign_rhs_code (stmt) == BIT_NOT_EXPR)
    {
      tree rhs1 = gimple_assign_rhs1 (stmt);
      if (TREE_CODE (rhs1) == SSA_NAME
	  && handled_load (SSA_NAME_DEF_STMT (rhs1), op, bitsize, bitpos,
			   bitregion_start, bitregion_end))
	{
	  /* Don't allow _1 = load; _2 = ~_1; _3 = ~_2; which should have
	     been optimized earlier, but if allowed here, would confuse the
	     multiple uses counting.  */
	  if (op->bit_not_p)
	    return false;
	  op->bit_not_p = !op->bit_not_p;
	  return true;
	}
      return false;
    }
  if (gimple_vuse (stmt)
      && gimple_assign_load_p (stmt)
      && !stmt_can_throw_internal (cfun, stmt)
      && !gimple_has_volatile_ops (stmt))
    {
      tree mem = gimple_assign_rhs1 (stmt);
      op->base_addr
	= mem_valid_for_store_merging (mem, &op->bitsize, &op->bitpos,
				       &op->bitregion_start,
				       &op->bitregion_end);
      if (op->base_addr != NULL_TREE
	  && known_eq (op->bitsize, bitsize)
	  && multiple_p (op->bitpos - bitpos, BITS_PER_UNIT)
	  && known_ge (op->bitpos - op->bitregion_start,
		       bitpos - bitregion_start)
	  && known_ge (op->bitregion_end - op->bitpos,
		       bitregion_end - bitpos))
	{
	  op->stmt = stmt;
	  op->val = mem;
	  op->bit_not_p = false;
	  return true;
	}
    }
  return false;
}
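/* For example, for a store *p = _1 where _1 = *q, handled_load fills *OP
   with the load's base address and bit range so that the store's operand
   can later be materialized from a wider load, provided the load covers
   the same bit size and a compatible bit region.  */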
/* Record the store STMT for store merging optimization if it can be
   optimized.  */

void
pass_store_merging::process_store (gimple *stmt)
{
  tree lhs = gimple_assign_lhs (stmt);
  tree rhs = gimple_assign_rhs1 (stmt);
  poly_uint64 bitsize, bitpos;
  poly_uint64 bitregion_start, bitregion_end;
  tree base_addr
    = mem_valid_for_store_merging (lhs, &bitsize, &bitpos,
				   &bitregion_start, &bitregion_end);
  if (known_eq (bitsize, 0U))
    return;

  bool invalid = (base_addr == NULL_TREE
		  || (maybe_gt (bitsize,
				(unsigned int) MAX_BITSIZE_MODE_ANY_INT)
		      && TREE_CODE (rhs) != INTEGER_CST
		      && (TREE_CODE (rhs) != CONSTRUCTOR
			  || CONSTRUCTOR_NELTS (rhs) != 0)));
  enum tree_code rhs_code = ERROR_MARK;
  bool bit_not_p = false;
  struct symbolic_number n;
  gimple *ins_stmt = NULL;
  store_operand_info ops[2];
  if (invalid)
    ;
  else if (rhs_valid_for_store_merging_p (rhs))
    {
      rhs_code = INTEGER_CST;
      ops[0].val = rhs;
    }
  else if (TREE_CODE (rhs) != SSA_NAME)
    invalid = true;
  else
    {
      gimple *def_stmt = SSA_NAME_DEF_STMT (rhs), *def_stmt1, *def_stmt2;
      if (!is_gimple_assign (def_stmt))
	invalid = true;
      else if (handled_load (def_stmt, &ops[0], bitsize, bitpos,
			     bitregion_start, bitregion_end))
	rhs_code = MEM_REF;
      else if (gimple_assign_rhs_code (def_stmt) == BIT_NOT_EXPR)
	{
	  tree rhs1 = gimple_assign_rhs1 (def_stmt);
	  if (TREE_CODE (rhs1) == SSA_NAME
	      && is_gimple_assign (SSA_NAME_DEF_STMT (rhs1)))
	    {
	      bit_not_p = true;
	      def_stmt = SSA_NAME_DEF_STMT (rhs1);
	    }
	}

      if (rhs_code == ERROR_MARK && !invalid)
	switch ((rhs_code = gimple_assign_rhs_code (def_stmt)))
	  {
	  case BIT_AND_EXPR:
	  case BIT_IOR_EXPR:
	  case BIT_XOR_EXPR:
	    tree rhs1, rhs2;
	    rhs1 = gimple_assign_rhs1 (def_stmt);
	    rhs2 = gimple_assign_rhs2 (def_stmt);
	    invalid = true;
	    if (TREE_CODE (rhs1) != SSA_NAME)
	      break;
	    def_stmt1 = SSA_NAME_DEF_STMT (rhs1);
	    if (!is_gimple_assign (def_stmt1)
		|| !handled_load (def_stmt1, &ops[0], bitsize, bitpos,
				  bitregion_start, bitregion_end))
	      break;
	    if (rhs_valid_for_store_merging_p (rhs2))
	      ops[1].val = rhs2;
	    else if (TREE_CODE (rhs2) != SSA_NAME)
	      break;
	    else
	      {
		def_stmt2 = SSA_NAME_DEF_STMT (rhs2);
		if (!is_gimple_assign (def_stmt2))
		  break;
		else if (!handled_load (def_stmt2, &ops[1], bitsize, bitpos,
					bitregion_start, bitregion_end))
		  break;
	      }
	    invalid = false;
	    break;
	  default:
	    invalid = true;
	    break;
	  }
      unsigned HOST_WIDE_INT const_bitsize;
      if (bitsize.is_constant (&const_bitsize)
	  && (const_bitsize % BITS_PER_UNIT) == 0
	  && const_bitsize <= 64
	  && multiple_p (bitpos, BITS_PER_UNIT))
	{
	  ins_stmt = find_bswap_or_nop_1 (def_stmt, &n, 12);
	  if (ins_stmt)
	    {
	      uint64_t nn = n.n;
	      for (unsigned HOST_WIDE_INT i = 0;
		   i < const_bitsize;
		   i += BITS_PER_UNIT, nn >>= BITS_PER_MARKER)
		if ((nn & MARKER_MASK) == 0
		    || (nn & MARKER_MASK) == MARKER_BYTE_UNKNOWN)
		  {
		    ins_stmt = NULL;
		    break;
		  }
	      if (ins_stmt)
		{
		  if (invalid)
		    {
		      rhs_code = LROTATE_EXPR;
		      ops[0].base_addr = NULL_TREE;
		      ops[1].base_addr = NULL_TREE;
		    }
		  invalid = false;
		}
	    }
	}
      if (invalid
	  && bitsize.is_constant (&const_bitsize)
	  && ((const_bitsize % BITS_PER_UNIT) != 0
	      || !multiple_p (bitpos, BITS_PER_UNIT))
	  && const_bitsize <= 64)
	{
	  /* Bypass a conversion to the bit-field type.  */
	  if (!bit_not_p
	      && is_gimple_assign (def_stmt)
	      && CONVERT_EXPR_CODE_P (rhs_code))
	    {
	      tree rhs1 = gimple_assign_rhs1 (def_stmt);
	      if (TREE_CODE (rhs1) == SSA_NAME
		  && INTEGRAL_TYPE_P (TREE_TYPE (rhs1)))
		rhs = rhs1;
	    }
	  rhs_code = BIT_INSERT_EXPR;
	  ops[0].val = rhs;
	  ops[0].base_addr = NULL_TREE;
	  ops[1].base_addr = NULL_TREE;
	  invalid = false;
	}
    }
  unsigned HOST_WIDE_INT const_bitsize, const_bitpos;
  unsigned HOST_WIDE_INT const_bitregion_start, const_bitregion_end;
  if (invalid
      || !bitsize.is_constant (&const_bitsize)
      || !bitpos.is_constant (&const_bitpos)
      || !bitregion_start.is_constant (&const_bitregion_start)
      || !bitregion_end.is_constant (&const_bitregion_end))
    {
      terminate_all_aliasing_chains (NULL, stmt);
      return;
    }
  if (!ins_stmt)
    memset (&n, 0, sizeof (n));

  struct imm_store_chain_info **chain_info = NULL;
  if (base_addr)
    chain_info = m_stores.get (base_addr);

  store_immediate_info *info;
  if (chain_info)
    {
      unsigned int ord = (*chain_info)->m_store_info.length ();
      info = new store_immediate_info (const_bitsize, const_bitpos,
				       const_bitregion_start,
				       const_bitregion_end,
				       stmt, ord, rhs_code, n, ins_stmt,
				       bit_not_p, ops[0], ops[1]);
      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  fprintf (dump_file, "Recording immediate store from stmt:\n");
	  print_gimple_stmt (dump_file, stmt, 0);
	}
      (*chain_info)->m_store_info.safe_push (info);
      terminate_all_aliasing_chains (chain_info, stmt);
      /* If we reach the limit of stores to merge in a chain terminate and
	 process the chain now.  */
      if ((*chain_info)->m_store_info.length ()
	  == (unsigned int) PARAM_VALUE (PARAM_MAX_STORES_TO_MERGE))
	{
	  if (dump_file && (dump_flags & TDF_DETAILS))
	    fprintf (dump_file,
		     "Reached maximum number of statements to merge:\n");
	  terminate_and_release_chain (*chain_info);
	}
      return;
    }
  /* Store aliases any existing chain?  */
  terminate_all_aliasing_chains (NULL, stmt);
  /* Start a new chain.  */
  struct imm_store_chain_info *new_chain
    = new imm_store_chain_info (m_stores_head, base_addr);
  info = new store_immediate_info (const_bitsize, const_bitpos,
				   const_bitregion_start,
				   const_bitregion_end,
				   stmt, 0, rhs_code, n, ins_stmt,
				   bit_not_p, ops[0], ops[1]);
  new_chain->m_store_info.safe_push (info);
  m_stores.put (base_addr, new_chain);
  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "Starting new chain with statement:\n");
      print_gimple_stmt (dump_file, stmt, 0);
      fprintf (dump_file, "The base object is:\n");
      print_generic_expr (dump_file, base_addr);
      fprintf (dump_file, "\n");
    }
}
/* Entry point for the pass.  Go over each basic block recording chains of
   immediate stores.  Upon encountering a terminating statement (as defined
   by stmt_terminates_chain_p) process the recorded stores and emit the widened
   variants.  */

unsigned int
pass_store_merging::execute (function *fun)
{
  basic_block bb;
  hash_set<gimple *> orig_stmts;

  calculate_dominance_info (CDI_DOMINATORS);

  FOR_EACH_BB_FN (bb, fun)
    {
      gimple_stmt_iterator gsi;
      unsigned HOST_WIDE_INT num_statements = 0;
      /* Record the original statements so that we can keep track of
	 statements emitted in this pass and not re-process new
	 statements.  */
      for (gsi = gsi_after_labels (bb); !gsi_end_p (gsi); gsi_next (&gsi))
	{
	  if (is_gimple_debug (gsi_stmt (gsi)))
	    continue;

	  if (++num_statements >= 2)
	    break;
	}

      if (num_statements < 2)
	continue;

      if (dump_file && (dump_flags & TDF_DETAILS))
	fprintf (dump_file, "Processing basic block <%d>:\n", bb->index);

      for (gsi = gsi_after_labels (bb); !gsi_end_p (gsi); gsi_next (&gsi))
	{
	  gimple *stmt = gsi_stmt (gsi);

	  if (is_gimple_debug (stmt))
	    continue;

	  if (gimple_has_volatile_ops (stmt))
	    {
	      /* Terminate all chains.  */
	      if (dump_file && (dump_flags & TDF_DETAILS))
		fprintf (dump_file, "Volatile access terminates "
				    "all chains\n");
	      terminate_and_process_all_chains ();
	      continue;
	    }

	  if (gimple_assign_single_p (stmt) && gimple_vdef (stmt)
	      && !stmt_can_throw_internal (cfun, stmt)
	      && lhs_valid_for_store_merging_p (gimple_assign_lhs (stmt)))
	    process_store (stmt);
	  else
	    terminate_all_aliasing_chains (NULL, stmt);
	}
      terminate_and_process_all_chains ();
    }
  return 0;
}

} // anon namespace
/* Construct and return a store merging pass object.  */

gimple_opt_pass *
make_pass_store_merging (gcc::context *ctxt)
{
  return new pass_store_merging (ctxt);
}

#if CHECKING_P
namespace selftest {

/* Selftests for store merging helpers.  */

/* Assert that all elements of the byte arrays X and Y, both of length N,
   are equal.  */

static void
verify_array_eq (unsigned char *x, unsigned char *y, unsigned int n)
{
  for (unsigned int i = 0; i < n; i++)
    {
      if (x[i] != y[i])
	{
	  fprintf (stderr, "Arrays do not match.  X:\n");
	  dump_char_array (stderr, x, n);
	  fprintf (stderr, "Y:\n");
	  dump_char_array (stderr, y, n);
	}
      ASSERT_EQ (x[i], y[i]);
    }
}
/* Test shift_bytes_in_array, checking that it carries bits across byte
   boundaries correctly.  */

static void
verify_shift_bytes_in_array (void)
{
   /* byte 1   | byte 0
      00011111 | 11100000.  */
  unsigned char orig[2] = { 0xe0, 0x1f };
  unsigned char in[2];
  memcpy (in, orig, sizeof orig);

  unsigned char expected[2] = { 0x80, 0x7f };
  shift_bytes_in_array (in, sizeof (in), 2);
  verify_array_eq (in, expected, sizeof (in));

  memcpy (in, orig, sizeof orig);
  memcpy (expected, orig, sizeof orig);
  /* Check that shifting by zero doesn't change anything.  */
  shift_bytes_in_array (in, sizeof (in), 0);
  verify_array_eq (in, expected, sizeof (in));
}
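/* In the test above the two bytes, read least-significant byte first, hold
   the value 0x1fe0; shifting left by 2 gives 0x7f80, i.e. the bytes
   { 0x80, 0x7f } that the expected array encodes.  */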
/* Test shift_bytes_in_array_right, checking that it carries bits across
   byte boundaries correctly.  */

static void
verify_shift_bytes_in_array_right (void)
{
   /* byte 1   | byte 0
      00011111 | 11100000.  */
  unsigned char orig[2] = { 0x1f, 0xe0 };
  unsigned char in[2];
  memcpy (in, orig, sizeof orig);
  unsigned char expected[2] = { 0x07, 0xf8 };
  shift_bytes_in_array_right (in, sizeof (in), 2);
  verify_array_eq (in, expected, sizeof (in));

  memcpy (in, orig, sizeof orig);
  memcpy (expected, orig, sizeof orig);
  /* Check that shifting by zero doesn't change anything.  */
  shift_bytes_in_array_right (in, sizeof (in), 0);
  verify_array_eq (in, expected, sizeof (in));
}
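/* Here the bytes are interpreted the other way around (in[0] is the most
   significant byte): 0x1fe0 shifted right by 2 is 0x07f8, i.e. the bytes
   { 0x07, 0xf8 } that the expected array encodes.  */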
/* Test that clear_bit_region clears exactly the bits asked and
   nothing more.  */

static void
verify_clear_bit_region (void)
{
  /* Start with all bits set and test clearing various patterns in them.  */
  unsigned char orig[3] = { 0xff, 0xff, 0xff };
  unsigned char in[3];
  unsigned char expected[3];
  memcpy (in, orig, sizeof in);

  /* Check zeroing out all the bits.  */
  clear_bit_region (in, 0, 3 * BITS_PER_UNIT);
  expected[0] = expected[1] = expected[2] = 0;
  verify_array_eq (in, expected, sizeof in);

  memcpy (in, orig, sizeof in);
  /* Leave the first and last bits intact.  */
  clear_bit_region (in, 1, 3 * BITS_PER_UNIT - 2);
  expected[0] = 0x1;
  expected[1] = 0;
  expected[2] = 0x80;
  verify_array_eq (in, expected, sizeof in);
}
/* Test that clear_bit_region_be clears exactly the bits asked and
   nothing more.  */

static void
verify_clear_bit_region_be (void)
{
  /* Start with all bits set and test clearing various patterns in them.  */
  unsigned char orig[3] = { 0xff, 0xff, 0xff };
  unsigned char in[3];
  unsigned char expected[3];
  memcpy (in, orig, sizeof in);

  /* Check zeroing out all the bits.  */
  clear_bit_region_be (in, BITS_PER_UNIT - 1, 3 * BITS_PER_UNIT);
  expected[0] = expected[1] = expected[2] = 0;
  verify_array_eq (in, expected, sizeof in);

  memcpy (in, orig, sizeof in);
  /* Leave the first and last bits intact.  */
  clear_bit_region_be (in, BITS_PER_UNIT - 2, 3 * BITS_PER_UNIT - 2);
  expected[0] = 0x80;
  expected[1] = 0;
  expected[2] = 0x1;
  verify_array_eq (in, expected, sizeof in);
}
/* Run all of the selftests within this file.  */

void
store_merging_c_tests (void)
{
  verify_shift_bytes_in_array ();
  verify_shift_bytes_in_array_right ();
  verify_clear_bit_region ();
  verify_clear_bit_region_be ();
}

} // namespace selftest
#endif /* CHECKING_P.  */