/* GIMPLE store merging and byte swapping passes.
   Copyright (C) 2009-2018 Free Software Foundation, Inc.
   Contributed by ARM Ltd.

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3, or (at your option)
   any later version.

   GCC is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING3.  If not see
   <http://www.gnu.org/licenses/>.  */
/* The purpose of the store merging pass is to combine multiple memory stores
   of constant values, values loaded from memory, bitwise operations on those,
   or bit-field values, to consecutive locations, into fewer wider stores.

   For example, if we have a sequence performing four byte stores to
   consecutive memory locations:
   [p     ] := imm1;
   [p + 1B] := imm2;
   [p + 2B] := imm3;
   [p + 3B] := imm4;
   we can transform this into a single 4-byte store if the target supports it:
   [p] := imm1:imm2:imm3:imm4 concatenated according to endianness.

   Or:
   [p     ] := [q     ];
   [p + 1B] := [q + 1B];
   [p + 2B] := [q + 2B];
   [p + 3B] := [q + 3B];
   which, if there is no overlap, can be transformed into a single 4-byte
   load followed by a single 4-byte store.

   Or:
   [p     ] := [q     ] ^ imm1;
   [p + 1B] := [q + 1B] ^ imm2;
   [p + 2B] := [q + 2B] ^ imm3;
   [p + 3B] := [q + 3B] ^ imm4;
   which, if there is no overlap, can be transformed into a single 4-byte
   load, XORed with imm1:imm2:imm3:imm4 and stored using a single 4-byte store.

   Or:
   [p:1 ] := imm;
   [p:31] := val & 0x7FFFFFFF;
   we can transform this into a single 4-byte store if the target supports it:
   [p] := imm:(val & 0x7FFFFFFF) concatenated according to endianness.
   The algorithm is applied to each basic block in three phases:

   1) Scan through the basic block and record assignments to destinations
   that can be expressed as a store to memory of a certain size at a certain
   bit offset from base expressions we can handle.  For bit-fields we also
   record the surrounding bit region, i.e. bits that could be stored in
   a read-modify-write operation when storing the bit-field.  Record store
   chains to different bases in a hash_map (m_stores) and make sure to
   terminate such chains when appropriate (for example when the stored
   values are used subsequently).
   These stores can be a result of structure element initializers, array stores
   etc.  A store_immediate_info object is recorded for every such store.
   Record as many such assignments to a single base as possible until a
   statement that interferes with the store sequence is encountered.
   Each store has up to 2 operands, which can be either a constant, a memory
   load or an SSA name, from which the value to be stored can be computed.
   At most one of the operands can be a constant.  The operands are recorded
   in the store_operand_info struct.
   2) Analyze the chains of stores recorded in phase 1) (i.e. the vector of
   store_immediate_info objects) and coalesce contiguous stores into
   merged_store_group objects.  For bit-field stores, we don't need to
   require the stores to be contiguous, just their surrounding bit regions
   have to be contiguous.  If the expression being stored is different
   between adjacent stores, such as one store storing a constant and
   the following one storing a value loaded from memory, or if the loaded
   memory objects are not adjacent, a new merged_store_group is created
   as well.

   For example, given the stores:
   [p     ] := 0;
   [p + 1B] := 1;
   [p + 3B] := 0;
   [p + 4B] := 1;
   [p + 5B] := 0;
   [p + 6B] := 0;
   this phase would produce two merged_store_group objects, one recording the
   two bytes stored in the memory region [p : p + 1] and another
   recording the four bytes stored in the memory region [p + 3 : p + 6].
   3) The merged_store_group objects produced in phase 2) are processed
   to generate the sequence of wider stores that set the contiguous memory
   regions to the sequence of bytes that correspond to it.  This may emit
   multiple stores per store group to handle contiguous stores that are not
   of a size that is a power of 2.  For example it can try to emit a 40-bit
   store as a 32-bit store followed by an 8-bit store.
   We try to emit as wide stores as we can while respecting STRICT_ALIGNMENT
   or TARGET_SLOW_UNALIGNED_ACCESS settings.
   Note on endianness and example:
   Consider 2 contiguous 16-bit stores followed by 2 contiguous 8-bit stores:
   [p     ] := 0x1234;
   [p + 2B] := 0x5678;
   [p + 4B] := 0xab;
   [p + 5B] := 0xcd;

   The memory layout for little-endian (LE) and big-endian (BE) must be:
   p |LE|BE|
   ---------
   0 |34|12|
   1 |12|34|
   2 |78|56|
   3 |56|78|
   4 |ab|ab|
   5 |cd|cd|

   To merge these into a single 48-bit merged value 'val' in phase 2)
   on little-endian we insert stores to higher (consecutive) bitpositions
   into the most significant bits of the merged value.
   The final merged value would be: 0xcdab56781234

   For big-endian we insert stores to higher bitpositions into the least
   significant bits of the merged value.
   The final merged value would be: 0x12345678abcd

   Then, in phase 3), we want to emit this 48-bit value as a 32-bit store
   followed by a 16-bit store.  Again, we must consider endianness when
   breaking down the 48-bit value 'val' computed above.
   For little-endian we emit:
   [p]      (32-bit) := 0x56781234; //  val & 0x0000ffffffff;
   [p + 4B] (16-bit) := 0xcdab;     // (val & 0xffff00000000) >> 32;

   Whereas for big-endian we emit:
   [p]      (32-bit) := 0x12345678; // (val & 0xffffffff0000) >> 16;
   [p + 4B] (16-bit) := 0xabcd;     //  val & 0x00000000ffff;  */
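
/* As an illustrative sketch (the names below are hypothetical, not from
   the original sources), the kind of C fragment phase 1) records and
   phase 3) rewrites is:

     struct S { unsigned char a, b, c, d; };
     void f (struct S *p)
     {
       p->a = 1;  p->b = 2;  p->c = 3;  p->d = 4;  // four 1-byte stores
     }

   which, on a target with cheap unaligned 32-bit stores, becomes one
   4-byte store of the endianness-dependent constant described above.  */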
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "tree.h"
#include "gimple.h"
#include "builtins.h"
#include "fold-const.h"
#include "tree-pass.h"
#include "ssa.h"
#include "gimple-pretty-print.h"
#include "alias.h"
#include "print-tree.h"
#include "tree-hash-traits.h"
#include "gimple-iterator.h"
#include "gimplify.h"
#include "gimple-fold.h"
#include "stor-layout.h"
#include "timevar.h"
#include "tree-cfg.h"
#include "tree-eh.h"
#include "target.h"
#include "gimplify-me.h"
#include "rtl.h"
#include "expr.h"	/* For get_bit_range.  */
#include "optabs-tree.h"
#include "selftest.h"
/* The maximum size (in bits) of the stores this pass should generate.  */
#define MAX_STORE_BITSIZE (BITS_PER_WORD)
#define MAX_STORE_BYTES (MAX_STORE_BITSIZE / BITS_PER_UNIT)

/* Limit to bound the number of aliasing checks for loads with the same
   vuse as the corresponding store.  */
#define MAX_STORE_ALIAS_CHECKS 64
static struct
{
  /* Number of hand-written 16-bit nop / bswaps found.  */
  int found_16bit;

  /* Number of hand-written 32-bit nop / bswaps found.  */
  int found_32bit;

  /* Number of hand-written 64-bit nop / bswaps found.  */
  int found_64bit;
} nop_stats, bswap_stats;
/* A symbolic number structure is used to detect byte permutation and selection
   patterns of a source.  To achieve that, its field N contains an artificial
   number consisting of BITS_PER_MARKER sized markers tracking where each
   byte comes from in the source:

   0       - target byte has the value 0
   FF      - target byte has an unknown value (eg. due to sign extension)
   1..size - marker value is the byte index in the source (0 for lsb).
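
   For example (illustrative): the symbolic number of a plain 32-bit load
   is 0x04030201 (each marker names a source byte, lsb first), and after a
   full byte swap the markers read 0x01020304; these are exactly the masked
   forms of the CMPNOP and CMPXCHG constants defined below.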
   To detect permutations on memory sources (arrays and structures), a symbolic
   number also tracks:
   - a base address BASE_ADDR and an OFFSET giving the address of the source;
   - a range which gives the difference between the highest and lowest accessed
     memory location to make such a symbolic number;
   - the address SRC of the source element of lowest address as a convenience
     to easily get BASE_ADDR + offset + lowest bytepos;
   - the number of expressions N_OPS bitwise ORed together to represent
     the approximate cost of the computation.

   Note 1: the range is different from size as size reflects the size of the
   type of the current expression.  For instance, for an array char a[],
   (short) a[0] | (short) a[3] would have a size of 2 but a range of 4 while
   (short) a[0] | ((short) a[0] << 1) would still have a size of 2 but this
   time a range of 1.

   Note 2: for non-memory sources, range holds the same value as size.

   Note 3: SRC points to the SSA_NAME in case of non-memory source.  */
struct symbolic_number {
  uint64_t n;
  tree type;
  tree base_addr;
  tree offset;
  poly_int64_pod bytepos;
  tree src;
  tree alias_set;
  tree vuse;
  unsigned HOST_WIDE_INT range;
  int n_ops;
};
#define BITS_PER_MARKER 8
#define MARKER_MASK ((1 << BITS_PER_MARKER) - 1)
#define MARKER_BYTE_UNKNOWN MARKER_MASK
#define HEAD_MARKER(n, size) \
  ((n) & ((uint64_t) MARKER_MASK << (((size) - 1) * BITS_PER_MARKER)))
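
/* For instance (illustrative): for a 4-byte symbolic number, HEAD_MARKER
   extracts the marker of the most significant byte, so
   HEAD_MARKER (0x04030201, 4) == 0x04000000.  */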
/* The number which the find_bswap_or_nop_1 result should match in
   order to have a nop.  The number is masked according to the size of
   the symbolic number before using it.  */
#define CMPNOP (sizeof (int64_t) < 8 ? 0 : \
  (uint64_t)0x08070605 << 32 | 0x04030201)

/* The number which the find_bswap_or_nop_1 result should match in
   order to have a byte swap.  The number is masked according to the
   size of the symbolic number before using it.  */
#define CMPXCHG (sizeof (int64_t) < 8 ? 0 : \
  (uint64_t)0x01020304 << 32 | 0x05060708)
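
/* For instance (illustrative): for a 16-bit symbolic number the masked
   patterns are CMPNOP & 0xffff == 0x0201 (identity) and
   CMPXCHG >> 48 == 0x0102 (byte swap), which is what
   find_bswap_or_nop_finalize computes below.  */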
/* Perform a SHIFT or ROTATE operation by COUNT bits on symbolic
   number N.  Return false if the requested operation is not permitted
   on a symbolic number.  */

static bool
do_shift_rotate (enum tree_code code,
                 struct symbolic_number *n,
                 int count)
{
  int i, size = TYPE_PRECISION (n->type) / BITS_PER_UNIT;
  unsigned head_marker;

  if (count % BITS_PER_UNIT != 0)
    return false;
  count = (count / BITS_PER_UNIT) * BITS_PER_MARKER;

  /* Zero out the extra bits of N in order to avoid them being shifted
     into the significant bits.  */
  if (size < 64 / BITS_PER_MARKER)
    n->n &= ((uint64_t) 1 << (size * BITS_PER_MARKER)) - 1;

  switch (code)
    {
    case LSHIFT_EXPR:
      n->n <<= count;
      break;
    case RSHIFT_EXPR:
      head_marker = HEAD_MARKER (n->n, size);
      n->n >>= count;
      /* Arithmetic shift of signed type: result is dependent on the value.  */
      if (!TYPE_UNSIGNED (n->type) && head_marker)
        for (i = 0; i < count / BITS_PER_MARKER; i++)
          n->n |= (uint64_t) MARKER_BYTE_UNKNOWN
                  << ((size - 1 - i) * BITS_PER_MARKER);
      break;
    case LROTATE_EXPR:
      n->n = (n->n << count) | (n->n >> ((size * BITS_PER_MARKER) - count));
      break;
    case RROTATE_EXPR:
      n->n = (n->n >> count) | (n->n << ((size * BITS_PER_MARKER) - count));
      break;
    default:
      return false;
    }
  /* Zero unused bits for size.  */
  if (size < 64 / BITS_PER_MARKER)
    n->n &= ((uint64_t) 1 << (size * BITS_PER_MARKER)) - 1;
  return true;
}
/* Perform sanity checking for the symbolic number N and the gimple
   statement STMT.  */

inline bool
verify_symbolic_number_p (struct symbolic_number *n, gimple *stmt)
{
  tree lhs_type;

  lhs_type = gimple_expr_type (stmt);

  if (TREE_CODE (lhs_type) != INTEGER_TYPE)
    return false;

  if (TYPE_PRECISION (lhs_type) != TYPE_PRECISION (n->type))
    return false;

  return true;
}
/* Initialize the symbolic number N for the bswap pass from the base element
   SRC manipulated by the bitwise OR expression.  */

bool
init_symbolic_number (struct symbolic_number *n, tree src)
{
  int size;

  if (! INTEGRAL_TYPE_P (TREE_TYPE (src)))
    return false;

  n->base_addr = n->offset = n->alias_set = n->vuse = NULL_TREE;
  n->src = src;

  /* Set up the symbolic number N by setting each byte to a value between 1 and
     the byte size of rhs1.  The highest order byte is set to n->size and the
     lowest order byte to 1.  */
  n->type = TREE_TYPE (src);
  size = TYPE_PRECISION (n->type);
  if (size % BITS_PER_UNIT != 0)
    return false;
  size /= BITS_PER_UNIT;
  if (size > 64 / BITS_PER_MARKER)
    return false;
  n->range = size;
  n->n = CMPNOP;
  n->n_ops = 1;

  if (size < 64 / BITS_PER_MARKER)
    n->n &= ((uint64_t) 1 << (size * BITS_PER_MARKER)) - 1;

  return true;
}
/* Check if STMT might be a byte swap or a nop from a memory source and return
   the answer.  If so, REF is that memory source and the base of the memory area
   accessed and the offset of the access from that base are recorded in N.  */

bool
find_bswap_or_nop_load (gimple *stmt, tree ref, struct symbolic_number *n)
{
  /* Leaf node is an array or component ref.  Memorize its base and
     offset from base to compare to other such leaf node.  */
  poly_int64 bitsize, bitpos, bytepos;
  machine_mode mode;
  int unsignedp, reversep, volatilep;
  tree offset, base_addr;

  /* Not prepared to handle PDP endian.  */
  if (BYTES_BIG_ENDIAN != WORDS_BIG_ENDIAN)
    return false;

  if (!gimple_assign_load_p (stmt) || gimple_has_volatile_ops (stmt))
    return false;

  base_addr = get_inner_reference (ref, &bitsize, &bitpos, &offset, &mode,
                                   &unsignedp, &reversep, &volatilep);

  if (TREE_CODE (base_addr) == TARGET_MEM_REF)
    /* Do not rewrite TARGET_MEM_REF.  */
    return false;
  else if (TREE_CODE (base_addr) == MEM_REF)
    {
      poly_offset_int bit_offset = 0;
      tree off = TREE_OPERAND (base_addr, 1);

      if (!integer_zerop (off))
        {
          poly_offset_int boff = mem_ref_offset (base_addr);
          boff <<= LOG2_BITS_PER_UNIT;
          bit_offset += boff;
        }

      base_addr = TREE_OPERAND (base_addr, 0);

      /* Avoid returning a negative bitpos as this may wreak havoc later.  */
      if (maybe_lt (bit_offset, 0))
        {
          tree byte_offset = wide_int_to_tree
            (sizetype, bits_to_bytes_round_down (bit_offset));
          bit_offset = num_trailing_bits (bit_offset);
          if (offset)
            offset = size_binop (PLUS_EXPR, offset, byte_offset);
          else
            offset = byte_offset;
        }

      bitpos += bit_offset.force_shwi ();
    }
  else
    base_addr = build_fold_addr_expr (base_addr);

  if (!multiple_p (bitpos, BITS_PER_UNIT, &bytepos))
    return false;
  if (!multiple_p (bitsize, BITS_PER_UNIT))
    return false;
  if (reversep)
    return false;

  if (!init_symbolic_number (n, ref))
    return false;
  n->base_addr = base_addr;
  n->offset = offset;
  n->bytepos = bytepos;
  n->alias_set = reference_alias_ptr_type (ref);
  n->vuse = gimple_vuse (stmt);
  return true;
}
/* Compute the symbolic number N representing the result of a bitwise OR on 2
   symbolic numbers N1 and N2 whose source statements are respectively
   SOURCE_STMT1 and SOURCE_STMT2.  */

gimple *
perform_symbolic_merge (gimple *source_stmt1, struct symbolic_number *n1,
                        gimple *source_stmt2, struct symbolic_number *n2,
                        struct symbolic_number *n)
{
  int i, size;
  uint64_t mask;
  gimple *source_stmt;
  struct symbolic_number *n_start;

  tree rhs1 = gimple_assign_rhs1 (source_stmt1);
  if (TREE_CODE (rhs1) == BIT_FIELD_REF
      && TREE_CODE (TREE_OPERAND (rhs1, 0)) == SSA_NAME)
    rhs1 = TREE_OPERAND (rhs1, 0);
  tree rhs2 = gimple_assign_rhs1 (source_stmt2);
  if (TREE_CODE (rhs2) == BIT_FIELD_REF
      && TREE_CODE (TREE_OPERAND (rhs2, 0)) == SSA_NAME)
    rhs2 = TREE_OPERAND (rhs2, 0);

  /* Sources are different, cancel bswap if they are not memory locations with
     the same base (array, structure, ...).  */
  if (rhs1 != rhs2)
    {
      uint64_t inc;
      HOST_WIDE_INT start1, start2, start_sub, end_sub, end1, end2, end;
      struct symbolic_number *toinc_n_ptr, *n_end;
      basic_block bb1, bb2;

      if (!n1->base_addr || !n2->base_addr
          || !operand_equal_p (n1->base_addr, n2->base_addr, 0))
        return NULL;

      if (!n1->offset != !n2->offset
          || (n1->offset && !operand_equal_p (n1->offset, n2->offset, 0)))
        return NULL;

      start1 = 0;
      if (!(n2->bytepos - n1->bytepos).is_constant (&start2))
        return NULL;

      if (start1 < start2)
        {
          n_start = n1;
          start_sub = start2 - start1;
        }
      else
        {
          n_start = n2;
          start_sub = start1 - start2;
        }

      bb1 = gimple_bb (source_stmt1);
      bb2 = gimple_bb (source_stmt2);
      if (dominated_by_p (CDI_DOMINATORS, bb1, bb2))
        source_stmt = source_stmt1;
      else
        source_stmt = source_stmt2;

      /* Find the highest address at which a load is performed and
         compute related info.  */
      end1 = start1 + (n1->range - 1);
      end2 = start2 + (n2->range - 1);
      if (end1 < end2)
        {
          end = end2;
          end_sub = end2 - end1;
        }
      else
        {
          end = end1;
          end_sub = end1 - end2;
        }
      n_end = (end2 > end1) ? n2 : n1;

      /* Find symbolic number whose lsb is the most significant.  */
      if (BYTES_BIG_ENDIAN)
        toinc_n_ptr = (n_end == n1) ? n2 : n1;
      else
        toinc_n_ptr = (n_start == n1) ? n2 : n1;

      n->range = end - MIN (start1, start2) + 1;

      /* Check that the range of memory covered can be represented by
         a symbolic number.  */
      if (n->range > 64 / BITS_PER_MARKER)
        return NULL;

      /* Reinterpret byte marks in symbolic number holding the value of
         bigger weight according to target endianness.  */
      inc = BYTES_BIG_ENDIAN ? end_sub : start_sub;
      size = TYPE_PRECISION (n1->type) / BITS_PER_UNIT;
      for (i = 0; i < size; i++, inc <<= BITS_PER_MARKER)
        {
          unsigned marker
            = (toinc_n_ptr->n >> (i * BITS_PER_MARKER)) & MARKER_MASK;
          if (marker && marker != MARKER_BYTE_UNKNOWN)
            toinc_n_ptr->n += inc;
        }
    }
  else
    {
      n->range = n1->range;
      n_start = n1;
      source_stmt = source_stmt1;
    }

  if (!n1->alias_set
      || alias_ptr_types_compatible_p (n1->alias_set, n2->alias_set))
    n->alias_set = n1->alias_set;
  else
    n->alias_set = ptr_type_node;
  n->vuse = n_start->vuse;
  n->base_addr = n_start->base_addr;
  n->offset = n_start->offset;
  n->src = n_start->src;
  n->bytepos = n_start->bytepos;
  n->type = n_start->type;
  size = TYPE_PRECISION (n->type) / BITS_PER_UNIT;

  for (i = 0, mask = MARKER_MASK; i < size; i++, mask <<= BITS_PER_MARKER)
    {
      uint64_t masked1, masked2;

      masked1 = n1->n & mask;
      masked2 = n2->n & mask;
      if (masked1 && masked2 && masked1 != masked2)
        return NULL;
    }
  n->n = n1->n | n2->n;
  n->n_ops = n1->n_ops + n2->n_ops;

  return source_stmt;
}
/* find_bswap_or_nop_1 invokes itself recursively with N and tries to perform
   the operation given by the rhs of STMT on the result.  If the operation
   could successfully be executed the function returns a gimple stmt whose
   rhs's first tree is the expression of the source operand and NULL
   otherwise.  */

gimple *
find_bswap_or_nop_1 (gimple *stmt, struct symbolic_number *n, int limit)
{
  enum tree_code code;
  tree rhs1, rhs2 = NULL;
  gimple *rhs1_stmt, *rhs2_stmt, *source_stmt1;
  enum gimple_rhs_class rhs_class;

  if (!limit || !is_gimple_assign (stmt))
    return NULL;

  rhs1 = gimple_assign_rhs1 (stmt);

  if (find_bswap_or_nop_load (stmt, rhs1, n))
    return stmt;

  /* Handle BIT_FIELD_REF.  */
  if (TREE_CODE (rhs1) == BIT_FIELD_REF
      && TREE_CODE (TREE_OPERAND (rhs1, 0)) == SSA_NAME)
    {
      unsigned HOST_WIDE_INT bitsize = tree_to_uhwi (TREE_OPERAND (rhs1, 1));
      unsigned HOST_WIDE_INT bitpos = tree_to_uhwi (TREE_OPERAND (rhs1, 2));
      if (bitpos % BITS_PER_UNIT == 0
          && bitsize % BITS_PER_UNIT == 0
          && init_symbolic_number (n, TREE_OPERAND (rhs1, 0)))
        {
          /* Handle big-endian bit numbering in BIT_FIELD_REF.  */
          if (BYTES_BIG_ENDIAN)
            bitpos = TYPE_PRECISION (n->type) - bitpos - bitsize;

          /* Shift.  */
          if (!do_shift_rotate (RSHIFT_EXPR, n, bitpos))
            return NULL;

          /* Mask.  */
          uint64_t mask = 0;
          uint64_t tmp = (1 << BITS_PER_UNIT) - 1;
          for (unsigned i = 0; i < bitsize / BITS_PER_UNIT;
               i++, tmp <<= BITS_PER_UNIT)
            mask |= (uint64_t) MARKER_MASK << (i * BITS_PER_MARKER);
          n->n &= mask;

          /* Convert.  */
          n->type = TREE_TYPE (rhs1);
          if (!n->base_addr)
            n->range = TYPE_PRECISION (n->type) / BITS_PER_UNIT;

          return verify_symbolic_number_p (n, stmt) ? stmt : NULL;
        }

      return NULL;
    }

  if (TREE_CODE (rhs1) != SSA_NAME)
    return NULL;

  code = gimple_assign_rhs_code (stmt);
  rhs_class = gimple_assign_rhs_class (stmt);
  rhs1_stmt = SSA_NAME_DEF_STMT (rhs1);

  if (rhs_class == GIMPLE_BINARY_RHS)
    rhs2 = gimple_assign_rhs2 (stmt);

  /* Handle unary rhs and binary rhs with integer constants as second
     operand.  */

  if (rhs_class == GIMPLE_UNARY_RHS
      || (rhs_class == GIMPLE_BINARY_RHS
          && TREE_CODE (rhs2) == INTEGER_CST))
    {
      if (code != BIT_AND_EXPR
          && code != LSHIFT_EXPR
          && code != RSHIFT_EXPR
          && code != LROTATE_EXPR
          && code != RROTATE_EXPR
          && !CONVERT_EXPR_CODE_P (code))
        return NULL;

      source_stmt1 = find_bswap_or_nop_1 (rhs1_stmt, n, limit - 1);

      /* If find_bswap_or_nop_1 returned NULL, STMT is a leaf node and
         we have to initialize the symbolic number.  */
      if (!source_stmt1)
        {
          if (gimple_assign_load_p (stmt)
              || !init_symbolic_number (n, rhs1))
            return NULL;
          source_stmt1 = stmt;
        }

      switch (code)
        {
        case BIT_AND_EXPR:
          {
            int i, size = TYPE_PRECISION (n->type) / BITS_PER_UNIT;
            uint64_t val = int_cst_value (rhs2), mask = 0;
            uint64_t tmp = (1 << BITS_PER_UNIT) - 1;

            /* Only constants masking full bytes are allowed.  */
            for (i = 0; i < size; i++, tmp <<= BITS_PER_UNIT)
              if ((val & tmp) != 0 && (val & tmp) != tmp)
                return NULL;
              else if (val & tmp)
                mask |= (uint64_t) MARKER_MASK << (i * BITS_PER_MARKER);

            n->n &= mask;
          }
          break;
        case LSHIFT_EXPR:
        case RSHIFT_EXPR:
        case LROTATE_EXPR:
        case RROTATE_EXPR:
          if (!do_shift_rotate (code, n, (int) TREE_INT_CST_LOW (rhs2)))
            return NULL;
          break;
        CASE_CONVERT:
          {
            int i, type_size, old_type_size;
            tree type;

            type = gimple_expr_type (stmt);
            type_size = TYPE_PRECISION (type);
            if (type_size % BITS_PER_UNIT != 0)
              return NULL;
            type_size /= BITS_PER_UNIT;
            if (type_size > 64 / BITS_PER_MARKER)
              return NULL;

            /* Sign extension: result is dependent on the value.  */
            old_type_size = TYPE_PRECISION (n->type) / BITS_PER_UNIT;
            if (!TYPE_UNSIGNED (n->type) && type_size > old_type_size
                && HEAD_MARKER (n->n, old_type_size))
              for (i = 0; i < type_size - old_type_size; i++)
                n->n |= (uint64_t) MARKER_BYTE_UNKNOWN
                        << ((type_size - 1 - i) * BITS_PER_MARKER);

            if (type_size < 64 / BITS_PER_MARKER)
              {
                /* If STMT casts to a smaller type mask out the bits not
                   belonging to the target type.  */
                n->n &= ((uint64_t) 1 << (type_size * BITS_PER_MARKER)) - 1;
              }
            n->type = type;
            if (!n->base_addr)
              n->range = type_size;
          }
          break;
        default:
          return NULL;
        };
      return verify_symbolic_number_p (n, stmt) ? source_stmt1 : NULL;
    }

  /* Handle binary rhs.  */

  if (rhs_class == GIMPLE_BINARY_RHS)
    {
      struct symbolic_number n1, n2;
      gimple *source_stmt, *source_stmt2;

      if (code != BIT_IOR_EXPR)
        return NULL;

      if (TREE_CODE (rhs2) != SSA_NAME)
        return NULL;

      rhs2_stmt = SSA_NAME_DEF_STMT (rhs2);

      switch (code)
        {
        case BIT_IOR_EXPR:
          source_stmt1 = find_bswap_or_nop_1 (rhs1_stmt, &n1, limit - 1);

          if (!source_stmt1)
            return NULL;

          source_stmt2 = find_bswap_or_nop_1 (rhs2_stmt, &n2, limit - 1);

          if (!source_stmt2)
            return NULL;

          if (TYPE_PRECISION (n1.type) != TYPE_PRECISION (n2.type))
            return NULL;

          if (n1.vuse != n2.vuse)
            return NULL;

          source_stmt
            = perform_symbolic_merge (source_stmt1, &n1, source_stmt2, &n2, n);

          if (!source_stmt)
            return NULL;

          if (!verify_symbolic_number_p (n, stmt))
            return NULL;

          break;
        default:
          return NULL;
        }
      return source_stmt;
    }
  return NULL;
}
/* Helper for find_bswap_or_nop and try_coalesce_bswap to compute
   *CMPXCHG, *CMPNOP and adjust *N.  */

void
find_bswap_or_nop_finalize (struct symbolic_number *n, uint64_t *cmpxchg,
                            uint64_t *cmpnop)
{
  unsigned rsize;
  uint64_t tmpn, mask;

  /* The number which the find_bswap_or_nop_1 result should match in order
     to have a full byte swap.  The number is shifted to the right
     according to the size of the symbolic number before using it.  */
  *cmpxchg = CMPXCHG;
  *cmpnop = CMPNOP;

  /* Find real size of result (highest non-zero byte).  */
  if (n->base_addr)
    for (tmpn = n->n, rsize = 0; tmpn; tmpn >>= BITS_PER_MARKER, rsize++);
  else
    rsize = n->range;

  /* Zero out the bits corresponding to untouched bytes in original gimple
     expression.  */
  if (n->range < (int) sizeof (int64_t))
    {
      mask = ((uint64_t) 1 << (n->range * BITS_PER_MARKER)) - 1;
      *cmpxchg >>= (64 / BITS_PER_MARKER - n->range) * BITS_PER_MARKER;
      *cmpnop &= mask;
    }

  /* Zero out the bits corresponding to unused bytes in the result of the
     gimple expression.  */
  if (rsize < n->range)
    {
      if (BYTES_BIG_ENDIAN)
        {
          mask = ((uint64_t) 1 << (rsize * BITS_PER_MARKER)) - 1;
          *cmpxchg &= mask;
          *cmpnop >>= (n->range - rsize) * BITS_PER_MARKER;
        }
      else
        {
          mask = ((uint64_t) 1 << (rsize * BITS_PER_MARKER)) - 1;
          *cmpxchg >>= (n->range - rsize) * BITS_PER_MARKER;
          *cmpnop &= mask;
        }
      n->range = rsize;
    }

  n->range *= BITS_PER_UNIT;
}
/* Check if STMT completes a bswap implementation or a read in a given
   endianness consisting of ORs, SHIFTs and ANDs and sets *BSWAP
   accordingly.  It also sets N to represent the kind of operations
   performed: size of the resulting expression and whether it works on
   a memory source, and if so alias-set and vuse.  At last, the
   function returns a stmt whose rhs's first tree is the source
   expression.  */

gimple *
find_bswap_or_nop (gimple *stmt, struct symbolic_number *n, bool *bswap)
{
  /* The last parameter determines the depth search limit.  It usually
     correlates directly to the number n of bytes to be touched.  We
     increase that number by log2(n) + 1 here in order to also
     cover signed -> unsigned conversions of the src operand as can be seen
     in libgcc, and for initial shift/and operation of the src operand.  */
  int limit = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (gimple_expr_type (stmt)));
  limit += 1 + (int) ceil_log2 ((unsigned HOST_WIDE_INT) limit);
  gimple *ins_stmt = find_bswap_or_nop_1 (stmt, n, limit);

  if (!ins_stmt)
    return NULL;

  uint64_t cmpxchg, cmpnop;
  find_bswap_or_nop_finalize (n, &cmpxchg, &cmpnop);

  /* A complete byte swap should make the symbolic number start with
     the largest digit in the highest order byte.  An unchanged symbolic
     number indicates a read with the same endianness as the target
     architecture.  */
  if (n->n == cmpnop)
    *bswap = false;
  else if (n->n == cmpxchg)
    *bswap = true;
  else
    return NULL;

  /* Useless bit manipulation performed by code.  */
  if (!n->base_addr && n->n == cmpnop && n->n_ops == 1)
    return NULL;

  return ins_stmt;
}
const pass_data pass_data_optimize_bswap =
{
  GIMPLE_PASS, /* type */
  "bswap", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_NONE, /* tv_id */
  PROP_ssa, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_optimize_bswap : public gimple_opt_pass
{
public:
  pass_optimize_bswap (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_optimize_bswap, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *)
  {
    return flag_expensive_optimizations && optimize && BITS_PER_UNIT == 8;
  }

  virtual unsigned int execute (function *);

}; // class pass_optimize_bswap
/* Perform the bswap optimization: replace the expression computed in the rhs
   of gsi_stmt (GSI) (or if NULL add instead of replace) by an equivalent
   bswap, load or load + bswap expression.
   Which of these alternatives replaces the rhs is given by N->base_addr (non
   null if a load is needed) and BSWAP.  The type, VUSE and set-alias of the
   load to perform are also given in N while the builtin bswap invocation is
   given in FNDECL.  Finally, if a load is involved, INS_STMT refers to one of
   the load statements involved to construct the rhs in gsi_stmt (GSI) and
   N->range gives the size of the rhs expression for maintaining some
   statistics.

   Note that if the replacement involves a load and if gsi_stmt (GSI) is
   non-NULL, that stmt is moved just after INS_STMT to do the load with the
   same VUSE, which can lead to gsi_stmt (GSI) changing basic block.  */
tree
bswap_replace (gimple_stmt_iterator gsi, gimple *ins_stmt, tree fndecl,
               tree bswap_type, tree load_type, struct symbolic_number *n,
               bool bswap)
{
  tree src, tmp, tgt = NULL_TREE;
  gimple *bswap_stmt;

  gimple *cur_stmt = gsi_stmt (gsi);
  src = n->src;
  if (cur_stmt)
    tgt = gimple_assign_lhs (cur_stmt);

  /* Need to load the value from memory first.  */
  if (n->base_addr)
    {
      gimple_stmt_iterator gsi_ins = gsi;
      if (ins_stmt)
        gsi_ins = gsi_for_stmt (ins_stmt);
      tree addr_expr, addr_tmp, val_expr, val_tmp;
      tree load_offset_ptr, aligned_load_type;
      gimple *load_stmt;
      unsigned align = get_object_alignment (src);
      poly_int64 load_offset = 0;

      if (cur_stmt)
        {
          basic_block ins_bb = gimple_bb (ins_stmt);
          basic_block cur_bb = gimple_bb (cur_stmt);
          if (!dominated_by_p (CDI_DOMINATORS, cur_bb, ins_bb))
            return NULL_TREE;

          /* Move cur_stmt just before one of the loads of the original
             to ensure it has the same VUSE.  See PR61517 for what could
             go wrong.  */
          if (gimple_bb (cur_stmt) != gimple_bb (ins_stmt))
            reset_flow_sensitive_info (gimple_assign_lhs (cur_stmt));
          gsi_move_before (&gsi, &gsi_ins);
          gsi = gsi_for_stmt (cur_stmt);
        }
      else
        gsi = gsi_ins;

      /* Compute address to load from and cast according to the size
         of the load.  */
      addr_expr = build_fold_addr_expr (src);
      if (is_gimple_mem_ref_addr (addr_expr))
        addr_tmp = unshare_expr (addr_expr);
      else
        {
          addr_tmp = unshare_expr (n->base_addr);
          if (!is_gimple_mem_ref_addr (addr_tmp))
            addr_tmp = force_gimple_operand_gsi_1 (&gsi, addr_tmp,
                                                   is_gimple_mem_ref_addr,
                                                   NULL_TREE, true,
                                                   GSI_SAME_STMT);
          load_offset = n->bytepos;
          if (n->offset)
            {
              tree off
                = force_gimple_operand_gsi (&gsi, unshare_expr (n->offset),
                                            true, NULL_TREE, true,
                                            GSI_SAME_STMT);
              gimple *stmt
                = gimple_build_assign (make_ssa_name (TREE_TYPE (addr_tmp)),
                                       POINTER_PLUS_EXPR, addr_tmp, off);
              gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
              addr_tmp = gimple_assign_lhs (stmt);
            }
        }

      /* Perform the load.  */
      aligned_load_type = load_type;
      if (align < TYPE_ALIGN (load_type))
        aligned_load_type = build_aligned_type (load_type, align);
      load_offset_ptr = build_int_cst (n->alias_set, load_offset);
      val_expr = fold_build2 (MEM_REF, aligned_load_type, addr_tmp,
                              load_offset_ptr);

      if (!bswap)
        {
          if (n->range == 16)
            nop_stats.found_16bit++;
          else if (n->range == 32)
            nop_stats.found_32bit++;
          else
            {
              gcc_assert (n->range == 64);
              nop_stats.found_64bit++;
            }

          /* Convert the result of load if necessary.  */
          if (tgt && !useless_type_conversion_p (TREE_TYPE (tgt), load_type))
            {
              val_tmp = make_temp_ssa_name (aligned_load_type, NULL,
                                            "load_dst");
              load_stmt = gimple_build_assign (val_tmp, val_expr);
              gimple_set_vuse (load_stmt, n->vuse);
              gsi_insert_before (&gsi, load_stmt, GSI_SAME_STMT);
              gimple_assign_set_rhs_with_ops (&gsi, NOP_EXPR, val_tmp);
              update_stmt (cur_stmt);
            }
          else if (cur_stmt)
            {
              gimple_assign_set_rhs_with_ops (&gsi, MEM_REF, val_expr);
              gimple_set_vuse (cur_stmt, n->vuse);
              update_stmt (cur_stmt);
            }
          else
            {
              tgt = make_ssa_name (load_type);
              cur_stmt = gimple_build_assign (tgt, MEM_REF, val_expr);
              gimple_set_vuse (cur_stmt, n->vuse);
              gsi_insert_before (&gsi, cur_stmt, GSI_SAME_STMT);
            }

          if (dump_file)
            {
              fprintf (dump_file,
                       "%d bit load in target endianness found at: ",
                       (int) n->range);
              print_gimple_stmt (dump_file, cur_stmt, 0);
            }
          return tgt;
        }
      else
        {
          val_tmp = make_temp_ssa_name (aligned_load_type, NULL, "load_dst");
          load_stmt = gimple_build_assign (val_tmp, val_expr);
          gimple_set_vuse (load_stmt, n->vuse);
          gsi_insert_before (&gsi, load_stmt, GSI_SAME_STMT);
        }
      src = val_tmp;
    }
  else if (!bswap)
    {
      gimple *g;
      if (tgt && !useless_type_conversion_p (TREE_TYPE (tgt), TREE_TYPE (src)))
        {
          if (!is_gimple_val (src))
            return NULL_TREE;
          g = gimple_build_assign (tgt, NOP_EXPR, src);
        }
      else if (cur_stmt)
        g = gimple_build_assign (tgt, src);
      else
        tgt = src;
      if (n->range == 16)
        nop_stats.found_16bit++;
      else if (n->range == 32)
        nop_stats.found_32bit++;
      else
        {
          gcc_assert (n->range == 64);
          nop_stats.found_64bit++;
        }
      if (dump_file)
        {
          fprintf (dump_file,
                   "%d bit reshuffle in target endianness found at: ",
                   (int) n->range);
          if (cur_stmt)
            print_gimple_stmt (dump_file, cur_stmt, 0);
          else
            {
              print_generic_expr (dump_file, tgt, 0);
              fprintf (dump_file, "\n");
            }
        }
      if (cur_stmt)
        gsi_replace (&gsi, g, true);
      return tgt;
    }
  else if (TREE_CODE (src) == BIT_FIELD_REF)
    src = TREE_OPERAND (src, 0);

  if (n->range == 16)
    bswap_stats.found_16bit++;
  else if (n->range == 32)
    bswap_stats.found_32bit++;
  else
    {
      gcc_assert (n->range == 64);
      bswap_stats.found_64bit++;
    }

  tmp = src;

  /* Convert the src expression if necessary.  */
  if (!useless_type_conversion_p (TREE_TYPE (tmp), bswap_type))
    {
      gimple *convert_stmt;

      tmp = make_temp_ssa_name (bswap_type, NULL, "bswapsrc");
      convert_stmt = gimple_build_assign (tmp, NOP_EXPR, src);
      gsi_insert_before (&gsi, convert_stmt, GSI_SAME_STMT);
    }

  /* Canonical form for 16 bit bswap is a rotate expression.  Only 16bit values
     are considered as rotation of 2N bit values by N bits is generally not
     equivalent to a bswap.  Consider for instance 0x01020304 r>> 16 which
     gives 0x03040102 while a bswap for that value is 0x04030201.  */
  if (bswap && n->range == 16)
    {
      tree count = build_int_cst (NULL, BITS_PER_UNIT);
      src = fold_build2 (LROTATE_EXPR, bswap_type, tmp, count);
      bswap_stmt = gimple_build_assign (NULL, src);
    }
  else
    bswap_stmt = gimple_build_call (fndecl, 1, tmp);

  if (tgt == NULL_TREE)
    tgt = make_ssa_name (bswap_type);
  tmp = tgt;

  /* Convert the result if necessary.  */
  if (!useless_type_conversion_p (TREE_TYPE (tgt), bswap_type))
    {
      gimple *convert_stmt;

      tmp = make_temp_ssa_name (bswap_type, NULL, "bswapdst");
      convert_stmt = gimple_build_assign (tgt, NOP_EXPR, tmp);
      gsi_insert_after (&gsi, convert_stmt, GSI_SAME_STMT);
    }

  gimple_set_lhs (bswap_stmt, tmp);

  if (dump_file)
    {
      fprintf (dump_file, "%d bit bswap implementation found at: ",
               (int) n->range);
      if (cur_stmt)
        print_gimple_stmt (dump_file, cur_stmt, 0);
      else
        {
          print_generic_expr (dump_file, tgt, 0);
          fprintf (dump_file, "\n");
        }
    }

  if (cur_stmt)
    {
      gsi_insert_after (&gsi, bswap_stmt, GSI_SAME_STMT);
      gsi_remove (&gsi, true);
    }
  else
    gsi_insert_before (&gsi, bswap_stmt, GSI_SAME_STMT);
  return tgt;
}
/* Find manual byte swap implementations as well as loads in a given
   endianness.  Byte swaps are turned into a bswap builtin invocation
   while endian loads are converted to a bswap builtin invocation or
   a simple load according to the target endianness.  */
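
/* For example (illustrative), a hand-written 32-bit swap such as

     (x << 24) | ((x & 0xff00) << 8) | ((x >> 8) & 0xff00) | (x >> 24)

   is matched by find_bswap_or_nop and replaced with a single
   __builtin_bswap32 (x) call by bswap_replace.  */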
unsigned int
pass_optimize_bswap::execute (function *fun)
{
  basic_block bb;
  bool bswap32_p, bswap64_p;
  bool changed = false;
  tree bswap32_type = NULL_TREE, bswap64_type = NULL_TREE;

  bswap32_p = (builtin_decl_explicit_p (BUILT_IN_BSWAP32)
               && optab_handler (bswap_optab, SImode) != CODE_FOR_nothing);
  bswap64_p = (builtin_decl_explicit_p (BUILT_IN_BSWAP64)
               && (optab_handler (bswap_optab, DImode) != CODE_FOR_nothing
                   || (bswap32_p && word_mode == SImode)));

  /* Determine the argument type of the builtins.  The code later on
     assumes that the return and argument type are the same.  */
  if (bswap32_p)
    {
      tree fndecl = builtin_decl_explicit (BUILT_IN_BSWAP32);
      bswap32_type = TREE_VALUE (TYPE_ARG_TYPES (TREE_TYPE (fndecl)));
    }

  if (bswap64_p)
    {
      tree fndecl = builtin_decl_explicit (BUILT_IN_BSWAP64);
      bswap64_type = TREE_VALUE (TYPE_ARG_TYPES (TREE_TYPE (fndecl)));
    }

  memset (&nop_stats, 0, sizeof (nop_stats));
  memset (&bswap_stats, 0, sizeof (bswap_stats));
  calculate_dominance_info (CDI_DOMINATORS);

  FOR_EACH_BB_FN (bb, fun)
    {
      gimple_stmt_iterator gsi;

      /* We do a reverse scan for bswap patterns to make sure we get the
         widest match.  As bswap pattern matching doesn't handle previously
         inserted smaller bswap replacements as sub-patterns, the wider
         variant wouldn't be detected.  */
      for (gsi = gsi_last_bb (bb); !gsi_end_p (gsi);)
        {
          gimple *ins_stmt, *cur_stmt = gsi_stmt (gsi);
          tree fndecl = NULL_TREE, bswap_type = NULL_TREE, load_type;
          enum tree_code code;
          struct symbolic_number n;
          bool bswap;

          /* This gsi_prev (&gsi) is not part of the for loop because cur_stmt
             might be moved to a different basic block by bswap_replace and gsi
             must not point to it if that's the case.  Moving the gsi_prev
             there makes sure that gsi points to the statement previous to
             cur_stmt while still making sure that all statements are
             considered in this basic block.  */
          gsi_prev (&gsi);

          if (!is_gimple_assign (cur_stmt))
            continue;

          code = gimple_assign_rhs_code (cur_stmt);
          switch (code)
            {
            case LROTATE_EXPR:
            case RROTATE_EXPR:
              if (!tree_fits_uhwi_p (gimple_assign_rhs2 (cur_stmt))
                  || tree_to_uhwi (gimple_assign_rhs2 (cur_stmt))
                     % BITS_PER_UNIT)
                continue;
              /* Fall through.  */
            case BIT_IOR_EXPR:
              break;
            default:
              continue;
            }

          ins_stmt = find_bswap_or_nop (cur_stmt, &n, &bswap);

          if (!ins_stmt)
            continue;

          switch (n.range)
            {
            case 16:
              /* Already in canonical form, nothing to do.  */
              if (code == LROTATE_EXPR || code == RROTATE_EXPR)
                continue;
              load_type = bswap_type = uint16_type_node;
              break;
            case 32:
              load_type = uint32_type_node;
              if (bswap32_p)
                {
                  fndecl = builtin_decl_explicit (BUILT_IN_BSWAP32);
                  bswap_type = bswap32_type;
                }
              break;
            case 64:
              load_type = uint64_type_node;
              if (bswap64_p)
                {
                  fndecl = builtin_decl_explicit (BUILT_IN_BSWAP64);
                  bswap_type = bswap64_type;
                }
              break;
            default:
              continue;
            }

          if (bswap && !fndecl && n.range != 16)
            continue;

          if (bswap_replace (gsi_for_stmt (cur_stmt), ins_stmt, fndecl,
                             bswap_type, load_type, &n, bswap))
            changed = true;
        }
    }

  statistics_counter_event (fun, "16-bit nop implementations found",
                            nop_stats.found_16bit);
  statistics_counter_event (fun, "32-bit nop implementations found",
                            nop_stats.found_32bit);
  statistics_counter_event (fun, "64-bit nop implementations found",
                            nop_stats.found_64bit);
  statistics_counter_event (fun, "16-bit bswap implementations found",
                            bswap_stats.found_16bit);
  statistics_counter_event (fun, "32-bit bswap implementations found",
                            bswap_stats.found_32bit);
  statistics_counter_event (fun, "64-bit bswap implementations found",
                            bswap_stats.found_64bit);

  return (changed ? TODO_update_ssa : 0);
}
gimple_opt_pass *
make_pass_optimize_bswap (gcc::context *ctxt)
{
  return new pass_optimize_bswap (ctxt);
}
/* Struct recording one operand for the store, which is either a constant,
   then VAL represents the constant and all the other fields are zero, or
   a memory load, then VAL represents the reference, BASE_ADDR is non-NULL
   and the other fields also reflect the memory load, or an SSA name, then
   VAL represents the SSA name and all the other fields are zero.  */

struct store_operand_info
{
  tree val;
  tree base_addr;
  poly_uint64 bitsize;
  poly_uint64 bitpos;
  poly_uint64 bitregion_start;
  poly_uint64 bitregion_end;
  gimple *stmt;
  bool bit_not_p;
  store_operand_info ();
};

store_operand_info::store_operand_info ()
  : val (NULL_TREE), base_addr (NULL_TREE), bitsize (0), bitpos (0),
    bitregion_start (0), bitregion_end (0), stmt (NULL), bit_not_p (false)
{
}
/* Struct recording the information about a single store of an immediate
   to memory.  These are created in the first phase and coalesced into
   merged_store_group objects in the second phase.  */

struct store_immediate_info
{
  unsigned HOST_WIDE_INT bitsize;
  unsigned HOST_WIDE_INT bitpos;
  unsigned HOST_WIDE_INT bitregion_start;
  /* This is one past the last bit of the bit region.  */
  unsigned HOST_WIDE_INT bitregion_end;
  gimple *stmt;
  unsigned int order;
  /* INTEGER_CST for constant stores, MEM_REF for memory copy,
     BIT_*_EXPR for logical bitwise operation, BIT_INSERT_EXPR
     for bit insertion.
     LROTATE_EXPR if it can be only bswap optimized and
     ops are not really meaningful.
     NOP_EXPR if bswap optimization detected identity, ops
     are not meaningful.  */
  enum tree_code rhs_code;
  /* Two fields for bswap optimization purposes.  */
  struct symbolic_number n;
  gimple *ins_stmt;
  /* True if BIT_{AND,IOR,XOR}_EXPR result is inverted before storing.  */
  bool bit_not_p;
  /* True if ops have been swapped and thus ops[1] represents
     rhs1 of BIT_{AND,IOR,XOR}_EXPR and ops[0] represents rhs2.  */
  bool ops_swapped_p;
  /* Operands.  For BIT_*_EXPR rhs_code both operands are used, otherwise
     just the first one.  */
  store_operand_info ops[2];
  store_immediate_info (unsigned HOST_WIDE_INT, unsigned HOST_WIDE_INT,
                        unsigned HOST_WIDE_INT, unsigned HOST_WIDE_INT,
                        gimple *, unsigned int, enum tree_code,
                        struct symbolic_number &, gimple *, bool,
                        const store_operand_info &,
                        const store_operand_info &);
};

store_immediate_info::store_immediate_info (unsigned HOST_WIDE_INT bs,
                                            unsigned HOST_WIDE_INT bp,
                                            unsigned HOST_WIDE_INT brs,
                                            unsigned HOST_WIDE_INT bre,
                                            gimple *st,
                                            unsigned int ord,
                                            enum tree_code rhscode,
                                            struct symbolic_number &nr,
                                            gimple *ins_stmtp,
                                            bool bitnotp,
                                            const store_operand_info &op0r,
                                            const store_operand_info &op1r)
  : bitsize (bs), bitpos (bp), bitregion_start (brs), bitregion_end (bre),
    stmt (st), order (ord), rhs_code (rhscode), n (nr),
    ins_stmt (ins_stmtp), bit_not_p (bitnotp), ops_swapped_p (false)
#if __cplusplus >= 201103L
    , ops { op0r, op1r }
{
}
#else
{
  ops[0] = op0r;
  ops[1] = op1r;
}
#endif
/* Struct representing a group of stores to contiguous memory locations.
   These are produced by the second phase (coalescing) and consumed in the
   third phase that outputs the widened stores.  */

struct merged_store_group
{
  unsigned HOST_WIDE_INT start;
  unsigned HOST_WIDE_INT width;
  unsigned HOST_WIDE_INT bitregion_start;
  unsigned HOST_WIDE_INT bitregion_end;
  /* The size of the allocated memory for val and mask.  */
  unsigned HOST_WIDE_INT buf_size;
  unsigned HOST_WIDE_INT align_base;
  poly_uint64 load_align_base[2];

  unsigned int align;
  unsigned int load_align[2];
  unsigned int first_order;
  unsigned int last_order;
  bool bit_insertion;

  auto_vec<store_immediate_info *> stores;
  /* We record the first and last original statements in the sequence because
     we'll need their vuse/vdef and replacement position.  It's easier to keep
     track of them separately as 'stores' is reordered by apply_stores.  */
  gimple *first_stmt;
  gimple *last_stmt;
  unsigned char *val;
  unsigned char *mask;

  merged_store_group (store_immediate_info *);
  ~merged_store_group ();
  void merge_into (store_immediate_info *);
  void merge_overlapping (store_immediate_info *);
  bool apply_stores ();
private:
  void do_merge (store_immediate_info *);
};
/* Debug helper.  Dump LEN elements of byte array PTR to FD in hex.  */

static void
dump_char_array (FILE *fd, unsigned char *ptr, unsigned int len)
{
  if (!fd)
    return;

  for (unsigned int i = 0; i < len; i++)
    fprintf (fd, "%02x ", ptr[i]);
  fprintf (fd, "\n");
}
/* Shift left the bytes in PTR of SZ elements by AMNT bits, carrying over the
   bits between adjacent elements.  AMNT should be within
   [0, BITS_PER_UNIT).
   Example, AMNT = 2:
   00011111|11100000 << 2 = 01111111|10000000
   PTR[1]  | PTR[0]         PTR[1]  | PTR[0].  */

static void
shift_bytes_in_array (unsigned char *ptr, unsigned int sz, unsigned int amnt)
{
  if (amnt == 0)
    return;

  unsigned char carry_over = 0U;
  unsigned char carry_mask = (~0U) << (unsigned char) (BITS_PER_UNIT - amnt);
  unsigned char clear_mask = (~0U) << amnt;

  for (unsigned int i = 0; i < sz; i++)
    {
      unsigned prev_carry_over = carry_over;
      carry_over = (ptr[i] & carry_mask) >> (BITS_PER_UNIT - amnt);

      ptr[i] <<= amnt;
      ptr[i] &= clear_mask;
      ptr[i] |= prev_carry_over;
    }
}
/* Like shift_bytes_in_array but for big-endian.
   Shift right the bytes in PTR of SZ elements by AMNT bits, carrying over the
   bits between adjacent elements.  AMNT should be within
   [0, BITS_PER_UNIT).
   Example, AMNT = 2:
   00011111|11100000 >> 2 = 00000111|11111000
   PTR[0]  | PTR[1]         PTR[0]  | PTR[1].  */

static void
shift_bytes_in_array_right (unsigned char *ptr, unsigned int sz,
                            unsigned int amnt)
{
  if (amnt == 0)
    return;

  unsigned char carry_over = 0U;
  unsigned char carry_mask = ~(~0U << amnt);

  for (unsigned int i = 0; i < sz; i++)
    {
      unsigned prev_carry_over = carry_over;
      carry_over = ptr[i] & carry_mask;

      carry_over <<= (unsigned char) BITS_PER_UNIT - amnt;
      ptr[i] >>= amnt;
      ptr[i] |= prev_carry_over;
    }
}
/* Clear out LEN bits starting from bit START in the byte array
   PTR.  This clears the bits to the *right* from START.
   START must be within [0, BITS_PER_UNIT) and counts starting from
   the least significant bit.  */
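
/* Illustrative example: clear_bit_region_be (ptr, 7, 4) clears bits 7
   down to 4 of ptr[0]; START counts from the least significant bit and
   the cleared region extends toward lower bit indices.  */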
static void
clear_bit_region_be (unsigned char *ptr, unsigned int start,
                     unsigned int len)
{
  if (len == 0)
    return;
  /* Clear len bits to the right of start.  */
  else if (len <= start + 1)
    {
      unsigned char mask = (~(~0U << len));
      mask = mask << (start + 1U - len);
      ptr[0] &= ~mask;
    }
  else if (start != BITS_PER_UNIT - 1)
    {
      clear_bit_region_be (ptr, start, (start % BITS_PER_UNIT) + 1);
      clear_bit_region_be (ptr + 1, BITS_PER_UNIT - 1,
                           len - (start % BITS_PER_UNIT) - 1);
    }
  else if (start == BITS_PER_UNIT - 1
           && len > BITS_PER_UNIT)
    {
      unsigned int nbytes = len / BITS_PER_UNIT;
      memset (ptr, 0, nbytes);
      if (len % BITS_PER_UNIT != 0)
        clear_bit_region_be (ptr + nbytes, BITS_PER_UNIT - 1,
                             len % BITS_PER_UNIT);
    }
  else
    gcc_unreachable ();
}
/* In the byte array PTR clear the bit region that starts at bit
   START and is LEN bits wide.
   For regions spanning multiple bytes do this recursively until we reach
   zero LEN or a region contained within a single byte.  */
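
/* Illustrative example: clear_bit_region (ptr, 6, 4) clears bits 6-7 of
   ptr[0] and bits 0-1 of ptr[1] (bit 0 being each byte's least significant
   bit), recursing once across the byte boundary.  */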
static void
clear_bit_region (unsigned char *ptr, unsigned int start,
                  unsigned int len)
{
  /* Degenerate base case.  */
  if (len == 0)
    return;
  else if (start >= BITS_PER_UNIT)
    clear_bit_region (ptr + 1, start - BITS_PER_UNIT, len);
  /* Second base case.  */
  else if ((start + len) <= BITS_PER_UNIT)
    {
      unsigned char mask = (~0U) << (unsigned char) (BITS_PER_UNIT - len);
      mask >>= BITS_PER_UNIT - (start + len);

      ptr[0] &= ~mask;

      return;
    }
  /* Clear most significant bits in a byte and proceed with the next byte.  */
  else if (start != 0)
    {
      clear_bit_region (ptr, start, BITS_PER_UNIT - start);
      clear_bit_region (ptr + 1, 0, len - (BITS_PER_UNIT - start));
    }
  /* Whole bytes need to be cleared.  */
  else if (start == 0 && len > BITS_PER_UNIT)
    {
      unsigned int nbytes = len / BITS_PER_UNIT;
      /* We could recurse on each byte but we clear whole bytes, so a simple
         memset will do.  */
      memset (ptr, '\0', nbytes);
      /* Clear the remaining sub-byte region if there is one.  */
      if (len % BITS_PER_UNIT != 0)
        clear_bit_region (ptr + nbytes, 0, len % BITS_PER_UNIT);
    }
  else
    gcc_unreachable ();
}
/* Write BITLEN bits of EXPR to the byte array PTR at
   bit position BITPOS.  PTR should contain TOTAL_BYTES elements.
   Return true if the operation succeeded.  */

static bool
encode_tree_to_bitpos (tree expr, unsigned char *ptr, int bitlen, int bitpos,
                       unsigned int total_bytes)
{
  unsigned int first_byte = bitpos / BITS_PER_UNIT;
  tree tmp_int = expr;
  bool sub_byte_op_p = ((bitlen % BITS_PER_UNIT)
                        || (bitpos % BITS_PER_UNIT)
                        || !int_mode_for_size (bitlen, 0).exists ());

  if (!sub_byte_op_p)
    return native_encode_expr (tmp_int, ptr + first_byte, total_bytes) != 0;

  /* LITTLE-ENDIAN
     We are writing a non byte-sized quantity or at a position that is not
     at a byte boundary.
     |--------|--------|--------| ptr + first_byte
           ^              ^
           xxx xxxxxxxx xxx< bp>
           |______EXPR____|

     First native_encode_expr EXPR into a temporary buffer and shift each
     byte in the buffer by 'bp' (carrying the bits over as necessary).
     |00000000|00xxxxxx|xxxxxxxx| << bp = |000xxxxx|xxxxxxxx|xxx00000|
                                              <------bitlen---->< bp>
     Then we clear the destination bits:
     |---00000|00000000|000-----| ptr + first_byte
         <-------bitlen--->< bp>

     Finally we OR the bytes of the shifted EXPR into the cleared region:
     |---xxxxx||xxxxxxxx||xxx-----| ptr + first_byte.

     BIG-ENDIAN
     We are writing a non byte-sized quantity or at a position that is not
     at a byte boundary.
     ptr + first_byte |--------|--------|--------|
                            ^              ^
                       <bp >xxx xxxxxxxx xxx
                            |_____EXPR_____|

     First native_encode_expr EXPR into a temporary buffer and shift each
     byte in the buffer to the right (carrying the bits over as necessary).
     We shift by as much as needed to align the most significant bit of EXPR
     with bitpos:
     |00xxxxxx|xxxxxxxx| >> 3 = |00000xxx|xxxxxxxx|xxxxx000|
        <---bitlen---->          <bp ><-----bitlen----->

     Then we clear the destination bits:
     ptr + first_byte |-----000||00000000||00000---|
                      <bp ><-------bitlen----->

     Finally we OR the bytes of the shifted EXPR into the cleared region:
     ptr + first_byte |---xxxxx||xxxxxxxx||xxx-----|.
     The awkwardness comes from the fact that bitpos is counted from the
     most significant bit of a byte.  */

  /* We must be dealing with fixed-size data at this point, since the
     total size is also fixed.  */
  fixed_size_mode mode = as_a <fixed_size_mode> (TYPE_MODE (TREE_TYPE (expr)));
  /* Allocate an extra byte so that we have space to shift into.  */
  unsigned int byte_size = GET_MODE_SIZE (mode) + 1;
  unsigned char *tmpbuf = XALLOCAVEC (unsigned char, byte_size);
  memset (tmpbuf, '\0', byte_size);
  /* The store detection code should only have allowed constants that are
     accepted by native_encode_expr.  */
  if (native_encode_expr (expr, tmpbuf, byte_size - 1) == 0)
    gcc_unreachable ();

  /* The native_encode_expr machinery uses TYPE_MODE to determine how many
     bytes to write.  This means it can write more than
     ROUND_UP (bitlen, BITS_PER_UNIT) / BITS_PER_UNIT bytes (for example
     write 8 bytes for a bitlen of 40).  Skip the bytes that are not within
     bitlen and zero out the bits that are not relevant as well (that may
     contain a sign bit due to sign-extension).  */
  unsigned int padding
    = byte_size - ROUND_UP (bitlen, BITS_PER_UNIT) / BITS_PER_UNIT - 1;
  /* On big-endian the padding is at the 'front' so just skip the initial
     bytes.  */
  if (BYTES_BIG_ENDIAN)
    tmpbuf += padding;

  byte_size -= padding;

  if (bitlen % BITS_PER_UNIT != 0)
    {
      if (BYTES_BIG_ENDIAN)
        clear_bit_region_be (tmpbuf, BITS_PER_UNIT - 1,
                             BITS_PER_UNIT - (bitlen % BITS_PER_UNIT));
      else
        clear_bit_region (tmpbuf, bitlen,
                          byte_size * BITS_PER_UNIT - bitlen);
    }
  /* Left shifting relies on the last byte being clear if bitlen is
     a multiple of BITS_PER_UNIT, which might not be clear if
     there are padding bytes.  */
  else if (!BYTES_BIG_ENDIAN)
    tmpbuf[byte_size - 1] = '\0';

  /* Clear the bit region in PTR where the bits from TMPBUF will be
     inserted into.  */
  if (BYTES_BIG_ENDIAN)
    clear_bit_region_be (ptr + first_byte,
                         BITS_PER_UNIT - 1 - (bitpos % BITS_PER_UNIT), bitlen);
  else
    clear_bit_region (ptr + first_byte, bitpos % BITS_PER_UNIT, bitlen);

  int shift_amnt;
  int bitlen_mod = bitlen % BITS_PER_UNIT;
  int bitpos_mod = bitpos % BITS_PER_UNIT;

  bool skip_byte = false;
  if (BYTES_BIG_ENDIAN)
    {
      /* BITPOS and BITLEN are exactly aligned and no shifting
         is necessary.  */
      if (bitpos_mod + bitlen_mod == BITS_PER_UNIT
          || (bitpos_mod == 0 && bitlen_mod == 0))
        shift_amnt = 0;
      /* |. . . . . . . .|
          <bp >   <blen >.
         We always shift right for BYTES_BIG_ENDIAN so shift the beginning
         of the value until it aligns with 'bp' in the next byte over.  */
      else if (bitpos_mod + bitlen_mod < BITS_PER_UNIT)
        {
          shift_amnt = bitlen_mod + bitpos_mod;
          skip_byte = bitlen_mod != 0;
        }
      /* |. . . . . . . .|
          <----bp--->
            <---blen---->.
         Shift the value right within the same byte so it aligns with 'bp'.  */
      else
        shift_amnt = bitlen_mod + bitpos_mod - BITS_PER_UNIT;
    }
  else
    shift_amnt = bitpos % BITS_PER_UNIT;

  /* Create the shifted version of EXPR.  */
  if (!BYTES_BIG_ENDIAN)
    {
      shift_bytes_in_array (tmpbuf, byte_size, shift_amnt);
      if (shift_amnt == 0)
        byte_size--;
    }
  else
    {
      gcc_assert (BYTES_BIG_ENDIAN);
      shift_bytes_in_array_right (tmpbuf, byte_size, shift_amnt);
      /* If shifting right forced us to move into the next byte skip the now
         empty byte.  */
      if (skip_byte)
        {
          tmpbuf++;
          byte_size--;
        }
    }

  /* Insert the bits from TMPBUF.  */
  for (unsigned int i = 0; i < byte_size; i++)
    ptr[first_byte + i] |= tmpbuf[i];

  return true;
}
/* Sorting function for store_immediate_info objects.
   Sorts them by bitposition.  */

static int
sort_by_bitpos (const void *x, const void *y)
{
  store_immediate_info *const *tmp = (store_immediate_info * const *) x;
  store_immediate_info *const *tmp2 = (store_immediate_info * const *) y;

  if ((*tmp)->bitpos < (*tmp2)->bitpos)
    return -1;
  else if ((*tmp)->bitpos > (*tmp2)->bitpos)
    return 1;
  else
    /* If they are the same let's use the order which is guaranteed to
       be different.  */
    return (*tmp)->order - (*tmp2)->order;
}

/* Sorting function for store_immediate_info objects.
   Sorts them by the order field.  */

static int
sort_by_order (const void *x, const void *y)
{
  store_immediate_info *const *tmp = (store_immediate_info * const *) x;
  store_immediate_info *const *tmp2 = (store_immediate_info * const *) y;

  if ((*tmp)->order < (*tmp2)->order)
    return -1;
  else if ((*tmp)->order > (*tmp2)->order)
    return 1;

  gcc_unreachable ();
}
/* Initialize a merged_store_group object from a store_immediate_info
   object.  */

merged_store_group::merged_store_group (store_immediate_info *info)
{
  start = info->bitpos;
  width = info->bitsize;
  bitregion_start = info->bitregion_start;
  bitregion_end = info->bitregion_end;
  /* VAL has memory allocated for it in apply_stores once the group
     width has been finalized.  */
  val = NULL;
  mask = NULL;
  bit_insertion = false;
  unsigned HOST_WIDE_INT align_bitpos = 0;
  get_object_alignment_1 (gimple_assign_lhs (info->stmt),
                          &align, &align_bitpos);
  align_base = start - align_bitpos;
  for (int i = 0; i < 2; ++i)
    {
      store_operand_info &op = info->ops[i];
      if (op.base_addr == NULL_TREE)
        {
          load_align[i] = 0;
          load_align_base[i] = 0;
        }
      else
        {
          get_object_alignment_1 (op.val, &load_align[i], &align_bitpos);
          load_align_base[i] = op.bitpos - align_bitpos;
        }
    }
  stores.create (1);
  stores.safe_push (info);
  last_stmt = info->stmt;
  last_order = info->order;
  first_stmt = last_stmt;
  first_order = last_order;
  buf_size = 0;
}
merged_store_group::~merged_store_group ()
{
  if (val)
    XDELETEVEC (val);
}

/* Helper method for merge_into and merge_overlapping to do
   the common part.  */
void
merged_store_group::do_merge (store_immediate_info *info)
{
  bitregion_start = MIN (bitregion_start, info->bitregion_start);
  bitregion_end = MAX (bitregion_end, info->bitregion_end);

  unsigned int this_align;
  unsigned HOST_WIDE_INT align_bitpos = 0;
  get_object_alignment_1 (gimple_assign_lhs (info->stmt),
                          &this_align, &align_bitpos);
  if (this_align > align)
    {
      align = this_align;
      align_base = info->bitpos - align_bitpos;
    }
  for (int i = 0; i < 2; ++i)
    {
      store_operand_info &op = info->ops[i];
      if (!op.base_addr)
        continue;

      get_object_alignment_1 (op.val, &this_align, &align_bitpos);
      if (this_align > load_align[i])
        {
          load_align[i] = this_align;
          load_align_base[i] = op.bitpos - align_bitpos;
        }
    }

  gimple *stmt = info->stmt;
  stores.safe_push (info);
  if (info->order > last_order)
    {
      last_order = info->order;
      last_stmt = stmt;
    }
  else if (info->order < first_order)
    {
      first_order = info->order;
      first_stmt = stmt;
    }
}
/* Merge a store recorded by INFO into this merged store.
   The store is not overlapping with the existing recorded
   stores.  */

void
merged_store_group::merge_into (store_immediate_info *info)
{
  /* Make sure we're inserting in the position we think we're inserting.  */
  gcc_assert (info->bitpos >= start + width
              && info->bitregion_start <= bitregion_end);

  width = info->bitpos + info->bitsize - start;
  do_merge (info);
}

/* Merge a store described by INFO into this merged store.
   INFO overlaps in some way with the current store (i.e. it's not contiguous
   which is handled by merged_store_group::merge_into).  */

void
merged_store_group::merge_overlapping (store_immediate_info *info)
{
  /* If the store extends the size of the group, extend the width.  */
  if (info->bitpos + info->bitsize > start + width)
    width = info->bitpos + info->bitsize - start;

  do_merge (info);
}
/* Go through all the recorded stores in this group in program order and
   apply their values to the VAL byte array to create the final merged
   value.  Return true if the operation succeeded.  */
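
/* For instance (illustrative): two 1-byte constant stores of 0x12 at bit 0
   and 0x34 at bit 8 of a 4-byte group leave val = { 0x12, 0x34, 0, 0 } and
   mask = { 0x00, 0x00, 0xff, 0xff }; the set mask bytes mark bits the group
   never wrote.  */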
bool
merged_store_group::apply_stores ()
{
  /* Make sure we have more than one store in the group, otherwise we cannot
     merge anything.  */
  if (bitregion_start % BITS_PER_UNIT != 0
      || bitregion_end % BITS_PER_UNIT != 0
      || stores.length () == 1)
    return false;

  stores.qsort (sort_by_order);
  store_immediate_info *info;
  unsigned int i;
  /* Create a power-of-2-sized buffer for native_encode_expr.  */
  buf_size = 1 << ceil_log2 ((bitregion_end - bitregion_start) / BITS_PER_UNIT);
  val = XNEWVEC (unsigned char, 2 * buf_size);
  mask = val + buf_size;
  memset (val, 0, buf_size);
  memset (mask, ~0U, buf_size);

  FOR_EACH_VEC_ELT (stores, i, info)
    {
      unsigned int pos_in_buffer = info->bitpos - bitregion_start;
      tree cst;
      if (info->ops[0].val && info->ops[0].base_addr == NULL_TREE)
        cst = info->ops[0].val;
      else if (info->ops[1].val && info->ops[1].base_addr == NULL_TREE)
        cst = info->ops[1].val;
      else
        cst = NULL_TREE;
      bool ret = true;
      if (cst)
        {
          if (info->rhs_code == BIT_INSERT_EXPR)
            bit_insertion = true;
          else
            ret = encode_tree_to_bitpos (cst, val, info->bitsize,
                                         pos_in_buffer, buf_size);
        }
      unsigned char *m = mask + (pos_in_buffer / BITS_PER_UNIT);
      if (BYTES_BIG_ENDIAN)
        clear_bit_region_be (m, (BITS_PER_UNIT - 1
                                 - (pos_in_buffer % BITS_PER_UNIT)),
                             info->bitsize);
      else
        clear_bit_region (m, pos_in_buffer % BITS_PER_UNIT, info->bitsize);
      if (cst && dump_file && (dump_flags & TDF_DETAILS))
        {
          if (ret)
            {
              fputs ("After writing ", dump_file);
              print_generic_expr (dump_file, cst, 0);
              fprintf (dump_file, " of size " HOST_WIDE_INT_PRINT_DEC
                       " at position %d\n", info->bitsize, pos_in_buffer);
              fputs ("  the merged value contains ", dump_file);
              dump_char_array (dump_file, val, buf_size);
              fputs ("  the merged mask contains  ", dump_file);
              dump_char_array (dump_file, mask, buf_size);
              if (bit_insertion)
                fputs ("  bit insertion is required\n", dump_file);
            }
          else
            fprintf (dump_file, "Failed to merge stores\n");
        }
      if (!ret)
        return false;
    }
  stores.qsort (sort_by_bitpos);
  return true;
}
/* Structure describing the store chain.  */

struct imm_store_chain_info
{
  /* Doubly-linked list that imposes an order on chain processing.
     PNXP (prev's next pointer) points to the head of a list, or to
     the next field in the previous chain in the list.
     See pass_store_merging::m_stores_head for more rationale.  */
  imm_store_chain_info *next, **pnxp;
  tree base_addr;
  auto_vec<store_immediate_info *> m_store_info;
  auto_vec<merged_store_group *> m_merged_store_groups;

  imm_store_chain_info (imm_store_chain_info *&inspt, tree b_a)
  : next (inspt), pnxp (&inspt), base_addr (b_a)
  {
    inspt = this;
    if (next)
      {
        gcc_checking_assert (pnxp == next->pnxp);
        next->pnxp = &next;
      }
  }
  ~imm_store_chain_info ()
  {
    *pnxp = next;
    if (next)
      {
        gcc_checking_assert (&next == next->pnxp);
        next->pnxp = pnxp;
      }
  }
  bool terminate_and_process_chain ();
  bool try_coalesce_bswap (merged_store_group *, unsigned int, unsigned int);
  bool coalesce_immediate_stores ();
  bool output_merged_store (merged_store_group *);
  bool output_merged_stores ();
};
const pass_data pass_data_tree_store_merging = {
  GIMPLE_PASS,     /* type */
  "store-merging", /* name */
  OPTGROUP_NONE,   /* optinfo_flags */
  TV_GIMPLE_STORE_MERGING, /* tv_id */
  PROP_ssa,        /* properties_required */
  0,               /* properties_provided */
  0,               /* properties_destroyed */
  0,               /* todo_flags_start */
  TODO_update_ssa, /* todo_flags_finish */
};

class pass_store_merging : public gimple_opt_pass
{
public:
  pass_store_merging (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_tree_store_merging, ctxt), m_stores_head ()
  {
  }

  /* Pass not supported for PDP-endian, nor for insane hosts or
     target character sizes where native_{encode,interpret}_expr
     doesn't work properly.  */
  virtual bool
  gate (function *)
  {
    return flag_store_merging
           && BYTES_BIG_ENDIAN == WORDS_BIG_ENDIAN
           && CHAR_BIT == 8
           && BITS_PER_UNIT == 8;
  }

  virtual unsigned int execute (function *);

private:
  hash_map<tree_operand_hash, struct imm_store_chain_info *> m_stores;

  /* Form a doubly-linked stack of the elements of m_stores, so that
     we can iterate over them in a predictable way.  Using this order
     avoids extraneous differences in the compiler output just because
     of tree pointer variations (e.g. different chains end up in
     different positions of m_stores, so they are handled in different
     orders, so they allocate or release SSA names in different
     orders, and when they get reused, subsequent passes end up
     getting different SSA names, which may ultimately change
     decisions when going out of SSA).  */
  imm_store_chain_info *m_stores_head;

  void process_store (gimple *);
  bool terminate_and_process_all_chains ();
  bool terminate_all_aliasing_chains (imm_store_chain_info **, gimple *);
  bool terminate_and_release_chain (imm_store_chain_info *);
}; // class pass_store_merging
/* Terminate and process all recorded chains.  Return true if any changes
   were made.  */

bool
pass_store_merging::terminate_and_process_all_chains ()
{
  bool ret = false;
  while (m_stores_head)
    ret |= terminate_and_release_chain (m_stores_head);
  gcc_assert (m_stores.elements () == 0);
  gcc_assert (m_stores_head == NULL);

  return ret;
}
/* Terminate all chains that are affected by the statement STMT.
   CHAIN_INFO is the chain we should ignore from the checks if
   non-NULL.  Return true if any changes were made.  */

bool
pass_store_merging::terminate_all_aliasing_chains (imm_store_chain_info
						     **chain_info,
						   gimple *stmt)
{
  bool ret = false;

  /* If the statement doesn't touch memory it can't alias.  */
  if (!gimple_vuse (stmt))
    return false;

  tree store_lhs = gimple_store_p (stmt) ? gimple_get_lhs (stmt) : NULL_TREE;
  for (imm_store_chain_info *next = m_stores_head, *cur = next; cur;
       cur = next)
    {
      next = cur->next;

      /* We already checked all the stores in chain_info and terminated the
	 chain if necessary.  Skip it here.  */
      if (chain_info && *chain_info == cur)
	continue;

      store_immediate_info *info;
      unsigned int i;
      FOR_EACH_VEC_ELT (cur->m_store_info, i, info)
	{
	  tree lhs = gimple_assign_lhs (info->stmt);
	  if (ref_maybe_used_by_stmt_p (stmt, lhs)
	      || stmt_may_clobber_ref_p (stmt, lhs)
	      || (store_lhs && refs_output_dependent_p (store_lhs, lhs)))
	    {
	      if (dump_file && (dump_flags & TDF_DETAILS))
		{
		  fprintf (dump_file, "stmt causes chain termination:\n");
		  print_gimple_stmt (dump_file, stmt, 0);
		}
	      terminate_and_release_chain (cur);
	      ret = true;
	      break;
	    }
	}
    }

  return ret;
}
/* Helper function.  Terminate the recorded chain storing to base object
   BASE.  Return true if the merging and output was successful.  The m_stores
   entry is removed after the processing in any case.  */

bool
pass_store_merging::terminate_and_release_chain (imm_store_chain_info
						   *chain_info)
{
  bool ret = chain_info->terminate_and_process_chain ();
  m_stores.remove (chain_info->base_addr);
  delete chain_info;
  return ret;
}
/* Return true if stmts in between FIRST (inclusive) and LAST (exclusive)
   may clobber REF.  FIRST and LAST must be in the same basic block and
   have non-NULL vdef.  We want to be able to sink load of REF across
   stores between FIRST and LAST, up to right before LAST.  */

static bool
stmts_may_clobber_ref_p (gimple *first, gimple *last, tree ref)
{
  ao_ref r;
  ao_ref_init (&r, ref);
  unsigned int count = 0;
  tree vop = gimple_vdef (last);
  gimple *stmt;

  gcc_checking_assert (gimple_bb (first) == gimple_bb (last));
  do
    {
      stmt = SSA_NAME_DEF_STMT (vop);
      if (stmt_may_clobber_ref_p_1 (stmt, &r))
	return true;
      if (gimple_store_p (stmt)
	  && refs_anti_dependent_p (ref, gimple_get_lhs (stmt)))
	return true;
      /* Avoid quadratic compile time by bounding the number of checks
	 we perform.  */
      if (++count > MAX_STORE_ALIAS_CHECKS)
	return true;
      vop = gimple_vuse (stmt);
    }
  while (stmt != first);
  return false;
}
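
/* For instance (hypothetical GIMPLE), when sinking the load of q[0] in
     _1 = q[0];  p[0] = _1;  p[1] = 7;
   down to right before the p[1] store, the walk starts at the vdef of
   p[1] = 7; and follows vuses back until it reaches p[0] = _1;, checking
   that no statement in between may clobber q[0].  */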
/* Return true if INFO->ops[IDX] is mergeable with the
   corresponding loads already in MERGED_STORE group.
   BASE_ADDR is the base address of the whole store group.  */

static bool
compatible_load_p (merged_store_group *merged_store,
		   store_immediate_info *info,
		   tree base_addr, int idx)
{
  store_immediate_info *infof = merged_store->stores[0];
  if (!info->ops[idx].base_addr
      || maybe_ne (info->ops[idx].bitpos - infof->ops[idx].bitpos,
		   info->bitpos - infof->bitpos)
      || !operand_equal_p (info->ops[idx].base_addr,
			   infof->ops[idx].base_addr, 0))
    return false;

  store_immediate_info *infol = merged_store->stores.last ();
  tree load_vuse = gimple_vuse (info->ops[idx].stmt);
  /* In this case all vuses should be the same, e.g.
     _1 = s.a; _2 = s.b; _3 = _1 | 1; t.a = _3; _4 = _2 | 2; t.b = _4;
     or
     _1 = s.a; _2 = s.b; t.a = _1; t.b = _2;
     and we can emit the coalesced load next to any of those loads.  */
  if (gimple_vuse (infof->ops[idx].stmt) == load_vuse
      && gimple_vuse (infol->ops[idx].stmt) == load_vuse)
    return true;

  /* Otherwise, at least for now require that the load has the same
     vuse as the store.  See following examples.  */
  if (gimple_vuse (info->stmt) != load_vuse)
    return false;

  if (gimple_vuse (infof->stmt) != gimple_vuse (infof->ops[idx].stmt)
      || (infof != infol
	  && gimple_vuse (infol->stmt) != gimple_vuse (infol->ops[idx].stmt)))
    return false;

  /* If the load is from the same location as the store, already
     the construction of the immediate chain info guarantees no intervening
     stores, so no further checks are needed.  Example:
     _1 = s.a; _2 = _1 & -7; s.a = _2; _3 = s.b; _4 = _3 & -7; s.b = _4;  */
  if (known_eq (info->ops[idx].bitpos, info->bitpos)
      && operand_equal_p (info->ops[idx].base_addr, base_addr, 0))
    return true;

  /* Otherwise, we need to punt if any of the loads can be clobbered by any
     of the stores in the group, or any other stores in between those.
     Previous calls to compatible_load_p ensured that for all the
     merged_store->stores IDX loads, no stmts starting with
     merged_store->first_stmt and ending right before merged_store->last_stmt
     clobbers those loads.  */
  gimple *first = merged_store->first_stmt;
  gimple *last = merged_store->last_stmt;
  unsigned int i;
  store_immediate_info *infoc;
  /* The stores are sorted by increasing store bitpos, so if info->stmt store
     comes before the so far first load, we'll be changing
     merged_store->first_stmt.  In that case we need to give up if
     any of the earlier processed loads are clobbered by the stmts in the new
     range.  */
  if (info->order < merged_store->first_order)
    {
      FOR_EACH_VEC_ELT (merged_store->stores, i, infoc)
	if (stmts_may_clobber_ref_p (info->stmt, first, infoc->ops[idx].val))
	  return false;
      first = info->stmt;
    }
  /* Similarly, we could change merged_store->last_stmt, so ensure
     in that case no stmts in the new range clobber any of the earlier
     processed loads.  */
  else if (info->order > merged_store->last_order)
    {
      FOR_EACH_VEC_ELT (merged_store->stores, i, infoc)
	if (stmts_may_clobber_ref_p (last, info->stmt, infoc->ops[idx].val))
	  return false;
      last = info->stmt;
    }
  /* And finally, we'd be adding a new load to the set, ensure it isn't
     clobbered in the new range.  */
  if (stmts_may_clobber_ref_p (first, last, info->ops[idx].val))
    return false;

  /* Otherwise, we are looking for:
     _1 = s.a; _2 = _1 ^ 15; t.a = _2; _3 = s.b; _4 = _3 ^ 15; t.b = _4;
     or
     _1 = s.a; t.a = _1; _2 = s.b; t.b = _2;  */
  return true;
}
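
/* E.g. for a memory-to-memory copy t = s (an illustrative sketch):
     _1 = s.a; t.a = _1;
     _2 = s.b; t.b = _2;
   the second store's load s.b is compatible with the group started by the
   first iff s.b sits at the same distance from s.a as t.b does from t.a
   and no statement in the group's range may clobber s.b.  */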
/* Add all refs loaded to compute VAL to REFS vector.  */

static void
gather_bswap_load_refs (vec<tree> *refs, tree val)
{
  if (TREE_CODE (val) != SSA_NAME)
    return;

  gimple *stmt = SSA_NAME_DEF_STMT (val);
  if (!is_gimple_assign (stmt))
    return;

  if (gimple_assign_load_p (stmt))
    {
      refs->safe_push (gimple_assign_rhs1 (stmt));
      return;
    }

  switch (gimple_assign_rhs_class (stmt))
    {
    case GIMPLE_BINARY_RHS:
      gather_bswap_load_refs (refs, gimple_assign_rhs2 (stmt));
      /* FALLTHRU */
    case GIMPLE_UNARY_RHS:
      gather_bswap_load_refs (refs, gimple_assign_rhs1 (stmt));
      break;
    default:
      gcc_unreachable ();
    }
}
/* Check if there are any stores in M_STORE_INFO after index I
   (where M_STORE_INFO must be sorted by sort_by_bitpos) that overlap
   a potential group ending with END that have their order
   smaller than LAST_ORDER.  RHS_CODE is the kind of store in the
   group.  Return true if there are no such stores.
   Consider:
     MEM[(long long int *)p_28] = 0;
     MEM[(long long int *)p_28 + 8B] = 0;
     MEM[(long long int *)p_28 + 16B] = 0;
     MEM[(long long int *)p_28 + 24B] = 0;
     _129 = (int) _130;
     MEM[(int *)p_28 + 8B] = _129;
     MEM[(int *)p_28].a = -1;
   We already have
     MEM[(long long int *)p_28] = 0;
     MEM[(int *)p_28].a = -1;
   stmts in the current group and need to consider if it is safe to
   add MEM[(long long int *)p_28 + 8B] = 0; store into the same group.
   There is an overlap between that store and the MEM[(int *)p_28 + 8B] = _129;
   store though, so if we add the MEM[(long long int *)p_28 + 8B] = 0;
   into the group and merging of those 3 stores is successful, merged
   stmts will be emitted at the latest store from that group, i.e.
   LAST_ORDER, which is the MEM[(int *)p_28].a = -1; store.
   The MEM[(int *)p_28 + 8B] = _129; store that originally follows
   the MEM[(long long int *)p_28 + 8B] = 0; would now be before it,
   so we need to refuse merging MEM[(long long int *)p_28 + 8B] = 0;
   into the group.  That way it will be its own store group and will
   not be touched.  If RHS_CODE is INTEGER_CST and there are overlapping
   INTEGER_CST stores, those are mergeable using merge_overlapping,
   so don't return false for those.  */

static bool
check_no_overlap (vec<store_immediate_info *> m_store_info, unsigned int i,
		  enum tree_code rhs_code, unsigned int last_order,
		  unsigned HOST_WIDE_INT end)
{
  unsigned int len = m_store_info.length ();
  for (++i; i < len; ++i)
    {
      store_immediate_info *info = m_store_info[i];
      if (info->bitpos >= end)
	break;
      if (info->order < last_order
	  && (rhs_code != INTEGER_CST || info->rhs_code != INTEGER_CST))
	return false;
    }
  return true;
}
/* Return true if m_store_info[first] and at least one following store
   form a group which stores a try_size bitsize value which is byte swapped
   from a memory load or some value, or identity from some value.
   This uses the bswap pass APIs.  */

bool
imm_store_chain_info::try_coalesce_bswap (merged_store_group *merged_store,
					  unsigned int first,
					  unsigned int try_size)
{
  unsigned int len = m_store_info.length (), last = first;
  unsigned HOST_WIDE_INT width = m_store_info[first]->bitsize;
  if (width >= try_size)
    return false;
  for (unsigned int i = first + 1; i < len; ++i)
    {
      if (m_store_info[i]->bitpos != m_store_info[first]->bitpos + width
	  || m_store_info[i]->ins_stmt == NULL)
	return false;
      width += m_store_info[i]->bitsize;
      if (width >= try_size)
	{
	  last = i;
	  break;
	}
    }
  if (width != try_size)
    return false;

  bool allow_unaligned
    = !STRICT_ALIGNMENT && PARAM_VALUE (PARAM_STORE_MERGING_ALLOW_UNALIGNED);
  /* Punt if the combined store would not be aligned and we need alignment.  */
  if (!allow_unaligned)
    {
      unsigned int align = merged_store->align;
      unsigned HOST_WIDE_INT align_base = merged_store->align_base;
      for (unsigned int i = first + 1; i <= last; ++i)
	{
	  unsigned int this_align;
	  unsigned HOST_WIDE_INT align_bitpos = 0;
	  get_object_alignment_1 (gimple_assign_lhs (m_store_info[i]->stmt),
				  &this_align, &align_bitpos);
	  if (this_align > align)
	    {
	      align = this_align;
	      align_base = m_store_info[i]->bitpos - align_bitpos;
	    }
	}
      unsigned HOST_WIDE_INT align_bitpos
	= (m_store_info[first]->bitpos - align_base) & (align - 1);
      if (align_bitpos)
	align = least_bit_hwi (align_bitpos);
      if (align < try_size)
	return false;
    }

  tree type;
  switch (try_size)
    {
    case 16: type = uint16_type_node; break;
    case 32: type = uint32_type_node; break;
    case 64: type = uint64_type_node; break;
    default: gcc_unreachable ();
    }
  struct symbolic_number n;
  gimple *ins_stmt = NULL;
  int vuse_store = -1;
  unsigned int first_order = merged_store->first_order;
  unsigned int last_order = merged_store->last_order;
  gimple *first_stmt = merged_store->first_stmt;
  gimple *last_stmt = merged_store->last_stmt;
  unsigned HOST_WIDE_INT end = merged_store->start + merged_store->width;
  store_immediate_info *infof = m_store_info[first];

  for (unsigned int i = first; i <= last; ++i)
    {
      store_immediate_info *info = m_store_info[i];
      struct symbolic_number this_n = info->n;
      this_n.type = type;
      if (!this_n.base_addr)
	this_n.range = try_size / BITS_PER_UNIT;
      else
	/* Update vuse in case it has changed by output_merged_stores.  */
	this_n.vuse = gimple_vuse (info->ins_stmt);
      unsigned int bitpos = info->bitpos - infof->bitpos;
      if (!do_shift_rotate (LSHIFT_EXPR, &this_n,
			    BYTES_BIG_ENDIAN
			    ? try_size - info->bitsize - bitpos
			    : bitpos))
	return false;
      if (this_n.base_addr && vuse_store)
	{
	  unsigned int j;
	  for (j = first; j <= last; ++j)
	    if (this_n.vuse == gimple_vuse (m_store_info[j]->stmt))
	      break;
	  if (j > last)
	    {
	      if (vuse_store == 1)
		return false;
	      vuse_store = 0;
	    }
	}
      if (i == first)
	{
	  n = this_n;
	  ins_stmt = info->ins_stmt;
	}
      else
	{
	  if (n.base_addr && n.vuse != this_n.vuse)
	    {
	      if (vuse_store == 0)
		return false;
	      vuse_store = 1;
	    }
	  if (info->order > last_order)
	    {
	      last_order = info->order;
	      last_stmt = info->stmt;
	    }
	  else if (info->order < first_order)
	    {
	      first_order = info->order;
	      first_stmt = info->stmt;
	    }
	  end = MAX (end, info->bitpos + info->bitsize);

	  ins_stmt = perform_symbolic_merge (ins_stmt, &n, info->ins_stmt,
					     &this_n, &n);
	  if (ins_stmt == NULL)
	    return false;
	}
    }

  uint64_t cmpxchg, cmpnop;
  find_bswap_or_nop_finalize (&n, &cmpxchg, &cmpnop);

  /* A complete byte swap should make the symbolic number to start with
     the largest digit in the highest order byte.  Unchanged symbolic
     number indicates a read with same endianness as target architecture.  */
  if (n.n != cmpnop && n.n != cmpxchg)
    return false;

  if (n.base_addr == NULL_TREE && !is_gimple_val (n.src))
    return false;

  if (!check_no_overlap (m_store_info, last, LROTATE_EXPR, last_order, end))
    return false;

  /* Don't handle memory copy this way if normal non-bswap processing
     would handle it too.  */
  if (n.n == cmpnop && (unsigned) n.n_ops == last - first + 1)
    {
      unsigned int i;
      for (i = first; i <= last; ++i)
	if (m_store_info[i]->rhs_code != MEM_REF)
	  break;
      if (i == last + 1)
	return false;
    }

  if (n.n == cmpxchg)
    switch (try_size)
      {
      case 16:
	/* Will emit LROTATE_EXPR.  */
	break;
      case 32:
	if (builtin_decl_explicit_p (BUILT_IN_BSWAP32)
	    && optab_handler (bswap_optab, SImode) != CODE_FOR_nothing)
	  break;
	return false;
      case 64:
	if (builtin_decl_explicit_p (BUILT_IN_BSWAP64)
	    && optab_handler (bswap_optab, DImode) != CODE_FOR_nothing)
	  break;
	return false;
      default:
	gcc_unreachable ();
      }

  if (!allow_unaligned && n.base_addr)
    {
      unsigned int align = get_object_alignment (n.src);
      if (align < try_size)
	return false;
    }

  /* If each load has vuse of the corresponding store, need to verify
     the loads can be sunk right before the last store.  */
  if (vuse_store == 1)
    {
      auto_vec<tree, 64> refs;
      for (unsigned int i = first; i <= last; ++i)
	gather_bswap_load_refs (&refs,
				gimple_assign_rhs1 (m_store_info[i]->stmt));

      unsigned int i;
      tree ref;
      FOR_EACH_VEC_ELT (refs, i, ref)
	if (stmts_may_clobber_ref_p (first_stmt, last_stmt, ref))
	  return false;
      n.vuse = NULL_TREE;
    }

  infof->n = n;
  infof->ins_stmt = ins_stmt;
  for (unsigned int i = first; i <= last; ++i)
    {
      m_store_info[i]->rhs_code = n.n == cmpxchg ? LROTATE_EXPR : NOP_EXPR;
      m_store_info[i]->ops[0].base_addr = NULL_TREE;
      m_store_info[i]->ops[1].base_addr = NULL_TREE;
      if (i != first)
	merged_store->merge_into (m_store_info[i]);
    }

  return true;
}
/* Go through the candidate stores recorded in m_store_info and merge them
   into merged_store_group objects recorded into m_merged_store_groups
   representing the widened stores.  Return true if coalescing was successful
   and the number of widened stores is fewer than the original number
   of stores.  */

bool
imm_store_chain_info::coalesce_immediate_stores ()
{
  /* Anything less can't be processed.  */
  if (m_store_info.length () < 2)
    return false;

  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, "Attempting to coalesce %u stores in chain\n",
	     m_store_info.length ());

  store_immediate_info *info;
  unsigned int i, ignore = 0;

  /* Order the stores by the bitposition they write to.  */
  m_store_info.qsort (sort_by_bitpos);

  info = m_store_info[0];
  merged_store_group *merged_store = new merged_store_group (info);
  if (dump_file && (dump_flags & TDF_DETAILS))
    fputs ("New store group\n", dump_file);

  FOR_EACH_VEC_ELT (m_store_info, i, info)
    {
      if (i <= ignore)
	goto done;

      /* First try to handle group of stores like:
	 p[0] = data >> 24;
	 p[1] = data >> 16;
	 p[2] = data >> 8;
	 p[3] = data;
	 using the bswap framework.  */
      if (info->bitpos == merged_store->start + merged_store->width
	  && merged_store->stores.length () == 1
	  && merged_store->stores[0]->ins_stmt != NULL
	  && info->ins_stmt != NULL)
	{
	  unsigned int try_size;
	  for (try_size = 64; try_size >= 16; try_size >>= 1)
	    if (try_coalesce_bswap (merged_store, i - 1, try_size))
	      break;

	  if (try_size >= 16)
	    {
	      ignore = i + merged_store->stores.length () - 1;
	      m_merged_store_groups.safe_push (merged_store);
	      if (ignore < m_store_info.length ())
		merged_store = new merged_store_group (m_store_info[ignore]);
	      else
		merged_store = NULL;
	      goto done;
	    }
	}

      /* |---store 1---|
	       |---store 2---|
	 Overlapping stores.  */
      if (IN_RANGE (info->bitpos, merged_store->start,
		    merged_store->start + merged_store->width - 1))
	{
	  /* Only allow overlapping stores of constants.  */
	  if (info->rhs_code == INTEGER_CST
	      && merged_store->stores[0]->rhs_code == INTEGER_CST)
	    {
	      merged_store->merge_overlapping (info);
	      goto done;
	    }
	}
      /* |---store 1---||---store 2---|
	 This store is consecutive to the previous one.
	 Merge it into the current store group.  There can be gaps in between
	 the stores, but there can't be gaps in between bitregions.  */
      else if (info->bitregion_start <= merged_store->bitregion_end
	       && info->rhs_code != LROTATE_EXPR
	       && (info->rhs_code == merged_store->stores[0]->rhs_code
		   || (info->rhs_code == INTEGER_CST
		       && merged_store->stores[0]->rhs_code == BIT_INSERT_EXPR)
		   || (info->rhs_code == BIT_INSERT_EXPR
		       && merged_store->stores[0]->rhs_code == INTEGER_CST)))
	{
	  store_immediate_info *infof = merged_store->stores[0];

	  /* All the rhs_code ops that take 2 operands are commutative,
	     swap the operands if it could make the operands compatible.  */
	  if (infof->ops[0].base_addr
	      && infof->ops[1].base_addr
	      && info->ops[0].base_addr
	      && info->ops[1].base_addr
	      && known_eq (info->ops[1].bitpos - infof->ops[0].bitpos,
			   info->bitpos - infof->bitpos)
	      && operand_equal_p (info->ops[1].base_addr,
				  infof->ops[0].base_addr, 0))
	    {
	      std::swap (info->ops[0], info->ops[1]);
	      info->ops_swapped_p = true;
	    }
	  if ((infof->ops[0].base_addr
	       ? compatible_load_p (merged_store, info, base_addr, 0)
	       : !info->ops[0].base_addr)
	      && (infof->ops[1].base_addr
		  ? compatible_load_p (merged_store, info, base_addr, 1)
		  : !info->ops[1].base_addr)
	      && check_no_overlap (m_store_info, i, info->rhs_code,
				   MAX (merged_store->last_order,
					info->order),
				   MAX (merged_store->start
					+ merged_store->width,
					info->bitpos + info->bitsize)))
	    {
	      merged_store->merge_into (info);
	      goto done;
	    }
	}

      /* |---store 1---| <gap> |---store 2---|.
	 Gap between stores or the rhs not compatible.  Start a new group.  */

      /* Try to apply all the stores recorded for the group to determine
	 the bitpattern they write and discard it if that fails.
	 This will also reject single-store groups.  */
      if (merged_store->apply_stores ())
	m_merged_store_groups.safe_push (merged_store);
      else
	delete merged_store;

      merged_store = new merged_store_group (info);
      if (dump_file && (dump_flags & TDF_DETAILS))
	fputs ("New store group\n", dump_file);

    done:
      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  fprintf (dump_file, "Store %u:\nbitsize:" HOST_WIDE_INT_PRINT_DEC
		   " bitpos:" HOST_WIDE_INT_PRINT_DEC " val:",
		   i, info->bitsize, info->bitpos);
	  print_generic_expr (dump_file, gimple_assign_rhs1 (info->stmt));
	  fputc ('\n', dump_file);
	}
    }

  /* Record or discard the last store group.  */
  if (merged_store)
    {
      if (merged_store->apply_stores ())
	m_merged_store_groups.safe_push (merged_store);
      else
	delete merged_store;
    }

  gcc_assert (m_merged_store_groups.length () <= m_store_info.length ());

  bool success
    = !m_merged_store_groups.is_empty ()
      && m_merged_store_groups.length () < m_store_info.length ();

  if (success && dump_file)
    fprintf (dump_file, "Coalescing successful!\nMerged into %u stores\n",
	     m_merged_store_groups.length ());

  return success;
}
/* Return the type to use for the merged stores or loads described by STMTS.
   This is needed to get the alias sets right.  If IS_LOAD, look for rhs,
   otherwise lhs.  Additionally set *CLIQUEP and *BASEP to MR_DEPENDENCE_*
   of the MEM_REFs if any.  */

static tree
get_alias_type_for_stmts (vec<gimple *> &stmts, bool is_load,
			  unsigned short *cliquep, unsigned short *basep)
{
  gimple *stmt;
  unsigned int i;
  tree type = NULL_TREE;
  tree ret = NULL_TREE;
  *cliquep = 0;
  *basep = 0;

  FOR_EACH_VEC_ELT (stmts, i, stmt)
    {
      tree ref = is_load ? gimple_assign_rhs1 (stmt)
			 : gimple_assign_lhs (stmt);
      tree type1 = reference_alias_ptr_type (ref);
      tree base = get_base_address (ref);

      if (i == 0)
	{
	  if (TREE_CODE (base) == MEM_REF)
	    {
	      *cliquep = MR_DEPENDENCE_CLIQUE (base);
	      *basep = MR_DEPENDENCE_BASE (base);
	    }
	  type = type1;
	  continue;
	}
      if (!alias_ptr_types_compatible_p (type, type1))
	ret = ptr_type_node;
      if (TREE_CODE (base) != MEM_REF
	  || *cliquep != MR_DEPENDENCE_CLIQUE (base)
	  || *basep != MR_DEPENDENCE_BASE (base))
	{
	  *cliquep = 0;
	  *basep = 0;
	}
    }
  return ret ? ret : type;
}
/* Return the location_t information we can find among the statements
   in STMTS.  */

static location_t
get_location_for_stmts (vec<gimple *> &stmts)
{
  gimple *stmt;
  unsigned int i;

  FOR_EACH_VEC_ELT (stmts, i, stmt)
    if (gimple_has_location (stmt))
      return gimple_location (stmt);

  return UNKNOWN_LOCATION;
}
/* Used to describe a store resulting from splitting a wide store in smaller
   regularly-sized stores in split_group.  */

struct split_store
{
  unsigned HOST_WIDE_INT bytepos;
  unsigned HOST_WIDE_INT size;
  unsigned HOST_WIDE_INT align;
  auto_vec<store_immediate_info *> orig_stores;
  /* True if there is a single orig stmt covering the whole split store.  */
  bool orig;
  split_store (unsigned HOST_WIDE_INT, unsigned HOST_WIDE_INT,
	       unsigned HOST_WIDE_INT);
};

/* Simple constructor.  */

split_store::split_store (unsigned HOST_WIDE_INT bp,
			  unsigned HOST_WIDE_INT sz,
			  unsigned HOST_WIDE_INT al)
  : bytepos (bp), size (sz), align (al), orig (false)
{
  orig_stores.create (0);
}
/* Record all stores in GROUP that write to the region starting at BITPOS and
   is of size BITSIZE.  Record infos for such statements in STORES if
   non-NULL.  The stores in GROUP must be sorted by bitposition.  Return INFO
   if there is exactly one original store in the range.  */

static store_immediate_info *
find_constituent_stores (struct merged_store_group *group,
			 vec<store_immediate_info *> *stores,
			 unsigned int *first,
			 unsigned HOST_WIDE_INT bitpos,
			 unsigned HOST_WIDE_INT bitsize)
{
  store_immediate_info *info, *ret = NULL;
  unsigned int i;
  bool second = false;
  bool update_first = true;
  unsigned HOST_WIDE_INT end = bitpos + bitsize;
  for (i = *first; group->stores.iterate (i, &info); ++i)
    {
      unsigned HOST_WIDE_INT stmt_start = info->bitpos;
      unsigned HOST_WIDE_INT stmt_end = stmt_start + info->bitsize;
      if (stmt_end <= bitpos)
	{
	  /* BITPOS passed to this function never decreases from within the
	     same split_group call, so optimize and don't scan info records
	     which are known to end before or at BITPOS next time.
	     Only do it if all stores before this one also pass this.  */
	  if (update_first)
	    *first = i + 1;
	  continue;
	}
      else
	update_first = false;

      /* The stores in GROUP are ordered by bitposition so if we're past
	 the region for this group return early.  */
      if (stmt_start >= end)
	return ret;

      if (stores)
	{
	  stores->safe_push (info);
	  if (ret)
	    {
	      ret = NULL;
	      second = true;
	    }
	}
      else if (ret)
	return NULL;
      if (!second)
	ret = info;
    }
  return ret;
}
/* Return how many SSA_NAMEs used to compute value to store in the INFO
   store have multiple uses.  If any SSA_NAME has multiple uses, also
   count statements needed to compute it.  */

static unsigned
count_multiple_uses (store_immediate_info *info)
{
  gimple *stmt = info->stmt;
  unsigned ret = 0;
  switch (info->rhs_code)
    {
    case INTEGER_CST:
      return 0;
    case BIT_AND_EXPR:
    case BIT_IOR_EXPR:
    case BIT_XOR_EXPR:
      if (info->bit_not_p)
	{
	  if (!has_single_use (gimple_assign_rhs1 (stmt)))
	    ret = 1; /* Fall through below to return
			the BIT_NOT_EXPR stmt and then
			BIT_{AND,IOR,XOR}_EXPR and anything it
			uses.  */
	  else
	    /* stmt is after this the BIT_NOT_EXPR.  */
	    stmt = SSA_NAME_DEF_STMT (gimple_assign_rhs1 (stmt));
	}
      if (!has_single_use (gimple_assign_rhs1 (stmt)))
	{
	  ret += 1 + info->ops[0].bit_not_p;
	  if (info->ops[1].base_addr)
	    ret += 1 + info->ops[1].bit_not_p;
	  return ret + 1;
	}
      stmt = SSA_NAME_DEF_STMT (gimple_assign_rhs1 (stmt));
      /* stmt is now the BIT_*_EXPR.  */
      if (!has_single_use (gimple_assign_rhs1 (stmt)))
	ret += 1 + info->ops[info->ops_swapped_p].bit_not_p;
      else if (info->ops[info->ops_swapped_p].bit_not_p)
	{
	  gimple *stmt2 = SSA_NAME_DEF_STMT (gimple_assign_rhs1 (stmt));
	  if (!has_single_use (gimple_assign_rhs1 (stmt2)))
	    ++ret;
	}
      if (info->ops[1].base_addr == NULL_TREE)
	{
	  gcc_checking_assert (!info->ops_swapped_p);
	  return ret;
	}
      if (!has_single_use (gimple_assign_rhs2 (stmt)))
	ret += 1 + info->ops[1 - info->ops_swapped_p].bit_not_p;
      else if (info->ops[1 - info->ops_swapped_p].bit_not_p)
	{
	  gimple *stmt2 = SSA_NAME_DEF_STMT (gimple_assign_rhs2 (stmt));
	  if (!has_single_use (gimple_assign_rhs1 (stmt2)))
	    ++ret;
	}
      return ret;
    case MEM_REF:
      if (!has_single_use (gimple_assign_rhs1 (stmt)))
	return 1 + info->ops[0].bit_not_p;
      else if (info->ops[0].bit_not_p)
	{
	  stmt = SSA_NAME_DEF_STMT (gimple_assign_rhs1 (stmt));
	  if (!has_single_use (gimple_assign_rhs1 (stmt)))
	    return 1;
	}
      return 0;
    case BIT_INSERT_EXPR:
      return has_single_use (gimple_assign_rhs1 (stmt)) ? 0 : 1;
    default:
      gcc_unreachable ();
    }
}
/* Split a merged store described by GROUP by populating the SPLIT_STORES
   vector (if non-NULL) with split_store structs describing the byte offset
   (from the base), the bit size and alignment of each store as well as the
   original statements involved in each such split group.
   This is to separate the splitting strategy from the statement
   building/emission/linking done in output_merged_store.
   Return number of new stores.
   If ALLOW_UNALIGNED_STORE is false, then all stores must be aligned.
   If ALLOW_UNALIGNED_LOAD is false, then all loads must be aligned.
   If SPLIT_STORES is NULL, it is just a dry run to count number of
   new stores.  */

static unsigned int
split_group (merged_store_group *group, bool allow_unaligned_store,
	     bool allow_unaligned_load,
	     vec<struct split_store *> *split_stores,
	     unsigned *total_orig,
	     unsigned *total_new)
{
  unsigned HOST_WIDE_INT pos = group->bitregion_start;
  unsigned HOST_WIDE_INT size = group->bitregion_end - pos;
  unsigned HOST_WIDE_INT bytepos = pos / BITS_PER_UNIT;
  unsigned HOST_WIDE_INT group_align = group->align;
  unsigned HOST_WIDE_INT align_base = group->align_base;
  unsigned HOST_WIDE_INT group_load_align = group_align;
  bool any_orig = false;

  gcc_assert ((size % BITS_PER_UNIT == 0) && (pos % BITS_PER_UNIT == 0));

  if (group->stores[0]->rhs_code == LROTATE_EXPR
      || group->stores[0]->rhs_code == NOP_EXPR)
    {
      /* For bswap framework using sets of stores, all the checking
	 has been done earlier in try_coalesce_bswap and needs to be
	 emitted as a single store.  */
      if (total_orig)
	{
	  /* Avoid the old/new stmt count heuristics.  It should be
	     always beneficial.  */
	  total_new[0] = 1;
	  total_orig[0] = 2;
	}

      if (split_stores)
	{
	  unsigned HOST_WIDE_INT align_bitpos
	    = (group->start - align_base) & (group_align - 1);
	  unsigned HOST_WIDE_INT align = group_align;
	  if (align_bitpos)
	    align = least_bit_hwi (align_bitpos);
	  bytepos = group->start / BITS_PER_UNIT;
	  struct split_store *store
	    = new split_store (bytepos, group->width, align);
	  unsigned int first = 0;
	  find_constituent_stores (group, &store->orig_stores,
				   &first, group->start, group->width);
	  split_stores->safe_push (store);
	}

      return 1;
    }

  unsigned int ret = 0, first = 0;
  unsigned HOST_WIDE_INT try_pos = bytepos;

  if (total_orig)
    {
      unsigned int i;
      store_immediate_info *info = group->stores[0];

      total_new[0] = 0;
      total_orig[0] = 1; /* The orig store.  */
      info = group->stores[0];
      if (info->ops[0].base_addr)
	total_orig[0]++;
      if (info->ops[1].base_addr)
	total_orig[0]++;
      switch (info->rhs_code)
	{
	case BIT_AND_EXPR:
	case BIT_IOR_EXPR:
	case BIT_XOR_EXPR:
	  total_orig[0]++; /* The orig BIT_*_EXPR stmt.  */
	  break;
	default:
	  break;
	}
      total_orig[0] *= group->stores.length ();

      FOR_EACH_VEC_ELT (group->stores, i, info)
	{
	  total_new[0] += count_multiple_uses (info);
	  total_orig[0] += (info->bit_not_p
			    + info->ops[0].bit_not_p
			    + info->ops[1].bit_not_p);
	}
    }

  if (!allow_unaligned_load)
    for (int i = 0; i < 2; ++i)
      if (group->load_align[i])
	group_load_align = MIN (group_load_align, group->load_align[i]);

  while (size > 0)
    {
      if ((allow_unaligned_store || group_align <= BITS_PER_UNIT)
	  && group->mask[try_pos - bytepos] == (unsigned char) ~0U)
	{
	  /* Skip padding bytes.  */
	  ++try_pos;
	  size -= BITS_PER_UNIT;
	  continue;
	}

      unsigned HOST_WIDE_INT try_bitpos = try_pos * BITS_PER_UNIT;
      unsigned int try_size = MAX_STORE_BITSIZE, nonmasked;
      unsigned HOST_WIDE_INT align_bitpos
	= (try_bitpos - align_base) & (group_align - 1);
      unsigned HOST_WIDE_INT align = group_align;
      if (align_bitpos)
	align = least_bit_hwi (align_bitpos);
      if (!allow_unaligned_store)
	try_size = MIN (try_size, align);
      if (!allow_unaligned_load)
	{
	  /* If we can't do or don't want to do unaligned stores
	     as well as loads, we need to take the loads into account
	     as well.  */
	  unsigned HOST_WIDE_INT load_align = group_load_align;
	  align_bitpos = (try_bitpos - align_base) & (load_align - 1);
	  if (align_bitpos)
	    load_align = least_bit_hwi (align_bitpos);
	  for (int i = 0; i < 2; ++i)
	    if (group->load_align[i])
	      {
		align_bitpos
		  = known_alignment (try_bitpos
				     - group->stores[0]->bitpos
				     + group->stores[0]->ops[i].bitpos
				     - group->load_align_base[i]);
		if (align_bitpos & (group_load_align - 1))
		  {
		    unsigned HOST_WIDE_INT a = least_bit_hwi (align_bitpos);
		    load_align = MIN (load_align, a);
		  }
	      }
	  try_size = MIN (try_size, load_align);
	}
      store_immediate_info *info
	= find_constituent_stores (group, NULL, &first, try_bitpos, try_size);
      if (info)
	{
	  /* If there is just one original statement for the range, see if
	     we can just reuse the original store which could be even larger
	     than try_size.  */
	  unsigned HOST_WIDE_INT stmt_end
	    = ROUND_UP (info->bitpos + info->bitsize, BITS_PER_UNIT);
	  info = find_constituent_stores (group, NULL, &first, try_bitpos,
					  stmt_end - try_bitpos);
	  if (info && info->bitpos >= try_bitpos)
	    {
	      try_size = stmt_end - try_bitpos;
	      goto found;
	    }
	}

      /* Approximate store bitsize for the case when there are no padding
	 bits.  */
      while (try_size > size)
	try_size /= 2;
      /* Now look for whole padding bytes at the end of that bitsize.  */
      for (nonmasked = try_size / BITS_PER_UNIT; nonmasked > 0; --nonmasked)
	if (group->mask[try_pos - bytepos + nonmasked - 1]
	    != (unsigned char) ~0U)
	  break;
      if (nonmasked == 0)
	{
	  /* If entire try_size range is padding, skip it.  */
	  try_pos += try_size / BITS_PER_UNIT;
	  size -= try_size;
	  continue;
	}
      /* Otherwise try to decrease try_size if second half, last 3 quarters
	 etc. are padding.  */
      nonmasked *= BITS_PER_UNIT;
      while (nonmasked <= try_size / 2)
	try_size /= 2;
      if (!allow_unaligned_store && group_align > BITS_PER_UNIT)
	{
	  /* Now look for whole padding bytes at the start of that bitsize.  */
	  unsigned int try_bytesize = try_size / BITS_PER_UNIT, masked;
	  for (masked = 0; masked < try_bytesize; ++masked)
	    if (group->mask[try_pos - bytepos + masked] != (unsigned char) ~0U)
	      break;
	  masked *= BITS_PER_UNIT;
	  gcc_assert (masked < try_size);
	  if (masked >= try_size / 2)
	    {
	      while (masked >= try_size / 2)
		{
		  try_size /= 2;
		  try_pos += try_size / BITS_PER_UNIT;
		  size -= try_size;
		  masked -= try_size;
		}
	      /* Need to recompute the alignment, so just retry at the new
		 position.  */
	      continue;
	    }
	}

    found:
      ++ret;

      if (split_stores)
	{
	  struct split_store *store
	    = new split_store (try_pos, try_size, align);
	  info = find_constituent_stores (group, &store->orig_stores,
					  &first, try_bitpos, try_size);
	  if (info
	      && info->bitpos >= try_bitpos
	      && info->bitpos + info->bitsize <= try_bitpos + try_size)
	    {
	      store->orig = true;
	      any_orig = true;
	    }
	  split_stores->safe_push (store);
	}

      try_pos += try_size / BITS_PER_UNIT;
      size -= try_size;
    }

  if (total_orig)
    {
      unsigned int i;
      struct split_store *store;
      /* If we are reusing some original stores and any of the
	 original SSA_NAMEs had multiple uses, we need to subtract
	 those now before we add the new ones.  */
      if (total_new[0] && any_orig)
	{
	  FOR_EACH_VEC_ELT (*split_stores, i, store)
	    if (store->orig)
	      total_new[0] -= count_multiple_uses (store->orig_stores[0]);
	}
      total_new[0] += ret; /* The new store.  */
      store_immediate_info *info = group->stores[0];
      if (info->ops[0].base_addr)
	total_new[0] += ret;
      if (info->ops[1].base_addr)
	total_new[0] += ret;
      switch (info->rhs_code)
	{
	case BIT_AND_EXPR:
	case BIT_IOR_EXPR:
	case BIT_XOR_EXPR:
	  total_new[0] += ret; /* The new BIT_*_EXPR stmt.  */
	  break;
	default:
	  break;
	}
      FOR_EACH_VEC_ELT (*split_stores, i, store)
	{
	  unsigned int j;
	  bool bit_not_p[3] = { false, false, false };
	  /* If all orig_stores have certain bit_not_p set, then
	     we'd use a BIT_NOT_EXPR stmt and need to account for it.
	     If some orig_stores have certain bit_not_p set, then
	     we'd use a BIT_XOR_EXPR with a mask and need to account for
	     it.  */
	  FOR_EACH_VEC_ELT (store->orig_stores, j, info)
	    {
	      if (info->ops[0].bit_not_p)
		bit_not_p[0] = true;
	      if (info->ops[1].bit_not_p)
		bit_not_p[1] = true;
	      if (info->bit_not_p)
		bit_not_p[2] = true;
	    }
	  total_new[0] += bit_not_p[0] + bit_not_p[1] + bit_not_p[2];
	}
    }

  return ret;
}
/* Return the operation through which the operand IDX (if < 2) or
   result (IDX == 2) should be inverted.  If NOP_EXPR, no inversion
   is done, if BIT_NOT_EXPR, all bits are inverted, if BIT_XOR_EXPR,
   the bits should be xored with mask.  */

static enum tree_code
invert_op (split_store *split_store, int idx, tree int_type, tree &mask)
{
  unsigned int i;
  store_immediate_info *info;
  unsigned int cnt = 0;
  bool any_paddings = false;
  FOR_EACH_VEC_ELT (split_store->orig_stores, i, info)
    {
      bool bit_not_p = idx < 2 ? info->ops[idx].bit_not_p : info->bit_not_p;
      if (bit_not_p)
	{
	  ++cnt;
	  tree lhs = gimple_assign_lhs (info->stmt);
	  if (INTEGRAL_TYPE_P (TREE_TYPE (lhs))
	      && TYPE_PRECISION (TREE_TYPE (lhs)) < info->bitsize)
	    any_paddings = true;
	}
    }
  mask = NULL_TREE;
  if (cnt == 0)
    return NOP_EXPR;
  if (cnt == split_store->orig_stores.length () && !any_paddings)
    return BIT_NOT_EXPR;

  unsigned HOST_WIDE_INT try_bitpos = split_store->bytepos * BITS_PER_UNIT;
  unsigned buf_size = split_store->size / BITS_PER_UNIT;
  unsigned char *buf
    = XALLOCAVEC (unsigned char, buf_size);
  memset (buf, ~0U, buf_size);
  FOR_EACH_VEC_ELT (split_store->orig_stores, i, info)
    {
      bool bit_not_p = idx < 2 ? info->ops[idx].bit_not_p : info->bit_not_p;
      if (!bit_not_p)
	continue;
      /* Clear regions with bit_not_p and invert afterwards, rather than
	 clear regions with !bit_not_p, so that gaps in between stores aren't
	 set in the mask.  */
      unsigned HOST_WIDE_INT bitsize = info->bitsize;
      unsigned HOST_WIDE_INT prec = bitsize;
      unsigned int pos_in_buffer = 0;
      if (any_paddings)
	{
	  tree lhs = gimple_assign_lhs (info->stmt);
	  if (INTEGRAL_TYPE_P (TREE_TYPE (lhs))
	      && TYPE_PRECISION (TREE_TYPE (lhs)) < bitsize)
	    prec = TYPE_PRECISION (TREE_TYPE (lhs));
	}
      if (info->bitpos < try_bitpos)
	{
	  gcc_assert (info->bitpos + bitsize > try_bitpos);
	  if (!BYTES_BIG_ENDIAN)
	    {
	      if (prec <= try_bitpos - info->bitpos)
		continue;
	      prec -= try_bitpos - info->bitpos;
	    }
	  bitsize -= try_bitpos - info->bitpos;
	  if (BYTES_BIG_ENDIAN && prec > bitsize)
	    prec = bitsize;
	}
      else
	pos_in_buffer = info->bitpos - try_bitpos;
      if (prec < bitsize)
	{
	  /* If this is a bool inversion, invert just the least significant
	     prec bits rather than all bits of it.  */
	  if (BYTES_BIG_ENDIAN)
	    {
	      pos_in_buffer += bitsize - prec;
	      if (pos_in_buffer >= split_store->size)
		continue;
	    }
	  bitsize = prec;
	}
      if (pos_in_buffer + bitsize > split_store->size)
	bitsize = split_store->size - pos_in_buffer;
      unsigned char *p = buf + (pos_in_buffer / BITS_PER_UNIT);
      if (BYTES_BIG_ENDIAN)
	clear_bit_region_be (p, (BITS_PER_UNIT - 1
				 - (pos_in_buffer % BITS_PER_UNIT)), bitsize);
      else
	clear_bit_region (p, pos_in_buffer % BITS_PER_UNIT, bitsize);
    }
  for (unsigned int i = 0; i < buf_size; ++i)
    buf[i] = ~buf[i];
  mask = native_interpret_expr (int_type, buf, buf_size);
  return BIT_XOR_EXPR;
}
/* Given a merged store group GROUP output the widened version of it.
   The store chain is against the base object BASE.
   Try store sizes of at most MAX_STORE_BITSIZE bits wide and don't output
   unaligned stores for STRICT_ALIGNMENT targets or if it's too expensive.
   Make sure that the number of statements output is less than the number of
   original statements.  If a better sequence is possible emit it and
   return true.  */

bool
imm_store_chain_info::output_merged_store (merged_store_group *group)
{
  split_store *split_store;
  unsigned int i;
  unsigned HOST_WIDE_INT start_byte_pos
    = group->bitregion_start / BITS_PER_UNIT;

  unsigned int orig_num_stmts = group->stores.length ();
  if (orig_num_stmts < 2)
    return false;

  auto_vec<struct split_store *, 32> split_stores;
  bool allow_unaligned_store
    = !STRICT_ALIGNMENT && PARAM_VALUE (PARAM_STORE_MERGING_ALLOW_UNALIGNED);
  bool allow_unaligned_load = allow_unaligned_store;
  if (allow_unaligned_store)
    {
      /* If unaligned stores are allowed, see how many stores we'd emit
	 for unaligned and how many stores we'd emit for aligned stores.
	 Only use unaligned stores if it allows fewer stores than aligned.  */
      unsigned aligned_cnt
	= split_group (group, false, allow_unaligned_load, NULL, NULL, NULL);
      unsigned unaligned_cnt
	= split_group (group, true, allow_unaligned_load, NULL, NULL, NULL);
      if (aligned_cnt <= unaligned_cnt)
	allow_unaligned_store = false;
    }
  unsigned total_orig, total_new;
  split_group (group, allow_unaligned_store, allow_unaligned_load,
	       &split_stores, &total_orig, &total_new);

  if (split_stores.length () >= orig_num_stmts)
    {
      /* We didn't manage to reduce the number of statements.  Bail out.  */
      if (dump_file && (dump_flags & TDF_DETAILS))
	fprintf (dump_file, "Exceeded original number of stmts (%u)."
			    "  Not profitable to emit new sequence.\n",
		 orig_num_stmts);
      FOR_EACH_VEC_ELT (split_stores, i, split_store)
	delete split_store;
      return false;
    }
  if (total_orig <= total_new)
    {
      /* If number of estimated new statements is above estimated original
	 statements, bail out too.  */
      if (dump_file && (dump_flags & TDF_DETAILS))
	fprintf (dump_file, "Estimated number of original stmts (%u)"
			    " not larger than estimated number of new"
			    " stmts (%u).\n",
		 total_orig, total_new);
      FOR_EACH_VEC_ELT (split_stores, i, split_store)
	delete split_store;
      return false;
    }

  gimple_stmt_iterator last_gsi = gsi_for_stmt (group->last_stmt);
  gimple_seq seq = NULL;
  tree last_vdef, new_vuse;
  last_vdef = gimple_vdef (group->last_stmt);
  new_vuse = gimple_vuse (group->last_stmt);
  tree bswap_res = NULL_TREE;

  if (group->stores[0]->rhs_code == LROTATE_EXPR
      || group->stores[0]->rhs_code == NOP_EXPR)
    {
      tree fndecl = NULL_TREE, bswap_type = NULL_TREE, load_type;
      gimple *ins_stmt = group->stores[0]->ins_stmt;
      struct symbolic_number *n = &group->stores[0]->n;
      bool bswap = group->stores[0]->rhs_code == LROTATE_EXPR;

      switch (n->range)
	{
	case 16:
	  load_type = bswap_type = uint16_type_node;
	  break;
	case 32:
	  load_type = uint32_type_node;
	  if (bswap)
	    {
	      fndecl = builtin_decl_explicit (BUILT_IN_BSWAP32);
	      bswap_type = TREE_VALUE (TYPE_ARG_TYPES (TREE_TYPE (fndecl)));
	    }
	  break;
	case 64:
	  load_type = uint64_type_node;
	  if (bswap)
	    {
	      fndecl = builtin_decl_explicit (BUILT_IN_BSWAP64);
	      bswap_type = TREE_VALUE (TYPE_ARG_TYPES (TREE_TYPE (fndecl)));
	    }
	  break;
	default:
	  gcc_unreachable ();
	}

      /* If the loads have each vuse of the corresponding store,
	 we've checked the aliasing already in try_coalesce_bswap and
	 we want to sink the needed load into seq.  So need to use new_vuse
	 on the load.  */
      if (n->base_addr)
	{
	  if (n->vuse == NULL)
	    {
	      n->vuse = new_vuse;
	      ins_stmt = NULL;
	    }
	  else
	    /* Update vuse in case it has changed by output_merged_stores.  */
	    n->vuse = gimple_vuse (ins_stmt);
	}
      bswap_res = bswap_replace (gsi_start (seq), ins_stmt, fndecl,
				 bswap_type, load_type, n, bswap);
      gcc_assert (bswap_res);
    }

  gimple *stmt = NULL;
  auto_vec<gimple *, 32> orig_stmts;
  gimple_seq this_seq;
  tree addr = force_gimple_operand_1 (unshare_expr (base_addr), &this_seq,
				      is_gimple_mem_ref_addr, NULL_TREE);
  gimple_seq_add_seq_without_update (&seq, this_seq);

  tree load_addr[2] = { NULL_TREE, NULL_TREE };
  gimple_seq load_seq[2] = { NULL, NULL };
  gimple_stmt_iterator load_gsi[2] = { gsi_none (), gsi_none () };
  for (int j = 0; j < 2; ++j)
    {
      store_operand_info &op = group->stores[0]->ops[j];
      if (op.base_addr == NULL_TREE)
	continue;

      store_immediate_info *infol = group->stores.last ();
      if (gimple_vuse (op.stmt) == gimple_vuse (infol->ops[j].stmt))
	{
	  /* We can't pick the location randomly; while we've verified
	     all the loads have the same vuse, they can be still in different
	     basic blocks and we need to pick the one from the last bb:
	     inspired by PR81983 testcase:
	     if (x)
	       q[0] = 1;
	     q[1] = 2;
	     otherwise if we put the wider load at the q[0] load, we might
	     segfault if q[1] is not mapped.  */
	  basic_block bb = gimple_bb (op.stmt);
	  gimple *ostmt = op.stmt;
	  store_immediate_info *info;
	  FOR_EACH_VEC_ELT (group->stores, i, info)
	    {
	      gimple *tstmt = info->ops[j].stmt;
	      basic_block tbb = gimple_bb (tstmt);
	      if (dominated_by_p (CDI_DOMINATORS, tbb, bb))
		{
		  ostmt = tstmt;
		  bb = tbb;
		}
	    }
	  load_gsi[j] = gsi_for_stmt (ostmt);
	  load_addr[j]
	    = force_gimple_operand_1 (unshare_expr (op.base_addr),
				      &load_seq[j], is_gimple_mem_ref_addr,
				      NULL_TREE);
	}
      else if (operand_equal_p (base_addr, op.base_addr, 0))
	load_addr[j] = addr;
      else
	{
	  load_addr[j]
	    = force_gimple_operand_1 (unshare_expr (op.base_addr),
				      &this_seq, is_gimple_mem_ref_addr,
				      NULL_TREE);
	  gimple_seq_add_seq_without_update (&seq, this_seq);
	}
    }

  FOR_EACH_VEC_ELT (split_stores, i, split_store)
    {
      unsigned HOST_WIDE_INT try_size = split_store->size;
      unsigned HOST_WIDE_INT try_pos = split_store->bytepos;
      unsigned HOST_WIDE_INT try_bitpos = try_pos * BITS_PER_UNIT;
      unsigned HOST_WIDE_INT align = split_store->align;
      tree dest, src;
      location_t loc;
      if (split_store->orig)
	{
	  /* If there is just a single constituent store which covers
	     the whole area, just reuse the lhs and rhs.  */
	  gimple *orig_stmt = split_store->orig_stores[0]->stmt;
	  dest = gimple_assign_lhs (orig_stmt);
	  src = gimple_assign_rhs1 (orig_stmt);
	  loc = gimple_location (orig_stmt);
	}
      else
	{
	  store_immediate_info *info;
	  unsigned short clique, base;
	  unsigned int k;
	  FOR_EACH_VEC_ELT (split_store->orig_stores, k, info)
	    orig_stmts.safe_push (info->stmt);
	  tree offset_type
	    = get_alias_type_for_stmts (orig_stmts, false, &clique, &base);
	  loc = get_location_for_stmts (orig_stmts);
	  orig_stmts.truncate (0);

	  tree int_type = build_nonstandard_integer_type (try_size, UNSIGNED);
	  int_type = build_aligned_type (int_type, align);
	  dest = fold_build2 (MEM_REF, int_type, addr,
			      build_int_cst (offset_type, try_pos));
	  if (TREE_CODE (dest) == MEM_REF)
	    {
	      MR_DEPENDENCE_CLIQUE (dest) = clique;
	      MR_DEPENDENCE_BASE (dest) = base;
	    }

	  tree mask;
	  if (bswap_res)
	    mask = integer_zero_node;
	  else
	    mask = native_interpret_expr (int_type,
					  group->mask + try_pos
					  - start_byte_pos,
					  group->buf_size);

	  tree ops[2];
	  for (int j = 0;
	       j < 1 + (split_store->orig_stores[0]->ops[1].val != NULL_TREE);
	       ++j)
	    {
	      store_operand_info &op = split_store->orig_stores[0]->ops[j];
	      if (bswap_res)
		ops[j] = bswap_res;
	      else if (op.base_addr)
		{
		  FOR_EACH_VEC_ELT (split_store->orig_stores, k, info)
		    orig_stmts.safe_push (info->ops[j].stmt);

		  offset_type = get_alias_type_for_stmts (orig_stmts, true,
							  &clique, &base);
		  location_t load_loc = get_location_for_stmts (orig_stmts);
		  orig_stmts.truncate (0);

		  unsigned HOST_WIDE_INT load_align = group->load_align[j];
		  unsigned HOST_WIDE_INT align_bitpos
		    = known_alignment (try_bitpos
				       - split_store->orig_stores[0]->bitpos
				       + op.bitpos);
		  if (align_bitpos & (load_align - 1))
		    load_align = least_bit_hwi (align_bitpos);

		  tree load_int_type
		    = build_nonstandard_integer_type (try_size, UNSIGNED);
		  load_int_type
		    = build_aligned_type (load_int_type, load_align);

		  poly_uint64 load_pos
		    = exact_div (try_bitpos
				 - split_store->orig_stores[0]->bitpos
				 + op.bitpos,
				 BITS_PER_UNIT);
		  ops[j] = fold_build2 (MEM_REF, load_int_type, load_addr[j],
					build_int_cst (offset_type, load_pos));
		  if (TREE_CODE (ops[j]) == MEM_REF)
		    {
		      MR_DEPENDENCE_CLIQUE (ops[j]) = clique;
		      MR_DEPENDENCE_BASE (ops[j]) = base;
		    }
		  if (!integer_zerop (mask))
		    /* The load might load some bits (that will be masked off
		       later on) uninitialized, avoid -W*uninitialized
		       warnings in that case.  */
		    TREE_NO_WARNING (ops[j]) = 1;

		  stmt = gimple_build_assign (make_ssa_name (int_type),
					      ops[j]);
		  gimple_set_location (stmt, load_loc);
		  if (gsi_bb (load_gsi[j]))
		    {
		      gimple_set_vuse (stmt, gimple_vuse (op.stmt));
		      gimple_seq_add_stmt_without_update (&load_seq[j], stmt);
		    }
		  else
		    {
		      gimple_set_vuse (stmt, new_vuse);
		      gimple_seq_add_stmt_without_update (&seq, stmt);
		    }
		  ops[j] = gimple_assign_lhs (stmt);
		  tree xor_mask;
		  enum tree_code inv_op
		    = invert_op (split_store, j, int_type, xor_mask);
		  if (inv_op != NOP_EXPR)
		    {
		      stmt = gimple_build_assign (make_ssa_name (int_type),
						  inv_op, ops[j], xor_mask);
		      gimple_set_location (stmt, load_loc);
		      ops[j] = gimple_assign_lhs (stmt);

		      if (gsi_bb (load_gsi[j]))
			gimple_seq_add_stmt_without_update (&load_seq[j],
							    stmt);
		      else
			gimple_seq_add_stmt_without_update (&seq, stmt);
		    }
		}
	      else
		ops[j] = native_interpret_expr (int_type,
						group->val + try_pos
						- start_byte_pos,
						group->buf_size);
	    }

	  switch (split_store->orig_stores[0]->rhs_code)
	    {
	    case BIT_AND_EXPR:
	    case BIT_IOR_EXPR:
	    case BIT_XOR_EXPR:
	      FOR_EACH_VEC_ELT (split_store->orig_stores, k, info)
		{
		  tree rhs1 = gimple_assign_rhs1 (info->stmt);
		  orig_stmts.safe_push (SSA_NAME_DEF_STMT (rhs1));
		}
	      location_t bit_loc;
	      bit_loc = get_location_for_stmts (orig_stmts);
	      orig_stmts.truncate (0);

	      stmt
		= gimple_build_assign (make_ssa_name (int_type),
				       split_store->orig_stores[0]->rhs_code,
				       ops[0], ops[1]);
	      gimple_set_location (stmt, bit_loc);
	      /* If there is just one load and there is a separate
		 load_seq[0], emit the bitwise op right after it.  */
	      if (load_addr[1] == NULL_TREE && gsi_bb (load_gsi[0]))
		gimple_seq_add_stmt_without_update (&load_seq[0], stmt);
	      /* Otherwise, if at least one load is in seq, we need to
		 emit the bitwise op right before the store.  If there
		 are two loads and are emitted somewhere else, it would
		 be better to emit the bitwise op as early as possible;
		 we don't track where that would be possible right now
		 though.  */
	      else
		gimple_seq_add_stmt_without_update (&seq, stmt);
	      src = gimple_assign_lhs (stmt);
	      tree xor_mask;
	      enum tree_code inv_op;
	      inv_op = invert_op (split_store, 2, int_type, xor_mask);
	      if (inv_op != NOP_EXPR)
		{
		  stmt = gimple_build_assign (make_ssa_name (int_type),
					      inv_op, src, xor_mask);
		  gimple_set_location (stmt, bit_loc);
		  if (load_addr[1] == NULL_TREE && gsi_bb (load_gsi[0]))
		    gimple_seq_add_stmt_without_update (&load_seq[0], stmt);
		  else
		    gimple_seq_add_stmt_without_update (&seq, stmt);
		  src = gimple_assign_lhs (stmt);
		}
	      break;
	    case LROTATE_EXPR:
	    case NOP_EXPR:
	      src = ops[0];
	      if (!is_gimple_val (src))
		{
		  stmt = gimple_build_assign (make_ssa_name (TREE_TYPE (src)),
					      src);
		  gimple_seq_add_stmt_without_update (&seq, stmt);
		  src = gimple_assign_lhs (stmt);
		}
	      if (!useless_type_conversion_p (int_type, TREE_TYPE (src)))
		{
		  stmt = gimple_build_assign (make_ssa_name (int_type),
					      NOP_EXPR, src);
		  gimple_seq_add_stmt_without_update (&seq, stmt);
		  src = gimple_assign_lhs (stmt);
		}
	      inv_op = invert_op (split_store, 2, int_type, xor_mask);
	      if (inv_op != NOP_EXPR)
		{
		  stmt = gimple_build_assign (make_ssa_name (int_type),
					      inv_op, src, xor_mask);
		  gimple_set_location (stmt, loc);
		  gimple_seq_add_stmt_without_update (&seq, stmt);
		  src = gimple_assign_lhs (stmt);
		}
	      break;
	    default:
	      src = ops[0];
	      break;
	    }

	  /* If bit insertion is required, we use the source as an accumulator
	     into which the successive bit-field values are manually inserted.
	     FIXME: perhaps use BIT_INSERT_EXPR instead in some cases?  */
	  if (group->bit_insertion)
	    FOR_EACH_VEC_ELT (split_store->orig_stores, k, info)
	      if (info->rhs_code == BIT_INSERT_EXPR
		  && info->bitpos < try_bitpos + try_size
		  && info->bitpos + info->bitsize > try_bitpos)
		{
		  /* Mask, truncate, convert to final type, shift and ior into
		     the accumulator.  Note that every step can be a no-op.  */
		  const HOST_WIDE_INT start_gap = info->bitpos - try_bitpos;
		  const HOST_WIDE_INT end_gap
		    = (try_bitpos + try_size) - (info->bitpos + info->bitsize);
		  tree tem = info->ops[0].val;
		  if (TYPE_PRECISION (TREE_TYPE (tem)) <= info->bitsize)
		    {
		      tree bitfield_type
			= build_nonstandard_integer_type (info->bitsize,
							  UNSIGNED);
		      tem = gimple_convert (&seq, loc, bitfield_type, tem);
		    }
		  else if ((BYTES_BIG_ENDIAN ? start_gap : end_gap) > 0)
		    {
		      const unsigned HOST_WIDE_INT imask
			= (HOST_WIDE_INT_1U << info->bitsize) - 1;
		      tem = gimple_build (&seq, loc,
					  BIT_AND_EXPR, TREE_TYPE (tem), tem,
					  build_int_cst (TREE_TYPE (tem),
							 imask));
		    }
		  const HOST_WIDE_INT shift
		    = (BYTES_BIG_ENDIAN ? end_gap : start_gap);
		  if (shift < 0)
		    tem = gimple_build (&seq, loc,
					RSHIFT_EXPR, TREE_TYPE (tem), tem,
					build_int_cst (NULL_TREE, -shift));
		  tem = gimple_convert (&seq, loc, int_type, tem);
		  if (shift > 0)
		    tem = gimple_build (&seq, loc,
					LSHIFT_EXPR, int_type, tem,
					build_int_cst (NULL_TREE, shift));
		  src = gimple_build (&seq, loc,
				      BIT_IOR_EXPR, int_type, tem, src);
		}

	  if (!integer_zerop (mask))
	    {
	      tree tem = make_ssa_name (int_type);
	      tree load_src = unshare_expr (dest);
	      /* The load might load some or all bits uninitialized,
		 avoid -W*uninitialized warnings in that case.
		 As optimization, it would be nice if all the bits are
		 provably uninitialized (no stores at all yet or previous
		 store a CLOBBER) we'd optimize away the load and replace
		 it e.g. with 0.  */
	      TREE_NO_WARNING (load_src) = 1;
	      stmt = gimple_build_assign (tem, load_src);
	      gimple_set_location (stmt, loc);
	      gimple_set_vuse (stmt, new_vuse);
	      gimple_seq_add_stmt_without_update (&seq, stmt);

	      /* FIXME: If there is a single chunk of zero bits in mask,
		 perhaps use BIT_INSERT_EXPR instead?  */
	      stmt = gimple_build_assign (make_ssa_name (int_type),
					  BIT_AND_EXPR, tem, mask);
	      gimple_set_location (stmt, loc);
	      gimple_seq_add_stmt_without_update (&seq, stmt);
	      tem = gimple_assign_lhs (stmt);

	      if (TREE_CODE (src) == INTEGER_CST)
		src = wide_int_to_tree (int_type,
					wi::bit_and_not (wi::to_wide (src),
							 wi::to_wide (mask)));
	      else
		{
		  tree nmask
		    = wide_int_to_tree (int_type,
					wi::bit_not (wi::to_wide (mask)));
		  stmt = gimple_build_assign (make_ssa_name (int_type),
					      BIT_AND_EXPR, src, nmask);
		  gimple_set_location (stmt, loc);
		  gimple_seq_add_stmt_without_update (&seq, stmt);
		  src = gimple_assign_lhs (stmt);
		}
	      stmt = gimple_build_assign (make_ssa_name (int_type),
					  BIT_IOR_EXPR, tem, src);
	      gimple_set_location (stmt, loc);
	      gimple_seq_add_stmt_without_update (&seq, stmt);
	      src = gimple_assign_lhs (stmt);
	    }
	}

      stmt = gimple_build_assign (dest, src);
      gimple_set_location (stmt, loc);
      gimple_set_vuse (stmt, new_vuse);
      gimple_seq_add_stmt_without_update (&seq, stmt);

      tree new_vdef;
      if (i < split_stores.length () - 1)
	new_vdef = make_ssa_name (gimple_vop (cfun), stmt);
      else
	new_vdef = last_vdef;

      gimple_set_vdef (stmt, new_vdef);
      SSA_NAME_DEF_STMT (new_vdef) = stmt;
      new_vuse = new_vdef;
    }

  FOR_EACH_VEC_ELT (split_stores, i, split_store)
    delete split_store;

  gcc_assert (seq);
  if (dump_file)
    {
      fprintf (dump_file,
	       "New sequence of %u stores to replace old one of %u stores\n",
	       split_stores.length (), orig_num_stmts);
      if (dump_flags & TDF_DETAILS)
	print_gimple_seq (dump_file, seq, 0, TDF_VOPS | TDF_MEMSYMS);
    }
  gsi_insert_seq_after (&last_gsi, seq, GSI_SAME_STMT);
  for (int j = 0; j < 2; ++j)
    if (load_seq[j])
      gsi_insert_seq_after (&load_gsi[j], load_seq[j], GSI_SAME_STMT);

  return true;
}
/* Process the merged_store_group objects created in the coalescing phase.
   The stores are all against the base object BASE.
   Try to output the widened stores and delete the original statements if
   successful.  Return true iff any changes were made.  */

bool
imm_store_chain_info::output_merged_stores ()
{
  unsigned int i;
  merged_store_group *merged_store;
  bool ret = false;
  FOR_EACH_VEC_ELT (m_merged_store_groups, i, merged_store)
    {
      if (output_merged_store (merged_store))
	{
	  unsigned int j;
	  store_immediate_info *store;
	  FOR_EACH_VEC_ELT (merged_store->stores, j, store)
	    {
	      gimple *stmt = store->stmt;
	      gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
	      gsi_remove (&gsi, true);
	      if (stmt != merged_store->last_stmt)
		{
		  unlink_stmt_vdef (stmt);
		  release_defs (stmt);
		}
	    }
	  ret = true;
	}
    }
  if (ret && dump_file)
    fprintf (dump_file, "Merging successful!\n");

  return ret;
}
/* Coalesce the store_immediate_info objects recorded against the base object
   BASE in the first phase and output them.
   Delete the allocated structures.
   Return true if any changes were made.  */

bool
imm_store_chain_info::terminate_and_process_chain ()
{
  /* Process store chain.  */
  bool ret = false;
  if (m_store_info.length () > 1)
    {
      ret = coalesce_immediate_stores ();
      if (ret)
	ret = output_merged_stores ();
    }

  /* Delete all the entries we allocated ourselves.  */
  store_immediate_info *info;
  unsigned int i;
  FOR_EACH_VEC_ELT (m_store_info, i, info)
    delete info;

  merged_store_group *merged_info;
  FOR_EACH_VEC_ELT (m_merged_store_groups, i, merged_info)
    delete merged_info;

  return ret;
}
/* Return true iff LHS is a destination potentially interesting for
   store merging.  In practice these are the codes that get_inner_reference
   can process.  */

static bool
lhs_valid_for_store_merging_p (tree lhs)
{
  tree_code code = TREE_CODE (lhs);

  if (code == ARRAY_REF || code == ARRAY_RANGE_REF || code == MEM_REF
      || code == COMPONENT_REF || code == BIT_FIELD_REF)
    return true;

  return false;
}
/* Return true if the tree RHS is a constant we want to consider
   during store merging.  In practice accept all codes that
   native_encode_expr accepts.  */

static bool
rhs_valid_for_store_merging_p (tree rhs)
{
  unsigned HOST_WIDE_INT size;
  return (GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (rhs))).is_constant (&size)
	  && native_encode_expr (rhs, NULL, size) != 0);
}
/* If MEM is a memory reference usable for store merging (either as
   store destination or for loads), return the non-NULL base_addr
   and set *PBITSIZE, *PBITPOS, *PBITREGION_START and *PBITREGION_END.
   Otherwise return NULL, *PBITPOS should be still valid even for that
   case.  */

static tree
mem_valid_for_store_merging (tree mem, poly_uint64 *pbitsize,
			     poly_uint64 *pbitpos,
			     poly_uint64 *pbitregion_start,
			     poly_uint64 *pbitregion_end)
{
  poly_int64 bitsize, bitpos;
  poly_uint64 bitregion_start = 0, bitregion_end = 0;
  machine_mode mode;
  int unsignedp = 0, reversep = 0, volatilep = 0;
  tree offset;
  tree base_addr = get_inner_reference (mem, &bitsize, &bitpos, &offset, &mode,
					&unsignedp, &reversep, &volatilep);
  *pbitsize = bitsize;
  if (known_eq (bitsize, 0))
    return NULL_TREE;

  if (TREE_CODE (mem) == COMPONENT_REF
      && DECL_BIT_FIELD_TYPE (TREE_OPERAND (mem, 1)))
    {
      get_bit_range (&bitregion_start, &bitregion_end, mem, &bitpos, &offset);
      if (maybe_ne (bitregion_end, 0U))
	bitregion_end += 1;
    }

  /* We do not want to rewrite TARGET_MEM_REFs.  */
  if (TREE_CODE (base_addr) == TARGET_MEM_REF)
    return NULL_TREE;
  /* In some cases get_inner_reference may return a
     MEM_REF [ptr + byteoffset].  For the purposes of this pass
     canonicalize the base_addr to MEM_REF [ptr] and take
     byteoffset into account in the bitpos.  This occurs in
     PR 23684 and this way we can catch more chains.  */
  else if (TREE_CODE (base_addr) == MEM_REF)
    {
      poly_offset_int byte_off = mem_ref_offset (base_addr);
      poly_offset_int bit_off = byte_off << LOG2_BITS_PER_UNIT;
      bit_off += bitpos;
      if (known_ge (bit_off, 0) && bit_off.to_shwi (&bitpos))
	{
	  if (maybe_ne (bitregion_end, 0U))
	    {
	      bit_off = byte_off << LOG2_BITS_PER_UNIT;
	      bit_off += bitregion_start;
	      if (bit_off.to_uhwi (&bitregion_start))
		{
		  bit_off = byte_off << LOG2_BITS_PER_UNIT;
		  bit_off += bitregion_end;
		  if (!bit_off.to_uhwi (&bitregion_end))
		    bitregion_end = 0;
		}
	      else
		bitregion_end = 0;
	    }
	}
      else
	return NULL_TREE;
      base_addr = TREE_OPERAND (base_addr, 0);
    }
  /* get_inner_reference returns the base object, get at its
     address now.  */
  else
    {
      if (maybe_lt (bitpos, 0))
	return NULL_TREE;
      base_addr = build_fold_addr_expr (base_addr);
    }

  if (known_eq (bitregion_end, 0U))
    {
      bitregion_start = round_down_to_byte_boundary (bitpos);
      bitregion_end = bitpos;
      bitregion_end = round_up_to_byte_boundary (bitregion_end + bitsize);
    }

  if (offset != NULL_TREE)
    {
      /* If the access is variable offset then a base decl has to be
	 address-taken to be able to emit pointer-based stores to it.
	 ??? We might be able to get away with re-using the original
	 base up to the first variable part and then wrapping that inside
	 a BIT_FIELD_REF.  */
      tree base = get_base_address (base_addr);
      if (!base
	  || (DECL_P (base) && ! TREE_ADDRESSABLE (base)))
	return NULL_TREE;

      base_addr = build2 (POINTER_PLUS_EXPR, TREE_TYPE (base_addr),
			  base_addr, offset);
    }

  *pbitsize = bitsize;
  *pbitpos = bitpos;
  *pbitregion_start = bitregion_start;
  *pbitregion_end = bitregion_end;
  return base_addr;
}
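/* A worked example of the MEM_REF canonicalization above (offsets made up
   for illustration): a store to MEM[p + 4] at bit position 8 within that
   reference is rewritten to use the base MEM[p], with the byte offset
   folded into the bit position, i.e. bitpos = 4 * BITS_PER_UNIT + 8 = 40.
   Stores through p at different constant byte offsets thus end up in a
   single chain keyed on p.  */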
/* Return true if STMT is a load that can be used for store merging.
   In that case fill in *OP.  BITSIZE, BITPOS, BITREGION_START and
   BITREGION_END are properties of the corresponding store.  */

static bool
handled_load (gimple *stmt, store_operand_info *op,
	      poly_uint64 bitsize, poly_uint64 bitpos,
	      poly_uint64 bitregion_start, poly_uint64 bitregion_end)
{
  if (!is_gimple_assign (stmt))
    return false;
  if (gimple_assign_rhs_code (stmt) == BIT_NOT_EXPR)
    {
      tree rhs1 = gimple_assign_rhs1 (stmt);
      if (TREE_CODE (rhs1) == SSA_NAME
	  && handled_load (SSA_NAME_DEF_STMT (rhs1), op, bitsize, bitpos,
			   bitregion_start, bitregion_end))
	{
	  /* Don't allow _1 = load; _2 = ~_1; _3 = ~_2; which should have
	     been optimized earlier, but if allowed here, would confuse the
	     multiple uses counting.  */
	  if (op->bit_not_p)
	    return false;
	  op->bit_not_p = !op->bit_not_p;
	  return true;
	}
      return false;
    }

  if (gimple_vuse (stmt)
      && gimple_assign_load_p (stmt)
      && !stmt_can_throw_internal (stmt)
      && !gimple_has_volatile_ops (stmt))
    {
      tree mem = gimple_assign_rhs1 (stmt);
      op->base_addr
	= mem_valid_for_store_merging (mem, &op->bitsize, &op->bitpos,
				       &op->bitregion_start,
				       &op->bitregion_end);
      if (op->base_addr != NULL_TREE
	  && known_eq (op->bitsize, bitsize)
	  && multiple_p (op->bitpos - bitpos, BITS_PER_UNIT)
	  && known_ge (op->bitpos - op->bitregion_start,
		       bitpos - bitregion_start)
	  && known_ge (op->bitregion_end - op->bitpos,
		       bitregion_end - bitpos))
	{
	  op->stmt = stmt;
	  op->val = mem;
	  op->bit_not_p = false;
	  return true;
	}
    }
  return false;
}
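/* For example (GIMPLE spelled informally), given the store *p = _2 fed by

     _1 = *q;
     _2 = ~_1;

   handled_load recurses through the BIT_NOT_EXPR, fills in *OP from the
   load of *q and sets op->bit_not_p, so that the merged sequence can later
   be emitted as one wider load from q, complemented, then stored to p.  */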
/* Record the store STMT for store merging optimization if it can be
   optimized.  */

void
pass_store_merging::process_store (gimple *stmt)
{
  tree lhs = gimple_assign_lhs (stmt);
  tree rhs = gimple_assign_rhs1 (stmt);
  poly_uint64 bitsize, bitpos;
  poly_uint64 bitregion_start, bitregion_end;
  tree base_addr
    = mem_valid_for_store_merging (lhs, &bitsize, &bitpos,
				   &bitregion_start, &bitregion_end);
  if (known_eq (bitsize, 0U))
    return;

  bool invalid = (base_addr == NULL_TREE
		  || (maybe_gt (bitsize,
				(unsigned int) MAX_BITSIZE_MODE_ANY_INT)
		      && (TREE_CODE (rhs) != INTEGER_CST)));
  enum tree_code rhs_code = ERROR_MARK;
  bool bit_not_p = false;
  struct symbolic_number n;
  gimple *ins_stmt = NULL;
  store_operand_info ops[2];
  if (invalid)
    ;
  else if (rhs_valid_for_store_merging_p (rhs))
    {
      rhs_code = INTEGER_CST;
      ops[0].val = rhs;
    }
  else if (TREE_CODE (rhs) != SSA_NAME)
    invalid = true;
  else
    {
      gimple *def_stmt = SSA_NAME_DEF_STMT (rhs), *def_stmt1, *def_stmt2;
      if (!is_gimple_assign (def_stmt))
	invalid = true;
      else if (handled_load (def_stmt, &ops[0], bitsize, bitpos,
			     bitregion_start, bitregion_end))
	rhs_code = MEM_REF;
      else if (gimple_assign_rhs_code (def_stmt) == BIT_NOT_EXPR)
	{
	  tree rhs1 = gimple_assign_rhs1 (def_stmt);
	  if (TREE_CODE (rhs1) == SSA_NAME
	      && is_gimple_assign (SSA_NAME_DEF_STMT (rhs1)))
	    {
	      bit_not_p = true;
	      def_stmt = SSA_NAME_DEF_STMT (rhs1);
	    }
	}

      if (rhs_code == ERROR_MARK && !invalid)
	switch ((rhs_code = gimple_assign_rhs_code (def_stmt)))
	  {
	  case BIT_AND_EXPR:
	  case BIT_IOR_EXPR:
	  case BIT_XOR_EXPR:
	    tree rhs1, rhs2;
	    rhs1 = gimple_assign_rhs1 (def_stmt);
	    rhs2 = gimple_assign_rhs2 (def_stmt);
	    invalid = true;
	    if (TREE_CODE (rhs1) != SSA_NAME)
	      break;
	    def_stmt1 = SSA_NAME_DEF_STMT (rhs1);
	    if (!is_gimple_assign (def_stmt1)
		|| !handled_load (def_stmt1, &ops[0], bitsize, bitpos,
				  bitregion_start, bitregion_end))
	      break;
	    if (rhs_valid_for_store_merging_p (rhs2))
	      ops[1].val = rhs2;
	    else if (TREE_CODE (rhs2) != SSA_NAME)
	      break;
	    else
	      {
		def_stmt2 = SSA_NAME_DEF_STMT (rhs2);
		if (!is_gimple_assign (def_stmt2))
		  break;
		else if (!handled_load (def_stmt2, &ops[1], bitsize, bitpos,
					bitregion_start, bitregion_end))
		  break;
	      }
	    invalid = false;
	    break;
	  default:
	    invalid = true;
	    break;
	  }

      unsigned HOST_WIDE_INT const_bitsize;
      if (bitsize.is_constant (&const_bitsize)
	  && (const_bitsize % BITS_PER_UNIT) == 0
	  && const_bitsize <= 64
	  && multiple_p (bitpos, BITS_PER_UNIT))
	{
	  ins_stmt = find_bswap_or_nop_1 (def_stmt, &n, 12);
	  if (ins_stmt)
	    {
	      uint64_t nn = n.n;
	      for (unsigned HOST_WIDE_INT i = 0;
		   i < const_bitsize;
		   i += BITS_PER_UNIT, nn >>= BITS_PER_MARKER)
		if ((nn & MARKER_MASK) == 0
		    || (nn & MARKER_MASK) == MARKER_BYTE_UNKNOWN)
		  {
		    ins_stmt = NULL;
		    break;
		  }
	      if (ins_stmt)
		{
		  if (invalid)
		    {
		      rhs_code = LROTATE_EXPR;
		      ops[0].base_addr = NULL_TREE;
		      ops[1].base_addr = NULL_TREE;
		    }
		  invalid = false;
		}
	    }
	}

      if (invalid
	  && bitsize.is_constant (&const_bitsize)
	  && ((const_bitsize % BITS_PER_UNIT) != 0
	      || !multiple_p (bitpos, BITS_PER_UNIT))
	  && const_bitsize <= 64)
	{
	  /* Bypass a conversion to the bit-field type.  */
	  if (is_gimple_assign (def_stmt) && CONVERT_EXPR_CODE_P (rhs_code))
	    {
	      tree rhs1 = gimple_assign_rhs1 (def_stmt);
	      if (TREE_CODE (rhs1) == SSA_NAME
		  && INTEGRAL_TYPE_P (TREE_TYPE (rhs1)))
		rhs = rhs1;
	    }
	  rhs_code = BIT_INSERT_EXPR;
	  ops[0].val = rhs;
	  ops[0].base_addr = NULL_TREE;
	  ops[1].base_addr = NULL_TREE;
	  invalid = false;
	}
    }

  unsigned HOST_WIDE_INT const_bitsize, const_bitpos;
  unsigned HOST_WIDE_INT const_bitregion_start, const_bitregion_end;
  if (invalid
      || !bitsize.is_constant (&const_bitsize)
      || !bitpos.is_constant (&const_bitpos)
      || !bitregion_start.is_constant (&const_bitregion_start)
      || !bitregion_end.is_constant (&const_bitregion_end))
    {
      terminate_all_aliasing_chains (NULL, stmt);
      return;
    }

  if (!ins_stmt)
    memset (&n, 0, sizeof (n));

  struct imm_store_chain_info **chain_info = NULL;
  if (base_addr)
    chain_info = m_stores.get (base_addr);

  store_immediate_info *info;
  if (chain_info)
    {
      unsigned int ord = (*chain_info)->m_store_info.length ();
      info = new store_immediate_info (const_bitsize, const_bitpos,
				       const_bitregion_start,
				       const_bitregion_end,
				       stmt, ord, rhs_code, n, ins_stmt,
				       bit_not_p, ops[0], ops[1]);
      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  fprintf (dump_file, "Recording immediate store from stmt:\n");
	  print_gimple_stmt (dump_file, stmt, 0);
	}
      (*chain_info)->m_store_info.safe_push (info);
      terminate_all_aliasing_chains (chain_info, stmt);
      /* If we reach the limit of stores to merge in a chain terminate and
	 process the chain now.  */
      if ((*chain_info)->m_store_info.length ()
	  == (unsigned int) PARAM_VALUE (PARAM_MAX_STORES_TO_MERGE))
	{
	  if (dump_file && (dump_flags & TDF_DETAILS))
	    fprintf (dump_file,
		     "Reached maximum number of statements to merge:\n");
	  terminate_and_release_chain (*chain_info);
	}
      return;
    }

  /* Store aliases any existing chain?  */
  terminate_all_aliasing_chains (NULL, stmt);
  /* Start a new chain.  */
  struct imm_store_chain_info *new_chain
    = new imm_store_chain_info (m_stores_head, base_addr);
  info = new store_immediate_info (const_bitsize, const_bitpos,
				   const_bitregion_start,
				   const_bitregion_end,
				   stmt, 0, rhs_code, n, ins_stmt,
				   bit_not_p, ops[0], ops[1]);
  new_chain->m_store_info.safe_push (info);
  m_stores.put (base_addr, new_chain);
  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "Starting new chain with statement:\n");
      print_gimple_stmt (dump_file, stmt, 0);
      fprintf (dump_file, "The base object is:\n");
      print_generic_expr (dump_file, base_addr);
      fprintf (dump_file, "\n");
    }
}
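/* To illustrate the chain bookkeeping (example made up for exposition):
   for the source sequence

     p[0] = 1;
     p[1] = 2;
     q[0] = 3;
     p[2] = 4;

   the stores to p[0] and p[1] are pushed onto one chain keyed on the base
   p.  The store to q[0] starts a second chain keyed on q; if q may alias
   p, terminate_all_aliasing_chains ends the p chain at that point,
   otherwise p[2] simply extends it.  */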
/* Entry point for the pass.  Go over each basic block recording chains of
   immediate stores.  Upon encountering a terminating statement (as defined
   by stmt_terminates_chain_p) process the recorded stores and emit the widened
   variants.  */

unsigned int
pass_store_merging::execute (function *fun)
{
  basic_block bb;
  hash_set<gimple *> orig_stmts;

  calculate_dominance_info (CDI_DOMINATORS);

  FOR_EACH_BB_FN (bb, fun)
    {
      gimple_stmt_iterator gsi;
      unsigned HOST_WIDE_INT num_statements = 0;
      /* Record the original statements so that we can keep track of
	 statements emitted in this pass and not re-process new
	 statements.  */
      for (gsi = gsi_after_labels (bb); !gsi_end_p (gsi); gsi_next (&gsi))
	{
	  if (is_gimple_debug (gsi_stmt (gsi)))
	    continue;

	  if (++num_statements >= 2)
	    break;
	}

      if (num_statements < 2)
	continue;

      if (dump_file && (dump_flags & TDF_DETAILS))
	fprintf (dump_file, "Processing basic block <%d>:\n", bb->index);

      for (gsi = gsi_after_labels (bb); !gsi_end_p (gsi); gsi_next (&gsi))
	{
	  gimple *stmt = gsi_stmt (gsi);

	  if (is_gimple_debug (stmt))
	    continue;

	  if (gimple_has_volatile_ops (stmt))
	    {
	      /* Terminate all chains.  */
	      if (dump_file && (dump_flags & TDF_DETAILS))
		fprintf (dump_file, "Volatile access terminates "
				    "all chains\n");
	      terminate_and_process_all_chains ();
	      continue;
	    }

	  if (gimple_assign_single_p (stmt) && gimple_vdef (stmt)
	      && !stmt_can_throw_internal (stmt)
	      && lhs_valid_for_store_merging_p (gimple_assign_lhs (stmt)))
	    process_store (stmt);
	  else
	    terminate_all_aliasing_chains (NULL, stmt);
	}
      terminate_and_process_all_chains ();
    }
  return 0;
}

} // anon namespace

/* Construct and return a store merging pass object.  */

gimple_opt_pass *
make_pass_store_merging (gcc::context *ctxt)
{
  return new pass_store_merging (ctxt);
}
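/* The pass is controlled by -fstore-merging, enabled by default at -O2 and
   above.  Its decisions can be inspected via the dump_file output above,
   e.g.:

     gcc -O2 -fdump-tree-store-merging -c test.c

   which records the chains started, the stores pushed onto them and the
   merge results in a dump file named like test.c.*.store-merging.  */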
#if CHECKING_P

namespace selftest {

/* Selftests for store merging helpers.  */

/* Assert that all elements of the byte arrays X and Y, both of length N
   are equal.  */

static void
verify_array_eq (unsigned char *x, unsigned char *y, unsigned int n)
{
  for (unsigned int i = 0; i < n; i++)
    {
      if (x[i] != y[i])
	{
	  fprintf (stderr, "Arrays do not match.  X:\n");
	  dump_char_array (stderr, x, n);
	  fprintf (stderr, "Y:\n");
	  dump_char_array (stderr, y, n);
	}
      ASSERT_EQ (x[i], y[i]);
    }
}
/* Test shift_bytes_in_array and that it carries bits across between
   bytes correctly.  */

static void
verify_shift_bytes_in_array (void)
{
   /* byte 1   | byte 0.
      00011111 | 11100000.  */
  unsigned char orig[2] = { 0xe0, 0x1f };
  unsigned char in[2];
  memcpy (in, orig, sizeof orig);

  unsigned char expected[2] = { 0x80, 0x7f };
  shift_bytes_in_array (in, sizeof (in), 2);
  verify_array_eq (in, expected, sizeof (in));

  memcpy (in, orig, sizeof orig);
  memcpy (expected, orig, sizeof orig);
  /* Check that shifting by zero doesn't change anything.  */
  shift_bytes_in_array (in, sizeof (in), 0);
  verify_array_eq (in, expected, sizeof (in));
}
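/* Checking the first case by hand: with in[0] as the least significant
   byte, orig holds the 16-bit value 0x1fe0, and 0x1fe0 << 2 == 0x7f80,
   whose bytes are { 0x80, 0x7f }; the two bits shifted out of the top of
   in[0] carry into the bottom of in[1], matching EXPECTED.  */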
/* Test shift_bytes_in_array_right and that it carries bits across between
   bytes correctly.  */

static void
verify_shift_bytes_in_array_right (void)
{
   /* byte 1   | byte 0.
      00011111 | 11100000.  */
  unsigned char orig[2] = { 0x1f, 0xe0};
  unsigned char in[2];
  memcpy (in, orig, sizeof orig);
  unsigned char expected[2] = { 0x07, 0xf8};
  shift_bytes_in_array_right (in, sizeof (in), 2);
  verify_array_eq (in, expected, sizeof (in));

  memcpy (in, orig, sizeof orig);
  memcpy (expected, orig, sizeof orig);
  /* Check that shifting by zero doesn't change anything.  */
  shift_bytes_in_array_right (in, sizeof (in), 0);
  verify_array_eq (in, expected, sizeof (in));
}
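/* Checking by hand, with in[0] as the most significant byte here: orig
   holds 0x1fe0, and 0x1fe0 >> 2 == 0x07f8, whose bytes are { 0x07, 0xf8 };
   the two low bits of in[0] carry into the top of in[1], matching
   EXPECTED.  */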
/* Test clear_bit_region that it clears exactly the bits asked and
   nothing more.  */

static void
verify_clear_bit_region (void)
{
  /* Start with all bits set and test clearing various patterns in them.  */
  unsigned char orig[3] = { 0xff, 0xff, 0xff};
  unsigned char in[3];
  unsigned char expected[3];
  memcpy (in, orig, sizeof in);

  /* Check zeroing out all the bits.  */
  clear_bit_region (in, 0, 3 * BITS_PER_UNIT);
  expected[0] = expected[1] = expected[2] = 0;
  verify_array_eq (in, expected, sizeof in);

  memcpy (in, orig, sizeof in);
  /* Leave the first and last bits intact.  */
  clear_bit_region (in, 1, 3 * BITS_PER_UNIT - 2);
  expected[0] = 0x1;
  expected[1] = 0;
  expected[2] = 0x80;
  verify_array_eq (in, expected, sizeof in);
}
/* Test clear_bit_region_be that it clears exactly the bits asked and
   nothing more.  */

static void
verify_clear_bit_region_be (void)
{
  /* Start with all bits set and test clearing various patterns in them.  */
  unsigned char orig[3] = { 0xff, 0xff, 0xff};
  unsigned char in[3];
  unsigned char expected[3];
  memcpy (in, orig, sizeof in);

  /* Check zeroing out all the bits.  */
  clear_bit_region_be (in, BITS_PER_UNIT - 1, 3 * BITS_PER_UNIT);
  expected[0] = expected[1] = expected[2] = 0;
  verify_array_eq (in, expected, sizeof in);

  memcpy (in, orig, sizeof in);
  /* Leave the first and last bits intact.  */
  clear_bit_region_be (in, BITS_PER_UNIT - 2, 3 * BITS_PER_UNIT - 2);
  expected[0] = 0x80;
  expected[1] = 0;
  expected[2] = 0x1;
  verify_array_eq (in, expected, sizeof in);
}
/* Run all of the selftests within this file.  */

void
store_merging_c_tests (void)
{
  verify_shift_bytes_in_array ();
  verify_shift_bytes_in_array_right ();
  verify_clear_bit_region ();
  verify_clear_bit_region_be ();
}

} // namespace selftest
#endif /* CHECKING_P.  */