gcc/gimple-ssa-store-merging.c
1 /* GIMPLE store merging and byte swapping passes.
2 Copyright (C) 2009-2021 Free Software Foundation, Inc.
3 Contributed by ARM Ltd.
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify it
8 under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
10 any later version.
12 GCC is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 General Public License for more details.
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
21 /* The purpose of the store merging pass is to combine multiple memory stores
22 of constant values, values loaded from memory, bitwise operations on those,
23 or bit-field values, to consecutive locations, into fewer wider stores.
25 For example, if we have a sequence performing four byte stores to
26 consecutive memory locations:
27 [p ] := imm1;
28 [p + 1B] := imm2;
29 [p + 2B] := imm3;
30 [p + 3B] := imm4;
31 we can transform this into a single 4-byte store if the target supports it:
32 [p] := imm1:imm2:imm3:imm4 concatenated according to endianness.
34 Or:
35 [p ] := [q ];
36 [p + 1B] := [q + 1B];
37 [p + 2B] := [q + 2B];
38 [p + 3B] := [q + 3B];
39 if there is no overlap, this can be transformed into a single 4-byte
40 load followed by a single 4-byte store.
42 Or:
43 [p ] := [q ] ^ imm1;
44 [p + 1B] := [q + 1B] ^ imm2;
45 [p + 2B] := [q + 2B] ^ imm3;
46 [p + 3B] := [q + 3B] ^ imm4;
47 if there is no overlap, this can be transformed into a single 4-byte
48 load, XORed with imm1:imm2:imm3:imm4 and stored using a single 4-byte store.
50 Or:
51 [p:1 ] := imm;
52 [p:31] := val & 0x7FFFFFFF;
53 we can transform this into a single 4-byte store if the target supports it:
54 [p] := imm:(val & 0x7FFFFFFF) concatenated according to endianness.
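   As a concrete illustration (an informal sketch; the function and variable
   names below are made up for this comment), the first pattern above could
   come from C source such as:

     void
     set_header (unsigned char *p)
     {
       p[0] = 0x01;
       p[1] = 0x02;
       p[2] = 0x03;
       p[3] = 0x04;
     }

   which the pass can rewrite into one 32-bit store of the four immediates
   combined according to the target endianness, provided the target supports
   such an access.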
56 The algorithm is applied to each basic block in three phases:
58 1) Scan through the basic block and record assignments to destinations
59 that can be expressed as a store to memory of a certain size at a certain
60 bit offset from base expressions we can handle. For bit-fields we also
61 record the surrounding bit region, i.e. bits that could be stored in
62 a read-modify-write operation when storing the bit-field. Record store
63 chains to different bases in a hash_map (m_stores) and make sure to
64 terminate such chains when appropriate (for example when the stored
65 values get used subsequently).
66 These stores can be a result of structure element initializers, array stores
67 etc. A store_immediate_info object is recorded for every such store.
68 Record as many such assignments to a single base as possible until a
69 statement that interferes with the store sequence is encountered.
70 Each store has up to 2 operands, which can be a either constant, a memory
71 load or an SSA name, from which the value to be stored can be computed.
72 At most one of the operands can be a constant. The operands are recorded
73 in store_operand_info struct.
75 2) Analyze the chains of stores recorded in phase 1) (i.e. the vector of
76 store_immediate_info objects) and coalesce contiguous stores into
77 merged_store_group objects. For bit-field stores, we don't need to
78 require the stores to be contiguous, just their surrounding bit regions
79 have to be contiguous. If the expression being stored is different
80 between adjacent stores, such as one store storing a constant and the
81 following one storing a value loaded from memory, or if the loaded memory
82 objects are not adjacent, a new merged_store_group is created as well.
84 For example, given the stores:
85 [p ] := 0;
86 [p + 1B] := 1;
87 [p + 3B] := 0;
88 [p + 4B] := 1;
89 [p + 5B] := 0;
90 [p + 6B] := 0;
91 This phase would produce two merged_store_group objects, one recording the
92 two bytes stored in the memory region [p : p + 1] and another
93 recording the four bytes stored in the memory region [p + 3 : p + 6].
95 3) The merged_store_group objects produced in phase 2) are processed
96 to generate the sequence of wider stores that set the contiguous memory
97 regions to the sequence of bytes that correspond to it. This may emit
98 multiple stores per store group to handle contiguous stores that are not
99 of a size that is a power of 2. For example it can try to emit a 40-bit
100 store as a 32-bit store followed by an 8-bit store.
101 We try to emit as wide stores as we can while respecting STRICT_ALIGNMENT
102 or TARGET_SLOW_UNALIGNED_ACCESS settings.
104 Note on endianness and example:
105 Consider 2 contiguous 16-bit stores followed by 2 contiguous 8-bit stores:
106 [p ] := 0x1234;
107 [p + 2B] := 0x5678;
108 [p + 4B] := 0xab;
109 [p + 5B] := 0xcd;
111 The memory layout for little-endian (LE) and big-endian (BE) must be:
112 p |LE|BE|
113 ---------
114 0 |34|12|
115 1 |12|34|
116 2 |78|56|
117 3 |56|78|
118 4 |ab|ab|
119 5 |cd|cd|
121 To merge these into a single 48-bit merged value 'val' in phase 2)
122 on little-endian we insert stores to higher (consecutive) bitpositions
123 into the most significant bits of the merged value.
124 The final merged value would be: 0xcdab56781234
126 For big-endian we insert stores to higher bitpositions into the least
127 significant bits of the merged value.
128 The final merged value would be: 0x12345678abcd
130 Then, in phase 3), we want to emit this 48-bit value as a 32-bit store
131 followed by a 16-bit store. Again, we must consider endianness when
132 breaking down the 48-bit value 'val' computed above.
133 For little endian we emit:
134 [p] (32-bit) := 0x56781234; // val & 0x0000ffffffff;
135 [p + 4B] (16-bit) := 0xcdab; // (val & 0xffff00000000) >> 32;
137 Whereas for big-endian we emit:
138 [p] (32-bit) := 0x12345678; // (val & 0xffffffff0000) >> 16;
139 [p + 4B] (16-bit) := 0xabcd; // val & 0x00000000ffff; */
141 #include "config.h"
142 #include "system.h"
143 #include "coretypes.h"
144 #include "backend.h"
145 #include "tree.h"
146 #include "gimple.h"
147 #include "builtins.h"
148 #include "fold-const.h"
149 #include "tree-pass.h"
150 #include "ssa.h"
151 #include "gimple-pretty-print.h"
152 #include "alias.h"
153 #include "fold-const.h"
154 #include "print-tree.h"
155 #include "tree-hash-traits.h"
156 #include "gimple-iterator.h"
157 #include "gimplify.h"
158 #include "gimple-fold.h"
159 #include "stor-layout.h"
160 #include "timevar.h"
161 #include "cfganal.h"
162 #include "cfgcleanup.h"
163 #include "tree-cfg.h"
164 #include "except.h"
165 #include "tree-eh.h"
166 #include "target.h"
167 #include "gimplify-me.h"
168 #include "rtl.h"
169 #include "expr.h" /* For get_bit_range. */
170 #include "optabs-tree.h"
171 #include "dbgcnt.h"
172 #include "selftest.h"
174 /* The maximum size (in bits) of the stores this pass should generate. */
175 #define MAX_STORE_BITSIZE (BITS_PER_WORD)
176 #define MAX_STORE_BYTES (MAX_STORE_BITSIZE / BITS_PER_UNIT)
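/* For instance, on a 64-bit target where BITS_PER_WORD is 64, the widest
   store this pass will generate is 64 bits, i.e. 8 bytes.  */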
178 /* Limit to bound the number of aliasing checks for loads with the same
179 vuse as the corresponding store. */
180 #define MAX_STORE_ALIAS_CHECKS 64
182 namespace {
184 struct bswap_stat
186 /* Number of hand-written 16-bit nop / bswaps found. */
187 int found_16bit;
189 /* Number of hand-written 32-bit nop / bswaps found. */
190 int found_32bit;
192 /* Number of hand-written 64-bit nop / bswaps found. */
193 int found_64bit;
194 } nop_stats, bswap_stats;
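/* For illustration, a hand-written 32-bit byte swap of the kind these
   counters track might look like the following in user code (a minimal
   sketch; real code comes in many equivalent shapes and the helper name
   is made up):

     static inline uint32_t
     manual_bswap32 (uint32_t x)
     {
       return (x << 24)
              | ((x & 0x0000ff00) << 8)
              | ((x & 0x00ff0000) >> 8)
              | (x >> 24);
     }

   The bswap pass detects such computations and replaces them with a single
   __builtin_bswap32 call when the target supports it.  */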
196 /* A symbolic number structure is used to detect byte permutation and selection
197 patterns of a source. To achieve that, its field N contains an artificial
198 number consisting of BITS_PER_MARKER sized markers tracking where each
199 byte comes from in the source:
201 0 - target byte has the value 0
202 FF - target byte has an unknown value (eg. due to sign extension)
203 1..size - marker value is the byte index in the source (0 for lsb).
205 To detect permutations on memory sources (arrays and structures), a symbolic
206 number is also associated:
207 - a base address BASE_ADDR and an OFFSET giving the address of the source;
208 - a range which gives the difference between the highest and lowest accessed
209 memory location to make such a symbolic number;
210 - the address SRC of the source element of lowest address as a convenience
211 to easily get BASE_ADDR + offset + lowest bytepos;
212 - the number of expressions N_OPS bitwise ORed together, representing the
213 approximate cost of the computation.
215 Note 1: the range is different from size as size reflects the size of the
216 type of the current expression. For instance, for an array char a[],
217 (short) a[0] | (short) a[3] would have a size of 2 but a range of 4 while
218 (short) a[0] | ((short) a[0] << 1) would still have a size of 2 but this
219 time a range of 1.
221 Note 2: for non-memory sources, range holds the same value as size.
223 Note 3: SRC points to the SSA_NAME in case of non-memory source. */
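/* As a worked example of the marker encoding (the values follow from
   CMPNOP and do_shift_rotate below), for a 32-bit unsigned source X the
   symbolic number starts as 0x04030201, i.e. each byte of the result comes
   from the same byte of X.  Tracking X >> 8 shifts the markers down to
   0x00040302 (the top byte is now known to be zero), and a further mask
   with 0xff leaves 0x00000002: the low byte of the result is the second
   lowest byte of X and everything else is zero.  */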
225 struct symbolic_number {
226 uint64_t n;
227 tree type;
228 tree base_addr;
229 tree offset;
230 poly_int64_pod bytepos;
231 tree src;
232 tree alias_set;
233 tree vuse;
234 unsigned HOST_WIDE_INT range;
235 int n_ops;
238 #define BITS_PER_MARKER 8
239 #define MARKER_MASK ((1 << BITS_PER_MARKER) - 1)
240 #define MARKER_BYTE_UNKNOWN MARKER_MASK
241 #define HEAD_MARKER(n, size) \
242 ((n) & ((uint64_t) MARKER_MASK << (((size) - 1) * BITS_PER_MARKER)))
244 /* The number which the find_bswap_or_nop_1 result should match in
245 order to have a nop. The number is masked according to the size of
246 the symbolic number before using it. */
247 #define CMPNOP (sizeof (int64_t) < 8 ? 0 : \
248 (uint64_t)0x08070605 << 32 | 0x04030201)
250 /* The number which the find_bswap_or_nop_1 result should match in
251 order to have a byte swap. The number is masked according to the
252 size of the symbolic number before using it. */
253 #define CMPXCHG (sizeof (int64_t) < 8 ? 0 : \
254 (uint64_t)0x01020304 << 32 | 0x05060708)
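/* For example, for a 4-byte symbolic number these constants reduce, after
   the masking and shifting done in find_bswap_or_nop_finalize, to
   0x04030201 for the nop case and 0x01020304 for the byte-swap case.  */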
256 /* Perform a SHIFT or ROTATE operation by COUNT bits on symbolic
257 number N. Return false if the requested operation is not permitted
258 on a symbolic number. */
260 inline bool
261 do_shift_rotate (enum tree_code code,
262 struct symbolic_number *n,
263 int count)
265 int i, size = TYPE_PRECISION (n->type) / BITS_PER_UNIT;
266 unsigned head_marker;
268 if (count < 0
269 || count >= TYPE_PRECISION (n->type)
270 || count % BITS_PER_UNIT != 0)
271 return false;
272 count = (count / BITS_PER_UNIT) * BITS_PER_MARKER;
274 /* Zero out the extra bits of N in order to avoid them being shifted
275 into the significant bits. */
276 if (size < 64 / BITS_PER_MARKER)
277 n->n &= ((uint64_t) 1 << (size * BITS_PER_MARKER)) - 1;
279 switch (code)
281 case LSHIFT_EXPR:
282 n->n <<= count;
283 break;
284 case RSHIFT_EXPR:
285 head_marker = HEAD_MARKER (n->n, size);
286 n->n >>= count;
287 /* Arithmetic shift of signed type: result is dependent on the value. */
288 if (!TYPE_UNSIGNED (n->type) && head_marker)
289 for (i = 0; i < count / BITS_PER_MARKER; i++)
290 n->n |= (uint64_t) MARKER_BYTE_UNKNOWN
291 << ((size - 1 - i) * BITS_PER_MARKER);
292 break;
293 case LROTATE_EXPR:
294 n->n = (n->n << count) | (n->n >> ((size * BITS_PER_MARKER) - count));
295 break;
296 case RROTATE_EXPR:
297 n->n = (n->n >> count) | (n->n << ((size * BITS_PER_MARKER) - count));
298 break;
299 default:
300 return false;
302 /* Zero unused bits for size. */
303 if (size < 64 / BITS_PER_MARKER)
304 n->n &= ((uint64_t) 1 << (size * BITS_PER_MARKER)) - 1;
305 return true;
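/* For instance, with a 4-byte unsigned symbolic number holding 0x04030201,
   do_shift_rotate (RSHIFT_EXPR, n, 8) leaves 0x00040302: every marker moves
   down one byte and the vacated top byte becomes 0 (known zero).  For a
   signed type whose head marker is set, the vacated bytes would instead be
   filled with MARKER_BYTE_UNKNOWN.  */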
308 /* Perform sanity checking for the symbolic number N and the gimple
309 statement STMT. */
311 inline bool
312 verify_symbolic_number_p (struct symbolic_number *n, gimple *stmt)
314 tree lhs_type;
316 lhs_type = gimple_expr_type (stmt);
318 if (TREE_CODE (lhs_type) != INTEGER_TYPE
319 && TREE_CODE (lhs_type) != ENUMERAL_TYPE)
320 return false;
322 if (TYPE_PRECISION (lhs_type) != TYPE_PRECISION (n->type))
323 return false;
325 return true;
328 /* Initialize the symbolic number N for the bswap pass from the base element
329 SRC manipulated by the bitwise OR expression. */
331 bool
332 init_symbolic_number (struct symbolic_number *n, tree src)
334 int size;
336 if (! INTEGRAL_TYPE_P (TREE_TYPE (src)))
337 return false;
339 n->base_addr = n->offset = n->alias_set = n->vuse = NULL_TREE;
340 n->src = src;
342 /* Set up the symbolic number N by setting each byte to a value between 1 and
343 the byte size of rhs1. The highest order byte is set to that size and the
344 lowest order byte to 1. */
345 n->type = TREE_TYPE (src);
346 size = TYPE_PRECISION (n->type);
347 if (size % BITS_PER_UNIT != 0)
348 return false;
349 size /= BITS_PER_UNIT;
350 if (size > 64 / BITS_PER_MARKER)
351 return false;
352 n->range = size;
353 n->n = CMPNOP;
354 n->n_ops = 1;
356 if (size < 64 / BITS_PER_MARKER)
357 n->n &= ((uint64_t) 1 << (size * BITS_PER_MARKER)) - 1;
359 return true;
362 /* Check if STMT might be a byte swap or a nop from a memory source and return
363 the answer. If so, REF is that memory source and the base of the memory area
364 accessed and the offset of the access from that base are recorded in N. */
366 bool
367 find_bswap_or_nop_load (gimple *stmt, tree ref, struct symbolic_number *n)
369 /* Leaf node is an array or component ref. Memorize its base and
370 offset from base to compare to other such leaf node. */
371 poly_int64 bitsize, bitpos, bytepos;
372 machine_mode mode;
373 int unsignedp, reversep, volatilep;
374 tree offset, base_addr;
376 /* Not prepared to handle PDP endian. */
377 if (BYTES_BIG_ENDIAN != WORDS_BIG_ENDIAN)
378 return false;
380 if (!gimple_assign_load_p (stmt) || gimple_has_volatile_ops (stmt))
381 return false;
383 base_addr = get_inner_reference (ref, &bitsize, &bitpos, &offset, &mode,
384 &unsignedp, &reversep, &volatilep);
386 if (TREE_CODE (base_addr) == TARGET_MEM_REF)
387 /* Do not rewrite TARGET_MEM_REF. */
388 return false;
389 else if (TREE_CODE (base_addr) == MEM_REF)
391 poly_offset_int bit_offset = 0;
392 tree off = TREE_OPERAND (base_addr, 1);
394 if (!integer_zerop (off))
396 poly_offset_int boff = mem_ref_offset (base_addr);
397 boff <<= LOG2_BITS_PER_UNIT;
398 bit_offset += boff;
401 base_addr = TREE_OPERAND (base_addr, 0);
403 /* Avoid returning a negative bitpos as this may wreak havoc later. */
404 if (maybe_lt (bit_offset, 0))
406 tree byte_offset = wide_int_to_tree
407 (sizetype, bits_to_bytes_round_down (bit_offset));
408 bit_offset = num_trailing_bits (bit_offset);
409 if (offset)
410 offset = size_binop (PLUS_EXPR, offset, byte_offset);
411 else
412 offset = byte_offset;
415 bitpos += bit_offset.force_shwi ();
417 else
418 base_addr = build_fold_addr_expr (base_addr);
420 if (!multiple_p (bitpos, BITS_PER_UNIT, &bytepos))
421 return false;
422 if (!multiple_p (bitsize, BITS_PER_UNIT))
423 return false;
424 if (reversep)
425 return false;
427 if (!init_symbolic_number (n, ref))
428 return false;
429 n->base_addr = base_addr;
430 n->offset = offset;
431 n->bytepos = bytepos;
432 n->alias_set = reference_alias_ptr_type (ref);
433 n->vuse = gimple_vuse (stmt);
434 return true;
437 /* Compute the symbolic number N representing the result of a bitwise OR on 2
438 symbolic numbers N1 and N2 whose source statements are respectively
439 SOURCE_STMT1 and SOURCE_STMT2. */
441 gimple *
442 perform_symbolic_merge (gimple *source_stmt1, struct symbolic_number *n1,
443 gimple *source_stmt2, struct symbolic_number *n2,
444 struct symbolic_number *n)
446 int i, size;
447 uint64_t mask;
448 gimple *source_stmt;
449 struct symbolic_number *n_start;
451 tree rhs1 = gimple_assign_rhs1 (source_stmt1);
452 if (TREE_CODE (rhs1) == BIT_FIELD_REF
453 && TREE_CODE (TREE_OPERAND (rhs1, 0)) == SSA_NAME)
454 rhs1 = TREE_OPERAND (rhs1, 0);
455 tree rhs2 = gimple_assign_rhs1 (source_stmt2);
456 if (TREE_CODE (rhs2) == BIT_FIELD_REF
457 && TREE_CODE (TREE_OPERAND (rhs2, 0)) == SSA_NAME)
458 rhs2 = TREE_OPERAND (rhs2, 0);
460 /* Sources are different; cancel bswap if they are not memory locations with
461 the same base (array, structure, ...). */
462 if (rhs1 != rhs2)
464 uint64_t inc;
465 HOST_WIDE_INT start1, start2, start_sub, end_sub, end1, end2, end;
466 struct symbolic_number *toinc_n_ptr, *n_end;
467 basic_block bb1, bb2;
469 if (!n1->base_addr || !n2->base_addr
470 || !operand_equal_p (n1->base_addr, n2->base_addr, 0))
471 return NULL;
473 if (!n1->offset != !n2->offset
474 || (n1->offset && !operand_equal_p (n1->offset, n2->offset, 0)))
475 return NULL;
477 start1 = 0;
478 if (!(n2->bytepos - n1->bytepos).is_constant (&start2))
479 return NULL;
481 if (start1 < start2)
483 n_start = n1;
484 start_sub = start2 - start1;
486 else
488 n_start = n2;
489 start_sub = start1 - start2;
492 bb1 = gimple_bb (source_stmt1);
493 bb2 = gimple_bb (source_stmt2);
494 if (dominated_by_p (CDI_DOMINATORS, bb1, bb2))
495 source_stmt = source_stmt1;
496 else
497 source_stmt = source_stmt2;
499 /* Find the highest address at which a load is performed and
500 compute related info. */
501 end1 = start1 + (n1->range - 1);
502 end2 = start2 + (n2->range - 1);
503 if (end1 < end2)
505 end = end2;
506 end_sub = end2 - end1;
508 else
510 end = end1;
511 end_sub = end1 - end2;
513 n_end = (end2 > end1) ? n2 : n1;
515 /* Find symbolic number whose lsb is the most significant. */
516 if (BYTES_BIG_ENDIAN)
517 toinc_n_ptr = (n_end == n1) ? n2 : n1;
518 else
519 toinc_n_ptr = (n_start == n1) ? n2 : n1;
521 n->range = end - MIN (start1, start2) + 1;
523 /* Check that the range of memory covered can be represented by
524 a symbolic number. */
525 if (n->range > 64 / BITS_PER_MARKER)
526 return NULL;
528 /* Reinterpret byte marks in symbolic number holding the value of
529 bigger weight according to target endianness. */
530 inc = BYTES_BIG_ENDIAN ? end_sub : start_sub;
531 size = TYPE_PRECISION (n1->type) / BITS_PER_UNIT;
532 for (i = 0; i < size; i++, inc <<= BITS_PER_MARKER)
534 unsigned marker
535 = (toinc_n_ptr->n >> (i * BITS_PER_MARKER)) & MARKER_MASK;
536 if (marker && marker != MARKER_BYTE_UNKNOWN)
537 toinc_n_ptr->n += inc;
540 else
542 n->range = n1->range;
543 n_start = n1;
544 source_stmt = source_stmt1;
547 if (!n1->alias_set
548 || alias_ptr_types_compatible_p (n1->alias_set, n2->alias_set))
549 n->alias_set = n1->alias_set;
550 else
551 n->alias_set = ptr_type_node;
552 n->vuse = n_start->vuse;
553 n->base_addr = n_start->base_addr;
554 n->offset = n_start->offset;
555 n->src = n_start->src;
556 n->bytepos = n_start->bytepos;
557 n->type = n_start->type;
558 size = TYPE_PRECISION (n->type) / BITS_PER_UNIT;
560 for (i = 0, mask = MARKER_MASK; i < size; i++, mask <<= BITS_PER_MARKER)
562 uint64_t masked1, masked2;
564 masked1 = n1->n & mask;
565 masked2 = n2->n & mask;
566 if (masked1 && masked2 && masked1 != masked2)
567 return NULL;
569 n->n = n1->n | n2->n;
570 n->n_ops = n1->n_ops + n2->n_ops;
572 return source_stmt;
575 /* find_bswap_or_nop_1 invokes itself recursively with N and tries to perform
576 the operation given by the rhs of STMT on the result. If the operation
577 could be executed successfully, the function returns a gimple stmt whose
578 rhs's first tree is the expression of the source operand, and NULL
579 otherwise. */
581 gimple *
582 find_bswap_or_nop_1 (gimple *stmt, struct symbolic_number *n, int limit)
584 enum tree_code code;
585 tree rhs1, rhs2 = NULL;
586 gimple *rhs1_stmt, *rhs2_stmt, *source_stmt1;
587 enum gimple_rhs_class rhs_class;
589 if (!limit || !is_gimple_assign (stmt))
590 return NULL;
592 rhs1 = gimple_assign_rhs1 (stmt);
594 if (find_bswap_or_nop_load (stmt, rhs1, n))
595 return stmt;
597 /* Handle BIT_FIELD_REF. */
598 if (TREE_CODE (rhs1) == BIT_FIELD_REF
599 && TREE_CODE (TREE_OPERAND (rhs1, 0)) == SSA_NAME)
601 if (!tree_fits_uhwi_p (TREE_OPERAND (rhs1, 1))
602 || !tree_fits_uhwi_p (TREE_OPERAND (rhs1, 2)))
603 return NULL;
605 unsigned HOST_WIDE_INT bitsize = tree_to_uhwi (TREE_OPERAND (rhs1, 1));
606 unsigned HOST_WIDE_INT bitpos = tree_to_uhwi (TREE_OPERAND (rhs1, 2));
607 if (bitpos % BITS_PER_UNIT == 0
608 && bitsize % BITS_PER_UNIT == 0
609 && init_symbolic_number (n, TREE_OPERAND (rhs1, 0)))
611 /* Handle big-endian bit numbering in BIT_FIELD_REF. */
612 if (BYTES_BIG_ENDIAN)
613 bitpos = TYPE_PRECISION (n->type) - bitpos - bitsize;
615 /* Shift. */
616 if (!do_shift_rotate (RSHIFT_EXPR, n, bitpos))
617 return NULL;
619 /* Mask. */
620 uint64_t mask = 0;
621 uint64_t tmp = (1 << BITS_PER_UNIT) - 1;
622 for (unsigned i = 0; i < bitsize / BITS_PER_UNIT;
623 i++, tmp <<= BITS_PER_UNIT)
624 mask |= (uint64_t) MARKER_MASK << (i * BITS_PER_MARKER);
625 n->n &= mask;
627 /* Convert. */
628 n->type = TREE_TYPE (rhs1);
629 if (!n->base_addr)
630 n->range = TYPE_PRECISION (n->type) / BITS_PER_UNIT;
632 return verify_symbolic_number_p (n, stmt) ? stmt : NULL;
635 return NULL;
638 if (TREE_CODE (rhs1) != SSA_NAME)
639 return NULL;
641 code = gimple_assign_rhs_code (stmt);
642 rhs_class = gimple_assign_rhs_class (stmt);
643 rhs1_stmt = SSA_NAME_DEF_STMT (rhs1);
645 if (rhs_class == GIMPLE_BINARY_RHS)
646 rhs2 = gimple_assign_rhs2 (stmt);
648 /* Handle unary rhs and binary rhs with integer constants as second
649 operand. */
651 if (rhs_class == GIMPLE_UNARY_RHS
652 || (rhs_class == GIMPLE_BINARY_RHS
653 && TREE_CODE (rhs2) == INTEGER_CST))
655 if (code != BIT_AND_EXPR
656 && code != LSHIFT_EXPR
657 && code != RSHIFT_EXPR
658 && code != LROTATE_EXPR
659 && code != RROTATE_EXPR
660 && !CONVERT_EXPR_CODE_P (code))
661 return NULL;
663 source_stmt1 = find_bswap_or_nop_1 (rhs1_stmt, n, limit - 1);
665 /* If find_bswap_or_nop_1 returned NULL, STMT is a leaf node and
666 we have to initialize the symbolic number. */
667 if (!source_stmt1)
669 if (gimple_assign_load_p (stmt)
670 || !init_symbolic_number (n, rhs1))
671 return NULL;
672 source_stmt1 = stmt;
675 switch (code)
677 case BIT_AND_EXPR:
679 int i, size = TYPE_PRECISION (n->type) / BITS_PER_UNIT;
680 uint64_t val = int_cst_value (rhs2), mask = 0;
681 uint64_t tmp = (1 << BITS_PER_UNIT) - 1;
683 /* Only constants masking full bytes are allowed. */
684 for (i = 0; i < size; i++, tmp <<= BITS_PER_UNIT)
685 if ((val & tmp) != 0 && (val & tmp) != tmp)
686 return NULL;
687 else if (val & tmp)
688 mask |= (uint64_t) MARKER_MASK << (i * BITS_PER_MARKER);
690 n->n &= mask;
692 break;
693 case LSHIFT_EXPR:
694 case RSHIFT_EXPR:
695 case LROTATE_EXPR:
696 case RROTATE_EXPR:
697 if (!do_shift_rotate (code, n, (int) TREE_INT_CST_LOW (rhs2)))
698 return NULL;
699 break;
700 CASE_CONVERT:
702 int i, type_size, old_type_size;
703 tree type;
705 type = gimple_expr_type (stmt);
706 type_size = TYPE_PRECISION (type);
707 if (type_size % BITS_PER_UNIT != 0)
708 return NULL;
709 type_size /= BITS_PER_UNIT;
710 if (type_size > 64 / BITS_PER_MARKER)
711 return NULL;
713 /* Sign extension: result is dependent on the value. */
714 old_type_size = TYPE_PRECISION (n->type) / BITS_PER_UNIT;
715 if (!TYPE_UNSIGNED (n->type) && type_size > old_type_size
716 && HEAD_MARKER (n->n, old_type_size))
717 for (i = 0; i < type_size - old_type_size; i++)
718 n->n |= (uint64_t) MARKER_BYTE_UNKNOWN
719 << ((type_size - 1 - i) * BITS_PER_MARKER);
721 if (type_size < 64 / BITS_PER_MARKER)
723 /* If STMT casts to a smaller type mask out the bits not
724 belonging to the target type. */
725 n->n &= ((uint64_t) 1 << (type_size * BITS_PER_MARKER)) - 1;
727 n->type = type;
728 if (!n->base_addr)
729 n->range = type_size;
731 break;
732 default:
733 return NULL;
735 return verify_symbolic_number_p (n, stmt) ? source_stmt1 : NULL;
738 /* Handle binary rhs. */
740 if (rhs_class == GIMPLE_BINARY_RHS)
742 struct symbolic_number n1, n2;
743 gimple *source_stmt, *source_stmt2;
745 if (code != BIT_IOR_EXPR)
746 return NULL;
748 if (TREE_CODE (rhs2) != SSA_NAME)
749 return NULL;
751 rhs2_stmt = SSA_NAME_DEF_STMT (rhs2);
753 switch (code)
755 case BIT_IOR_EXPR:
756 source_stmt1 = find_bswap_or_nop_1 (rhs1_stmt, &n1, limit - 1);
758 if (!source_stmt1)
759 return NULL;
761 source_stmt2 = find_bswap_or_nop_1 (rhs2_stmt, &n2, limit - 1);
763 if (!source_stmt2)
764 return NULL;
766 if (TYPE_PRECISION (n1.type) != TYPE_PRECISION (n2.type))
767 return NULL;
769 if (n1.vuse != n2.vuse)
770 return NULL;
772 source_stmt
773 = perform_symbolic_merge (source_stmt1, &n1, source_stmt2, &n2, n);
775 if (!source_stmt)
776 return NULL;
778 if (!verify_symbolic_number_p (n, stmt))
779 return NULL;
781 break;
782 default:
783 return NULL;
785 return source_stmt;
787 return NULL;
790 /* Helper for find_bswap_or_nop and try_coalesce_bswap to compute
791 *CMPXCHG, *CMPNOP and adjust *N. */
793 void
794 find_bswap_or_nop_finalize (struct symbolic_number *n, uint64_t *cmpxchg,
795 uint64_t *cmpnop)
797 unsigned rsize;
798 uint64_t tmpn, mask;
800 /* The number which the find_bswap_or_nop_1 result should match in order
801 to have a full byte swap. The number is shifted to the right
802 according to the size of the symbolic number before using it. */
803 *cmpxchg = CMPXCHG;
804 *cmpnop = CMPNOP;
806 /* Find real size of result (highest non-zero byte). */
807 if (n->base_addr)
808 for (tmpn = n->n, rsize = 0; tmpn; tmpn >>= BITS_PER_MARKER, rsize++);
809 else
810 rsize = n->range;
812 /* Zero out the bits corresponding to untouched bytes in original gimple
813 expression. */
814 if (n->range < (int) sizeof (int64_t))
816 mask = ((uint64_t) 1 << (n->range * BITS_PER_MARKER)) - 1;
817 *cmpxchg >>= (64 / BITS_PER_MARKER - n->range) * BITS_PER_MARKER;
818 *cmpnop &= mask;
821 /* Zero out the bits corresponding to unused bytes in the result of the
822 gimple expression. */
823 if (rsize < n->range)
825 if (BYTES_BIG_ENDIAN)
827 mask = ((uint64_t) 1 << (rsize * BITS_PER_MARKER)) - 1;
828 *cmpxchg &= mask;
829 *cmpnop >>= (n->range - rsize) * BITS_PER_MARKER;
831 else
833 mask = ((uint64_t) 1 << (rsize * BITS_PER_MARKER)) - 1;
834 *cmpxchg >>= (n->range - rsize) * BITS_PER_MARKER;
835 *cmpnop &= mask;
837 n->range = rsize;
840 n->range *= BITS_PER_UNIT;
843 /* Check if STMT completes a bswap implementation or a read in a given
844 endianness consisting of ORs, SHIFTs and ANDs and sets *BSWAP
845 accordingly. It also sets N to represent the kind of operations
846 performed: size of the resulting expression and whether it works on
847 a memory source, and if so, its alias-set and vuse. Finally, the
848 function returns a stmt whose rhs's first tree is the source
849 expression. */
851 gimple *
852 find_bswap_or_nop (gimple *stmt, struct symbolic_number *n, bool *bswap)
854 tree type_size = TYPE_SIZE_UNIT (gimple_expr_type (stmt));
855 if (!tree_fits_uhwi_p (type_size))
856 return NULL;
858 /* The last parameter determines the depth search limit. It usually
859 correlates directly to the number n of bytes to be touched. We
860 increase that number by 2 * (log2(n) + 1) here in order to also
861 cover signed -> unsigned conversions of the src operand as can be seen
862 in libgcc, and for initial shift/and operation of the src operand. */
863 int limit = tree_to_uhwi (type_size);
864 limit += 2 * (1 + (int) ceil_log2 ((unsigned HOST_WIDE_INT) limit));
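  /* For example, for a 4-byte result this gives a search depth limit of
     4 + 2 * (1 + 2) = 10.  */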
865 gimple *ins_stmt = find_bswap_or_nop_1 (stmt, n, limit);
867 if (!ins_stmt)
869 if (gimple_assign_rhs_code (stmt) != CONSTRUCTOR
870 || BYTES_BIG_ENDIAN != WORDS_BIG_ENDIAN)
871 return NULL;
872 unsigned HOST_WIDE_INT sz = tree_to_uhwi (type_size) * BITS_PER_UNIT;
873 if (sz != 16 && sz != 32 && sz != 64)
874 return NULL;
875 tree rhs = gimple_assign_rhs1 (stmt);
876 if (CONSTRUCTOR_NELTS (rhs) == 0)
877 return NULL;
878 tree eltype = TREE_TYPE (TREE_TYPE (rhs));
879 unsigned HOST_WIDE_INT eltsz
880 = int_size_in_bytes (eltype) * BITS_PER_UNIT;
881 if (TYPE_PRECISION (eltype) != eltsz)
882 return NULL;
883 constructor_elt *elt;
884 unsigned int i;
885 tree type = build_nonstandard_integer_type (sz, 1);
886 FOR_EACH_VEC_SAFE_ELT (CONSTRUCTOR_ELTS (rhs), i, elt)
888 if (TREE_CODE (elt->value) != SSA_NAME
889 || !INTEGRAL_TYPE_P (TREE_TYPE (elt->value)))
890 return NULL;
891 struct symbolic_number n1;
892 gimple *source_stmt
893 = find_bswap_or_nop_1 (SSA_NAME_DEF_STMT (elt->value), &n1,
894 limit - 1);
896 if (!source_stmt)
897 return NULL;
899 n1.type = type;
900 if (!n1.base_addr)
901 n1.range = sz / BITS_PER_UNIT;
903 if (i == 0)
905 ins_stmt = source_stmt;
906 *n = n1;
908 else
910 if (n->vuse != n1.vuse)
911 return NULL;
913 struct symbolic_number n0 = *n;
915 if (!BYTES_BIG_ENDIAN)
917 if (!do_shift_rotate (LSHIFT_EXPR, &n1, i * eltsz))
918 return NULL;
920 else if (!do_shift_rotate (LSHIFT_EXPR, &n0, eltsz))
921 return NULL;
922 ins_stmt
923 = perform_symbolic_merge (ins_stmt, &n0, source_stmt, &n1, n);
925 if (!ins_stmt)
926 return NULL;
931 uint64_t cmpxchg, cmpnop;
932 find_bswap_or_nop_finalize (n, &cmpxchg, &cmpnop);
934 /* A complete byte swap should make the symbolic number start with
935 the largest digit in the highest order byte. An unchanged symbolic
936 number indicates a read with the same endianness as the target architecture. */
937 if (n->n == cmpnop)
938 *bswap = false;
939 else if (n->n == cmpxchg)
940 *bswap = true;
941 else
942 return NULL;
944 /* Useless bit manipulation performed by code. */
945 if (!n->base_addr && n->n == cmpnop && n->n_ops == 1)
946 return NULL;
948 return ins_stmt;
951 const pass_data pass_data_optimize_bswap =
953 GIMPLE_PASS, /* type */
954 "bswap", /* name */
955 OPTGROUP_NONE, /* optinfo_flags */
956 TV_NONE, /* tv_id */
957 PROP_ssa, /* properties_required */
958 0, /* properties_provided */
959 0, /* properties_destroyed */
960 0, /* todo_flags_start */
961 0, /* todo_flags_finish */
964 class pass_optimize_bswap : public gimple_opt_pass
966 public:
967 pass_optimize_bswap (gcc::context *ctxt)
968 : gimple_opt_pass (pass_data_optimize_bswap, ctxt)
971 /* opt_pass methods: */
972 virtual bool gate (function *)
974 return flag_expensive_optimizations && optimize && BITS_PER_UNIT == 8;
977 virtual unsigned int execute (function *);
979 }; // class pass_optimize_bswap
981 /* Helper function for bswap_replace. Build VIEW_CONVERT_EXPR from
982 VAL to TYPE. If VAL has different type size, emit a NOP_EXPR cast
983 first. */
985 static tree
986 bswap_view_convert (gimple_stmt_iterator *gsi, tree type, tree val)
988 gcc_assert (INTEGRAL_TYPE_P (TREE_TYPE (val)));
989 if (TYPE_SIZE (type) != TYPE_SIZE (TREE_TYPE (val)))
991 HOST_WIDE_INT prec = TREE_INT_CST_LOW (TYPE_SIZE (type));
992 tree itype = build_nonstandard_integer_type (prec, 1);
993 gimple *g = gimple_build_assign (make_ssa_name (itype), NOP_EXPR, val);
994 gsi_insert_before (gsi, g, GSI_SAME_STMT);
995 val = gimple_assign_lhs (g);
997 return build1 (VIEW_CONVERT_EXPR, type, val);
1000 /* Perform the bswap optimization: replace the expression computed in the rhs
1001 of gsi_stmt (GSI) (or if NULL add instead of replace) by an equivalent
1002 bswap, load or load + bswap expression.
1003 Which of these alternatives replaces the rhs is given by N->base_addr (non
1004 null if a load is needed) and BSWAP. The type, VUSE and alias-set of the
1005 load to perform are also given in N while the builtin bswap invocation is
1006 given in FNDECL. Finally, if a load is involved, INS_STMT refers to one of the
1007 load statements involved to construct the rhs in gsi_stmt (GSI) and
1008 N->range gives the size of the rhs expression for maintaining some
1009 statistics.
1011 Note that if the replacement involves a load and gsi_stmt (GSI) is
1012 non-NULL, that stmt is moved just after INS_STMT to do the load with the
1013 same VUSE, which can lead to gsi_stmt (GSI) changing basic block. */
1015 tree
1016 bswap_replace (gimple_stmt_iterator gsi, gimple *ins_stmt, tree fndecl,
1017 tree bswap_type, tree load_type, struct symbolic_number *n,
1018 bool bswap)
1020 tree src, tmp, tgt = NULL_TREE;
1021 gimple *bswap_stmt;
1022 tree_code conv_code = NOP_EXPR;
1024 gimple *cur_stmt = gsi_stmt (gsi);
1025 src = n->src;
1026 if (cur_stmt)
1028 tgt = gimple_assign_lhs (cur_stmt);
1029 if (gimple_assign_rhs_code (cur_stmt) == CONSTRUCTOR
1030 && tgt
1031 && VECTOR_TYPE_P (TREE_TYPE (tgt)))
1032 conv_code = VIEW_CONVERT_EXPR;
1035 /* Need to load the value from memory first. */
1036 if (n->base_addr)
1038 gimple_stmt_iterator gsi_ins = gsi;
1039 if (ins_stmt)
1040 gsi_ins = gsi_for_stmt (ins_stmt);
1041 tree addr_expr, addr_tmp, val_expr, val_tmp;
1042 tree load_offset_ptr, aligned_load_type;
1043 gimple *load_stmt;
1044 unsigned align = get_object_alignment (src);
1045 poly_int64 load_offset = 0;
1047 if (cur_stmt)
1049 basic_block ins_bb = gimple_bb (ins_stmt);
1050 basic_block cur_bb = gimple_bb (cur_stmt);
1051 if (!dominated_by_p (CDI_DOMINATORS, cur_bb, ins_bb))
1052 return NULL_TREE;
1054 /* Move cur_stmt just before one of the loads of the original
1055 to ensure it has the same VUSE. See PR61517 for what could
1056 go wrong. */
1057 if (gimple_bb (cur_stmt) != gimple_bb (ins_stmt))
1058 reset_flow_sensitive_info (gimple_assign_lhs (cur_stmt));
1059 gsi_move_before (&gsi, &gsi_ins);
1060 gsi = gsi_for_stmt (cur_stmt);
1062 else
1063 gsi = gsi_ins;
1065 /* Compute address to load from and cast according to the size
1066 of the load. */
1067 addr_expr = build_fold_addr_expr (src);
1068 if (is_gimple_mem_ref_addr (addr_expr))
1069 addr_tmp = unshare_expr (addr_expr);
1070 else
1072 addr_tmp = unshare_expr (n->base_addr);
1073 if (!is_gimple_mem_ref_addr (addr_tmp))
1074 addr_tmp = force_gimple_operand_gsi_1 (&gsi, addr_tmp,
1075 is_gimple_mem_ref_addr,
1076 NULL_TREE, true,
1077 GSI_SAME_STMT);
1078 load_offset = n->bytepos;
1079 if (n->offset)
1081 tree off
1082 = force_gimple_operand_gsi (&gsi, unshare_expr (n->offset),
1083 true, NULL_TREE, true,
1084 GSI_SAME_STMT);
1085 gimple *stmt
1086 = gimple_build_assign (make_ssa_name (TREE_TYPE (addr_tmp)),
1087 POINTER_PLUS_EXPR, addr_tmp, off);
1088 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
1089 addr_tmp = gimple_assign_lhs (stmt);
1093 /* Perform the load. */
1094 aligned_load_type = load_type;
1095 if (align < TYPE_ALIGN (load_type))
1096 aligned_load_type = build_aligned_type (load_type, align);
1097 load_offset_ptr = build_int_cst (n->alias_set, load_offset);
1098 val_expr = fold_build2 (MEM_REF, aligned_load_type, addr_tmp,
1099 load_offset_ptr);
1101 if (!bswap)
1103 if (n->range == 16)
1104 nop_stats.found_16bit++;
1105 else if (n->range == 32)
1106 nop_stats.found_32bit++;
1107 else
1109 gcc_assert (n->range == 64);
1110 nop_stats.found_64bit++;
1113 /* Convert the result of load if necessary. */
1114 if (tgt && !useless_type_conversion_p (TREE_TYPE (tgt), load_type))
1116 val_tmp = make_temp_ssa_name (aligned_load_type, NULL,
1117 "load_dst");
1118 load_stmt = gimple_build_assign (val_tmp, val_expr);
1119 gimple_set_vuse (load_stmt, n->vuse);
1120 gsi_insert_before (&gsi, load_stmt, GSI_SAME_STMT);
1121 if (conv_code == VIEW_CONVERT_EXPR)
1122 val_tmp = bswap_view_convert (&gsi, TREE_TYPE (tgt), val_tmp);
1123 gimple_assign_set_rhs_with_ops (&gsi, conv_code, val_tmp);
1124 update_stmt (cur_stmt);
1126 else if (cur_stmt)
1128 gimple_assign_set_rhs_with_ops (&gsi, MEM_REF, val_expr);
1129 gimple_set_vuse (cur_stmt, n->vuse);
1130 update_stmt (cur_stmt);
1132 else
1134 tgt = make_ssa_name (load_type);
1135 cur_stmt = gimple_build_assign (tgt, MEM_REF, val_expr);
1136 gimple_set_vuse (cur_stmt, n->vuse);
1137 gsi_insert_before (&gsi, cur_stmt, GSI_SAME_STMT);
1140 if (dump_file)
1142 fprintf (dump_file,
1143 "%d bit load in target endianness found at: ",
1144 (int) n->range);
1145 print_gimple_stmt (dump_file, cur_stmt, 0);
1147 return tgt;
1149 else
1151 val_tmp = make_temp_ssa_name (aligned_load_type, NULL, "load_dst");
1152 load_stmt = gimple_build_assign (val_tmp, val_expr);
1153 gimple_set_vuse (load_stmt, n->vuse);
1154 gsi_insert_before (&gsi, load_stmt, GSI_SAME_STMT);
1156 src = val_tmp;
1158 else if (!bswap)
1160 gimple *g = NULL;
1161 if (tgt && !useless_type_conversion_p (TREE_TYPE (tgt), TREE_TYPE (src)))
1163 if (!is_gimple_val (src))
1164 return NULL_TREE;
1165 if (conv_code == VIEW_CONVERT_EXPR)
1166 src = bswap_view_convert (&gsi, TREE_TYPE (tgt), src);
1167 g = gimple_build_assign (tgt, conv_code, src);
1169 else if (cur_stmt)
1170 g = gimple_build_assign (tgt, src);
1171 else
1172 tgt = src;
1173 if (n->range == 16)
1174 nop_stats.found_16bit++;
1175 else if (n->range == 32)
1176 nop_stats.found_32bit++;
1177 else
1179 gcc_assert (n->range == 64);
1180 nop_stats.found_64bit++;
1182 if (dump_file)
1184 fprintf (dump_file,
1185 "%d bit reshuffle in target endianness found at: ",
1186 (int) n->range);
1187 if (cur_stmt)
1188 print_gimple_stmt (dump_file, cur_stmt, 0);
1189 else
1191 print_generic_expr (dump_file, tgt, TDF_NONE);
1192 fprintf (dump_file, "\n");
1195 if (cur_stmt)
1196 gsi_replace (&gsi, g, true);
1197 return tgt;
1199 else if (TREE_CODE (src) == BIT_FIELD_REF)
1200 src = TREE_OPERAND (src, 0);
1202 if (n->range == 16)
1203 bswap_stats.found_16bit++;
1204 else if (n->range == 32)
1205 bswap_stats.found_32bit++;
1206 else
1208 gcc_assert (n->range == 64);
1209 bswap_stats.found_64bit++;
1212 tmp = src;
1214 /* Convert the src expression if necessary. */
1215 if (!useless_type_conversion_p (TREE_TYPE (tmp), bswap_type))
1217 gimple *convert_stmt;
1219 tmp = make_temp_ssa_name (bswap_type, NULL, "bswapsrc");
1220 convert_stmt = gimple_build_assign (tmp, NOP_EXPR, src);
1221 gsi_insert_before (&gsi, convert_stmt, GSI_SAME_STMT);
1224 /* Canonical form for a 16-bit bswap is a rotate expression. Only 16-bit values
1225 are considered, as rotation of 2N-bit values by N bits is generally not
1226 equivalent to a bswap. Consider for instance 0x01020304 r>> 16, which
1227 gives 0x03040102, while a bswap of that value is 0x04030201. */
1228 if (bswap && n->range == 16)
1230 tree count = build_int_cst (NULL, BITS_PER_UNIT);
1231 src = fold_build2 (LROTATE_EXPR, bswap_type, tmp, count);
1232 bswap_stmt = gimple_build_assign (NULL, src);
1234 else
1235 bswap_stmt = gimple_build_call (fndecl, 1, tmp);
1237 if (tgt == NULL_TREE)
1238 tgt = make_ssa_name (bswap_type);
1239 tmp = tgt;
1241 /* Convert the result if necessary. */
1242 if (!useless_type_conversion_p (TREE_TYPE (tgt), bswap_type))
1244 gimple *convert_stmt;
1246 tmp = make_temp_ssa_name (bswap_type, NULL, "bswapdst");
1247 tree atmp = tmp;
1248 if (conv_code == VIEW_CONVERT_EXPR)
1249 atmp = bswap_view_convert (&gsi, TREE_TYPE (tgt), tmp);
1250 convert_stmt = gimple_build_assign (tgt, conv_code, atmp);
1251 gsi_insert_after (&gsi, convert_stmt, GSI_SAME_STMT);
1254 gimple_set_lhs (bswap_stmt, tmp);
1256 if (dump_file)
1258 fprintf (dump_file, "%d bit bswap implementation found at: ",
1259 (int) n->range);
1260 if (cur_stmt)
1261 print_gimple_stmt (dump_file, cur_stmt, 0);
1262 else
1264 print_generic_expr (dump_file, tgt, TDF_NONE);
1265 fprintf (dump_file, "\n");
1269 if (cur_stmt)
1271 gsi_insert_after (&gsi, bswap_stmt, GSI_SAME_STMT);
1272 gsi_remove (&gsi, true);
1274 else
1275 gsi_insert_before (&gsi, bswap_stmt, GSI_SAME_STMT);
1276 return tgt;
1279 /* Try to optimize an assignment CUR_STMT with CONSTRUCTOR on the rhs
1280 using bswap optimizations. CDI_DOMINATORS need to be
1281 computed on entry. Return true if it has been optimized and
1282 TODO_update_ssa is needed. */
1284 static bool
1285 maybe_optimize_vector_constructor (gimple *cur_stmt)
1287 tree fndecl = NULL_TREE, bswap_type = NULL_TREE, load_type;
1288 struct symbolic_number n;
1289 bool bswap;
1291 gcc_assert (is_gimple_assign (cur_stmt)
1292 && gimple_assign_rhs_code (cur_stmt) == CONSTRUCTOR);
1294 tree rhs = gimple_assign_rhs1 (cur_stmt);
1295 if (!VECTOR_TYPE_P (TREE_TYPE (rhs))
1296 || !INTEGRAL_TYPE_P (TREE_TYPE (TREE_TYPE (rhs)))
1297 || gimple_assign_lhs (cur_stmt) == NULL_TREE)
1298 return false;
1300 HOST_WIDE_INT sz = int_size_in_bytes (TREE_TYPE (rhs)) * BITS_PER_UNIT;
1301 switch (sz)
1303 case 16:
1304 load_type = bswap_type = uint16_type_node;
1305 break;
1306 case 32:
1307 if (builtin_decl_explicit_p (BUILT_IN_BSWAP32)
1308 && optab_handler (bswap_optab, SImode) != CODE_FOR_nothing)
1310 load_type = uint32_type_node;
1311 fndecl = builtin_decl_explicit (BUILT_IN_BSWAP32);
1312 bswap_type = TREE_VALUE (TYPE_ARG_TYPES (TREE_TYPE (fndecl)));
1314 else
1315 return false;
1316 break;
1317 case 64:
1318 if (builtin_decl_explicit_p (BUILT_IN_BSWAP64)
1319 && (optab_handler (bswap_optab, DImode) != CODE_FOR_nothing
1320 || (word_mode == SImode
1321 && builtin_decl_explicit_p (BUILT_IN_BSWAP32)
1322 && optab_handler (bswap_optab, SImode) != CODE_FOR_nothing)))
1324 load_type = uint64_type_node;
1325 fndecl = builtin_decl_explicit (BUILT_IN_BSWAP64);
1326 bswap_type = TREE_VALUE (TYPE_ARG_TYPES (TREE_TYPE (fndecl)));
1328 else
1329 return false;
1330 break;
1331 default:
1332 return false;
1335 gimple *ins_stmt = find_bswap_or_nop (cur_stmt, &n, &bswap);
1336 if (!ins_stmt || n.range != (unsigned HOST_WIDE_INT) sz)
1337 return false;
1339 if (bswap && !fndecl && n.range != 16)
1340 return false;
1342 memset (&nop_stats, 0, sizeof (nop_stats));
1343 memset (&bswap_stats, 0, sizeof (bswap_stats));
1344 return bswap_replace (gsi_for_stmt (cur_stmt), ins_stmt, fndecl,
1345 bswap_type, load_type, &n, bswap) != NULL_TREE;
1348 /* Find manual byte swap implementations as well as loads in a given
1349 endianness. Byte swaps are turned into a bswap builtin invocation
1350 while endian loads are converted to a bswap builtin invocation or a
1351 simple load according to the target endianness. */
1353 unsigned int
1354 pass_optimize_bswap::execute (function *fun)
1356 basic_block bb;
1357 bool bswap32_p, bswap64_p;
1358 bool changed = false;
1359 tree bswap32_type = NULL_TREE, bswap64_type = NULL_TREE;
1361 bswap32_p = (builtin_decl_explicit_p (BUILT_IN_BSWAP32)
1362 && optab_handler (bswap_optab, SImode) != CODE_FOR_nothing);
1363 bswap64_p = (builtin_decl_explicit_p (BUILT_IN_BSWAP64)
1364 && (optab_handler (bswap_optab, DImode) != CODE_FOR_nothing
1365 || (bswap32_p && word_mode == SImode)));
1367 /* Determine the argument type of the builtins. The code later on
1368 assumes that the return and argument type are the same. */
1369 if (bswap32_p)
1371 tree fndecl = builtin_decl_explicit (BUILT_IN_BSWAP32);
1372 bswap32_type = TREE_VALUE (TYPE_ARG_TYPES (TREE_TYPE (fndecl)));
1375 if (bswap64_p)
1377 tree fndecl = builtin_decl_explicit (BUILT_IN_BSWAP64);
1378 bswap64_type = TREE_VALUE (TYPE_ARG_TYPES (TREE_TYPE (fndecl)));
1381 memset (&nop_stats, 0, sizeof (nop_stats));
1382 memset (&bswap_stats, 0, sizeof (bswap_stats));
1383 calculate_dominance_info (CDI_DOMINATORS);
1385 FOR_EACH_BB_FN (bb, fun)
1387 gimple_stmt_iterator gsi;
1389 /* We do a reverse scan for bswap patterns to make sure we get the
1390 widest match. As bswap pattern matching doesn't handle previously
1391 inserted smaller bswap replacements as sub-patterns, the wider
1392 variant wouldn't be detected. */
1393 for (gsi = gsi_last_bb (bb); !gsi_end_p (gsi);)
1395 gimple *ins_stmt, *cur_stmt = gsi_stmt (gsi);
1396 tree fndecl = NULL_TREE, bswap_type = NULL_TREE, load_type;
1397 enum tree_code code;
1398 struct symbolic_number n;
1399 bool bswap;
1401 /* This gsi_prev (&gsi) is not part of the for loop because cur_stmt
1402 might be moved to a different basic block by bswap_replace and gsi
1403 must not point to it if that's the case. Moving the gsi_prev
1404 there makes sure that gsi points to the statement previous to
1405 cur_stmt while still making sure that all statements are
1406 considered in this basic block. */
1407 gsi_prev (&gsi);
1409 if (!is_gimple_assign (cur_stmt))
1410 continue;
1412 code = gimple_assign_rhs_code (cur_stmt);
1413 switch (code)
1415 case LROTATE_EXPR:
1416 case RROTATE_EXPR:
1417 if (!tree_fits_uhwi_p (gimple_assign_rhs2 (cur_stmt))
1418 || tree_to_uhwi (gimple_assign_rhs2 (cur_stmt))
1419 % BITS_PER_UNIT)
1420 continue;
1421 /* Fall through. */
1422 case BIT_IOR_EXPR:
1423 break;
1424 case CONSTRUCTOR:
1426 tree rhs = gimple_assign_rhs1 (cur_stmt);
1427 if (VECTOR_TYPE_P (TREE_TYPE (rhs))
1428 && INTEGRAL_TYPE_P (TREE_TYPE (TREE_TYPE (rhs))))
1429 break;
1431 continue;
1432 default:
1433 continue;
1436 ins_stmt = find_bswap_or_nop (cur_stmt, &n, &bswap);
1438 if (!ins_stmt)
1439 continue;
1441 switch (n.range)
1443 case 16:
1444 /* Already in canonical form, nothing to do. */
1445 if (code == LROTATE_EXPR || code == RROTATE_EXPR)
1446 continue;
1447 load_type = bswap_type = uint16_type_node;
1448 break;
1449 case 32:
1450 load_type = uint32_type_node;
1451 if (bswap32_p)
1453 fndecl = builtin_decl_explicit (BUILT_IN_BSWAP32);
1454 bswap_type = bswap32_type;
1456 break;
1457 case 64:
1458 load_type = uint64_type_node;
1459 if (bswap64_p)
1461 fndecl = builtin_decl_explicit (BUILT_IN_BSWAP64);
1462 bswap_type = bswap64_type;
1464 break;
1465 default:
1466 continue;
1469 if (bswap && !fndecl && n.range != 16)
1470 continue;
1472 if (bswap_replace (gsi_for_stmt (cur_stmt), ins_stmt, fndecl,
1473 bswap_type, load_type, &n, bswap))
1474 changed = true;
1478 statistics_counter_event (fun, "16-bit nop implementations found",
1479 nop_stats.found_16bit);
1480 statistics_counter_event (fun, "32-bit nop implementations found",
1481 nop_stats.found_32bit);
1482 statistics_counter_event (fun, "64-bit nop implementations found",
1483 nop_stats.found_64bit);
1484 statistics_counter_event (fun, "16-bit bswap implementations found",
1485 bswap_stats.found_16bit);
1486 statistics_counter_event (fun, "32-bit bswap implementations found",
1487 bswap_stats.found_32bit);
1488 statistics_counter_event (fun, "64-bit bswap implementations found",
1489 bswap_stats.found_64bit);
1491 return (changed ? TODO_update_ssa : 0);
1494 } // anon namespace
1496 gimple_opt_pass *
1497 make_pass_optimize_bswap (gcc::context *ctxt)
1499 return new pass_optimize_bswap (ctxt);
1502 namespace {
1504 /* Struct recording one operand for the store, which is either a constant,
1505 in which case VAL represents the constant and all the other fields are zero,
1506 or a memory load, in which case VAL represents the reference, BASE_ADDR is
1507 non-NULL and the other fields also reflect the memory load, or an SSA name,
1508 in which case VAL represents the SSA name and all the other fields are zero. */
1510 class store_operand_info
1512 public:
1513 tree val;
1514 tree base_addr;
1515 poly_uint64 bitsize;
1516 poly_uint64 bitpos;
1517 poly_uint64 bitregion_start;
1518 poly_uint64 bitregion_end;
1519 gimple *stmt;
1520 bool bit_not_p;
1521 store_operand_info ();
1524 store_operand_info::store_operand_info ()
1525 : val (NULL_TREE), base_addr (NULL_TREE), bitsize (0), bitpos (0),
1526 bitregion_start (0), bitregion_end (0), stmt (NULL), bit_not_p (false)
1530 /* Struct recording the information about a single store of an immediate
1531 to memory. These are created in the first phase and coalesced into
1532 merged_store_group objects in the second phase. */
1534 class store_immediate_info
1536 public:
1537 unsigned HOST_WIDE_INT bitsize;
1538 unsigned HOST_WIDE_INT bitpos;
1539 unsigned HOST_WIDE_INT bitregion_start;
1540 /* This is one past the last bit of the bit region. */
1541 unsigned HOST_WIDE_INT bitregion_end;
1542 gimple *stmt;
1543 unsigned int order;
1544 /* INTEGER_CST for constant store, STRING_CST for string store,
1545 MEM_REF for memory copy, BIT_*_EXPR for logical bitwise operation,
1546 BIT_INSERT_EXPR for bit insertion.
1547 LROTATE_EXPR if it can only be bswap optimized and
1548 ops are not really meaningful.
1549 NOP_EXPR if bswap optimization detected identity, ops
1550 are not meaningful. */
1551 enum tree_code rhs_code;
1552 /* Two fields for bswap optimization purposes. */
1553 struct symbolic_number n;
1554 gimple *ins_stmt;
1555 /* True if BIT_{AND,IOR,XOR}_EXPR result is inverted before storing. */
1556 bool bit_not_p;
1557 /* True if ops have been swapped and thus ops[1] represents
1558 rhs1 of BIT_{AND,IOR,XOR}_EXPR and ops[0] represents rhs2. */
1559 bool ops_swapped_p;
1560 /* The index number of the landing pad, or 0 if there is none. */
1561 int lp_nr;
1562 /* Operands. For BIT_*_EXPR rhs_code both operands are used, otherwise
1563 just the first one. */
1564 store_operand_info ops[2];
1565 store_immediate_info (unsigned HOST_WIDE_INT, unsigned HOST_WIDE_INT,
1566 unsigned HOST_WIDE_INT, unsigned HOST_WIDE_INT,
1567 gimple *, unsigned int, enum tree_code,
1568 struct symbolic_number &, gimple *, bool, int,
1569 const store_operand_info &,
1570 const store_operand_info &);
1573 store_immediate_info::store_immediate_info (unsigned HOST_WIDE_INT bs,
1574 unsigned HOST_WIDE_INT bp,
1575 unsigned HOST_WIDE_INT brs,
1576 unsigned HOST_WIDE_INT bre,
1577 gimple *st,
1578 unsigned int ord,
1579 enum tree_code rhscode,
1580 struct symbolic_number &nr,
1581 gimple *ins_stmtp,
1582 bool bitnotp,
1583 int nr2,
1584 const store_operand_info &op0r,
1585 const store_operand_info &op1r)
1586 : bitsize (bs), bitpos (bp), bitregion_start (brs), bitregion_end (bre),
1587 stmt (st), order (ord), rhs_code (rhscode), n (nr),
1588 ins_stmt (ins_stmtp), bit_not_p (bitnotp), ops_swapped_p (false),
1589 lp_nr (nr2)
1590 #if __cplusplus >= 201103L
1591 , ops { op0r, op1r }
1594 #else
1596 ops[0] = op0r;
1597 ops[1] = op1r;
1599 #endif
1601 /* Struct representing a group of stores to contiguous memory locations.
1602 These are produced by the second phase (coalescing) and consumed in the
1603 third phase that outputs the widened stores. */
1605 class merged_store_group
1607 public:
1608 unsigned HOST_WIDE_INT start;
1609 unsigned HOST_WIDE_INT width;
1610 unsigned HOST_WIDE_INT bitregion_start;
1611 unsigned HOST_WIDE_INT bitregion_end;
1612 /* The size of the allocated memory for val and mask. */
1613 unsigned HOST_WIDE_INT buf_size;
1614 unsigned HOST_WIDE_INT align_base;
1615 poly_uint64 load_align_base[2];
1617 unsigned int align;
1618 unsigned int load_align[2];
1619 unsigned int first_order;
1620 unsigned int last_order;
1621 bool bit_insertion;
1622 bool string_concatenation;
1623 bool only_constants;
1624 bool consecutive;
1625 unsigned int first_nonmergeable_order;
1626 int lp_nr;
1628 auto_vec<store_immediate_info *> stores;
1629 /* We record the first and last original statements in the sequence because
1630 we'll need their vuse/vdef and replacement position. It's easier to keep
1631 track of them separately as 'stores' is reordered by apply_stores. */
1632 gimple *last_stmt;
1633 gimple *first_stmt;
1634 unsigned char *val;
1635 unsigned char *mask;
1637 merged_store_group (store_immediate_info *);
1638 ~merged_store_group ();
1639 bool can_be_merged_into (store_immediate_info *);
1640 void merge_into (store_immediate_info *);
1641 void merge_overlapping (store_immediate_info *);
1642 bool apply_stores ();
1643 private:
1644 void do_merge (store_immediate_info *);
1647 /* Debug helper. Dump LEN elements of byte array PTR to FD in hex. */
1649 static void
1650 dump_char_array (FILE *fd, unsigned char *ptr, unsigned int len)
1652 if (!fd)
1653 return;
1655 for (unsigned int i = 0; i < len; i++)
1656 fprintf (fd, "%02x ", ptr[i]);
1657 fprintf (fd, "\n");
1660 /* Clear out LEN bits starting from bit START in the byte array
1661 PTR. This clears the bits to the *right* from START.
1662 START must be within [0, BITS_PER_UNIT) and counts starting from
1663 the least significant bit. */
1665 static void
1666 clear_bit_region_be (unsigned char *ptr, unsigned int start,
1667 unsigned int len)
1669 if (len == 0)
1670 return;
1671 /* Clear len bits to the right of start. */
1672 else if (len <= start + 1)
1674 unsigned char mask = (~(~0U << len));
1675 mask = mask << (start + 1U - len);
1676 ptr[0] &= ~mask;
1678 else if (start != BITS_PER_UNIT - 1)
1680 clear_bit_region_be (ptr, start, (start % BITS_PER_UNIT) + 1);
1681 clear_bit_region_be (ptr + 1, BITS_PER_UNIT - 1,
1682 len - (start % BITS_PER_UNIT) - 1);
1684 else if (start == BITS_PER_UNIT - 1
1685 && len > BITS_PER_UNIT)
1687 unsigned int nbytes = len / BITS_PER_UNIT;
1688 memset (ptr, 0, nbytes);
1689 if (len % BITS_PER_UNIT != 0)
1690 clear_bit_region_be (ptr + nbytes, BITS_PER_UNIT - 1,
1691 len % BITS_PER_UNIT);
1693 else
1694 gcc_unreachable ();
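/* For example, clear_bit_region_be (ptr, 7, 3) on a byte holding 0xff
   clears the three most significant bits and leaves 0x1f: START names the
   highest bit to clear (bit 7 here, counting from the least significant
   bit) and LEN bits are cleared downwards from it.  */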
1697 /* In the byte array PTR clear the bit region starting at bit
1698 START which is LEN bits wide.
1699 For regions spanning multiple bytes do this recursively until we reach
1700 zero LEN or a region contained within a single byte. */
1702 static void
1703 clear_bit_region (unsigned char *ptr, unsigned int start,
1704 unsigned int len)
1706 /* Degenerate base case. */
1707 if (len == 0)
1708 return;
1709 else if (start >= BITS_PER_UNIT)
1710 clear_bit_region (ptr + 1, start - BITS_PER_UNIT, len);
1711 /* Second base case. */
1712 else if ((start + len) <= BITS_PER_UNIT)
1714 unsigned char mask = (~0U) << (unsigned char) (BITS_PER_UNIT - len);
1715 mask >>= BITS_PER_UNIT - (start + len);
1717 ptr[0] &= ~mask;
1719 return;
1721 /* Clear most significant bits in a byte and proceed with the next byte. */
1722 else if (start != 0)
1724 clear_bit_region (ptr, start, BITS_PER_UNIT - start);
1725 clear_bit_region (ptr + 1, 0, len - (BITS_PER_UNIT - start));
1727 /* Whole bytes need to be cleared. */
1728 else if (start == 0 && len > BITS_PER_UNIT)
1730 unsigned int nbytes = len / BITS_PER_UNIT;
1731 /* We could recurse on each byte but we clear whole bytes, so a simple
1732 memset will do. */
1733 memset (ptr, '\0', nbytes);
1734 /* Clear the remaining sub-byte region if there is one. */
1735 if (len % BITS_PER_UNIT != 0)
1736 clear_bit_region (ptr + nbytes, 0, len % BITS_PER_UNIT);
1738 else
1739 gcc_unreachable ();
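/* For example, clear_bit_region (ptr, 6, 4) on two bytes holding 0xff 0xff
   clears bits 6-7 of the first byte and bits 0-1 of the second, leaving
   0x3f 0xfc (bit 0 being the least significant bit of each byte).  */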
1742 /* Write BITLEN bits of EXPR to the byte array PTR at
1743 bit position BITPOS. PTR should contain TOTAL_BYTES elements.
1744 Return true if the operation succeeded. */
1746 static bool
1747 encode_tree_to_bitpos (tree expr, unsigned char *ptr, int bitlen, int bitpos,
1748 unsigned int total_bytes)
1750 unsigned int first_byte = bitpos / BITS_PER_UNIT;
1751 bool sub_byte_op_p = ((bitlen % BITS_PER_UNIT)
1752 || (bitpos % BITS_PER_UNIT)
1753 || !int_mode_for_size (bitlen, 0).exists ());
1754 bool empty_ctor_p
1755 = (TREE_CODE (expr) == CONSTRUCTOR
1756 && CONSTRUCTOR_NELTS (expr) == 0
1757 && TYPE_SIZE_UNIT (TREE_TYPE (expr))
1758 && tree_fits_uhwi_p (TYPE_SIZE_UNIT (TREE_TYPE (expr))));
1760 if (!sub_byte_op_p)
1762 if (first_byte >= total_bytes)
1763 return false;
1764 total_bytes -= first_byte;
1765 if (empty_ctor_p)
1767 unsigned HOST_WIDE_INT rhs_bytes
1768 = tree_to_uhwi (TYPE_SIZE_UNIT (TREE_TYPE (expr)));
1769 if (rhs_bytes > total_bytes)
1770 return false;
1771 memset (ptr + first_byte, '\0', rhs_bytes);
1772 return true;
1774 return native_encode_expr (expr, ptr + first_byte, total_bytes) != 0;
1777 /* LITTLE-ENDIAN
1778 We are writing a non byte-sized quantity or at a position that is not
1779 at a byte boundary.
1780 |--------|--------|--------| ptr + first_byte
1782 xxx xxxxxxxx xxx< bp>
1783 |______EXPR____|
1785 First native_encode_expr EXPR into a temporary buffer and shift each
1786 byte in the buffer by 'bp' (carrying the bits over as necessary).
1787 |00000000|00xxxxxx|xxxxxxxx| << bp = |000xxxxx|xxxxxxxx|xxx00000|
1788 <------bitlen---->< bp>
1789 Then we clear the destination bits:
1790 |---00000|00000000|000-----| ptr + first_byte
1791 <-------bitlen--->< bp>
1793 Finally we ORR the bytes of the shifted EXPR into the cleared region:
1794 |---xxxxx||xxxxxxxx||xxx-----| ptr + first_byte.
1796 BIG-ENDIAN
1797 We are writing a non byte-sized quantity or at a position that is not
1798 at a byte boundary.
1799 ptr + first_byte |--------|--------|--------|
1801 <bp >xxx xxxxxxxx xxx
1802 |_____EXPR_____|
1804 First native_encode_expr EXPR into a temporary buffer and shift each
1805 byte in the buffer to the right by (carrying the bits over as necessary).
1806 We shift by as much as needed to align the most significant bit of EXPR
1807 with bitpos:
1808 |00xxxxxx|xxxxxxxx| >> 3 = |00000xxx|xxxxxxxx|xxxxx000|
1809 <---bitlen----> <bp ><-----bitlen----->
1810 Then we clear the destination bits:
1811 ptr + first_byte |-----000||00000000||00000---|
1812 <bp ><-------bitlen----->
1814 Finally we ORR the bytes of the shifted EXPR into the cleared region:
1815 ptr + first_byte |---xxxxx||xxxxxxxx||xxx-----|.
1816 The awkwardness comes from the fact that bitpos is counted from the
1817 most significant bit of a byte. */
1819 /* We must be dealing with fixed-size data at this point, since the
1820 total size is also fixed. */
1821 unsigned int byte_size;
1822 if (empty_ctor_p)
1824 unsigned HOST_WIDE_INT rhs_bytes
1825 = tree_to_uhwi (TYPE_SIZE_UNIT (TREE_TYPE (expr)));
1826 if (rhs_bytes > total_bytes)
1827 return false;
1828 byte_size = rhs_bytes;
1830 else
1832 fixed_size_mode mode
1833 = as_a <fixed_size_mode> (TYPE_MODE (TREE_TYPE (expr)));
1834 byte_size
1835 = mode == BLKmode
1836 ? tree_to_uhwi (TYPE_SIZE_UNIT (TREE_TYPE (expr)))
1837 : GET_MODE_SIZE (mode);
1839 /* Allocate an extra byte so that we have space to shift into. */
1840 byte_size++;
1841 unsigned char *tmpbuf = XALLOCAVEC (unsigned char, byte_size);
1842 memset (tmpbuf, '\0', byte_size);
1843 /* The store detection code should only have allowed constants that are
1844 accepted by native_encode_expr or empty ctors. */
1845 if (!empty_ctor_p
1846 && native_encode_expr (expr, tmpbuf, byte_size - 1) == 0)
1847 gcc_unreachable ();
1849 /* The native_encode_expr machinery uses TYPE_MODE to determine how many
1850 bytes to write. This means it can write more than
1851 ROUND_UP (bitlen, BITS_PER_UNIT) / BITS_PER_UNIT bytes (for example
1852 write 8 bytes for a bitlen of 40). Skip the bytes that are not within
1853 bitlen and zero out the bits that are not relevant as well (that may
1854 contain a sign bit due to sign-extension). */
1855 unsigned int padding
1856 = byte_size - ROUND_UP (bitlen, BITS_PER_UNIT) / BITS_PER_UNIT - 1;
1857 /* On big-endian the padding is at the 'front' so just skip the initial
1858 bytes. */
1859 if (BYTES_BIG_ENDIAN)
1860 tmpbuf += padding;
1862 byte_size -= padding;
1864 if (bitlen % BITS_PER_UNIT != 0)
1866 if (BYTES_BIG_ENDIAN)
1867 clear_bit_region_be (tmpbuf, BITS_PER_UNIT - 1,
1868 BITS_PER_UNIT - (bitlen % BITS_PER_UNIT));
1869 else
1870 clear_bit_region (tmpbuf, bitlen,
1871 byte_size * BITS_PER_UNIT - bitlen);
1873 /* Left shifting relies on the last byte being clear if bitlen is
1874 a multiple of BITS_PER_UNIT; that byte might not be clear if
1875 there are padding bytes. */
1876 else if (!BYTES_BIG_ENDIAN)
1877 tmpbuf[byte_size - 1] = '\0';
1879 /* Clear the bit region in PTR where the bits from TMPBUF will be
1880 inserted into. */
1881 if (BYTES_BIG_ENDIAN)
1882 clear_bit_region_be (ptr + first_byte,
1883 BITS_PER_UNIT - 1 - (bitpos % BITS_PER_UNIT), bitlen);
1884 else
1885 clear_bit_region (ptr + first_byte, bitpos % BITS_PER_UNIT, bitlen);
1887 int shift_amnt;
1888 int bitlen_mod = bitlen % BITS_PER_UNIT;
1889 int bitpos_mod = bitpos % BITS_PER_UNIT;
1891 bool skip_byte = false;
1892 if (BYTES_BIG_ENDIAN)
1894 /* BITPOS and BITLEN are exactly aligned and no shifting
1895 is necessary. */
1896 if (bitpos_mod + bitlen_mod == BITS_PER_UNIT
1897 || (bitpos_mod == 0 && bitlen_mod == 0))
1898 shift_amnt = 0;
1899 /* |. . . . . . . .|
1900 <bp > <blen >.
1901 We always shift right for BYTES_BIG_ENDIAN so shift the beginning
1902 of the value until it aligns with 'bp' in the next byte over. */
1903 else if (bitpos_mod + bitlen_mod < BITS_PER_UNIT)
1905 shift_amnt = bitlen_mod + bitpos_mod;
1906 skip_byte = bitlen_mod != 0;
1908 /* |. . . . . . . .|
1909 <----bp--->
1910 <---blen---->.
1911 Shift the value right within the same byte so it aligns with 'bp'. */
1912 else
1913 shift_amnt = bitlen_mod + bitpos_mod - BITS_PER_UNIT;
1915 else
1916 shift_amnt = bitpos % BITS_PER_UNIT;
1918 /* Create the shifted version of EXPR. */
1919 if (!BYTES_BIG_ENDIAN)
1921 shift_bytes_in_array_left (tmpbuf, byte_size, shift_amnt);
1922 if (shift_amnt == 0)
1923 byte_size--;
1925 else
1927 gcc_assert (BYTES_BIG_ENDIAN);
1928 shift_bytes_in_array_right (tmpbuf, byte_size, shift_amnt);
1929 /* If shifting right forced us to move into the next byte skip the now
1930 empty byte. */
1931 if (skip_byte)
1933 tmpbuf++;
1934 byte_size--;
1938 /* Insert the bits from TMPBUF. */
1939 for (unsigned int i = 0; i < byte_size; i++)
1940 ptr[first_byte + i] |= tmpbuf[i];
1942 return true;
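/* Editor's illustration (not part of the original file): a standalone
   little-endian sketch of the encode/shift/clear/OR sequence described
   in the comment above, for the simple case where the value already
   fits in an integer.  The name demo_encode_bits_le is made up; the
   real code goes through native_encode_expr, clear_bit_region and
   shift_bytes_in_array_left instead.  Assumes bitpos % 8 + bitlen <= 64
   so the shifted value still fits in the temporary.  */

static void
demo_encode_bits_le (unsigned char *ptr, unsigned long long val,
		     unsigned int bitpos, unsigned int bitlen)
{
  unsigned int first_byte = bitpos / 8;
  unsigned int shift = bitpos % 8;

  /* 1) Keep only BITLEN bits of VAL and shift them into position.  */
  if (bitlen < 64)
    val &= (1ull << bitlen) - 1;
  val <<= shift;

  /* 2) Clear the destination bit region.  */
  for (unsigned int i = 0; i < bitlen; i++)
    {
      unsigned int bit = bitpos + i;
      ptr[bit / 8] &= (unsigned char) ~(1u << (bit % 8));
    }

  /* 3) OR the shifted bytes into the cleared region.  */
  for (unsigned int i = 0; i < (shift + bitlen + 7) / 8; i++)
    ptr[first_byte + i] |= (unsigned char) (val >> (8 * i));
}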
1945 /* Sorting function for store_immediate_info objects.
1946 Sorts them by bitposition. */
1948 static int
1949 sort_by_bitpos (const void *x, const void *y)
1951 store_immediate_info *const *tmp = (store_immediate_info * const *) x;
1952 store_immediate_info *const *tmp2 = (store_immediate_info * const *) y;
1954 if ((*tmp)->bitpos < (*tmp2)->bitpos)
1955 return -1;
1956 else if ((*tmp)->bitpos > (*tmp2)->bitpos)
1957 return 1;
1958 else
1959 /* If the bit positions are the same, use the order, which is guaranteed
1960 to be different. */
1961 return (*tmp)->order - (*tmp2)->order;
1964 /* Sorting function for store_immediate_info objects.
1965 Sorts them by the order field. */
1967 static int
1968 sort_by_order (const void *x, const void *y)
1970 store_immediate_info *const *tmp = (store_immediate_info * const *) x;
1971 store_immediate_info *const *tmp2 = (store_immediate_info * const *) y;
1973 if ((*tmp)->order < (*tmp2)->order)
1974 return -1;
1975 else if ((*tmp)->order > (*tmp2)->order)
1976 return 1;
1978 gcc_unreachable ();
1981 /* Initialize a merged_store_group object from a store_immediate_info
1982 object. */
1984 merged_store_group::merged_store_group (store_immediate_info *info)
1986 start = info->bitpos;
1987 width = info->bitsize;
1988 bitregion_start = info->bitregion_start;
1989 bitregion_end = info->bitregion_end;
1990 /* VAL has memory allocated for it in apply_stores once the group
1991 width has been finalized. */
1992 val = NULL;
1993 mask = NULL;
1994 bit_insertion = info->rhs_code == BIT_INSERT_EXPR;
1995 string_concatenation = info->rhs_code == STRING_CST;
1996 only_constants = info->rhs_code == INTEGER_CST;
1997 consecutive = true;
1998 first_nonmergeable_order = ~0U;
1999 lp_nr = info->lp_nr;
2000 unsigned HOST_WIDE_INT align_bitpos = 0;
2001 get_object_alignment_1 (gimple_assign_lhs (info->stmt),
2002 &align, &align_bitpos);
2003 align_base = start - align_bitpos;
2004 for (int i = 0; i < 2; ++i)
2006 store_operand_info &op = info->ops[i];
2007 if (op.base_addr == NULL_TREE)
2009 load_align[i] = 0;
2010 load_align_base[i] = 0;
2012 else
2014 get_object_alignment_1 (op.val, &load_align[i], &align_bitpos);
2015 load_align_base[i] = op.bitpos - align_bitpos;
2018 stores.create (1);
2019 stores.safe_push (info);
2020 last_stmt = info->stmt;
2021 last_order = info->order;
2022 first_stmt = last_stmt;
2023 first_order = last_order;
2024 buf_size = 0;
2027 merged_store_group::~merged_store_group ()
2029 if (val)
2030 XDELETEVEC (val);
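/* Editor's illustration (not part of the original file): what the
   align/align_base pair recorded in the constructor above encodes.
   If KNOWN_BITPOS is ALIGN-bit aligned apart from an offset of
   ALIGN_BITPOS bits, then align_base = KNOWN_BITPOS - ALIGN_BITPOS,
   and the alignment that can be assumed at any other bit position is
   the lowest set bit of its distance from that base (or ALIGN itself
   when the distance is a multiple of ALIGN).  ALIGN must be a power
   of two.  The helper name is made up.  */

static unsigned long long
demo_align_at (unsigned long long align, unsigned long long align_base,
	       unsigned long long bitpos)
{
  unsigned long long misalign = (bitpos - align_base) & (align - 1);
  /* least_bit_hwi-style trick: x & -x isolates the lowest set bit.  */
  return misalign ? (misalign & -misalign) : align;
}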
2033 /* Return true if the store described by INFO can be merged into the group. */
2035 bool
2036 merged_store_group::can_be_merged_into (store_immediate_info *info)
2038 /* Do not merge bswap patterns. */
2039 if (info->rhs_code == LROTATE_EXPR)
2040 return false;
2042 if (info->lp_nr != lp_nr)
2043 return false;
2045 /* The canonical case. */
2046 if (info->rhs_code == stores[0]->rhs_code)
2047 return true;
2049 /* BIT_INSERT_EXPR is compatible with INTEGER_CST if no STRING_CST. */
2050 if (info->rhs_code == BIT_INSERT_EXPR && stores[0]->rhs_code == INTEGER_CST)
2051 return !string_concatenation;
2053 if (stores[0]->rhs_code == BIT_INSERT_EXPR && info->rhs_code == INTEGER_CST)
2054 return !string_concatenation;
2056 /* We can turn MEM_REF into BIT_INSERT_EXPR for bit-field stores, but do it
2057 only for small regions since this can generate a lot of instructions. */
2058 if (info->rhs_code == MEM_REF
2059 && (stores[0]->rhs_code == INTEGER_CST
2060 || stores[0]->rhs_code == BIT_INSERT_EXPR)
2061 && info->bitregion_start == stores[0]->bitregion_start
2062 && info->bitregion_end == stores[0]->bitregion_end
2063 && info->bitregion_end - info->bitregion_start <= MAX_FIXED_MODE_SIZE)
2064 return !string_concatenation;
2066 if (stores[0]->rhs_code == MEM_REF
2067 && (info->rhs_code == INTEGER_CST
2068 || info->rhs_code == BIT_INSERT_EXPR)
2069 && info->bitregion_start == stores[0]->bitregion_start
2070 && info->bitregion_end == stores[0]->bitregion_end
2071 && info->bitregion_end - info->bitregion_start <= MAX_FIXED_MODE_SIZE)
2072 return !string_concatenation;
2074 /* STRING_CST is compatible with INTEGER_CST if no BIT_INSERT_EXPR. */
2075 if (info->rhs_code == STRING_CST
2076 && stores[0]->rhs_code == INTEGER_CST
2077 && stores[0]->bitsize == CHAR_BIT)
2078 return !bit_insertion;
2080 if (stores[0]->rhs_code == STRING_CST
2081 && info->rhs_code == INTEGER_CST
2082 && info->bitsize == CHAR_BIT)
2083 return !bit_insertion;
2085 return false;
2088 /* Helper method for merge_into and merge_overlapping to do
2089 the common part. */
2091 void
2092 merged_store_group::do_merge (store_immediate_info *info)
2094 bitregion_start = MIN (bitregion_start, info->bitregion_start);
2095 bitregion_end = MAX (bitregion_end, info->bitregion_end);
2097 unsigned int this_align;
2098 unsigned HOST_WIDE_INT align_bitpos = 0;
2099 get_object_alignment_1 (gimple_assign_lhs (info->stmt),
2100 &this_align, &align_bitpos);
2101 if (this_align > align)
2103 align = this_align;
2104 align_base = info->bitpos - align_bitpos;
2106 for (int i = 0; i < 2; ++i)
2108 store_operand_info &op = info->ops[i];
2109 if (!op.base_addr)
2110 continue;
2112 get_object_alignment_1 (op.val, &this_align, &align_bitpos);
2113 if (this_align > load_align[i])
2115 load_align[i] = this_align;
2116 load_align_base[i] = op.bitpos - align_bitpos;
2120 gimple *stmt = info->stmt;
2121 stores.safe_push (info);
2122 if (info->order > last_order)
2124 last_order = info->order;
2125 last_stmt = stmt;
2127 else if (info->order < first_order)
2129 first_order = info->order;
2130 first_stmt = stmt;
2133 if (info->bitpos != start + width)
2134 consecutive = false;
2136 /* We need to use bit insertion if there is any bit-field. */
2137 if (info->rhs_code == BIT_INSERT_EXPR)
2139 bit_insertion = true;
2140 gcc_assert (!string_concatenation);
2143 /* We want to use concatenation if there is any string. */
2144 if (info->rhs_code == STRING_CST)
2146 string_concatenation = true;
2147 gcc_assert (!bit_insertion);
2150 /* But we cannot use it if we don't have consecutive stores. */
2151 if (!consecutive)
2152 string_concatenation = false;
2154 if (info->rhs_code != INTEGER_CST)
2155 only_constants = false;
2158 /* Merge a store recorded by INFO into this merged store.
2159 The store is not overlapping with the existing recorded
2160 stores. */
2162 void
2163 merged_store_group::merge_into (store_immediate_info *info)
2165 do_merge (info);
2167 /* Make sure we're inserting in the position we think we're inserting. */
2168 gcc_assert (info->bitpos >= start + width
2169 && info->bitregion_start <= bitregion_end);
2171 width = info->bitpos + info->bitsize - start;
2174 /* Merge a store described by INFO into this merged store.
2175 INFO overlaps in some way with the current store (i.e. it is not contiguous;
2176 the contiguous case is handled by merged_store_group::merge_into). */
2178 void
2179 merged_store_group::merge_overlapping (store_immediate_info *info)
2181 do_merge (info);
2183 /* If the store extends the size of the group, extend the width. */
2184 if (info->bitpos + info->bitsize > start + width)
2185 width = info->bitpos + info->bitsize - start;
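/* Editor's illustration (not part of the original file): the width
   bookkeeping done by merge_into and merge_overlapping above, reduced
   to a plain (start, width) bit interval.  The demo_* names are made
   up.  */

struct demo_group_extent { unsigned long long start, width; };

/* Consecutive (or later) store: it begins at or after the current end,
   so the width simply grows to cover it (cf. the assertion in
   merge_into that bitpos >= start + width).  */
static void
demo_merge_into (struct demo_group_extent *g,
		 unsigned long long bitpos, unsigned long long bitsize)
{
  g->width = bitpos + bitsize - g->start;
}

/* Overlapping store: only extend the width if the store reaches past
   the current end of the group.  */
static void
demo_merge_overlapping (struct demo_group_extent *g,
			unsigned long long bitpos, unsigned long long bitsize)
{
  if (bitpos + bitsize > g->start + g->width)
    g->width = bitpos + bitsize - g->start;
}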
2188 /* Go through all the recorded stores in this group in program order and
2189 apply their values to the VAL byte array to create the final merged
2190 value. Return true if the operation succeeded. */
2192 bool
2193 merged_store_group::apply_stores ()
2195 store_immediate_info *info;
2196 unsigned int i;
2198 /* Make sure we have more than one store in the group, otherwise we cannot
2199 merge anything. */
2200 if (bitregion_start % BITS_PER_UNIT != 0
2201 || bitregion_end % BITS_PER_UNIT != 0
2202 || stores.length () == 1)
2203 return false;
2205 buf_size = (bitregion_end - bitregion_start) / BITS_PER_UNIT;
2207 /* Really do string concatenation for large strings only. */
2208 if (buf_size <= MOVE_MAX)
2209 string_concatenation = false;
2211 /* Create a power-of-2-sized buffer for native_encode_expr. */
2212 if (!string_concatenation)
2213 buf_size = 1 << ceil_log2 (buf_size);
2215 val = XNEWVEC (unsigned char, 2 * buf_size);
2216 mask = val + buf_size;
2217 memset (val, 0, buf_size);
2218 memset (mask, ~0U, buf_size);
2220 stores.qsort (sort_by_order);
2222 FOR_EACH_VEC_ELT (stores, i, info)
2224 unsigned int pos_in_buffer = info->bitpos - bitregion_start;
2225 tree cst;
2226 if (info->ops[0].val && info->ops[0].base_addr == NULL_TREE)
2227 cst = info->ops[0].val;
2228 else if (info->ops[1].val && info->ops[1].base_addr == NULL_TREE)
2229 cst = info->ops[1].val;
2230 else
2231 cst = NULL_TREE;
2232 bool ret = true;
2233 if (cst && info->rhs_code != BIT_INSERT_EXPR)
2234 ret = encode_tree_to_bitpos (cst, val, info->bitsize, pos_in_buffer,
2235 buf_size);
2236 unsigned char *m = mask + (pos_in_buffer / BITS_PER_UNIT);
2237 if (BYTES_BIG_ENDIAN)
2238 clear_bit_region_be (m, (BITS_PER_UNIT - 1
2239 - (pos_in_buffer % BITS_PER_UNIT)),
2240 info->bitsize);
2241 else
2242 clear_bit_region (m, pos_in_buffer % BITS_PER_UNIT, info->bitsize);
2243 if (cst && dump_file && (dump_flags & TDF_DETAILS))
2245 if (ret)
2247 fputs ("After writing ", dump_file);
2248 print_generic_expr (dump_file, cst, TDF_NONE);
2249 fprintf (dump_file, " of size " HOST_WIDE_INT_PRINT_DEC
2250 " at position %d\n", info->bitsize, pos_in_buffer);
2251 fputs (" the merged value contains ", dump_file);
2252 dump_char_array (dump_file, val, buf_size);
2253 fputs (" the merged mask contains ", dump_file);
2254 dump_char_array (dump_file, mask, buf_size);
2255 if (bit_insertion)
2256 fputs (" bit insertion is required\n", dump_file);
2257 if (string_concatenation)
2258 fputs (" string concatenation is required\n", dump_file);
2260 else
2261 fprintf (dump_file, "Failed to merge stores\n");
2263 if (!ret)
2264 return false;
2266 stores.qsort (sort_by_bitpos);
2267 return true;
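/* Editor's illustration (not part of the original file): the val/mask
   pair built by apply_stores, reduced to constant stores of at most 64
   bits.  VAL accumulates the merged value; MASK starts as all ones and
   keeps a 1 only for bits that no recorded store wrote, so a byte whose
   mask is still all ones was never written and can be skipped as
   padding when the group is later split.  The demo_* names are made
   up.  */

#include <string.h>

static void
demo_init_val_mask (unsigned char *val, unsigned char *mask,
		    unsigned int buf_size)
{
  memset (val, 0, buf_size);
  memset (mask, 0xff, buf_size);
}

/* Apply a constant store of BITLEN bits of VALUE at bit BITPOS
   (little-endian bit numbering) to the two buffers.  */
static void
demo_apply_const_store (unsigned char *val, unsigned char *mask,
			unsigned long long value,
			unsigned int bitpos, unsigned int bitlen)
{
  for (unsigned int i = 0; i < bitlen; i++)
    {
      unsigned int bit = bitpos + i;
      unsigned char m = (unsigned char) (1u << (bit % 8));
      mask[bit / 8] &= (unsigned char) ~m;	/* This bit is now covered.  */
      if ((value >> i) & 1)
	val[bit / 8] |= m;
      else
	val[bit / 8] &= (unsigned char) ~m;
    }
}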
2270 /* Structure describing the store chain. */
2272 class imm_store_chain_info
2274 public:
2275 /* Doubly-linked list that imposes an order on chain processing.
2276 PNXP (prev's next pointer) points to the head of a list, or to
2277 the next field in the previous chain in the list.
2278 See pass_store_merging::m_stores_head for more rationale. */
2279 imm_store_chain_info *next, **pnxp;
2280 tree base_addr;
2281 auto_vec<store_immediate_info *> m_store_info;
2282 auto_vec<merged_store_group *> m_merged_store_groups;
2284 imm_store_chain_info (imm_store_chain_info *&inspt, tree b_a)
2285 : next (inspt), pnxp (&inspt), base_addr (b_a)
2287 inspt = this;
2288 if (next)
2290 gcc_checking_assert (pnxp == next->pnxp);
2291 next->pnxp = &next;
2294 ~imm_store_chain_info ()
2296 *pnxp = next;
2297 if (next)
2299 gcc_checking_assert (&next == next->pnxp);
2300 next->pnxp = pnxp;
2303 bool terminate_and_process_chain ();
2304 bool try_coalesce_bswap (merged_store_group *, unsigned int, unsigned int,
2305 unsigned int);
2306 bool coalesce_immediate_stores ();
2307 bool output_merged_store (merged_store_group *);
2308 bool output_merged_stores ();
2311 const pass_data pass_data_tree_store_merging = {
2312 GIMPLE_PASS, /* type */
2313 "store-merging", /* name */
2314 OPTGROUP_NONE, /* optinfo_flags */
2315 TV_GIMPLE_STORE_MERGING, /* tv_id */
2316 PROP_ssa, /* properties_required */
2317 0, /* properties_provided */
2318 0, /* properties_destroyed */
2319 0, /* todo_flags_start */
2320 TODO_update_ssa, /* todo_flags_finish */
2323 class pass_store_merging : public gimple_opt_pass
2325 public:
2326 pass_store_merging (gcc::context *ctxt)
2327 : gimple_opt_pass (pass_data_tree_store_merging, ctxt), m_stores_head (),
2328 m_n_chains (0), m_n_stores (0)
2332 /* Pass not supported for PDP-endian, nor for insane hosts or
2333 target character sizes where native_{encode,interpret}_expr
2334 doesn't work properly. */
2335 virtual bool
2336 gate (function *)
2338 return flag_store_merging
2339 && BYTES_BIG_ENDIAN == WORDS_BIG_ENDIAN
2340 && CHAR_BIT == 8
2341 && BITS_PER_UNIT == 8;
2344 virtual unsigned int execute (function *);
2346 private:
2347 hash_map<tree_operand_hash, class imm_store_chain_info *> m_stores;
2349 /* Form a doubly-linked stack of the elements of m_stores, so that
2350 we can iterate over them in a predictable way. Using this order
2351 avoids extraneous differences in the compiler output just because
2352 of tree pointer variations (e.g. different chains end up in
2353 different positions of m_stores, so they are handled in different
2354 orders, so they allocate or release SSA names in different
2355 orders, and when they get reused, subsequent passes end up
2356 getting different SSA names, which may ultimately change
2357 decisions when going out of SSA). */
2358 imm_store_chain_info *m_stores_head;
2360 /* The number of store chains currently tracked. */
2361 unsigned m_n_chains;
2362 /* The number of stores currently tracked. */
2363 unsigned m_n_stores;
2365 bool process_store (gimple *);
2366 bool terminate_and_process_chain (imm_store_chain_info *);
2367 bool terminate_all_aliasing_chains (imm_store_chain_info **, gimple *);
2368 bool terminate_and_process_all_chains ();
2369 }; // class pass_store_merging
2371 /* Terminate and process all recorded chains. Return true if any changes
2372 were made. */
2374 bool
2375 pass_store_merging::terminate_and_process_all_chains ()
2377 bool ret = false;
2378 while (m_stores_head)
2379 ret |= terminate_and_process_chain (m_stores_head);
2380 gcc_assert (m_stores.is_empty ());
2381 return ret;
2384 /* Terminate all chains that are affected by the statement STMT.
2385 CHAIN_INFO is the chain we should ignore from the checks if
2386 non-NULL. Return true if any changes were made. */
2388 bool
2389 pass_store_merging::terminate_all_aliasing_chains (imm_store_chain_info
2390 **chain_info,
2391 gimple *stmt)
2393 bool ret = false;
2395 /* If the statement doesn't touch memory it can't alias. */
2396 if (!gimple_vuse (stmt))
2397 return false;
2399 tree store_lhs = gimple_store_p (stmt) ? gimple_get_lhs (stmt) : NULL_TREE;
2400 ao_ref store_lhs_ref;
2401 ao_ref_init (&store_lhs_ref, store_lhs);
2402 for (imm_store_chain_info *next = m_stores_head, *cur = next; cur; cur = next)
2404 next = cur->next;
2406 /* We already checked all the stores in chain_info and terminated the
2407 chain if necessary. Skip it here. */
2408 if (chain_info && *chain_info == cur)
2409 continue;
2411 store_immediate_info *info;
2412 unsigned int i;
2413 FOR_EACH_VEC_ELT (cur->m_store_info, i, info)
2415 tree lhs = gimple_assign_lhs (info->stmt);
2416 ao_ref lhs_ref;
2417 ao_ref_init (&lhs_ref, lhs);
2418 if (ref_maybe_used_by_stmt_p (stmt, &lhs_ref)
2419 || stmt_may_clobber_ref_p_1 (stmt, &lhs_ref)
2420 || (store_lhs && refs_may_alias_p_1 (&store_lhs_ref,
2421 &lhs_ref, false)))
2423 if (dump_file && (dump_flags & TDF_DETAILS))
2425 fprintf (dump_file, "stmt causes chain termination:\n");
2426 print_gimple_stmt (dump_file, stmt, 0);
2428 ret |= terminate_and_process_chain (cur);
2429 break;
2434 return ret;
2437 /* Helper function. Terminate the recorded chain storing to base object
2438 BASE. Return true if the merging and output was successful. The m_stores
2439 entry is removed after the processing in any case. */
2441 bool
2442 pass_store_merging::terminate_and_process_chain (imm_store_chain_info *chain_info)
2444 m_n_stores -= chain_info->m_store_info.length ();
2445 m_n_chains--;
2446 bool ret = chain_info->terminate_and_process_chain ();
2447 m_stores.remove (chain_info->base_addr);
2448 delete chain_info;
2449 return ret;
2452 /* Return true if stmts in between FIRST (inclusive) and LAST (exclusive)
2453 may clobber REF. FIRST and LAST must have non-NULL vdef. We want to
2454 be able to sink load of REF across stores between FIRST and LAST, up
2455 to right before LAST. */
2457 bool
2458 stmts_may_clobber_ref_p (gimple *first, gimple *last, tree ref)
2460 ao_ref r;
2461 ao_ref_init (&r, ref);
2462 unsigned int count = 0;
2463 tree vop = gimple_vdef (last);
2464 gimple *stmt;
2466 /* Return true conservatively if the basic blocks are different. */
2467 if (gimple_bb (first) != gimple_bb (last))
2468 return true;
2472 stmt = SSA_NAME_DEF_STMT (vop);
2473 if (stmt_may_clobber_ref_p_1 (stmt, &r))
2474 return true;
2475 if (gimple_store_p (stmt)
2476 && refs_anti_dependent_p (ref, gimple_get_lhs (stmt)))
2477 return true;
2478 /* Avoid quadratic compile time by bounding the number of checks
2479 we perform. */
2480 if (++count > MAX_STORE_ALIAS_CHECKS)
2481 return true;
2482 vop = gimple_vuse (stmt);
2484 while (stmt != first);
2486 return false;
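/* Editor's illustration (not part of the original file): the bounded,
   conservative shape of the walk above, with the GIMPLE virtual
   operand chain replaced by a plain singly linked list and the alias
   oracle replaced by a precomputed flag.  Any doubt (a possible
   clobber, or a walk longer than the budget) answers "true".  The
   demo_* names and DEMO_MAX_ALIAS_CHECKS are made up.  */

#define DEMO_MAX_ALIAS_CHECKS 64

struct demo_stmt
{
  struct demo_stmt *prev;	/* Previous statement in the chain.  */
  int may_clobber_ref;		/* Result of the (elided) alias query.  */
};

static int
demo_stmts_may_clobber_p (struct demo_stmt *first, struct demo_stmt *last)
{
  unsigned int count = 0;
  for (struct demo_stmt *s = last; s != first; s = s->prev)
    {
      if (s->may_clobber_ref)
	return 1;
      if (++count > DEMO_MAX_ALIAS_CHECKS)
	return 1;		/* Bound the work; be conservative.  */
    }
  return 0;
}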
2489 /* Return true if INFO->ops[IDX] is mergeable with the
2490 corresponding loads already in MERGED_STORE group.
2491 BASE_ADDR is the base address of the whole store group. */
2493 bool
2494 compatible_load_p (merged_store_group *merged_store,
2495 store_immediate_info *info,
2496 tree base_addr, int idx)
2498 store_immediate_info *infof = merged_store->stores[0];
2499 if (!info->ops[idx].base_addr
2500 || maybe_ne (info->ops[idx].bitpos - infof->ops[idx].bitpos,
2501 info->bitpos - infof->bitpos)
2502 || !operand_equal_p (info->ops[idx].base_addr,
2503 infof->ops[idx].base_addr, 0))
2504 return false;
2506 store_immediate_info *infol = merged_store->stores.last ();
2507 tree load_vuse = gimple_vuse (info->ops[idx].stmt);
2508 /* In this case all vuses should be the same, e.g.
2509 _1 = s.a; _2 = s.b; _3 = _1 | 1; t.a = _3; _4 = _2 | 2; t.b = _4;
2511 _1 = s.a; _2 = s.b; t.a = _1; t.b = _2;
2512 and we can emit the coalesced load next to any of those loads. */
2513 if (gimple_vuse (infof->ops[idx].stmt) == load_vuse
2514 && gimple_vuse (infol->ops[idx].stmt) == load_vuse)
2515 return true;
2517 /* Otherwise, at least for now require that the load has the same
2518 vuse as the store. See following examples. */
2519 if (gimple_vuse (info->stmt) != load_vuse)
2520 return false;
2522 if (gimple_vuse (infof->stmt) != gimple_vuse (infof->ops[idx].stmt)
2523 || (infof != infol
2524 && gimple_vuse (infol->stmt) != gimple_vuse (infol->ops[idx].stmt)))
2525 return false;
2527 /* If the load is from the same location as the store, already
2528 the construction of the immediate chain info guarantees no intervening
2529 stores, so no further checks are needed. Example:
2530 _1 = s.a; _2 = _1 & -7; s.a = _2; _3 = s.b; _4 = _3 & -7; s.b = _4; */
2531 if (known_eq (info->ops[idx].bitpos, info->bitpos)
2532 && operand_equal_p (info->ops[idx].base_addr, base_addr, 0))
2533 return true;
2535 /* Otherwise, we need to punt if any of the loads can be clobbered by any
2536 of the stores in the group, or any other stores in between those.
2537 Previous calls to compatible_load_p ensured that for all the
2538 merged_store->stores IDX loads, no stmts starting with
2539 merged_store->first_stmt and ending right before merged_store->last_stmt
2540 clobbers those loads. */
2541 gimple *first = merged_store->first_stmt;
2542 gimple *last = merged_store->last_stmt;
2543 unsigned int i;
2544 store_immediate_info *infoc;
2545 /* The stores are sorted by increasing store bitpos, so if info->stmt store
2546 comes before the so far first load, we'll be changing
2547 merged_store->first_stmt. In that case we need to give up if
2548 any of the earlier processed loads could be clobbered by stmts in the
2549 new range. */
2550 if (info->order < merged_store->first_order)
2552 FOR_EACH_VEC_ELT (merged_store->stores, i, infoc)
2553 if (stmts_may_clobber_ref_p (info->stmt, first, infoc->ops[idx].val))
2554 return false;
2555 first = info->stmt;
2557 /* Similarly, we could change merged_store->last_stmt, so ensure
2558 in that case no stmts in the new range clobber any of the earlier
2559 processed loads. */
2560 else if (info->order > merged_store->last_order)
2562 FOR_EACH_VEC_ELT (merged_store->stores, i, infoc)
2563 if (stmts_may_clobber_ref_p (last, info->stmt, infoc->ops[idx].val))
2564 return false;
2565 last = info->stmt;
2567 /* And finally, we'd be adding a new load to the set, ensure it isn't
2568 clobbered in the new range. */
2569 if (stmts_may_clobber_ref_p (first, last, info->ops[idx].val))
2570 return false;
2572 /* Otherwise, we are looking for:
2573 _1 = s.a; _2 = _1 ^ 15; t.a = _2; _3 = s.b; _4 = _3 ^ 15; t.b = _4;
2575 _1 = s.a; t.a = _1; _2 = s.b; t.b = _2; */
2576 return true;
2579 /* Add all refs loaded to compute VAL to REFS vector. */
2581 void
2582 gather_bswap_load_refs (vec<tree> *refs, tree val)
2584 if (TREE_CODE (val) != SSA_NAME)
2585 return;
2587 gimple *stmt = SSA_NAME_DEF_STMT (val);
2588 if (!is_gimple_assign (stmt))
2589 return;
2591 if (gimple_assign_load_p (stmt))
2593 refs->safe_push (gimple_assign_rhs1 (stmt));
2594 return;
2597 switch (gimple_assign_rhs_class (stmt))
2599 case GIMPLE_BINARY_RHS:
2600 gather_bswap_load_refs (refs, gimple_assign_rhs2 (stmt));
2601 /* FALLTHRU */
2602 case GIMPLE_UNARY_RHS:
2603 gather_bswap_load_refs (refs, gimple_assign_rhs1 (stmt));
2604 break;
2605 default:
2606 gcc_unreachable ();
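/* Editor's illustration (not part of the original file): the recursive
   collection done by gather_bswap_load_refs, with the GIMPLE def chain
   replaced by a tiny expression tree.  Loads contribute their
   reference; unary and binary operations recurse into their operands.
   All demo_* names are made up.  */

enum demo_expr_kind { DEMO_LOAD, DEMO_UNARY, DEMO_BINARY };

struct demo_expr
{
  enum demo_expr_kind kind;
  const char *ref;		/* Meaningful for DEMO_LOAD only.  */
  struct demo_expr *op0, *op1;	/* Sub-expressions, may be null.  */
};

static void
demo_gather_load_refs (const char **refs, unsigned int *n_refs,
		       const struct demo_expr *expr)
{
  if (expr == 0)
    return;
  switch (expr->kind)
    {
    case DEMO_LOAD:
      refs[(*n_refs)++] = expr->ref;
      break;
    case DEMO_BINARY:
      demo_gather_load_refs (refs, n_refs, expr->op1);
      /* FALLTHRU */
    case DEMO_UNARY:
      demo_gather_load_refs (refs, n_refs, expr->op0);
      break;
    }
}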
2610 /* Check if there are any stores in M_STORE_INFO after index I
2611 (where M_STORE_INFO must be sorted by sort_by_bitpos) that overlap
2612 a potential group ending with END that have their order
2613 smaller than LAST_ORDER. ALL_INTEGER_CST_P is true if
2614 all the stores already merged and the one under consideration
2615 have rhs_code of INTEGER_CST. Return true if there are no such stores.
2616 Consider:
2617 MEM[(long long int *)p_28] = 0;
2618 MEM[(long long int *)p_28 + 8B] = 0;
2619 MEM[(long long int *)p_28 + 16B] = 0;
2620 MEM[(long long int *)p_28 + 24B] = 0;
2621 _129 = (int) _130;
2622 MEM[(int *)p_28 + 8B] = _129;
2623 MEM[(int *)p_28].a = -1;
2624 We already have
2625 MEM[(long long int *)p_28] = 0;
2626 MEM[(int *)p_28].a = -1;
2627 stmts in the current group and need to consider if it is safe to
2628 add MEM[(long long int *)p_28 + 8B] = 0; store into the same group.
2629 There is an overlap between that store and the MEM[(int *)p_28 + 8B] = _129;
2630 store though, so if we add the MEM[(long long int *)p_28 + 8B] = 0;
2631 into the group and merging of those 3 stores is successful, merged
2632 stmts will be emitted at the latest store from that group, i.e.
2633 LAST_ORDER, which is the MEM[(int *)p_28].a = -1; store.
2634 The MEM[(int *)p_28 + 8B] = _129; store that originally follows
2635 the MEM[(long long int *)p_28 + 8B] = 0; would now be before it,
2636 so we need to refuse merging MEM[(long long int *)p_28 + 8B] = 0;
2637 into the group. That way it will be its own store group and will
2638 not be touched. If ALL_INTEGER_CST_P and there are overlapping
2639 INTEGER_CST stores, those are mergeable using merge_overlapping,
2640 so don't return false for those.
2642 Similarly, check stores from FIRST_EARLIER (inclusive) to END_EARLIER
2643 (exclusive), whether they don't overlap the bitrange START to END
2644 and have order in between FIRST_ORDER and LAST_ORDER. This is to
2645 prevent merging in cases like:
2646 MEM <char[12]> [&b + 8B] = {};
2647 MEM[(short *) &b] = 5;
2648 _5 = *x_4(D);
2649 MEM <long long unsigned int> [&b + 2B] = _5;
2650 MEM[(char *)&b + 16B] = 88;
2651 MEM[(int *)&b + 20B] = 1;
2652 The = {} store comes in sort_by_bitpos before the = 88 store, and can't
2653 be merged with it, because the = _5 store overlaps these and is in between
2654 them in sort_by_order ordering. If it was merged, the merged store would
2655 go after the = _5 store and thus change behavior. */
2657 static bool
2658 check_no_overlap (vec<store_immediate_info *> m_store_info, unsigned int i,
2659 bool all_integer_cst_p, unsigned int first_order,
2660 unsigned int last_order, unsigned HOST_WIDE_INT start,
2661 unsigned HOST_WIDE_INT end, unsigned int first_earlier,
2662 unsigned end_earlier)
2664 unsigned int len = m_store_info.length ();
2665 for (unsigned int j = first_earlier; j < end_earlier; j++)
2667 store_immediate_info *info = m_store_info[j];
2668 if (info->order > first_order
2669 && info->order < last_order
2670 && info->bitpos + info->bitsize > start)
2671 return false;
2673 for (++i; i < len; ++i)
2675 store_immediate_info *info = m_store_info[i];
2676 if (info->bitpos >= end)
2677 break;
2678 if (info->order < last_order
2679 && (!all_integer_cst_p || info->rhs_code != INTEGER_CST))
2680 return false;
2682 return true;
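/* Editor's illustration (not part of the original file): the two
   overlap checks above on plain records, without the INTEGER_CST
   special case (all_integer_cst_p).  STORES[0..N-1] is sorted by bit
   position, like m_store_info after sort_by_bitpos.  The demo_* names
   are made up.  */

struct demo_store_rec
{
  unsigned long long bitpos, bitsize;
  unsigned int order;		/* Original statement order.  */
};

static int
demo_check_no_overlap (const struct demo_store_rec *stores, unsigned int n,
		       unsigned int i, unsigned int first_order,
		       unsigned int last_order,
		       unsigned long long start, unsigned long long end,
		       unsigned int first_earlier, unsigned int end_earlier)
{
  /* Earlier-by-bitpos stores must not reach into [START, END) while
     being ordered strictly between the group's first and last store.  */
  for (unsigned int j = first_earlier; j < end_earlier; j++)
    if (stores[j].order > first_order
	&& stores[j].order < last_order
	&& stores[j].bitpos + stores[j].bitsize > start)
      return 0;
  /* Later-by-bitpos stores that overlap the group must not be ordered
     before the group's last store, or the merged store emitted at
     LAST_ORDER would be moved across them.  */
  for (unsigned int j = i + 1; j < n; j++)
    {
      if (stores[j].bitpos >= end)
	break;			/* Sorted by bitpos: nothing else overlaps.  */
      if (stores[j].order < last_order)
	return 0;
    }
  return 1;
}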
2685 /* Return true if m_store_info[first] and at least one following store
2686 form a group that stores a try_size-bit value which is byte swapped
2687 from a memory load or some value, or is such a value taken unchanged.
2688 This uses the bswap pass APIs. */
2690 bool
2691 imm_store_chain_info::try_coalesce_bswap (merged_store_group *merged_store,
2692 unsigned int first,
2693 unsigned int try_size,
2694 unsigned int first_earlier)
2696 unsigned int len = m_store_info.length (), last = first;
2697 unsigned HOST_WIDE_INT width = m_store_info[first]->bitsize;
2698 if (width >= try_size)
2699 return false;
2700 for (unsigned int i = first + 1; i < len; ++i)
2702 if (m_store_info[i]->bitpos != m_store_info[first]->bitpos + width
2703 || m_store_info[i]->lp_nr != merged_store->lp_nr
2704 || m_store_info[i]->ins_stmt == NULL)
2705 return false;
2706 width += m_store_info[i]->bitsize;
2707 if (width >= try_size)
2709 last = i;
2710 break;
2713 if (width != try_size)
2714 return false;
2716 bool allow_unaligned
2717 = !STRICT_ALIGNMENT && param_store_merging_allow_unaligned;
2718 /* Punt if the combined store would not be aligned and we need alignment. */
2719 if (!allow_unaligned)
2721 unsigned int align = merged_store->align;
2722 unsigned HOST_WIDE_INT align_base = merged_store->align_base;
2723 for (unsigned int i = first + 1; i <= last; ++i)
2725 unsigned int this_align;
2726 unsigned HOST_WIDE_INT align_bitpos = 0;
2727 get_object_alignment_1 (gimple_assign_lhs (m_store_info[i]->stmt),
2728 &this_align, &align_bitpos);
2729 if (this_align > align)
2731 align = this_align;
2732 align_base = m_store_info[i]->bitpos - align_bitpos;
2735 unsigned HOST_WIDE_INT align_bitpos
2736 = (m_store_info[first]->bitpos - align_base) & (align - 1);
2737 if (align_bitpos)
2738 align = least_bit_hwi (align_bitpos);
2739 if (align < try_size)
2740 return false;
2743 tree type;
2744 switch (try_size)
2746 case 16: type = uint16_type_node; break;
2747 case 32: type = uint32_type_node; break;
2748 case 64: type = uint64_type_node; break;
2749 default: gcc_unreachable ();
2751 struct symbolic_number n;
2752 gimple *ins_stmt = NULL;
2753 int vuse_store = -1;
2754 unsigned int first_order = merged_store->first_order;
2755 unsigned int last_order = merged_store->last_order;
2756 gimple *first_stmt = merged_store->first_stmt;
2757 gimple *last_stmt = merged_store->last_stmt;
2758 unsigned HOST_WIDE_INT end = merged_store->start + merged_store->width;
2759 store_immediate_info *infof = m_store_info[first];
2761 for (unsigned int i = first; i <= last; ++i)
2763 store_immediate_info *info = m_store_info[i];
2764 struct symbolic_number this_n = info->n;
2765 this_n.type = type;
2766 if (!this_n.base_addr)
2767 this_n.range = try_size / BITS_PER_UNIT;
2768 else
2769 /* Update vuse in case it has been changed by output_merged_stores. */
2770 this_n.vuse = gimple_vuse (info->ins_stmt);
2771 unsigned int bitpos = info->bitpos - infof->bitpos;
2772 if (!do_shift_rotate (LSHIFT_EXPR, &this_n,
2773 BYTES_BIG_ENDIAN
2774 ? try_size - info->bitsize - bitpos
2775 : bitpos))
2776 return false;
2777 if (this_n.base_addr && vuse_store)
2779 unsigned int j;
2780 for (j = first; j <= last; ++j)
2781 if (this_n.vuse == gimple_vuse (m_store_info[j]->stmt))
2782 break;
2783 if (j > last)
2785 if (vuse_store == 1)
2786 return false;
2787 vuse_store = 0;
2790 if (i == first)
2792 n = this_n;
2793 ins_stmt = info->ins_stmt;
2795 else
2797 if (n.base_addr && n.vuse != this_n.vuse)
2799 if (vuse_store == 0)
2800 return false;
2801 vuse_store = 1;
2803 if (info->order > last_order)
2805 last_order = info->order;
2806 last_stmt = info->stmt;
2808 else if (info->order < first_order)
2810 first_order = info->order;
2811 first_stmt = info->stmt;
2813 end = MAX (end, info->bitpos + info->bitsize);
2815 ins_stmt = perform_symbolic_merge (ins_stmt, &n, info->ins_stmt,
2816 &this_n, &n);
2817 if (ins_stmt == NULL)
2818 return false;
2822 uint64_t cmpxchg, cmpnop;
2823 find_bswap_or_nop_finalize (&n, &cmpxchg, &cmpnop);
2825 /* A complete byte swap should make the symbolic number start with
2826 the largest digit in the highest order byte. An unchanged symbolic
2827 number indicates a read with the same endianness as the target. */
2828 if (n.n != cmpnop && n.n != cmpxchg)
2829 return false;
2831 if (n.base_addr == NULL_TREE && !is_gimple_val (n.src))
2832 return false;
2834 if (!check_no_overlap (m_store_info, last, false, first_order, last_order,
2835 merged_store->start, end, first_earlier, first))
2836 return false;
2838 /* Don't handle memory copy this way if normal non-bswap processing
2839 would handle it too. */
2840 if (n.n == cmpnop && (unsigned) n.n_ops == last - first + 1)
2842 unsigned int i;
2843 for (i = first; i <= last; ++i)
2844 if (m_store_info[i]->rhs_code != MEM_REF)
2845 break;
2846 if (i == last + 1)
2847 return false;
2850 if (n.n == cmpxchg)
2851 switch (try_size)
2853 case 16:
2854 /* Will emit LROTATE_EXPR. */
2855 break;
2856 case 32:
2857 if (builtin_decl_explicit_p (BUILT_IN_BSWAP32)
2858 && optab_handler (bswap_optab, SImode) != CODE_FOR_nothing)
2859 break;
2860 return false;
2861 case 64:
2862 if (builtin_decl_explicit_p (BUILT_IN_BSWAP64)
2863 && optab_handler (bswap_optab, DImode) != CODE_FOR_nothing)
2864 break;
2865 return false;
2866 default:
2867 gcc_unreachable ();
2870 if (!allow_unaligned && n.base_addr)
2872 unsigned int align = get_object_alignment (n.src);
2873 if (align < try_size)
2874 return false;
2877 /* If each load has vuse of the corresponding store, need to verify
2878 the loads can be sunk right before the last store. */
2879 if (vuse_store == 1)
2881 auto_vec<tree, 64> refs;
2882 for (unsigned int i = first; i <= last; ++i)
2883 gather_bswap_load_refs (&refs,
2884 gimple_assign_rhs1 (m_store_info[i]->stmt));
2886 unsigned int i;
2887 tree ref;
2888 FOR_EACH_VEC_ELT (refs, i, ref)
2889 if (stmts_may_clobber_ref_p (first_stmt, last_stmt, ref))
2890 return false;
2891 n.vuse = NULL_TREE;
2894 infof->n = n;
2895 infof->ins_stmt = ins_stmt;
2896 for (unsigned int i = first; i <= last; ++i)
2898 m_store_info[i]->rhs_code = n.n == cmpxchg ? LROTATE_EXPR : NOP_EXPR;
2899 m_store_info[i]->ops[0].base_addr = NULL_TREE;
2900 m_store_info[i]->ops[1].base_addr = NULL_TREE;
2901 if (i != first)
2902 merged_store->merge_into (m_store_info[i]);
2905 return true;
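/* Editor's illustration (not part of the original file): a source-level
   view of the pattern try_coalesce_bswap targets.  The four byte stores
   in demo_store_be32_bytes write X in big-endian order; on a
   little-endian target with a bswap instruction the pass can replace
   them with the single wide store shown in demo_store_be32_merged.
   The demo_* names are made up; __builtin_bswap32 is the GCC builtin
   the emitted code relies on.  */

#include <stdint.h>
#include <string.h>

static void
demo_store_be32_bytes (unsigned char *p, uint32_t x)
{
  p[0] = (unsigned char) (x >> 24);
  p[1] = (unsigned char) (x >> 16);
  p[2] = (unsigned char) (x >> 8);
  p[3] = (unsigned char) x;
}

static void
demo_store_be32_merged (unsigned char *p, uint32_t x)
{
  uint32_t v = __builtin_bswap32 (x);	/* Byte-swapped value...  */
  memcpy (p, &v, sizeof v);		/* ...stored in one go.  */
}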
2908 /* Go through the candidate stores recorded in m_store_info and merge them
2909 into merged_store_group objects recorded into m_merged_store_groups
2910 representing the widened stores. Return true if coalescing was successful
2911 and the number of widened stores is fewer than the original number
2912 of stores. */
2914 bool
2915 imm_store_chain_info::coalesce_immediate_stores ()
2917 /* Anything less can't be processed. */
2918 if (m_store_info.length () < 2)
2919 return false;
2921 if (dump_file && (dump_flags & TDF_DETAILS))
2922 fprintf (dump_file, "Attempting to coalesce %u stores in chain\n",
2923 m_store_info.length ());
2925 store_immediate_info *info;
2926 unsigned int i, ignore = 0;
2927 unsigned int first_earlier = 0;
2928 unsigned int end_earlier = 0;
2930 /* Order the stores by the bitposition they write to. */
2931 m_store_info.qsort (sort_by_bitpos);
2933 info = m_store_info[0];
2934 merged_store_group *merged_store = new merged_store_group (info);
2935 if (dump_file && (dump_flags & TDF_DETAILS))
2936 fputs ("New store group\n", dump_file);
2938 FOR_EACH_VEC_ELT (m_store_info, i, info)
2940 unsigned HOST_WIDE_INT new_bitregion_start, new_bitregion_end;
2942 if (i <= ignore)
2943 goto done;
2945 while (first_earlier < end_earlier
2946 && (m_store_info[first_earlier]->bitpos
2947 + m_store_info[first_earlier]->bitsize
2948 <= merged_store->start))
2949 first_earlier++;
2951 /* First try to handle group of stores like:
2952 p[0] = data >> 24;
2953 p[1] = data >> 16;
2954 p[2] = data >> 8;
2955 p[3] = data;
2956 using the bswap framework. */
2957 if (info->bitpos == merged_store->start + merged_store->width
2958 && merged_store->stores.length () == 1
2959 && merged_store->stores[0]->ins_stmt != NULL
2960 && info->lp_nr == merged_store->lp_nr
2961 && info->ins_stmt != NULL)
2963 unsigned int try_size;
2964 for (try_size = 64; try_size >= 16; try_size >>= 1)
2965 if (try_coalesce_bswap (merged_store, i - 1, try_size,
2966 first_earlier))
2967 break;
2969 if (try_size >= 16)
2971 ignore = i + merged_store->stores.length () - 1;
2972 m_merged_store_groups.safe_push (merged_store);
2973 if (ignore < m_store_info.length ())
2975 merged_store = new merged_store_group (m_store_info[ignore]);
2976 end_earlier = ignore;
2978 else
2979 merged_store = NULL;
2980 goto done;
2984 new_bitregion_start
2985 = MIN (merged_store->bitregion_start, info->bitregion_start);
2986 new_bitregion_end
2987 = MAX (merged_store->bitregion_end, info->bitregion_end);
2989 if (info->order >= merged_store->first_nonmergeable_order
2990 || (((new_bitregion_end - new_bitregion_start + 1) / BITS_PER_UNIT)
2991 > (unsigned) param_store_merging_max_size))
2994 /* |---store 1---|
2995 |---store 2---|
2996 Overlapping stores. */
2997 else if (IN_RANGE (info->bitpos, merged_store->start,
2998 merged_store->start + merged_store->width - 1)
2999 /* |---store 1---||---store 2---|
3000 Handle also the consecutive INTEGER_CST stores case here,
3001 as we have here the code to deal with overlaps. */
3002 || (info->bitregion_start <= merged_store->bitregion_end
3003 && info->rhs_code == INTEGER_CST
3004 && merged_store->only_constants
3005 && merged_store->can_be_merged_into (info)))
3007 /* Only allow overlapping stores of constants. */
3008 if (info->rhs_code == INTEGER_CST
3009 && merged_store->only_constants
3010 && info->lp_nr == merged_store->lp_nr)
3012 unsigned int first_order
3013 = MIN (merged_store->first_order, info->order);
3014 unsigned int last_order
3015 = MAX (merged_store->last_order, info->order);
3016 unsigned HOST_WIDE_INT end
3017 = MAX (merged_store->start + merged_store->width,
3018 info->bitpos + info->bitsize);
3019 if (check_no_overlap (m_store_info, i, true, first_order,
3020 last_order, merged_store->start, end,
3021 first_earlier, end_earlier))
3023 /* check_no_overlap call above made sure there are no
3024 overlapping stores with non-INTEGER_CST rhs_code
3025 in between the first and last of the stores we've
3026 just merged. If there are any INTEGER_CST rhs_code
3027 stores in between, we need to merge_overlapping them
3028 even if in the sort_by_bitpos order there are other
3029 overlapping stores in between. Keep those stores as is.
3030 Example:
3031 MEM[(int *)p_28] = 0;
3032 MEM[(char *)p_28 + 3B] = 1;
3033 MEM[(char *)p_28 + 1B] = 2;
3034 MEM[(char *)p_28 + 2B] = MEM[(char *)p_28 + 6B];
3035 We can't merge the zero store with the store of two and
3036 not merge anything else, because the store of one is
3037 in the original order in between those two, but in
3038 sort_by_bitpos order it comes after the last store that
3039 we can't merge with them. We can merge the first 3 stores
3040 and keep the last store as is though. */
3041 unsigned int len = m_store_info.length ();
3042 unsigned int try_order = last_order;
3043 unsigned int first_nonmergeable_order;
3044 unsigned int k;
3045 bool last_iter = false;
3046 int attempts = 0;
3049 unsigned int max_order = 0;
3050 unsigned int min_order = first_order;
3051 unsigned first_nonmergeable_int_order = ~0U;
3052 unsigned HOST_WIDE_INT this_end = end;
3053 k = i;
3054 first_nonmergeable_order = ~0U;
3055 for (unsigned int j = i + 1; j < len; ++j)
3057 store_immediate_info *info2 = m_store_info[j];
3058 if (info2->bitpos >= this_end)
3059 break;
3060 if (info2->order < try_order)
3062 if (info2->rhs_code != INTEGER_CST
3063 || info2->lp_nr != merged_store->lp_nr)
3065 /* Normally check_no_overlap makes sure this
3066 doesn't happen, but if end grows below,
3067 then we need to process more stores than
3068 check_no_overlap verified. Example:
3069 MEM[(int *)p_5] = 0;
3070 MEM[(short *)p_5 + 3B] = 1;
3071 MEM[(char *)p_5 + 4B] = _9;
3072 MEM[(char *)p_5 + 2B] = 2; */
3073 k = 0;
3074 break;
3076 k = j;
3077 min_order = MIN (min_order, info2->order);
3078 this_end = MAX (this_end,
3079 info2->bitpos + info2->bitsize);
3081 else if (info2->rhs_code == INTEGER_CST
3082 && info2->lp_nr == merged_store->lp_nr
3083 && !last_iter)
3085 max_order = MAX (max_order, info2->order + 1);
3086 first_nonmergeable_int_order
3087 = MIN (first_nonmergeable_int_order,
3088 info2->order);
3090 else
3091 first_nonmergeable_order
3092 = MIN (first_nonmergeable_order, info2->order);
3094 if (k > i
3095 && !check_no_overlap (m_store_info, len - 1, true,
3096 min_order, try_order,
3097 merged_store->start, this_end,
3098 first_earlier, end_earlier))
3099 k = 0;
3100 if (k == 0)
3102 if (last_order == try_order)
3103 break;
3104 /* If this failed, but only because we grew
3105 try_order, retry with the last working one,
3106 so that we merge at least something. */
3107 try_order = last_order;
3108 last_iter = true;
3109 continue;
3111 last_order = try_order;
3112 /* Retry with a larger try_order to see if we could
3113 merge some further INTEGER_CST stores. */
3114 if (max_order
3115 && (first_nonmergeable_int_order
3116 < first_nonmergeable_order))
3118 try_order = MIN (max_order,
3119 first_nonmergeable_order);
3120 try_order
3121 = MIN (try_order,
3122 merged_store->first_nonmergeable_order);
3123 if (try_order > last_order && ++attempts < 16)
3124 continue;
3126 first_nonmergeable_order
3127 = MIN (first_nonmergeable_order,
3128 first_nonmergeable_int_order);
3129 end = this_end;
3130 break;
3132 while (1);
3134 if (k != 0)
3136 merged_store->merge_overlapping (info);
3138 merged_store->first_nonmergeable_order
3139 = MIN (merged_store->first_nonmergeable_order,
3140 first_nonmergeable_order);
3142 for (unsigned int j = i + 1; j <= k; j++)
3144 store_immediate_info *info2 = m_store_info[j];
3145 gcc_assert (info2->bitpos < end);
3146 if (info2->order < last_order)
3148 gcc_assert (info2->rhs_code == INTEGER_CST);
3149 if (info != info2)
3150 merged_store->merge_overlapping (info2);
3152 /* Other stores are kept and not merged in any
3153 way. */
3155 ignore = k;
3156 goto done;
3161 /* |---store 1---||---store 2---|
3162 This store is consecutive to the previous one.
3163 Merge it into the current store group. There can be gaps in between
3164 the stores, but there can't be gaps in between bitregions. */
3165 else if (info->bitregion_start <= merged_store->bitregion_end
3166 && merged_store->can_be_merged_into (info))
3168 store_immediate_info *infof = merged_store->stores[0];
3170 /* All the rhs_code ops that take 2 operands are commutative,
3171 swap the operands if it could make the operands compatible. */
3172 if (infof->ops[0].base_addr
3173 && infof->ops[1].base_addr
3174 && info->ops[0].base_addr
3175 && info->ops[1].base_addr
3176 && known_eq (info->ops[1].bitpos - infof->ops[0].bitpos,
3177 info->bitpos - infof->bitpos)
3178 && operand_equal_p (info->ops[1].base_addr,
3179 infof->ops[0].base_addr, 0))
3181 std::swap (info->ops[0], info->ops[1]);
3182 info->ops_swapped_p = true;
3184 if (check_no_overlap (m_store_info, i, false,
3185 MIN (merged_store->first_order, info->order),
3186 MAX (merged_store->last_order, info->order),
3187 merged_store->start,
3188 MAX (merged_store->start + merged_store->width,
3189 info->bitpos + info->bitsize),
3190 first_earlier, end_earlier))
3192 /* Turn MEM_REF into BIT_INSERT_EXPR for bit-field stores. */
3193 if (info->rhs_code == MEM_REF && infof->rhs_code != MEM_REF)
3195 info->rhs_code = BIT_INSERT_EXPR;
3196 info->ops[0].val = gimple_assign_rhs1 (info->stmt);
3197 info->ops[0].base_addr = NULL_TREE;
3199 else if (infof->rhs_code == MEM_REF && info->rhs_code != MEM_REF)
3201 store_immediate_info *infoj;
3202 unsigned int j;
3203 FOR_EACH_VEC_ELT (merged_store->stores, j, infoj)
3205 infoj->rhs_code = BIT_INSERT_EXPR;
3206 infoj->ops[0].val = gimple_assign_rhs1 (infoj->stmt);
3207 infoj->ops[0].base_addr = NULL_TREE;
3209 merged_store->bit_insertion = true;
3211 if ((infof->ops[0].base_addr
3212 ? compatible_load_p (merged_store, info, base_addr, 0)
3213 : !info->ops[0].base_addr)
3214 && (infof->ops[1].base_addr
3215 ? compatible_load_p (merged_store, info, base_addr, 1)
3216 : !info->ops[1].base_addr))
3218 merged_store->merge_into (info);
3219 goto done;
3224 /* |---store 1---| <gap> |---store 2---|.
3225 Gap between stores or the rhs not compatible. Start a new group. */
3227 /* Try to apply all the stores recorded for the group to determine
3228 the bitpattern they write and discard it if that fails.
3229 This will also reject single-store groups. */
3230 if (merged_store->apply_stores ())
3231 m_merged_store_groups.safe_push (merged_store);
3232 else
3233 delete merged_store;
3235 merged_store = new merged_store_group (info);
3236 end_earlier = i;
3237 if (dump_file && (dump_flags & TDF_DETAILS))
3238 fputs ("New store group\n", dump_file);
3240 done:
3241 if (dump_file && (dump_flags & TDF_DETAILS))
3243 fprintf (dump_file, "Store %u:\nbitsize:" HOST_WIDE_INT_PRINT_DEC
3244 " bitpos:" HOST_WIDE_INT_PRINT_DEC " val:",
3245 i, info->bitsize, info->bitpos);
3246 print_generic_expr (dump_file, gimple_assign_rhs1 (info->stmt));
3247 fputc ('\n', dump_file);
3251 /* Record or discard the last store group. */
3252 if (merged_store)
3254 if (merged_store->apply_stores ())
3255 m_merged_store_groups.safe_push (merged_store);
3256 else
3257 delete merged_store;
3260 gcc_assert (m_merged_store_groups.length () <= m_store_info.length ());
3262 bool success
3263 = !m_merged_store_groups.is_empty ()
3264 && m_merged_store_groups.length () < m_store_info.length ();
3266 if (success && dump_file)
3267 fprintf (dump_file, "Coalescing successful!\nMerged into %u stores\n",
3268 m_merged_store_groups.length ());
3270 return success;
3273 /* Return the type to use for the merged stores or loads described by STMTS.
3274 This is needed to get the alias sets right. If IS_LOAD, look for rhs,
3275 otherwise lhs. Additionally set *CLIQUEP and *BASEP to MR_DEPENDENCE_*
3276 of the MEM_REFs if any. */
3278 static tree
3279 get_alias_type_for_stmts (vec<gimple *> &stmts, bool is_load,
3280 unsigned short *cliquep, unsigned short *basep)
3282 gimple *stmt;
3283 unsigned int i;
3284 tree type = NULL_TREE;
3285 tree ret = NULL_TREE;
3286 *cliquep = 0;
3287 *basep = 0;
3289 FOR_EACH_VEC_ELT (stmts, i, stmt)
3291 tree ref = is_load ? gimple_assign_rhs1 (stmt)
3292 : gimple_assign_lhs (stmt);
3293 tree type1 = reference_alias_ptr_type (ref);
3294 tree base = get_base_address (ref);
3296 if (i == 0)
3298 if (TREE_CODE (base) == MEM_REF)
3300 *cliquep = MR_DEPENDENCE_CLIQUE (base);
3301 *basep = MR_DEPENDENCE_BASE (base);
3303 ret = type = type1;
3304 continue;
3306 if (!alias_ptr_types_compatible_p (type, type1))
3307 ret = ptr_type_node;
3308 if (TREE_CODE (base) != MEM_REF
3309 || *cliquep != MR_DEPENDENCE_CLIQUE (base)
3310 || *basep != MR_DEPENDENCE_BASE (base))
3312 *cliquep = 0;
3313 *basep = 0;
3316 return ret;
3319 /* Return the location_t information we can find among the statements
3320 in STMTS. */
3322 static location_t
3323 get_location_for_stmts (vec<gimple *> &stmts)
3325 gimple *stmt;
3326 unsigned int i;
3328 FOR_EACH_VEC_ELT (stmts, i, stmt)
3329 if (gimple_has_location (stmt))
3330 return gimple_location (stmt);
3332 return UNKNOWN_LOCATION;
3335 /* Used to describe a store resulting from splitting a wide store into smaller
3336 regularly-sized stores in split_group. */
3338 class split_store
3340 public:
3341 unsigned HOST_WIDE_INT bytepos;
3342 unsigned HOST_WIDE_INT size;
3343 unsigned HOST_WIDE_INT align;
3344 auto_vec<store_immediate_info *> orig_stores;
3345 /* True if there is a single orig stmt covering the whole split store. */
3346 bool orig;
3347 split_store (unsigned HOST_WIDE_INT, unsigned HOST_WIDE_INT,
3348 unsigned HOST_WIDE_INT);
3351 /* Simple constructor. */
3353 split_store::split_store (unsigned HOST_WIDE_INT bp,
3354 unsigned HOST_WIDE_INT sz,
3355 unsigned HOST_WIDE_INT al)
3356 : bytepos (bp), size (sz), align (al), orig (false)
3358 orig_stores.create (0);
3361 /* Record all stores in GROUP that write to the BITSIZE-bit region starting
3362 at BITPOS. Record infos for such statements in STORES if
3363 non-NULL. The stores in GROUP must be sorted by bitposition. Return INFO
3364 if there is exactly one original store in the range (in that case ignore
3365 clobber stmts, unless there are only clobber stmts). */
3367 static store_immediate_info *
3368 find_constituent_stores (class merged_store_group *group,
3369 vec<store_immediate_info *> *stores,
3370 unsigned int *first,
3371 unsigned HOST_WIDE_INT bitpos,
3372 unsigned HOST_WIDE_INT bitsize)
3374 store_immediate_info *info, *ret = NULL;
3375 unsigned int i;
3376 bool second = false;
3377 bool update_first = true;
3378 unsigned HOST_WIDE_INT end = bitpos + bitsize;
3379 for (i = *first; group->stores.iterate (i, &info); ++i)
3381 unsigned HOST_WIDE_INT stmt_start = info->bitpos;
3382 unsigned HOST_WIDE_INT stmt_end = stmt_start + info->bitsize;
3383 if (stmt_end <= bitpos)
3385 /* BITPOS passed to this function never decreases from within the
3386 same split_group call, so optimize and don't scan info records
3387 which are known to end before or at BITPOS next time.
3388 Only do it if all stores before this one also pass this. */
3389 if (update_first)
3390 *first = i + 1;
3391 continue;
3393 else
3394 update_first = false;
3396 /* The stores in GROUP are ordered by bitposition so if we're past
3397 the region for this group return early. */
3398 if (stmt_start >= end)
3399 return ret;
3401 if (gimple_clobber_p (info->stmt))
3403 if (stores)
3404 stores->safe_push (info);
3405 if (ret == NULL)
3406 ret = info;
3407 continue;
3409 if (stores)
3411 stores->safe_push (info);
3412 if (ret && !gimple_clobber_p (ret->stmt))
3414 ret = NULL;
3415 second = true;
3418 else if (ret && !gimple_clobber_p (ret->stmt))
3419 return NULL;
3420 if (!second)
3421 ret = info;
3423 return ret;
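/* Editor's illustration (not part of the original file): the scan done
   by find_constituent_stores reduced to its core, ignoring the clobber
   handling.  STORES[0..N-1] is sorted by bit position; *FIRST is a
   cursor that only ever moves forward because successive queries use
   non-decreasing BITPOS.  Returns the single store intersecting
   [BITPOS, BITPOS + BITSIZE), or null if there is none or more than
   one.  The demo_* names are made up.  */

#include <stddef.h>

struct demo_range_rec { unsigned long long bitpos, bitsize; };

static const struct demo_range_rec *
demo_find_single_constituent (const struct demo_range_rec *stores,
			      unsigned int n, unsigned int *first,
			      unsigned long long bitpos,
			      unsigned long long bitsize)
{
  const struct demo_range_rec *ret = NULL;
  unsigned long long end = bitpos + bitsize;
  int update_first = 1;
  for (unsigned int i = *first; i < n; i++)
    {
      unsigned long long s = stores[i].bitpos;
      unsigned long long e = s + stores[i].bitsize;
      if (e <= bitpos)
	{
	  if (update_first)
	    *first = i + 1;	/* Never rescan entries ending before BITPOS.  */
	  continue;
	}
      update_first = 0;
      if (s >= end)
	return ret;		/* Sorted: nothing further can intersect.  */
      if (ret != NULL)
	return NULL;		/* More than one store in the range.  */
      ret = &stores[i];
    }
  return ret;
}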
3426 /* Return how many SSA_NAMEs used to compute the value stored by INFO
3427 have multiple uses. If any SSA_NAME has multiple uses, also
3428 count the statements needed to compute it. */
3430 static unsigned
3431 count_multiple_uses (store_immediate_info *info)
3433 gimple *stmt = info->stmt;
3434 unsigned ret = 0;
3435 switch (info->rhs_code)
3437 case INTEGER_CST:
3438 case STRING_CST:
3439 return 0;
3440 case BIT_AND_EXPR:
3441 case BIT_IOR_EXPR:
3442 case BIT_XOR_EXPR:
3443 if (info->bit_not_p)
3445 if (!has_single_use (gimple_assign_rhs1 (stmt)))
3446 ret = 1; /* Fall through below to return
3447 the BIT_NOT_EXPR stmt and then
3448 BIT_{AND,IOR,XOR}_EXPR and anything it
3449 uses. */
3450 else
3451 /* stmt is after this the BIT_NOT_EXPR. */
3452 stmt = SSA_NAME_DEF_STMT (gimple_assign_rhs1 (stmt));
3454 if (!has_single_use (gimple_assign_rhs1 (stmt)))
3456 ret += 1 + info->ops[0].bit_not_p;
3457 if (info->ops[1].base_addr)
3458 ret += 1 + info->ops[1].bit_not_p;
3459 return ret + 1;
3461 stmt = SSA_NAME_DEF_STMT (gimple_assign_rhs1 (stmt));
3462 /* stmt is now the BIT_*_EXPR. */
3463 if (!has_single_use (gimple_assign_rhs1 (stmt)))
3464 ret += 1 + info->ops[info->ops_swapped_p].bit_not_p;
3465 else if (info->ops[info->ops_swapped_p].bit_not_p)
3467 gimple *stmt2 = SSA_NAME_DEF_STMT (gimple_assign_rhs1 (stmt));
3468 if (!has_single_use (gimple_assign_rhs1 (stmt2)))
3469 ++ret;
3471 if (info->ops[1].base_addr == NULL_TREE)
3473 gcc_checking_assert (!info->ops_swapped_p);
3474 return ret;
3476 if (!has_single_use (gimple_assign_rhs2 (stmt)))
3477 ret += 1 + info->ops[1 - info->ops_swapped_p].bit_not_p;
3478 else if (info->ops[1 - info->ops_swapped_p].bit_not_p)
3480 gimple *stmt2 = SSA_NAME_DEF_STMT (gimple_assign_rhs2 (stmt));
3481 if (!has_single_use (gimple_assign_rhs1 (stmt2)))
3482 ++ret;
3484 return ret;
3485 case MEM_REF:
3486 if (!has_single_use (gimple_assign_rhs1 (stmt)))
3487 return 1 + info->ops[0].bit_not_p;
3488 else if (info->ops[0].bit_not_p)
3490 stmt = SSA_NAME_DEF_STMT (gimple_assign_rhs1 (stmt));
3491 if (!has_single_use (gimple_assign_rhs1 (stmt)))
3492 return 1;
3494 return 0;
3495 case BIT_INSERT_EXPR:
3496 return has_single_use (gimple_assign_rhs1 (stmt)) ? 0 : 1;
3497 default:
3498 gcc_unreachable ();
3502 /* Split a merged store described by GROUP by populating the SPLIT_STORES
3503 vector (if non-NULL) with split_store structs describing the byte offset
3504 (from the base), the bit size and alignment of each store as well as the
3505 original statements involved in each such split group.
3506 This is to separate the splitting strategy from the statement
3507 building/emission/linking done in output_merged_store.
3508 Return number of new stores.
3509 If ALLOW_UNALIGNED_STORE is false, then all stores must be aligned.
3510 If ALLOW_UNALIGNED_LOAD is false, then all loads must be aligned.
3511 BZERO_FIRST may be true only when the first store covers the whole group
3512 and clears it; if BZERO_FIRST is true, keep that first store in the set
3513 unmodified and emit further stores for the overrides only.
3514 If SPLIT_STORES is NULL, it is just a dry run to count number of
3515 new stores. */
3517 static unsigned int
3518 split_group (merged_store_group *group, bool allow_unaligned_store,
3519 bool allow_unaligned_load, bool bzero_first,
3520 vec<split_store *> *split_stores,
3521 unsigned *total_orig,
3522 unsigned *total_new)
3524 unsigned HOST_WIDE_INT pos = group->bitregion_start;
3525 unsigned HOST_WIDE_INT size = group->bitregion_end - pos;
3526 unsigned HOST_WIDE_INT bytepos = pos / BITS_PER_UNIT;
3527 unsigned HOST_WIDE_INT group_align = group->align;
3528 unsigned HOST_WIDE_INT align_base = group->align_base;
3529 unsigned HOST_WIDE_INT group_load_align = group_align;
3530 bool any_orig = false;
3532 gcc_assert ((size % BITS_PER_UNIT == 0) && (pos % BITS_PER_UNIT == 0));
3534 /* For bswap framework using sets of stores, all the checking has been done
3535 earlier in try_coalesce_bswap and the result always needs to be emitted
3536 as a single store. Likewise for string concatenation. */
3537 if (group->stores[0]->rhs_code == LROTATE_EXPR
3538 || group->stores[0]->rhs_code == NOP_EXPR
3539 || group->string_concatenation)
3541 gcc_assert (!bzero_first);
3542 if (total_orig)
3544 /* Avoid the old/new stmt count heuristics. It should be
3545 always beneficial. */
3546 total_new[0] = 1;
3547 total_orig[0] = 2;
3550 if (split_stores)
3552 unsigned HOST_WIDE_INT align_bitpos
3553 = (group->start - align_base) & (group_align - 1);
3554 unsigned HOST_WIDE_INT align = group_align;
3555 if (align_bitpos)
3556 align = least_bit_hwi (align_bitpos);
3557 bytepos = group->start / BITS_PER_UNIT;
3558 split_store *store
3559 = new split_store (bytepos, group->width, align);
3560 unsigned int first = 0;
3561 find_constituent_stores (group, &store->orig_stores,
3562 &first, group->start, group->width);
3563 split_stores->safe_push (store);
3566 return 1;
3569 unsigned int ret = 0, first = 0;
3570 unsigned HOST_WIDE_INT try_pos = bytepos;
3572 if (total_orig)
3574 unsigned int i;
3575 store_immediate_info *info = group->stores[0];
3577 total_new[0] = 0;
3578 total_orig[0] = 1; /* The orig store. */
3579 info = group->stores[0];
3580 if (info->ops[0].base_addr)
3581 total_orig[0]++;
3582 if (info->ops[1].base_addr)
3583 total_orig[0]++;
3584 switch (info->rhs_code)
3586 case BIT_AND_EXPR:
3587 case BIT_IOR_EXPR:
3588 case BIT_XOR_EXPR:
3589 total_orig[0]++; /* The orig BIT_*_EXPR stmt. */
3590 break;
3591 default:
3592 break;
3594 total_orig[0] *= group->stores.length ();
3596 FOR_EACH_VEC_ELT (group->stores, i, info)
3598 total_new[0] += count_multiple_uses (info);
3599 total_orig[0] += (info->bit_not_p
3600 + info->ops[0].bit_not_p
3601 + info->ops[1].bit_not_p);
3605 if (!allow_unaligned_load)
3606 for (int i = 0; i < 2; ++i)
3607 if (group->load_align[i])
3608 group_load_align = MIN (group_load_align, group->load_align[i]);
3610 if (bzero_first)
3612 store_immediate_info *gstore;
3613 FOR_EACH_VEC_ELT (group->stores, first, gstore)
3614 if (!gimple_clobber_p (gstore->stmt))
3615 break;
3616 ++first;
3617 ret = 1;
3618 if (split_stores)
3620 split_store *store
3621 = new split_store (bytepos, gstore->bitsize, align_base);
3622 store->orig_stores.safe_push (gstore);
3623 store->orig = true;
3624 any_orig = true;
3625 split_stores->safe_push (store);
3629 while (size > 0)
3631 if ((allow_unaligned_store || group_align <= BITS_PER_UNIT)
3632 && (group->mask[try_pos - bytepos] == (unsigned char) ~0U
3633 || (bzero_first && group->val[try_pos - bytepos] == 0)))
3635 /* Skip padding bytes. */
3636 ++try_pos;
3637 size -= BITS_PER_UNIT;
3638 continue;
3641 unsigned HOST_WIDE_INT try_bitpos = try_pos * BITS_PER_UNIT;
3642 unsigned int try_size = MAX_STORE_BITSIZE, nonmasked;
3643 unsigned HOST_WIDE_INT align_bitpos
3644 = (try_bitpos - align_base) & (group_align - 1);
3645 unsigned HOST_WIDE_INT align = group_align;
3646 bool found_orig = false;
3647 if (align_bitpos)
3648 align = least_bit_hwi (align_bitpos);
3649 if (!allow_unaligned_store)
3650 try_size = MIN (try_size, align);
3651 if (!allow_unaligned_load)
3653 /* If we can't do, or don't want to do, unaligned loads as
3654 well as unaligned stores, we need to take the load alignment
3655 into account as well.  */
3656 unsigned HOST_WIDE_INT load_align = group_load_align;
3657 align_bitpos = (try_bitpos - align_base) & (load_align - 1);
3658 if (align_bitpos)
3659 load_align = least_bit_hwi (align_bitpos);
3660 for (int i = 0; i < 2; ++i)
3661 if (group->load_align[i])
3663 align_bitpos
3664 = known_alignment (try_bitpos
3665 - group->stores[0]->bitpos
3666 + group->stores[0]->ops[i].bitpos
3667 - group->load_align_base[i]);
3668 if (align_bitpos & (group_load_align - 1))
3670 unsigned HOST_WIDE_INT a = least_bit_hwi (align_bitpos);
3671 load_align = MIN (load_align, a);
3674 try_size = MIN (try_size, load_align);
3676 store_immediate_info *info
3677 = find_constituent_stores (group, NULL, &first, try_bitpos, try_size);
3678 if (info && !gimple_clobber_p (info->stmt))
3680 /* If there is just one original statement for the range, see if
3681 we can just reuse the original store which could be even larger
3682 than try_size. */
3683 unsigned HOST_WIDE_INT stmt_end
3684 = ROUND_UP (info->bitpos + info->bitsize, BITS_PER_UNIT);
3685 info = find_constituent_stores (group, NULL, &first, try_bitpos,
3686 stmt_end - try_bitpos);
3687 if (info && info->bitpos >= try_bitpos)
3689 store_immediate_info *info2 = NULL;
3690 unsigned int first_copy = first;
3691 if (info->bitpos > try_bitpos
3692 && stmt_end - try_bitpos <= try_size)
3694 info2 = find_constituent_stores (group, NULL, &first_copy,
3695 try_bitpos,
3696 info->bitpos - try_bitpos);
3697 gcc_assert (info2 == NULL || gimple_clobber_p (info2->stmt));
3699 if (info2 == NULL && stmt_end - try_bitpos < try_size)
3701 info2 = find_constituent_stores (group, NULL, &first_copy,
3702 stmt_end,
3703 (try_bitpos + try_size)
3704 - stmt_end);
3705 gcc_assert (info2 == NULL || gimple_clobber_p (info2->stmt));
3707 if (info2 == NULL)
3709 try_size = stmt_end - try_bitpos;
3710 found_orig = true;
3711 goto found;
3716 /* Approximate store bitsize for the case when there are no padding
3717 bits. */
3718 while (try_size > size)
3719 try_size /= 2;
3720 /* Now look for whole padding bytes at the end of that bitsize. */
3721 for (nonmasked = try_size / BITS_PER_UNIT; nonmasked > 0; --nonmasked)
3722 if (group->mask[try_pos - bytepos + nonmasked - 1]
3723 != (unsigned char) ~0U
3724 && (!bzero_first
3725 || group->val[try_pos - bytepos + nonmasked - 1] != 0))
3726 break;
3727 if (nonmasked == 0 || (info && gimple_clobber_p (info->stmt)))
3729 /* If the entire try_size range is padding, skip it.  */
3730 try_pos += try_size / BITS_PER_UNIT;
3731 size -= try_size;
3732 continue;
3734 /* Otherwise try to decrease try_size if the second half, the last 3
3735 quarters, etc. are padding.  */
3736 nonmasked *= BITS_PER_UNIT;
3737 while (nonmasked <= try_size / 2)
3738 try_size /= 2;
3739 if (!allow_unaligned_store && group_align > BITS_PER_UNIT)
3741 /* Now look for whole padding bytes at the start of that bitsize. */
3742 unsigned int try_bytesize = try_size / BITS_PER_UNIT, masked;
3743 for (masked = 0; masked < try_bytesize; ++masked)
3744 if (group->mask[try_pos - bytepos + masked] != (unsigned char) ~0U
3745 && (!bzero_first
3746 || group->val[try_pos - bytepos + masked] != 0))
3747 break;
3748 masked *= BITS_PER_UNIT;
3749 gcc_assert (masked < try_size);
3750 if (masked >= try_size / 2)
3752 while (masked >= try_size / 2)
3754 try_size /= 2;
3755 try_pos += try_size / BITS_PER_UNIT;
3756 size -= try_size;
3757 masked -= try_size;
3759 /* Need to recompute the alignment, so just retry at the new
3760 position. */
3761 continue;
3765 found:
3766 ++ret;
3768 if (split_stores)
3770 split_store *store
3771 = new split_store (try_pos, try_size, align);
3772 info = find_constituent_stores (group, &store->orig_stores,
3773 &first, try_bitpos, try_size);
3774 if (info
3775 && !gimple_clobber_p (info->stmt)
3776 && info->bitpos >= try_bitpos
3777 && info->bitpos + info->bitsize <= try_bitpos + try_size
3778 && (store->orig_stores.length () == 1
3779 || found_orig
3780 || (info->bitpos == try_bitpos
3781 && (info->bitpos + info->bitsize
3782 == try_bitpos + try_size))))
3784 store->orig = true;
3785 any_orig = true;
3787 split_stores->safe_push (store);
3790 try_pos += try_size / BITS_PER_UNIT;
3791 size -= try_size;
3794 if (total_orig)
3796 unsigned int i;
3797 split_store *store;
3798 /* If we are reusing some original stores and any of the
3799 original SSA_NAMEs had multiple uses, we need to subtract
3800 those now before we add the new ones. */
3801 if (total_new[0] && any_orig)
3803 FOR_EACH_VEC_ELT (*split_stores, i, store)
3804 if (store->orig)
3805 total_new[0] -= count_multiple_uses (store->orig_stores[0]);
3807 total_new[0] += ret; /* The new store. */
3808 store_immediate_info *info = group->stores[0];
3809 if (info->ops[0].base_addr)
3810 total_new[0] += ret;
3811 if (info->ops[1].base_addr)
3812 total_new[0] += ret;
3813 switch (info->rhs_code)
3815 case BIT_AND_EXPR:
3816 case BIT_IOR_EXPR:
3817 case BIT_XOR_EXPR:
3818 total_new[0] += ret; /* The new BIT_*_EXPR stmt. */
3819 break;
3820 default:
3821 break;
3823 FOR_EACH_VEC_ELT (*split_stores, i, store)
3825 unsigned int j;
3826 bool bit_not_p[3] = { false, false, false };
3827 /* If all orig_stores have certain bit_not_p set, then
3828 we'd use a BIT_NOT_EXPR stmt and need to account for it.
3829 If some orig_stores have certain bit_not_p set, then
3830 we'd use a BIT_XOR_EXPR with a mask and need to account for
3831 it. */
3832 FOR_EACH_VEC_ELT (store->orig_stores, j, info)
3834 if (info->ops[0].bit_not_p)
3835 bit_not_p[0] = true;
3836 if (info->ops[1].bit_not_p)
3837 bit_not_p[1] = true;
3838 if (info->bit_not_p)
3839 bit_not_p[2] = true;
3841 total_new[0] += bit_not_p[0] + bit_not_p[1] + bit_not_p[2];
3846 return ret;
3849 /* Return the operation through which the operand IDX (if < 2) or
3850 the result (IDX == 2) should be inverted.  If NOP_EXPR, no inversion
3851 is done; if BIT_NOT_EXPR, all bits are inverted; if BIT_XOR_EXPR,
3852 the bits should be xored with MASK.  */
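/* Illustrative sketch (not taken from the sources): assume a 4-byte split
   store built from four 1-byte constituent stores.  If all four have
   bit_not_p set, the whole value is inverted with a single BIT_NOT_EXPR:
     _2 = ~_1;
   If only the two low bytes have bit_not_p set, the inversion is expressed
   as a BIT_XOR_EXPR against a mask covering just those bytes, e.g. on a
   little-endian target:
     _2 = _1 ^ 0xffff;
   A NOP_EXPR result means no inversion is needed at all.  */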
3854 static enum tree_code
3855 invert_op (split_store *split_store, int idx, tree int_type, tree &mask)
3857 unsigned int i;
3858 store_immediate_info *info;
3859 unsigned int cnt = 0;
3860 bool any_paddings = false;
3861 FOR_EACH_VEC_ELT (split_store->orig_stores, i, info)
3863 bool bit_not_p = idx < 2 ? info->ops[idx].bit_not_p : info->bit_not_p;
3864 if (bit_not_p)
3866 ++cnt;
3867 tree lhs = gimple_assign_lhs (info->stmt);
3868 if (INTEGRAL_TYPE_P (TREE_TYPE (lhs))
3869 && TYPE_PRECISION (TREE_TYPE (lhs)) < info->bitsize)
3870 any_paddings = true;
3873 mask = NULL_TREE;
3874 if (cnt == 0)
3875 return NOP_EXPR;
3876 if (cnt == split_store->orig_stores.length () && !any_paddings)
3877 return BIT_NOT_EXPR;
3879 unsigned HOST_WIDE_INT try_bitpos = split_store->bytepos * BITS_PER_UNIT;
3880 unsigned buf_size = split_store->size / BITS_PER_UNIT;
3881 unsigned char *buf
3882 = XALLOCAVEC (unsigned char, buf_size);
3883 memset (buf, ~0U, buf_size);
3884 FOR_EACH_VEC_ELT (split_store->orig_stores, i, info)
3886 bool bit_not_p = idx < 2 ? info->ops[idx].bit_not_p : info->bit_not_p;
3887 if (!bit_not_p)
3888 continue;
3889 /* Clear regions with bit_not_p and invert afterwards, rather than
3890 clear regions with !bit_not_p, so that gaps in between stores aren't
3891 set in the mask. */
3892 unsigned HOST_WIDE_INT bitsize = info->bitsize;
3893 unsigned HOST_WIDE_INT prec = bitsize;
3894 unsigned int pos_in_buffer = 0;
3895 if (any_paddings)
3897 tree lhs = gimple_assign_lhs (info->stmt);
3898 if (INTEGRAL_TYPE_P (TREE_TYPE (lhs))
3899 && TYPE_PRECISION (TREE_TYPE (lhs)) < bitsize)
3900 prec = TYPE_PRECISION (TREE_TYPE (lhs));
3902 if (info->bitpos < try_bitpos)
3904 gcc_assert (info->bitpos + bitsize > try_bitpos);
3905 if (!BYTES_BIG_ENDIAN)
3907 if (prec <= try_bitpos - info->bitpos)
3908 continue;
3909 prec -= try_bitpos - info->bitpos;
3911 bitsize -= try_bitpos - info->bitpos;
3912 if (BYTES_BIG_ENDIAN && prec > bitsize)
3913 prec = bitsize;
3915 else
3916 pos_in_buffer = info->bitpos - try_bitpos;
3917 if (prec < bitsize)
3919 /* If this is a bool inversion, invert just the least significant
3920 prec bits rather than all bits of it. */
3921 if (BYTES_BIG_ENDIAN)
3923 pos_in_buffer += bitsize - prec;
3924 if (pos_in_buffer >= split_store->size)
3925 continue;
3927 bitsize = prec;
3929 if (pos_in_buffer + bitsize > split_store->size)
3930 bitsize = split_store->size - pos_in_buffer;
3931 unsigned char *p = buf + (pos_in_buffer / BITS_PER_UNIT);
3932 if (BYTES_BIG_ENDIAN)
3933 clear_bit_region_be (p, (BITS_PER_UNIT - 1
3934 - (pos_in_buffer % BITS_PER_UNIT)), bitsize);
3935 else
3936 clear_bit_region (p, pos_in_buffer % BITS_PER_UNIT, bitsize);
3938 for (unsigned int i = 0; i < buf_size; ++i)
3939 buf[i] = ~buf[i];
3940 mask = native_interpret_expr (int_type, buf, buf_size);
3941 return BIT_XOR_EXPR;
3944 /* Given a merged store group GROUP, output the widened version of it.
3945 The store chain is against the base object BASE.
3946 Try store sizes of at most MAX_STORE_BITSIZE bits and don't output
3947 unaligned stores for STRICT_ALIGNMENT targets or if it's too expensive.
3948 Make sure that the number of statements output is less than the number of
3949 original statements.  If a better sequence is possible, emit it and
3950 return true.  */
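/* For instance (illustrative only): four adjacent 1-byte constant stores
   (4 original statements) replaced by one aligned 4-byte store (1 new
   statement) passes both profitability checks below, whereas a split that
   still needs 4 or more new stores is rejected.  */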
3952 bool
3953 imm_store_chain_info::output_merged_store (merged_store_group *group)
3955 const unsigned HOST_WIDE_INT start_byte_pos
3956 = group->bitregion_start / BITS_PER_UNIT;
3957 unsigned int orig_num_stmts = group->stores.length ();
3958 if (orig_num_stmts < 2)
3959 return false;
3961 bool allow_unaligned_store
3962 = !STRICT_ALIGNMENT && param_store_merging_allow_unaligned;
3963 bool allow_unaligned_load = allow_unaligned_store;
3964 bool bzero_first = false;
3965 store_immediate_info *store;
3966 unsigned int num_clobber_stmts = 0;
3967 if (group->stores[0]->rhs_code == INTEGER_CST)
3969 unsigned int i;
3970 FOR_EACH_VEC_ELT (group->stores, i, store)
3971 if (gimple_clobber_p (store->stmt))
3972 num_clobber_stmts++;
3973 else if (TREE_CODE (gimple_assign_rhs1 (store->stmt)) == CONSTRUCTOR
3974 && CONSTRUCTOR_NELTS (gimple_assign_rhs1 (store->stmt)) == 0
3975 && group->start == store->bitpos
3976 && group->width == store->bitsize
3977 && (group->start % BITS_PER_UNIT) == 0
3978 && (group->width % BITS_PER_UNIT) == 0)
3980 bzero_first = true;
3981 break;
3983 else
3984 break;
3985 FOR_EACH_VEC_ELT_FROM (group->stores, i, store, i)
3986 if (gimple_clobber_p (store->stmt))
3987 num_clobber_stmts++;
3988 if (num_clobber_stmts == orig_num_stmts)
3989 return false;
3990 orig_num_stmts -= num_clobber_stmts;
3992 if (allow_unaligned_store || bzero_first)
3994 /* If unaligned stores are allowed, see how many stores we'd emit
3995 for unaligned stores and how many for aligned stores, and only use
3996 unaligned stores if that results in fewer stores than aligned ones.
3997 Similarly, if the whole region is cleared first, prefer expanding
3998 the clear together with the subsequent stores rather than emitting
3999 the clear first followed by further merged stores.  */
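/* Bit 0 of PASS selects unaligned stores and bit 1 selects bzero_first,
   i.e. pass 0 = aligned, pass 1 = unaligned, pass 2 = aligned with the
   initial bzero, pass 3 = unaligned with the initial bzero.  */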
4000 unsigned cnt[4] = { ~0U, ~0U, ~0U, ~0U };
4001 int pass_min = 0;
4002 for (int pass = 0; pass < 4; ++pass)
4004 if (!allow_unaligned_store && (pass & 1) != 0)
4005 continue;
4006 if (!bzero_first && (pass & 2) != 0)
4007 continue;
4008 cnt[pass] = split_group (group, (pass & 1) != 0,
4009 allow_unaligned_load, (pass & 2) != 0,
4010 NULL, NULL, NULL);
4011 if (cnt[pass] < cnt[pass_min])
4012 pass_min = pass;
4014 if ((pass_min & 1) == 0)
4015 allow_unaligned_store = false;
4016 if ((pass_min & 2) == 0)
4017 bzero_first = false;
4020 auto_vec<class split_store *, 32> split_stores;
4021 split_store *split_store;
4022 unsigned total_orig, total_new, i;
4023 split_group (group, allow_unaligned_store, allow_unaligned_load, bzero_first,
4024 &split_stores, &total_orig, &total_new);
4026 /* Determine if there is a clobber covering the whole group at the start,
4027 followed by proposed split stores that cover the whole group. In that
4028 case, prefer the transformation even if
4029 split_stores.length () == orig_num_stmts. */
4030 bool clobber_first = false;
4031 if (num_clobber_stmts
4032 && gimple_clobber_p (group->stores[0]->stmt)
4033 && group->start == group->stores[0]->bitpos
4034 && group->width == group->stores[0]->bitsize
4035 && (group->start % BITS_PER_UNIT) == 0
4036 && (group->width % BITS_PER_UNIT) == 0)
4038 clobber_first = true;
4039 unsigned HOST_WIDE_INT pos = group->start / BITS_PER_UNIT;
4040 FOR_EACH_VEC_ELT (split_stores, i, split_store)
4041 if (split_store->bytepos != pos)
4043 clobber_first = false;
4044 break;
4046 else
4047 pos += split_store->size / BITS_PER_UNIT;
4048 if (pos != (group->start + group->width) / BITS_PER_UNIT)
4049 clobber_first = false;
4052 if (split_stores.length () >= orig_num_stmts + clobber_first)
4055 /* We didn't manage to reduce the number of statements. Bail out. */
4056 if (dump_file && (dump_flags & TDF_DETAILS))
4057 fprintf (dump_file, "Exceeded original number of stmts (%u)."
4058 " Not profitable to emit new sequence.\n",
4059 orig_num_stmts);
4060 FOR_EACH_VEC_ELT (split_stores, i, split_store)
4061 delete split_store;
4062 return false;
4064 if (total_orig <= total_new)
4066 /* If the estimated number of new statements is not below the estimated
4067 number of original statements, bail out too.  */
4068 if (dump_file && (dump_flags & TDF_DETAILS))
4069 fprintf (dump_file, "Estimated number of original stmts (%u)"
4070 " not larger than estimated number of new"
4071 " stmts (%u).\n",
4072 total_orig, total_new);
4073 FOR_EACH_VEC_ELT (split_stores, i, split_store)
4074 delete split_store;
4075 return false;
4077 if (group->stores[0]->rhs_code == INTEGER_CST)
4079 bool all_orig = true;
4080 FOR_EACH_VEC_ELT (split_stores, i, split_store)
4081 if (!split_store->orig)
4083 all_orig = false;
4084 break;
4086 if (all_orig)
4088 unsigned int cnt = split_stores.length ();
4089 store_immediate_info *store;
4090 FOR_EACH_VEC_ELT (group->stores, i, store)
4091 if (gimple_clobber_p (store->stmt))
4092 ++cnt;
4093 /* Punt if we wouldn't make any real changes, i.e. keep all
4094 orig stmts + all clobbers. */
4095 if (cnt == group->stores.length ())
4097 if (dump_file && (dump_flags & TDF_DETAILS))
4098 fprintf (dump_file, "Exceeded original number of stmts (%u)."
4099 " Not profitable to emit new sequence.\n",
4100 orig_num_stmts);
4101 FOR_EACH_VEC_ELT (split_stores, i, split_store)
4102 delete split_store;
4103 return false;
4108 gimple_stmt_iterator last_gsi = gsi_for_stmt (group->last_stmt);
4109 gimple_seq seq = NULL;
4110 tree last_vdef, new_vuse;
4111 last_vdef = gimple_vdef (group->last_stmt);
4112 new_vuse = gimple_vuse (group->last_stmt);
4113 tree bswap_res = NULL_TREE;
4115 /* Clobbers are not removed. */
4116 if (gimple_clobber_p (group->last_stmt))
4118 new_vuse = make_ssa_name (gimple_vop (cfun), group->last_stmt);
4119 gimple_set_vdef (group->last_stmt, new_vuse);
4122 if (group->stores[0]->rhs_code == LROTATE_EXPR
4123 || group->stores[0]->rhs_code == NOP_EXPR)
4125 tree fndecl = NULL_TREE, bswap_type = NULL_TREE, load_type;
4126 gimple *ins_stmt = group->stores[0]->ins_stmt;
4127 struct symbolic_number *n = &group->stores[0]->n;
4128 bool bswap = group->stores[0]->rhs_code == LROTATE_EXPR;
4130 switch (n->range)
4132 case 16:
4133 load_type = bswap_type = uint16_type_node;
4134 break;
4135 case 32:
4136 load_type = uint32_type_node;
4137 if (bswap)
4139 fndecl = builtin_decl_explicit (BUILT_IN_BSWAP32);
4140 bswap_type = TREE_VALUE (TYPE_ARG_TYPES (TREE_TYPE (fndecl)));
4142 break;
4143 case 64:
4144 load_type = uint64_type_node;
4145 if (bswap)
4147 fndecl = builtin_decl_explicit (BUILT_IN_BSWAP64);
4148 bswap_type = TREE_VALUE (TYPE_ARG_TYPES (TREE_TYPE (fndecl)));
4150 break;
4151 default:
4152 gcc_unreachable ();
4155 /* If each load has the vuse of the corresponding store, we've
4156 already checked the aliasing in try_coalesce_bswap and we want
4157 to sink the needed load into seq.  So we need to use new_vuse
4158 on the load. */
4159 if (n->base_addr)
4161 if (n->vuse == NULL)
4163 n->vuse = new_vuse;
4164 ins_stmt = NULL;
4166 else
4167 /* Update vuse in case it has been changed by output_merged_stores. */
4168 n->vuse = gimple_vuse (ins_stmt);
4170 bswap_res = bswap_replace (gsi_start (seq), ins_stmt, fndecl,
4171 bswap_type, load_type, n, bswap);
4172 gcc_assert (bswap_res);
4175 gimple *stmt = NULL;
4176 auto_vec<gimple *, 32> orig_stmts;
4177 gimple_seq this_seq;
4178 tree addr = force_gimple_operand_1 (unshare_expr (base_addr), &this_seq,
4179 is_gimple_mem_ref_addr, NULL_TREE);
4180 gimple_seq_add_seq_without_update (&seq, this_seq);
4182 tree load_addr[2] = { NULL_TREE, NULL_TREE };
4183 gimple_seq load_seq[2] = { NULL, NULL };
4184 gimple_stmt_iterator load_gsi[2] = { gsi_none (), gsi_none () };
4185 for (int j = 0; j < 2; ++j)
4187 store_operand_info &op = group->stores[0]->ops[j];
4188 if (op.base_addr == NULL_TREE)
4189 continue;
4191 store_immediate_info *infol = group->stores.last ();
4192 if (gimple_vuse (op.stmt) == gimple_vuse (infol->ops[j].stmt))
4194 /* We can't pick the location randomly; while we've verified
4195 all the loads have the same vuse, they can still be in different
4196 basic blocks and we need to pick the one from the last bb:
4197 int x = q[0];
4198 if (x == N) return;
4199 int y = q[1];
4200 p[0] = x;
4201 p[1] = y;
4202 otherwise if we put the wider load at the q[0] load, we might
4203 segfault if q[1] is not mapped. */
4204 basic_block bb = gimple_bb (op.stmt);
4205 gimple *ostmt = op.stmt;
4206 store_immediate_info *info;
4207 FOR_EACH_VEC_ELT (group->stores, i, info)
4209 gimple *tstmt = info->ops[j].stmt;
4210 basic_block tbb = gimple_bb (tstmt);
4211 if (dominated_by_p (CDI_DOMINATORS, tbb, bb))
4213 ostmt = tstmt;
4214 bb = tbb;
4217 load_gsi[j] = gsi_for_stmt (ostmt);
4218 load_addr[j]
4219 = force_gimple_operand_1 (unshare_expr (op.base_addr),
4220 &load_seq[j], is_gimple_mem_ref_addr,
4221 NULL_TREE);
4223 else if (operand_equal_p (base_addr, op.base_addr, 0))
4224 load_addr[j] = addr;
4225 else
4227 load_addr[j]
4228 = force_gimple_operand_1 (unshare_expr (op.base_addr),
4229 &this_seq, is_gimple_mem_ref_addr,
4230 NULL_TREE);
4231 gimple_seq_add_seq_without_update (&seq, this_seq);
4235 FOR_EACH_VEC_ELT (split_stores, i, split_store)
4237 const unsigned HOST_WIDE_INT try_size = split_store->size;
4238 const unsigned HOST_WIDE_INT try_pos = split_store->bytepos;
4239 const unsigned HOST_WIDE_INT try_bitpos = try_pos * BITS_PER_UNIT;
4240 const unsigned HOST_WIDE_INT try_align = split_store->align;
4241 const unsigned HOST_WIDE_INT try_offset = try_pos - start_byte_pos;
4242 tree dest, src;
4243 location_t loc;
4245 if (split_store->orig)
4247 /* If there is just a single non-clobber constituent store
4248 which covers the whole area, simply reuse its lhs and rhs. */
4249 gimple *orig_stmt = NULL;
4250 store_immediate_info *store;
4251 unsigned int j;
4252 FOR_EACH_VEC_ELT (split_store->orig_stores, j, store)
4253 if (!gimple_clobber_p (store->stmt))
4255 orig_stmt = store->stmt;
4256 break;
4258 dest = gimple_assign_lhs (orig_stmt);
4259 src = gimple_assign_rhs1 (orig_stmt);
4260 loc = gimple_location (orig_stmt);
4262 else
4264 store_immediate_info *info;
4265 unsigned short clique, base;
4266 unsigned int k;
4267 FOR_EACH_VEC_ELT (split_store->orig_stores, k, info)
4268 orig_stmts.safe_push (info->stmt);
4269 tree offset_type
4270 = get_alias_type_for_stmts (orig_stmts, false, &clique, &base);
4271 tree dest_type;
4272 loc = get_location_for_stmts (orig_stmts);
4273 orig_stmts.truncate (0);
4275 if (group->string_concatenation)
4276 dest_type
4277 = build_array_type_nelts (char_type_node,
4278 try_size / BITS_PER_UNIT);
4279 else
4281 dest_type = build_nonstandard_integer_type (try_size, UNSIGNED);
4282 dest_type = build_aligned_type (dest_type, try_align);
4284 dest = fold_build2 (MEM_REF, dest_type, addr,
4285 build_int_cst (offset_type, try_pos));
4286 if (TREE_CODE (dest) == MEM_REF)
4288 MR_DEPENDENCE_CLIQUE (dest) = clique;
4289 MR_DEPENDENCE_BASE (dest) = base;
4292 tree mask;
4293 if (bswap_res || group->string_concatenation)
4294 mask = integer_zero_node;
4295 else
4296 mask = native_interpret_expr (dest_type,
4297 group->mask + try_offset,
4298 group->buf_size);
4300 tree ops[2];
4301 for (int j = 0;
4302 j < 1 + (split_store->orig_stores[0]->ops[1].val != NULL_TREE);
4303 ++j)
4305 store_operand_info &op = split_store->orig_stores[0]->ops[j];
4306 if (bswap_res)
4307 ops[j] = bswap_res;
4308 else if (group->string_concatenation)
4310 ops[j] = build_string (try_size / BITS_PER_UNIT,
4311 (const char *) group->val + try_offset);
4312 TREE_TYPE (ops[j]) = dest_type;
4314 else if (op.base_addr)
4316 FOR_EACH_VEC_ELT (split_store->orig_stores, k, info)
4317 orig_stmts.safe_push (info->ops[j].stmt);
4319 offset_type = get_alias_type_for_stmts (orig_stmts, true,
4320 &clique, &base);
4321 location_t load_loc = get_location_for_stmts (orig_stmts);
4322 orig_stmts.truncate (0);
4324 unsigned HOST_WIDE_INT load_align = group->load_align[j];
4325 unsigned HOST_WIDE_INT align_bitpos
4326 = known_alignment (try_bitpos
4327 - split_store->orig_stores[0]->bitpos
4328 + op.bitpos);
4329 if (align_bitpos & (load_align - 1))
4330 load_align = least_bit_hwi (align_bitpos);
4332 tree load_int_type
4333 = build_nonstandard_integer_type (try_size, UNSIGNED);
4334 load_int_type
4335 = build_aligned_type (load_int_type, load_align);
4337 poly_uint64 load_pos
4338 = exact_div (try_bitpos
4339 - split_store->orig_stores[0]->bitpos
4340 + op.bitpos,
4341 BITS_PER_UNIT);
4342 ops[j] = fold_build2 (MEM_REF, load_int_type, load_addr[j],
4343 build_int_cst (offset_type, load_pos));
4344 if (TREE_CODE (ops[j]) == MEM_REF)
4346 MR_DEPENDENCE_CLIQUE (ops[j]) = clique;
4347 MR_DEPENDENCE_BASE (ops[j]) = base;
4349 if (!integer_zerop (mask))
4350 /* The load might load some bits (that will be masked off
4351 later on) uninitialized; avoid -W*uninitialized
4352 warnings in that case. */
4353 TREE_NO_WARNING (ops[j]) = 1;
4355 stmt = gimple_build_assign (make_ssa_name (dest_type), ops[j]);
4356 gimple_set_location (stmt, load_loc);
4357 if (gsi_bb (load_gsi[j]))
4359 gimple_set_vuse (stmt, gimple_vuse (op.stmt));
4360 gimple_seq_add_stmt_without_update (&load_seq[j], stmt);
4362 else
4364 gimple_set_vuse (stmt, new_vuse);
4365 gimple_seq_add_stmt_without_update (&seq, stmt);
4367 ops[j] = gimple_assign_lhs (stmt);
4368 tree xor_mask;
4369 enum tree_code inv_op
4370 = invert_op (split_store, j, dest_type, xor_mask);
4371 if (inv_op != NOP_EXPR)
4373 stmt = gimple_build_assign (make_ssa_name (dest_type),
4374 inv_op, ops[j], xor_mask);
4375 gimple_set_location (stmt, load_loc);
4376 ops[j] = gimple_assign_lhs (stmt);
4378 if (gsi_bb (load_gsi[j]))
4379 gimple_seq_add_stmt_without_update (&load_seq[j],
4380 stmt);
4381 else
4382 gimple_seq_add_stmt_without_update (&seq, stmt);
4385 else
4386 ops[j] = native_interpret_expr (dest_type,
4387 group->val + try_offset,
4388 group->buf_size);
4391 switch (split_store->orig_stores[0]->rhs_code)
4393 case BIT_AND_EXPR:
4394 case BIT_IOR_EXPR:
4395 case BIT_XOR_EXPR:
4396 FOR_EACH_VEC_ELT (split_store->orig_stores, k, info)
4398 tree rhs1 = gimple_assign_rhs1 (info->stmt);
4399 orig_stmts.safe_push (SSA_NAME_DEF_STMT (rhs1));
4401 location_t bit_loc;
4402 bit_loc = get_location_for_stmts (orig_stmts);
4403 orig_stmts.truncate (0);
4405 stmt
4406 = gimple_build_assign (make_ssa_name (dest_type),
4407 split_store->orig_stores[0]->rhs_code,
4408 ops[0], ops[1]);
4409 gimple_set_location (stmt, bit_loc);
4410 /* If there is just one load and there is a separate
4411 load_seq[0], emit the bitwise op right after it. */
4412 if (load_addr[1] == NULL_TREE && gsi_bb (load_gsi[0]))
4413 gimple_seq_add_stmt_without_update (&load_seq[0], stmt);
4414 /* Otherwise, if at least one load is in seq, we need to
4415 emit the bitwise op right before the store. If there
4416 are two loads and they are emitted somewhere else, it would
4417 be better to emit the bitwise op as early as possible;
4418 we don't track where that would be possible right now
4419 though. */
4420 else
4421 gimple_seq_add_stmt_without_update (&seq, stmt);
4422 src = gimple_assign_lhs (stmt);
4423 tree xor_mask;
4424 enum tree_code inv_op;
4425 inv_op = invert_op (split_store, 2, dest_type, xor_mask);
4426 if (inv_op != NOP_EXPR)
4428 stmt = gimple_build_assign (make_ssa_name (dest_type),
4429 inv_op, src, xor_mask);
4430 gimple_set_location (stmt, bit_loc);
4431 if (load_addr[1] == NULL_TREE && gsi_bb (load_gsi[0]))
4432 gimple_seq_add_stmt_without_update (&load_seq[0], stmt);
4433 else
4434 gimple_seq_add_stmt_without_update (&seq, stmt);
4435 src = gimple_assign_lhs (stmt);
4437 break;
4438 case LROTATE_EXPR:
4439 case NOP_EXPR:
4440 src = ops[0];
4441 if (!is_gimple_val (src))
4443 stmt = gimple_build_assign (make_ssa_name (TREE_TYPE (src)),
4444 src);
4445 gimple_seq_add_stmt_without_update (&seq, stmt);
4446 src = gimple_assign_lhs (stmt);
4448 if (!useless_type_conversion_p (dest_type, TREE_TYPE (src)))
4450 stmt = gimple_build_assign (make_ssa_name (dest_type),
4451 NOP_EXPR, src);
4452 gimple_seq_add_stmt_without_update (&seq, stmt);
4453 src = gimple_assign_lhs (stmt);
4455 inv_op = invert_op (split_store, 2, dest_type, xor_mask);
4456 if (inv_op != NOP_EXPR)
4458 stmt = gimple_build_assign (make_ssa_name (dest_type),
4459 inv_op, src, xor_mask);
4460 gimple_set_location (stmt, loc);
4461 gimple_seq_add_stmt_without_update (&seq, stmt);
4462 src = gimple_assign_lhs (stmt);
4464 break;
4465 default:
4466 src = ops[0];
4467 break;
4470 /* If bit insertion is required, we use the source as an accumulator
4471 into which the successive bit-field values are manually inserted.
4472 FIXME: perhaps use BIT_INSERT_EXPR instead in some cases? */
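/* Worked example (illustrative only, little-endian): to insert a 3-bit
   field value at bit offset 5 of the destination, the loop below builds
     tem = val & 0x7;          (mask to the field width)
     tem = (desttype) tem;     (convert to the destination type)
     tem = tem << 5;           (shift into place)
     src = src | tem;          (ior into the accumulator)
   where any of these steps is omitted when it would be a no-op.  */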
4473 if (group->bit_insertion)
4474 FOR_EACH_VEC_ELT (split_store->orig_stores, k, info)
4475 if (info->rhs_code == BIT_INSERT_EXPR
4476 && info->bitpos < try_bitpos + try_size
4477 && info->bitpos + info->bitsize > try_bitpos)
4479 /* Mask, truncate, convert to final type, shift and ior into
4480 the accumulator. Note that every step can be a no-op. */
4481 const HOST_WIDE_INT start_gap = info->bitpos - try_bitpos;
4482 const HOST_WIDE_INT end_gap
4483 = (try_bitpos + try_size) - (info->bitpos + info->bitsize);
4484 tree tem = info->ops[0].val;
4485 if (!INTEGRAL_TYPE_P (TREE_TYPE (tem)))
4487 const unsigned HOST_WIDE_INT size
4488 = tree_to_uhwi (TYPE_SIZE (TREE_TYPE (tem)));
4489 tree integer_type
4490 = build_nonstandard_integer_type (size, UNSIGNED);
4491 tem = gimple_build (&seq, loc, VIEW_CONVERT_EXPR,
4492 integer_type, tem);
4494 if (TYPE_PRECISION (TREE_TYPE (tem)) <= info->bitsize)
4496 tree bitfield_type
4497 = build_nonstandard_integer_type (info->bitsize,
4498 UNSIGNED);
4499 tem = gimple_convert (&seq, loc, bitfield_type, tem);
4501 else if ((BYTES_BIG_ENDIAN ? start_gap : end_gap) > 0)
4503 const unsigned HOST_WIDE_INT imask
4504 = (HOST_WIDE_INT_1U << info->bitsize) - 1;
4505 tem = gimple_build (&seq, loc,
4506 BIT_AND_EXPR, TREE_TYPE (tem), tem,
4507 build_int_cst (TREE_TYPE (tem),
4508 imask));
4510 const HOST_WIDE_INT shift
4511 = (BYTES_BIG_ENDIAN ? end_gap : start_gap);
4512 if (shift < 0)
4513 tem = gimple_build (&seq, loc,
4514 RSHIFT_EXPR, TREE_TYPE (tem), tem,
4515 build_int_cst (NULL_TREE, -shift));
4516 tem = gimple_convert (&seq, loc, dest_type, tem);
4517 if (shift > 0)
4518 tem = gimple_build (&seq, loc,
4519 LSHIFT_EXPR, dest_type, tem,
4520 build_int_cst (NULL_TREE, shift));
4521 src = gimple_build (&seq, loc,
4522 BIT_IOR_EXPR, dest_type, tem, src);
4525 if (!integer_zerop (mask))
4527 tree tem = make_ssa_name (dest_type);
4528 tree load_src = unshare_expr (dest);
4529 /* The load might load some or all bits uninitialized;
4530 avoid -W*uninitialized warnings in that case.
4531 As an optimization, if all the bits were provably
4532 uninitialized (no stores at all yet, or the previous
4533 store was a CLOBBER), it would be nice to optimize away
4534 the load and replace it e.g. with 0. */
4535 TREE_NO_WARNING (load_src) = 1;
4536 stmt = gimple_build_assign (tem, load_src);
4537 gimple_set_location (stmt, loc);
4538 gimple_set_vuse (stmt, new_vuse);
4539 gimple_seq_add_stmt_without_update (&seq, stmt);
4541 /* FIXME: If there is a single chunk of zero bits in mask,
4542 perhaps use BIT_INSERT_EXPR instead? */
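/* Sketch of the masked merge built below (illustrative mask value):
   with mask 0xff00ff00, the untouched bytes are kept from memory,
     tem = tem & 0xff00ff00;
   the new value is restricted to the complementary bytes,
     src = src & 0x00ff00ff;
   and the two are combined into the value that gets stored,
     src = tem | src;  */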
4543 stmt = gimple_build_assign (make_ssa_name (dest_type),
4544 BIT_AND_EXPR, tem, mask);
4545 gimple_set_location (stmt, loc);
4546 gimple_seq_add_stmt_without_update (&seq, stmt);
4547 tem = gimple_assign_lhs (stmt);
4549 if (TREE_CODE (src) == INTEGER_CST)
4550 src = wide_int_to_tree (dest_type,
4551 wi::bit_and_not (wi::to_wide (src),
4552 wi::to_wide (mask)));
4553 else
4555 tree nmask
4556 = wide_int_to_tree (dest_type,
4557 wi::bit_not (wi::to_wide (mask)));
4558 stmt = gimple_build_assign (make_ssa_name (dest_type),
4559 BIT_AND_EXPR, src, nmask);
4560 gimple_set_location (stmt, loc);
4561 gimple_seq_add_stmt_without_update (&seq, stmt);
4562 src = gimple_assign_lhs (stmt);
4564 stmt = gimple_build_assign (make_ssa_name (dest_type),
4565 BIT_IOR_EXPR, tem, src);
4566 gimple_set_location (stmt, loc);
4567 gimple_seq_add_stmt_without_update (&seq, stmt);
4568 src = gimple_assign_lhs (stmt);
4572 stmt = gimple_build_assign (dest, src);
4573 gimple_set_location (stmt, loc);
4574 gimple_set_vuse (stmt, new_vuse);
4575 gimple_seq_add_stmt_without_update (&seq, stmt);
4577 if (group->lp_nr && stmt_could_throw_p (cfun, stmt))
4578 add_stmt_to_eh_lp (stmt, group->lp_nr);
4580 tree new_vdef;
4581 if (i < split_stores.length () - 1)
4582 new_vdef = make_ssa_name (gimple_vop (cfun), stmt);
4583 else
4584 new_vdef = last_vdef;
4586 gimple_set_vdef (stmt, new_vdef);
4587 SSA_NAME_DEF_STMT (new_vdef) = stmt;
4588 new_vuse = new_vdef;
4591 FOR_EACH_VEC_ELT (split_stores, i, split_store)
4592 delete split_store;
4594 gcc_assert (seq);
4595 if (dump_file)
4597 fprintf (dump_file,
4598 "New sequence of %u stores to replace old one of %u stores\n",
4599 split_stores.length (), orig_num_stmts);
4600 if (dump_flags & TDF_DETAILS)
4601 print_gimple_seq (dump_file, seq, 0, TDF_VOPS | TDF_MEMSYMS);
4604 if (gimple_clobber_p (group->last_stmt))
4605 update_stmt (group->last_stmt);
4607 if (group->lp_nr > 0)
4609 /* We're going to insert a sequence of (potentially) throwing stores
4610 into an active EH region. This means that we're going to create
4611 new basic blocks with EH edges pointing to the post landing pad
4612 and, therefore, will have to update its PHI nodes, if any.  For the
4613 virtual PHI node, we're going to use the VDEFs created above, but
4614 for the other nodes, we need to record the original reaching defs. */
4615 eh_landing_pad lp = get_eh_landing_pad_from_number (group->lp_nr);
4616 basic_block lp_bb = label_to_block (cfun, lp->post_landing_pad);
4617 basic_block last_bb = gimple_bb (group->last_stmt);
4618 edge last_edge = find_edge (last_bb, lp_bb);
4619 auto_vec<tree, 16> last_defs;
4620 gphi_iterator gpi;
4621 for (gpi = gsi_start_phis (lp_bb); !gsi_end_p (gpi); gsi_next (&gpi))
4623 gphi *phi = gpi.phi ();
4624 tree last_def;
4625 if (virtual_operand_p (gimple_phi_result (phi)))
4626 last_def = NULL_TREE;
4627 else
4628 last_def = gimple_phi_arg_def (phi, last_edge->dest_idx);
4629 last_defs.safe_push (last_def);
4632 /* Do the insertion. Then, if new basic blocks have been created in the
4633 process, rewind the chain of VDEFs created above to walk the new basic
4634 blocks and update the corresponding arguments of the PHI nodes. */
4635 update_modified_stmts (seq);
4636 if (gimple_find_sub_bbs (seq, &last_gsi))
4637 while (last_vdef != gimple_vuse (group->last_stmt))
4639 gimple *stmt = SSA_NAME_DEF_STMT (last_vdef);
4640 if (stmt_could_throw_p (cfun, stmt))
4642 edge new_edge = find_edge (gimple_bb (stmt), lp_bb);
4643 unsigned int i;
4644 for (gpi = gsi_start_phis (lp_bb), i = 0;
4645 !gsi_end_p (gpi);
4646 gsi_next (&gpi), i++)
4648 gphi *phi = gpi.phi ();
4649 tree new_def;
4650 if (virtual_operand_p (gimple_phi_result (phi)))
4651 new_def = last_vdef;
4652 else
4653 new_def = last_defs[i];
4654 add_phi_arg (phi, new_def, new_edge, UNKNOWN_LOCATION);
4657 last_vdef = gimple_vuse (stmt);
4660 else
4661 gsi_insert_seq_after (&last_gsi, seq, GSI_SAME_STMT);
4663 for (int j = 0; j < 2; ++j)
4664 if (load_seq[j])
4665 gsi_insert_seq_after (&load_gsi[j], load_seq[j], GSI_SAME_STMT);
4667 return true;
4670 /* Process the merged_store_group objects created in the coalescing phase.
4671 The stores are all against the base object BASE.
4672 Try to output the widened stores and delete the original statements if
4673 successful. Return true iff any changes were made. */
4675 bool
4676 imm_store_chain_info::output_merged_stores ()
4678 unsigned int i;
4679 merged_store_group *merged_store;
4680 bool ret = false;
4681 FOR_EACH_VEC_ELT (m_merged_store_groups, i, merged_store)
4683 if (dbg_cnt (store_merging)
4684 && output_merged_store (merged_store))
4686 unsigned int j;
4687 store_immediate_info *store;
4688 FOR_EACH_VEC_ELT (merged_store->stores, j, store)
4690 gimple *stmt = store->stmt;
4691 gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
4692 /* Don't remove clobbers; they are still useful even if
4693 everything is overwritten afterwards. */
4694 if (gimple_clobber_p (stmt))
4695 continue;
4696 gsi_remove (&gsi, true);
4697 if (store->lp_nr)
4698 remove_stmt_from_eh_lp (stmt);
4699 if (stmt != merged_store->last_stmt)
4701 unlink_stmt_vdef (stmt);
4702 release_defs (stmt);
4705 ret = true;
4708 if (ret && dump_file)
4709 fprintf (dump_file, "Merging successful!\n");
4711 return ret;
4714 /* Coalesce the store_immediate_info objects recorded against the base object
4715 BASE in the first phase and output them.
4716 Delete the allocated structures.
4717 Return true if any changes were made. */
4719 bool
4720 imm_store_chain_info::terminate_and_process_chain ()
4722 if (dump_file && (dump_flags & TDF_DETAILS))
4723 fprintf (dump_file, "Terminating chain with %u stores\n",
4724 m_store_info.length ());
4725 /* Process store chain. */
4726 bool ret = false;
4727 if (m_store_info.length () > 1)
4729 ret = coalesce_immediate_stores ();
4730 if (ret)
4731 ret = output_merged_stores ();
4734 /* Delete all the entries we allocated ourselves. */
4735 store_immediate_info *info;
4736 unsigned int i;
4737 FOR_EACH_VEC_ELT (m_store_info, i, info)
4738 delete info;
4740 merged_store_group *merged_info;
4741 FOR_EACH_VEC_ELT (m_merged_store_groups, i, merged_info)
4742 delete merged_info;
4744 return ret;
4747 /* Return true iff LHS is a destination potentially interesting for
4748 store merging. In practice these are the codes that get_inner_reference
4749 can process. */
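/* Typical destinations accepted here (illustrative): a plain decl
   "x = ...", a field access "s.f = ..." (COMPONENT_REF), an array
   element "a[i] = ..." (ARRAY_REF) and an indirect access
   "*p = ..." (MEM_REF).  */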
4751 static bool
4752 lhs_valid_for_store_merging_p (tree lhs)
4754 if (DECL_P (lhs))
4755 return true;
4757 switch (TREE_CODE (lhs))
4759 case ARRAY_REF:
4760 case ARRAY_RANGE_REF:
4761 case BIT_FIELD_REF:
4762 case COMPONENT_REF:
4763 case MEM_REF:
4764 case VIEW_CONVERT_EXPR:
4765 return true;
4766 default:
4767 return false;
4770 gcc_unreachable ();
4773 /* Return true if the tree RHS is a constant we want to consider
4774 during store merging. In practice accept all codes that
4775 native_encode_expr accepts. */
4777 static bool
4778 rhs_valid_for_store_merging_p (tree rhs)
4780 unsigned HOST_WIDE_INT size;
4781 if (TREE_CODE (rhs) == CONSTRUCTOR
4782 && CONSTRUCTOR_NELTS (rhs) == 0
4783 && TYPE_SIZE_UNIT (TREE_TYPE (rhs))
4784 && tree_fits_uhwi_p (TYPE_SIZE_UNIT (TREE_TYPE (rhs))))
4785 return true;
4786 return (GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (rhs))).is_constant (&size)
4787 && native_encode_expr (rhs, NULL, size) != 0);
4790 /* Adjust *PBITPOS, *PBITREGION_START and *PBITREGION_END by BYTE_OFF bytes
4791 and return true on success or false on failure. */
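/* For example (assuming 8-bit units), a BYTE_OFF of 2 adds 16 bits to
   *PBITPOS and, when a bit region has been recorded, to *PBITREGION_START
   and *PBITREGION_END as well; the function fails only if the adjusted
   bit position would be negative or not representable.  */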
4793 static bool
4794 adjust_bit_pos (poly_offset_int byte_off,
4795 poly_int64 *pbitpos,
4796 poly_uint64 *pbitregion_start,
4797 poly_uint64 *pbitregion_end)
4799 poly_offset_int bit_off = byte_off << LOG2_BITS_PER_UNIT;
4800 bit_off += *pbitpos;
4802 if (known_ge (bit_off, 0) && bit_off.to_shwi (pbitpos))
4804 if (maybe_ne (*pbitregion_end, 0U))
4806 bit_off = byte_off << LOG2_BITS_PER_UNIT;
4807 bit_off += *pbitregion_start;
4808 if (bit_off.to_uhwi (pbitregion_start))
4810 bit_off = byte_off << LOG2_BITS_PER_UNIT;
4811 bit_off += *pbitregion_end;
4812 if (!bit_off.to_uhwi (pbitregion_end))
4813 *pbitregion_end = 0;
4815 else
4816 *pbitregion_end = 0;
4818 return true;
4820 else
4821 return false;
4824 /* If MEM is a memory reference usable for store merging (either as
4825 store destination or for loads), return the non-NULL base_addr
4826 and set *PBITSIZE, *PBITPOS, *PBITREGION_START and *PBITREGION_END.
4827 Otherwise return NULL; *PBITPOS should still be valid even in that
4828 case. */
4830 static tree
4831 mem_valid_for_store_merging (tree mem, poly_uint64 *pbitsize,
4832 poly_uint64 *pbitpos,
4833 poly_uint64 *pbitregion_start,
4834 poly_uint64 *pbitregion_end)
4836 poly_int64 bitsize, bitpos;
4837 poly_uint64 bitregion_start = 0, bitregion_end = 0;
4838 machine_mode mode;
4839 int unsignedp = 0, reversep = 0, volatilep = 0;
4840 tree offset;
4841 tree base_addr = get_inner_reference (mem, &bitsize, &bitpos, &offset, &mode,
4842 &unsignedp, &reversep, &volatilep);
4843 *pbitsize = bitsize;
4844 if (known_eq (bitsize, 0))
4845 return NULL_TREE;
4847 if (TREE_CODE (mem) == COMPONENT_REF
4848 && DECL_BIT_FIELD_TYPE (TREE_OPERAND (mem, 1)))
4850 get_bit_range (&bitregion_start, &bitregion_end, mem, &bitpos, &offset);
4851 if (maybe_ne (bitregion_end, 0U))
4852 bitregion_end += 1;
4855 if (reversep)
4856 return NULL_TREE;
4858 /* We do not want to rewrite TARGET_MEM_REFs. */
4859 if (TREE_CODE (base_addr) == TARGET_MEM_REF)
4860 return NULL_TREE;
4861 /* In some cases get_inner_reference may return a
4862 MEM_REF [ptr + byteoffset]. For the purposes of this pass
4863 canonicalize the base_addr to MEM_REF [ptr] and take
4864 byteoffset into account in the bitpos. This occurs in
4865 PR 23684 and this way we can catch more chains. */
4866 else if (TREE_CODE (base_addr) == MEM_REF)
4868 if (!adjust_bit_pos (mem_ref_offset (base_addr), &bitpos,
4869 &bitregion_start, &bitregion_end))
4870 return NULL_TREE;
4871 base_addr = TREE_OPERAND (base_addr, 0);
4873 /* get_inner_reference returns the base object, get at its
4874 address now. */
4875 else
4877 if (maybe_lt (bitpos, 0))
4878 return NULL_TREE;
4879 base_addr = build_fold_addr_expr (base_addr);
4882 if (offset)
4884 /* If the access has a variable offset then a base decl has to be
4885 address-taken to be able to emit pointer-based stores to it.
4886 ??? We might be able to get away with re-using the original
4887 base up to the first variable part and then wrapping that inside
4888 a BIT_FIELD_REF. */
4889 tree base = get_base_address (base_addr);
4890 if (!base || (DECL_P (base) && !TREE_ADDRESSABLE (base)))
4891 return NULL_TREE;
4893 /* Similarly to above for the base, remove constant from the offset. */
4894 if (TREE_CODE (offset) == PLUS_EXPR
4895 && TREE_CODE (TREE_OPERAND (offset, 1)) == INTEGER_CST
4896 && adjust_bit_pos (wi::to_poly_offset (TREE_OPERAND (offset, 1)),
4897 &bitpos, &bitregion_start, &bitregion_end))
4898 offset = TREE_OPERAND (offset, 0);
4900 base_addr = build2 (POINTER_PLUS_EXPR, TREE_TYPE (base_addr),
4901 base_addr, offset);
4904 if (known_eq (bitregion_end, 0U))
4906 bitregion_start = round_down_to_byte_boundary (bitpos);
4907 bitregion_end = round_up_to_byte_boundary (bitpos + bitsize);
4910 *pbitsize = bitsize;
4911 *pbitpos = bitpos;
4912 *pbitregion_start = bitregion_start;
4913 *pbitregion_end = bitregion_end;
4914 return base_addr;
4917 /* Return true if STMT is a load that can be used for store merging.
4918 In that case fill in *OP. BITSIZE, BITPOS, BITREGION_START and
4919 BITREGION_END are properties of the corresponding store. */
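/* A handled load looks for instance like (illustrative GIMPLE)
     _1 = *q_3(D);    or    _1 = s.f;
   possibly wrapped in a single BIT_NOT_EXPR, provided it loads exactly
   as many bits as the store writes and its surrounding bit region is at
   least as large as the store's.  */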
4921 static bool
4922 handled_load (gimple *stmt, store_operand_info *op,
4923 poly_uint64 bitsize, poly_uint64 bitpos,
4924 poly_uint64 bitregion_start, poly_uint64 bitregion_end)
4926 if (!is_gimple_assign (stmt))
4927 return false;
4928 if (gimple_assign_rhs_code (stmt) == BIT_NOT_EXPR)
4930 tree rhs1 = gimple_assign_rhs1 (stmt);
4931 if (TREE_CODE (rhs1) == SSA_NAME
4932 && handled_load (SSA_NAME_DEF_STMT (rhs1), op, bitsize, bitpos,
4933 bitregion_start, bitregion_end))
4935 /* Don't allow _1 = load; _2 = ~_1; _3 = ~_2; which should have
4936 been optimized earlier, but if allowed here, would confuse the
4937 multiple uses counting. */
4938 if (op->bit_not_p)
4939 return false;
4940 op->bit_not_p = !op->bit_not_p;
4941 return true;
4943 return false;
4945 if (gimple_vuse (stmt)
4946 && gimple_assign_load_p (stmt)
4947 && !stmt_can_throw_internal (cfun, stmt)
4948 && !gimple_has_volatile_ops (stmt))
4950 tree mem = gimple_assign_rhs1 (stmt);
4951 op->base_addr
4952 = mem_valid_for_store_merging (mem, &op->bitsize, &op->bitpos,
4953 &op->bitregion_start,
4954 &op->bitregion_end);
4955 if (op->base_addr != NULL_TREE
4956 && known_eq (op->bitsize, bitsize)
4957 && multiple_p (op->bitpos - bitpos, BITS_PER_UNIT)
4958 && known_ge (op->bitpos - op->bitregion_start,
4959 bitpos - bitregion_start)
4960 && known_ge (op->bitregion_end - op->bitpos,
4961 bitregion_end - bitpos))
4963 op->stmt = stmt;
4964 op->val = mem;
4965 op->bit_not_p = false;
4966 return true;
4969 return false;
4972 /* Return the index number of the landing pad for STMT, if any. */
4974 static int
4975 lp_nr_for_store (gimple *stmt)
4977 if (!cfun->can_throw_non_call_exceptions || !cfun->eh)
4978 return 0;
4980 if (!stmt_could_throw_p (cfun, stmt))
4981 return 0;
4983 return lookup_stmt_eh_lp (stmt);
4986 /* Record the store STMT for store merging optimization if it can be
4987 optimized. Return true if any changes were made. */
4989 bool
4990 pass_store_merging::process_store (gimple *stmt)
4992 tree lhs = gimple_assign_lhs (stmt);
4993 tree rhs = gimple_assign_rhs1 (stmt);
4994 poly_uint64 bitsize, bitpos = 0;
4995 poly_uint64 bitregion_start = 0, bitregion_end = 0;
4996 tree base_addr
4997 = mem_valid_for_store_merging (lhs, &bitsize, &bitpos,
4998 &bitregion_start, &bitregion_end);
4999 if (known_eq (bitsize, 0U))
5000 return false;
5002 bool invalid = (base_addr == NULL_TREE
5003 || (maybe_gt (bitsize,
5004 (unsigned int) MAX_BITSIZE_MODE_ANY_INT)
5005 && TREE_CODE (rhs) != INTEGER_CST
5006 && (TREE_CODE (rhs) != CONSTRUCTOR
5007 || CONSTRUCTOR_NELTS (rhs) != 0)));
5008 enum tree_code rhs_code = ERROR_MARK;
5009 bool bit_not_p = false;
5010 struct symbolic_number n;
5011 gimple *ins_stmt = NULL;
5012 store_operand_info ops[2];
5013 if (invalid)
5015 else if (TREE_CODE (rhs) == STRING_CST)
5017 rhs_code = STRING_CST;
5018 ops[0].val = rhs;
5020 else if (rhs_valid_for_store_merging_p (rhs))
5022 rhs_code = INTEGER_CST;
5023 ops[0].val = rhs;
5025 else if (TREE_CODE (rhs) == SSA_NAME)
5027 gimple *def_stmt = SSA_NAME_DEF_STMT (rhs), *def_stmt1, *def_stmt2;
5028 if (!is_gimple_assign (def_stmt))
5029 invalid = true;
5030 else if (handled_load (def_stmt, &ops[0], bitsize, bitpos,
5031 bitregion_start, bitregion_end))
5032 rhs_code = MEM_REF;
5033 else if (gimple_assign_rhs_code (def_stmt) == BIT_NOT_EXPR)
5035 tree rhs1 = gimple_assign_rhs1 (def_stmt);
5036 if (TREE_CODE (rhs1) == SSA_NAME
5037 && is_gimple_assign (SSA_NAME_DEF_STMT (rhs1)))
5039 bit_not_p = true;
5040 def_stmt = SSA_NAME_DEF_STMT (rhs1);
5044 if (rhs_code == ERROR_MARK && !invalid)
5045 switch ((rhs_code = gimple_assign_rhs_code (def_stmt)))
5047 case BIT_AND_EXPR:
5048 case BIT_IOR_EXPR:
5049 case BIT_XOR_EXPR:
5050 tree rhs1, rhs2;
5051 rhs1 = gimple_assign_rhs1 (def_stmt);
5052 rhs2 = gimple_assign_rhs2 (def_stmt);
5053 invalid = true;
5054 if (TREE_CODE (rhs1) != SSA_NAME)
5055 break;
5056 def_stmt1 = SSA_NAME_DEF_STMT (rhs1);
5057 if (!is_gimple_assign (def_stmt1)
5058 || !handled_load (def_stmt1, &ops[0], bitsize, bitpos,
5059 bitregion_start, bitregion_end))
5060 break;
5061 if (rhs_valid_for_store_merging_p (rhs2))
5062 ops[1].val = rhs2;
5063 else if (TREE_CODE (rhs2) != SSA_NAME)
5064 break;
5065 else
5067 def_stmt2 = SSA_NAME_DEF_STMT (rhs2);
5068 if (!is_gimple_assign (def_stmt2))
5069 break;
5070 else if (!handled_load (def_stmt2, &ops[1], bitsize, bitpos,
5071 bitregion_start, bitregion_end))
5072 break;
5074 invalid = false;
5075 break;
5076 default:
5077 invalid = true;
5078 break;
5081 unsigned HOST_WIDE_INT const_bitsize;
5082 if (bitsize.is_constant (&const_bitsize)
5083 && (const_bitsize % BITS_PER_UNIT) == 0
5084 && const_bitsize <= 64
5085 && multiple_p (bitpos, BITS_PER_UNIT))
5087 ins_stmt = find_bswap_or_nop_1 (def_stmt, &n, 12);
5088 if (ins_stmt)
5090 uint64_t nn = n.n;
5091 for (unsigned HOST_WIDE_INT i = 0;
5092 i < const_bitsize;
5093 i += BITS_PER_UNIT, nn >>= BITS_PER_MARKER)
5094 if ((nn & MARKER_MASK) == 0
5095 || (nn & MARKER_MASK) == MARKER_BYTE_UNKNOWN)
5097 ins_stmt = NULL;
5098 break;
5100 if (ins_stmt)
5102 if (invalid)
5104 rhs_code = LROTATE_EXPR;
5105 ops[0].base_addr = NULL_TREE;
5106 ops[1].base_addr = NULL_TREE;
5108 invalid = false;
5113 if (invalid
5114 && bitsize.is_constant (&const_bitsize)
5115 && ((const_bitsize % BITS_PER_UNIT) != 0
5116 || !multiple_p (bitpos, BITS_PER_UNIT))
5117 && const_bitsize <= MAX_FIXED_MODE_SIZE)
5119 /* Bypass a conversion to the bit-field type. */
5120 if (!bit_not_p
5121 && is_gimple_assign (def_stmt)
5122 && CONVERT_EXPR_CODE_P (rhs_code))
5124 tree rhs1 = gimple_assign_rhs1 (def_stmt);
5125 if (TREE_CODE (rhs1) == SSA_NAME
5126 && INTEGRAL_TYPE_P (TREE_TYPE (rhs1)))
5127 rhs = rhs1;
5129 rhs_code = BIT_INSERT_EXPR;
5130 bit_not_p = false;
5131 ops[0].val = rhs;
5132 ops[0].base_addr = NULL_TREE;
5133 ops[1].base_addr = NULL_TREE;
5134 invalid = false;
5137 else
5138 invalid = true;
5140 unsigned HOST_WIDE_INT const_bitsize, const_bitpos;
5141 unsigned HOST_WIDE_INT const_bitregion_start, const_bitregion_end;
5142 if (invalid
5143 || !bitsize.is_constant (&const_bitsize)
5144 || !bitpos.is_constant (&const_bitpos)
5145 || !bitregion_start.is_constant (&const_bitregion_start)
5146 || !bitregion_end.is_constant (&const_bitregion_end))
5147 return terminate_all_aliasing_chains (NULL, stmt);
5149 if (!ins_stmt)
5150 memset (&n, 0, sizeof (n));
5152 class imm_store_chain_info **chain_info = NULL;
5153 bool ret = false;
5154 if (base_addr)
5155 chain_info = m_stores.get (base_addr);
5157 store_immediate_info *info;
5158 if (chain_info)
5160 unsigned int ord = (*chain_info)->m_store_info.length ();
5161 info = new store_immediate_info (const_bitsize, const_bitpos,
5162 const_bitregion_start,
5163 const_bitregion_end,
5164 stmt, ord, rhs_code, n, ins_stmt,
5165 bit_not_p, lp_nr_for_store (stmt),
5166 ops[0], ops[1]);
5167 if (dump_file && (dump_flags & TDF_DETAILS))
5169 fprintf (dump_file, "Recording immediate store from stmt:\n");
5170 print_gimple_stmt (dump_file, stmt, 0);
5172 (*chain_info)->m_store_info.safe_push (info);
5173 m_n_stores++;
5174 ret |= terminate_all_aliasing_chains (chain_info, stmt);
5175 /* If we reach the limit of stores to merge in a chain, terminate and
5176 process the chain now. */
5177 if ((*chain_info)->m_store_info.length ()
5178 == (unsigned int) param_max_stores_to_merge)
5180 if (dump_file && (dump_flags & TDF_DETAILS))
5181 fprintf (dump_file,
5182 "Reached maximum number of statements to merge:\n");
5183 ret |= terminate_and_process_chain (*chain_info);
5186 else
5188 /* Does the store alias any existing chain?  */
5189 ret |= terminate_all_aliasing_chains (NULL, stmt);
5191 /* Start a new chain. */
5192 class imm_store_chain_info *new_chain
5193 = new imm_store_chain_info (m_stores_head, base_addr);
5194 info = new store_immediate_info (const_bitsize, const_bitpos,
5195 const_bitregion_start,
5196 const_bitregion_end,
5197 stmt, 0, rhs_code, n, ins_stmt,
5198 bit_not_p, lp_nr_for_store (stmt),
5199 ops[0], ops[1]);
5200 new_chain->m_store_info.safe_push (info);
5201 m_n_stores++;
5202 m_stores.put (base_addr, new_chain);
5203 m_n_chains++;
5204 if (dump_file && (dump_flags & TDF_DETAILS))
5206 fprintf (dump_file, "Starting active chain number %u with statement:\n",
5207 m_n_chains);
5208 print_gimple_stmt (dump_file, stmt, 0);
5209 fprintf (dump_file, "The base object is:\n");
5210 print_generic_expr (dump_file, base_addr);
5211 fprintf (dump_file, "\n");
5215 /* Prune oldest chains so that after adding the chain or store above
5216 we're again within the limits set by the params. */
5217 if (m_n_chains > (unsigned)param_max_store_chains_to_track
5218 || m_n_stores > (unsigned)param_max_stores_to_track)
5220 if (dump_file && (dump_flags & TDF_DETAILS))
5221 fprintf (dump_file, "Too many chains (%u > %d) or stores (%u > %d), "
5222 "terminating oldest chain(s).\n", m_n_chains,
5223 param_max_store_chains_to_track, m_n_stores,
5224 param_max_stores_to_track);
5225 imm_store_chain_info **e = &m_stores_head;
5226 unsigned idx = 0;
5227 unsigned n_stores = 0;
5228 while (*e)
5230 if (idx >= (unsigned)param_max_store_chains_to_track
5231 || (n_stores + (*e)->m_store_info.length ()
5232 > (unsigned)param_max_stores_to_track))
5233 ret |= terminate_and_process_chain (*e);
5234 else
5236 n_stores += (*e)->m_store_info.length ();
5237 e = &(*e)->next;
5238 ++idx;
5243 return ret;
5246 /* Return true if STMT is a store valid for store merging. */
5248 static bool
5249 store_valid_for_store_merging_p (gimple *stmt)
5251 return gimple_assign_single_p (stmt)
5252 && gimple_vdef (stmt)
5253 && lhs_valid_for_store_merging_p (gimple_assign_lhs (stmt))
5254 && (!gimple_has_volatile_ops (stmt) || gimple_clobber_p (stmt));
5257 enum basic_block_status { BB_INVALID, BB_VALID, BB_EXTENDED_VALID };
5259 /* Return the status of basic block BB wrt store merging. */
5261 static enum basic_block_status
5262 get_status_for_store_merging (basic_block bb)
5264 unsigned int num_statements = 0;
5265 unsigned int num_constructors = 0;
5266 gimple_stmt_iterator gsi;
5267 edge e;
5269 for (gsi = gsi_after_labels (bb); !gsi_end_p (gsi); gsi_next (&gsi))
5271 gimple *stmt = gsi_stmt (gsi);
5273 if (is_gimple_debug (stmt))
5274 continue;
5276 if (store_valid_for_store_merging_p (stmt) && ++num_statements >= 2)
5277 break;
5279 if (is_gimple_assign (stmt)
5280 && gimple_assign_rhs_code (stmt) == CONSTRUCTOR)
5282 tree rhs = gimple_assign_rhs1 (stmt);
5283 if (VECTOR_TYPE_P (TREE_TYPE (rhs))
5284 && INTEGRAL_TYPE_P (TREE_TYPE (TREE_TYPE (rhs)))
5285 && gimple_assign_lhs (stmt) != NULL_TREE)
5287 HOST_WIDE_INT sz
5288 = int_size_in_bytes (TREE_TYPE (rhs)) * BITS_PER_UNIT;
5289 if (sz == 16 || sz == 32 || sz == 64)
5291 num_constructors = 1;
5292 break;
5298 if (num_statements == 0 && num_constructors == 0)
5299 return BB_INVALID;
5301 if (cfun->can_throw_non_call_exceptions && cfun->eh
5302 && store_valid_for_store_merging_p (gimple_seq_last_stmt (bb_seq (bb)))
5303 && (e = find_fallthru_edge (bb->succs))
5304 && e->dest == bb->next_bb)
5305 return BB_EXTENDED_VALID;
5307 return (num_statements >= 2 || num_constructors) ? BB_VALID : BB_INVALID;
5310 /* Entry point for the pass. Go over each basic block recording chains of
5311 immediate stores. Upon encountering a terminating statement (as defined
5312 by stmt_terminates_chain_p) process the recorded stores and emit the widened
5313 variants. */
5315 unsigned int
5316 pass_store_merging::execute (function *fun)
5318 basic_block bb;
5319 hash_set<gimple *> orig_stmts;
5320 bool changed = false, open_chains = false;
5322 /* If the function can throw and catch non-call exceptions, we'll be trying
5323 to merge stores across different basic blocks so we need to first unsplit
5324 the EH edges in order to streamline the CFG of the function. */
5325 if (cfun->can_throw_non_call_exceptions && cfun->eh)
5326 unsplit_eh_edges ();
5328 calculate_dominance_info (CDI_DOMINATORS);
5330 FOR_EACH_BB_FN (bb, fun)
5332 const basic_block_status bb_status = get_status_for_store_merging (bb);
5333 gimple_stmt_iterator gsi;
5335 if (open_chains && (bb_status == BB_INVALID || !single_pred_p (bb)))
5337 changed |= terminate_and_process_all_chains ();
5338 open_chains = false;
5341 if (bb_status == BB_INVALID)
5342 continue;
5344 if (dump_file && (dump_flags & TDF_DETAILS))
5345 fprintf (dump_file, "Processing basic block <%d>:\n", bb->index);
5347 for (gsi = gsi_after_labels (bb); !gsi_end_p (gsi); )
5349 gimple *stmt = gsi_stmt (gsi);
5350 gsi_next (&gsi);
5352 if (is_gimple_debug (stmt))
5353 continue;
5355 if (gimple_has_volatile_ops (stmt) && !gimple_clobber_p (stmt))
5357 /* Terminate all chains. */
5358 if (dump_file && (dump_flags & TDF_DETAILS))
5359 fprintf (dump_file, "Volatile access terminates "
5360 "all chains\n");
5361 changed |= terminate_and_process_all_chains ();
5362 open_chains = false;
5363 continue;
5366 if (is_gimple_assign (stmt)
5367 && gimple_assign_rhs_code (stmt) == CONSTRUCTOR
5368 && maybe_optimize_vector_constructor (stmt))
5369 continue;
5371 if (store_valid_for_store_merging_p (stmt))
5372 changed |= process_store (stmt);
5373 else
5374 changed |= terminate_all_aliasing_chains (NULL, stmt);
5377 if (bb_status == BB_EXTENDED_VALID)
5378 open_chains = true;
5379 else
5381 changed |= terminate_and_process_all_chains ();
5382 open_chains = false;
5386 if (open_chains)
5387 changed |= terminate_and_process_all_chains ();
5389 /* If the function can throw and catch non-call exceptions and something
5390 changed during the pass, then the CFG has (very likely) changed too. */
5391 if (cfun->can_throw_non_call_exceptions && cfun->eh && changed)
5393 free_dominance_info (CDI_DOMINATORS);
5394 return TODO_cleanup_cfg;
5397 return 0;
5400 } // anon namespace
5402 /* Construct and return a store merging pass object. */
5404 gimple_opt_pass *
5405 make_pass_store_merging (gcc::context *ctxt)
5407 return new pass_store_merging (ctxt);
5410 #if CHECKING_P
5412 namespace selftest {
5414 /* Selftests for store merging helpers. */
5416 /* Assert that all elements of the byte arrays X and Y, both of length N,
5417 are equal. */
5419 static void
5420 verify_array_eq (unsigned char *x, unsigned char *y, unsigned int n)
5422 for (unsigned int i = 0; i < n; i++)
5424 if (x[i] != y[i])
5426 fprintf (stderr, "Arrays do not match. X:\n");
5427 dump_char_array (stderr, x, n);
5428 fprintf (stderr, "Y:\n");
5429 dump_char_array (stderr, y, n);
5431 ASSERT_EQ (x[i], y[i]);
5435 /* Test shift_bytes_in_array_left and check that it carries bits across
5436 byte boundaries correctly. */
5438 static void
5439 verify_shift_bytes_in_array_left (void)
5441 /* byte 1 | byte 0
5442 00011111 | 11100000. */
5443 unsigned char orig[2] = { 0xe0, 0x1f };
5444 unsigned char in[2];
5445 memcpy (in, orig, sizeof orig);
5447 unsigned char expected[2] = { 0x80, 0x7f };
5448 shift_bytes_in_array_left (in, sizeof (in), 2);
5449 verify_array_eq (in, expected, sizeof (in));
5451 memcpy (in, orig, sizeof orig);
5452 memcpy (expected, orig, sizeof orig);
5453 /* Check that shifting by zero doesn't change anything. */
5454 shift_bytes_in_array_left (in, sizeof (in), 0);
5455 verify_array_eq (in, expected, sizeof (in));
5459 /* Test shift_bytes_in_array_right and check that it carries bits across
5460 byte boundaries correctly. */
5462 static void
5463 verify_shift_bytes_in_array_right (void)
5465 /* byte 1 | byte 0
5466 00011111 | 11100000. */
5467 unsigned char orig[2] = { 0x1f, 0xe0};
5468 unsigned char in[2];
5469 memcpy (in, orig, sizeof orig);
5470 unsigned char expected[2] = { 0x07, 0xf8};
5471 shift_bytes_in_array_right (in, sizeof (in), 2);
5472 verify_array_eq (in, expected, sizeof (in));
5474 memcpy (in, orig, sizeof orig);
5475 memcpy (expected, orig, sizeof orig);
5476 /* Check that shifting by zero doesn't change anything. */
5477 shift_bytes_in_array_right (in, sizeof (in), 0);
5478 verify_array_eq (in, expected, sizeof (in));
5481 /* Test clear_bit_region that it clears exactly the bits asked and
5482 nothing more. */
5484 static void
5485 verify_clear_bit_region (void)
5487 /* Start with all bits set and test clearing various patterns in them. */
5488 unsigned char orig[3] = { 0xff, 0xff, 0xff};
5489 unsigned char in[3];
5490 unsigned char expected[3];
5491 memcpy (in, orig, sizeof in);
5493 /* Check zeroing out all the bits. */
5494 clear_bit_region (in, 0, 3 * BITS_PER_UNIT);
5495 expected[0] = expected[1] = expected[2] = 0;
5496 verify_array_eq (in, expected, sizeof in);
5498 memcpy (in, orig, sizeof in);
5499 /* Leave the first and last bits intact. */
5500 clear_bit_region (in, 1, 3 * BITS_PER_UNIT - 2);
5501 expected[0] = 0x1;
5502 expected[1] = 0;
5503 expected[2] = 0x80;
5504 verify_array_eq (in, expected, sizeof in);
5507 /* Test clear_bit_region_be that it clears exactly the bits asked and
5508 nothing more. */
5510 static void
5511 verify_clear_bit_region_be (void)
5513 /* Start with all bits set and test clearing various patterns in them. */
5514 unsigned char orig[3] = { 0xff, 0xff, 0xff};
5515 unsigned char in[3];
5516 unsigned char expected[3];
5517 memcpy (in, orig, sizeof in);
5519 /* Check zeroing out all the bits. */
5520 clear_bit_region_be (in, BITS_PER_UNIT - 1, 3 * BITS_PER_UNIT);
5521 expected[0] = expected[1] = expected[2] = 0;
5522 verify_array_eq (in, expected, sizeof in);
5524 memcpy (in, orig, sizeof in);
5525 /* Leave the first and last bits intact. */
5526 clear_bit_region_be (in, BITS_PER_UNIT - 2, 3 * BITS_PER_UNIT - 2);
5527 expected[0] = 0x80;
5528 expected[1] = 0;
5529 expected[2] = 0x1;
5530 verify_array_eq (in, expected, sizeof in);
5534 /* Run all of the selftests within this file. */
5536 void
5537 store_merging_c_tests (void)
5539 verify_shift_bytes_in_array_left ();
5540 verify_shift_bytes_in_array_right ();
5541 verify_clear_bit_region ();
5542 verify_clear_bit_region_be ();
5545 } // namespace selftest
5546 #endif /* CHECKING_P. */