/*
  SPDX-License-Identifier: GPL-2.0-only

  Copyright (C) 2006 Mandriva Conectiva S.A.
  Copyright (C) 2006 Arnaldo Carvalho de Melo <acme@mandriva.com>
  Copyright (C) 2007 Red Hat Inc.
  Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
*/

#include "list.h"
#include "dwarves_reorganize.h"
#include "dwarves.h"

static void class__recalc_holes(struct class *class)
{
	class->holes_searched = 0;
	class__find_holes(class);
}

void class__subtract_offsets_from(struct class *class,
				  struct class_member *from,
				  const uint16_t size)
{
	struct class_member *member;

	class__for_each_member_continue(class, from, member) {
		member->byte_offset -= size;
		member->bit_offset -= size * 8;
	}

	if (class->padding != 0) {
		struct class_member *last_member =
					type__last_member(&class->type);
		const ssize_t new_padding = (class__size(class) -
					     (last_member->byte_offset +
					      last_member->byte_size));
		if (new_padding > 0)
			class->padding = new_padding;
		else
			class->padding = 0;
	}
}

void class__add_offsets_from(struct class *class, struct class_member *from,
			     const uint16_t size)
{
	struct class_member *member;

	class__for_each_member_continue(class, from, member) {
		member->byte_offset += size;
		member->bit_offset += size * 8;
	}
}
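
/*
 * Illustrative sketch (hypothetical member names, not from the original
 * source): given
 *
 *	struct example {
 *		void *a;	// offset  0, size 8
 *		int   b;	// offset  8, size 4
 *		int   c;	// offset 12, size 4
 *	};
 *
 * class__subtract_offsets_from(class, b, 4) shifts every member after
 * 'b': 'c' moves from byte_offset 12 to 8 (bit_offset 96 to 64).
 * class__add_offsets_from() is the exact inverse.
 */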

/*
 * XXX: Check this more thoroughly. Right now it is used because I was
 * too lazy to do class__remove_member properly, adjusting alignments and
 * holes as we go removing fields. Ditto for class__add_offsets_from.
 */
void class__fixup_alignment(struct class *class, const struct cu *cu)
{
	struct class_member *pos, *last_member = NULL;
	size_t power2;

	type__for_each_data_member(&class->type, pos) {
		if (last_member == NULL && pos->byte_offset != 0) { /* paranoid! */
			class__subtract_offsets_from(class, pos,
						     (pos->byte_offset -
						      pos->byte_size));
			pos->byte_offset = 0;
			pos->bit_offset = 0;
		} else if (last_member != NULL &&
			   last_member->hole >= cu->addr_size) {
			size_t dec = (last_member->hole / cu->addr_size) *
				     cu->addr_size;

			last_member->hole -= dec;
			if (last_member->hole == 0)
				--class->nr_holes;
			pos->byte_offset -= dec;
			pos->bit_offset -= dec * 8;
			class->type.size -= dec;
			class__subtract_offsets_from(class, pos, dec);
		} else for (power2 = cu->addr_size; power2 >= 2; power2 /= 2) {
			const size_t remainder = pos->byte_offset % power2;

			if (pos->byte_size == power2) {
				if (remainder == 0) /* perfectly aligned */
					break;
				if (last_member->hole >= remainder) {
					last_member->hole -= remainder;
					if (last_member->hole == 0)
						--class->nr_holes;
					pos->byte_offset -= remainder;
					pos->bit_offset -= remainder * 8;
					class__subtract_offsets_from(class, pos, remainder);
				} else {
					const size_t inc = power2 - remainder;

					if (last_member->hole == 0)
						++class->nr_holes;
					last_member->hole += inc;
					pos->byte_offset += inc;
					pos->bit_offset += inc * 8;
					class->type.size += inc;
					class__add_offsets_from(class, pos, inc);
				}
			}
		}

		last_member = pos;
	}

	if (last_member != NULL) {
		struct class_member *m =
			type__find_first_biggest_size_base_type_member(&class->type, cu);
		size_t unpadded_size = last_member->byte_offset + last_member->byte_size;
		size_t m_size = m->byte_size, remainder;

		/* google for struct zone_padding in the linux kernel for an example */
		if (m_size == 0)
			return;

		remainder = unpadded_size % m_size;
		if (remainder != 0) {
			class->padding = m_size - remainder;
			class->type.size = unpadded_size + class->padding;
		}
	}
}
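
/*
 * Illustrative sketch (contrived layout, e.g. after members were
 * removed; assumes cu->addr_size == 8):
 *
 *	struct s {
 *		char c;		// offset  0, size 1
 *				// 11-byte hole
 *		int  x;		// offset 12, size 4
 *	};			// size 16
 *
 * The second branch above folds the addr_size multiple of the hole
 * (dec = 8): 'x' moves from offset 12 to 4, the struct shrinks to 8
 * bytes, and a 3-byte hole remains before 'x'.
 */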

static struct class_member *
class__find_next_hole_of_size(struct class *class,
			      struct class_member *from, size_t size)
{
	struct class_member *bitfield_head = NULL;
	struct class_member *member;

	class__for_each_member_continue(class, from, member) {
		if (member->bitfield_size != 0) {
			if (bitfield_head == NULL)
				bitfield_head = member;
		} else
			bitfield_head = NULL;
		if (member->hole != 0) {
			if (member->byte_size != 0 && member->byte_size <= size)
				return bitfield_head ? : member;
		}
	}

	return NULL;
}

static struct class_member *
class__find_last_member_of_size(struct class *class,
				struct class_member *to, size_t size)
{
	struct class_member *member;

	class__for_each_member_reverse(class, member) {
		if (member->tag.tag != DW_TAG_member)
			continue;

		if (member == to)
			break;
		/*
		 * Check if this is the first member of a bitfield. It either
		 * has another member before it that is not part of the current
		 * bitfield or it is the first member of the struct.
		 */
		if (member->bitfield_size != 0 && member->byte_offset != 0) {
			struct class_member *prev =
					list_entry(member->tag.node.prev,
						   struct class_member,
						   tag.node);
			if (prev->bitfield_size != 0)
				continue;
		}

		if (member->byte_size != 0 && member->byte_size <= size)
			return member;
	}

	return NULL;
}

static bool class__move_member(struct class *class, struct class_member *dest,
			       struct class_member *from, const struct cu *cu,
			       int from_padding, const int verbose, FILE *fp)
{
	const size_t from_size = from->byte_size;
	const size_t dest_size = dest->byte_size;

#ifndef BITFIELD_REORG_ALGORITHMS_ENABLED
	/*
	 * For now refuse to move a bitfield, we need to first fixup some BRAIN FARTs
	 */
	if (from->bitfield_size != 0)
		return false;
#endif
	const bool from_was_last = from->tag.node.next == class__tags(class);
	struct class_member *tail_from = from;
	struct class_member *from_prev = list_entry(from->tag.node.prev,
						    struct class_member,
						    tag.node);
	uint16_t orig_tail_from_hole = tail_from->hole;
	const uint16_t orig_from_offset = from->byte_offset;
	/*
	 * Align 'from' after 'dest':
	 */
	const uint16_t offset = dest->hole % (from_size > cu->addr_size ?
					      cu->addr_size : from_size);
	/*
	 * Set the new 'from' offset, after 'dest->byte_offset', aligned:
	 */
	const uint16_t new_from_offset = dest->byte_offset + dest_size + offset;

	if (verbose)
		fputs("/* Moving", fp);

	if (from->bitfield_size != 0) {
		struct class_member *pos, *tmp;
		LIST_HEAD(from_list);

		if (verbose)
			fprintf(fp, " bitfield('%s' ... ",
				class_member__name(from));
		class__for_each_member_safe_from(class, from, pos, tmp) {
			/*
			 * Have we reached the end of the bitfield?
			 */
			if (pos->byte_offset != orig_from_offset)
				break;
			tail_from = pos;
			orig_tail_from_hole = tail_from->hole;
			pos->byte_offset = new_from_offset;
			pos->bit_offset = new_from_offset * 8 + pos->bitfield_offset;
			list_move_tail(&pos->tag.node, &from_list);
		}
		list_splice(&from_list, &dest->tag.node);
		if (verbose)
			fprintf(fp, "'%s')",
				class_member__name(tail_from));
	} else {
		if (verbose)
			fprintf(fp, " '%s'", class_member__name(from));
		/*
		 * Remove 'from' from the list:
		 */
		list_del(&from->tag.node);
		/*
		 * Add 'from' after 'dest':
		 */
		__list_add(&from->tag.node, &dest->tag.node,
			   dest->tag.node.next);
		from->byte_offset = new_from_offset;
		from->bit_offset = new_from_offset * 8 + from->bitfield_offset;
	}

	if (verbose)
		fprintf(fp, " from after '%s' to after '%s' */\n",
			class_member__name(from_prev),
			class_member__name(dest));

	if (from_padding) {
		/*
		 * Check if we're eliminating the need for padding:
		 */
		if (orig_from_offset % cu->addr_size == 0) {
			/*
			 * Good, no need for padding anymore:
			 */
			class->type.size -= from_size + class->padding;
		} else {
			/*
			 * No, so just add from_size to the padding:
			 */
			if (verbose)
				fprintf(fp, "/* adding %zd bytes from %s to "
					"the padding */\n",
					from_size, class_member__name(from));
		}
	} else if (from_was_last) {
		class->type.size -= from_size + class->padding;
	} else {
		/*
		 * See if we are adding a new hole that is bigger than
		 * sizeof(long). This may cause problems with explicit alignment
		 * made by the programmer; perhaps we need a switch that allows
		 * us to avoid realignment, just using existing holes but
		 * keeping the existing alignment. Anyway, the programmer has to
		 * check the resulting reorganization before using it, and for
		 * automatic stuff such as the one that will be used for struct
		 * "views" in tools such as ctracer we are more interested in
		 * packing the subset as tightly as possible.
		 */
		if (orig_tail_from_hole + from_size >= cu->addr_size) {
			class->type.size -= cu->addr_size;
			class__subtract_offsets_from(class, from_prev,
						     cu->addr_size);
		}
	}

	class__recalc_holes(class);

	if (verbose > 1) {
		class__fprintf(class, cu, fp);
		fputc('\n', fp);
	}

	return true;
}
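
/*
 * Illustrative sketch (hypothetical member names): with
 *
 *	struct s {
 *		void *p;	// offset  0, size 8
 *		int   x;	// offset  8, size 4
 *				// 4-byte hole
 *		long  l;	// offset 16, size 8
 *		int   y;	// offset 24, size 4
 *	};			// size 32, 4 bytes of padding
 *
 * class__move_member(class, x, y, cu, 1, verbose, fp) places 'y' right
 * after 'x' (new_from_offset = 8 + 4 + 0 = 12), filling the hole; since
 * orig_from_offset (24) is a multiple of cu->addr_size (8), the padding
 * is no longer needed and the struct shrinks to 24 bytes.
 */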

#ifdef BITFIELD_REORG_ALGORITHMS_ENABLED
static struct class_member *
class__find_next_bit_hole_of_size(struct class *class,
				  struct class_member *from,
				  size_t size)
{
	struct class_member *member;

	class__for_each_member_continue(class, from, member) {
		if (member->tag.tag != DW_TAG_member)
			continue;
		if (member->bit_hole != 0 &&
		    member->bitfield_size <= size)
			return member;
	}
#if 0
	/*
	 * FIXME: Handle the case where the bit padding is on the same bitfield
	 * that we're looking at, i.e. we can't combine a bitfield with itself,
	 * perhaps we should tag bitfields with a sequential id, clearly marking
	 * each of the bitfields in advance, so that all the algorithms that
	 * have to deal with bitfields, moving them around, demoting, etc, can
	 * be simplified.
	 */
	/*
	 * Now look if the last member is a one member bitfield,
	 * i.e. if we have bit_padding
	 */
	if (class->bit_padding != 0)
		return type__last_member(&class->type);
#endif
	return NULL;
}

static void class__move_bit_member(struct class *class, const struct cu *cu,
				   struct class_member *dest,
				   struct class_member *from,
				   const int verbose, FILE *fp)
{
	struct class_member *from_prev = list_entry(from->tag.node.prev,
						    struct class_member,
						    tag.node);

	if (verbose)
		fprintf(fp, "/* Moving '%s:%u' from after '%s' to "
			"after '%s:%u' */\n",
			class_member__name(from), from->bitfield_size,
			class_member__name(from_prev),
			class_member__name(dest), dest->bitfield_size);
	/*
	 * Remove 'from' from the list:
	 */
	list_del(&from->tag.node);
	/*
	 * Add 'from' after 'dest':
	 */
	__list_add(&from->tag.node,
		   &dest->tag.node,
		   dest->tag.node.next);

	/* Check if this was the last entry in the bitfield */
	if (from_prev->bitfield_size == 0) {
		size_t from_size = from->byte_size;
		/*
		 * Are we shrinking the struct?
		 */
		if (from_size + from->hole >= cu->addr_size) {
			class->type.size -= from_size + from->hole;
			class__subtract_offsets_from(class, from_prev,
						     from_size + from->hole);
		}
	}
	/*
	 * Tricky, what are the rules for bitfield layouts on this arch?
	 * Assume it's IA32:
	 */
	from->bitfield_offset = dest->bitfield_offset + dest->bitfield_size;
	/*
	 * Now both have the same offset:
	 */
	from->byte_offset = dest->byte_offset;
	from->bit_offset = dest->byte_offset * 8 + from->bitfield_offset;

	class__recalc_holes(class);

	if (verbose > 1) {
		class__fprintf(class, cu, fp);
		fputc('\n', fp);
	}
}

static void class__demote_bitfield_members(struct class *class,
					   struct class_member *from,
					   struct class_member *to,
					   const struct base_type *old_type,
					   const struct base_type *new_type,
					   type_id_t new_type_id)
{
	struct class_member *member;

	class__for_each_member_from(class, from, member) {
		member->byte_size = new_type->bit_size / 8;
		member->tag.type = new_type_id;
		if (member == to)
			break;
	}
}

static struct tag *cu__find_base_type_of_size(const struct cu *cu,
					      const size_t size, type_id_t *id)
{
	const char *type_name, *type_name_alt = NULL;

	switch (size) {
	case sizeof(unsigned char):
		type_name = "unsigned char"; break;
	case sizeof(unsigned short int):
		type_name = "short unsigned int";
		type_name_alt = "unsigned short"; break;
	case sizeof(unsigned int):
		type_name = "unsigned int";
		type_name_alt = "unsigned"; break;
	case sizeof(unsigned long long):
		if (cu->addr_size == 8) {
			type_name = "long unsigned int";
			type_name_alt = "unsigned long";
		} else {
			type_name = "long long unsigned int";
			type_name_alt = "unsigned long long";
		}
		break;
	default:
		return NULL;
	}

	struct tag *ret = cu__find_base_type_by_name(cu, type_name, id);
	return ret ?: cu__find_base_type_by_name(cu, type_name_alt, id);
}
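
/*
 * Illustrative usage (sketch): asking for a 2-byte base type first
 * tries the DWARF spelling "short unsigned int", then falls back to
 * "unsigned short":
 *
 *	type_id_t id;
 *	struct tag *tag = cu__find_base_type_of_size(cu, 2, &id);
 *	// if tag != NULL, 'id' identifies that base type within this CU
 */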

static int class__demote_bitfields(struct class *class, const struct cu *cu,
				   const int verbose, FILE *fp)
{
	struct class_member *member;
	struct class_member *bitfield_head = NULL;
	const struct tag *old_type_tag, *new_type_tag;
	size_t current_bitfield_size = 0, size, bytes_needed;
	int some_was_demoted = 0;

	type__for_each_data_member(&class->type, member) {
		/*
		 * Check if we are moving away from a bitfield
		 */
		if (member->bitfield_size == 0) {
			current_bitfield_size = 0;
			bitfield_head = NULL;
		} else {
			if (bitfield_head == NULL) {
				bitfield_head = member;
				current_bitfield_size = member->bitfield_size;
			} else if (bitfield_head->byte_offset != member->byte_offset) {
				/*
				 * We moved from one bitfield to another, for
				 * now don't handle this case, just move on to
				 * the next bitfield, we may well move it to
				 * another place and then the first bitfield will
				 * be isolated and will be handled in the next
				 * pass.
				 */
				bitfield_head = member;
				current_bitfield_size = member->bitfield_size;
			} else
				current_bitfield_size += member->bitfield_size;
		}

		/*
		 * Have we got to the end of a bitfield with holes?
		 */
		if (member->bit_hole == 0)
			continue;

		size = member->byte_size;
		bytes_needed = (current_bitfield_size + 7) / 8;
		bytes_needed = roundup_pow_of_two(bytes_needed);
		if (bytes_needed == size)
			continue;

		type_id_t new_type_id;
		old_type_tag = cu__type(cu, member->tag.type);
		new_type_tag = cu__find_base_type_of_size(cu, bytes_needed,
							  &new_type_id);

		if (new_type_tag == NULL) {
			fprintf(fp, "/* BRAIN FART ALERT! couldn't find a "
				    "%zd bytes base type */\n\n", bytes_needed);
			continue;
		}
		if (verbose) {
			char old_bf[64], new_bf[64];
			fprintf(fp, "/* Demoting bitfield ('%s' ... '%s') "
				"from '%s' to '%s' */\n",
				class_member__name(bitfield_head),
				class_member__name(member),
				base_type__name(tag__base_type(old_type_tag),
						old_bf, sizeof(old_bf)),
				base_type__name(tag__base_type(new_type_tag),
						new_bf, sizeof(new_bf)));
		}

		class__demote_bitfield_members(class,
					       bitfield_head, member,
					       tag__base_type(old_type_tag),
					       tag__base_type(new_type_tag),
					       new_type_id);
		class__recalc_holes(class);
		some_was_demoted = 1;

		if (verbose > 1) {
			class__fprintf(class, cu, fp);
			fputc('\n', fp);
		}
	}
	/*
	 * Now look if we have bit padding, i.e. if the last member
	 * is a bitfield and it's the sole member in this bitfield, i.e.
	 * if it wasn't already demoted as part of a bitfield of more than
	 * one member:
	 */
	member = type__last_member(&class->type);
	if (class->bit_padding != 0 && bitfield_head == member) {
		size = member->byte_size;
		bytes_needed = (member->bitfield_size + 7) / 8;
		if (bytes_needed < size) {
			old_type_tag = cu__type(cu, member->tag.type);
			type_id_t new_type_id;
			new_type_tag =
				cu__find_base_type_of_size(cu, bytes_needed,
							   &new_type_id);

			tag__assert_search_result(old_type_tag, member->tag.tag, class_member__name(member));
			tag__assert_search_result(new_type_tag, member->tag.tag, class_member__name(member));

			if (verbose) {
				char old_bf[64], new_bf[64];
				fprintf(fp, "/* Demoting bitfield ('%s') "
					"from '%s' to '%s' */\n",
					class_member__name(member),
					base_type__name(tag__base_type(old_type_tag),
							old_bf, sizeof(old_bf)),
					base_type__name(tag__base_type(new_type_tag),
							new_bf, sizeof(new_bf)));
			}

			class__demote_bitfield_members(class,
						       member, member,
						       tag__base_type(old_type_tag),
						       tag__base_type(new_type_tag),
						       new_type_id);
			class__recalc_holes(class);
			some_was_demoted = 1;

			if (verbose > 1) {
				class__fprintf(class, cu, fp);
				fputc('\n', fp);
			}
		}
	}

	return some_was_demoted;
}
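
/*
 * Illustrative sketch (hypothetical struct): two one-bit flags declared
 * as 'int' occupy 4 bytes with a 30-bit hole:
 *
 *	struct flags {
 *		int a:1;
 *		int b:1;
 *	};		// DWARF size 4
 *
 * Here current_bitfield_size = 2, so bytes_needed =
 * roundup_pow_of_two(1) = 1 != 4, and the bitfield is demoted from
 * 'int' to a 1-byte base type, letting later passes shrink the struct.
 */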

static void class__reorganize_bitfields(struct class *class,
					const struct cu *cu,
					const int verbose, FILE *fp)
{
	struct class_member *member, *brother;
restart:
	type__for_each_data_member(&class->type, member) {
		/* See if we have a hole after this member */
		if (member->bit_hole != 0) {
			/*
			 * OK, try to find a member that has a bit hole after
			 * it and that has a size that fits the current hole:
			 */
			brother =
			   class__find_next_bit_hole_of_size(class, member,
							     member->bit_hole);
			if (brother != NULL) {
				class__move_bit_member(class, cu,
						       member, brother,
						       verbose, fp);
				goto restart;
			}
		}
	}
}

static void class__fixup_bitfield_types(struct class *class,
					struct class_member *from,
					struct class_member *to_before,
					type_id_t type)
{
	struct class_member *member;

	class__for_each_member_from(class, from, member) {
		if (member == to_before)
			break;
		member->tag.type = type;
	}
}

/*
 * Think about this pahole output a bit:
 *
 * [filo examples]$ pahole swiss_cheese cheese
 * / * <11b> /home/acme/git/pahole/examples/swiss_cheese.c:3 * /
 * struct cheese {
 * <SNIP>
 *	int	bitfield1:1;	/ * 64 4 * /
 *	int	bitfield2:1;	/ * 64 4 * /
 *
 *	/ * XXX 14 bits hole, try to pack * /
 *	/ * Bitfield WARNING: DWARF size=4, real size=2 * /
 *
 *	short int d;		/ * 66 2 * /
 * <SNIP>
 *
 * The compiler (gcc 4.1.1 20070105 (Red Hat 4.1.1-51) in the above example)
 * decided to combine what was declared as an int (4 bytes) bitfield, which
 * doesn't use even one byte, with the next field, a short int (2 bytes),
 * without demoting the type of the bitfield to short int (2 bytes), so in
 * terms of alignment the real size is 2, not 4. To make things easier for
 * the rest of the reorganizing routines we just do the demotion ourselves,
 * fixing up the sizes.
 */
static void class__fixup_member_types(struct class *class, const struct cu *cu,
				      const uint8_t verbose, FILE *fp)
{
	struct class_member *pos, *bitfield_head = NULL;
	uint8_t fixup_was_done = 0;

	type__for_each_data_member(&class->type, pos) {
		/*
		 * Is this a bitfield member?
		 */
		if (pos->bitfield_size != 0) {
			/*
			 * The first entry in a bitfield?
			 */
			if (bitfield_head == NULL)
				bitfield_head = pos;
			continue;
		}
		/*
		 * OK, not a bitfield member, but have we just passed
		 * by a bitfield?
		 */
		if (bitfield_head != NULL) {
			const uint16_t real_size = (pos->byte_offset -
						    bitfield_head->byte_offset);
			const size_t size = bitfield_head->byte_size;
			/*
			 * Another case:
			 *
			 * struct irq_cfg {
			 *	struct irq_pin_list *irq_2_pin;	/ *  0  8 * /
			 *	cpumask_var_t domain;		/ *  8 16 * /
			 *	cpumask_var_t old_domain;	/ * 24 16 * /
			 *	u8 vector;			/ * 40  1 * /
			 *	u8 move_in_progress:1;		/ * 41: 7 1 * /
			 *	u8 remapped:1;			/ * 41: 6 1 * /
			 *
			 *	/ * XXX 6 bits hole, try to pack * /
			 *	/ * XXX 6 bytes hole, try to pack * /
			 *
			 *	union {
			 *		struct irq_2_iommu irq_2_iommu;	/ * 16 * /
			 *		struct irq_2_irte irq_2_irte;	/ *  4 * /
			 *	};				/ * 48 16 * /
			 *	/ * --- cacheline 1 boundary (64 bytes) --- * /
			 *
			 * So just fix it up if the byte_size of the bitfield is
			 * greater than what it really uses.
			 */
			if (real_size < size) {
				type_id_t new_type_id;
				struct tag *new_type_tag =
					cu__find_base_type_of_size(cu,
								   real_size,
								   &new_type_id);
				if (new_type_tag == NULL) {
					fprintf(stderr, "%s: couldn't find"
						" a base_type of %d bytes!\n",
						__func__, real_size);
					continue;
				}
				class__fixup_bitfield_types(class,
							    bitfield_head, pos,
							    new_type_id);
				fixup_was_done = 1;
			}
		}
		bitfield_head = NULL;
	}
	if (fixup_was_done) {
		class__recalc_holes(class);
	}
	if (verbose && fixup_was_done) {
		fprintf(fp, "/* bitfield types were fixed */\n");
		if (verbose > 1) {
			class__fprintf(class, cu, fp);
			fputc('\n', fp);
		}
	}
}
#endif // BITFIELD_REORG_ALGORITHMS_ENABLED

void class__reorganize(struct class *class, const struct cu *cu,
		       const int verbose, FILE *fp)
{
	struct class_member *member, *brother, *last_member;
	size_t alignment_size;

	class__find_holes(class);
#ifdef BITFIELD_REORG_ALGORITHMS_ENABLED
	class__fixup_member_types(class, cu, verbose, fp);
	while (class__demote_bitfields(class, cu, verbose, fp))
		class__reorganize_bitfields(class, cu, verbose, fp);
#endif
	/* Now try to combine holes */
restart:
	alignment_size = 0;
	/*
	 * It can be NULL if this class doesn't have any data members,
	 * just inheritance entries
	 */
	last_member = type__last_member(&class->type);
	if (last_member == NULL)
		return;

	type__for_each_data_member(&class->type, member) {
		const size_t aligned_size = member->byte_size + member->hole;
		if (aligned_size <= cu->addr_size &&
		    aligned_size > alignment_size)
			alignment_size = aligned_size;
	}

	if (alignment_size != 0) {
		size_t modulo;
		uint16_t new_padding;

		if (alignment_size > 1)
			alignment_size = roundup(alignment_size, 2);
		modulo = (last_member->byte_offset +
			  last_member->byte_size) % alignment_size;
		if (modulo != 0)
			new_padding = cu->addr_size - modulo;
		else
			new_padding = 0;

		if (new_padding != class->padding) {
			class->padding = new_padding;
			class->type.size = (last_member->byte_offset +
					    last_member->byte_size + new_padding);
		}
	}

	type__for_each_data_member(&class->type, member) {
		/* See if we have a hole after this member */
		if (member->hole != 0) {
			/*
			 * OK, try to find a member that has a hole after it
			 * and that has a size that fits the current hole:
			 */
			brother = class__find_next_hole_of_size(class, member,
								member->hole);
			if (brother != NULL) {
				struct class_member *brother_prev =
					list_entry(brother->tag.node.prev,
						   struct class_member,
						   tag.node);
				/*
				 * If it is the next member, avoid moving it closer,
				 * it could be an explicit alignment rule, like
				 * ____cacheline_aligned_in_smp in the Linux
				 * kernel.
				 */
				if (brother_prev != member) {
					if (class__move_member(class, member, brother, cu, 0, verbose, fp))
						goto restart;
				}
			}
			/*
			 * OK, but is there padding? If so the last member
			 * has a hole; if we are not at the last member and
			 * it has a size that is smaller than the current hole
			 * we can move it after the current member, reducing
			 * the padding or eliminating it altogether.
			 */
			if (class->padding > 0 &&
			    member != last_member &&
			    last_member->byte_size != 0 &&
			    last_member->byte_size <= member->hole) {
				if (class__move_member(class, member, last_member, cu, 1, verbose, fp))
					goto restart;
			}
		}
	}

	/* Now try to move members at the tail to after holes */
	if (class->nr_holes == 0)
		return;

	type__for_each_data_member(&class->type, member) {
		/* See if we have a hole after this member */
		if (member->hole != 0) {
			brother = class__find_last_member_of_size(class, member,
								  member->hole);
			if (brother != NULL) {
				struct class_member *brother_prev =
					list_entry(brother->tag.node.prev,
						   struct class_member,
						   tag.node);
				/*
				 * If it is the next member, avoid moving it closer,
				 * it could be an explicit alignment rule, like
				 * ____cacheline_aligned_in_smp in the Linux
				 * kernel.
				 */
				if (brother_prev != member) {
					if (class__move_member(class, member, brother, cu, 0, verbose, fp))
						goto restart;
				}
			}
		}
	}
}
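
/*
 * Minimal usage sketch (illustrative only, not part of the original
 * file): roughly how a caller, e.g. pahole's --reorganize mode, could
 * drive this entry point for a struct found in a compile unit.  The
 * surrounding setup (obtaining 'class' and 'cu' from the DWARF loader)
 * is assumed.
 */
#if 0
	class__reorganize(class, cu, /* verbose = */ 1, stdout);
	class__fprintf(class, cu, stdout);	/* print the packed layout */
#endif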