/* IELR's inadequacy annotation list.

   Copyright (C) 2009-2015, 2018-2020 Free Software Foundation, Inc.

   This file is part of Bison, the GNU Compiler Compiler.

   This program is free software: you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation, either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include <config.h>

#include "AnnotationList.h"

#include "system.h"

#include "ielr.h"
#include "lalr.h"

/**
 * \pre
 *   - <tt>annotations_obstackp != NULL</tt>.
 * \post
 *   - \c result is a new \c AnnotationList with one node whose:
 *     - \c inadequacyNode member is \c NULL.
 *     - \c contributions member is allocated with \c contribution_count
 *       uninitialized elements.
 *   - All memory was allocated on \c annotations_obstackp.
 */
static AnnotationList*
AnnotationList__alloc_on_obstack (ContributionIndex contribution_count,
                                  struct obstack *annotations_obstackp)
{
  AnnotationList *res;
  size_t contributions_size = contribution_count * sizeof res->contributions[0];
  res = obstack_alloc (annotations_obstackp,
                       offsetof (AnnotationList, contributions)
                       + contributions_size);
  res->next = NULL;
  res->inadequacyNode = NULL;
  return res;
}
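
/* A usage sketch (not part of the build; obstackp and some_sbitset are
   hypothetical).  Since `contributions' is the trailing member of
   AnnotationList, the offsetof computation above allocates the node header
   and its contributions array as one contiguous obstack block, in the style
   of a flexible array member:

     AnnotationList *a = AnnotationList__alloc_on_obstack (2, obstackp);
     a->contributions[0] = NULL;          // an "always" contribution
     a->contributions[1] = some_sbitset;  // a potential contribution
*/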

/**
 * \pre
 *   - <tt>self != NULL</tt>.
 *   - <tt>0 <= ci < self->inadequacyNode->contributionCount</tt>.
 * \post
 *   - \c result = true iff contribution \c ci in \c self represents an
 *     "always" contribution.
 */
static bool
AnnotationList__isContributionAlways (AnnotationList const *self,
                                      ContributionIndex ci)
{
  aver (0 <= ci && ci < self->inadequacyNode->contributionCount);
  return self->contributions[ci] == NULL;
}
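
/* Encoding note: a NULL entry in `contributions' means the contribution is
   made by every isocore of the annotated state ("always"), while a non-NULL
   Sbitset records the kernel items whose lookahead sets potentially make
   the contribution.  */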

/**
 * \pre
 *   - \c self is a single node.
 *   - \c self annotates the same state as every other node in \c list, and
 *     that state has \c nitems kernel items.
 * \post
 *   - If the list \c list already contains an identical annotation to
 *     \c self, \c self was discarded, \c result is false, and the caller is
 *     responsible for the memory of \c self.
 *   - Otherwise, \c list now contains the node \c self, \c result is true,
 *     and \c list assumes responsibility for the memory of \c self.
 *   - The sort in \c list is:
 *     - Sort in reverse order on the unique ID of the associated
 *       inadequacy node.  Because these IDs are assigned in ascending
 *       order, this should mean that the insertion position within an
 *       annotation list is usually near the beginning with other
 *       annotations associated with the same inadequacy.
 *     - Next, sort on the first contribution that is different as follows:
 *       - Sort an always-contribution before a never-contribution before a
 *         potential-contribution.
 *       - Two always-contributions are identical.
 *       - Two never-contributions are identical.
 *       - For two potential-contributions, sort on the contributions' kernel
 *         item bitsets interpreted as binary numbers.
 *     - The sorting has a few effects:
 *       - It accelerates elimination of identical annotations during
 *         insertion.
 *       - It determines how the output of \c AnnotationList__debug is
 *         sorted.
 *       - Other than that, it's probably not important.
 */
static bool
AnnotationList__insertInto (AnnotationList *self, AnnotationList **list,
                            size_t nitems)
{
  AnnotationList **node;
  for (node = list; *node; node = &(*node)->next)
    {
      int cmp = 0;
      if (self->inadequacyNode->id < (*node)->inadequacyNode->id)
        cmp = 1;
      else if ((*node)->inadequacyNode->id < self->inadequacyNode->id)
        cmp = -1;
      else
        for (ContributionIndex ci = 0;
             cmp == 0 && ci < self->inadequacyNode->contributionCount;
             ++ci)
          {
            if (AnnotationList__isContributionAlways (self, ci))
              {
                if (!AnnotationList__isContributionAlways (*node, ci))
                  cmp = -1;
              }
            else if (AnnotationList__isContributionAlways (*node, ci))
              cmp = 1;
            else
              for (size_t item = 0; cmp == 0 && item < nitems; ++item)
                {
                  if (!Sbitset__test (self->contributions[ci], item))
                    {
                      if (Sbitset__test ((*node)->contributions[ci], item))
                        cmp = -1;
                    }
                  else if (!Sbitset__test ((*node)->contributions[ci], item))
                    cmp = 1;
                }
          }
      if (cmp < 0)
        {
          self->next = *node;
          *node = self;
          break;
        }
      else if (cmp == 0)
        {
          self = NULL;
          break;
        }
    }
  if (!*node)
    *node = self;
  return self != NULL;
}
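
/* A sketch of the resulting order (hypothetical inadequacy IDs; "A" is an
   always-contribution, "N" a never-contribution, and digit strings are
   potential contributions' kernel item bitsets with item 0 leftmost):

     id 7: [A, N, 011]
     id 7: [A, N, 110]
     id 2: [A, N, 101]

   IDs descend; within equal IDs, the first differing contribution decides,
   and two potential contributions compare as binary numbers (011 sorts
   before 110).  */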

/* Compute the set of tokens on which the state with transitions TRANS
   shifts.  */
static bitset
AnnotationList__compute_shift_tokens (transitions *trans)
{
  bitset shift_tokens = bitset_create (ntokens, BITSET_FIXED);
  int i;
  FOR_EACH_SHIFT (trans, i)
    bitset_set (shift_tokens, TRANSITION_SYMBOL (trans, i));
  return shift_tokens;
}

/* Compute the set of tokens involved in a conflict in the state whose shift
   tokens are SHIFT_TOKENS and whose reductions are REDS: a token conflicts
   if it triggers both a shift and a reduction, or two different
   reductions.  */
static bitset
AnnotationList__compute_conflicted_tokens (bitset shift_tokens,
                                           reductions *reds)
{
  bitset conflicted_tokens = bitset_create (ntokens, BITSET_FIXED);
  bitset conflicted_tokens_rule = bitset_create (ntokens, BITSET_FIXED);
  bitset tokens = bitset_create (ntokens, BITSET_FIXED);

  bitset_copy (tokens, shift_tokens);
  for (int i = 0; i < reds->num; ++i)
    {
      bitset_and (conflicted_tokens_rule, tokens, reds->lookahead_tokens[i]);
      bitset_or (conflicted_tokens,
                 conflicted_tokens, conflicted_tokens_rule);
      bitset_or (tokens, tokens, reds->lookahead_tokens[i]);
      /* Check that rules are sorted on rule number or the next step in
         AnnotationList__compute_from_inadequacies will misbehave.  */
      aver (i == 0 || reds->rules[i-1] < reds->rules[i]);
    }
  bitset_free (tokens);
  bitset_free (conflicted_tokens_rule);

  return conflicted_tokens;
}
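
/* A worked case (hypothetical token names): with shift_tokens = {a} and two
   reductions whose lookahead sets are {a, b} and {b}, the loop accumulates
   tokens = {a} -> {a, b} -> {a, b} and returns conflicted_tokens = {a, b}:
   a is a shift/reduce conflict, b a reduce/reduce conflict.  */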

/* If CONFLICTED_TOKEN is an always-follow of the goto on THE_RULE's LHS
   from state S, return true: the contribution is an "always" contribution,
   and *ITEMS is not allocated.  Otherwise, allocate *ITEMS on
   ANNOTATIONS_OBSTACKP, set in it the kernel items of S whose lookahead
   sets can contribute CONFLICTED_TOKEN to that goto's follows, and return
   false.  */
static bool
AnnotationList__compute_lhs_contributions (state *s, const rule *the_rule,
                                           symbol_number conflicted_token,
                                           bitsetv follow_kernel_items,
                                           bitsetv always_follows,
                                           state ***predecessors,
                                           bitset **item_lookahead_sets,
                                           Sbitset *items,
                                           struct obstack
                                             *annotations_obstackp)
{
  goto_number lhs_goto = map_goto (s->number, the_rule->lhs->number);
  if (bitset_test (always_follows[lhs_goto], conflicted_token))
    return true;
  *items = Sbitset__new_on_obstack (s->nitems, annotations_obstackp);
  {
    bitset_iterator biter_item;
    bitset_bindex item;
    BITSET_FOR_EACH (biter_item, follow_kernel_items[lhs_goto], item, 0)
      if (ielr_item_has_lookahead (s, 0, item, conflicted_token,
                                   predecessors, item_lookahead_sets))
        Sbitset__set (*items, item);
  }
  return false;
}

static void
AnnotationList__computePredecessorAnnotations (
  AnnotationList *self, state *s,
  bitsetv follow_kernel_items,
  bitsetv always_follows,
  state ***predecessors,
  bitset **item_lookahead_sets,
  AnnotationList **annotation_lists,
  AnnotationIndex *annotation_counts,
  struct obstack *annotations_obstackp)
{
  for (state **predecessor = predecessors[s->number]; *predecessor; ++predecessor)
    {
      AnnotationList *annotation_node =
        AnnotationList__alloc_on_obstack (
          self->inadequacyNode->contributionCount, annotations_obstackp);
      annotation_node->inadequacyNode = self->inadequacyNode;
      bool potential_contribution = false;
      bitset *lookaheads = NULL;

      for (ContributionIndex ci = 0; ci < self->inadequacyNode->contributionCount; ++ci)
        {
          symbol_number contribution_token =
            InadequacyList__getContributionToken (self->inadequacyNode, ci)
              ->content->number;
          if (AnnotationList__isContributionAlways (self, ci))
            {
              annotation_node->contributions[ci] = NULL;
              continue;
            }
          annotation_node->contributions[ci] =
            Sbitset__new_on_obstack ((*predecessor)->nitems,
                                     annotations_obstackp);
          {
            size_t predecessor_item = 0;
            Sbitset sbiter_item;
            Sbitset__Index self_item;
            SBITSET__FOR_EACH (self->contributions[ci], s->nitems,
                               sbiter_item, self_item)
              {
                /* If this kernel item is the beginning of a RHS, it must be
                   the kernel item in the start state, and so it has an empty
                   lookahead set.  Thus, it can't contribute to inadequacies,
                   and so it should never have been identified as a
                   contribution.  If, instead, this kernel item is the
                   successor of the start state's kernel item, the lookahead
                   set is still empty, and so it also should never have been
                   identified as a contribution.  This situation is fortunate
                   because we want to avoid the - 2 below in both cases.  */
                aver (s->items[self_item] > 1);
                /* If this kernel item is next to the beginning of the RHS,
                   then check all of the predecessor's goto follows for the
                   LHS.  */
                if (item_number_is_rule_number (ritem[s->items[self_item] - 2]))
                  {
                    Sbitset items;
                    if (AnnotationList__compute_lhs_contributions (
                          *predecessor,
                          item_rule (&ritem[s->items[self_item]]),
                          contribution_token,
                          follow_kernel_items, always_follows, predecessors,
                          item_lookahead_sets, &items, annotations_obstackp))
                      {
                        obstack_free (annotations_obstackp,
                                      annotation_node->contributions[ci]);
                        annotation_node->contributions[ci] = NULL;
                        break;
                      }
                    else
                      {
                        Sbitset__or (annotation_node->contributions[ci],
                                     annotation_node->contributions[ci],
                                     items, (*predecessor)->nitems);
                        obstack_free (annotations_obstackp, items);
                      }
                  }
                /* If this kernel item is later in the RHS, then check the
                   predecessor item's lookahead set.  */
                else
                  {
                    /* We don't have to start the predecessor item search at
                       the beginning every time because items from both
                       states are sorted by their indices in ritem.  */
                    for (;
                         predecessor_item < (*predecessor)->nitems;
                         ++predecessor_item)
                      if ((*predecessor)->items[predecessor_item]
                          == s->items[self_item] - 1)
                        break;
                    aver (predecessor_item != (*predecessor)->nitems);
                    if (ielr_item_has_lookahead (*predecessor, 0,
                                                 predecessor_item,
                                                 contribution_token,
                                                 predecessors,
                                                 item_lookahead_sets))
                      Sbitset__set (annotation_node->contributions[ci],
                                    predecessor_item);
                  }
              }
          }
          if (annotation_node->contributions[ci])
            {
              Sbitset biter;
              Sbitset__Index i;
              SBITSET__FOR_EACH (annotation_node->contributions[ci],
                                 (*predecessor)->nitems, biter, i)
                {
                  potential_contribution = true;
                  if (!lookaheads)
                    {
                      lookaheads = xnmalloc ((*predecessor)->nitems,
                                             sizeof *lookaheads);
                      for (size_t j = 0; j < (*predecessor)->nitems; ++j)
                        lookaheads[j] = NULL;
                    }
                  if (!lookaheads[i])
                    lookaheads[i] = bitset_create (ntokens, BITSET_FIXED);
                  bitset_set (lookaheads[i], contribution_token);
                }
            }
        }

      /* If the predecessor has any contributions besides just "always" and
         "never" contributions:
         - If the dominant contribution is split-stable, the annotation could
           not affect merging on this predecessor state or its eventual
           predecessor states.  Moreover, all contributions that affect
           whether the dominant contribution remains dominant must be
           "always" or "never" contributions in order for the dominant
           contribution to be split-stable.  Thus, the dominant contribution
           computation result in eventual successor states will not be
           affected by lookaheads tracked for this predecessor state.  (Also,
           as in the isocore compatibility test, we depend on the fact that
           isocores with equal dominant contributions will have the same
           dominant contribution when merged.  Otherwise, we might have to
           worry that the presence of a potential contribution might somehow
           be the culprit of that behavior and thus need to be tracked
           regardless of the split stability of the dominant contribution.)
           Thus, go ahead and discard the annotation to save space now plus
           time during state splitting.
         - Otherwise, record the annotation, and compute any resulting
           annotations needed on predecessor states.  */
      if (potential_contribution)
        {
          if (ContributionIndex__none
              != AnnotationList__computeDominantContribution (
                   annotation_node, (*predecessor)->nitems, lookaheads, true))
            {
              obstack_free (annotations_obstackp, annotation_node);
              annotation_node = NULL;
            }
          {
            for (size_t i = 0; i < (*predecessor)->nitems; ++i)
              if (lookaheads[i])
                bitset_free (lookaheads[i]);
            free (lookaheads);
          }
          if (annotation_node)
            {
              if (AnnotationList__insertInto (annotation_node,
                                              &annotation_lists[(*predecessor)
                                                                ->number],
                                              (*predecessor)->nitems))
                {
                  ++annotation_counts[(*predecessor)->number];
                  AnnotationList__computePredecessorAnnotations (
                    annotation_node, *predecessor,
                    follow_kernel_items, always_follows, predecessors,
                    item_lookahead_sets, annotation_lists, annotation_counts,
                    annotations_obstackp);
                }
              else
                obstack_free (annotations_obstackp, annotation_node);
            }
        }
      else
        obstack_free (annotations_obstackp, annotation_node);
    }
}
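
/* How propagation works, in brief: each potential contribution on a kernel
   item of S is translated to kernel items of every predecessor (via the
   predecessor's goto follows when the dot sits just after the start of the
   RHS, via the predecessor item one ritem position earlier otherwise), and
   the function then recurses on that predecessor.  The recursion stops
   where a predecessor's annotation has no potential contributions left, or
   where a split-stable dominant contribution lets the annotation be
   discarded.  */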

void
AnnotationList__compute_from_inadequacies (
  state *s, bitsetv follow_kernel_items, bitsetv always_follows,
  state ***predecessors, bitset **item_lookahead_sets,
  InadequacyList **inadequacy_lists, AnnotationList **annotation_lists,
  AnnotationIndex *annotation_counts,
  ContributionIndex *max_contributionsp,
  struct obstack *annotations_obstackp,
  InadequacyListNodeCount *inadequacy_list_node_count)
{
  /* Return an empty list if s->lookahead_tokens = NULL.  */
  if (s->consistent)
    return;

  bitsetv all_lookaheads = bitsetv_create (s->nitems, ntokens, BITSET_FIXED);
  bitsetv_ones (all_lookaheads);
  bitset shift_tokens = AnnotationList__compute_shift_tokens (s->transitions);
  bitset conflicted_tokens =
    AnnotationList__compute_conflicted_tokens (shift_tokens, s->reductions);

  /* Add an inadequacy annotation for each conflicted_token.  */
  bitset_iterator biter_conflict;
  bitset_bindex conflicted_token;
  BITSET_FOR_EACH (biter_conflict, conflicted_tokens, conflicted_token, 0)
    {
      AnnotationList *annotation_node;
      ContributionIndex contribution_count = 0;

      /* Allocate the annotation node.  */
      {
        for (int rule_i = 0; rule_i < s->reductions->num; ++rule_i)
          if (bitset_test (s->reductions->lookahead_tokens[rule_i],
                           conflicted_token))
            ++contribution_count;
        if (bitset_test (shift_tokens, conflicted_token))
          ++contribution_count;
        annotation_node =
          AnnotationList__alloc_on_obstack (contribution_count,
                                            annotations_obstackp);
      }

      /* FIXME: Would a BITSET_FRUGAL or BITSET_SPARSE be more efficient?
         Convert it now or inside InadequacyList__new_conflict?  */
      bitset actions = bitset_create (s->reductions->num + 1, BITSET_FIXED);
      bool potential_contribution = false;

      /* Add a contribution for each reduction that has conflicted_token as
         a lookahead.  */
      {
        ContributionIndex ci = 0;
        int item_i = 0;
        for (int rule_i = 0; rule_i < s->reductions->num; ++rule_i)
          {
            rule *the_rule = s->reductions->rules[rule_i];
            if (bitset_test (s->reductions->lookahead_tokens[rule_i],
                             conflicted_token))
              {
                bitset_set (actions, rule_i);
                /* If this reduction is on a kernel item, just add it.  */
                if (!item_number_is_rule_number (the_rule->rhs[0]))
                  {
                    annotation_node->contributions[ci] =
                      Sbitset__new_on_obstack (s->nitems,
                                               annotations_obstackp);
                    /* Catch item_i up to rule_i.  This works because both
                       are sorted on rule number.  */
                    while (!item_number_is_rule_number (ritem[s->items[item_i]])
                           || item_number_as_rule_number (ritem[s->items[item_i]])
                                != the_rule->number)
                      ++item_i;
                    aver (item_i < s->nitems);

                    Sbitset__set (annotation_node->contributions[ci], item_i);
                  }
                /* Otherwise, add the kernel items whose lookahead sets
                   contribute the conflicted token to this reduction's
                   lookahead set.  */
                else if (AnnotationList__compute_lhs_contributions (
                           s, the_rule, conflicted_token, follow_kernel_items,
                           always_follows, predecessors, item_lookahead_sets,
                           &annotation_node->contributions[ci],
                           annotations_obstackp))
                  {
                    annotation_node->contributions[ci++] = NULL;
                    continue;
                  }
                /* The lookahead token has to come from somewhere.  */
                aver (!Sbitset__isEmpty (annotation_node->contributions[ci],
                                         s->nitems));
                ++ci;
                potential_contribution = true;
              }
          }
      }

      /* If there are any contributions besides just "always" contributions:
         - If there's also a shift contribution, record it.
         - If the dominant contribution is split-stable, then the annotation
           could not affect merging, so go ahead and discard the annotation
           and the inadequacy to save space now plus time during state
           splitting.
         - Otherwise, record the annotation and the inadequacy, and compute
           any resulting annotations needed on predecessor states.  */
      if (potential_contribution)
        {
          if (bitset_test (shift_tokens, conflicted_token))
            {
              bitset_set (actions, s->reductions->num);
              annotation_node->contributions[contribution_count - 1] = NULL;
            }
          {
            InadequacyList *conflict_node =
              InadequacyList__new_conflict (
                s, symbols[conflicted_token], actions,
                inadequacy_list_node_count);
            actions = NULL;
            annotation_node->inadequacyNode = conflict_node;
            if (ContributionIndex__none
                != AnnotationList__computeDominantContribution (
                     annotation_node, s->nitems, all_lookaheads, true))
              {
                obstack_free (annotations_obstackp, annotation_node);
                InadequacyList__delete (conflict_node);
              }
            else
              {
                InadequacyList__prependTo (conflict_node,
                                           &inadequacy_lists[s->number]);
                {
                  bool b =
                    AnnotationList__insertInto (annotation_node,
                                                &annotation_lists[s->number],
                                                s->nitems);
                  aver (b); (void) b;
                }
                /* This aver makes sure the
                   AnnotationList__computeDominantContribution check above
                   does discard annotations in the simplest case of an S/R
                   conflict with no token precedence.  */
                aver (!bitset_test (shift_tokens, conflicted_token)
                      || symbols[conflicted_token]->content->prec);
                ++annotation_counts[s->number];
                if (contribution_count > *max_contributionsp)
                  *max_contributionsp = contribution_count;
                AnnotationList__computePredecessorAnnotations (
                  annotation_node, s,
                  follow_kernel_items, always_follows, predecessors,
                  item_lookahead_sets, annotation_lists, annotation_counts,
                  annotations_obstackp);
              }
          }
        }
      else
        {
          bitset_free (actions);
          obstack_free (annotations_obstackp, annotation_node);
        }
    }

  bitsetv_free (all_lookaheads);
  bitset_free (shift_tokens);
  bitset_free (conflicted_tokens);
}
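
/* Overall flow for one state: collect its shift tokens, derive the set of
   tokens involved in S/R or R/R conflicts, and build one annotation per
   conflicted token whose contributions are each conflicted reduction plus
   the shift, if any.  As the aver near the end documents, an S/R conflict
   on a token with no declared precedence is always resolved as a shift, so
   its dominant contribution is split-stable and the annotation is discarded
   up front.  */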

void
AnnotationList__debug (AnnotationList const *self, size_t nitems, int spaces)
{
  AnnotationList const *a;
  AnnotationIndex ai;
  for (a = self, ai = 0; a; a = a->next, ++ai)
    {
      fprintf (stderr, "%*sAnnotation %d (manifesting state %d):\n",
               spaces, "",
               ai, a->inadequacyNode->manifestingState->number);
      bitset_bindex rulei
        = bitset_first (a->inadequacyNode->inadequacy.conflict.actions);
      for (ContributionIndex ci = 0; ci < a->inadequacyNode->contributionCount; ++ci)
        {
          symbol_number token =
            InadequacyList__getContributionToken (a->inadequacyNode, ci)
              ->content->number;
          fprintf (stderr, "%*s", spaces+2, "");
          if (ci == InadequacyList__getShiftContributionIndex (
                      a->inadequacyNode))
            fprintf (stderr, "Contributes shift of token %d.\n", token);
          else
            {
              fprintf (stderr, "Contributes token %d", token);
              aver (rulei != BITSET_BINDEX_MAX);
              fprintf (stderr, " as lookahead, rule number %d",
                       a->inadequacyNode->manifestingState
                         ->reductions->rules[rulei]->number);
              rulei =
                bitset_next (a->inadequacyNode->inadequacy.conflict.actions,
                             rulei+1);
              if (AnnotationList__isContributionAlways (a, ci))
                fprintf (stderr, " always.");
              else
                {
                  fprintf (stderr, ", items: ");
                  Sbitset__fprint (a->contributions[ci], nitems, stderr);
                }
              fprintf (stderr, "\n");
            }
        }
    }
}
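
/* Shape of the resulting trace, for reference (token, state, and rule
   numbers are hypothetical; the item rendering comes from Sbitset__fprint):

     Annotation 0 (manifesting state 9):
       Contributes shift of token 12.
       Contributes token 12 as lookahead, rule number 3 always.
     Annotation 1 (manifesting state 9):
       Contributes token 5 as lookahead, rule number 4, items: ...
*/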

void
AnnotationList__computeLookaheadFilter (AnnotationList const *self,
                                        size_t nitems,
                                        bitsetv lookahead_filter)
{
  bitsetv_zero (lookahead_filter);
  for (; self; self = self->next)
    for (ContributionIndex ci = 0; ci < self->inadequacyNode->contributionCount; ++ci)
      if (!AnnotationList__isContributionAlways (self, ci))
        {
          symbol_number token =
            InadequacyList__getContributionToken (self->inadequacyNode, ci)
              ->content->number;
          Sbitset__Index item;
          Sbitset biter;
          SBITSET__FOR_EACH (self->contributions[ci], nitems, biter, item)
            bitset_set (lookahead_filter[item], token);
        }
}
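
/* In other words, bit TOKEN of lookahead_filter[ITEM] is set iff some
   annotation in the list has a potential contribution involving kernel item
   ITEM and token TOKEN.  Lookaheads outside this filter cannot affect any
   of the annotated inadequacies, which is presumably why the IELR lookahead
   computation can ignore them.  */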

/**
 * \pre
 *   - <tt>self != NULL</tt>.
 *   - \c nitems is the number of kernel items in the LR(0) state that
 *     \c self annotates.
 *   - \c lookaheads describes the lookahead sets on the kernel items of some
 *     isocore of the LR(0) state that \c self annotates.  Either:
 *     - <tt>lookaheads = NULL</tt> only if the lookahead set on every kernel
 *       item is empty.
 *     - For any <tt>0 <= i < nitems</tt>, <tt>lookaheads[i]</tt> is either:
 *       - \c NULL only if the lookahead set on kernel item \c i is empty.
 *       - The (possibly empty) lookahead set on kernel item \c i.
 *   - <tt>0 <= ci < self->inadequacyNode->contributionCount</tt>.
 * \post
 *   - \c result = true iff contribution \c ci in \c self is made by the
 *     state described by \c lookaheads.
 */
static bool
AnnotationList__stateMakesContribution (AnnotationList const *self,
                                        size_t nitems, ContributionIndex ci,
                                        bitset *lookaheads)
{
  if (AnnotationList__isContributionAlways (self, ci))
    return true;
  if (!lookaheads)
    return false;
  {
    symbol_number token =
      InadequacyList__getContributionToken (self->inadequacyNode, ci)
        ->content->number;
    Sbitset__Index item;
    Sbitset biter;
    SBITSET__FOR_EACH (self->contributions[ci], nitems, biter, item)
      if (lookaheads[item] && bitset_test (lookaheads[item], token))
        return true;
  }
  return false;
}

ContributionIndex
AnnotationList__computeDominantContribution (AnnotationList const *self,
                                             size_t nitems, bitset *lookaheads,
                                             bool require_split_stable)
{
  ContributionIndex const ci_shift =
    InadequacyList__getShiftContributionIndex (self->inadequacyNode);

  symbol *token = self->inadequacyNode->inadequacy.conflict.token;

  /* S/R conflict.  */
  if (ci_shift != ContributionIndex__none)
    {
      bool find_stable_domination_over_shift = false;
      bool find_stable_error_action_domination = false;

      int shift_precedence = token->content->prec;

      /* If the token has no precedence set, shift is always chosen.  */
      if (!shift_precedence)
        return ci_shift;

      /* Figure out which reductions contribute, which of those would
         dominate in an R/R comparison, and whether any reduction dominates
         the shift so that the R/R comparison is actually needed.  */
      ContributionIndex ci_rr_dominator = ContributionIndex__none;
      int actioni;
      ContributionIndex ci;
      for (ci = 0,
             actioni = bitset_first (self->inadequacyNode->inadequacy
                                     .conflict.actions);
           ci < self->inadequacyNode->contributionCount;
           ++ci,
             actioni = bitset_next (self->inadequacyNode->inadequacy
                                    .conflict.actions, actioni+1))
        {
          int reduce_precedence = 0;
          if (ci == ci_shift)
            continue;

          rule *r = self->inadequacyNode->manifestingState
                      ->reductions->rules[actioni];
          if (r->prec)
            reduce_precedence = r->prec->prec;

          /* If there's no need to check whether this reduction actually
             contributes because the shift eliminates it from the R/R
             comparison anyway, continue to the next reduction.  */
          if (reduce_precedence
              && (reduce_precedence < shift_precedence
                  || (reduce_precedence == shift_precedence
                      && token->content->assoc == right_assoc)))
            continue;
          if (!AnnotationList__stateMakesContribution (self, nitems, ci,
                                                       lookaheads))
            continue;
          /* This uneliminated reduction contributes, so see if it can cause
             an error action.  */
          if (reduce_precedence == shift_precedence
              && token->content->assoc == non_assoc)
            {
              /* It's not possible to find split-stable domination over
                 shift after a potential %nonassoc.  */
              if (find_stable_domination_over_shift)
                return ContributionIndex__none;
              if (!require_split_stable
                  || AnnotationList__isContributionAlways (self, ci))
                return ContributionIndex__error_action;
              find_stable_error_action_domination = true;
            }
          /* Consider this uneliminated contributing reduction in the R/R
             comparison.  */
          if (ci_rr_dominator == ContributionIndex__none)
            ci_rr_dominator = ci;
          /* If precedence is set for this uneliminated contributing
             reduction, it dominates the shift, so try to figure out which
             reduction dominates the R/R comparison.  */
          if (reduce_precedence)
            {
              /* It's not possible to find split-stable error action
                 domination after a potential reduction.  */
              if (find_stable_error_action_domination)
                return ContributionIndex__none;
              if (!require_split_stable)
                return ci_rr_dominator;
              if (!AnnotationList__isContributionAlways (self,
                                                         ci_rr_dominator))
                return ContributionIndex__none;
              if (AnnotationList__isContributionAlways (self, ci))
                return ci_rr_dominator;
              find_stable_domination_over_shift = true;
            }
        }
      if (find_stable_domination_over_shift
          || find_stable_error_action_domination)
        return ContributionIndex__none;
      /* No reduce or error action domination found, so shift dominates.  */
      return ci_shift;
    }

  /* R/R conflict, so the reduction with the lowest rule number dominates.
     Fortunately, contributions are sorted by rule number.  */
  for (ContributionIndex ci = 0; ci < self->inadequacyNode->contributionCount; ++ci)
    if (AnnotationList__stateMakesContribution (self, nitems, ci, lookaheads))
      {
        if (require_split_stable
            && !AnnotationList__isContributionAlways (self, ci))
          return ContributionIndex__none;
        return ci;
      }
  return ContributionIndex__none;
}
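
/* A worked example of the S/R branch above (hypothetical precedences):
   suppose the conflicted token has precedence 5.  A contributing reduction
   whose rule has precedence 4 (or precedence 5 under %right) is eliminated,
   leaving the shift to dominate.  Precedence 5 under %nonassoc yields
   ContributionIndex__error_action.  Precedence 6 makes the reduction
   dominate the shift, and the R/R comparison then picks the
   lowest-numbered contributing rule.  With require_split_stable, each of
   these outcomes is reported only when the relevant contributions are
   "always" contributions, i.e. when the outcome cannot change as lookaheads
   are refined during state splitting; otherwise the result is
   ContributionIndex__none.  */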