kernel/rcu/srcu.c
/*
 * Sleepable Read-Copy Update mechanism for mutual exclusion.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2006
 * Copyright (C) Fujitsu, 2012
 *
 * Author: Paul McKenney <paulmck@us.ibm.com>
 *	   Lai Jiangshan <laijs@cn.fujitsu.com>
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *		Documentation/RCU/ *.txt
 */
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/preempt.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/delay.h>
#include <linux/srcu.h>

#include <trace/events/rcu.h>

#include "rcu.h"
/*
 * Initialize an rcu_batch structure to empty.
 */
static inline void rcu_batch_init(struct rcu_batch *b)
{
	b->head = NULL;
	b->tail = &b->head;
}

/*
 * Enqueue a callback onto the tail of the specified rcu_batch structure.
 */
static inline void rcu_batch_queue(struct rcu_batch *b, struct rcu_head *head)
{
	*b->tail = head;
	b->tail = &head->next;
}

/*
 * Is the specified rcu_batch structure empty?
 */
static inline bool rcu_batch_empty(struct rcu_batch *b)
{
	return b->tail == &b->head;
}

/*
 * Remove the callback at the head of the specified rcu_batch structure
 * and return a pointer to it, or return NULL if the structure is empty.
 */
static inline struct rcu_head *rcu_batch_dequeue(struct rcu_batch *b)
{
	struct rcu_head *head;

	if (rcu_batch_empty(b))
		return NULL;

	head = b->head;
	b->head = head->next;
	if (b->tail == &head->next)
		rcu_batch_init(b);

	return head;
}

/*
 * Move all callbacks from the rcu_batch structure specified by "from" to
 * the structure specified by "to".
 */
static inline void rcu_batch_move(struct rcu_batch *to, struct rcu_batch *from)
{
	if (!rcu_batch_empty(from)) {
		*to->tail = from->head;
		to->tail = from->tail;
		rcu_batch_init(from);
	}
}
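
/*
 * For illustration, the states of a (hypothetical) rcu_batch b after
 * successive rcu_batch_queue() calls with heads h1 and h2 look like:
 *
 *	empty:		b.head == NULL,		b.tail == &b.head
 *	queue h1:	b.head == h1,		b.tail == &h1->next
 *	queue h2:	h1->next == h2,		b.tail == &h2->next
 *
 * The tail pointer-to-pointer is what makes both enqueue and whole-batch
 * moves O(1), with no special case for the empty batch.
 */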
static int init_srcu_struct_fields(struct srcu_struct *sp)
{
	sp->completed = 0;
	spin_lock_init(&sp->queue_lock);
	sp->running = false;
	rcu_batch_init(&sp->batch_queue);
	rcu_batch_init(&sp->batch_check0);
	rcu_batch_init(&sp->batch_check1);
	rcu_batch_init(&sp->batch_done);
	INIT_DELAYED_WORK(&sp->work, process_srcu);
	sp->per_cpu_ref = alloc_percpu(struct srcu_struct_array);
	return sp->per_cpu_ref ? 0 : -ENOMEM;
}
#ifdef CONFIG_DEBUG_LOCK_ALLOC

int __init_srcu_struct(struct srcu_struct *sp, const char *name,
		       struct lock_class_key *key)
{
	/* Don't re-initialize a lock while it is held. */
	debug_check_no_locks_freed((void *)sp, sizeof(*sp));
	lockdep_init_map(&sp->dep_map, name, key, 0);
	return init_srcu_struct_fields(sp);
}
EXPORT_SYMBOL_GPL(__init_srcu_struct);

#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */

/**
 * init_srcu_struct - initialize a sleep-RCU structure
 * @sp: structure to initialize.
 *
 * Must invoke this on a given srcu_struct before passing that srcu_struct
 * to any other function.  Each srcu_struct represents a separate domain
 * of SRCU protection.
 */
int init_srcu_struct(struct srcu_struct *sp)
{
	return init_srcu_struct_fields(sp);
}
EXPORT_SYMBOL_GPL(init_srcu_struct);

#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */
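
/*
 * As a usage sketch (illustrative only; "my_srcu" is hypothetical), a
 * dynamically initialized SRCU domain is set up and torn down with:
 *
 *	static struct srcu_struct my_srcu;
 *
 *	ret = init_srcu_struct(&my_srcu);
 *	if (ret)
 *		return ret;
 *	...
 *	cleanup_srcu_struct(&my_srcu);
 *
 * Statically initialized domains can instead use DEFINE_SRCU() or
 * DEFINE_STATIC_SRCU() from <linux/srcu.h>, where available.
 */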
/*
 * Returns approximate total of the readers' ->seq[] values for the
 * rank of per-CPU counters specified by idx.
 */
static unsigned long srcu_readers_seq_idx(struct srcu_struct *sp, int idx)
{
	int cpu;
	unsigned long sum = 0;
	unsigned long t;

	for_each_possible_cpu(cpu) {
		t = ACCESS_ONCE(per_cpu_ptr(sp->per_cpu_ref, cpu)->seq[idx]);
		sum += t;
	}
	return sum;
}

/*
 * Returns approximate number of readers active on the specified rank
 * of the per-CPU ->c[] counters.
 */
static unsigned long srcu_readers_active_idx(struct srcu_struct *sp, int idx)
{
	int cpu;
	unsigned long sum = 0;
	unsigned long t;

	for_each_possible_cpu(cpu) {
		t = ACCESS_ONCE(per_cpu_ptr(sp->per_cpu_ref, cpu)->c[idx]);
		sum += t;
	}
	return sum;
}
/*
 * Return true if the number of pre-existing readers is determined to
 * be stably zero.  An example unstable zero can occur if the call
 * to srcu_readers_active_idx() misses an __srcu_read_lock() increment,
 * but due to task migration, sees the corresponding __srcu_read_unlock()
 * decrement.  This can happen because srcu_readers_active_idx() takes
 * time to sum the array, and might in fact be interrupted or preempted
 * partway through the summation.
 */
static bool srcu_readers_active_idx_check(struct srcu_struct *sp, int idx)
{
	unsigned long seq;

	seq = srcu_readers_seq_idx(sp, idx);

	/*
	 * The following smp_mb() A pairs with the smp_mb() B located in
	 * __srcu_read_lock().  This pairing ensures that if an
	 * __srcu_read_lock() increments its counter after the summation
	 * in srcu_readers_active_idx(), then the corresponding SRCU read-side
	 * critical section will see any changes made prior to the start
	 * of the current SRCU grace period.
	 *
	 * Also, if the above call to srcu_readers_seq_idx() saw the
	 * increment of ->seq[], then the call to srcu_readers_active_idx()
	 * must see the increment of ->c[].
	 */
	smp_mb(); /* A */

	/*
	 * Note that srcu_readers_active_idx() can incorrectly return
	 * zero even though there is a pre-existing reader throughout.
	 * To see this, suppose that task A is in a very long SRCU
	 * read-side critical section that started on CPU 0, and that
	 * no other reader exists, so that the sum of the counters
	 * is equal to one.  Then suppose that task B starts executing
	 * srcu_readers_active_idx(), summing up to CPU 1, and then that
	 * task C starts reading on CPU 0, so that its increment is not
	 * summed, but finishes reading on CPU 2, so that its decrement
	 * -is- summed.  Then when task B completes its sum, it will
	 * incorrectly get zero, despite the fact that task A has been
	 * in its SRCU read-side critical section the whole time.
	 *
	 * We therefore do a validation step should srcu_readers_active_idx()
	 * return zero.
	 */
	if (srcu_readers_active_idx(sp, idx) != 0)
		return false;

	/*
	 * The remainder of this function is the validation step.
	 * The following smp_mb() D pairs with the smp_mb() C in
	 * __srcu_read_unlock().  If the __srcu_read_unlock() was seen
	 * by srcu_readers_active_idx() above, then any destructive
	 * operation performed after the grace period will happen after
	 * the corresponding SRCU read-side critical section.
	 *
	 * Note that there can be at most NR_CPUS worth of readers using
	 * the old index, which is not enough to overflow even a 32-bit
	 * integer.  (Yes, this does mean that systems having more than
	 * a billion or so CPUs need to be 64-bit systems.)  Therefore,
	 * the sum of the ->seq[] counters cannot possibly overflow.
	 * Therefore, the only way that the return values of the two
	 * calls to srcu_readers_seq_idx() can be equal is if there were
	 * no increments of the corresponding rank of ->seq[] counts
	 * in the interim.  But the missed-increment scenario laid out
	 * above includes an increment of the ->seq[] counter by
	 * the corresponding __srcu_read_lock().  Therefore, if this
	 * scenario occurs, the return values from the two calls to
	 * srcu_readers_seq_idx() will differ, and thus the validation
	 * step below suffices.
	 */
	smp_mb(); /* D */

	return srcu_readers_seq_idx(sp, idx) == seq;
}
/**
 * srcu_readers_active - returns approximate number of readers.
 * @sp: which srcu_struct to count active readers (holding srcu_read_lock).
 *
 * Note that this is not an atomic primitive, and can therefore suffer
 * severe errors when invoked on an active srcu_struct.  That said, it
 * can be useful as an error check at cleanup time.
 */
static int srcu_readers_active(struct srcu_struct *sp)
{
	int cpu;
	unsigned long sum = 0;

	for_each_possible_cpu(cpu) {
		sum += ACCESS_ONCE(per_cpu_ptr(sp->per_cpu_ref, cpu)->c[0]);
		sum += ACCESS_ONCE(per_cpu_ptr(sp->per_cpu_ref, cpu)->c[1]);
	}
	return sum;
}

/**
 * cleanup_srcu_struct - deconstruct a sleep-RCU structure
 * @sp: structure to clean up.
 *
 * Must invoke this after you are finished using a given srcu_struct that
 * was initialized via init_srcu_struct(), else you leak memory.
 */
void cleanup_srcu_struct(struct srcu_struct *sp)
{
	if (WARN_ON(srcu_readers_active(sp)))
		return; /* Leakage unless caller handles error. */
	free_percpu(sp->per_cpu_ref);
	sp->per_cpu_ref = NULL;
}
EXPORT_SYMBOL_GPL(cleanup_srcu_struct);
/*
 * Counts the new reader in the appropriate per-CPU element of the
 * srcu_struct.  Must be called from process context.
 * Returns an index that must be passed to the matching srcu_read_unlock().
 */
int __srcu_read_lock(struct srcu_struct *sp)
{
	int idx;

	idx = ACCESS_ONCE(sp->completed) & 0x1;
	preempt_disable();
	ACCESS_ONCE(this_cpu_ptr(sp->per_cpu_ref)->c[idx]) += 1;
	smp_mb(); /* B */  /* Avoid leaking the critical section. */
	ACCESS_ONCE(this_cpu_ptr(sp->per_cpu_ref)->seq[idx]) += 1;
	preempt_enable();
	return idx;
}
EXPORT_SYMBOL_GPL(__srcu_read_lock);
/*
 * Removes the count for the old reader from the appropriate per-CPU
 * element of the srcu_struct.  Note that this may well be a different
 * CPU than that which was incremented by the corresponding srcu_read_lock().
 * Must be called from process context.
 */
void __srcu_read_unlock(struct srcu_struct *sp, int idx)
{
	smp_mb(); /* C */  /* Avoid leaking the critical section. */
	this_cpu_dec(sp->per_cpu_ref->c[idx]);
}
EXPORT_SYMBOL_GPL(__srcu_read_unlock);
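
/*
 * A minimal reader-side sketch (illustrative only; "my_srcu", "gp", and
 * do_something_with() are hypothetical).  Callers normally use the
 * srcu_read_lock()/srcu_read_unlock() wrappers from <linux/srcu.h>
 * rather than these __-prefixed versions:
 *
 *	int idx;
 *	struct foo *p;
 *
 *	idx = srcu_read_lock(&my_srcu);
 *	p = srcu_dereference(gp, &my_srcu);
 *	if (p)
 *		do_something_with(p);
 *	srcu_read_unlock(&my_srcu, idx);
 */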
/*
 * We use an adaptive strategy for synchronize_srcu() and especially for
 * synchronize_srcu_expedited().  We spin for a fixed time period
 * (defined below) to allow SRCU readers to exit their read-side critical
 * sections.  If there are still some readers after 10 microseconds,
 * we repeatedly block for 1-millisecond time periods.  This approach
 * has done well in testing, so there is no need for a config parameter.
 */
#define SRCU_RETRY_CHECK_DELAY		5
#define SYNCHRONIZE_SRCU_TRYCOUNT	2
#define SYNCHRONIZE_SRCU_EXP_TRYCOUNT	12
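
/*
 * To connect these values to the "10 microseconds" above: try_check_zero()
 * below makes up to "trycount" checks, with a udelay of
 * SRCU_RETRY_CHECK_DELAY microseconds between successive checks, so the
 * default trycount of 2 spins for on the order of 2 * 5 = 10 microseconds
 * per index, and the expedited trycount of 12 for on the order of
 * 12 * 5 = 60 microseconds, before deferring to the workqueue.
 */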
/*
 * Wait until all pre-existing readers complete.  Such readers
 * will have used the index specified by "idx".
 * The caller should ensure that ->completed does not change while
 * checking, and that idx == (->completed & 1) ^ 1.
 */
static bool try_check_zero(struct srcu_struct *sp, int idx, int trycount)
{
	for (;;) {
		if (srcu_readers_active_idx_check(sp, idx))
			return true;
		if (--trycount <= 0)
			return false;
		udelay(SRCU_RETRY_CHECK_DELAY);
	}
}
/*
 * Increment the ->completed counter so that future SRCU readers will
 * use the other rank of the ->c[] and ->seq[] arrays.  This allows
 * us to wait for pre-existing readers in a starvation-free manner.
 */
static void srcu_flip(struct srcu_struct *sp)
{
	sp->completed++;
}
/*
 * Enqueue an SRCU callback on the specified srcu_struct structure,
 * initiating grace-period processing if it is not already running.
 *
 * Note that all CPUs must agree that the grace period extended beyond
 * all pre-existing SRCU read-side critical sections.  On systems with
 * more than one CPU, this means that when "func()" is invoked, each CPU
 * is guaranteed to have executed a full memory barrier since the end of
 * its last corresponding SRCU read-side critical section whose beginning
 * preceded the call to call_srcu().  It also means that each CPU executing
 * an SRCU read-side critical section that continues beyond the start of
 * "func()" must have executed a memory barrier after the call_srcu()
 * but before the beginning of that SRCU read-side critical section.
 * Note that these guarantees include CPUs that are offline, idle, or
 * executing in user mode, as well as CPUs that are executing in the kernel.
 *
 * Furthermore, if CPU A invoked call_srcu() and CPU B invoked the
 * resulting SRCU callback function "func()", then both CPU A and CPU
 * B are guaranteed to execute a full memory barrier during the time
 * interval between the call to call_srcu() and the invocation of "func()".
 * This guarantee applies even if CPU A and CPU B are the same CPU (but
 * again only if the system has more than one CPU).
 *
 * Of course, these guarantees apply only for invocations of call_srcu(),
 * srcu_read_lock(), and srcu_read_unlock() that are all passed the same
 * srcu_struct structure.
 */
void call_srcu(struct srcu_struct *sp, struct rcu_head *head,
	       void (*func)(struct rcu_head *head))
{
	unsigned long flags;

	head->next = NULL;
	head->func = func;
	spin_lock_irqsave(&sp->queue_lock, flags);
	rcu_batch_queue(&sp->batch_queue, head);
	if (!sp->running) {
		sp->running = true;
		schedule_delayed_work(&sp->work, 0);
	}
	spin_unlock_irqrestore(&sp->queue_lock, flags);
}
EXPORT_SYMBOL_GPL(call_srcu);
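
/*
 * A minimal call_srcu() sketch (illustrative only; "struct foo", "gp",
 * "my_srcu", "my_lock", and foo_reclaim() are hypothetical).  The rcu_head
 * is embedded in the protected structure and recovered via container_of():
 *
 *	struct foo {
 *		struct rcu_head rcu;
 *		...
 *	};
 *
 *	static void foo_reclaim(struct rcu_head *head)
 *	{
 *		kfree(container_of(head, struct foo, rcu));
 *	}
 *
 *	old_p = rcu_dereference_protected(gp, lockdep_is_held(&my_lock));
 *	rcu_assign_pointer(gp, new_p);
 *	call_srcu(&my_srcu, &old_p->rcu, foo_reclaim);
 */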
struct rcu_synchronize {
	struct rcu_head head;
	struct completion completion;
};

/*
 * Awaken the corresponding synchronize_srcu() instance now that a
 * grace period has elapsed.
 */
static void wakeme_after_rcu(struct rcu_head *head)
{
	struct rcu_synchronize *rcu;

	rcu = container_of(head, struct rcu_synchronize, head);
	complete(&rcu->completion);
}

static void srcu_advance_batches(struct srcu_struct *sp, int trycount);
static void srcu_reschedule(struct srcu_struct *sp);
/*
 * Helper function for synchronize_srcu() and synchronize_srcu_expedited().
 */
static void __synchronize_srcu(struct srcu_struct *sp, int trycount)
{
	struct rcu_synchronize rcu;
	struct rcu_head *head = &rcu.head;
	bool done = false;

	rcu_lockdep_assert(!lock_is_held(&sp->dep_map) &&
			   !lock_is_held(&rcu_bh_lock_map) &&
			   !lock_is_held(&rcu_lock_map) &&
			   !lock_is_held(&rcu_sched_lock_map),
			   "Illegal synchronize_srcu() in same-type SRCU (or RCU) read-side critical section");

	might_sleep();
	init_completion(&rcu.completion);

	head->next = NULL;
	head->func = wakeme_after_rcu;
	spin_lock_irq(&sp->queue_lock);
	if (!sp->running) {
		/* steal the processing owner */
		sp->running = true;
		rcu_batch_queue(&sp->batch_check0, head);
		spin_unlock_irq(&sp->queue_lock);

		srcu_advance_batches(sp, trycount);
		if (!rcu_batch_empty(&sp->batch_done)) {
			BUG_ON(sp->batch_done.head != head);
			rcu_batch_dequeue(&sp->batch_done);
			done = true;
		}
		/* give the processing owner to work_struct */
		srcu_reschedule(sp);
	} else {
		rcu_batch_queue(&sp->batch_queue, head);
		spin_unlock_irq(&sp->queue_lock);
	}

	if (!done)
		wait_for_completion(&rcu.completion);
}
/**
 * synchronize_srcu - wait for prior SRCU read-side critical-section completion
 * @sp: srcu_struct with which to synchronize.
 *
 * Wait for the counts of both indexes to drain to zero.  To avoid
 * possible starvation of synchronize_srcu(), it first waits for the
 * count of index = ((->completed & 1) ^ 1) to drain to zero, then
 * flips ->completed and waits for the count of the other index to drain.
 *
 * Can block; must be called from process context.
 *
 * Note that it is illegal to call synchronize_srcu() from the corresponding
 * SRCU read-side critical section; doing so will result in deadlock.
 * However, it is perfectly legal to call synchronize_srcu() on one
 * srcu_struct from some other srcu_struct's read-side critical section,
 * as long as the resulting graph of srcu_structs is acyclic.
 *
 * There are memory-ordering constraints implied by synchronize_srcu().
 * On systems with more than one CPU, when synchronize_srcu() returns,
 * each CPU is guaranteed to have executed a full memory barrier since
 * the end of its last corresponding SRCU read-side critical section
 * whose beginning preceded the call to synchronize_srcu().  In addition,
 * each CPU having an SRCU read-side critical section that extends beyond
 * the return from synchronize_srcu() is guaranteed to have executed a
 * full memory barrier after the beginning of synchronize_srcu() and before
 * the beginning of that SRCU read-side critical section.  Note that these
 * guarantees include CPUs that are offline, idle, or executing in user mode,
 * as well as CPUs that are executing in the kernel.
 *
 * Furthermore, if CPU A invoked synchronize_srcu(), which returned
 * to its caller on CPU B, then both CPU A and CPU B are guaranteed
 * to have executed a full memory barrier during the execution of
 * synchronize_srcu().  This guarantee applies even if CPU A and CPU B
 * are the same CPU, but again only if the system has more than one CPU.
 *
 * Of course, these memory-ordering guarantees apply only when
 * synchronize_srcu(), srcu_read_lock(), and srcu_read_unlock() are
 * passed the same srcu_struct structure.
 */
void synchronize_srcu(struct srcu_struct *sp)
{
	__synchronize_srcu(sp, rcu_expedited
			   ? SYNCHRONIZE_SRCU_EXP_TRYCOUNT
			   : SYNCHRONIZE_SRCU_TRYCOUNT);
}
EXPORT_SYMBOL_GPL(synchronize_srcu);
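
/*
 * A minimal updater-side sketch (illustrative only; "gp", "my_srcu", and
 * "my_lock" are hypothetical), showing the classical
 * remove-then-wait-then-reclaim sequence:
 *
 *	spin_lock(&my_lock);
 *	old_p = rcu_dereference_protected(gp, lockdep_is_held(&my_lock));
 *	rcu_assign_pointer(gp, new_p);		(publish new version)
 *	spin_unlock(&my_lock);
 *	synchronize_srcu(&my_srcu);		(wait for pre-existing readers)
 *	kfree(old_p);				(now safe to reclaim)
 */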
/**
 * synchronize_srcu_expedited - Brute-force SRCU grace period
 * @sp: srcu_struct with which to synchronize.
 *
 * Wait for an SRCU grace period to elapse, but be more aggressive about
 * spinning rather than blocking when waiting.
 *
 * Note that synchronize_srcu_expedited() has the same deadlock and
 * memory-ordering properties as does synchronize_srcu().
 */
void synchronize_srcu_expedited(struct srcu_struct *sp)
{
	__synchronize_srcu(sp, SYNCHRONIZE_SRCU_EXP_TRYCOUNT);
}
EXPORT_SYMBOL_GPL(synchronize_srcu_expedited);

/**
 * srcu_barrier - Wait until all in-flight call_srcu() callbacks complete.
 * @sp: srcu_struct on which to wait for in-flight callbacks.
 */
void srcu_barrier(struct srcu_struct *sp)
{
	synchronize_srcu(sp);
}
EXPORT_SYMBOL_GPL(srcu_barrier);
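
/*
 * For example (illustrative only; "my_srcu" is hypothetical), a module
 * that used call_srcu() should drain its callbacks before tearing down
 * the domain:
 *
 *	srcu_barrier(&my_srcu);		(wait for pending callbacks)
 *	cleanup_srcu_struct(&my_srcu);
 */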
/**
 * srcu_batches_completed - return batches completed.
 * @sp: srcu_struct on which to report batch completion.
 *
 * Report the number of batches, correlated with, but not necessarily
 * precisely the same as, the number of grace periods that have elapsed.
 */
long srcu_batches_completed(struct srcu_struct *sp)
{
	return sp->completed;
}
EXPORT_SYMBOL_GPL(srcu_batches_completed);

#define SRCU_CALLBACK_BATCH	10
#define SRCU_INTERVAL		1
/*
 * Move any new SRCU callbacks to the first stage of the SRCU grace
 * period pipeline.
 */
static void srcu_collect_new(struct srcu_struct *sp)
{
	if (!rcu_batch_empty(&sp->batch_queue)) {
		spin_lock_irq(&sp->queue_lock);
		rcu_batch_move(&sp->batch_check0, &sp->batch_queue);
		spin_unlock_irq(&sp->queue_lock);
	}
}
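
/*
 * For reference, callbacks flow through a four-stage pipeline, one
 * rcu_batch per stage:
 *
 *	batch_queue ---> batch_check0 ---> batch_check1 ---> batch_done
 *	(newly queued)   (awaiting first   (awaiting second  (ready to
 *	                  index to drain)   index to drain)   be invoked)
 *
 * srcu_collect_new() above feeds the first stage, srcu_advance_batches()
 * below moves callbacks rightward as readers drain, and
 * srcu_invoke_callbacks() empties the final stage.
 */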
/*
 * Core SRCU state machine.  Advance callbacks from ->batch_check0 to
 * ->batch_check1 and then to ->batch_done as readers drain.
 */
static void srcu_advance_batches(struct srcu_struct *sp, int trycount)
{
	int idx = 1 ^ (sp->completed & 1);

	/*
	 * Because readers might be delayed for an extended period after
	 * fetching ->completed for their index, at any point in time there
	 * might well be readers using both idx=0 and idx=1.  We therefore
	 * need to wait for readers to clear from both index values before
	 * invoking a callback.
	 */

	if (rcu_batch_empty(&sp->batch_check0) &&
	    rcu_batch_empty(&sp->batch_check1))
		return; /* no callbacks need to be advanced */

	if (!try_check_zero(sp, idx, trycount))
		return; /* failed to advance, will try after SRCU_INTERVAL */

	/*
	 * The callbacks in ->batch_check1 already passed their first zero
	 * check, and ->completed was already flipped, back when they were
	 * enqueued on ->batch_check1 in a previous invocation of
	 * srcu_advance_batches().  (Presumably try_check_zero() returned
	 * false during that invocation, leaving the callbacks stranded on
	 * ->batch_check1.)  The try_check_zero() above completes their
	 * second check, so they are ready to invoke: move them to
	 * ->batch_done.
	 */
	rcu_batch_move(&sp->batch_done, &sp->batch_check1);

	if (rcu_batch_empty(&sp->batch_check0))
		return; /* no callbacks need to be advanced */
	srcu_flip(sp);

	/*
	 * The callbacks in ->batch_check0 just passed their first zero
	 * check, and ->completed has just been flipped, so move them to
	 * ->batch_check1 for a future check on the other idx.
	 */
	rcu_batch_move(&sp->batch_check1, &sp->batch_check0);

	/*
	 * SRCU read-side critical sections are normally short, so check
	 * at least twice in quick succession after a flip.
	 */
	trycount = trycount < 2 ? 2 : trycount;
	if (!try_check_zero(sp, idx^1, trycount))
		return; /* failed to advance, will try after SRCU_INTERVAL */

	/*
	 * The callbacks in ->batch_check1 have now waited for all
	 * pre-existing readers using both idx values.  They are therefore
	 * ready to invoke, so move them to ->batch_done.
	 */
	rcu_batch_move(&sp->batch_done, &sp->batch_check1);
}
/*
 * Invoke a limited number of SRCU callbacks that have passed through
 * their grace period.  If there are more to do, SRCU will reschedule
 * the workqueue.
 */
static void srcu_invoke_callbacks(struct srcu_struct *sp)
{
	int i;
	struct rcu_head *head;

	for (i = 0; i < SRCU_CALLBACK_BATCH; i++) {
		head = rcu_batch_dequeue(&sp->batch_done);
		if (!head)
			break;
		local_bh_disable();
		head->func(head);
		local_bh_enable();
	}
}

/*
 * Finished one round of SRCU grace period.  Start another if there are
 * more SRCU callbacks queued, otherwise put SRCU into not-running state.
 */
static void srcu_reschedule(struct srcu_struct *sp)
{
	bool pending = true;

	if (rcu_batch_empty(&sp->batch_done) &&
	    rcu_batch_empty(&sp->batch_check1) &&
	    rcu_batch_empty(&sp->batch_check0) &&
	    rcu_batch_empty(&sp->batch_queue)) {
		spin_lock_irq(&sp->queue_lock);
		if (rcu_batch_empty(&sp->batch_done) &&
		    rcu_batch_empty(&sp->batch_check1) &&
		    rcu_batch_empty(&sp->batch_check0) &&
		    rcu_batch_empty(&sp->batch_queue)) {
			sp->running = false;
			pending = false;
		}
		spin_unlock_irq(&sp->queue_lock);
	}

	if (pending)
		schedule_delayed_work(&sp->work, SRCU_INTERVAL);
}

/*
 * This is the work-queue function that handles SRCU grace periods.
 */
void process_srcu(struct work_struct *work)
{
	struct srcu_struct *sp;

	sp = container_of(work, struct srcu_struct, work.work);

	srcu_collect_new(sp);
	srcu_advance_batches(sp, 1);
	srcu_invoke_callbacks(sp);
	srcu_reschedule(sp);
}
EXPORT_SYMBOL_GPL(process_srcu);