/*
 * Generic helpers for smp ipi calls
 *
 * (C) Jens Axboe <jens.axboe@oracle.com> 2008
 */
#include <linux/rcupdate.h>
#include <linux/rculist.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/gfp.h>
#include <linux/smp.h>
#include <linux/cpu.h>

#ifdef CONFIG_USE_GENERIC_SMP_HELPERS
static struct {
	struct list_head	queue;
	raw_spinlock_t		lock;
} call_function __cacheline_aligned_in_smp =
	{
		.queue		= LIST_HEAD_INIT(call_function.queue),
		.lock		= __RAW_SPIN_LOCK_UNLOCKED(call_function.lock),
	};
struct call_function_data {
	struct call_single_data	csd;
	atomic_t		refs;
	cpumask_var_t		cpumask;
	cpumask_var_t		cpumask_ipi;
};

static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_function_data, cfd_data);
struct call_single_queue {
	struct list_head	list;
	raw_spinlock_t		lock;
};

static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_single_queue, call_single_queue);
static int __cpuinit
hotplug_cfd(struct notifier_block *nfb, unsigned long action, void *hcpu)
{
	long cpu = (long)hcpu;
	struct call_function_data *cfd = &per_cpu(cfd_data, cpu);

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		if (!zalloc_cpumask_var_node(&cfd->cpumask, GFP_KERNEL,
				cpu_to_node(cpu)))
			return notifier_from_errno(-ENOMEM);
		if (!zalloc_cpumask_var_node(&cfd->cpumask_ipi, GFP_KERNEL,
				cpu_to_node(cpu)))
			return notifier_from_errno(-ENOMEM);
		break;
#ifdef CONFIG_HOTPLUG_CPU
	case CPU_UP_CANCELED:
	case CPU_UP_CANCELED_FROZEN:
		free_cpumask_var(cfd->cpumask);
		free_cpumask_var(cfd->cpumask_ipi);
		break;
#endif
	}

	return NOTIFY_OK;
}
static struct notifier_block __cpuinitdata hotplug_cfd_notifier = {
	.notifier_call		= hotplug_cfd,
};
void __init call_function_init(void)
{
	void *cpu = (void *)(long)smp_processor_id();
	int i;

	for_each_possible_cpu(i) {
		struct call_single_queue *q = &per_cpu(call_single_queue, i);

		raw_spin_lock_init(&q->lock);
		INIT_LIST_HEAD(&q->list);
	}

	hotplug_cfd(&hotplug_cfd_notifier, CPU_UP_PREPARE, cpu);
	register_cpu_notifier(&hotplug_cfd_notifier);
}
/*
 * csd_lock/csd_unlock used to serialize access to per-cpu csd resources
 *
 * For non-synchronous ipi calls the csd can still be in use by the
 * previous function call. For multi-cpu calls it's even more interesting
 * as we'll have to ensure no other cpu is observing our csd.
 */
static void csd_lock_wait(struct call_single_data *data)
{
	while (data->flags & CSD_FLAG_LOCK)
		cpu_relax();
}
static void csd_lock(struct call_single_data *data)
{
	csd_lock_wait(data);
	data->flags = CSD_FLAG_LOCK;

	/*
	 * prevent CPU from reordering the above assignment
	 * to ->flags with any subsequent assignments to other
	 * fields of the specified call_single_data structure:
	 */
	smp_mb();
}
static void csd_unlock(struct call_single_data *data)
{
	WARN_ON(!(data->flags & CSD_FLAG_LOCK));

	/*
	 * ensure we're all done before releasing data:
	 */
	smp_mb();

	data->flags &= ~CSD_FLAG_LOCK;
}
/*
 * Insert a previously allocated call_single_data element
 * for execution on the given CPU. data must already have
 * ->func, ->info, and ->flags set.
 */
static void generic_exec_single(int cpu, struct call_single_data *data, int wait)
{
	struct call_single_queue *dst = &per_cpu(call_single_queue, cpu);
	unsigned long flags;
	int ipi;

	raw_spin_lock_irqsave(&dst->lock, flags);
	ipi = list_empty(&dst->list);
	list_add_tail(&data->list, &dst->list);
	raw_spin_unlock_irqrestore(&dst->lock, flags);

	/*
	 * The list addition should be visible before sending the IPI
	 * handler locks the list to pull the entry off it because of
	 * normal cache coherency rules implied by spinlocks.
	 *
	 * If IPIs can go out of order to the cache coherency protocol
	 * in an architecture, sufficient synchronisation should be added
	 * to arch code to make it appear to obey cache coherency WRT
	 * locking and barrier primitives. Generic code isn't really
	 * equipped to do the right thing...
	 */
	if (ipi)
		arch_send_call_function_single_ipi(cpu);

	if (wait)
		csd_lock_wait(data);
}
/*
 * Invoked by arch to handle an IPI for call function. Must be called with
 * interrupts disabled.
 */
void generic_smp_call_function_interrupt(void)
{
	struct call_function_data *data;
	int cpu = smp_processor_id();

	/*
	 * Shouldn't receive this interrupt on a cpu that is not yet online.
	 */
	WARN_ON_ONCE(!cpu_online(cpu));

	/*
	 * Ensure entry is visible on call_function_queue after we have
	 * entered the IPI. See comment in smp_call_function_many.
	 * If we don't have this, then we may miss an entry on the list
	 * and never get another IPI to process it.
	 */
	smp_mb();

	/*
	 * It's ok to use list_for_each_rcu() here even though we may
	 * delete 'pos', since list_del_rcu() doesn't clear ->next
	 */
	list_for_each_entry_rcu(data, &call_function.queue, csd.list) {
		int refs;
		smp_call_func_t func;

		/*
		 * Since we walk the list without any locks, we might
		 * see an entry that was completed, removed from the
		 * list and is in the process of being reused.
		 *
		 * We must check that the cpu is in the cpumask before
		 * checking the refs, and both must be set before
		 * executing the callback on this cpu.
		 */
		if (!cpumask_test_cpu(cpu, data->cpumask))
			continue;

		smp_rmb();

		if (atomic_read(&data->refs) == 0)
			continue;

		func = data->csd.func;		/* save for later warn */
		func(data->csd.info);
		/*
		 * If the cpu mask is not still set then func enabled
		 * interrupts (BUG), and this cpu took another smp call
		 * function interrupt and executed func(info) twice
		 * on this cpu.  That nested execution decremented refs.
		 */
		if (!cpumask_test_and_clear_cpu(cpu, data->cpumask)) {
			WARN(1, "%pf enabled interrupts and double executed\n", func);
			continue;
		}

		refs = atomic_dec_return(&data->refs);
		WARN_ON(refs < 0);
		if (refs)
			continue;

		WARN_ON(!cpumask_empty(data->cpumask));

		raw_spin_lock(&call_function.lock);
		list_del_rcu(&data->csd.list);
		raw_spin_unlock(&call_function.lock);

		csd_unlock(&data->csd);
	}
}
/*
 * Invoked by arch to handle an IPI for call function single. Must be
 * called from the arch with interrupts disabled.
 */
void generic_smp_call_function_single_interrupt(void)
{
	struct call_single_queue *q = &__get_cpu_var(call_single_queue);
	unsigned int data_flags;
	LIST_HEAD(list);

	/*
	 * Shouldn't receive this interrupt on a cpu that is not yet online.
	 */
	WARN_ON_ONCE(!cpu_online(smp_processor_id()));
	raw_spin_lock(&q->lock);
	list_replace_init(&q->list, &list);
	raw_spin_unlock(&q->lock);

	while (!list_empty(&list)) {
		struct call_single_data *data;

		data = list_entry(list.next, struct call_single_data, list);
		list_del(&data->list);

		/*
		 * 'data' can be invalid after this call if flags == 0
		 * (when called through generic_exec_single()),
		 * so save them away before making the call:
		 */
		data_flags = data->flags;

		data->func(data->info);

		/*
		 * Unlocked CSDs are valid through generic_exec_single():
		 */
		if (data_flags & CSD_FLAG_LOCK)
			csd_unlock(data);
	}
}
static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_single_data, csd_data);
/*
 * smp_call_function_single - Run a function on a specific CPU
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait until function has completed on other CPUs.
 *
 * Returns 0 on success, else a negative status code.
 */
int smp_call_function_single(int cpu, smp_call_func_t func, void *info,
			     int wait)
{
	struct call_single_data d = {
		.flags = 0,
	};
	unsigned long flags;
	int this_cpu;
	int err = 0;
	/*
	 * prevent preemption and reschedule on another processor,
	 * as well as CPU removal
	 */
	this_cpu = get_cpu();

	/*
	 * Can deadlock when called with interrupts disabled.
	 * We allow cpu's that are not yet online though, as no one else can
	 * send smp call function interrupt to this cpu and as such deadlocks
	 * can't happen.
	 */
	WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled()
		     && !oops_in_progress);

	if (cpu == this_cpu) {
		local_irq_save(flags);
		func(info);
		local_irq_restore(flags);
	} else {
		if ((unsigned)cpu < nr_cpu_ids && cpu_online(cpu)) {
			struct call_single_data *data = &d;

			if (!wait)
				data = &__get_cpu_var(csd_data);

			csd_lock(data);

			data->func = func;
			data->info = info;
			generic_exec_single(cpu, data, wait);
		} else {
			err = -ENXIO;	/* CPU not online */
		}
	}

	put_cpu();

	return err;
}
EXPORT_SYMBOL(smp_call_function_single);
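/*
 * Example usage sketch (illustrative only): one way a caller might use
 * smp_call_function_single(). The names example_hello and
 * example_hello_on_cpu are hypothetical; the block is kept under "#if 0"
 * so the sketch is never built.
 */
#if 0
static void example_hello(void *info)
{
	/* Runs on the target CPU with interrupts disabled; keep it short. */
	printk(KERN_DEBUG "hello from cpu %d (info=%p)\n",
	       smp_processor_id(), info);
}

static int example_hello_on_cpu(int cpu)
{
	/* wait=1: only return after example_hello() has run on @cpu */
	return smp_call_function_single(cpu, example_hello, NULL, 1);
}
#endif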
/*
 * smp_call_function_any - Run a function on any of the given cpus
 * @mask: The mask of cpus it can run on.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait until function has completed.
 *
 * Returns 0 on success, else a negative status code (if no cpus were online).
 * Note that @wait will be implicitly turned on in case of allocation failures,
 * since we fall back to on-stack allocation.
 *
 * Selection preference:
 *	1) current cpu if in @mask
 *	2) any cpu of current node if in @mask
 *	3) any other online cpu in @mask
 */
int smp_call_function_any(const struct cpumask *mask,
			  smp_call_func_t func, void *info, int wait)
{
	unsigned int cpu;
	const struct cpumask *nodemask;
	int ret;

	/* Try for same CPU (cheapest) */
	cpu = get_cpu();
	if (cpumask_test_cpu(cpu, mask))
		goto call;

	/* Try for same node. */
	nodemask = cpumask_of_node(cpu_to_node(cpu));
	for (cpu = cpumask_first_and(nodemask, mask); cpu < nr_cpu_ids;
	     cpu = cpumask_next_and(cpu, nodemask, mask)) {
		if (cpu_online(cpu))
			goto call;
	}

	/* Any online will do: smp_call_function_single handles nr_cpu_ids. */
	cpu = cpumask_any_and(mask, cpu_online_mask);
call:
	ret = smp_call_function_single(cpu, func, info, wait);
	put_cpu();
	return ret;
}
EXPORT_SYMBOL_GPL(smp_call_function_any);
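/*
 * Example usage sketch (illustrative only): using smp_call_function_any()
 * when the caller does not care which CPU of a mask runs the callback.
 * example_ping and example_ping_any_of are hypothetical names; kept under
 * "#if 0" so the sketch is never built.
 */
#if 0
static void example_ping(void *info)
{
	printk(KERN_DEBUG "ping handled on cpu %d\n", smp_processor_id());
}

static int example_ping_any_of(const struct cpumask *mask)
{
	/* Prefers the current CPU, then its node, then any online CPU */
	return smp_call_function_any(mask, example_ping, NULL, 1);
}
#endif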
/*
 * __smp_call_function_single(): Run a function on a specific CPU
 * @cpu: The CPU to run on.
 * @data: Pre-allocated and setup data structure
 * @wait: If true, wait until function has completed on specified CPU.
 *
 * Like smp_call_function_single(), but allow caller to pass in a
 * pre-allocated data structure. Useful for embedding @data inside
 * other structures, for instance.
 */
void __smp_call_function_single(int cpu, struct call_single_data *data,
				int wait)
{
	unsigned int this_cpu;
	unsigned long flags;

	this_cpu = get_cpu();

	/*
	 * Can deadlock when called with interrupts disabled.
	 * We allow cpu's that are not yet online though, as no one else can
	 * send smp call function interrupt to this cpu and as such deadlocks
	 * can't happen.
	 */
	WARN_ON_ONCE(cpu_online(smp_processor_id()) && wait && irqs_disabled()
		     && !oops_in_progress);

	if (cpu == this_cpu) {
		local_irq_save(flags);
		data->func(data->info);
		local_irq_restore(flags);
	} else {
		csd_lock(data);
		generic_exec_single(cpu, data, wait);
	}
	put_cpu();
}
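/*
 * Example usage sketch (illustrative only): embedding a call_single_data
 * in a caller-owned structure, as the comment above suggests. struct
 * example_poke, example_poke_target and example_poke_cpu are hypothetical
 * names; kept under "#if 0" so the sketch is never built.
 */
#if 0
struct example_poke {
	struct call_single_data	csd;
	int			token;
};

static void example_poke_target(void *info)
{
	struct example_poke *req = info;

	printk(KERN_DEBUG "poked cpu %d, token %d\n",
	       smp_processor_id(), req->token);
}

static void example_poke_cpu(struct example_poke *req, int cpu, int token)
{
	req->token	= token;
	req->csd.flags	= 0;
	req->csd.func	= example_poke_target;
	req->csd.info	= req;

	/* wait=0: @req must stay valid until the callback has run */
	__smp_call_function_single(cpu, &req->csd, 0);
}
#endif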
/*
 * smp_call_function_many(): Run a function on a set of other CPUs.
 * @mask: The set of cpus to run on (only runs on online subset).
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed
 *        on other CPUs.
 *
 * If @wait is true, then returns once @func has returned.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler. Preemption
 * must be disabled when calling this function.
 */
void smp_call_function_many(const struct cpumask *mask,
			    smp_call_func_t func, void *info, bool wait)
{
	struct call_function_data *data;
	unsigned long flags;
	int refs, cpu, next_cpu, this_cpu = smp_processor_id();

	/*
	 * Can deadlock when called with interrupts disabled.
	 * We allow cpu's that are not yet online though, as no one else can
	 * send smp call function interrupt to this cpu and as such deadlocks
	 * can't happen.
	 */
	WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled()
		     && !oops_in_progress && !early_boot_irqs_disabled);
	/* Try to fastpath. So, what's a CPU they want? Ignoring this one. */
	cpu = cpumask_first_and(mask, cpu_online_mask);
	if (cpu == this_cpu)
		cpu = cpumask_next_and(cpu, mask, cpu_online_mask);

	/* No online cpus? We're done. */
	if (cpu >= nr_cpu_ids)
		return;

	/* Do we have another CPU which isn't us? */
	next_cpu = cpumask_next_and(cpu, mask, cpu_online_mask);
	if (next_cpu == this_cpu)
		next_cpu = cpumask_next_and(next_cpu, mask, cpu_online_mask);

	/* Fastpath: do that cpu by itself. */
	if (next_cpu >= nr_cpu_ids) {
		smp_call_function_single(cpu, func, info, wait);
		return;
	}
	data = &__get_cpu_var(cfd_data);
	csd_lock(&data->csd);

	/* This BUG_ON verifies our reuse assertions and can be removed */
	BUG_ON(atomic_read(&data->refs) || !cpumask_empty(data->cpumask));
	/*
	 * The global call function queue list add and delete are protected
	 * by a lock, but the list is traversed without any lock, relying
	 * on the rcu list add and delete to allow safe concurrent traversal.
	 * We reuse the call function data without waiting for any grace
	 * period after some other cpu removes it from the global queue.
	 * This means a cpu might find our data block as it is being
	 * checked on another cpu.
	 *
	 * We hold off the interrupt handler on the other cpu by
	 * ordering our writes to the cpu mask vs our setting of the
	 * refs counter.  We assert only the cpu owning the data block
	 * will set a bit in cpumask, and each bit will only be cleared
	 * by the subject cpu.  Each cpu must first find its bit is
	 * set and then check that refs is set indicating the element is
	 * ready to be processed, otherwise it must skip the entry.
	 *
	 * On the previous iteration refs was set to 0 by another cpu.
	 * To avoid the use of transitivity, set the counter to 0 here
	 * so the wmb will pair with the rmb in the interrupt handler.
	 */
	atomic_set(&data->refs, 0);	/* convert 3rd to 1st party write */
	data->csd.func = func;
	data->csd.info = info;

	/* Ensure 0 refs is visible before mask. Also orders func and info */
	smp_wmb();

	/* We rely on the "and" being processed before the store */
	cpumask_and(data->cpumask, mask, cpu_online_mask);
	cpumask_clear_cpu(this_cpu, data->cpumask);
	refs = cpumask_weight(data->cpumask);

	/* Some callers race with other cpus changing the passed mask */
	if (unlikely(!refs)) {
		csd_unlock(&data->csd);
		return;
	}
	/*
	 * After we put an entry into the list, data->cpumask
	 * may be cleared again when another CPU sends another IPI for
	 * a SMP function call, so data->cpumask will be zero.
	 */
	cpumask_copy(data->cpumask_ipi, data->cpumask);
	raw_spin_lock_irqsave(&call_function.lock, flags);
	/*
	 * Place entry at the _HEAD_ of the list, so that any cpu still
	 * observing the entry in generic_smp_call_function_interrupt()
	 * will not miss any other list entries:
	 */
	list_add_rcu(&data->csd.list, &call_function.queue);
	/*
	 * We rely on the wmb() in list_add_rcu to complete our writes
	 * to the cpumask before this write to refs, which indicates
	 * data is on the list and is ready to be processed.
	 */
	atomic_set(&data->refs, refs);
	raw_spin_unlock_irqrestore(&call_function.lock, flags);
	/*
	 * Make the list addition visible before sending the ipi.
	 * (IPIs must obey or appear to obey normal Linux cache
	 * coherency rules -- see comment in generic_exec_single).
	 */
	smp_mb();

	/* Send a message to all CPUs in the map */
	arch_send_call_function_ipi_mask(data->cpumask_ipi);

	/* Optionally wait for the CPUs to complete */
	if (wait)
		csd_lock_wait(&data->csd);
}
EXPORT_SYMBOL(smp_call_function_many);
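/*
 * Example usage sketch (illustrative only): a caller of
 * smp_call_function_many() honouring its "preemption must be disabled"
 * rule. example_drain and example_drain_others are hypothetical names;
 * kept under "#if 0" so the sketch is never built.
 */
#if 0
static void example_drain(void *info)
{
	printk(KERN_DEBUG "drain on cpu %d\n", smp_processor_id());
}

static void example_drain_others(const struct cpumask *mask)
{
	preempt_disable();
	/* Runs example_drain() on the online CPUs in @mask, except this one */
	smp_call_function_many(mask, example_drain, NULL, true);
	preempt_enable();
}
#endif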
/*
 * smp_call_function(): Run a function on all other CPUs.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed
 *        on other CPUs.
 *
 * Returns 0.
 *
 * If @wait is true, then returns once @func has returned; otherwise
 * it returns just before the target cpu calls @func.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.
 */
int smp_call_function(smp_call_func_t func, void *info, int wait)
{
	preempt_disable();
	smp_call_function_many(cpu_online_mask, func, info, wait);
	preempt_enable();

	return 0;
}
EXPORT_SYMBOL(smp_call_function);
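/*
 * Example usage sketch (illustrative only): the simplest use of
 * smp_call_function(). It skips the calling CPU; on_each_cpu() below is
 * the variant that also runs the callback locally. example_nop and
 * example_poke_others are hypothetical names; kept under "#if 0".
 */
#if 0
static void example_nop(void *info)
{
	printk(KERN_DEBUG "poked cpu %d\n", smp_processor_id());
}

static void example_poke_others(void)
{
	/* wait=1: all other online CPUs have run example_nop() on return */
	smp_call_function(example_nop, NULL, 1);
}
#endif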
#endif /* USE_GENERIC_SMP_HELPERS */
/* Setup configured maximum number of CPUs to activate */
unsigned int setup_max_cpus = NR_CPUS;
EXPORT_SYMBOL(setup_max_cpus);
/*
 * Setup routine for controlling SMP activation
 *
 * Command-line option of "nosmp" or "maxcpus=0" will disable SMP
 * activation entirely (the MPS table probe still happens, though).
 *
 * Command-line option of "maxcpus=<NUM>", where <NUM> is an integer
 * greater than 0, limits the maximum number of CPUs activated in
 * SMP mode to <NUM>.
 */

void __weak arch_disable_smp_support(void) { }
static int __init nosmp(char *str)
{
	setup_max_cpus = 0;
	arch_disable_smp_support();

	return 0;
}

early_param("nosmp", nosmp);
/* this is hard limit */
static int __init nrcpus(char *str)
{
	int nr_cpus;

	get_option(&str, &nr_cpus);
	if (nr_cpus > 0 && nr_cpus < nr_cpu_ids)
		nr_cpu_ids = nr_cpus;

	return 0;
}

early_param("nr_cpus", nrcpus);
static int __init maxcpus(char *str)
{
	get_option(&str, &setup_max_cpus);
	if (setup_max_cpus == 0)
		arch_disable_smp_support();

	return 0;
}

early_param("maxcpus", maxcpus);
/* Setup number of possible processor ids */
int nr_cpu_ids __read_mostly = NR_CPUS;
EXPORT_SYMBOL(nr_cpu_ids);
/* An arch may set nr_cpu_ids earlier if needed, so this would be redundant */
void __init setup_nr_cpu_ids(void)
{
	nr_cpu_ids = find_last_bit(cpumask_bits(cpu_possible_mask), NR_CPUS) + 1;
}
/* Called by boot processor to activate the rest. */
void __init smp_init(void)
{
	unsigned int cpu;

	/* FIXME: This should be done in userspace --RR */
	for_each_present_cpu(cpu) {
		if (num_online_cpus() >= setup_max_cpus)
			break;
		if (!cpu_online(cpu))
			cpu_up(cpu);
	}

	/* Any cleanup work */
	printk(KERN_INFO "Brought up %ld CPUs\n", (long)num_online_cpus());
	smp_cpus_done(setup_max_cpus);
}
/*
 * Call a function on all processors.  May be used during early boot while
 * early_boot_irqs_disabled is set.  Use local_irq_save/restore() instead
 * of local_irq_disable/enable().
 */
int on_each_cpu(void (*func) (void *info), void *info, int wait)
{
	unsigned long flags;
	int ret = 0;

	preempt_disable();
	ret = smp_call_function(func, info, wait);
	local_irq_save(flags);
	func(info);
	local_irq_restore(flags);
	preempt_enable();
	return ret;
}
EXPORT_SYMBOL(on_each_cpu);
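/*
 * Example usage sketch (illustrative only): on_each_cpu() runs the
 * callback on every online CPU, including the calling one. The names
 * below are hypothetical and <linux/atomic.h> is assumed; kept under
 * "#if 0" so the sketch is never built.
 */
#if 0
static void example_count_one(void *info)
{
	atomic_t *counter = info;

	atomic_inc(counter);
}

static int example_count_online(atomic_t *counter)
{
	/* With wait=1, *counter equals num_online_cpus() on return */
	return on_each_cpu(example_count_one, counter, 1);
}
#endif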
/*
 * on_each_cpu_mask(): Run a function on processors specified by
 * cpumask, which may include the local processor.
 * @mask: The set of cpus to run on (only runs on online subset).
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed
 *        on other CPUs.
 *
 * If @wait is true, then returns once @func has returned.
 *
 * You must not call this function with disabled interrupts or
 * from a hardware interrupt handler or from a bottom half handler.
 */
void on_each_cpu_mask(const struct cpumask *mask, smp_call_func_t func,
			void *info, bool wait)
{
	int cpu = get_cpu();

	smp_call_function_many(mask, func, info, wait);
	if (cpumask_test_cpu(cpu, mask)) {
		local_irq_disable();
		func(info);
		local_irq_enable();
	}
	put_cpu();
}
EXPORT_SYMBOL(on_each_cpu_mask);
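/*
 * Example usage sketch (illustrative only): running a callback on all
 * online CPUs of one NUMA node via on_each_cpu_mask().
 * example_run_on_node is a hypothetical name; kept under "#if 0".
 */
#if 0
static void example_run_on_node(int node, smp_call_func_t func, void *info)
{
	/* Offline CPUs in the node's mask are silently skipped */
	on_each_cpu_mask(cpumask_of_node(node), func, info, true);
}
#endif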
/*
 * on_each_cpu_cond(): Call a function on each processor for which
 * the supplied function cond_func returns true, optionally waiting
 * for all the required CPUs to finish. This may include the local
 * processor.
 * @cond_func:	A callback function that is passed a cpu id and
 *		the info parameter. The function is called
 *		with preemption disabled. The function should
 *		return a boolean value indicating whether to IPI
 *		the specified CPU.
 * @func:	The function to run on all applicable CPUs.
 *		This must be fast and non-blocking.
 * @info:	An arbitrary pointer to pass to both functions.
 * @wait:	If true, wait (atomically) until function has
 *		completed on other CPUs.
 * @gfp_flags:	GFP flags to use when allocating the cpumask
 *		used internally by the function.
 *
 * The function might sleep if the GFP flags indicates a non
 * atomic allocation is allowed.
 *
 * Preemption is disabled to protect against CPUs going offline but not online.
 * CPUs going online during the call will not be seen or sent an IPI.
 *
 * You must not call this function with disabled interrupts or
 * from a hardware interrupt handler or from a bottom half handler.
 */
void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info),
			smp_call_func_t func, void *info, bool wait,
			gfp_t gfp_flags)
{
	cpumask_var_t cpus;
	int cpu, ret;

	might_sleep_if(gfp_flags & __GFP_WAIT);

	if (likely(zalloc_cpumask_var(&cpus, (gfp_flags|__GFP_NOWARN)))) {
		preempt_disable();
		for_each_online_cpu(cpu)
			if (cond_func(cpu, info))
				cpumask_set_cpu(cpu, cpus);
		on_each_cpu_mask(cpus, func, info, wait);
		preempt_enable();
		free_cpumask_var(cpus);
	} else {
		/*
		 * No free cpumask, bother. No matter, we'll
		 * just have to IPI them one by one.
		 */
		preempt_disable();
		for_each_online_cpu(cpu)
			if (cond_func(cpu, info)) {
				ret = smp_call_function_single(cpu, func,
								info, wait);
				WARN_ON_ONCE(ret);
			}
		preempt_enable();
	}
}
EXPORT_SYMBOL(on_each_cpu_cond);
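/*
 * Example usage sketch (illustrative only): a cond_func that only IPIs
 * CPUs whose ids the caller marked in a bitmap passed via @info. The
 * names are hypothetical and <linux/bitops.h> is assumed; kept under
 * "#if 0" so the sketch is never built.
 */
#if 0
static bool example_cpu_is_marked(int cpu, void *info)
{
	unsigned long *marked = info;

	return test_bit(cpu, marked);
}

static void example_run_on_marked(unsigned long *marked, smp_call_func_t func)
{
	/* GFP_KERNEL: may sleep while allocating the internal cpumask */
	on_each_cpu_cond(example_cpu_is_marked, func, marked, true,
			 GFP_KERNEL);
}
#endif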
static void do_nothing(void *unused)
{
}

/*
 * kick_all_cpus_sync - Force all cpus out of idle
 *
 * Used to synchronize the update of pm_idle function pointer. It's
 * called after the pointer is updated and returns after the dummy
 * callback function has been executed on all cpus. The execution of
 * the function can only happen on the remote cpus after they have
 * left the idle function which had been called via pm_idle function
 * pointer. So it's guaranteed that nothing uses the previous pointer
 * anymore.
 */
void kick_all_cpus_sync(void)
{
	/* Make sure the change is visible before we kick the cpus */
	smp_mb();
	smp_call_function(do_nothing, NULL, 1);
}
EXPORT_SYMBOL_GPL(kick_all_cpus_sync);
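/*
 * Example usage sketch (illustrative only): the pattern kick_all_cpus_sync()
 * is meant for, per the comment above. The pointer example_idle_fn is
 * hypothetical; kept under "#if 0" so the sketch is never built.
 */
#if 0
static void (*example_idle_fn)(void);

static void example_update_idle_fn(void (*new_fn)(void))
{
	example_idle_fn = new_fn;

	/*
	 * Once this returns, every CPU has left any code that might still
	 * have been using the old pointer value.
	 */
	kick_all_cpus_sync();
}
#endif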