/*
 * Generic helpers for smp ipi calls
 *
 * (C) Jens Axboe <jens.axboe@oracle.com> 2008
 */
#include <linux/rcupdate.h>
#include <linux/rculist.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/gfp.h>
#include <linux/smp.h>
#include <linux/cpu.h>

#ifdef CONFIG_USE_GENERIC_SMP_HELPERS
static struct {
	struct list_head	queue;
	raw_spinlock_t		lock;
} call_function __cacheline_aligned_in_smp =
	{
		.queue		= LIST_HEAD_INIT(call_function.queue),
		.lock		= __RAW_SPIN_LOCK_UNLOCKED(call_function.lock),
	};

enum {
	CSD_FLAG_LOCK		= 0x01,
};

struct call_function_data {
	struct call_single_data	csd;
	atomic_t		refs;
	cpumask_var_t		cpumask;
};

static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_function_data, cfd_data);

struct call_single_queue {
	struct list_head	list;
	raw_spinlock_t		lock;
};

static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_single_queue, call_single_queue);

static int
hotplug_cfd(struct notifier_block *nfb, unsigned long action, void *hcpu)
{
	long cpu = (long)hcpu;
	struct call_function_data *cfd = &per_cpu(cfd_data, cpu);

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		if (!zalloc_cpumask_var_node(&cfd->cpumask, GFP_KERNEL,
				cpu_to_node(cpu)))
			return notifier_from_errno(-ENOMEM);
		break;

#ifdef CONFIG_HOTPLUG_CPU
	case CPU_UP_CANCELED:
	case CPU_UP_CANCELED_FROZEN:

	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		free_cpumask_var(cfd->cpumask);
		break;
#endif
	}

	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata hotplug_cfd_notifier = {
	.notifier_call		= hotplug_cfd,
};

void __init call_function_init(void)
{
	void *cpu = (void *)(long)smp_processor_id();
	int i;

	for_each_possible_cpu(i) {
		struct call_single_queue *q = &per_cpu(call_single_queue, i);

		raw_spin_lock_init(&q->lock);
		INIT_LIST_HEAD(&q->list);
	}

	hotplug_cfd(&hotplug_cfd_notifier, CPU_UP_PREPARE, cpu);
	register_cpu_notifier(&hotplug_cfd_notifier);
}

/*
 * csd_lock/csd_unlock used to serialize access to per-cpu csd resources
 *
 * For non-synchronous ipi calls the csd can still be in use by the
 * previous function call. For multi-cpu calls it's even more interesting
 * as we'll have to ensure no other cpu is observing our csd.
 */
static void csd_lock_wait(struct call_single_data *data)
{
	while (data->flags & CSD_FLAG_LOCK)
		cpu_relax();
}

static void csd_lock(struct call_single_data *data)
{
	csd_lock_wait(data);
	data->flags = CSD_FLAG_LOCK;

	/*
	 * prevent CPU from reordering the above assignment
	 * to ->flags with any subsequent assignments to other
	 * fields of the specified call_single_data structure:
	 */
	smp_mb();
}

static void csd_unlock(struct call_single_data *data)
{
	WARN_ON(!(data->flags & CSD_FLAG_LOCK));

	/*
	 * ensure we're all done before releasing data:
	 */
	smp_mb();

	data->flags &= ~CSD_FLAG_LOCK;
}

/*
 * Insert a previously allocated call_single_data element
 * for execution on the given CPU. data must already have
 * ->func, ->info, and ->flags set.
 */
static
void generic_exec_single(int cpu, struct call_single_data *data, int wait)
{
	struct call_single_queue *dst = &per_cpu(call_single_queue, cpu);
	unsigned long flags;
	int ipi;

	raw_spin_lock_irqsave(&dst->lock, flags);
	ipi = list_empty(&dst->list);
	list_add_tail(&data->list, &dst->list);
	raw_spin_unlock_irqrestore(&dst->lock, flags);

	/*
	 * The list addition should be visible to the interrupt handler,
	 * which locks the list to pull the entry off it, before the IPI
	 * is sent; normal cache coherency rules implied by spinlocks
	 * guarantee this ordering.
	 *
	 * If IPIs can go out of order with respect to the cache coherency
	 * protocol on an architecture, sufficient synchronisation should
	 * be added to arch code to make it appear to obey cache coherency
	 * WRT locking and barrier primitives. Generic code isn't really
	 * equipped to do the right thing...
	 */
	if (ipi)
		arch_send_call_function_single_ipi(cpu);

	if (wait)
		csd_lock_wait(data);
}

/*
 * Invoked by arch to handle an IPI for call function. Must be called with
 * interrupts disabled.
 */
void generic_smp_call_function_interrupt(void)
{
	struct call_function_data *data;
	int cpu = smp_processor_id();

	/*
	 * Shouldn't receive this interrupt on a cpu that is not yet online.
	 */
	WARN_ON_ONCE(!cpu_online(cpu));

	/*
	 * Ensure entry is visible on call_function_queue after we have
	 * entered the IPI. See comment in smp_call_function_many.
	 * If we don't have this, then we may miss an entry on the list
	 * and never get another IPI to process it.
	 */
	smp_mb();

	/*
	 * It's ok to use list_for_each_rcu() here even though we may
	 * delete 'pos', since list_del_rcu() doesn't clear ->next
	 */
	list_for_each_entry_rcu(data, &call_function.queue, csd.list) {
		int refs;
		smp_call_func_t func;

		/*
		 * Since we walk the list without any locks, we might
		 * see an entry that was completed, removed from the
		 * list and is in the process of being reused.
		 *
		 * We must check that the cpu is in the cpumask before
		 * checking the refs, and both must be set before
		 * executing the callback on this cpu.
		 */
		if (!cpumask_test_cpu(cpu, data->cpumask))
			continue;

		smp_rmb();

		if (atomic_read(&data->refs) == 0)
			continue;

		func = data->csd.func;		/* save for later warn */
		func(data->csd.info);

		/*
		 * If the cpu mask is not still set then func enabled
		 * interrupts (BUG), and this cpu took another smp call
		 * function interrupt and executed func(info) twice
		 * on this cpu. That nested execution decremented refs.
		 */
		if (!cpumask_test_and_clear_cpu(cpu, data->cpumask)) {
			WARN(1, "%pf enabled interrupts and double executed\n", func);
			continue;
		}

		refs = atomic_dec_return(&data->refs);
		WARN_ON(refs < 0);

		if (refs)
			continue;

		WARN_ON(!cpumask_empty(data->cpumask));

		raw_spin_lock(&call_function.lock);
		list_del_rcu(&data->csd.list);
		raw_spin_unlock(&call_function.lock);

		csd_unlock(&data->csd);
	}
}

/*
 * Invoked by arch to handle an IPI for call function single. Must be
 * called from the arch with interrupts disabled.
 */
void generic_smp_call_function_single_interrupt(void)
{
	struct call_single_queue *q = &__get_cpu_var(call_single_queue);
	unsigned int data_flags;
	LIST_HEAD(list);

	/*
	 * Shouldn't receive this interrupt on a cpu that is not yet online.
	 */
	WARN_ON_ONCE(!cpu_online(smp_processor_id()));

	raw_spin_lock(&q->lock);
	list_replace_init(&q->list, &list);
	raw_spin_unlock(&q->lock);

	while (!list_empty(&list)) {
		struct call_single_data *data;

		data = list_entry(list.next, struct call_single_data, list);
		list_del(&data->list);

		/*
		 * 'data' can be invalid after this call if flags == 0
		 * (when called through generic_exec_single()),
		 * so save them away before making the call:
		 */
		data_flags = data->flags;

		data->func(data->info);

		/*
		 * Unlocked CSDs are valid through generic_exec_single():
		 */
		if (data_flags & CSD_FLAG_LOCK)
			csd_unlock(data);
	}
}

static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_single_data, csd_data);

/*
 * smp_call_function_single - Run a function on a specific CPU
 * @cpu: The CPU to run @func on.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait until the function has completed on the other CPU.
 *
 * Returns 0 on success, else a negative status code.
 */
int smp_call_function_single(int cpu, smp_call_func_t func, void *info,
			     int wait)
{
	struct call_single_data d = {
		.flags = 0,
	};
	unsigned long flags;
	int this_cpu;
	int err = 0;

	/*
	 * prevent preemption and reschedule on another processor,
	 * as well as CPU removal
	 */
	this_cpu = get_cpu();

	/*
	 * Can deadlock when called with interrupts disabled.
	 * We allow cpus that are not yet online though, as no one else can
	 * send smp call function interrupt to this cpu and as such deadlocks
	 * can't happen.
	 */
	WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled()
		     && !oops_in_progress);

	if (cpu == this_cpu) {
		local_irq_save(flags);
		func(info);
		local_irq_restore(flags);
	} else {
		if ((unsigned)cpu < nr_cpu_ids && cpu_online(cpu)) {
			struct call_single_data *data = &d;

			if (!wait)
				data = &__get_cpu_var(csd_data);

			csd_lock(data);

			data->func = func;
			data->info = info;
			generic_exec_single(cpu, data, wait);
		} else {
			err = -ENXIO;	/* CPU not online */
		}
	}

	put_cpu();

	return err;
}
EXPORT_SYMBOL(smp_call_function_single);
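
/*
 * Usage sketch (illustrative only; the callback and variable names are
 * invented for this example, they are not part of this file):
 *
 *	static void bump_counter(void *info)
 *	{
 *		atomic_inc(info);	// runs on the target cpu
 *	}
 *
 *	atomic_t hits = ATOMIC_INIT(0);
 *	int err = smp_call_function_single(1, bump_counter, &hits, 1);
 *
 * Because @wait is non-zero, the caller spins in csd_lock_wait() until
 * cpu 1 has executed bump_counter(), so &hits may safely live on the
 * caller's stack.
 */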

/*
 * smp_call_function_any - Run a function on any of the given cpus
 * @mask: The mask of cpus it can run on.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait until function has completed.
 *
 * Returns 0 on success, else a negative status code (if no cpus were online).
 * Note that @wait will be implicitly turned on in case of allocation failures,
 * since we fall back to on-stack allocation.
 *
 * Selection preference:
 *	1) current cpu if in @mask
 *	2) any cpu of current node if in @mask
 *	3) any other online cpu in @mask
 */
int smp_call_function_any(const struct cpumask *mask,
			  smp_call_func_t func, void *info, int wait)
{
	unsigned int cpu;
	const struct cpumask *nodemask;
	int ret;

	/* Try for same CPU (cheapest) */
	cpu = get_cpu();
	if (cpumask_test_cpu(cpu, mask))
		goto call;

	/* Try for same node. */
	nodemask = cpumask_of_node(cpu_to_node(cpu));
	for (cpu = cpumask_first_and(nodemask, mask); cpu < nr_cpu_ids;
	     cpu = cpumask_next_and(cpu, nodemask, mask)) {
		if (cpu_online(cpu))
			goto call;
	}

	/* Any online will do: smp_call_function_single handles nr_cpu_ids. */
	cpu = cpumask_any_and(mask, cpu_online_mask);
call:
	ret = smp_call_function_single(cpu, func, info, wait);
	put_cpu();

	return ret;
}
EXPORT_SYMBOL_GPL(smp_call_function_any);
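
/*
 * Usage sketch (illustrative only; 'dev', 'dev_priv' and 'drain_queues'
 * are invented names): run a callback on whichever cpu of a device's
 * local node is cheapest to reach, preferring the current cpu.
 *
 *	err = smp_call_function_any(cpumask_of_node(dev_to_node(dev)),
 *				    drain_queues, dev_priv, 1);
 *
 * The selection order documented above (current cpu, then same node,
 * then any online cpu in the mask) keeps IPI traffic local when possible.
 */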

/*
 * __smp_call_function_single(): Run a function on a specific CPU
 * @cpu: The CPU to run on.
 * @data: Pre-allocated and setup data structure
 * @wait: If true, wait until function has completed on specified CPU.
 *
 * Like smp_call_function_single(), but allow caller to pass in a
 * pre-allocated data structure. Useful for embedding @data inside
 * other structures, for instance.
 */
void __smp_call_function_single(int cpu, struct call_single_data *data,
				int wait)
{
	unsigned int this_cpu;
	unsigned long flags;

	this_cpu = get_cpu();

	/*
	 * Can deadlock when called with interrupts disabled.
	 * We allow cpus that are not yet online though, as no one else can
	 * send smp call function interrupt to this cpu and as such deadlocks
	 * can't happen.
	 */
	WARN_ON_ONCE(cpu_online(smp_processor_id()) && wait && irqs_disabled()
		     && !oops_in_progress);

	if (cpu == this_cpu) {
		local_irq_save(flags);
		data->func(data->info);
		local_irq_restore(flags);
	} else {
		csd_lock(data);
		generic_exec_single(cpu, data, wait);
	}
	put_cpu();
}
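
/*
 * Usage sketch (illustrative only; the structure and field names are
 * invented): embedding the call_single_data in a longer-lived object
 * avoids any allocation on the IPI path.
 *
 *	struct my_remote_work {
 *		struct call_single_data	csd;
 *		int			pending;
 *	};
 *
 *	work->csd.func = my_remote_func;
 *	work->csd.info = work;
 *	__smp_call_function_single(cpu, &work->csd, 0);
 *
 * With @wait == 0 the csd must stay valid until the callback has run on
 * the remote cpu; csd_lock()/csd_unlock() track that via CSD_FLAG_LOCK.
 */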

/*
 * smp_call_function_many(): Run a function on a set of other CPUs.
 * @mask: The set of cpus to run on (only runs on online subset).
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed
 *        on other CPUs.
 *
 * If @wait is true, then returns once @func has returned.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler. Preemption
 * must be disabled when calling this function.
 */
void smp_call_function_many(const struct cpumask *mask,
			    smp_call_func_t func, void *info, bool wait)
{
	struct call_function_data *data;
	unsigned long flags;
	int refs, cpu, next_cpu, this_cpu = smp_processor_id();

	/*
	 * Can deadlock when called with interrupts disabled.
	 * We allow cpus that are not yet online though, as no one else can
	 * send smp call function interrupt to this cpu and as such deadlocks
	 * can't happen.
	 */
	WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled()
		     && !oops_in_progress && !early_boot_irqs_disabled);

	/* Try to fastpath. So, what's a CPU they want? Ignoring this one. */
	cpu = cpumask_first_and(mask, cpu_online_mask);
	if (cpu == this_cpu)
		cpu = cpumask_next_and(cpu, mask, cpu_online_mask);

	/* No online cpus? We're done. */
	if (cpu >= nr_cpu_ids)
		return;

	/* Do we have another CPU which isn't us? */
	next_cpu = cpumask_next_and(cpu, mask, cpu_online_mask);
	if (next_cpu == this_cpu)
		next_cpu = cpumask_next_and(next_cpu, mask, cpu_online_mask);

	/* Fastpath: do that cpu by itself. */
	if (next_cpu >= nr_cpu_ids) {
		smp_call_function_single(cpu, func, info, wait);
		return;
	}

	data = &__get_cpu_var(cfd_data);
	csd_lock(&data->csd);

	/* This BUG_ON verifies our reuse assertions and can be removed */
	BUG_ON(atomic_read(&data->refs) || !cpumask_empty(data->cpumask));

	/*
	 * The global call function queue list add and delete are protected
	 * by a lock, but the list is traversed without any lock, relying
	 * on the rcu list add and delete to allow safe concurrent traversal.
	 * We reuse the call function data without waiting for any grace
	 * period after some other cpu removes it from the global queue.
	 * This means a cpu might find our data block as it is being
	 * filled out.
	 *
	 * We hold off the interrupt handler on the other cpu by
	 * ordering our writes to the cpu mask vs our setting of the
	 * refs counter. We assert only the cpu owning the data block
	 * will set a bit in cpumask, and each bit will only be cleared
	 * by the subject cpu. Each cpu must first find its bit is
	 * set and then check that refs is set indicating the element is
	 * ready to be processed, otherwise it must skip the entry.
	 *
	 * On the previous iteration refs was set to 0 by another cpu.
	 * To avoid the use of transitivity, set the counter to 0 here
	 * so the wmb will pair with the rmb in the interrupt handler.
	 */
	atomic_set(&data->refs, 0);	/* convert 3rd to 1st party write */
	data->csd.func = func;
	data->csd.info = info;

	/* Ensure 0 refs is visible before mask. Also orders func and info */
	smp_wmb();

	/* We rely on the "and" being processed before the store */
	cpumask_and(data->cpumask, mask, cpu_online_mask);
	cpumask_clear_cpu(this_cpu, data->cpumask);
	refs = cpumask_weight(data->cpumask);

	/* Some callers race with other cpus changing the passed mask */
	if (unlikely(!refs)) {
		csd_unlock(&data->csd);
		return;
	}

	raw_spin_lock_irqsave(&call_function.lock, flags);
	/*
	 * Place entry at the _HEAD_ of the list, so that any cpu still
	 * observing the entry in generic_smp_call_function_interrupt()
	 * will not miss any other list entries:
	 */
	list_add_rcu(&data->csd.list, &call_function.queue);
	/*
	 * We rely on the wmb() in list_add_rcu to complete our writes
	 * to the cpumask before this write to refs, which indicates
	 * data is on the list and is ready to be processed.
	 */
	atomic_set(&data->refs, refs);
	raw_spin_unlock_irqrestore(&call_function.lock, flags);

	/*
	 * Make the list addition visible before sending the ipi.
	 * (IPIs must obey or appear to obey normal Linux cache
	 * coherency rules -- see comment in generic_exec_single).
	 */
	smp_mb();

	/* Send a message to all CPUs in the map */
	arch_send_call_function_ipi_mask(data->cpumask);

	/* Optionally wait for the CPUs to complete */
	if (wait)
		csd_lock_wait(&data->csd);
}
EXPORT_SYMBOL(smp_call_function_many);
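
/*
 * Usage sketch (illustrative only; 'flush_local_tlb' is an invented
 * callback name): IPI every other online cpu in the mask and wait for
 * all of them to finish.
 *
 *	preempt_disable();
 *	smp_call_function_many(mm_cpumask(mm), flush_local_tlb, mm, true);
 *	preempt_enable();
 *
 * Note that the current cpu is deliberately skipped; callers that also
 * need the local cpu handled should call @func directly (with interrupts
 * disabled) or use on_each_cpu_mask() below.
 */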

/*
 * smp_call_function(): Run a function on all other CPUs.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed
 *        on other CPUs.
 *
 * Returns 0.
 *
 * If @wait is true, then returns once @func has returned; otherwise
 * it returns just before the target cpu calls @func.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.
 */
int smp_call_function(smp_call_func_t func, void *info, int wait)
{
	preempt_disable();
	smp_call_function_many(cpu_online_mask, func, info, wait);
	preempt_enable();

	return 0;
}
EXPORT_SYMBOL(smp_call_function);
#endif /* USE_GENERIC_SMP_HELPERS */

/* Setup configured maximum number of CPUs to activate */
unsigned int setup_max_cpus = NR_CPUS;
EXPORT_SYMBOL(setup_max_cpus);

/*
 * Setup routine for controlling SMP activation
 *
 * Command-line option of "nosmp" or "maxcpus=0" will disable SMP
 * activation entirely (the MPS table probe still happens, though).
 *
 * Command-line option of "maxcpus=<NUM>", where <NUM> is an integer
 * greater than 0, limits the maximum number of CPUs activated in
 * SMP mode to <NUM>.
 */

void __weak arch_disable_smp_support(void) { }

static int __init nosmp(char *str)
{
	setup_max_cpus = 0;
	arch_disable_smp_support();

	return 0;
}

early_param("nosmp", nosmp);

/* this is hard limit */
static int __init nrcpus(char *str)
{
	int nr_cpus;

	get_option(&str, &nr_cpus);
	if (nr_cpus > 0 && nr_cpus < nr_cpu_ids)
		nr_cpu_ids = nr_cpus;

	return 0;
}

early_param("nr_cpus", nrcpus);

static int __init maxcpus(char *str)
{
	get_option(&str, &setup_max_cpus);
	if (setup_max_cpus == 0)
		arch_disable_smp_support();

	return 0;
}

early_param("maxcpus", maxcpus);

/* Setup number of possible processor ids */
int nr_cpu_ids __read_mostly = NR_CPUS;
EXPORT_SYMBOL(nr_cpu_ids);

/* An arch may set nr_cpu_ids earlier if needed, so this would be redundant */
void __init setup_nr_cpu_ids(void)
{
	nr_cpu_ids = find_last_bit(cpumask_bits(cpu_possible_mask), NR_CPUS) + 1;
}

/* Called by boot processor to activate the rest. */
void __init smp_init(void)
{
	unsigned int cpu;

	/* FIXME: This should be done in userspace --RR */
	for_each_present_cpu(cpu) {
		if (num_online_cpus() >= setup_max_cpus)
			break;
		if (!cpu_online(cpu))
			cpu_up(cpu);
	}

	/* Any cleanup work */
	printk(KERN_INFO "Brought up %ld CPUs\n", (long)num_online_cpus());
	smp_cpus_done(setup_max_cpus);
}

/*
 * Call a function on all processors. May be used during early boot while
 * early_boot_irqs_disabled is set. Use local_irq_save/restore() instead
 * of local_irq_disable/enable().
 */
int on_each_cpu(void (*func) (void *info), void *info, int wait)
{
	unsigned long flags;
	int ret = 0;

	preempt_disable();
	ret = smp_call_function(func, info, wait);
	local_irq_save(flags);
	func(info);
	local_irq_restore(flags);
	preempt_enable();

	return ret;
}
EXPORT_SYMBOL(on_each_cpu);
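
/*
 * Usage sketch (illustrative only; the callback is invented): run a
 * cache-flush callback everywhere, including the calling cpu, and wait
 * for completion.
 *
 *	static void flush_local_caches(void *unused)
 *	{
 *		wbinvd();	// or any per-cpu flush primitive
 *	}
 *
 *	on_each_cpu(flush_local_caches, NULL, 1);
 *
 * The local invocation happens with interrupts disabled, mirroring the
 * IPI context the other cpus see.
 */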

/*
 * on_each_cpu_mask(): Run a function on processors specified by
 * cpumask, which may include the local processor.
 * @mask: The set of cpus to run on (only runs on online subset).
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed
 *        on other CPUs.
 *
 * If @wait is true, then returns once @func has returned.
 *
 * You must not call this function with disabled interrupts or
 * from a hardware interrupt handler or from a bottom half handler.
 */
void on_each_cpu_mask(const struct cpumask *mask, smp_call_func_t func,
			void *info, bool wait)
{
	int cpu = get_cpu();

	smp_call_function_many(mask, func, info, wait);
	if (cpumask_test_cpu(cpu, mask)) {
		local_irq_disable();
		func(info);
		local_irq_enable();
	}
	put_cpu();
}
EXPORT_SYMBOL(on_each_cpu_mask);
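
/*
 * Usage sketch (illustrative only; 'reset_perf_counters' and 'nid' are
 * invented names): run a callback on every online cpu of one NUMA node,
 * including the local cpu if it belongs to that node.
 *
 *	on_each_cpu_mask(cpumask_of_node(nid), reset_perf_counters,
 *			 NULL, true);
 */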

/*
 * on_each_cpu_cond(): Call a function on each processor for which
 * the supplied function cond_func returns true, optionally waiting
 * for all the required CPUs to finish. This may include the local
 * processor.
 * @cond_func:	A callback function that is passed a cpu id and
 *		the info parameter. The function is called
 *		with preemption disabled. The function should
 *		return a boolean value indicating whether to IPI
 *		the specified CPU.
 * @func:	The function to run on all applicable CPUs.
 *		This must be fast and non-blocking.
 * @info:	An arbitrary pointer to pass to both functions.
 * @wait:	If true, wait (atomically) until function has
 *		completed on other CPUs.
 * @gfp_flags:	GFP flags to use when allocating the cpumask
 *		used internally by the function.
 *
 * The function might sleep if the GFP flags indicate a non
 * atomic allocation is allowed.
 *
 * Preemption is disabled to protect against CPUs going offline but not online.
 * CPUs going online during the call will not be seen or sent an IPI.
 *
 * You must not call this function with disabled interrupts or
 * from a hardware interrupt handler or from a bottom half handler.
 */
void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info),
			smp_call_func_t func, void *info, bool wait,
			gfp_t gfp_flags)
{
	cpumask_var_t cpus;
	int cpu, ret;

	might_sleep_if(gfp_flags & __GFP_WAIT);

	if (likely(zalloc_cpumask_var(&cpus, (gfp_flags|__GFP_NOWARN)))) {
		preempt_disable();
		for_each_online_cpu(cpu)
			if (cond_func(cpu, info))
				cpumask_set_cpu(cpu, cpus);
		on_each_cpu_mask(cpus, func, info, wait);
		preempt_enable();
		free_cpumask_var(cpus);
	} else {
		/*
		 * No free cpumask, bother. No matter, we'll
		 * just have to IPI them one by one.
		 */
		preempt_disable();
		for_each_online_cpu(cpu)
			if (cond_func(cpu, info)) {
				ret = smp_call_function_single(cpu, func,
								info, wait);
				WARN_ON_ONCE(ret);
			}
		preempt_enable();
	}
}
EXPORT_SYMBOL(on_each_cpu_cond);
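
/*
 * Usage sketch (illustrative only; the callbacks and the per-cpu counter
 * are invented): only interrupt cpus that actually have pending work,
 * avoiding pointless IPIs to idle cpus.
 *
 *	static bool has_pending(int cpu, void *info)
 *	{
 *		return per_cpu(pending_count, cpu) != 0;
 *	}
 *
 *	on_each_cpu_cond(has_pending, drain_pending, NULL, true, GFP_KERNEL);
 *
 * With GFP_KERNEL the internal cpumask allocation may sleep, so this
 * variant must not be used from atomic context.
 */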

static void do_nothing(void *unused)
{
}

/*
 * kick_all_cpus_sync - Force all cpus out of idle
 *
 * Used to synchronize the update of pm_idle function pointer. It's
 * called after the pointer is updated and returns after the dummy
 * callback function has been executed on all cpus. The execution of
 * the function can only happen on the remote cpus after they have
 * left the idle function which had been called via pm_idle function
 * pointer. So it's guaranteed that nothing uses the previous pointer
 * anymore.
 */
void kick_all_cpus_sync(void)
{
	/* Make sure the change is visible before we kick the cpus */
	smp_mb();
	smp_call_function(do_nothing, NULL, 1);
}
EXPORT_SYMBOL_GPL(kick_all_cpus_sync);
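
/*
 * Usage sketch (illustrative only; 'new_idle_routine' is a placeholder):
 * after swapping the pm_idle pointer, a caller would typically do
 *
 *	pm_idle = new_idle_routine;
 *	kick_all_cpus_sync();
 *
 * so that no cpu can still be executing the old idle routine once this
 * returns.
 */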