/*
 * Generic helpers for smp ipi calls
 *
 * (C) Jens Axboe <jens.axboe@oracle.com> 2008
 */
#include <linux/rcupdate.h>
#include <linux/rculist.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/cpu.h>
static DEFINE_PER_CPU(struct call_single_queue, call_single_queue);

static struct {
        struct list_head        queue;
        spinlock_t              lock;
} call_function __cacheline_aligned_in_smp =
        {
                .queue          = LIST_HEAD_INIT(call_function.queue),
                .lock           = __SPIN_LOCK_UNLOCKED(call_function.lock),
        };
enum {
        CSD_FLAG_LOCK           = 0x01,
};

struct call_function_data {
        struct call_single_data csd;
        atomic_t                refs;
        cpumask_var_t           cpumask;
};

struct call_single_queue {
        struct list_head        list;
        spinlock_t              lock;
};

static DEFINE_PER_CPU(struct call_function_data, cfd_data);
static int
hotplug_cfd(struct notifier_block *nfb, unsigned long action, void *hcpu)
{
        long cpu = (long)hcpu;
        struct call_function_data *cfd = &per_cpu(cfd_data, cpu);

        switch (action) {
        case CPU_UP_PREPARE:
        case CPU_UP_PREPARE_FROZEN:
                if (!zalloc_cpumask_var_node(&cfd->cpumask, GFP_KERNEL,
                                cpu_to_node(cpu)))
                        return NOTIFY_BAD;
                break;

#ifdef CONFIG_HOTPLUG_CPU
        case CPU_UP_CANCELED:
        case CPU_UP_CANCELED_FROZEN:
        case CPU_DEAD:
        case CPU_DEAD_FROZEN:
                free_cpumask_var(cfd->cpumask);
                break;
#endif
        }

        return NOTIFY_OK;
}
static struct notifier_block __cpuinitdata hotplug_cfd_notifier = {
        .notifier_call          = hotplug_cfd,
};
static int __cpuinit init_call_single_data(void)
{
        void *cpu = (void *)(long)smp_processor_id();
        int i;

        for_each_possible_cpu(i) {
                struct call_single_queue *q = &per_cpu(call_single_queue, i);

                spin_lock_init(&q->lock);
                INIT_LIST_HEAD(&q->list);
        }

        hotplug_cfd(&hotplug_cfd_notifier, CPU_UP_PREPARE, cpu);
        register_cpu_notifier(&hotplug_cfd_notifier);

        return 0;
}
early_initcall(init_call_single_data);
/*
 * csd_lock/csd_unlock are used to serialize access to per-cpu csd resources.
 *
 * For non-synchronous ipi calls the csd can still be in use by the
 * previous function call. For multi-cpu calls it's even more interesting
 * as we'll have to ensure no other cpu is observing our csd.
 */
static void csd_lock_wait(struct call_single_data *data)
{
        while (data->flags & CSD_FLAG_LOCK)
                cpu_relax();
}
static void csd_lock(struct call_single_data *data)
{
        csd_lock_wait(data);
        data->flags = CSD_FLAG_LOCK;

        /*
         * prevent CPU from reordering the above assignment
         * to ->flags with any subsequent assignments to other
         * fields of the specified call_single_data structure:
         */
        smp_mb();
}
static void csd_unlock(struct call_single_data *data)
{
        WARN_ON(!(data->flags & CSD_FLAG_LOCK));

        /*
         * ensure we're all done before releasing data:
         */
        smp_mb();

        data->flags &= ~CSD_FLAG_LOCK;
}
/*
 * Insert a previously allocated call_single_data element
 * for execution on the given CPU. data must already have
 * ->func, ->info, and ->flags set.
 */
static
void generic_exec_single(int cpu, struct call_single_data *data, int wait)
{
        struct call_single_queue *dst = &per_cpu(call_single_queue, cpu);
        unsigned long flags;
        int ipi;

        spin_lock_irqsave(&dst->lock, flags);
        ipi = list_empty(&dst->list);
        list_add_tail(&data->list, &dst->list);
        spin_unlock_irqrestore(&dst->lock, flags);

        /*
         * The list addition should be visible before sending the IPI,
         * since the handler locks the list to pull the entry off it;
         * normal cache coherency rules implied by spinlocks provide
         * that ordering.
         *
         * If IPIs can go out of order with respect to the cache coherency
         * protocol on an architecture, sufficient synchronisation should
         * be added to arch code to make it appear to obey cache coherency
         * WRT locking and barrier primitives. Generic code isn't really
         * equipped to do the right thing...
         */
        if (ipi)
                arch_send_call_function_single_ipi(cpu);

        if (wait)
                csd_lock_wait(data);
}
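/*
 * Illustrative sketch, not part of this file: how a caller is expected to
 * prepare a call_single_data before handing it to generic_exec_single().
 * The function name below is hypothetical; the real callers are
 * smp_call_function_single() and __smp_call_function_single() further down.
 * Assumes preemption is disabled and @cpu is a different, online CPU.
 */
static void example_exec_single_usage(int cpu, void (*func)(void *info),
                                      void *info)
{
        struct call_single_data d = { .flags = 0 };

        csd_lock(&d);           /* sets CSD_FLAG_LOCK in ->flags */
        d.func = func;
        d.info = info;

        /* queue on @cpu's call_single_queue, kick it with an IPI, and wait */
        generic_exec_single(cpu, &d, 1);
}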
/*
 * Invoked by arch to handle an IPI for call function. Must be called with
 * interrupts disabled.
 */
void generic_smp_call_function_interrupt(void)
{
        struct call_function_data *data;
        int cpu = get_cpu();

        /*
         * Shouldn't receive this interrupt on a cpu that is not yet online.
         */
        WARN_ON_ONCE(!cpu_online(cpu));

        /*
         * Ensure entry is visible on call_function_queue after we have
         * entered the IPI. See comment in smp_call_function_many.
         * If we don't have this, then we may miss an entry on the list
         * and never get another IPI to process it.
         */
        smp_mb();

        /*
         * It's ok to use list_for_each_entry_rcu() here even though we may
         * delete 'pos', since list_del_rcu() doesn't clear ->next.
         */
        list_for_each_entry_rcu(data, &call_function.queue, csd.list) {
                int refs;

                if (!cpumask_test_and_clear_cpu(cpu, data->cpumask))
                        continue;

                data->csd.func(data->csd.info);

                refs = atomic_dec_return(&data->refs);
                WARN_ON(refs < 0);
                if (!refs) {
                        spin_lock(&call_function.lock);
                        list_del_rcu(&data->csd.list);
                        spin_unlock(&call_function.lock);
                }

                if (refs)
                        continue;

                csd_unlock(&data->csd);
        }

        put_cpu();
}
/*
 * Invoked by arch to handle an IPI for call function single. Must be
 * called from the arch with interrupts disabled.
 */
void generic_smp_call_function_single_interrupt(void)
{
        struct call_single_queue *q = &__get_cpu_var(call_single_queue);
        unsigned int data_flags;
        LIST_HEAD(list);

        /*
         * Shouldn't receive this interrupt on a cpu that is not yet online.
         */
        WARN_ON_ONCE(!cpu_online(smp_processor_id()));

        spin_lock(&q->lock);
        list_replace_init(&q->list, &list);
        spin_unlock(&q->lock);

        while (!list_empty(&list)) {
                struct call_single_data *data;

                data = list_entry(list.next, struct call_single_data, list);
                list_del(&data->list);

                /*
                 * 'data' can be invalid after this call if flags == 0
                 * (when called through generic_exec_single()),
                 * so save the flags away before making the call:
                 */
                data_flags = data->flags;

                data->func(data->info);

                /*
                 * Unlocked CSDs are valid through generic_exec_single():
                 */
                if (data_flags & CSD_FLAG_LOCK)
                        csd_unlock(data);
        }
}
static DEFINE_PER_CPU(struct call_single_data, csd_data);
/**
 * smp_call_function_single - Run a function on a specific CPU
 * @cpu: The CPU to run on.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait until the function has completed on the other CPU.
 *
 * Returns 0 on success, else a negative status code. Note that @wait
 * will be implicitly turned on in case of allocation failures, since
 * we fall back to on-stack allocation.
 */
int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
                             int wait)
{
        struct call_single_data d = {
                .flags = 0,
        };
        unsigned long flags;
        int this_cpu;
        int err = 0;

        /*
         * prevent preemption and reschedule on another processor,
         * as well as CPU removal
         */
        this_cpu = get_cpu();

        /*
         * Can deadlock when called with interrupts disabled.
         * We allow cpu's that are not yet online though, as no one else can
         * send smp call function interrupt to this cpu and as such deadlocks
         * can't happen.
         */
        WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled()
                     && !oops_in_progress);

        if (cpu == this_cpu) {
                local_irq_save(flags);
                func(info);
                local_irq_restore(flags);
        } else {
                if ((unsigned)cpu < nr_cpu_ids && cpu_online(cpu)) {
                        struct call_single_data *data = &d;

                        if (!wait)
                                data = &__get_cpu_var(csd_data);

                        csd_lock(data);

                        data->func = func;
                        data->info = info;
                        generic_exec_single(cpu, data, wait);
                } else {
                        err = -ENXIO;   /* CPU not online */
                }
        }

        put_cpu();

        return err;
}
EXPORT_SYMBOL(smp_call_function_single);
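/*
 * Illustrative sketch, not part of the kernel API: typical use of
 * smp_call_function_single(). The function and variable names below are
 * hypothetical examples only.
 */
static void example_remote_hello(void *info)
{
        printk(KERN_INFO "hello from CPU %d, cookie %p\n",
               smp_processor_id(), info);
}

static int example_call_single(int target_cpu)
{
        int cookie = 42;

        /* run example_remote_hello() on @target_cpu and wait for completion */
        return smp_call_function_single(target_cpu, example_remote_hello,
                                        &cookie, 1);
}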
/**
 * __smp_call_function_single(): Run a function on another CPU
 * @cpu: The CPU to run on.
 * @data: Pre-allocated and setup data structure
 *
 * Like smp_call_function_single(), but allows the caller to pass in a
 * pre-allocated data structure. Useful for embedding @data inside
 * other structures, for instance.
 */
void __smp_call_function_single(int cpu, struct call_single_data *data,
                                int wait)
{
        csd_lock(data);

        /*
         * Can deadlock when called with interrupts disabled.
         * We allow cpu's that are not yet online though, as no one else can
         * send smp call function interrupt to this cpu and as such deadlocks
         * can't happen.
         */
        WARN_ON_ONCE(cpu_online(smp_processor_id()) && wait && irqs_disabled()
                     && !oops_in_progress);

        generic_exec_single(cpu, data, wait);
}
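/*
 * Illustrative sketch, not part of the kernel API: embedding a
 * call_single_data inside a caller-owned structure and firing it with
 * __smp_call_function_single(), as suggested above. All names below are
 * hypothetical.
 */
struct example_remote_work {
        struct call_single_data csd;
        int payload;
};

static void example_remote_work_fn(void *info)
{
        struct example_remote_work *work = info;

        printk(KERN_INFO "payload %d handled on CPU %d\n",
               work->payload, smp_processor_id());
}

static void example_queue_remote_work(int cpu, struct example_remote_work *work)
{
        work->csd.func = example_remote_work_fn;
        work->csd.info = work;
        work->csd.flags = 0;

        /* @work must stay valid until the callback has run on @cpu */
        __smp_call_function_single(cpu, &work->csd, 0);
}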
/* Deprecated: shim for archs using old arch_send_call_function_ipi API. */
#ifndef arch_send_call_function_ipi_mask
# define arch_send_call_function_ipi_mask(maskp) \
         arch_send_call_function_ipi(*(maskp))
#endif
/**
 * smp_call_function_many(): Run a function on a set of other CPUs.
 * @mask: The set of cpus to run on (only runs on online subset).
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed
 *        on other CPUs.
 *
 * If @wait is true, then returns once @func has returned. Note that @wait
 * will be implicitly turned on in case of allocation failures, since
 * we fall back to on-stack allocation.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler. Preemption
 * must be disabled when calling this function.
 */
void smp_call_function_many(const struct cpumask *mask,
                            void (*func)(void *), void *info, bool wait)
{
        struct call_function_data *data;
        unsigned long flags;
        int cpu, next_cpu, this_cpu = smp_processor_id();

        /*
         * Can deadlock when called with interrupts disabled.
         * We allow cpu's that are not yet online though, as no one else can
         * send smp call function interrupt to this cpu and as such deadlocks
         * can't happen.
         */
        WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled()
                     && !oops_in_progress);
        /* So, what's a CPU they want? Ignoring this one. */
        cpu = cpumask_first_and(mask, cpu_online_mask);
        if (cpu == this_cpu)
                cpu = cpumask_next_and(cpu, mask, cpu_online_mask);

        /* No online cpus?  We're done. */
        if (cpu >= nr_cpu_ids)
                return;

        /* Do we have another CPU which isn't us? */
        next_cpu = cpumask_next_and(cpu, mask, cpu_online_mask);
        if (next_cpu == this_cpu)
                next_cpu = cpumask_next_and(next_cpu, mask, cpu_online_mask);

        /* Fastpath: do that cpu by itself. */
        if (next_cpu >= nr_cpu_ids) {
                smp_call_function_single(cpu, func, info, wait);
                return;
        }
        data = &__get_cpu_var(cfd_data);
        csd_lock(&data->csd);

        data->csd.func = func;
        data->csd.info = info;
        cpumask_and(data->cpumask, mask, cpu_online_mask);
        cpumask_clear_cpu(this_cpu, data->cpumask);
        atomic_set(&data->refs, cpumask_weight(data->cpumask));
        spin_lock_irqsave(&call_function.lock, flags);
        /*
         * Place entry at the _HEAD_ of the list, so that any cpu still
         * observing the entry in generic_smp_call_function_interrupt()
         * will not miss any other list entries:
         */
        list_add_rcu(&data->csd.list, &call_function.queue);
        spin_unlock_irqrestore(&call_function.lock, flags);
        /*
         * Make the list addition visible before sending the ipi.
         * (IPIs must obey or appear to obey normal Linux cache
         * coherency rules -- see comment in generic_exec_single).
         */
        smp_mb();

        /* Send a message to all CPUs in the map */
        arch_send_call_function_ipi_mask(data->cpumask);

        /* Optionally wait for the CPUs to complete */
        if (wait)
                csd_lock_wait(&data->csd);
}
EXPORT_SYMBOL(smp_call_function_many);
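/*
 * Illustrative sketch, not part of the kernel API: calling
 * smp_call_function_many() on an explicitly built cpumask. All names are
 * hypothetical; note that preemption must be disabled around the call, as
 * required by the comment above.
 */
static void example_call_many(void (*func)(void *), void *info)
{
        cpumask_var_t mask;

        if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
                return;

        cpumask_copy(mask, cpu_online_mask);
        cpumask_clear_cpu(0, mask);             /* e.g. skip the boot CPU */

        preempt_disable();
        smp_call_function_many(mask, func, info, 1);    /* wait for completion */
        preempt_enable();

        free_cpumask_var(mask);
}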
/**
 * smp_call_function(): Run a function on all other CPUs.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed
 *        on other CPUs.
 *
 * Returns 0.
 *
 * If @wait is true, then returns once @func has returned; otherwise
 * it returns just before the target cpu calls @func. In case of allocation
 * failure, @wait will be implicitly turned on.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.
 */
int smp_call_function(void (*func)(void *), void *info, int wait)
{
        preempt_disable();
        smp_call_function_many(cpu_online_mask, func, info, wait);
        preempt_enable();

        return 0;
}
EXPORT_SYMBOL(smp_call_function);
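/*
 * Illustrative sketch, not part of the kernel API: broadcasting work to
 * every other online CPU via smp_call_function(). Names are hypothetical.
 */
static void example_drain_local_state(void *unused)
{
        /* per-cpu work goes here; runs from the IPI with interrupts disabled */
}

static void example_broadcast(void)
{
        /* run on all other online CPUs and wait until they have finished */
        smp_call_function(example_drain_local_state, NULL, 1);

        /* the calling CPU is not included, so handle it directly */
        example_drain_local_state(NULL);
}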
void ipi_call_lock(void)
{
        spin_lock(&call_function.lock);
}

void ipi_call_unlock(void)
{
        spin_unlock(&call_function.lock);
}

void ipi_call_lock_irq(void)
{
        spin_lock_irq(&call_function.lock);
}

void ipi_call_unlock_irq(void)
{
        spin_unlock_irq(&call_function.lock);
}