kernel/smp.c (linux-2.6/mini2440.git)
/*
 * Generic helpers for smp ipi calls
 *
 * (C) Jens Axboe <jens.axboe@oracle.com> 2008
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/rculist.h>
#include <linux/smp.h>

static DEFINE_PER_CPU(struct call_single_queue, call_single_queue);
static LIST_HEAD(call_function_queue);
__cacheline_aligned_in_smp DEFINE_SPINLOCK(call_function_lock);
enum {
	CSD_FLAG_WAIT	= 0x01,
	CSD_FLAG_ALLOC	= 0x02,
};

struct call_function_data {
	struct call_single_data csd;
	spinlock_t lock;
	unsigned int refs;
	cpumask_t cpumask;
	struct rcu_head rcu_head;
};

struct call_single_queue {
	struct list_head list;
	spinlock_t lock;
};
static int __cpuinit init_call_single_data(void)
{
	int i;

	for_each_possible_cpu(i) {
		struct call_single_queue *q = &per_cpu(call_single_queue, i);

		spin_lock_init(&q->lock);
		INIT_LIST_HEAD(&q->list);
	}
	return 0;
}
early_initcall(init_call_single_data);
static void csd_flag_wait(struct call_single_data *data)
{
	/* Wait for response */
	do {
		/*
		 * We need to see the flags store in the IPI handler
		 */
		smp_mb();
		if (!(data->flags & CSD_FLAG_WAIT))
			break;
		cpu_relax();
	} while (1);
}
/*
 * Insert a previously allocated call_single_data element for execution
 * on the given CPU. data must already have ->func, ->info, and ->flags set.
 */
static void generic_exec_single(int cpu, struct call_single_data *data)
{
	struct call_single_queue *dst = &per_cpu(call_single_queue, cpu);
	int wait = data->flags & CSD_FLAG_WAIT, ipi;
	unsigned long flags;

	spin_lock_irqsave(&dst->lock, flags);
	ipi = list_empty(&dst->list);
	list_add_tail(&data->list, &dst->list);
	spin_unlock_irqrestore(&dst->lock, flags);

	/*
	 * Only send the IPI if the queue was empty before our insertion;
	 * a non-empty queue means the target CPU has already been signalled.
	 */
	if (ipi)
		arch_send_call_function_single_ipi(cpu);

	if (wait)
		csd_flag_wait(data);
}
static void rcu_free_call_data(struct rcu_head *head)
{
	struct call_function_data *data;

	data = container_of(head, struct call_function_data, rcu_head);

	kfree(data);
}
/*
 * Invoked by arch to handle an IPI for call function. Must be called with
 * interrupts disabled.
 */
void generic_smp_call_function_interrupt(void)
{
	struct call_function_data *data;
	int cpu = get_cpu();

	/*
	 * It's ok to use list_for_each_entry_rcu() here even though we may
	 * delete 'pos', since list_del_rcu() doesn't clear ->next
	 */
	rcu_read_lock();
	list_for_each_entry_rcu(data, &call_function_queue, csd.list) {
		int refs;

		if (!cpu_isset(cpu, data->cpumask))
			continue;

		data->csd.func(data->csd.info);

		spin_lock(&data->lock);
		cpu_clear(cpu, data->cpumask);
		WARN_ON(data->refs == 0);
		data->refs--;
		refs = data->refs;
		spin_unlock(&data->lock);

		if (refs)
			continue;

		/* The last CPU to run the callback unlinks the element */
		spin_lock(&call_function_lock);
		list_del_rcu(&data->csd.list);
		spin_unlock(&call_function_lock);

		if (data->csd.flags & CSD_FLAG_WAIT) {
			/*
			 * serialize stores to data with the flag clear
			 * and wakeup
			 */
			smp_wmb();
			data->csd.flags &= ~CSD_FLAG_WAIT;
		} else
			call_rcu(&data->rcu_head, rcu_free_call_data);
	}
	rcu_read_unlock();

	put_cpu();
}
/*
 * Invoked by arch to handle an IPI for call function single. Must be called
 * from the arch with interrupts disabled.
 */
void generic_smp_call_function_single_interrupt(void)
{
	struct call_single_queue *q = &__get_cpu_var(call_single_queue);
	LIST_HEAD(list);

	/*
	 * Need to see other stores to list head for checking whether
	 * list is empty without holding q->lock
	 */
	smp_mb();
	while (!list_empty(&q->list)) {
		unsigned int data_flags;

		spin_lock(&q->lock);
		list_replace_init(&q->list, &list);
		spin_unlock(&q->lock);

		while (!list_empty(&list)) {
			struct call_single_data *data;

			data = list_entry(list.next, struct call_single_data,
					  list);
			list_del(&data->list);

			/*
			 * 'data' can be invalid after this call if
			 * flags == 0 (when called through
			 * generic_exec_single()), so save them away before
			 * making the call.
			 */
			data_flags = data->flags;

			data->func(data->info);

			if (data_flags & CSD_FLAG_WAIT) {
				smp_wmb();
				data->flags &= ~CSD_FLAG_WAIT;
			} else if (data_flags & CSD_FLAG_ALLOC)
				kfree(data);
		}
		/*
		 * See comment on outer loop
		 */
		smp_mb();
	}
}
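/*
 * Illustrative sketch, not part of the original file: architecture code is
 * expected to invoke the two generic handlers above from its IPI vectors,
 * with interrupts disabled. The 'example_' names below are hypothetical;
 * real arch handlers also do arch-specific acknowledgement. The
 * irq_enter()/irq_exit() pair brackets interrupt-context accounting.
 */
static void example_arch_call_function_ipi_handler(void)
{
	irq_enter();
	generic_smp_call_function_interrupt();
	irq_exit();
}

static void example_arch_call_function_single_ipi_handler(void)
{
	irq_enter();
	generic_smp_call_function_single_interrupt();
	irq_exit();
}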
/*
 * smp_call_function_single - Run a function on a specific CPU
 * @cpu: The CPU to run @func on.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait until the function has completed on the other CPU.
 *
 * Returns 0 on success, else a negative status code. Note that @wait
 * will be implicitly turned on in case of allocation failures, since
 * we fall back to on-stack allocation.
 */
int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
			     int wait)
{
	struct call_single_data d;
	unsigned long flags;
	/* prevent preemption and reschedule on another processor */
	int me = get_cpu();

	/* Can deadlock when called with interrupts disabled */
	WARN_ON(irqs_disabled());

	if (cpu == me) {
		local_irq_save(flags);
		func(info);
		local_irq_restore(flags);
	} else {
		struct call_single_data *data = NULL;

		if (!wait) {
			data = kmalloc(sizeof(*data), GFP_ATOMIC);
			if (data)
				data->flags = CSD_FLAG_ALLOC;
		}
		if (!data) {
			data = &d;
			data->flags = CSD_FLAG_WAIT;
		}

		data->func = func;
		data->info = info;
		generic_exec_single(cpu, data);
	}

	put_cpu();
	return 0;
}
EXPORT_SYMBOL(smp_call_function_single);
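/*
 * Illustrative sketch, not part of the original file: a minimal caller of
 * smp_call_function_single(). The 'example_' names are hypothetical, and
 * CPU 1 is assumed online. The callback runs from the IPI handler with
 * interrupts disabled, so it must be fast and must not sleep.
 */
static void example_bump_counter(void *info)
{
	atomic_t *counter = info;

	atomic_inc(counter);	/* executes on the target CPU */
}

static void example_single_call(void)
{
	atomic_t counter = ATOMIC_INIT(0);

	/*
	 * wait == 1 means 'counter' on our stack stays valid until the
	 * target CPU has finished running example_bump_counter().
	 */
	smp_call_function_single(1, example_bump_counter, &counter, 1);
}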
/*
 * __smp_call_function_single(): Run a function on another CPU
 * @cpu: The CPU to run on.
 * @data: Pre-allocated and setup data structure
 *
 * Like smp_call_function_single(), but allow caller to pass in a
 * pre-allocated data structure. Useful for embedding @data inside
 * other structures, for instance.
 */
void __smp_call_function_single(int cpu, struct call_single_data *data)
{
	/* Can deadlock when called with interrupts disabled */
	WARN_ON((data->flags & CSD_FLAG_WAIT) && irqs_disabled());

	generic_exec_single(cpu, data);
}
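/*
 * Illustrative sketch, not part of the original file: embedding a
 * call_single_data inside a larger object, as the comment above suggests.
 * 'struct example_work' and the function names are hypothetical. With
 * neither CSD_FLAG_WAIT nor CSD_FLAG_ALLOC set, the IPI handler touches
 * nothing after the callback returns, so the caller owns the lifetime of
 * the structure and must keep it valid until the callback has run.
 */
struct example_work {
	struct call_single_data csd;
	int payload;
};

static void example_work_fn(void *info)
{
	struct example_work *work = info;

	/* runs on the target CPU; 'work' remains owned by the submitter */
	work->payload++;
}

static void example_submit(struct example_work *work, int cpu)
{
	work->csd.flags = 0;
	work->csd.func = example_work_fn;
	work->csd.info = work;
	__smp_call_function_single(cpu, &work->csd);
}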
/*
 * smp_call_function_mask(): Run a function on a set of other CPUs.
 * @mask: The set of cpus to run on.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed on other CPUs.
 *
 * Returns 0 on success, else a negative status code.
 *
 * If @wait is true, then returns once @func has returned. Note that @wait
 * will be implicitly turned on in case of allocation failures, since
 * we fall back to on-stack allocation.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler. Preemption
 * must be disabled when calling this function.
 */
int smp_call_function_mask(cpumask_t mask, void (*func)(void *), void *info,
			   int wait)
{
	struct call_function_data d;
	struct call_function_data *data = NULL;
	cpumask_t allbutself;
	unsigned long flags;
	int cpu, num_cpus;

	/* Can deadlock when called with interrupts disabled */
	WARN_ON(irqs_disabled());

	cpu = smp_processor_id();
	allbutself = cpu_online_map;
	cpu_clear(cpu, allbutself);
	cpus_and(mask, mask, allbutself);
	num_cpus = cpus_weight(mask);

	/*
	 * If zero CPUs, return. If just a single CPU, turn this request
	 * into a targeted single call instead since it's faster.
	 */
	if (!num_cpus)
		return 0;
	else if (num_cpus == 1) {
		cpu = first_cpu(mask);
		return smp_call_function_single(cpu, func, info, wait);
	}

	if (!wait) {
		data = kmalloc(sizeof(*data), GFP_ATOMIC);
		if (data)
			data->csd.flags = CSD_FLAG_ALLOC;
	}
	if (!data) {
		data = &d;
		data->csd.flags = CSD_FLAG_WAIT;
		wait = 1;
	}

	spin_lock_init(&data->lock);
	data->csd.func = func;
	data->csd.info = info;
	data->refs = num_cpus;
	data->cpumask = mask;

	spin_lock_irqsave(&call_function_lock, flags);
	list_add_tail_rcu(&data->csd.list, &call_function_queue);
	spin_unlock_irqrestore(&call_function_lock, flags);

	/* Send a message to all CPUs in the map */
	arch_send_call_function_ipi(mask);

	/* optionally wait for the CPUs to complete */
	if (wait)
		csd_flag_wait(&data->csd);

	return 0;
}
EXPORT_SYMBOL(smp_call_function_mask);
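/*
 * Illustrative sketch, not part of the original file: broadcasting to an
 * explicit cpumask. The 'example_' names are hypothetical. Note the
 * documented requirements above: preemption disabled, interrupts enabled.
 */
static atomic_t example_hits = ATOMIC_INIT(0);

static void example_count_cpu(void *info)
{
	atomic_inc(&example_hits);
}

static void example_mask_call(void)
{
	cpumask_t mask = cpu_online_map;

	preempt_disable();	/* required by smp_call_function_mask() */
	smp_call_function_mask(mask, example_count_cpu, NULL, 1);
	preempt_enable();
}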
/*
 * smp_call_function(): Run a function on all other CPUs.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed on other CPUs.
 *
 * Returns 0 on success, else a negative status code.
 *
 * If @wait is true, then returns once @func has returned; otherwise
 * it returns just before the target cpu calls @func. In case of allocation
 * failure, @wait will be implicitly turned on.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.
 */
int smp_call_function(void (*func)(void *), void *info, int wait)
{
	int ret;

	preempt_disable();
	ret = smp_call_function_mask(cpu_online_map, func, info, wait);
	preempt_enable();
	return ret;
}
EXPORT_SYMBOL(smp_call_function);
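/*
 * Illustrative sketch, not part of the original file: the classic use of
 * smp_call_function() is making every other CPU run a short routine, for
 * example draining a per-cpu cache. 'example_drain' is hypothetical. As
 * documented above, the calling CPU is excluded, so it must run the
 * routine on itself separately.
 */
static void example_drain(void *unused)
{
	/* fast, non-blocking, runs with interrupts disabled */
}

static void example_drain_all(void)
{
	/* run example_drain on every other online CPU and wait */
	smp_call_function(example_drain, NULL, 1);
	/* the calling CPU is not included, so handle it locally */
	example_drain(NULL);
}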
/*
 * These helpers let architecture code hold off the call-function IPI
 * path, e.g. while a CPU is being brought online.
 */
void ipi_call_lock(void)
{
	spin_lock(&call_function_lock);
}

void ipi_call_unlock(void)
{
	spin_unlock(&call_function_lock);
}

void ipi_call_lock_irq(void)
{
	spin_lock_irq(&call_function_lock);
}

void ipi_call_unlock_irq(void)
{
	spin_unlock_irq(&call_function_lock);
}