/*
 *	linux/kernel/softirq.c
 *
 *	Copyright (C) 1992 Linus Torvalds
 *
 *	Distribute under GPLv2.
 *
 *	Rewritten. Old one was good in 2.2, but in 2.3 it was immoral. --ANK (990903)
 *
 *	Remote softirq infrastructure is by Jens Axboe.
 */
#include <linux/module.h>
#include <linux/kernel_stat.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/notifier.h>
#include <linux/percpu.h>
#include <linux/cpu.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/rcupdate.h>
#include <linux/smp.h>
#include <linux/tick.h>

#include <asm/irq.h>
/*
   - No shared variables, all the data are CPU local.
   - If a softirq needs serialization, let it serialize itself
     by its own spinlocks.
   - Even if softirq is serialized, only local cpu is marked for
     execution. Hence, we get something sort of weak cpu binding.
     Though it is still not clear whether it will result in better
     locality or not.

   Examples:
   - NET RX softirq. It is multithreaded and does not require
     any global serialization.
   - NET TX softirq. It kicks software netdevice queues, hence
     it is logically serialized per device, but this serialization
     is invisible to common code.
   - Tasklets: serialized wrt itself.
 */
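
/*
 * Illustrative sketch (not part of this file): a softirq action that
 * serializes itself with its own spinlock, per the rules above. The
 * handler and lock names below are hypothetical.
 *
 *	static DEFINE_SPINLOCK(my_lock);
 *
 *	static void my_softirq_action(struct softirq_action *h)
 *	{
 *		spin_lock(&my_lock);
 *		... touch state shared with the same action on other CPUs ...
 *		spin_unlock(&my_lock);
 *	}
 */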
#ifndef __ARCH_IRQ_STAT
irq_cpustat_t irq_stat[NR_CPUS] ____cacheline_aligned;
EXPORT_SYMBOL(irq_stat);
#endif

static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp;

static DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
/*
 * We cannot loop indefinitely here to avoid userspace starvation,
 * but we also don't want to introduce a worst case 1/HZ latency
 * to the pending events, so let the scheduler balance
 * the softirq load for us.
 */
static inline void wakeup_softirqd(void)
{
	/* Interrupts are disabled: no need to stop preemption */
	struct task_struct *tsk = __get_cpu_var(ksoftirqd);

	if (tsk && tsk->state != TASK_RUNNING)
		wake_up_process(tsk);
}
/*
 * This one is for softirq.c-internal use,
 * where hardirqs are disabled legitimately:
 */
#ifdef CONFIG_TRACE_IRQFLAGS
static void __local_bh_disable(unsigned long ip)
{
	unsigned long flags;

	WARN_ON_ONCE(in_irq());

	raw_local_irq_save(flags);
	add_preempt_count(SOFTIRQ_OFFSET);
	/*
	 * Were softirqs turned off above:
	 */
	if (softirq_count() == SOFTIRQ_OFFSET)
		trace_softirqs_off(ip);
	raw_local_irq_restore(flags);
}
#else /* !CONFIG_TRACE_IRQFLAGS */
static inline void __local_bh_disable(unsigned long ip)
{
	add_preempt_count(SOFTIRQ_OFFSET);
	barrier();
}
#endif /* CONFIG_TRACE_IRQFLAGS */
void local_bh_disable(void)
{
	__local_bh_disable((unsigned long)__builtin_return_address(0));
}

EXPORT_SYMBOL(local_bh_disable);
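
/*
 * Usage sketch (illustrative only; the data being protected is an
 * assumption): process context guarding state it shares with a softirq
 * by disabling bottom halves locally.
 *
 *	local_bh_disable();
 *	... update state also touched from softirq context on this cpu ...
 *	local_bh_enable();	(runs any softirqs that became pending)
 */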
void __local_bh_enable(void)
{
	WARN_ON_ONCE(in_irq());

	/*
	 * softirqs should never be enabled by __local_bh_enable(),
	 * it always nests inside local_bh_enable() sections:
	 */
	WARN_ON_ONCE(softirq_count() == SOFTIRQ_OFFSET);

	sub_preempt_count(SOFTIRQ_OFFSET);
}
EXPORT_SYMBOL_GPL(__local_bh_enable);
/*
 * Special-case - softirqs can safely be enabled in
 * cond_resched_softirq(), or by __do_softirq(),
 * without processing still-pending softirqs:
 */
void _local_bh_enable(void)
{
	WARN_ON_ONCE(in_irq());
	WARN_ON_ONCE(!irqs_disabled());

	if (softirq_count() == SOFTIRQ_OFFSET)
		trace_softirqs_on((unsigned long)__builtin_return_address(0));
	sub_preempt_count(SOFTIRQ_OFFSET);
}

EXPORT_SYMBOL(_local_bh_enable);
static inline void _local_bh_enable_ip(unsigned long ip)
{
	WARN_ON_ONCE(in_irq() || irqs_disabled());
#ifdef CONFIG_TRACE_IRQFLAGS
	local_irq_disable();
#endif
	/*
	 * Are softirqs going to be turned on now:
	 */
	if (softirq_count() == SOFTIRQ_OFFSET)
		trace_softirqs_on(ip);
	/*
	 * Keep preemption disabled until we are done with
	 * softirq processing:
	 */
	sub_preempt_count(SOFTIRQ_OFFSET - 1);

	if (unlikely(!in_interrupt() && local_softirq_pending()))
		do_softirq();

	dec_preempt_count();
#ifdef CONFIG_TRACE_IRQFLAGS
	local_irq_enable();
#endif
	preempt_check_resched();
}
void local_bh_enable(void)
{
	_local_bh_enable_ip((unsigned long)__builtin_return_address(0));
}
EXPORT_SYMBOL(local_bh_enable);

void local_bh_enable_ip(unsigned long ip)
{
	_local_bh_enable_ip(ip);
}
EXPORT_SYMBOL(local_bh_enable_ip);
/*
 * We restart softirq processing MAX_SOFTIRQ_RESTART times,
 * and we fall back to softirqd after that.
 *
 * This number has been established via experimentation.
 * The two things to balance are latency and fairness -
 * we want to handle softirqs as soon as possible, but they
 * should not be able to lock up the box.
 */
#define MAX_SOFTIRQ_RESTART 10
asmlinkage void __do_softirq(void)
{
	struct softirq_action *h;
	__u32 pending;
	int max_restart = MAX_SOFTIRQ_RESTART;
	int cpu;

	pending = local_softirq_pending();
	account_system_vtime(current);

	__local_bh_disable((unsigned long)__builtin_return_address(0));
	trace_softirq_enter();

	cpu = smp_processor_id();
restart:
	/* Reset the pending bitmask before enabling irqs */
	set_softirq_pending(0);

	local_irq_enable();

	h = softirq_vec;

	do {
		if (pending & 1) {
			int prev_count = preempt_count();

			h->action(h);

			if (unlikely(prev_count != preempt_count())) {
				printk(KERN_ERR "huh, entered softirq %td %p "
				       "with preempt_count %08x, "
				       "exited with %08x?\n", h - softirq_vec,
				       h->action, prev_count, preempt_count());
				preempt_count() = prev_count;
			}

			rcu_bh_qsctr_inc(cpu);
		}
		h++;
		pending >>= 1;
	} while (pending);

	local_irq_disable();

	pending = local_softirq_pending();
	if (pending && --max_restart)
		goto restart;

	if (pending)
		wakeup_softirqd();

	trace_softirq_exit();

	account_system_vtime(current);
	_local_bh_enable();
}
#ifndef __ARCH_HAS_DO_SOFTIRQ

asmlinkage void do_softirq(void)
{
	__u32 pending;
	unsigned long flags;

	if (in_interrupt())
		return;

	local_irq_save(flags);

	pending = local_softirq_pending();

	if (pending)
		__do_softirq();

	local_irq_restore(flags);
}

#endif
/*
 * Enter an interrupt context.
 */
void irq_enter(void)
{
#ifdef CONFIG_NO_HZ
	int cpu = smp_processor_id();
	if (idle_cpu(cpu) && !in_interrupt())
		tick_nohz_stop_idle(cpu);
#endif
	__irq_enter();
#ifdef CONFIG_NO_HZ
	if (idle_cpu(cpu))
		tick_nohz_update_jiffies();
#endif
}
#ifdef __ARCH_IRQ_EXIT_IRQS_DISABLED
# define invoke_softirq()	__do_softirq()
#else
# define invoke_softirq()	do_softirq()
#endif
/*
 * Exit an interrupt context. Process softirqs if needed and possible:
 */
void irq_exit(void)
{
	account_system_vtime(current);
	trace_hardirq_exit();
	sub_preempt_count(IRQ_EXIT_OFFSET);
	if (!in_interrupt() && local_softirq_pending())
		invoke_softirq();

#ifdef CONFIG_NO_HZ
	/* Make sure that timer wheel updates are propagated */
	if (!in_interrupt() && idle_cpu(smp_processor_id()) && !need_resched())
		tick_nohz_stop_sched_tick(0);
	rcu_irq_exit();
#endif
	preempt_enable_no_resched();
}
/*
 * This function must run with irqs disabled!
 */
inline void raise_softirq_irqoff(unsigned int nr)
{
	__raise_softirq_irqoff(nr);

	/*
	 * If we're in an interrupt or softirq, we're done
	 * (this also catches softirq-disabled code). We will
	 * actually run the softirq once we return from
	 * the irq or softirq.
	 *
	 * Otherwise we wake up ksoftirqd to make sure we
	 * schedule the softirq soon.
	 */
	if (!in_interrupt())
		wakeup_softirqd();
}
void raise_softirq(unsigned int nr)
{
	unsigned long flags;

	local_irq_save(flags);
	raise_softirq_irqoff(nr);
	local_irq_restore(flags);
}
void open_softirq(int nr, void (*action)(struct softirq_action *))
{
	softirq_vec[nr].action = action;
}
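
/*
 * Usage sketch (illustrative, not part of this file): a subsystem
 * registers its action once at init time, then raises the softirq
 * whenever it has work. The NET_TX_SOFTIRQ/net_tx_action pairing
 * mirrors how net/core/dev.c uses this API; treat it here as an
 * example, not a specification.
 *
 *	open_softirq(NET_TX_SOFTIRQ, net_tx_action);
 *	...
 *	raise_softirq(NET_TX_SOFTIRQ);
 */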
/* Tasklets */
struct tasklet_head
{
	struct tasklet_struct *head;
	struct tasklet_struct **tail;
};

static DEFINE_PER_CPU(struct tasklet_head, tasklet_vec);
static DEFINE_PER_CPU(struct tasklet_head, tasklet_hi_vec);
void __tasklet_schedule(struct tasklet_struct *t)
{
	unsigned long flags;

	local_irq_save(flags);
	t->next = NULL;
	*__get_cpu_var(tasklet_vec).tail = t;
	__get_cpu_var(tasklet_vec).tail = &(t->next);
	raise_softirq_irqoff(TASKLET_SOFTIRQ);
	local_irq_restore(flags);
}

EXPORT_SYMBOL(__tasklet_schedule);
void __tasklet_hi_schedule(struct tasklet_struct *t)
{
	unsigned long flags;

	local_irq_save(flags);
	t->next = NULL;
	*__get_cpu_var(tasklet_hi_vec).tail = t;
	__get_cpu_var(tasklet_hi_vec).tail = &(t->next);
	raise_softirq_irqoff(HI_SOFTIRQ);
	local_irq_restore(flags);
}

EXPORT_SYMBOL(__tasklet_hi_schedule);
static void tasklet_action(struct softirq_action *a)
{
	struct tasklet_struct *list;

	local_irq_disable();
	list = __get_cpu_var(tasklet_vec).head;
	__get_cpu_var(tasklet_vec).head = NULL;
	__get_cpu_var(tasklet_vec).tail = &__get_cpu_var(tasklet_vec).head;
	local_irq_enable();

	while (list) {
		struct tasklet_struct *t = list;

		list = list->next;

		if (tasklet_trylock(t)) {
			if (!atomic_read(&t->count)) {
				if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
					BUG();
				t->func(t->data);
				tasklet_unlock(t);
				continue;
			}
			tasklet_unlock(t);
		}

		local_irq_disable();
		t->next = NULL;
		*__get_cpu_var(tasklet_vec).tail = t;
		__get_cpu_var(tasklet_vec).tail = &(t->next);
		__raise_softirq_irqoff(TASKLET_SOFTIRQ);
		local_irq_enable();
	}
}
static void tasklet_hi_action(struct softirq_action *a)
{
	struct tasklet_struct *list;

	local_irq_disable();
	list = __get_cpu_var(tasklet_hi_vec).head;
	__get_cpu_var(tasklet_hi_vec).head = NULL;
	__get_cpu_var(tasklet_hi_vec).tail = &__get_cpu_var(tasklet_hi_vec).head;
	local_irq_enable();

	while (list) {
		struct tasklet_struct *t = list;

		list = list->next;

		if (tasklet_trylock(t)) {
			if (!atomic_read(&t->count)) {
				if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
					BUG();
				t->func(t->data);
				tasklet_unlock(t);
				continue;
			}
			tasklet_unlock(t);
		}

		local_irq_disable();
		t->next = NULL;
		*__get_cpu_var(tasklet_hi_vec).tail = t;
		__get_cpu_var(tasklet_hi_vec).tail = &(t->next);
		__raise_softirq_irqoff(HI_SOFTIRQ);
		local_irq_enable();
	}
}
void tasklet_init(struct tasklet_struct *t,
		  void (*func)(unsigned long), unsigned long data)
{
	t->next = NULL;
	t->state = 0;
	atomic_set(&t->count, 0);
	t->func = func;
	t->data = data;
}

EXPORT_SYMBOL(tasklet_init);
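
/*
 * Usage sketch (illustrative only; all names below are hypothetical):
 * a driver initializes a tasklet once, schedules it from its interrupt
 * handler to defer work, and kills it on teardown.
 *
 *	static void my_tasklet_fn(unsigned long data) { ... }
 *	static struct tasklet_struct my_tasklet;
 *
 *	tasklet_init(&my_tasklet, my_tasklet_fn, (unsigned long)dev);
 *	...
 *	tasklet_schedule(&my_tasklet);	(typically from the irq handler)
 *	...
 *	tasklet_kill(&my_tasklet);	(before freeing dev)
 */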
void tasklet_kill(struct tasklet_struct *t)
{
	if (in_interrupt())
		printk("Attempt to kill tasklet from interrupt\n");

	while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) {
		do
			yield();
		while (test_bit(TASKLET_STATE_SCHED, &t->state));
	}
	tasklet_unlock_wait(t);
	clear_bit(TASKLET_STATE_SCHED, &t->state);
}

EXPORT_SYMBOL(tasklet_kill);
DEFINE_PER_CPU(struct list_head [NR_SOFTIRQS], softirq_work_list);
EXPORT_PER_CPU_SYMBOL(softirq_work_list);

static void __local_trigger(struct call_single_data *cp, int softirq)
{
	struct list_head *head = &__get_cpu_var(softirq_work_list[softirq]);

	list_add_tail(&cp->list, head);

	/* Trigger the softirq only if the list was previously empty. */
	if (head->next == &cp->list)
		raise_softirq_irqoff(softirq);
}
#ifdef CONFIG_USE_GENERIC_SMP_HELPERS
static void remote_softirq_receive(void *data)
{
	struct call_single_data *cp = data;
	unsigned long flags;
	int softirq;

	softirq = cp->priv;

	local_irq_save(flags);
	__local_trigger(cp, softirq);
	local_irq_restore(flags);
}

static int __try_remote_softirq(struct call_single_data *cp, int cpu, int softirq)
{
	if (cpu_online(cpu)) {
		cp->func = remote_softirq_receive;
		cp->info = cp;
		cp->flags = 0;
		cp->priv = softirq;

		__smp_call_function_single(cpu, cp);
		return 0;
	}
	return 1;
}
#else /* CONFIG_USE_GENERIC_SMP_HELPERS */
static int __try_remote_softirq(struct call_single_data *cp, int cpu, int softirq)
{
	return 1;
}
#endif
/**
 * __send_remote_softirq - try to schedule softirq work on a remote cpu
 * @cp: private SMP call function data area
 * @cpu: the remote cpu
 * @this_cpu: the currently executing cpu
 * @softirq: the softirq for the work
 *
 * Attempt to schedule softirq work on a remote cpu. If this cannot be
 * done, the work is instead queued up on the local cpu.
 *
 * Interrupts must be disabled.
 */
void __send_remote_softirq(struct call_single_data *cp, int cpu, int this_cpu, int softirq)
{
	if (cpu == this_cpu || __try_remote_softirq(cp, cpu, softirq))
		__local_trigger(cp, softirq);
}
EXPORT_SYMBOL(__send_remote_softirq);
/**
 * send_remote_softirq - try to schedule softirq work on a remote cpu
 * @cp: private SMP call function data area
 * @cpu: the remote cpu
 * @softirq: the softirq for the work
 *
 * Like __send_remote_softirq except that disabling interrupts and
 * computing the current cpu is done for the caller.
 */
void send_remote_softirq(struct call_single_data *cp, int cpu, int softirq)
{
	unsigned long flags;
	int this_cpu;

	local_irq_save(flags);
	this_cpu = smp_processor_id();
	__send_remote_softirq(cp, cpu, this_cpu, softirq);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(send_remote_softirq);
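
/*
 * Usage sketch (illustrative; the struct embedding the call_single_data
 * and its fields are assumptions, not an API defined here): complete
 * block-layer work on the cpu that originally issued the request.
 *
 *	struct my_req {
 *		struct call_single_data csd;
 *		int submit_cpu;
 *		...
 *	};
 *
 *	send_remote_softirq(&req->csd, req->submit_cpu, BLOCK_SOFTIRQ);
 */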
static int __cpuinit remote_softirq_cpu_notify(struct notifier_block *self,
					       unsigned long action, void *hcpu)
{
	/*
	 * If a CPU goes away, splice its entries to the current CPU
	 * and trigger a run of the softirq
	 */
	if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
		int cpu = (unsigned long) hcpu;
		int i;

		local_irq_disable();
		for (i = 0; i < NR_SOFTIRQS; i++) {
			struct list_head *head = &per_cpu(softirq_work_list[i], cpu);
			struct list_head *local_head;

			if (list_empty(head))
				continue;

			local_head = &__get_cpu_var(softirq_work_list[i]);
			list_splice_init(head, local_head);
			raise_softirq_irqoff(i);
		}
		local_irq_enable();
	}

	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata remote_softirq_cpu_notifier = {
	.notifier_call	= remote_softirq_cpu_notify,
};
void __init softirq_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		int i;

		per_cpu(tasklet_vec, cpu).tail =
			&per_cpu(tasklet_vec, cpu).head;
		per_cpu(tasklet_hi_vec, cpu).tail =
			&per_cpu(tasklet_hi_vec, cpu).head;
		for (i = 0; i < NR_SOFTIRQS; i++)
			INIT_LIST_HEAD(&per_cpu(softirq_work_list[i], cpu));
	}

	register_hotcpu_notifier(&remote_softirq_cpu_notifier);

	open_softirq(TASKLET_SOFTIRQ, tasklet_action);
	open_softirq(HI_SOFTIRQ, tasklet_hi_action);
}
static int ksoftirqd(void * __bind_cpu)
{
	set_current_state(TASK_INTERRUPTIBLE);

	while (!kthread_should_stop()) {
		preempt_disable();
		if (!local_softirq_pending()) {
			preempt_enable_no_resched();
			schedule();
			preempt_disable();
		}

		__set_current_state(TASK_RUNNING);

		while (local_softirq_pending()) {
			/* Preempt disable stops cpu going offline.
			   If already offline, we'll be on wrong CPU:
			   don't process */
			if (cpu_is_offline((long)__bind_cpu))
				goto wait_to_die;
			do_softirq();
			preempt_enable_no_resched();
			cond_resched();
			preempt_disable();
		}
		preempt_enable();
		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);
	return 0;

wait_to_die:
	preempt_enable();
	/* Wait for kthread_stop */
	set_current_state(TASK_INTERRUPTIBLE);
	while (!kthread_should_stop()) {
		schedule();
		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);
	return 0;
}
#ifdef CONFIG_HOTPLUG_CPU
/*
 * tasklet_kill_immediate is called to remove a tasklet which can already be
 * scheduled for execution on @cpu.
 *
 * Unlike tasklet_kill, this function removes the tasklet
 * _immediately_, even if the tasklet is in TASKLET_STATE_SCHED state.
 *
 * When this function is called, @cpu must be in the CPU_DEAD state.
 */
void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu)
{
	struct tasklet_struct **i;

	BUG_ON(cpu_online(cpu));
	BUG_ON(test_bit(TASKLET_STATE_RUN, &t->state));

	if (!test_bit(TASKLET_STATE_SCHED, &t->state))
		return;

	/* CPU is dead, so no lock needed. */
	for (i = &per_cpu(tasklet_vec, cpu).head; *i; i = &(*i)->next) {
		if (*i == t) {
			*i = t->next;
			/* If this was the tail element, move the tail ptr */
			if (*i == NULL)
				per_cpu(tasklet_vec, cpu).tail = i;
			return;
		}
	}
	BUG();
}
static void takeover_tasklets(unsigned int cpu)
{
	/* CPU is dead, so no lock needed. */
	local_irq_disable();

	/* Find end, append list for that CPU. */
	if (&per_cpu(tasklet_vec, cpu).head != per_cpu(tasklet_vec, cpu).tail) {
		*(__get_cpu_var(tasklet_vec).tail) = per_cpu(tasklet_vec, cpu).head;
		__get_cpu_var(tasklet_vec).tail = per_cpu(tasklet_vec, cpu).tail;
		per_cpu(tasklet_vec, cpu).head = NULL;
		per_cpu(tasklet_vec, cpu).tail = &per_cpu(tasklet_vec, cpu).head;
	}
	raise_softirq_irqoff(TASKLET_SOFTIRQ);

	if (&per_cpu(tasklet_hi_vec, cpu).head != per_cpu(tasklet_hi_vec, cpu).tail) {
		*__get_cpu_var(tasklet_hi_vec).tail = per_cpu(tasklet_hi_vec, cpu).head;
		__get_cpu_var(tasklet_hi_vec).tail = per_cpu(tasklet_hi_vec, cpu).tail;
		per_cpu(tasklet_hi_vec, cpu).head = NULL;
		per_cpu(tasklet_hi_vec, cpu).tail = &per_cpu(tasklet_hi_vec, cpu).head;
	}
	raise_softirq_irqoff(HI_SOFTIRQ);

	local_irq_enable();
}
#endif /* CONFIG_HOTPLUG_CPU */
static int __cpuinit cpu_callback(struct notifier_block *nfb,
				  unsigned long action,
				  void *hcpu)
{
	int hotcpu = (unsigned long)hcpu;
	struct task_struct *p;

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		p = kthread_create(ksoftirqd, hcpu, "ksoftirqd/%d", hotcpu);
		if (IS_ERR(p)) {
			printk("ksoftirqd for %i failed\n", hotcpu);
			return NOTIFY_BAD;
		}
		kthread_bind(p, hotcpu);
		per_cpu(ksoftirqd, hotcpu) = p;
		break;
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		wake_up_process(per_cpu(ksoftirqd, hotcpu));
		break;
#ifdef CONFIG_HOTPLUG_CPU
	case CPU_UP_CANCELED:
	case CPU_UP_CANCELED_FROZEN:
		if (!per_cpu(ksoftirqd, hotcpu))
			break;
		/* Unbind so it can run. Fall thru. */
		kthread_bind(per_cpu(ksoftirqd, hotcpu),
			     any_online_cpu(cpu_online_map));
	case CPU_DEAD:
	case CPU_DEAD_FROZEN: {
		struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };

		p = per_cpu(ksoftirqd, hotcpu);
		per_cpu(ksoftirqd, hotcpu) = NULL;
		sched_setscheduler_nocheck(p, SCHED_FIFO, &param);
		kthread_stop(p);
		takeover_tasklets(hotcpu);
		break;
	}
#endif /* CONFIG_HOTPLUG_CPU */
	}
	return NOTIFY_OK;
}
static struct notifier_block __cpuinitdata cpu_nfb = {
	.notifier_call = cpu_callback
};

static __init int spawn_ksoftirqd(void)
{
	void *cpu = (void *)(long)smp_processor_id();
	int err = cpu_callback(&cpu_nfb, CPU_UP_PREPARE, cpu);

	BUG_ON(err == NOTIFY_BAD);
	cpu_callback(&cpu_nfb, CPU_ONLINE, cpu);
	register_cpu_notifier(&cpu_nfb);
	return 0;
}
early_initcall(spawn_ksoftirqd);
#ifdef CONFIG_SMP
/*
 * Call a function on all processors
 */
int on_each_cpu(void (*func) (void *info), void *info, int wait)
{
	int ret = 0;

	preempt_disable();
	ret = smp_call_function(func, info, wait);
	local_irq_disable();
	func(info);
	local_irq_enable();
	preempt_enable();
	return ret;
}
EXPORT_SYMBOL(on_each_cpu);
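
/*
 * Usage sketch (illustrative only; the flush helper is hypothetical):
 * run a function on every cpu, including the caller's, and wait for
 * all of them to finish before returning.
 *
 *	static void flush_local_cache(void *unused) { ... }
 *
 *	on_each_cpu(flush_local_cache, NULL, 1);
 */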
#endif