[NET_SCHED]: sch_cbq: fix cbq_undelay_prio for non-active priorites
[linux-2.6.22.y-op.git] / arch / m32r / kernel / smp.c
blob360129174b2bd17aaeed1fcc37c91e80a8100d05
/*
 *  linux/arch/m32r/kernel/smp.c
 *
 *  M32R SMP support routines.
 *
 *  Copyright (c) 2001, 2002  Hitoshi Yamamoto
 *
 *  Taken from i386 version.
 *    (c) 1995 Alan Cox, Building #3 <alan@redhat.com>
 *    (c) 1998-99, 2000 Ingo Molnar <mingo@redhat.com>
 *
 *  This code is released under the GNU General Public License version 2 or
 *  later.
 */
16 #undef DEBUG_SMP
18 #include <linux/irq.h>
19 #include <linux/interrupt.h>
20 #include <linux/spinlock.h>
21 #include <linux/mm.h>
22 #include <linux/smp.h>
23 #include <linux/profile.h>
24 #include <linux/cpu.h>
26 #include <asm/cacheflush.h>
27 #include <asm/pgalloc.h>
28 #include <asm/atomic.h>
29 #include <asm/io.h>
30 #include <asm/mmu_context.h>
31 #include <asm/m32r.h>
33 /*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
34 /* Data structures and variables */
35 /*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
38 * Structure and data for smp_call_function(). This is designed to minimise
39 * static memory requirements. It also looks cleaner.
41 static DEFINE_SPINLOCK(call_lock);
43 struct call_data_struct {
44 void (*func) (void *info);
45 void *info;
46 atomic_t started;
47 atomic_t finished;
48 int wait;
49 } __attribute__ ((__aligned__(SMP_CACHE_BYTES)));
51 static struct call_data_struct *call_data;
54 * For flush_cache_all()
56 static DEFINE_SPINLOCK(flushcache_lock);
57 static volatile unsigned long flushcache_cpumask = 0;
60 * For flush_tlb_others()
62 static volatile cpumask_t flush_cpumask;
63 static struct mm_struct *flush_mm;
64 static struct vm_area_struct *flush_vma;
65 static volatile unsigned long flush_va;
66 static DEFINE_SPINLOCK(tlbstate_lock);
67 #define FLUSH_ALL 0xffffffff
69 DECLARE_PER_CPU(int, prof_multiplier);
70 DECLARE_PER_CPU(int, prof_old_multiplier);
71 DECLARE_PER_CPU(int, prof_counter);
73 extern spinlock_t ipi_lock[];
75 /*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
76 /* Function Prototypes */
77 /*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
79 void smp_send_reschedule(int);
80 void smp_reschedule_interrupt(void);
82 void smp_flush_cache_all(void);
83 void smp_flush_cache_all_interrupt(void);
85 void smp_flush_tlb_all(void);
86 static void flush_tlb_all_ipi(void *);
88 void smp_flush_tlb_mm(struct mm_struct *);
89 void smp_flush_tlb_range(struct vm_area_struct *, unsigned long, \
90 unsigned long);
91 void smp_flush_tlb_page(struct vm_area_struct *, unsigned long);
92 static void flush_tlb_others(cpumask_t, struct mm_struct *,
93 struct vm_area_struct *, unsigned long);
94 void smp_invalidate_interrupt(void);
96 void smp_send_stop(void);
97 static void stop_this_cpu(void *);
99 int smp_call_function(void (*) (void *), void *, int, int);
100 void smp_call_function_interrupt(void);
102 void smp_send_timer(void);
103 void smp_ipi_timer_interrupt(struct pt_regs *);
104 void smp_local_timer_interrupt(void);
106 void send_IPI_allbutself(int, int);
107 static void send_IPI_mask(cpumask_t, int, int);
108 unsigned long send_IPI_mask_phys(cpumask_t, int, int);
110 /*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
111 /* Rescheduling request Routines */
112 /*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
114 /*==========================================================================*
115 * Name: smp_send_reschedule
117 * Description: This routine requests other CPU to execute rescheduling.
118 * 1.Send 'RESCHEDULE_IPI' to other CPU.
119 * Request other CPU to execute 'smp_reschedule_interrupt()'.
121 * Born on Date: 2002.02.05
123 * Arguments: cpu_id - Target CPU ID
125 * Returns: void (cannot fail)
127 * Modification log:
128 * Date Who Description
129 * ---------- --- --------------------------------------------------------
131 *==========================================================================*/
132 void smp_send_reschedule(int cpu_id)
134 WARN_ON(cpu_is_offline(cpu_id));
135 send_IPI_mask(cpumask_of_cpu(cpu_id), RESCHEDULE_IPI, 1);
/*
 * smp_reschedule_interrupt - handler for RESCHEDULE_IPI.
 *
 * Intentionally empty: the actual reschedule happens at interrupt-exit
 * time, so receiving the IPI is all that is needed.
 */
void smp_reschedule_interrupt(void)
{
	/* nothing to do */
}
162 /*==========================================================================*
163 * Name: smp_flush_cache_all
165 * Description: This routine sends a 'INVALIDATE_CACHE_IPI' to all other
166 * CPUs in the system.
168 * Born on Date: 2003-05-28
170 * Arguments: NONE
172 * Returns: void (cannot fail)
174 * Modification log:
175 * Date Who Description
176 * ---------- --- --------------------------------------------------------
178 *==========================================================================*/
179 void smp_flush_cache_all(void)
181 cpumask_t cpumask;
182 unsigned long *mask;
184 preempt_disable();
185 cpumask = cpu_online_map;
186 cpu_clear(smp_processor_id(), cpumask);
187 spin_lock(&flushcache_lock);
188 mask=cpus_addr(cpumask);
189 atomic_set_mask(*mask, (atomic_t *)&flushcache_cpumask);
190 send_IPI_mask(cpumask, INVALIDATE_CACHE_IPI, 0);
191 _flush_cache_copyback_all();
192 while (flushcache_cpumask)
193 mb();
194 spin_unlock(&flushcache_lock);
195 preempt_enable();
198 void smp_flush_cache_all_interrupt(void)
200 _flush_cache_copyback_all();
201 clear_bit(smp_processor_id(), &flushcache_cpumask);
204 /*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
205 /* TLB flush request Routins */
206 /*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
208 /*==========================================================================*
209 * Name: smp_flush_tlb_all
211 * Description: This routine flushes all processes TLBs.
212 * 1.Request other CPU to execute 'flush_tlb_all_ipi()'.
213 * 2.Execute 'do_flush_tlb_all_local()'.
215 * Born on Date: 2002.02.05
217 * Arguments: NONE
219 * Returns: void (cannot fail)
221 * Modification log:
222 * Date Who Description
223 * ---------- --- --------------------------------------------------------
225 *==========================================================================*/
226 void smp_flush_tlb_all(void)
228 unsigned long flags;
230 preempt_disable();
231 local_irq_save(flags);
232 __flush_tlb_all();
233 local_irq_restore(flags);
234 smp_call_function(flush_tlb_all_ipi, NULL, 1, 1);
235 preempt_enable();
/*
 * flush_tlb_all_ipi - IPI callback that flushes the local TLB.
 * @info: unused.
 */
static void flush_tlb_all_ipi(void *info)
{
	__flush_tlb_all();
}
260 /*==========================================================================*
261 * Name: smp_flush_tlb_mm
263 * Description: This routine flushes the specified mm context TLB's.
265 * Born on Date: 2002.02.05
267 * Arguments: *mm - a pointer to the mm struct for flush TLB
269 * Returns: void (cannot fail)
271 * Modification log:
272 * Date Who Description
273 * ---------- --- --------------------------------------------------------
275 *==========================================================================*/
276 void smp_flush_tlb_mm(struct mm_struct *mm)
278 int cpu_id;
279 cpumask_t cpu_mask;
280 unsigned long *mmc;
281 unsigned long flags;
283 preempt_disable();
284 cpu_id = smp_processor_id();
285 mmc = &mm->context[cpu_id];
286 cpu_mask = mm->cpu_vm_mask;
287 cpu_clear(cpu_id, cpu_mask);
289 if (*mmc != NO_CONTEXT) {
290 local_irq_save(flags);
291 *mmc = NO_CONTEXT;
292 if (mm == current->mm)
293 activate_context(mm);
294 else
295 cpu_clear(cpu_id, mm->cpu_vm_mask);
296 local_irq_restore(flags);
298 if (!cpus_empty(cpu_mask))
299 flush_tlb_others(cpu_mask, mm, NULL, FLUSH_ALL);
301 preempt_enable();
304 /*==========================================================================*
305 * Name: smp_flush_tlb_range
307 * Description: This routine flushes a range of pages.
309 * Born on Date: 2002.02.05
311 * Arguments: *mm - a pointer to the mm struct for flush TLB
312 * start - not used
313 * end - not used
315 * Returns: void (cannot fail)
317 * Modification log:
318 * Date Who Description
319 * ---------- --- --------------------------------------------------------
321 *==========================================================================*/
322 void smp_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
323 unsigned long end)
325 smp_flush_tlb_mm(vma->vm_mm);
328 /*==========================================================================*
329 * Name: smp_flush_tlb_page
331 * Description: This routine flushes one page.
333 * Born on Date: 2002.02.05
335 * Arguments: *vma - a pointer to the vma struct include va
336 * va - virtual address for flush TLB
338 * Returns: void (cannot fail)
340 * Modification log:
341 * Date Who Description
342 * ---------- --- --------------------------------------------------------
344 *==========================================================================*/
345 void smp_flush_tlb_page(struct vm_area_struct *vma, unsigned long va)
347 struct mm_struct *mm = vma->vm_mm;
348 int cpu_id;
349 cpumask_t cpu_mask;
350 unsigned long *mmc;
351 unsigned long flags;
353 preempt_disable();
354 cpu_id = smp_processor_id();
355 mmc = &mm->context[cpu_id];
356 cpu_mask = mm->cpu_vm_mask;
357 cpu_clear(cpu_id, cpu_mask);
359 #ifdef DEBUG_SMP
360 if (!mm)
361 BUG();
362 #endif
364 if (*mmc != NO_CONTEXT) {
365 local_irq_save(flags);
366 va &= PAGE_MASK;
367 va |= (*mmc & MMU_CONTEXT_ASID_MASK);
368 __flush_tlb_page(va);
369 local_irq_restore(flags);
371 if (!cpus_empty(cpu_mask))
372 flush_tlb_others(cpu_mask, mm, vma, va);
374 preempt_enable();
377 /*==========================================================================*
378 * Name: flush_tlb_others
380 * Description: This routine requests other CPU to execute flush TLB.
381 * 1.Setup parmeters.
382 * 2.Send 'INVALIDATE_TLB_IPI' to other CPU.
383 * Request other CPU to execute 'smp_invalidate_interrupt()'.
384 * 3.Wait for other CPUs operation finished.
386 * Born on Date: 2002.02.05
388 * Arguments: cpumask - bitmap of target CPUs
389 * *mm - a pointer to the mm struct for flush TLB
390 * *vma - a pointer to the vma struct include va
391 * va - virtual address for flush TLB
393 * Returns: void (cannot fail)
395 * Modification log:
396 * Date Who Description
397 * ---------- --- --------------------------------------------------------
399 *==========================================================================*/
400 static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
401 struct vm_area_struct *vma, unsigned long va)
403 unsigned long *mask;
404 #ifdef DEBUG_SMP
405 unsigned long flags;
406 __save_flags(flags);
407 if (!(flags & 0x0040)) /* Interrupt Disable NONONO */
408 BUG();
409 #endif /* DEBUG_SMP */
412 * A couple of (to be removed) sanity checks:
414 * - we do not send IPIs to not-yet booted CPUs.
415 * - current CPU must not be in mask
416 * - mask must exist :)
418 BUG_ON(cpus_empty(cpumask));
420 BUG_ON(cpu_isset(smp_processor_id(), cpumask));
421 BUG_ON(!mm);
423 /* If a CPU which we ran on has gone down, OK. */
424 cpus_and(cpumask, cpumask, cpu_online_map);
425 if (cpus_empty(cpumask))
426 return;
429 * i'm not happy about this global shared spinlock in the
430 * MM hot path, but we'll see how contended it is.
431 * Temporarily this turns IRQs off, so that lockups are
432 * detected by the NMI watchdog.
434 spin_lock(&tlbstate_lock);
436 flush_mm = mm;
437 flush_vma = vma;
438 flush_va = va;
439 mask=cpus_addr(cpumask);
440 atomic_set_mask(*mask, (atomic_t *)&flush_cpumask);
443 * We have to send the IPI only to
444 * CPUs affected.
446 send_IPI_mask(cpumask, INVALIDATE_TLB_IPI, 0);
448 while (!cpus_empty(flush_cpumask)) {
449 /* nothing. lockup detection does not belong here */
450 mb();
453 flush_mm = NULL;
454 flush_vma = NULL;
455 flush_va = 0;
456 spin_unlock(&tlbstate_lock);
459 /*==========================================================================*
460 * Name: smp_invalidate_interrupt
462 * Description: This routine executes on CPU which received
463 * 'INVALIDATE_TLB_IPI'.
464 * 1.Flush local TLB.
465 * 2.Report flush TLB process was finished.
467 * Born on Date: 2002.02.05
469 * Arguments: NONE
471 * Returns: void (cannot fail)
473 * Modification log:
474 * Date Who Description
475 * ---------- --- --------------------------------------------------------
477 *==========================================================================*/
478 void smp_invalidate_interrupt(void)
480 int cpu_id = smp_processor_id();
481 unsigned long *mmc = &flush_mm->context[cpu_id];
483 if (!cpu_isset(cpu_id, flush_cpumask))
484 return;
486 if (flush_va == FLUSH_ALL) {
487 *mmc = NO_CONTEXT;
488 if (flush_mm == current->active_mm)
489 activate_context(flush_mm);
490 else
491 cpu_clear(cpu_id, flush_mm->cpu_vm_mask);
492 } else {
493 unsigned long va = flush_va;
495 if (*mmc != NO_CONTEXT) {
496 va &= PAGE_MASK;
497 va |= (*mmc & MMU_CONTEXT_ASID_MASK);
498 __flush_tlb_page(va);
501 cpu_clear(cpu_id, flush_cpumask);
504 /*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
505 /* Stop CPU request Routins */
506 /*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
508 /*==========================================================================*
509 * Name: smp_send_stop
511 * Description: This routine requests stop all CPUs.
512 * 1.Request other CPU to execute 'stop_this_cpu()'.
514 * Born on Date: 2002.02.05
516 * Arguments: NONE
518 * Returns: void (cannot fail)
520 * Modification log:
521 * Date Who Description
522 * ---------- --- --------------------------------------------------------
524 *==========================================================================*/
525 void smp_send_stop(void)
527 smp_call_function(stop_this_cpu, NULL, 1, 0);
530 /*==========================================================================*
531 * Name: stop_this_cpu
533 * Description: This routine halt CPU.
535 * Born on Date: 2002.02.05
537 * Arguments: NONE
539 * Returns: void (cannot fail)
541 * Modification log:
542 * Date Who Description
543 * ---------- --- --------------------------------------------------------
545 *==========================================================================*/
546 static void stop_this_cpu(void *dummy)
548 int cpu_id = smp_processor_id();
551 * Remove this CPU:
553 cpu_clear(cpu_id, cpu_online_map);
556 * PSW IE = 1;
557 * IMASK = 0;
558 * goto SLEEP
560 local_irq_disable();
561 outl(0, M32R_ICU_IMASK_PORTL);
562 inl(M32R_ICU_IMASK_PORTL); /* dummy read */
563 local_irq_enable();
565 for ( ; ; );
568 /*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
569 /* Call function Routins */
570 /*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
572 /*==========================================================================*
573 * Name: smp_call_function
575 * Description: This routine sends a 'CALL_FUNCTION_IPI' to all other CPUs
576 * in the system.
578 * Born on Date: 2002.02.05
580 * Arguments: *func - The function to run. This must be fast and
581 * non-blocking.
582 * *info - An arbitrary pointer to pass to the function.
583 * nonatomic - currently unused.
584 * wait - If true, wait (atomically) until function has
585 * completed on other CPUs.
587 * Returns: 0 on success, else a negative status code. Does not return
588 * until remote CPUs are nearly ready to execute <<func>> or
589 * are or have executed.
591 * Cautions: You must not call this function with disabled interrupts or
592 * from a hardware interrupt handler, you may call it from a
593 * bottom half handler.
595 * Modification log:
596 * Date Who Description
597 * ---------- --- --------------------------------------------------------
599 *==========================================================================*/
600 int smp_call_function(void (*func) (void *info), void *info, int nonatomic,
601 int wait)
603 struct call_data_struct data;
604 int cpus;
606 #ifdef DEBUG_SMP
607 unsigned long flags;
608 __save_flags(flags);
609 if (!(flags & 0x0040)) /* Interrupt Disable NONONO */
610 BUG();
611 #endif /* DEBUG_SMP */
613 /* Holding any lock stops cpus from going down. */
614 spin_lock(&call_lock);
615 cpus = num_online_cpus() - 1;
617 if (!cpus) {
618 spin_unlock(&call_lock);
619 return 0;
622 /* Can deadlock when called with interrupts disabled */
623 WARN_ON(irqs_disabled());
625 data.func = func;
626 data.info = info;
627 atomic_set(&data.started, 0);
628 data.wait = wait;
629 if (wait)
630 atomic_set(&data.finished, 0);
632 call_data = &data;
633 mb();
635 /* Send a message to all other CPUs and wait for them to respond */
636 send_IPI_allbutself(CALL_FUNCTION_IPI, 0);
638 /* Wait for response */
639 while (atomic_read(&data.started) != cpus)
640 barrier();
642 if (wait)
643 while (atomic_read(&data.finished) != cpus)
644 barrier();
645 spin_unlock(&call_lock);
647 return 0;
650 /*==========================================================================*
651 * Name: smp_call_function_interrupt
653 * Description: This routine executes on CPU which received
654 * 'CALL_FUNCTION_IPI'.
656 * Born on Date: 2002.02.05
658 * Arguments: NONE
660 * Returns: void (cannot fail)
662 * Modification log:
663 * Date Who Description
664 * ---------- --- --------------------------------------------------------
666 *==========================================================================*/
667 void smp_call_function_interrupt(void)
669 void (*func) (void *info) = call_data->func;
670 void *info = call_data->info;
671 int wait = call_data->wait;
674 * Notify initiating CPU that I've grabbed the data and am
675 * about to execute the function
677 mb();
678 atomic_inc(&call_data->started);
680 * At this point the info structure may be out of scope unless wait==1
682 irq_enter();
683 (*func)(info);
684 irq_exit();
686 if (wait) {
687 mb();
688 atomic_inc(&call_data->finished);
692 /*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
693 /* Timer Routins */
694 /*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
696 /*==========================================================================*
697 * Name: smp_send_timer
699 * Description: This routine sends a 'LOCAL_TIMER_IPI' to all other CPUs
700 * in the system.
702 * Born on Date: 2002.02.05
704 * Arguments: NONE
706 * Returns: void (cannot fail)
708 * Modification log:
709 * Date Who Description
710 * ---------- --- --------------------------------------------------------
712 *==========================================================================*/
713 void smp_send_timer(void)
715 send_IPI_allbutself(LOCAL_TIMER_IPI, 1);
/*
 * smp_ipi_timer_interrupt - handler for LOCAL_TIMER_IPI.
 * @regs: saved register state at the time of the interrupt.
 *
 * Runs the local timer tick (smp_local_timer_interrupt) inside an
 * irq_enter/irq_exit section, with the irq regs set for profiling.
 */
void smp_ipi_timer_interrupt(struct pt_regs *regs)
{
	struct pt_regs *old_regs;
	old_regs = set_irq_regs(regs);
	irq_enter();
	smp_local_timer_interrupt();
	irq_exit();
	set_irq_regs(old_regs);
}
745 /*==========================================================================*
746 * Name: smp_local_timer_interrupt
748 * Description: Local timer interrupt handler. It does both profiling and
749 * process statistics/rescheduling.
750 * We do profiling in every local tick, statistics/rescheduling
751 * happen only every 'profiling multiplier' ticks. The default
752 * multiplier is 1 and it can be changed by writing the new
753 * multiplier value into /proc/profile.
755 * Born on Date: 2002.02.05
757 * Arguments: *regs - a pointer to the saved regster info
759 * Returns: void (cannot fail)
761 * Original: arch/i386/kernel/apic.c
763 * Modification log:
764 * Date Who Description
765 * ---------- --- --------------------------------------------------------
766 * 2003-06-24 hy use per_cpu structure.
767 *==========================================================================*/
768 void smp_local_timer_interrupt(void)
770 int user = user_mode(get_irq_regs());
771 int cpu_id = smp_processor_id();
774 * The profiling function is SMP safe. (nothing can mess
775 * around with "current", and the profiling counters are
776 * updated with atomic operations). This is especially
777 * useful with a profiling multiplier != 1
780 profile_tick(CPU_PROFILING);
782 if (--per_cpu(prof_counter, cpu_id) <= 0) {
784 * The multiplier may have changed since the last time we got
785 * to this point as a result of the user writing to
786 * /proc/profile. In this case we need to adjust the APIC
787 * timer accordingly.
789 * Interrupts are already masked off at this point.
791 per_cpu(prof_counter, cpu_id)
792 = per_cpu(prof_multiplier, cpu_id);
793 if (per_cpu(prof_counter, cpu_id)
794 != per_cpu(prof_old_multiplier, cpu_id))
796 per_cpu(prof_old_multiplier, cpu_id)
797 = per_cpu(prof_counter, cpu_id);
800 update_process_times(user);
804 /*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
805 /* Send IPI Routins */
806 /*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
808 /*==========================================================================*
809 * Name: send_IPI_allbutself
811 * Description: This routine sends a IPI to all other CPUs in the system.
813 * Born on Date: 2002.02.05
815 * Arguments: ipi_num - Number of IPI
816 * try - 0 : Send IPI certainly.
817 * !0 : The following IPI is not sended when Target CPU
818 * has not received the before IPI.
820 * Returns: void (cannot fail)
822 * Modification log:
823 * Date Who Description
824 * ---------- --- --------------------------------------------------------
826 *==========================================================================*/
827 void send_IPI_allbutself(int ipi_num, int try)
829 cpumask_t cpumask;
831 cpumask = cpu_online_map;
832 cpu_clear(smp_processor_id(), cpumask);
834 send_IPI_mask(cpumask, ipi_num, try);
837 /*==========================================================================*
838 * Name: send_IPI_mask
840 * Description: This routine sends a IPI to CPUs in the system.
842 * Born on Date: 2002.02.05
844 * Arguments: cpu_mask - Bitmap of target CPUs logical ID
845 * ipi_num - Number of IPI
846 * try - 0 : Send IPI certainly.
847 * !0 : The following IPI is not sended when Target CPU
848 * has not received the before IPI.
850 * Returns: void (cannot fail)
852 * Modification log:
853 * Date Who Description
854 * ---------- --- --------------------------------------------------------
856 *==========================================================================*/
857 static void send_IPI_mask(cpumask_t cpumask, int ipi_num, int try)
859 cpumask_t physid_mask, tmp;
860 int cpu_id, phys_id;
861 int num_cpus = num_online_cpus();
863 if (num_cpus <= 1) /* NO MP */
864 return;
866 cpus_and(tmp, cpumask, cpu_online_map);
867 BUG_ON(!cpus_equal(cpumask, tmp));
869 physid_mask = CPU_MASK_NONE;
870 for_each_cpu_mask(cpu_id, cpumask){
871 if ((phys_id = cpu_to_physid(cpu_id)) != -1)
872 cpu_set(phys_id, physid_mask);
875 send_IPI_mask_phys(physid_mask, ipi_num, try);
878 /*==========================================================================*
879 * Name: send_IPI_mask_phys
881 * Description: This routine sends a IPI to other CPUs in the system.
883 * Born on Date: 2002.02.05
885 * Arguments: cpu_mask - Bitmap of target CPUs physical ID
886 * ipi_num - Number of IPI
887 * try - 0 : Send IPI certainly.
888 * !0 : The following IPI is not sended when Target CPU
889 * has not received the before IPI.
891 * Returns: IPICRi regster value.
893 * Modification log:
894 * Date Who Description
895 * ---------- --- --------------------------------------------------------
897 *==========================================================================*/
898 unsigned long send_IPI_mask_phys(cpumask_t physid_mask, int ipi_num,
899 int try)
901 spinlock_t *ipilock;
902 volatile unsigned long *ipicr_addr;
903 unsigned long ipicr_val;
904 unsigned long my_physid_mask;
905 unsigned long mask = cpus_addr(physid_mask)[0];
908 if (mask & ~physids_coerce(phys_cpu_present_map))
909 BUG();
910 if (ipi_num >= NR_IPIS)
911 BUG();
913 mask <<= IPI_SHIFT;
914 ipilock = &ipi_lock[ipi_num];
915 ipicr_addr = (volatile unsigned long *)(M32R_ICU_IPICR_ADDR
916 + (ipi_num << 2));
917 my_physid_mask = ~(1 << smp_processor_id());
920 * lock ipi_lock[i]
921 * check IPICRi == 0
922 * write IPICRi (send IPIi)
923 * unlock ipi_lock[i]
925 spin_lock(ipilock);
926 __asm__ __volatile__ (
927 ";; CHECK IPICRi == 0 \n\t"
928 ".fillinsn \n"
929 "1: \n\t"
930 "ld %0, @%1 \n\t"
931 "and %0, %4 \n\t"
932 "beqz %0, 2f \n\t"
933 "bnez %3, 3f \n\t"
934 "bra 1b \n\t"
935 ";; WRITE IPICRi (send IPIi) \n\t"
936 ".fillinsn \n"
937 "2: \n\t"
938 "st %2, @%1 \n\t"
939 ".fillinsn \n"
940 "3: \n\t"
941 : "=&r"(ipicr_val)
942 : "r"(ipicr_addr), "r"(mask), "r"(try), "r"(my_physid_mask)
943 : "memory"
945 spin_unlock(ipilock);
947 return ipicr_val;