/*
 *	linux/arch/alpha/kernel/smp.c
 */

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/tasks.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/delay.h>

#include <asm/hwrpb.h>
#include <asm/ptrace.h>
#include <asm/atomic.h>

#include <asm/io.h>
#include <asm/irq.h>
#include <asm/bitops.h>
#include <asm/pgtable.h>
#include <asm/spinlock.h>
#include <asm/hardirq.h>
#include <asm/softirq.h>

#define __KERNEL_SYSCALLS__
#include <asm/unistd.h>

#include "proto.h"
#include "irq.h"

#define DEBUG_SMP 0
#if DEBUG_SMP
#define DBGS(args)	printk args
#else
#define DBGS(args)
#endif

/* A collection of per-processor data.  */
struct cpuinfo_alpha cpu_data[NR_CPUS];

/* A collection of single bit ipi messages.  */
static struct {
	unsigned long bits __cacheline_aligned;
} ipi_data[NR_CPUS];

enum ipi_message_type {
	IPI_RESCHEDULE,
	IPI_CALL_FUNC,
	IPI_CPU_STOP,
};

spinlock_t kernel_flag __cacheline_aligned = SPIN_LOCK_UNLOCKED;

/* Set to a secondary's cpuid when it comes online.  */
static unsigned long smp_secondary_alive;

unsigned long cpu_present_mask;	/* Which cpus ids came online.  */

static int max_cpus = -1;	/* Command-line limitation.  */
int smp_boot_cpuid;		/* Which processor we booted from.  */
int smp_num_probed;		/* Internal processor count */
int smp_num_cpus = 1;		/* Number that came online.  */
int smp_threads_ready;		/* True once the per process idle is forked. */
cycles_t cacheflush_time;

int cpu_number_map[NR_CPUS];
int __cpu_logical_map[NR_CPUS];

extern void calibrate_delay(void);
extern asmlinkage void entInt(void);

/*
 * Process bootcommand SMP options, like "nosmp" and "maxcpus=".
 */
void __init
smp_setup(char *str, int *ints)
{
	if (ints && ints[0] > 0)
		max_cpus = ints[1];
	else
		max_cpus = 0;
}

/*
 * Called by both boot and secondaries to move global data into
 * per-processor storage.
 */
static inline void __init
smp_store_cpu_info(int cpuid)
{
	cpu_data[cpuid].loops_per_sec = loops_per_sec;
	cpu_data[cpuid].last_asn
	  = (cpuid << WIDTH_HARDWARE_ASN) + ASN_FIRST_VERSION;
}

/*
 * Ideally sets up per-cpu profiling hooks.  Doesn't do much now...
 */
static inline void __init
smp_setup_percpu_timer(int cpuid)
{
	cpu_data[cpuid].prof_counter = 1;
	cpu_data[cpuid].prof_multiplier = 1;

#ifdef NOT_YET_PROFILING
	load_profile_irq(mid_xlate[cpu], lvl14_resolution);
	if (cpu == smp_boot_cpuid)
		enable_pil_irq(14);
#endif
}

/*
 * Where secondaries begin a life of C.
 */
void __init
smp_callin(void)
{
	int cpuid = hard_smp_processor_id();

	DBGS(("CALLIN %d state 0x%lx\n", cpuid, current->state));

	/* Turn on machine checks.  */
	wrmces(7);

	/* Set trap vectors.  */
	trap_init();

	/* Set interrupt vector.  */
	wrent(entInt, 0);

	/* Setup the scheduler for this processor.  */
	init_idle();

	/* Get our local ticker going.  */
	smp_setup_percpu_timer(cpuid);

	/* Must have completely accurate bogos.  */
	__sti();
	calibrate_delay();
	smp_store_cpu_info(cpuid);

	/* Allow master to continue.  */
	wmb();
	smp_secondary_alive = cpuid;

	/* Wait for the go code.  */
	while (!smp_threads_ready)
		barrier();

	DBGS(("smp_callin: commencing CPU %d current %p\n",
	      cpuid, current));

	/* Do nothing.  */
	cpu_idle(NULL);
}

/*
 * Rough estimation for SMP scheduling, this is the number of cycles it
 * takes for a fully memory-limited process to flush the SMP-local cache.
 *
 * We are not told how much cache there is, so we have to guess.
 */
static void __init
smp_tune_scheduling (void)
{
	struct percpu_struct *cpu;
	unsigned long on_chip_cache;
	unsigned long freq;

	cpu = (struct percpu_struct*)((char*)hwrpb + hwrpb->processor_offset);
	switch (cpu->type)
	{
	case EV45_CPU:
		on_chip_cache = 16 + 16;
		break;

	case EV5_CPU:
	case EV56_CPU:
		on_chip_cache = 8 + 8 + 96;
		break;

	case PCA56_CPU:
		on_chip_cache = 16 + 8;
		break;

	case EV6_CPU:
		on_chip_cache = 64 + 64;
		break;

	default:
		on_chip_cache = 8 + 8;
		break;
	}

	freq = hwrpb->cycle_freq ? : est_cycle_freq;

	/* Magic estimation stolen from x86 port.  */
	cacheflush_time = freq / 1024 * on_chip_cache / 5000;
}
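
/* Illustrative arithmetic only: on an EV6 (64 KB + 64 KB = 128 KB of
   on-chip cache per the table above) running at an assumed 500 MHz,
	500000000 / 1024 * 128 / 5000 ~= 12500 cycles,
   which the scheduler can use as a rough measure of how expensive it is
   to migrate a cache-warm task to another CPU.  */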

/*
 * Send a message to a secondary's console.  "START" is one such
 * interesting message.  ;-)
 */
static void
send_secondary_console_msg(char *str, int cpuid)
{
	struct percpu_struct *cpu;
	register char *cp1, *cp2;
	unsigned long cpumask;
	size_t len;
	long timeout;

	cpu = (struct percpu_struct *)
		((char*)hwrpb
		 + hwrpb->processor_offset
		 + cpuid * hwrpb->processor_size);

	cpumask = (1L << cpuid);
	if (hwrpb->txrdy & cpumask)
		goto delay1;
 ready1:

	cp2 = str;
	len = strlen(cp2);
	*(unsigned int *)&cpu->ipc_buffer[0] = len;
	cp1 = (char *) &cpu->ipc_buffer[1];
	memcpy(cp1, cp2, len);

	/* atomic test and set */
	wmb();
	set_bit(cpuid, &hwrpb->rxrdy);

	if (hwrpb->txrdy & cpumask)
		goto delay2;
 ready2:
	return;

 delay1:
	/* Wait one second.  Note that jiffies aren't ticking yet.  */
	for (timeout = 100000; timeout > 0; --timeout) {
		if (!(hwrpb->txrdy & cpumask))
			goto ready1;
		udelay(10);
		barrier();
	}
	goto timeout;

 delay2:
	/* Wait one second.  */
	for (timeout = 100000; timeout > 0; --timeout) {
		if (!(hwrpb->txrdy & cpumask))
			goto ready2;
		udelay(10);
		barrier();
	}
	goto timeout;

 timeout:
	printk("Processor %x not ready\n", cpuid);
	return;
}
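
/* A sketch of the handshake used above and in recv_secondary_console_msg
   below: the kernel copies a length word plus text into the target CPU's
   ipc_buffer and sets that CPU's bit in hwrpb->rxrdy to say "message
   waiting"; a secondary's console sets its bit in hwrpb->txrdy when it has
   a message for us, which handle_ipi() notices and hands off to
   recv_secondary_console_msg().  */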

/*
 * A secondary console wants to send a message.  Receive it.
 */
static void
recv_secondary_console_msg(void)
{
	int mycpu, i, cnt;
	unsigned long txrdy = hwrpb->txrdy;
	char *cp1, *cp2, buf[80];
	struct percpu_struct *cpu;

	DBGS(("recv_secondary_console_msg: TXRDY 0x%lx.\n", txrdy));

	mycpu = hard_smp_processor_id();

	for (i = 0; i < NR_CPUS; i++) {
		if (!(txrdy & (1L << i)))
			continue;

		DBGS(("recv_secondary_console_msg: "
		      "TXRDY contains CPU %d.\n", i));

		cpu = (struct percpu_struct *)
			((char*)hwrpb
			 + hwrpb->processor_offset
			 + i * hwrpb->processor_size);

		DBGS(("recv_secondary_console_msg: on %d from %d"
		      " HALT_REASON 0x%lx FLAGS 0x%lx\n",
		      mycpu, i, cpu->halt_reason, cpu->flags));

		cnt = cpu->ipc_buffer[0] >> 32;
		if (cnt <= 0 || cnt >= 80)
			strcpy(buf, "<<< BOGUS MSG >>>");
		else {
			cp1 = (char *) &cpu->ipc_buffer[11];
			cp2 = buf;
			strcpy(cp2, cp1);

			while ((cp2 = strchr(cp2, '\r')) != 0) {
				*cp2 = ' ';
				if (cp2[1] == '\n')
					cp2[1] = ' ';
			}
		}

		printk(KERN_INFO "recv_secondary_console_msg: on %d "
		       "message is '%s'\n", mycpu, buf);
	}

	hwrpb->txrdy = 0;
}

/*
 * Convince the console to have a secondary cpu begin execution.
 */
static int __init
secondary_cpu_start(int cpuid, struct task_struct *idle)
{
	struct percpu_struct *cpu;
	struct pcb_struct *hwpcb;
	long timeout;

	cpu = (struct percpu_struct *)
		((char*)hwrpb
		 + hwrpb->processor_offset
		 + cpuid * hwrpb->processor_size);
	hwpcb = (struct pcb_struct *) cpu->hwpcb;

	/* Initialize the CPU's HWPCB to something just good enough for
	   us to get started.  Immediately after starting, we'll swpctx
	   to the target idle task's tss.  Reuse the stack in the mean
	   time.  Precalculate the target PCBB.  */
	hwpcb->ksp = (unsigned long) idle + sizeof(union task_union) - 16;
	hwpcb->usp = 0;
	hwpcb->ptbr = idle->tss.ptbr;
	hwpcb->pcc = 0;
	hwpcb->asn = 0;
	hwpcb->unique = virt_to_phys(&idle->tss);
	hwpcb->flags = idle->tss.pal_flags;
	hwpcb->res1 = hwpcb->res2 = 0;

	DBGS(("KSP 0x%lx PTBR 0x%lx VPTBR 0x%lx UNIQUE 0x%lx\n",
	      hwpcb->ksp, hwpcb->ptbr, hwrpb->vptb, hwpcb->unique));
	DBGS(("Starting secondary cpu %d: state 0x%lx pal_flags 0x%lx\n",
	      cpuid, idle->state, idle->tss.pal_flags));

	/* Setup HWRPB fields that SRM uses to activate secondary CPU */
	hwrpb->CPU_restart = __smp_callin;
	hwrpb->CPU_restart_data = (unsigned long) __smp_callin;

	/* Recalculate and update the HWRPB checksum */
	hwrpb_update_checksum(hwrpb);

	/*
	 * Send a "start" command to the specified processor.
	 */

	/* SRM III 3.4.1.3 */
	cpu->flags |= 0x22;	/* turn on Context Valid and Restart Capable */
	cpu->flags &= ~1;	/* turn off Bootstrap In Progress */
	wmb();

	send_secondary_console_msg("START\r\n", cpuid);

	/* Wait 1 second for an ACK from the console.  Note that jiffies
	   aren't ticking yet.  */
	for (timeout = 100000; timeout > 0; timeout--) {
		if (cpu->flags & 1)
			goto started;
		udelay(10);
		barrier();
	}
	printk(KERN_ERR "SMP: Processor %d failed to start.\n", cpuid);
	return -1;

 started:
	DBGS(("secondary_cpu_start: SUCCESS for CPU %d!!!\n", cpuid));
	return 0;
}
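
/* Note on the control flow above: once SRM accepts the "START" message it
   restarts the secondary through hwrpb->CPU_restart, which was pointed at
   __smp_callin; that trampoline is expected to land the secondary in
   smp_callin() above, where it calibrates itself and reports in through
   smp_secondary_alive.  */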

/*
 * Bring one cpu online.
 */
static int __init
smp_boot_one_cpu(int cpuid, int cpunum)
{
	struct task_struct *idle;
	long timeout;

	/* Cook up an idler for this guy.  Note that the address we give
	   to kernel_thread is irrelevant -- it's going to start where
	   HWRPB.CPU_restart says to start.  But this gets all the other
	   task-y sort of data structures set up like we wish.  */
	kernel_thread((void *)__smp_callin, NULL, CLONE_PID|CLONE_VM);
	idle = task[cpunum];
	if (!idle)
		panic("No idle process for CPU %d", cpuid);
	idle->processor = cpuid;

	/* Schedule the first task manually.  */
	/* ??? Ingo, what is this?  */
	idle->has_cpu = 1;

	DBGS(("smp_boot_one_cpu: CPU %d state 0x%lx flags 0x%lx\n",
	      cpuid, idle->state, idle->flags));

	/* The secondary will change this once it is happy.  Note that
	   secondary_cpu_start contains the necessary memory barrier.  */
	smp_secondary_alive = -1;

	/* Whirrr, whirrr, whirrrrrrrrr...  */
	if (secondary_cpu_start(cpuid, idle))
		return -1;

	/* We've been acked by the console; wait one second for the task
	   to start up for real.  Note that jiffies aren't ticking yet.  */
	for (timeout = 0; timeout < 100000; timeout++) {
		if (smp_secondary_alive != -1)
			goto alive;
		udelay(10);
		barrier();
	}

	printk(KERN_ERR "SMP: Processor %d is stuck.\n", cpuid);
	return -1;

 alive:
	/* Another "Red Snapper".  */
	cpu_number_map[cpuid] = cpunum;
	__cpu_logical_map[cpunum] = cpuid;
	return 0;
}
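
/* Bookkeeping note: cpu_number_map[] maps a physical (hardware) cpuid to
   the logical index used by the rest of the kernel, and __cpu_logical_map[]
   is the inverse; the boot CPU is installed as logical 0 in smp_boot_cpus()
   below.  */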

/*
 * Called from setup_arch.  Detect an SMP system and which processors
 * are present.
 */
void __init
setup_smp(void)
{
	struct percpu_struct *cpubase, *cpu;
	int i;

	smp_boot_cpuid = hard_smp_processor_id();
	if (smp_boot_cpuid != 0) {
		printk(KERN_WARNING "SMP: Booting off cpu %d instead of 0?\n",
		       smp_boot_cpuid);
	}

	if (hwrpb->nr_processors > 1) {
		int boot_cpu_palrev;

		DBGS(("setup_smp: nr_processors %ld\n",
		      hwrpb->nr_processors));

		cpubase = (struct percpu_struct *)
			((char*)hwrpb + hwrpb->processor_offset);
		boot_cpu_palrev = cpubase->pal_revision;

		for (i = 0; i < hwrpb->nr_processors; i++ ) {
			cpu = (struct percpu_struct *)
				((char *)cpubase + i*hwrpb->processor_size);
			if ((cpu->flags & 0x1cc) == 0x1cc) {
				smp_num_probed++;
				/* Assume here that "whami" == index */
				cpu_present_mask |= (1L << i);
				cpu->pal_revision = boot_cpu_palrev;
			}

			DBGS(("setup_smp: CPU %d: flags 0x%lx type 0x%lx\n",
			      i, cpu->flags, cpu->type));
			DBGS(("setup_smp: CPU %d: PAL rev 0x%lx\n",
			      i, cpu->pal_revision));
		}
	} else {
		smp_num_probed = 1;
		cpu_present_mask = (1L << smp_boot_cpuid);
	}

	printk(KERN_INFO "SMP: %d CPUs probed -- cpu_present_mask = %lx\n",
	       smp_num_probed, cpu_present_mask);
}

/*
 * Called by smp_init to bring all the secondaries online and hold them.
 */
void __init
smp_boot_cpus(void)
{
	int cpu_count, i;
	unsigned long bogosum;

	/* Take care of some initial bookkeeping.  */
	memset(cpu_number_map, -1, sizeof(cpu_number_map));
	memset(__cpu_logical_map, -1, sizeof(__cpu_logical_map));
	memset(ipi_data, 0, sizeof(ipi_data));

	cpu_number_map[smp_boot_cpuid] = 0;
	__cpu_logical_map[0] = smp_boot_cpuid;
	current->processor = smp_boot_cpuid;

	smp_store_cpu_info(smp_boot_cpuid);
	smp_tune_scheduling();
	smp_setup_percpu_timer(smp_boot_cpuid);

	init_idle();

	/* Nothing to do on a UP box, or when told not to.  */
	if (smp_num_probed == 1 || max_cpus == 0) {
		printk(KERN_INFO "SMP mode deactivated.\n");
		return;
	}

	printk(KERN_INFO "SMP starting up secondaries.\n");

	cpu_count = 1;
	for (i = 0; i < NR_CPUS; i++) {
		if (i == smp_boot_cpuid)
			continue;

		if (((cpu_present_mask >> i) & 1) == 0)
			continue;

		if (smp_boot_one_cpu(i, cpu_count))
			continue;

		cpu_count++;
	}

	if (cpu_count == 1) {
		printk(KERN_ERR "SMP: Only one lonely processor alive.\n");
		return;
	}

	bogosum = 0;
	for (i = 0; i < NR_CPUS; i++) {
		if (cpu_present_mask & (1L << i))
			bogosum += cpu_data[i].loops_per_sec;
	}
	printk(KERN_INFO "SMP: Total of %d processors activated "
	       "(%lu.%02lu BogoMIPS).\n",
	       cpu_count, (bogosum + 2500) / 500000,
	       ((bogosum + 2500) / 5000) % 100);
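
	/* The arithmetic above just formats loops-per-second as BogoMIPS
	   with two decimals, rounded to the nearest hundredth.  Purely
	   illustrative numbers: two CPUs with loops_per_sec == 250000000
	   give bogosum == 500000000, printed as "1000.00 BogoMIPS".  */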
	smp_num_cpus = cpu_count;
}

/*
 * Called by smp_init to release the blocking online cpus once they
 * are all started.
 */
void __init
smp_commence(void)
{
	/* smp_init sets smp_threads_ready -- that's enough.  */
	mb();
}

/*
 * Only broken Intel needs this, thus it should not even be
 * referenced globally.
 */

void __init
initialize_secondary(void)
{
}

extern void update_one_process(struct task_struct *p, unsigned long ticks,
			       unsigned long user, unsigned long system,
			       int cpu);

void
smp_percpu_timer_interrupt(struct pt_regs *regs)
{
	int cpu = smp_processor_id();
	int user = user_mode(regs);
	struct cpuinfo_alpha *data = &cpu_data[cpu];

#ifdef NOT_YET_PROFILING
	clear_profile_irq(mid_xlate[cpu]);
	if (!user)
		alpha_do_profile(regs->pc);
#endif

	if (!--data->prof_counter) {
		/* We need to make like a normal interrupt -- otherwise
		   timer interrupts ignore the global interrupt lock,
		   which would be a Bad Thing.  */
		irq_enter(cpu, TIMER_IRQ);

		update_one_process(current, 1, user, !user, cpu);
		if (current->pid) {
			if (--current->counter <= 0) {
				current->counter = 0;
				current->need_resched = 1;
			}

			if (user) {
				if (current->priority < DEF_PRIORITY) {
					kstat.cpu_nice++;
					kstat.per_cpu_nice[cpu]++;
				} else {
					kstat.cpu_user++;
					kstat.per_cpu_user[cpu]++;
				}
			} else {
				kstat.cpu_system++;
				kstat.per_cpu_system[cpu]++;
			}
		}

		data->prof_counter = data->prof_multiplier;
		irq_exit(cpu, TIMER_IRQ);
	}
}

int __init
setup_profiling_timer(unsigned int multiplier)
{
#ifdef NOT_YET_PROFILING
	int i;
	unsigned long flags;

	/* Prevent level14 ticker IRQ flooding.  */
	if((!multiplier) || (lvl14_resolution / multiplier) < 500)
		return -EINVAL;

	save_and_cli(flags);
	for (i = 0; i < NR_CPUS; i++) {
		if (cpu_present_mask & (1L << i)) {
			load_profile_irq(mid_xlate[i],
					 lvl14_resolution / multiplier);
			prof_multiplier[i] = multiplier;
		}
	}
	restore_flags(flags);

	return 0;
#else
	return -EINVAL;
#endif
}

static void
send_ipi_message(unsigned long to_whom, enum ipi_message_type operation)
{
	long i, j;

	/* Reduce the number of memory barriers by doing two loops,
	   one to set the bits, one to invoke the interrupts.  */

	mb();	/* Order out-of-band data and bit setting. */

	for (i = 0, j = 1; i < NR_CPUS; ++i, j <<= 1) {
		if (to_whom & j)
			set_bit(operation, &ipi_data[i].bits);
	}

	mb();	/* Order bit setting and interrupt. */

	for (i = 0, j = 1; i < NR_CPUS; ++i, j <<= 1) {
		if (to_whom & j)
			wripir(i);
	}
}
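
/* Delivery sketch: to_whom is a mask of physical cpuids, the per-message
   bit lands in ipi_data[cpu].bits, and wripir() (a PALcode call) is what
   actually raises the interprocessor interrupt; the target CPU then runs
   handle_ipi() below and drains its pending-bits word.  */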

/* Structure and data for smp_call_function.  This is designed to
   minimize static memory requirements.  Plus it looks cleaner.  */

struct smp_call_struct {
	void (*func) (void *info);
	void *info;
	long wait;
	atomic_t unstarted_count;
	atomic_t unfinished_count;
};

static struct smp_call_struct *smp_call_function_data;

/* Atomically drop data into a shared pointer.  The pointer is free if
   it is initially locked.  If retry, spin until free.  */

static inline int
pointer_lock (void *lock, void *data, int retry)
{
	void *old, *tmp;

	mb();
 again:
	/* Compare and swap with zero.  */
	asm volatile (
	"1:	ldq_l	%0,%1\n"
	"	mov	%3,%2\n"
	"	bne	%0,2f\n"
	"	stq_c	%2,%1\n"
	"	beq	%2,1b\n"
	"2:"
	: "=&r"(old), "=m"(*(void **)lock), "=&r"(tmp)
	: "r"(data)
	: "memory");

	if (old == 0)
		return 0;
	if (! retry)
		return -EBUSY;

	while (*(void **)lock)
		schedule();
	goto again;
}
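
/* Roughly, the ll/sc sequence above behaves like the following performed
   atomically:
	if (*(void **)lock == NULL) { *(void **)lock = data; old = NULL; }
	else old = *(void **)lock;
   i.e. a compare-and-swap against NULL, with a NULL old value meaning
   the caller now owns the pointer.  */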

void
handle_ipi(struct pt_regs *regs)
{
	int this_cpu = smp_processor_id();
	unsigned long *pending_ipis = &ipi_data[this_cpu].bits;
	unsigned long ops;

	DBGS(("handle_ipi: on CPU %d ops 0x%x PC 0x%lx\n",
	      this_cpu, *pending_ipis, regs->pc));

	mb();	/* Order interrupt and bit testing. */
	while ((ops = xchg(pending_ipis, 0)) != 0) {
	  mb();	/* Order bit clearing and data access. */
	  do {
		unsigned long which;

		which = ops & -ops;
		ops &= ~which;
		which = ffz(~which);
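
		/* The three lines above peel off one pending message per
		   iteration: ops & -ops isolates the lowest set bit, that
		   bit is then cleared from ops, and ffz(~which) converts
		   the isolated bit back into its index, i.e. an
		   enum ipi_message_type value.  */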

		if (which == IPI_RESCHEDULE) {
			/* Reschedule callback.  Everything to be done
			   is done by the interrupt return path.  */
		}
		else if (which == IPI_CALL_FUNC) {
			struct smp_call_struct *data;
			void (*func)(void *info);
			void *info;
			int wait;

			data = smp_call_function_data;
			func = data->func;
			info = data->info;
			wait = data->wait;

			/* Notify the sending CPU that the data has been
			   received, and execution is about to begin.  */
			mb();
			atomic_dec (&data->unstarted_count);

			/* At this point the structure may be gone unless
			   wait is true.  */
			(*func)(info);

			/* Notify the sending CPU that the task is done.  */
			mb();
			if (wait) atomic_dec (&data->unfinished_count);
		}
		else if (which == IPI_CPU_STOP) {
			halt();
		}
		else {
			printk(KERN_CRIT "Unknown IPI on CPU %d: %lu\n",
			       this_cpu, which);
		}
	  } while (ops);

	  mb();	/* Order data access and bit testing. */
	}

	cpu_data[this_cpu].ipi_count++;

	if (hwrpb->txrdy)
		recv_secondary_console_msg();
}

void
smp_send_reschedule(int cpu)
{
#if DEBUG_IPI_MSG
	if (cpu == hard_smp_processor_id())
		printk(KERN_WARNING
		       "smp_send_reschedule: Sending IPI to self.\n");
#endif
	send_ipi_message(1L << cpu, IPI_RESCHEDULE);
}

void
smp_send_stop(void)
{
	unsigned long to_whom = cpu_present_mask ^ (1L << smp_processor_id());
#if DEBUG_IPI_MSG
	if (hard_smp_processor_id() != boot_cpu_id)
		printk(KERN_WARNING "smp_send_stop: Not on boot cpu.\n");
#endif
	send_ipi_message(to_whom, IPI_CPU_STOP);
}

/*
 * Run a function on all other CPUs.
 *  <func>	The function to run.  This must be fast and non-blocking.
 *  <info>	An arbitrary pointer to pass to the function.
 *  <retry>	If true, keep retrying until ready.
 *  <wait>	If true, wait until function has completed on other CPUs.
 *  [RETURNS]	0 on success, else a negative status code.
 *
 * Does not return until remote CPUs are nearly ready to execute <func>
 * or have already executed it.
 */

int
smp_call_function (void (*func) (void *info), void *info, int retry, int wait)
{
	unsigned long to_whom = cpu_present_mask ^ (1L << smp_processor_id());
	struct smp_call_struct data;
	long timeout;

	data.func = func;
	data.info = info;
	data.wait = wait;
	atomic_set(&data.unstarted_count, smp_num_cpus - 1);
	atomic_set(&data.unfinished_count, smp_num_cpus - 1);

	/* Acquire the smp_call_function_data mutex.  */
	if (pointer_lock(&smp_call_function_data, &data, retry))
		return -EBUSY;

	/* Send a message to all other CPUs.  */
	send_ipi_message(to_whom, IPI_CALL_FUNC);

	/* Wait for a minimal response.  */
	timeout = jiffies + HZ;
	while (atomic_read (&data.unstarted_count) > 0
	       && time_before (jiffies, timeout))
		barrier();

	/* We either got one or timed out -- clear the lock.  */
	mb();
	smp_call_function_data = 0;
	if (atomic_read (&data.unstarted_count) > 0)
		return -ETIMEDOUT;

	/* Wait for a complete response, if needed.  */
	if (wait) {
		while (atomic_read (&data.unfinished_count) > 0)
			barrier();
	}

	return 0;
}

static void
ipi_flush_tlb_all(void *ignored)
{
	tbia();
}

void
flush_tlb_all(void)
{
	/* Although we don't have any data to pass, we do want to
	   synchronize with the other processors.  */
	if (smp_call_function(ipi_flush_tlb_all, NULL, 1, 1)) {
		printk(KERN_CRIT "flush_tlb_all: timed out\n");
	}

	tbia();
}

static void
ipi_flush_tlb_mm(void *x)
{
	struct mm_struct *mm = (struct mm_struct *) x;
	if (mm == current->mm)
		flush_tlb_current(mm);
}

void
flush_tlb_mm(struct mm_struct *mm)
{
	if (mm == current->mm)
		flush_tlb_current(mm);
	else
		flush_tlb_other(mm);

	if (smp_call_function(ipi_flush_tlb_mm, mm, 1, 1)) {
		printk(KERN_CRIT "flush_tlb_mm: timed out\n");
	}
}

struct flush_tlb_page_struct {
	struct vm_area_struct *vma;
	struct mm_struct *mm;
	unsigned long addr;
};

static void
ipi_flush_tlb_page(void *x)
{
	struct flush_tlb_page_struct *data = (struct flush_tlb_page_struct *)x;
	if (data->mm == current->mm)
		flush_tlb_current_page(data->mm, data->vma, data->addr);
}

void
flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
{
	struct flush_tlb_page_struct data;
	struct mm_struct *mm = vma->vm_mm;

	data.vma = vma;
	data.mm = mm;
	data.addr = addr;

	if (mm == current->mm)
		flush_tlb_current_page(mm, vma, addr);
	else
		flush_tlb_other(mm);

	if (smp_call_function(ipi_flush_tlb_page, &data, 1, 1)) {
		printk(KERN_CRIT "flush_tlb_page: timed out\n");
	}
}

void
flush_tlb_range(struct mm_struct *mm, unsigned long start, unsigned long end)
{
	/* On the Alpha we always flush the whole user tlb.  */
	flush_tlb_mm(mm);
}

int
smp_info(char *buffer)
{
	long i;
	unsigned long sum = 0;
	for (i = 0; i < NR_CPUS; i++)
		sum += cpu_data[i].ipi_count;

	return sprintf(buffer, "CPUs probed %d active %d map 0x%lx IPIs %ld\n",
		       smp_num_probed, smp_num_cpus, cpu_present_mask, sum);
}

#if DEBUG_SPINLOCK
void
spin_unlock(spinlock_t * lock)
{
	mb();
	lock->lock = 0;

	lock->on_cpu = -1;
	lock->previous = NULL;
	lock->task = NULL;
	lock->base_file = "none";
	lock->line_no = 0;
}

void
debug_spin_lock(spinlock_t * lock, const char *base_file, int line_no)
{
	long tmp;
	long stuck;
	void *inline_pc = __builtin_return_address(0);
	unsigned long started = jiffies;
	int printed = 0;
	int cpu = smp_processor_id();

	stuck = 1L << 28;
 try_again:

	/* Use sub-sections to put the actual loop at the end
	   of this object file's text section so as to perfect
	   branch prediction.  */
	__asm__ __volatile__(
	"1:	ldl_l	%0,%1\n"
	"	subq	%2,1,%2\n"
	"	blbs	%0,2f\n"
	"	or	%0,1,%0\n"
	"	stl_c	%0,%1\n"
	"	beq	%0,3f\n"
	"4:	mb\n"
	".section .text2,\"ax\"\n"
	"2:	ldl	%0,%1\n"
	"	subq	%2,1,%2\n"
	"3:	blt	%2,4b\n"
	"	blbs	%0,2b\n"
	"	br	1b\n"
	".previous"
	: "=r" (tmp), "=m" (__dummy_lock(lock)), "=r" (stuck)
	: "1" (__dummy_lock(lock)), "2" (stuck));

	if (stuck < 0) {
		printk(KERN_WARNING
		       "%s:%d spinlock stuck in %s at %p(%d)"
		       " owner %s at %p(%d) %s:%d\n",
		       base_file, line_no,
		       current->comm, inline_pc, cpu,
		       lock->task->comm, lock->previous,
		       lock->on_cpu, lock->base_file, lock->line_no);
		stuck = 1L << 36;
		printed = 1;
		goto try_again;
	}

	/* Exiting.  Got the lock.  */
	lock->on_cpu = cpu;
	lock->previous = inline_pc;
	lock->task = current;
	lock->base_file = base_file;
	lock->line_no = line_no;

	if (printed) {
		printk(KERN_WARNING
		       "%s:%d spinlock grabbed in %s at %p(%d) %ld ticks\n",
		       base_file, line_no, current->comm, inline_pc,
		       cpu, jiffies - started);
	}
}

int
debug_spin_trylock(spinlock_t * lock, const char *base_file, int line_no)
{
	int ret;
	if ((ret = !test_and_set_bit(0, lock))) {
		lock->on_cpu = smp_processor_id();
		lock->previous = __builtin_return_address(0);
		lock->task = current;
	} else {
		lock->base_file = base_file;
		lock->line_no = line_no;
	}
	return ret;
}
#endif /* DEBUG_SPINLOCK */

#if DEBUG_RWLOCK
void write_lock(rwlock_t * lock)
{
	long regx, regy;
	int stuck_lock, stuck_reader;
	void *inline_pc = __builtin_return_address(0);

 try_again:

	stuck_lock = 1<<26;
	stuck_reader = 1<<26;

	__asm__ __volatile__(
	"1:	ldl_l	%1,%0\n"
	"	blbs	%1,6f\n"
	"	blt	%1,8f\n"
	"	mov	1,%1\n"
	"	stl_c	%1,%0\n"
	"	beq	%1,6f\n"
	"4:	mb\n"
	".section .text2,\"ax\"\n"
	"6:	blt	%3,4b	# debug\n"
	"	subl	%3,1,%3	# debug\n"
	"	ldl	%1,%0\n"
	"	blbs	%1,6b\n"
	"8:	blt	%4,4b	# debug\n"
	"	subl	%4,1,%4	# debug\n"
	"	ldl	%1,%0\n"
	"	blt	%1,8b\n"
	"	br	1b\n"
	".previous"
	: "=m" (__dummy_lock(lock)), "=&r" (regx), "=&r" (regy),
	  "=&r" (stuck_lock), "=&r" (stuck_reader)
	: "0" (__dummy_lock(lock)), "3" (stuck_lock), "4" (stuck_reader));

	if (stuck_lock < 0) {
		printk(KERN_WARNING "write_lock stuck at %p\n", inline_pc);
		goto try_again;
	}
	if (stuck_reader < 0) {
		printk(KERN_WARNING "write_lock stuck on readers at %p\n",
		       inline_pc);
		goto try_again;
	}
}

void read_lock(rwlock_t * lock)
{
	long regx;
	int stuck_lock;
	void *inline_pc = __builtin_return_address(0);

 try_again:

	stuck_lock = 1<<26;

	__asm__ __volatile__(
	"1:	ldl_l	%1,%0;"
	"	blbs	%1,6f;"
	"	subl	%1,2,%1;"
	"	stl_c	%1,%0;"
	"	beq	%1,6f;"
	"4:	mb\n"
	".section .text2,\"ax\"\n"
	"6:	ldl	%1,%0;"
	"	blt	%2,4b	# debug\n"
	"	subl	%2,1,%2	# debug\n"
	"	blbs	%1,6b;"
	"	br	1b\n"
	".previous"
	: "=m" (__dummy_lock(lock)), "=&r" (regx), "=&r" (stuck_lock)
	: "0" (__dummy_lock(lock)), "2" (stuck_lock));

	if (stuck_lock < 0) {
		printk(KERN_WARNING "read_lock stuck at %p\n", inline_pc);
		goto try_again;
	}
}
#endif /* DEBUG_RWLOCK */