/*
 *	linux/arch/alpha/kernel/smp.c
 */

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/tasks.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/delay.h>

#include <asm/hwrpb.h>
#include <asm/ptrace.h>
#include <asm/atomic.h>

#include <asm/io.h>
#include <asm/irq.h>
#include <asm/bitops.h>
#include <asm/pgtable.h>
#include <asm/spinlock.h>
#include <asm/hardirq.h>
#include <asm/softirq.h>

#define __KERNEL_SYSCALLS__
#include <asm/unistd.h>

#include "proto.h"
#include "irq.h"

#define DEBUG_SMP 0
#if DEBUG_SMP
#define DBGS(args)	printk args
#else
#define DBGS(args)
#endif

/* A collection of per-processor data.  */
struct cpuinfo_alpha cpu_data[NR_CPUS];

/* A collection of single bit ipi messages.  */
static struct {
	unsigned long bits __cacheline_aligned;
} ipi_data[NR_CPUS];

enum ipi_message_type {
	IPI_RESCHEDULE,
	IPI_CALL_FUNC,
	IPI_CPU_STOP,
};
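
/* The IPI protocol in brief: a sender sets one of the ipi_message_type
   bits in the target's ipi_data word (send_ipi_message) and raises an
   interprocessor interrupt; the target atomically swaps the word to
   zero in handle_ipi() and acts on each bit that was set.  */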

spinlock_t kernel_flag __cacheline_aligned = SPIN_LOCK_UNLOCKED;

/* Set to a secondary's cpuid when it comes online.  */
static unsigned long smp_secondary_alive;

unsigned long cpu_present_mask;	/* Which cpu ids came online.  */

static int max_cpus = -1;	/* Command-line limitation.  */
int smp_boot_cpuid;		/* Which processor we booted from.  */
int smp_num_probed;		/* Internal processor count */
int smp_num_cpus = 1;		/* Number that came online.  */
int smp_threads_ready;		/* True once the per process idle is forked. */
cycles_t cacheflush_time;

int cpu_number_map[NR_CPUS];
int __cpu_logical_map[NR_CPUS];

extern void calibrate_delay(void);
extern asmlinkage void entInt(void);

/*
 * Process boot command SMP options, like "nosmp" and "maxcpus=".
 */
void __init
smp_setup(char *str, int *ints)
{
	if (ints && ints[0] > 0)
		max_cpus = ints[1];
	else
		max_cpus = 0;
}

/*
 * Called by both boot and secondaries to move global data into
 * per-processor storage.
 */
static inline void __init
smp_store_cpu_info(int cpuid)
{
	cpu_data[cpuid].loops_per_sec = loops_per_sec;
	cpu_data[cpuid].last_asn
	  = (cpuid << WIDTH_HARDWARE_ASN) + ASN_FIRST_VERSION;
	cpu_data[cpuid].irq_count = 0;
	cpu_data[cpuid].bh_count = 0;
}

/*
 * Ideally sets up per-cpu profiling hooks.  Doesn't do much now...
 */
static inline void __init
smp_setup_percpu_timer(int cpuid)
{
	cpu_data[cpuid].prof_counter = 1;
	cpu_data[cpuid].prof_multiplier = 1;
}

/*
 * Where secondaries begin a life of C.
 */
void __init
smp_callin(void)
{
	int cpuid = hard_smp_processor_id();

	DBGS(("CALLIN %d state 0x%lx\n", cpuid, current->state));

	/* Turn on machine checks.  */
	wrmces(7);

	/* Set trap vectors.  */
	trap_init();

	/* Set interrupt vector.  */
	wrent(entInt, 0);

	/* Setup the scheduler for this processor.  */
	init_idle();

	/* Get our local ticker going.  */
	smp_setup_percpu_timer(cpuid);

	/* Must have completely accurate bogos.  */
	__sti();
	calibrate_delay();
	smp_store_cpu_info(cpuid);

	/* Allow master to continue.  */
	wmb();
	smp_secondary_alive = cpuid;

	/* Wait for the go code.  */
	while (!smp_threads_ready)
		barrier();

	DBGS(("smp_callin: commencing CPU %d current %p\n",
	      cpuid, current));

	/* Do nothing.  */
	cpu_idle(NULL);
}

/*
 * Rough estimation for SMP scheduling, this is the number of cycles it
 * takes for a fully memory-limited process to flush the SMP-local cache.
 *
 * We are not told how much cache there is, so we have to guess.
 */
static void __init
smp_tune_scheduling (void)
{
	struct percpu_struct *cpu;
	unsigned long on_chip_cache;
	unsigned long freq;

	cpu = (struct percpu_struct*)((char*)hwrpb + hwrpb->processor_offset);
	switch (cpu->type)
	{
	case EV45_CPU:
		on_chip_cache = 16 + 16;
		break;

	case EV5_CPU:
	case EV56_CPU:
		on_chip_cache = 8 + 8 + 96;
		break;

	case PCA56_CPU:
		on_chip_cache = 16 + 8;
		break;

	case EV6_CPU:
		on_chip_cache = 64 + 64;
		break;

	default:
		on_chip_cache = 8 + 8;
		break;
	}

	freq = hwrpb->cycle_freq ? : est_cycle_freq;

	/* Magic estimation stolen from x86 port.  */
	cacheflush_time = freq / 1024 * on_chip_cache / 5000;
}
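
/* For scale (an illustrative calculation, not part of the original
   source): with an assumed 500 MHz clock and an EV56's 8+8+96 KB of
   on-chip cache, the estimate above works out to roughly 11,000
   cycles: 500000000 / 1024 * 112 / 5000 == 10937.  */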

/*
 * Send a message to a secondary's console.  "START" is one such
 * interesting message.  ;-)
 */
static void
send_secondary_console_msg(char *str, int cpuid)
{
	struct percpu_struct *cpu;
	register char *cp1, *cp2;
	unsigned long cpumask;
	size_t len;
	long timeout;

	cpu = (struct percpu_struct *)
		((char*)hwrpb
		 + hwrpb->processor_offset
		 + cpuid * hwrpb->processor_size);

	cpumask = (1L << cpuid);
	if (hwrpb->txrdy & cpumask)
		goto delay1;
 ready1:

	cp2 = str;
	len = strlen(cp2);
	*(unsigned int *)&cpu->ipc_buffer[0] = len;
	cp1 = (char *) &cpu->ipc_buffer[1];
	memcpy(cp1, cp2, len);

	/* atomic test and set */
	wmb();
	set_bit(cpuid, &hwrpb->rxrdy);

	if (hwrpb->txrdy & cpumask)
		goto delay2;
 ready2:
	return;

 delay1:
	/* Wait one second.  Note that jiffies aren't ticking yet.  */
	for (timeout = 100000; timeout > 0; --timeout) {
		if (!(hwrpb->txrdy & cpumask))
			goto ready1;
		udelay(10);
		barrier();
	}
	goto timeout;

 delay2:
	/* Wait one second.  */
	for (timeout = 100000; timeout > 0; --timeout) {
		if (!(hwrpb->txrdy & cpumask))
			goto ready2;
		udelay(10);
		barrier();
	}
	goto timeout;

 timeout:
	printk("Processor %x not ready\n", cpuid);
	return;
}

/*
 * A secondary console wants to send a message.  Receive it.
 */
static void
recv_secondary_console_msg(void)
{
	int mycpu, i, cnt;
	unsigned long txrdy = hwrpb->txrdy;
	char *cp1, *cp2, buf[80];
	struct percpu_struct *cpu;

	DBGS(("recv_secondary_console_msg: TXRDY 0x%lx.\n", txrdy));

	mycpu = hard_smp_processor_id();

	for (i = 0; i < NR_CPUS; i++) {
		if (!(txrdy & (1L << i)))
			continue;

		DBGS(("recv_secondary_console_msg: "
		      "TXRDY contains CPU %d.\n", i));

		cpu = (struct percpu_struct *)
			((char*)hwrpb
			 + hwrpb->processor_offset
			 + i * hwrpb->processor_size);

		DBGS(("recv_secondary_console_msg: on %d from %d"
		      " HALT_REASON 0x%lx FLAGS 0x%lx\n",
		      mycpu, i, cpu->halt_reason, cpu->flags));

		cnt = cpu->ipc_buffer[0] >> 32;
		if (cnt <= 0 || cnt >= 80)
			strcpy(buf, "<<< BOGUS MSG >>>");
		else {
			cp1 = (char *) &cpu->ipc_buffer[11];
			cp2 = buf;
			strcpy(cp2, cp1);

			while ((cp2 = strchr(cp2, '\r')) != 0) {
				*cp2 = ' ';
				if (cp2[1] == '\n')
					cp2[1] = ' ';
			}
		}

		printk(KERN_INFO "recv_secondary_console_msg: on %d "
		       "message is '%s'\n", mycpu, buf);
	}

	hwrpb->txrdy = 0;
}

/*
 * Convince the console to have a secondary cpu begin execution.
 */
static int __init
secondary_cpu_start(int cpuid, struct task_struct *idle)
{
	struct percpu_struct *cpu;
	struct pcb_struct *hwpcb;
	long timeout;

	cpu = (struct percpu_struct *)
		((char*)hwrpb
		 + hwrpb->processor_offset
		 + cpuid * hwrpb->processor_size);
	hwpcb = (struct pcb_struct *) cpu->hwpcb;

	/* Initialize the CPU's HWPCB to something just good enough for
	   us to get started.  Immediately after starting, we'll swpctx
	   to the target idle task's ptb.  Reuse the stack in the mean
	   time.  Precalculate the target PCBB.  */
	hwpcb->ksp = (unsigned long) idle + sizeof(union task_union) - 16;
	hwpcb->usp = 0;
	hwpcb->ptbr = idle->thread.ptbr;
	hwpcb->pcc = 0;
	hwpcb->asn = 0;
	hwpcb->unique = virt_to_phys(&idle->thread);
	hwpcb->flags = idle->thread.pal_flags;
	hwpcb->res1 = hwpcb->res2 = 0;

	DBGS(("KSP 0x%lx PTBR 0x%lx VPTBR 0x%lx UNIQUE 0x%lx\n",
	      hwpcb->ksp, hwpcb->ptbr, hwrpb->vptb, hwpcb->unique));
	DBGS(("Starting secondary cpu %d: state 0x%lx pal_flags 0x%lx\n",
	      cpuid, idle->state, idle->thread.pal_flags));

	/* Setup HWRPB fields that SRM uses to activate secondary CPU */
	hwrpb->CPU_restart = __smp_callin;
	hwrpb->CPU_restart_data = (unsigned long) __smp_callin;

	/* Recalculate and update the HWRPB checksum */
	hwrpb_update_checksum(hwrpb);

	/*
	 * Send a "start" command to the specified processor.
	 */

	/* SRM III 3.4.1.3 */
	cpu->flags |= 0x22;	/* turn on Context Valid and Restart Capable */
	cpu->flags &= ~1;	/* turn off Bootstrap In Progress */
	wmb();

	send_secondary_console_msg("START\r\n", cpuid);

	/* Wait 1 second for an ACK from the console.  Note that jiffies
	   aren't ticking yet.  */
	for (timeout = 100000; timeout > 0; timeout--) {
		if (cpu->flags & 1)
			goto started;
		udelay(10);
		barrier();
	}
	printk(KERN_ERR "SMP: Processor %d failed to start.\n", cpuid);
	return -1;

 started:
	DBGS(("secondary_cpu_start: SUCCESS for CPU %d!!!\n", cpuid));
	return 0;
}

/*
 * Bring one cpu online.
 */
static int __init
smp_boot_one_cpu(int cpuid, int cpunum)
{
	struct task_struct *idle;
	long timeout;

	/* Cook up an idler for this guy.  Note that the address we give
	   to kernel_thread is irrelevant -- it's going to start where
	   HWRPB.CPU_restart says to start.  But this gets all the other
	   task-y sort of data structures set up like we wish.  */
	kernel_thread((void *)__smp_callin, NULL, CLONE_PID|CLONE_VM);

	idle = init_task.prev_task;
	if (!idle)
		panic("No idle process for CPU %d", cpunum);
	del_from_runqueue(idle);
	init_tasks[cpunum] = idle;
	idle->processor = cpuid;

	/* Schedule the first task manually.  */
	/* ??? Ingo, what is this?  */
	idle->has_cpu = 1;

	DBGS(("smp_boot_one_cpu: CPU %d state 0x%lx flags 0x%lx\n",
	      cpuid, idle->state, idle->flags));

	/* The secondary will change this once it is happy.  Note that
	   secondary_cpu_start contains the necessary memory barrier.  */
	smp_secondary_alive = -1;

	/* Whirrr, whirrr, whirrrrrrrrr... */
	if (secondary_cpu_start(cpuid, idle))
		return -1;

	/* We've been acked by the console; wait one second for the task
	   to start up for real.  Note that jiffies aren't ticking yet.  */
	for (timeout = 0; timeout < 100000; timeout++) {
		if (smp_secondary_alive != -1)
			goto alive;
		udelay(10);
		barrier();
	}

	printk(KERN_ERR "SMP: Processor %d is stuck.\n", cpuid);
	return -1;

 alive:
	/* Another "Red Snapper". */
	cpu_number_map[cpuid] = cpunum;
	__cpu_logical_map[cpunum] = cpuid;
	return 0;
}

/*
 * Called from setup_arch.  Detect an SMP system and which processors
 * are present.
 */
void __init
setup_smp(void)
{
	struct percpu_struct *cpubase, *cpu;
	int i;

	smp_boot_cpuid = hard_smp_processor_id();
	if (smp_boot_cpuid != 0) {
		printk(KERN_WARNING "SMP: Booting off cpu %d instead of 0?\n",
		       smp_boot_cpuid);
	}

	if (hwrpb->nr_processors > 1) {
		int boot_cpu_palrev;

		DBGS(("setup_smp: nr_processors %ld\n",
		      hwrpb->nr_processors));

		cpubase = (struct percpu_struct *)
			((char*)hwrpb + hwrpb->processor_offset);
		boot_cpu_palrev = cpubase->pal_revision;

		for (i = 0; i < hwrpb->nr_processors; i++ ) {
			cpu = (struct percpu_struct *)
				((char *)cpubase + i*hwrpb->processor_size);
			if ((cpu->flags & 0x1cc) == 0x1cc) {
				smp_num_probed++;
				/* Assume here that "whami" == index */
				cpu_present_mask |= (1L << i);
				cpu->pal_revision = boot_cpu_palrev;
			}

			DBGS(("setup_smp: CPU %d: flags 0x%lx type 0x%lx\n",
			      i, cpu->flags, cpu->type));
			DBGS(("setup_smp: CPU %d: PAL rev 0x%lx\n",
			      i, cpu->pal_revision));
		}
	} else {
		smp_num_probed = 1;
		cpu_present_mask = (1L << smp_boot_cpuid);
	}

	printk(KERN_INFO "SMP: %d CPUs probed -- cpu_present_mask = %lx\n",
	       smp_num_probed, cpu_present_mask);
}

/*
 * Called by smp_init to bring all the secondaries online and hold them.
 */
void __init
smp_boot_cpus(void)
{
	int cpu_count, i;
	unsigned long bogosum;

	/* Take care of some initial bookkeeping.  */
	memset(cpu_number_map, -1, sizeof(cpu_number_map));
	memset(__cpu_logical_map, -1, sizeof(__cpu_logical_map));
	memset(ipi_data, 0, sizeof(ipi_data));

	cpu_number_map[smp_boot_cpuid] = 0;
	__cpu_logical_map[0] = smp_boot_cpuid;
	current->processor = smp_boot_cpuid;

	smp_store_cpu_info(smp_boot_cpuid);
	smp_tune_scheduling();
	smp_setup_percpu_timer(smp_boot_cpuid);

	init_idle();

	/* Nothing to do on a UP box, or when told not to.  */
	if (smp_num_probed == 1 || max_cpus == 0) {
		printk(KERN_INFO "SMP mode deactivated.\n");
		return;
	}

	printk(KERN_INFO "SMP starting up secondaries.\n");

	cpu_count = 1;
	for (i = 0; i < NR_CPUS; i++) {
		if (i == smp_boot_cpuid)
			continue;

		if (((cpu_present_mask >> i) & 1) == 0)
			continue;

		if (smp_boot_one_cpu(i, cpu_count))
			continue;

		cpu_count++;
	}

	if (cpu_count == 1) {
		printk(KERN_ERR "SMP: Only one lonely processor alive.\n");
		return;
	}

	bogosum = 0;
	for (i = 0; i < NR_CPUS; i++) {
		if (cpu_present_mask & (1L << i))
			bogosum += cpu_data[i].loops_per_sec;
	}
	printk(KERN_INFO "SMP: Total of %d processors activated "
	       "(%lu.%02lu BogoMIPS).\n",
	       cpu_count, (bogosum + 2500) / 500000,
	       ((bogosum + 2500) / 5000) % 100);

	smp_num_cpus = cpu_count;
}

/*
 * Called by smp_init to release the blocking online cpus once they
 * are all started.
 */
void __init
smp_commence(void)
{
	/* smp_init sets smp_threads_ready -- that's enough.  */
	mb();
}

/*
 * Only broken Intel needs this, thus it should not even be
 * referenced globally.
 */

void __init
initialize_secondary(void)
{
}

extern void update_one_process(struct task_struct *p, unsigned long ticks,
			       unsigned long user, unsigned long system,
			       int cpu);

void
smp_percpu_timer_interrupt(struct pt_regs *regs)
{
	int cpu = smp_processor_id();
	unsigned long user = user_mode(regs);
	struct cpuinfo_alpha *data = &cpu_data[cpu];

	/* Record kernel PC.  */
	if (!user)
		alpha_do_profile(regs->pc);

	if (!--data->prof_counter) {
		/* We need to make like a normal interrupt -- otherwise
		   timer interrupts ignore the global interrupt lock,
		   which would be a Bad Thing.  */
		irq_enter(cpu, TIMER_IRQ);

		update_one_process(current, 1, user, !user, cpu);
		if (current->pid) {
			if (--current->counter <= 0) {
				current->counter = 0;
				current->need_resched = 1;
			}

			if (user) {
				if (current->priority < DEF_PRIORITY) {
					kstat.cpu_nice++;
					kstat.per_cpu_nice[cpu]++;
				} else {
					kstat.cpu_user++;
					kstat.per_cpu_user[cpu]++;
				}
			} else {
				kstat.cpu_system++;
				kstat.per_cpu_system[cpu]++;
			}
		}

		data->prof_counter = data->prof_multiplier;
		irq_exit(cpu, TIMER_IRQ);
	}
}

int __init
setup_profiling_timer(unsigned int multiplier)
{
	return -EINVAL;
}

static void
send_ipi_message(unsigned long to_whom, enum ipi_message_type operation)
{
	long i, j;

	/* Reduce the number of memory barriers by doing two loops,
	   one to set the bits, one to invoke the interrupts.  */

	mb();	/* Order out-of-band data and bit setting. */

	for (i = 0, j = 1; i < NR_CPUS; ++i, j <<= 1) {
		if (to_whom & j)
			set_bit(operation, &ipi_data[i].bits);
	}

	mb();	/* Order bit setting and interrupt. */

	for (i = 0, j = 1; i < NR_CPUS; ++i, j <<= 1) {
		if (to_whom & j)
			wripir(i);
	}
}

/* Structure and data for smp_call_function.  This is designed to
   minimize static memory requirements.  Plus it looks cleaner.  */

struct smp_call_struct {
	void (*func) (void *info);
	void *info;
	long wait;
	atomic_t unstarted_count;
	atomic_t unfinished_count;
};

static struct smp_call_struct *smp_call_function_data;

/* Atomically drop data into a shared pointer.  The pointer is free if
   it is initially NULL.  If retry, spin until free.  */

static inline int
pointer_lock (void *lock, void *data, int retry)
{
	void *old, *tmp;

	mb();
 again:
	/* Compare and swap with zero.  */
	asm volatile (
	"1:	ldq_l	%0,%1\n"
	"	mov	%3,%2\n"
	"	bne	%0,2f\n"
	"	stq_c	%2,%1\n"
	"	beq	%2,1b\n"
	"2:"
	: "=&r"(old), "=m"(*(void **)lock), "=&r"(tmp)
	: "r"(data)
	: "memory");

	if (old == 0)
		return 0;
	if (! retry)
		return -EBUSY;

	while (*(void **)lock)
		schedule();
	goto again;
}
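
/* Note that the "lock" here is simply ownership of the shared pointer:
   whoever swaps a non-NULL value in owns it, and the owner releases it
   with a plain store of NULL once the remote CPUs have picked up the
   data (see smp_call_function below).  */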

void
handle_ipi(struct pt_regs *regs)
{
	int this_cpu = smp_processor_id();
	unsigned long *pending_ipis = &ipi_data[this_cpu].bits;
	unsigned long ops;

	DBGS(("handle_ipi: on CPU %d ops 0x%lx PC 0x%lx\n",
	      this_cpu, *pending_ipis, regs->pc));

	mb();	/* Order interrupt and bit testing. */
	while ((ops = xchg(pending_ipis, 0)) != 0) {
		mb();	/* Order bit clearing and data access. */
		do {
			unsigned long which;

			which = ops & -ops;
			ops &= ~which;
			which = ffz(~which);

			if (which == IPI_RESCHEDULE) {
				/* Reschedule callback.  Everything to be done
				   is done by the interrupt return path.  */
			}
			else if (which == IPI_CALL_FUNC) {
				struct smp_call_struct *data;
				void (*func)(void *info);
				void *info;
				int wait;

				data = smp_call_function_data;
				func = data->func;
				info = data->info;
				wait = data->wait;

				/* Notify the sending CPU that the data has been
				   received, and execution is about to begin.  */
				mb();
				atomic_dec (&data->unstarted_count);

				/* At this point the structure may be gone unless
				   wait is true.  */
				(*func)(info);

				/* Notify the sending CPU that the task is done.  */
				mb();
				if (wait) atomic_dec (&data->unfinished_count);
			}
			else if (which == IPI_CPU_STOP) {
				halt();
			}
			else {
				printk(KERN_CRIT "Unknown IPI on CPU %d: %lu\n",
				       this_cpu, which);
			}
		} while (ops);

		mb();	/* Order data access and bit testing. */
	}

	cpu_data[this_cpu].ipi_count++;

	if (hwrpb->txrdy)
		recv_secondary_console_msg();
}

void
smp_send_reschedule(int cpu)
{
#if DEBUG_IPI_MSG
	if (cpu == hard_smp_processor_id())
		printk(KERN_WARNING
		       "smp_send_reschedule: Sending IPI to self.\n");
#endif
	send_ipi_message(1L << cpu, IPI_RESCHEDULE);
}

void
smp_send_stop(void)
{
	unsigned long to_whom = cpu_present_mask ^ (1L << smp_processor_id());
#if DEBUG_IPI_MSG
	if (hard_smp_processor_id() != boot_cpu_id)
		printk(KERN_WARNING "smp_send_stop: Not on boot cpu.\n");
#endif
	send_ipi_message(to_whom, IPI_CPU_STOP);
}

/*
 * Run a function on all other CPUs.
 *  <func>	The function to run. This must be fast and non-blocking.
 *  <info>	An arbitrary pointer to pass to the function.
 *  <retry>	If true, keep retrying until ready.
 *  <wait>	If true, wait until function has completed on other CPUs.
 *  [RETURNS]   0 on success, else a negative status code.
 *
 * Does not return until remote CPUs are nearly ready to execute <func>,
 * are busy executing it, or have already finished it.
 */

int
smp_call_function (void (*func) (void *info), void *info, int retry, int wait)
{
	unsigned long to_whom = cpu_present_mask ^ (1L << smp_processor_id());
	struct smp_call_struct data;
	long timeout;

	data.func = func;
	data.info = info;
	data.wait = wait;
	atomic_set(&data.unstarted_count, smp_num_cpus - 1);
	atomic_set(&data.unfinished_count, smp_num_cpus - 1);

	/* Acquire the smp_call_function_data mutex.  */
	if (pointer_lock(&smp_call_function_data, &data, retry))
		return -EBUSY;

	/* Send a message to all other CPUs.  */
	send_ipi_message(to_whom, IPI_CALL_FUNC);

	/* Wait for a minimal response.  */
	timeout = jiffies + HZ;
	while (atomic_read (&data.unstarted_count) > 0
	       && time_before (jiffies, timeout))
		barrier();

	/* We either got one or timed out -- clear the lock.  */
	mb();
	smp_call_function_data = 0;
	if (atomic_read (&data.unstarted_count) > 0)
		return -ETIMEDOUT;

	/* Wait for a complete response, if needed.  */
	if (wait) {
		while (atomic_read (&data.unfinished_count) > 0)
			barrier();
	}

	return 0;
}
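
/* The TLB flush helpers below are the canonical users of
   smp_call_function(): each ipi_* callback runs in interrupt context
   on every other CPU, so it must be fast and must not block.  */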

static void
ipi_flush_tlb_all(void *ignored)
{
	tbia();
}

void
flush_tlb_all(void)
{
	/* Although we don't have any data to pass, we do want to
	   synchronize with the other processors.  */
	if (smp_call_function(ipi_flush_tlb_all, NULL, 1, 1)) {
		printk(KERN_CRIT "flush_tlb_all: timed out\n");
	}

	tbia();
}

static void
ipi_flush_tlb_mm(void *x)
{
	struct mm_struct *mm = (struct mm_struct *) x;
	if (mm == current->mm)
		flush_tlb_current(mm);
}

void
flush_tlb_mm(struct mm_struct *mm)
{
	if (mm == current->mm) {
		flush_tlb_current(mm);
		if (atomic_read(&mm->count) == 1)
			return;
	} else
		flush_tlb_other(mm);

	if (smp_call_function(ipi_flush_tlb_mm, mm, 1, 1)) {
		printk(KERN_CRIT "flush_tlb_mm: timed out\n");
	}
}

struct flush_tlb_page_struct {
	struct vm_area_struct *vma;
	struct mm_struct *mm;
	unsigned long addr;
};

static void
ipi_flush_tlb_page(void *x)
{
	struct flush_tlb_page_struct *data = (struct flush_tlb_page_struct *)x;
	if (data->mm == current->mm)
		flush_tlb_current_page(data->mm, data->vma, data->addr);
}

void
flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
{
	struct flush_tlb_page_struct data;
	struct mm_struct *mm = vma->vm_mm;

	if (mm == current->mm) {
		flush_tlb_current_page(mm, vma, addr);
		if (atomic_read(&mm->count) == 1)
			return;
	} else
		flush_tlb_other(mm);

	data.vma = vma;
	data.mm = mm;
	data.addr = addr;

	if (smp_call_function(ipi_flush_tlb_page, &data, 1, 1)) {
		printk(KERN_CRIT "flush_tlb_page: timed out\n");
	}
}

void
flush_tlb_range(struct mm_struct *mm, unsigned long start, unsigned long end)
{
	/* On the Alpha we always flush the whole user tlb.  */
	flush_tlb_mm(mm);
}

int
smp_info(char *buffer)
{
	long i;
	unsigned long sum = 0;
	for (i = 0; i < NR_CPUS; i++)
		sum += cpu_data[i].ipi_count;

	return sprintf(buffer, "CPUs probed %d active %d map 0x%lx IPIs %ld\n",
		       smp_num_probed, smp_num_cpus, cpu_present_mask, sum);
}

#if DEBUG_SPINLOCK
void
spin_unlock(spinlock_t * lock)
{
	mb();
	lock->lock = 0;

	lock->on_cpu = -1;
	lock->previous = NULL;
	lock->task = NULL;
	lock->base_file = "none";
	lock->line_no = 0;
}

void
debug_spin_lock(spinlock_t * lock, const char *base_file, int line_no)
{
	long tmp;
	long stuck;
	void *inline_pc = __builtin_return_address(0);
	unsigned long started = jiffies;
	int printed = 0;
	int cpu = smp_processor_id();

	stuck = 1L << 28;
 try_again:

	/* Use sub-sections to put the actual loop at the end
	   of this object file's text section so as to perfect
	   branch prediction.  */
	__asm__ __volatile__(
	"1:	ldl_l	%0,%1\n"
	"	subq	%2,1,%2\n"
	"	blbs	%0,2f\n"
	"	or	%0,1,%0\n"
	"	stl_c	%0,%1\n"
	"	beq	%0,3f\n"
	"4:	mb\n"
	".section .text2,\"ax\"\n"
	"2:	ldl	%0,%1\n"
	"	subq	%2,1,%2\n"
	"3:	blt	%2,4b	# debug\n"
	"	blbs	%0,2b\n"
	"	br	1b\n"
	".previous"
	: "=r" (tmp), "=m" (__dummy_lock(lock)), "=r" (stuck)
	: "1" (__dummy_lock(lock)), "2" (stuck));

	if (stuck < 0) {
		printk(KERN_WARNING
		       "%s:%d spinlock stuck in %s at %p(%d)"
		       " owner %s at %p(%d) %s:%d\n",
		       base_file, line_no,
		       current->comm, inline_pc, cpu,
		       lock->task->comm, lock->previous,
		       lock->on_cpu, lock->base_file, lock->line_no);
		stuck = 1L << 36;
		printed = 1;
		goto try_again;
	}

	/* Exiting.  Got the lock.  */
	lock->on_cpu = cpu;
	lock->previous = inline_pc;
	lock->task = current;
	lock->base_file = base_file;
	lock->line_no = line_no;

	if (printed) {
		printk(KERN_WARNING
		       "%s:%d spinlock grabbed in %s at %p(%d) %ld ticks\n",
		       base_file, line_no, current->comm, inline_pc,
		       cpu, jiffies - started);
	}
}

int
debug_spin_trylock(spinlock_t * lock, const char *base_file, int line_no)
{
	int ret;
	if ((ret = !test_and_set_bit(0, lock))) {
		lock->on_cpu = smp_processor_id();
		lock->previous = __builtin_return_address(0);
		lock->task = current;
	} else {
		lock->base_file = base_file;
		lock->line_no = line_no;
	}
	return ret;
}
#endif /* DEBUG_SPINLOCK */

#if DEBUG_RWLOCK
void write_lock(rwlock_t * lock)
{
	long regx, regy;
	int stuck_lock, stuck_reader;
	void *inline_pc = __builtin_return_address(0);

 try_again:

	stuck_lock = 1<<26;
	stuck_reader = 1<<26;

	__asm__ __volatile__(
	"1:	ldl_l	%1,%0\n"
	"	blbs	%1,6f\n"
	"	blt	%1,8f\n"
	"	mov	1,%1\n"
	"	stl_c	%1,%0\n"
	"	beq	%1,6f\n"
	"4:	mb\n"
	".section .text2,\"ax\"\n"
	"6:	blt	%3,4b	# debug\n"
	"	subl	%3,1,%3	# debug\n"
	"	ldl	%1,%0\n"
	"	blbs	%1,6b\n"
	"8:	blt	%4,4b	# debug\n"
	"	subl	%4,1,%4	# debug\n"
	"	ldl	%1,%0\n"
	"	blt	%1,8b\n"
	"	br	1b\n"
	".previous"
	: "=m" (__dummy_lock(lock)), "=&r" (regx), "=&r" (regy),
	  "=&r" (stuck_lock), "=&r" (stuck_reader)
	: "0" (__dummy_lock(lock)), "3" (stuck_lock), "4" (stuck_reader));

	if (stuck_lock < 0) {
		printk(KERN_WARNING "write_lock stuck at %p\n", inline_pc);
		goto try_again;
	}
	if (stuck_reader < 0) {
		printk(KERN_WARNING "write_lock stuck on readers at %p\n",
		       inline_pc);
		goto try_again;
	}
}

void read_lock(rwlock_t * lock)
{
	long regx;
	int stuck_lock;
	void *inline_pc = __builtin_return_address(0);

 try_again:

	stuck_lock = 1<<26;

	__asm__ __volatile__(
	"1:	ldl_l	%1,%0;"
	"	blbs	%1,6f;"
	"	subl	%1,2,%1;"
	"	stl_c	%1,%0;"
	"	beq	%1,6f;"
	"4:	mb\n"
	".section .text2,\"ax\"\n"
	"6:	ldl	%1,%0;"
	"	blt	%2,4b	# debug\n"
	"	subl	%2,1,%2	# debug\n"
	"	blbs	%1,6b;"
	"	br	1b\n"
	".previous"
	: "=m" (__dummy_lock(lock)), "=&r" (regx), "=&r" (stuck_lock)
	: "0" (__dummy_lock(lock)), "2" (stuck_lock));

	if (stuck_lock < 0) {
		printk(KERN_WARNING "read_lock stuck at %p\n", inline_pc);
		goto try_again;
	}
}
#endif /* DEBUG_RWLOCK */