/*
 *  linux/arch/alpha/kernel/smp.c
 */

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/threads.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/spinlock.h>

#include <asm/hwrpb.h>
#include <asm/ptrace.h>
#include <asm/atomic.h>

#include <asm/io.h>
#include <asm/irq.h>
#include <asm/bitops.h>
#include <asm/pgtable.h>
#include <asm/hardirq.h>
#include <asm/softirq.h>

#define __KERNEL_SYSCALLS__
#include <asm/unistd.h>

#include "proto.h"
#include "irq_impl.h"

#define DEBUG_SMP 0
#if DEBUG_SMP
#define DBGS(args)      printk args
#else
#define DBGS(args)
#endif

/* A collection of per-processor data.  */
struct cpuinfo_alpha cpu_data[NR_CPUS];

/* A collection of single bit ipi messages.  */
static struct {
        unsigned long bits __cacheline_aligned;
} ipi_data[NR_CPUS];

enum ipi_message_type {
        IPI_RESCHEDULE,
        IPI_CALL_FUNC,
        IPI_CPU_STOP,
};

spinlock_t kernel_flag __cacheline_aligned = SPIN_LOCK_UNLOCKED;

/* Set to a secondary's cpuid when it comes online.  */
static unsigned long smp_secondary_alive;

unsigned long cpu_present_mask;         /* Which cpu ids came online.  */

static int max_cpus = -1;               /* Command-line limitation.  */
int smp_boot_cpuid;                     /* Which processor we booted from.  */
int smp_num_probed;                     /* Internal processor count.  */
int smp_num_cpus = 1;                   /* Number that came online.  */
int smp_threads_ready;                  /* True once the per-processor idle is forked.  */
cycles_t cacheflush_time;

int cpu_number_map[NR_CPUS];
int __cpu_logical_map[NR_CPUS];

extern void calibrate_delay(void);
extern asmlinkage void entInt(void);

static int __init nosmp(char *str)
{
        max_cpus = 0;
        return 1;
}

__setup("nosmp", nosmp);

static int __init maxcpus(char *str)
{
        get_option(&str, &max_cpus);
        return 1;
}

__setup("maxcpus", maxcpus);

/*
 * Called by both boot and secondaries to move global data into
 * per-processor storage.
 */
static inline void __init
smp_store_cpu_info(int cpuid)
{
        cpu_data[cpuid].loops_per_sec = loops_per_sec;
        cpu_data[cpuid].last_asn
          = (cpuid << WIDTH_HARDWARE_ASN) + ASN_FIRST_VERSION;
        cpu_data[cpuid].irq_count = 0;
        cpu_data[cpuid].bh_count = 0;
}

/*
 * Ideally sets up per-cpu profiling hooks.  Doesn't do much now...
 */
static inline void __init
smp_setup_percpu_timer(int cpuid)
{
        cpu_data[cpuid].prof_counter = 1;
        cpu_data[cpuid].prof_multiplier = 1;
}

/*
 * Where secondaries begin a life of C.
 */
void __init
smp_callin(void)
{
        int cpuid = hard_smp_processor_id();

        DBGS(("CALLIN %d state 0x%lx\n", cpuid, current->state));

        /* Turn on machine checks.  */
        wrmces(7);

        /* Set trap vectors.  */
        trap_init();

        /* Set interrupt vector.  */
        wrent(entInt, 0);

        /* Setup the scheduler for this processor.  */
        init_idle();

        /* ??? This should be in init_idle.  */
        atomic_inc(&init_mm.mm_count);
        current->active_mm = &init_mm;

        /* Get our local ticker going.  */
        smp_setup_percpu_timer(cpuid);

        /* Must have completely accurate bogos.  */
        __sti();
        calibrate_delay();
        smp_store_cpu_info(cpuid);

        /* Allow master to continue.  */
        wmb();
        smp_secondary_alive = cpuid;

        /* Wait for the go code.  */
        while (!smp_threads_ready)
                barrier();

        DBGS(("smp_callin: commencing CPU %d current %p\n",
              cpuid, current));

        /* Do nothing.  */
        cpu_idle();
}

/*
 * Rough estimation for SMP scheduling, this is the number of cycles it
 * takes for a fully memory-limited process to flush the SMP-local cache.
 *
 * We are not told how much cache there is, so we have to guess.
 */
static void __init
smp_tune_scheduling (void)
{
        struct percpu_struct *cpu;
        unsigned long on_chip_cache;
        unsigned long freq;

        cpu = (struct percpu_struct*)((char*)hwrpb + hwrpb->processor_offset);
        switch (cpu->type)
        {
        case EV45_CPU:
                on_chip_cache = 16 + 16;
                break;

        case EV5_CPU:
        case EV56_CPU:
                on_chip_cache = 8 + 8 + 96;
                break;

        case PCA56_CPU:
                on_chip_cache = 16 + 8;
                break;

        case EV6_CPU:
                on_chip_cache = 64 + 64;
                break;

        default:
                on_chip_cache = 8 + 8;
                break;
        }

        freq = hwrpb->cycle_freq ? : est_cycle_freq;

        /* Magic estimation stolen from x86 port.  */
        cacheflush_time = freq / 1024 * on_chip_cache / 5000;
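
        /* Worked example with illustrative numbers (not read from any
           real HWRPB): an EV6 at 500 MHz has on_chip_cache = 64 + 64
           = 128 above, so cacheflush_time = 500000000 / 1024 * 128
           / 5000, roughly 12500 cycles.  */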
}

/*
 * Send a message to a secondary's console.  "START" is one such
 * interesting message.  ;-)
 */
static void
send_secondary_console_msg(char *str, int cpuid)
{
        struct percpu_struct *cpu;
        register char *cp1, *cp2;
        unsigned long cpumask;
        size_t len;
        long timeout;

        cpu = (struct percpu_struct *)
                ((char*)hwrpb
                 + hwrpb->processor_offset
                 + cpuid * hwrpb->processor_size);

        cpumask = (1L << cpuid);
        if (hwrpb->txrdy & cpumask)
                goto delay1;
ready1:

        cp2 = str;
        len = strlen(cp2);
        *(unsigned int *)&cpu->ipc_buffer[0] = len;
        cp1 = (char *) &cpu->ipc_buffer[1];
        memcpy(cp1, cp2, len);
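
        /* Kernel-to-console layout: the message length goes in the low
           32 bits of ipc_buffer[0] and the text starts at ipc_buffer[1].
           Replies from the console come back through the same per-CPU
           buffer and are picked up by recv_secondary_console_msg().  */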

        /* atomic test and set */
        wmb();
        set_bit(cpuid, &hwrpb->rxrdy);

        if (hwrpb->txrdy & cpumask)
                goto delay2;
ready2:
        return;

delay1:
        /* Wait one second.  Note that jiffies aren't ticking yet.  */
        for (timeout = 100000; timeout > 0; --timeout) {
                if (!(hwrpb->txrdy & cpumask))
                        goto ready1;
                udelay(10);
                barrier();
        }
        goto timeout;

delay2:
        /* Wait one second.  */
        for (timeout = 100000; timeout > 0; --timeout) {
                if (!(hwrpb->txrdy & cpumask))
                        goto ready2;
                udelay(10);
                barrier();
        }
        goto timeout;

timeout:
        printk("Processor %x not ready\n", cpuid);
        return;
}

/*
 * A secondary console wants to send a message.  Receive it.
 */
static void
recv_secondary_console_msg(void)
{
        int mycpu, i, cnt;
        unsigned long txrdy = hwrpb->txrdy;
        char *cp1, *cp2, buf[80];
        struct percpu_struct *cpu;

        DBGS(("recv_secondary_console_msg: TXRDY 0x%lx.\n", txrdy));

        mycpu = hard_smp_processor_id();

        for (i = 0; i < NR_CPUS; i++) {
                if (!(txrdy & (1L << i)))
                        continue;

                DBGS(("recv_secondary_console_msg: "
                      "TXRDY contains CPU %d.\n", i));

                cpu = (struct percpu_struct *)
                        ((char*)hwrpb
                         + hwrpb->processor_offset
                         + i * hwrpb->processor_size);

                DBGS(("recv_secondary_console_msg: on %d from %d"
                      " HALT_REASON 0x%lx FLAGS 0x%lx\n",
                      mycpu, i, cpu->halt_reason, cpu->flags));

                cnt = cpu->ipc_buffer[0] >> 32;
                if (cnt <= 0 || cnt >= 80)
                        strcpy(buf, "<<< BOGUS MSG >>>");
                else {
                        cp1 = (char *) &cpu->ipc_buffer[11];
                        cp2 = buf;
                        strcpy(cp2, cp1);

                        while ((cp2 = strchr(cp2, '\r')) != 0) {
                                *cp2 = ' ';
                                if (cp2[1] == '\n')
                                        cp2[1] = ' ';
                        }
                }

                printk(KERN_INFO "recv_secondary_console_msg: on %d "
                       "message is '%s'\n", mycpu, buf);
        }

        hwrpb->txrdy = 0;
}

/*
 * Convince the console to have a secondary cpu begin execution.
 */
static int __init
secondary_cpu_start(int cpuid, struct task_struct *idle)
{
        struct percpu_struct *cpu;
        struct pcb_struct *hwpcb;
        long timeout;

        cpu = (struct percpu_struct *)
                ((char*)hwrpb
                 + hwrpb->processor_offset
                 + cpuid * hwrpb->processor_size);
        hwpcb = (struct pcb_struct *) cpu->hwpcb;

        /* Initialize the CPU's HWPCB to something just good enough for
           us to get started.  Immediately after starting, we'll swpctx
           to the target idle task's ptb.  Reuse the stack in the mean
           time.  Precalculate the target PCBB.  */
        hwpcb->ksp = (unsigned long) idle + sizeof(union task_union) - 16;
        hwpcb->usp = 0;
        hwpcb->ptbr = idle->thread.ptbr;
        hwpcb->pcc = 0;
        hwpcb->asn = 0;
        hwpcb->unique = virt_to_phys(&idle->thread);
        hwpcb->flags = idle->thread.pal_flags;
        hwpcb->res1 = hwpcb->res2 = 0;

        DBGS(("KSP 0x%lx PTBR 0x%lx VPTBR 0x%lx UNIQUE 0x%lx\n",
              hwpcb->ksp, hwpcb->ptbr, hwrpb->vptb, hwpcb->unique));
        DBGS(("Starting secondary cpu %d: state 0x%lx pal_flags 0x%lx\n",
              cpuid, idle->state, idle->thread.pal_flags));

        /* Setup HWRPB fields that SRM uses to activate secondary CPU */
        hwrpb->CPU_restart = __smp_callin;
        hwrpb->CPU_restart_data = (unsigned long) __smp_callin;

        /* Recalculate and update the HWRPB checksum */
        hwrpb_update_checksum(hwrpb);

        /*
         * Send a "start" command to the specified processor.
         */

        /* SRM III 3.4.1.3 */
        cpu->flags |= 0x22;     /* turn on Context Valid and Restart Capable */
        cpu->flags &= ~1;       /* turn off Bootstrap In Progress */
        wmb();

        send_secondary_console_msg("START\r\n", cpuid);

        /* Wait 1 second for an ACK from the console.  Note that jiffies
           aren't ticking yet.  */
        for (timeout = 100000; timeout > 0; timeout--) {
                if (cpu->flags & 1)
                        goto started;
                udelay(10);
                barrier();
        }
        printk(KERN_ERR "SMP: Processor %d failed to start.\n", cpuid);
        return -1;

started:
        DBGS(("secondary_cpu_start: SUCCESS for CPU %d!!!\n", cpuid));
        return 0;
}

/*
 * Bring one cpu online.
 */
static int __init
smp_boot_one_cpu(int cpuid, int cpunum)
{
        struct task_struct *idle;
        long timeout;

        /* Cook up an idler for this guy.  Note that the address we give
           to kernel_thread is irrelevant -- it's going to start where
           HWRPB.CPU_restart says to start.  But this gets all the other
           task-y sort of data structures set up like we wish.  */
        kernel_thread((void *)__smp_callin, NULL, CLONE_PID|CLONE_VM);

        idle = init_task.prev_task;
        if (!idle)
                panic("No idle process for CPU %d", cpunum);
        del_from_runqueue(idle);
        init_tasks[cpunum] = idle;
        idle->processor = cpuid;

        /* Schedule the first task manually.  */
        /* ??? Ingo, what is this?  */
        idle->has_cpu = 1;

        DBGS(("smp_boot_one_cpu: CPU %d state 0x%lx flags 0x%lx\n",
              cpuid, idle->state, idle->flags));

        /* The secondary will change this once it is happy.  Note that
           secondary_cpu_start contains the necessary memory barrier.  */
        smp_secondary_alive = -1;

        /* Whirrr, whirrr, whirrrrrrrrr... */
        if (secondary_cpu_start(cpuid, idle))
                return -1;

        /* We've been acked by the console; wait one second for the task
           to start up for real.  Note that jiffies aren't ticking yet.  */
        for (timeout = 0; timeout < 100000; timeout++) {
                if (smp_secondary_alive != -1)
                        goto alive;
                udelay(10);
                barrier();
        }

        printk(KERN_ERR "SMP: Processor %d is stuck.\n", cpuid);
        return -1;

alive:
        /* Another "Red Snapper". */
        cpu_number_map[cpuid] = cpunum;
        __cpu_logical_map[cpunum] = cpuid;
        return 0;
}

/*
 * Called from setup_arch.  Detect an SMP system and which processors
 * are present.
 */
void __init
setup_smp(void)
{
        struct percpu_struct *cpubase, *cpu;
        int i;

        smp_boot_cpuid = hard_smp_processor_id();
        if (smp_boot_cpuid != 0) {
                printk(KERN_WARNING "SMP: Booting off cpu %d instead of 0?\n",
                       smp_boot_cpuid);
        }

        if (hwrpb->nr_processors > 1) {
                int boot_cpu_palrev;

                DBGS(("setup_smp: nr_processors %ld\n",
                      hwrpb->nr_processors));

                cpubase = (struct percpu_struct *)
                        ((char*)hwrpb + hwrpb->processor_offset);
                boot_cpu_palrev = cpubase->pal_revision;

                for (i = 0; i < hwrpb->nr_processors; i++ ) {
                        cpu = (struct percpu_struct *)
                                ((char *)cpubase + i*hwrpb->processor_size);
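
                        /* 0x1cc appears to select the HWRPB per-CPU status
                           bits for a processor that is present, available,
                           and has PALcode loaded and valid; see the Alpha
                           SRM/HWRPB specification for the exact bit
                           assignments.  */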
                        if ((cpu->flags & 0x1cc) == 0x1cc) {
                                smp_num_probed++;
                                /* Assume here that "whami" == index */
                                cpu_present_mask |= (1L << i);
                                cpu->pal_revision = boot_cpu_palrev;
                        }

                        DBGS(("setup_smp: CPU %d: flags 0x%lx type 0x%lx\n",
                              i, cpu->flags, cpu->type));
                        DBGS(("setup_smp: CPU %d: PAL rev 0x%lx\n",
                              i, cpu->pal_revision));
                }
        } else {
                smp_num_probed = 1;
                cpu_present_mask = (1L << smp_boot_cpuid);
        }

        printk(KERN_INFO "SMP: %d CPUs probed -- cpu_present_mask = %lx\n",
               smp_num_probed, cpu_present_mask);
}

/*
 * Called by smp_init to bring all the secondaries online and hold them.
 */
void __init
smp_boot_cpus(void)
{
        int cpu_count, i;
        unsigned long bogosum;

        /* Take care of some initial bookkeeping.  */
        memset(cpu_number_map, -1, sizeof(cpu_number_map));
        memset(__cpu_logical_map, -1, sizeof(__cpu_logical_map));
        memset(ipi_data, 0, sizeof(ipi_data));

        cpu_number_map[smp_boot_cpuid] = 0;
        __cpu_logical_map[0] = smp_boot_cpuid;
        current->processor = smp_boot_cpuid;

        smp_store_cpu_info(smp_boot_cpuid);
        smp_tune_scheduling();
        smp_setup_percpu_timer(smp_boot_cpuid);

        init_idle();

        /* ??? This should be in init_idle.  */
        atomic_inc(&init_mm.mm_count);
        current->active_mm = &init_mm;

        /* Nothing to do on a UP box, or when told not to.  */
        if (smp_num_probed == 1 || max_cpus == 0) {
                printk(KERN_INFO "SMP mode deactivated.\n");
                return;
        }

        printk(KERN_INFO "SMP starting up secondaries.\n");

        cpu_count = 1;
        for (i = 0; i < NR_CPUS; i++) {
                if (i == smp_boot_cpuid)
                        continue;

                if (((cpu_present_mask >> i) & 1) == 0)
                        continue;

                if (smp_boot_one_cpu(i, cpu_count))
                        continue;

                cpu_count++;
        }

        if (cpu_count == 1) {
                printk(KERN_ERR "SMP: Only one lonely processor alive.\n");
                return;
        }

        bogosum = 0;
        for (i = 0; i < NR_CPUS; i++) {
                if (cpu_present_mask & (1L << i))
                        bogosum += cpu_data[i].loops_per_sec;
        }
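
        /* loops_per_sec counts two-instruction delay loops, so BogoMIPS
           works out to loops_per_sec / 500000; the +2500 below just
           rounds the report to the nearest hundredth.  */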
        printk(KERN_INFO "SMP: Total of %d processors activated "
               "(%lu.%02lu BogoMIPS).\n",
               cpu_count, (bogosum + 2500) / 500000,
               ((bogosum + 2500) / 5000) % 100);

        smp_num_cpus = cpu_count;
}

/*
 * Called by smp_init to release the blocking online cpus once they
 * are all started.
 */
void __init
smp_commence(void)
{
        /* smp_init sets smp_threads_ready -- that's enough.  */
        mb();
}

/*
 * Only broken Intel needs this, thus it should not even be
 * referenced globally.
 */

void __init
initialize_secondary(void)
{
}

extern void update_one_process(struct task_struct *p, unsigned long ticks,
                               unsigned long user, unsigned long system,
                               int cpu);

void
smp_percpu_timer_interrupt(struct pt_regs *regs)
{
        int cpu = smp_processor_id();
        unsigned long user = user_mode(regs);
        struct cpuinfo_alpha *data = &cpu_data[cpu];

        /* Record kernel PC.  */
        if (!user)
                alpha_do_profile(regs->pc);

        if (!--data->prof_counter) {
                /* We need to make like a normal interrupt -- otherwise
                   timer interrupts ignore the global interrupt lock,
                   which would be a Bad Thing.  */
                irq_enter(cpu, TIMER_IRQ);

                update_one_process(current, 1, user, !user, cpu);
                if (current->pid) {
                        if (--current->counter <= 0) {
                                current->counter = 0;
                                current->need_resched = 1;
                        }

                        if (user) {
                                if (current->priority < DEF_PRIORITY) {
                                        kstat.cpu_nice++;
                                        kstat.per_cpu_nice[cpu]++;
                                } else {
                                        kstat.cpu_user++;
                                        kstat.per_cpu_user[cpu]++;
                                }
                        } else {
                                kstat.cpu_system++;
                                kstat.per_cpu_system[cpu]++;
                        }
                }

                data->prof_counter = data->prof_multiplier;
                irq_exit(cpu, TIMER_IRQ);
        }
}

int __init
setup_profiling_timer(unsigned int multiplier)
{
        return -EINVAL;
}

static void
send_ipi_message(unsigned long to_whom, enum ipi_message_type operation)
{
        long i, j;

        /* Reduce the number of memory barriers by doing two loops,
           one to set the bits, one to invoke the interrupts.  */

        mb();   /* Order out-of-band data and bit setting. */

        for (i = 0, j = 1; i < NR_CPUS; ++i, j <<= 1) {
                if (to_whom & j)
                        set_bit(operation, &ipi_data[i].bits);
        }

        mb();   /* Order bit setting and interrupt. */

        for (i = 0, j = 1; i < NR_CPUS; ++i, j <<= 1) {
                if (to_whom & j)
                        wripir(i);
        }
}

/* Structure and data for smp_call_function.  This is designed to
   minimize static memory requirements.  Plus it looks cleaner.  */

struct smp_call_struct {
        void (*func) (void *info);
        void *info;
        long wait;
        atomic_t unstarted_count;
        atomic_t unfinished_count;
};

static struct smp_call_struct *smp_call_function_data;

/* Atomically drop data into a shared pointer.  The pointer is free if
   it is initially NULL.  If retry, spin until free.  */

static inline int
pointer_lock (void *lock, void *data, int retry)
{
        void *old, *tmp;

        mb();
again:
        /* Compare and swap with zero.  */
        asm volatile (
        "1:     ldq_l   %0,%1\n"
        "       mov     %3,%2\n"
        "       bne     %0,2f\n"
        "       stq_c   %2,%1\n"
        "       beq     %2,1b\n"
        "2:"
        : "=&r"(old), "=m"(*(void **)lock), "=&r"(tmp)
        : "r"(data)
        : "memory");
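
        /* The ldq_l/stq_c pair above is a load-locked/store-conditional
           compare-and-swap against zero: if the current value is zero,
           it tries to store `data`; if the store loses the reservation
           it retries from 1:.  On exit `old` holds the value that was
           observed in the pointer.  */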

        if (old == 0)
                return 0;
        if (! retry)
                return -EBUSY;

        while (*(void **)lock)
                schedule();
        goto again;
}

void
handle_ipi(struct pt_regs *regs)
{
        int this_cpu = smp_processor_id();
        unsigned long *pending_ipis = &ipi_data[this_cpu].bits;
        unsigned long ops;

        DBGS(("handle_ipi: on CPU %d ops 0x%x PC 0x%lx\n",
              this_cpu, *pending_ipis, regs->pc));

        mb();   /* Order interrupt and bit testing. */
        while ((ops = xchg(pending_ipis, 0)) != 0) {
                mb();   /* Order bit clearing and data access. */
                do {
                        unsigned long which;

                        which = ops & -ops;
                        ops &= ~which;
                        which = ffz(~which);
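
                        /* `ops & -ops` isolates the lowest set bit, that
                           bit is then cleared from `ops`, and ffz(~which)
                           converts the isolated bit back into its index,
                           i.e. the ipi_message_type value.  */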

                        if (which == IPI_RESCHEDULE) {
                                /* Reschedule callback.  Everything to be done
                                   is done by the interrupt return path.  */
                        }
                        else if (which == IPI_CALL_FUNC) {
                                struct smp_call_struct *data;
                                void (*func)(void *info);
                                void *info;
                                int wait;

                                data = smp_call_function_data;
                                func = data->func;
                                info = data->info;
                                wait = data->wait;

                                /* Notify the sending CPU that the data has been
                                   received, and execution is about to begin.  */
                                mb();
                                atomic_dec (&data->unstarted_count);

                                /* At this point the structure may be gone unless
                                   wait is true.  */
                                (*func)(info);

                                /* Notify the sending CPU that the task is done.  */
                                mb();
                                if (wait) atomic_dec (&data->unfinished_count);
                        }
                        else if (which == IPI_CPU_STOP) {
                                halt();
                        }
                        else {
                                printk(KERN_CRIT "Unknown IPI on CPU %d: %lu\n",
                                       this_cpu, which);
                        }
                } while (ops);

                mb();   /* Order data access and bit testing. */
        }

        cpu_data[this_cpu].ipi_count++;

        if (hwrpb->txrdy)
                recv_secondary_console_msg();
}

void
smp_send_reschedule(int cpu)
{
#if DEBUG_IPI_MSG
        if (cpu == hard_smp_processor_id())
                printk(KERN_WARNING
                       "smp_send_reschedule: Sending IPI to self.\n");
#endif
        send_ipi_message(1L << cpu, IPI_RESCHEDULE);
}

void
smp_send_stop(void)
{
        unsigned long to_whom = cpu_present_mask ^ (1L << smp_processor_id());
#if DEBUG_IPI_MSG
        if (hard_smp_processor_id() != boot_cpu_id)
                printk(KERN_WARNING "smp_send_stop: Not on boot cpu.\n");
#endif
        send_ipi_message(to_whom, IPI_CPU_STOP);
}

/*
 * Run a function on all other CPUs.
 *  <func>      The function to run.  This must be fast and non-blocking.
 *  <info>      An arbitrary pointer to pass to the function.
 *  <retry>     If true, keep retrying until ready.
 *  <wait>      If true, wait until function has completed on other CPUs.
 *  [RETURNS]   0 on success, else a negative status code.
 *
 * Does not return until remote CPUs are nearly ready to execute <func>
 * or have already executed it.
 */
int
smp_call_function (void (*func) (void *info), void *info, int retry, int wait)
{
        unsigned long to_whom = cpu_present_mask ^ (1L << smp_processor_id());
        struct smp_call_struct data;
        long timeout;

        data.func = func;
        data.info = info;
        data.wait = wait;
        atomic_set(&data.unstarted_count, smp_num_cpus - 1);
        atomic_set(&data.unfinished_count, smp_num_cpus - 1);
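
        /* Each target CPU decrements unstarted_count as soon as it has
           picked up the call data, and decrements unfinished_count (only
           when wait is set) once func has returned; see the IPI_CALL_FUNC
           case in handle_ipi above.  */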

        /* Acquire the smp_call_function_data mutex.  */
        if (pointer_lock(&smp_call_function_data, &data, retry))
                return -EBUSY;

        /* Send a message to all other CPUs.  */
        send_ipi_message(to_whom, IPI_CALL_FUNC);

        /* Wait for a minimal response.  */
        timeout = jiffies + HZ;
        while (atomic_read (&data.unstarted_count) > 0
               && time_before (jiffies, timeout))
                barrier();

        /* We either got one or timed out -- clear the lock.  */
        mb();
        smp_call_function_data = 0;
        if (atomic_read (&data.unstarted_count) > 0)
                return -ETIMEDOUT;

        /* Wait for a complete response, if needed.  */
        if (wait) {
                while (atomic_read (&data.unfinished_count) > 0)
                        barrier();
        }

        return 0;
}

static void
ipi_flush_tlb_all(void *ignored)
{
        tbia();
}

void
flush_tlb_all(void)
{
        /* Although we don't have any data to pass, we do want to
           synchronize with the other processors.  */
        if (smp_call_function(ipi_flush_tlb_all, NULL, 1, 1)) {
                printk(KERN_CRIT "flush_tlb_all: timed out\n");
        }

        tbia();
}

static void
ipi_flush_tlb_mm(void *x)
{
        struct mm_struct *mm = (struct mm_struct *) x;
        if (mm == current->active_mm)
                flush_tlb_current(mm);
}

void
flush_tlb_mm(struct mm_struct *mm)
{
        if (mm == current->active_mm) {
                flush_tlb_current(mm);
                if (atomic_read(&mm->mm_users) <= 1)
                        return;
        } else
                flush_tlb_other(mm);

        if (smp_call_function(ipi_flush_tlb_mm, mm, 1, 1)) {
                printk(KERN_CRIT "flush_tlb_mm: timed out\n");
        }
}

struct flush_tlb_page_struct {
        struct vm_area_struct *vma;
        struct mm_struct *mm;
        unsigned long addr;
};

static void
ipi_flush_tlb_page(void *x)
{
        struct flush_tlb_page_struct *data = (struct flush_tlb_page_struct *)x;
        if (data->mm == current->active_mm)
                flush_tlb_current_page(data->mm, data->vma, data->addr);
}

void
flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
{
        struct flush_tlb_page_struct data;
        struct mm_struct *mm = vma->vm_mm;

        if (mm == current->active_mm) {
                flush_tlb_current_page(mm, vma, addr);
                if (atomic_read(&mm->mm_users) <= 1)
                        return;
        } else
                flush_tlb_other(mm);

        data.vma = vma;
        data.mm = mm;
        data.addr = addr;

        if (smp_call_function(ipi_flush_tlb_page, &data, 1, 1)) {
                printk(KERN_CRIT "flush_tlb_page: timed out\n");
        }
}

void
flush_tlb_range(struct mm_struct *mm, unsigned long start, unsigned long end)
{
        /* On the Alpha we always flush the whole user tlb.  */
        flush_tlb_mm(mm);
}

int
smp_info(char *buffer)
{
        long i;
        unsigned long sum = 0;
        for (i = 0; i < NR_CPUS; i++)
                sum += cpu_data[i].ipi_count;

        return sprintf(buffer, "CPUs probed %d active %d map 0x%lx IPIs %ld\n",
                       smp_num_probed, smp_num_cpus, cpu_present_mask, sum);
}

#if DEBUG_SPINLOCK
void
spin_unlock(spinlock_t * lock)
{
        mb();
        lock->lock = 0;

        lock->on_cpu = -1;
        lock->previous = NULL;
        lock->task = NULL;
        lock->base_file = "none";
        lock->line_no = 0;
}

void
debug_spin_lock(spinlock_t * lock, const char *base_file, int line_no)
{
        long tmp;
        long stuck;
        void *inline_pc = __builtin_return_address(0);
        unsigned long started = jiffies;
        int printed = 0;
        int cpu = smp_processor_id();

        stuck = 1L << 28;
try_again:

        /* Use sub-sections to put the actual loop at the end
           of this object file's text section so as to perfect
           branch prediction.  */
        __asm__ __volatile__(
        "1:     ldl_l   %0,%1\n"
        "       subq    %2,1,%2\n"
        "       blbs    %0,2f\n"
        "       or      %0,1,%0\n"
        "       stl_c   %0,%1\n"
        "       beq     %0,3f\n"
        "4:     mb\n"
        ".section .text2,\"ax\"\n"
        "2:     ldl     %0,%1\n"
        "       subq    %2,1,%2\n"
        "3:     blt     %2,4b\n"
        "       blbs    %0,2b\n"
        "       br      1b\n"
        ".previous"
        : "=r" (tmp), "=m" (__dummy_lock(lock)), "=r" (stuck)
        : "1" (__dummy_lock(lock)), "2" (stuck));

        if (stuck < 0) {
                printk(KERN_WARNING
                       "%s:%d spinlock stuck in %s at %p(%d)"
                       " owner %s at %p(%d) %s:%d\n",
                       base_file, line_no,
                       current->comm, inline_pc, cpu,
                       lock->task->comm, lock->previous,
                       lock->on_cpu, lock->base_file, lock->line_no);
                stuck = 1L << 36;
                printed = 1;
                goto try_again;
        }

        /* Exiting.  Got the lock.  */
        lock->on_cpu = cpu;
        lock->previous = inline_pc;
        lock->task = current;
        lock->base_file = base_file;
        lock->line_no = line_no;

        if (printed) {
                printk(KERN_WARNING
                       "%s:%d spinlock grabbed in %s at %p(%d) %ld ticks\n",
                       base_file, line_no, current->comm, inline_pc,
                       cpu, jiffies - started);
        }
}

int
debug_spin_trylock(spinlock_t * lock, const char *base_file, int line_no)
{
        int ret;
        if ((ret = !test_and_set_bit(0, lock))) {
                lock->on_cpu = smp_processor_id();
                lock->previous = __builtin_return_address(0);
                lock->task = current;
        } else {
                lock->base_file = base_file;
                lock->line_no = line_no;
        }
        return ret;
}
#endif /* DEBUG_SPINLOCK */

#if DEBUG_RWLOCK
void write_lock(rwlock_t * lock)
{
        long regx, regy;
        int stuck_lock, stuck_reader;
        void *inline_pc = __builtin_return_address(0);

try_again:

        stuck_lock = 1<<26;
        stuck_reader = 1<<26;

        __asm__ __volatile__(
        "1:     ldl_l   %1,%0\n"
        "       blbs    %1,6f\n"
        "       blt     %1,8f\n"
        "       mov     1,%1\n"
        "       stl_c   %1,%0\n"
        "       beq     %1,6f\n"
        "4:     mb\n"
        ".section .text2,\"ax\"\n"
        "6:     blt     %3,4b   # debug\n"
        "       subl    %3,1,%3 # debug\n"
        "       ldl     %1,%0\n"
        "       blbs    %1,6b\n"
        "8:     blt     %4,4b   # debug\n"
        "       subl    %4,1,%4 # debug\n"
        "       ldl     %1,%0\n"
        "       blt     %1,8b\n"
        "       br      1b\n"
        ".previous"
        : "=m" (__dummy_lock(lock)), "=&r" (regx), "=&r" (regy),
          "=&r" (stuck_lock), "=&r" (stuck_reader)
        : "0" (__dummy_lock(lock)), "3" (stuck_lock), "4" (stuck_reader));

        if (stuck_lock < 0) {
                printk(KERN_WARNING "write_lock stuck at %p\n", inline_pc);
                goto try_again;
        }
        if (stuck_reader < 0) {
                printk(KERN_WARNING "write_lock stuck on readers at %p\n",
                       inline_pc);
                goto try_again;
        }
}

void read_lock(rwlock_t * lock)
{
        long regx;
        int stuck_lock;
        void *inline_pc = __builtin_return_address(0);

try_again:

        stuck_lock = 1<<26;

        __asm__ __volatile__(
        "1:     ldl_l   %1,%0;"
        "       blbs    %1,6f;"
        "       subl    %1,2,%1;"
        "       stl_c   %1,%0;"
        "       beq     %1,6f;"
        "4:     mb\n"
        ".section .text2,\"ax\"\n"
        "6:     ldl     %1,%0;"
        "       blt     %2,4b   # debug\n"
        "       subl    %2,1,%2 # debug\n"
        "       blbs    %1,6b;"
        "       br      1b\n"
        ".previous"
        : "=m" (__dummy_lock(lock)), "=&r" (regx), "=&r" (stuck_lock)
        : "0" (__dummy_lock(lock)), "2" (stuck_lock));

        if (stuck_lock < 0) {
                printk(KERN_WARNING "read_lock stuck at %p\n", inline_pc);
                goto try_again;
        }
}
#endif /* DEBUG_RWLOCK */