/*
 *	linux/arch/alpha/kernel/smp.c
 *
 *      2001-07-09 Phil Ezolt (Phillip.Ezolt@compaq.com)
 *            Renamed modified smp_call_function to smp_call_function_on_cpu()
 *            Created a function that conforms to the old calling convention
 *            of smp_call_function().
 *
 *      This is helpful for DCPI.
 */
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/threads.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/irq.h>
#include <linux/cache.h>
#include <linux/profile.h>

#include <asm/hwrpb.h>
#include <asm/ptrace.h>
#include <asm/atomic.h>

#include <asm/io.h>
#include <asm/irq.h>
#include <asm/bitops.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>

#include "proto.h"
#include "irq_impl.h"

#define DEBUG_SMP 0
#if DEBUG_SMP
#define DBGS(args)	printk args
#else
#define DBGS(args)
#endif
/* A collection of per-processor data.  */
struct cpuinfo_alpha cpu_data[NR_CPUS];

/* A collection of single bit ipi messages.  */
static struct {
	unsigned long bits ____cacheline_aligned;
} ipi_data[NR_CPUS] __cacheline_aligned;
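
/* Note (editorial gloss, not in the original): the alignment attributes
   give each CPU's bit word its own cache line, so senders setting bits
   for different CPUs do not false-share and bounce a single line
   between caches.  */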

enum ipi_message_type {
	IPI_RESCHEDULE,
	IPI_CALL_FUNC,
	IPI_CPU_STOP,
};

/* Set to a secondary's cpuid when it comes online.  */
static int smp_secondary_alive __initdata = 0;

/* Which cpu ids came online.  */
cpumask_t cpu_present_mask;
cpumask_t cpu_online_map;

EXPORT_SYMBOL(cpu_online_map);

/* cpus reported in the hwrpb */
static unsigned long hwrpb_cpu_present_mask __initdata = 0;

int smp_num_probed;		/* Internal processor count */
int smp_num_cpus = 1;		/* Number that came online.  */
cycles_t cacheflush_time;
unsigned long cache_decay_ticks;

extern void calibrate_delay(void);

/*
 * Called by both boot and secondaries to move global data into
 * per-processor storage.
 */
static inline void __init
smp_store_cpu_info(int cpuid)
{
	cpu_data[cpuid].loops_per_jiffy = loops_per_jiffy;
	cpu_data[cpuid].last_asn = ASN_FIRST_VERSION;
	cpu_data[cpuid].need_new_asn = 0;
	cpu_data[cpuid].asn_lock = 0;
}

/*
 * Ideally sets up per-cpu profiling hooks.  Doesn't do much now...
 */
static inline void __init
smp_setup_percpu_timer(int cpuid)
{
	cpu_data[cpuid].prof_counter = 1;
	cpu_data[cpuid].prof_multiplier = 1;
}

static void __init
wait_boot_cpu_to_stop(int cpuid)
{
	unsigned long stop = jiffies + 10*HZ;

	while (time_before(jiffies, stop)) {
		if (!smp_secondary_alive)
			return;
		barrier();
	}

	printk("wait_boot_cpu_to_stop: FAILED on CPU %d, hanging now\n", cpuid);
	for (;;)
		barrier();
}

/*
 * Where secondaries begin a life of C.
 */
void __init
smp_callin(void)
{
	int cpuid = hard_smp_processor_id();

	if (cpu_test_and_set(cpuid, cpu_online_map)) {
		printk("??, cpu 0x%x already present??\n", cpuid);
		BUG();
	}

	/* Turn on machine checks.  */
	wrmces(7);

	/* Set trap vectors.  */
	trap_init();

	/* Set interrupt vector.  */
	wrent(entInt, 0);

	/* Get our local ticker going.  */
	smp_setup_percpu_timer(cpuid);

	/* Call platform-specific callin, if specified.  */
	if (alpha_mv.smp_callin)
		alpha_mv.smp_callin();

	/* All kernel threads share the same mm context.  */
	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;

	/* Must have completely accurate bogos.  */
	local_irq_enable();

	/* Wait for the boot CPU to stop, with irqs enabled, before
	   running calibrate_delay.  */
	wait_boot_cpu_to_stop(cpuid);
	mb();
	calibrate_delay();

	smp_store_cpu_info(cpuid);
	/* Allow the master to continue only after we've written
	   loops_per_jiffy.  */
	wmb();
	smp_secondary_alive = 1;

	DBGS(("smp_callin: commencing CPU %d current %p active_mm %p\n",
	      cpuid, current, current->active_mm));

	/* Do nothing.  */
	cpu_idle();
}

/*
 * Rough estimation for SMP scheduling; this is the number of cycles it
 * takes for a fully memory-limited process to flush the SMP-local cache.
 *
 * We are not told how much cache there is, so we have to guess.
 */
static void __init
smp_tune_scheduling (int cpuid)
{
	struct percpu_struct *cpu;
	unsigned long on_chip_cache;	/* kB */
	unsigned long freq;		/* Hz */
	unsigned long bandwidth = 350;	/* MB/s */

	cpu = (struct percpu_struct*)((char*)hwrpb + hwrpb->processor_offset
				      + cpuid * hwrpb->processor_size);
	switch (cpu->type)
	{
	case EV45_CPU:
		on_chip_cache = 16 + 16;
		break;

	case EV5_CPU:
	case EV56_CPU:
		on_chip_cache = 8 + 8 + 96;
		break;

	case PCA56_CPU:
		on_chip_cache = 16 + 8;
		break;

	case EV6_CPU:
	case EV67_CPU:
	default:
		on_chip_cache = 64 + 64;
		break;
	}

	freq = hwrpb->cycle_freq ? : est_cycle_freq;
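
	/* Units gloss (editorial, not in the original source):
	   freq/1000000 is cycles per usec, on_chip_cache << 10 is bytes,
	   and bandwidth in MB/s is bytes per usec, so the division below
	   yields a flush time in usecs and the multiplication converts
	   it to cycles.  E.g. a hypothetical 500 MHz EV6 with 128 KB on
	   chip: 131072 / 350 ~= 374 usec, i.e. ~187000 cycles.  */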
	cacheflush_time = (freq / 1000000) * (on_chip_cache << 10) / bandwidth;
	cache_decay_ticks = cacheflush_time / (freq / 1000) * HZ / 1000;

	printk("per-CPU timeslice cutoff: %ld.%02ld usecs.\n",
	       cacheflush_time/(freq/1000000),
	       (cacheflush_time*100/(freq/1000000)) % 100);
	printk("task migration cache decay timeout: %ld msecs.\n",
	       (cache_decay_ticks + 1) * 1000 / HZ);
}

/* Wait until hwrpb->txrdy is clear for cpu.  Return -1 on timeout.  */
static int __init
wait_for_txrdy (unsigned long cpumask)
{
	unsigned long timeout;

	if (!(hwrpb->txrdy & cpumask))
		return 0;

	timeout = jiffies + 10*HZ;
	while (time_before(jiffies, timeout)) {
		if (!(hwrpb->txrdy & cpumask))
			return 0;
		udelay(10);
		barrier();
	}

	return -1;
}

/*
 * Send a message to a secondary's console.  "START" is one such
 * interesting message.  ;-)
 */
static void __init
send_secondary_console_msg(char *str, int cpuid)
{
	struct percpu_struct *cpu;
	register char *cp1, *cp2;
	unsigned long cpumask;
	size_t len;

	cpu = (struct percpu_struct *)
		((char*)hwrpb
		 + hwrpb->processor_offset
		 + cpuid * hwrpb->processor_size);

	cpumask = (1UL << cpuid);
	if (wait_for_txrdy(cpumask))
		goto timeout;

	cp2 = str;
	len = strlen(cp2);
	*(unsigned int *)&cpu->ipc_buffer[0] = len;
	cp1 = (char *) &cpu->ipc_buffer[1];
	memcpy(cp1, cp2, len);

	/* atomic test and set */
	wmb();
	set_bit(cpuid, &hwrpb->rxrdy);

	if (wait_for_txrdy(cpumask))
		goto timeout;
	return;

 timeout:
	printk("Processor %x not ready\n", cpuid);
}
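
/* Handshake note (editorial, not in the original source): rxrdy is set
   by the OS to tell a secondary's console that a message waits in its
   ipc_buffer, while txrdy is set by a secondary console when it has
   data for us; waiting for the txrdy bit to clear before and after
   writing presumably keeps us from clobbering a buffer the console is
   still using.  */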

/*
 * A secondary console wants to send a message.  Receive it.
 */
static void
recv_secondary_console_msg(void)
{
	int mycpu, i, cnt;
	unsigned long txrdy = hwrpb->txrdy;
	char *cp1, *cp2, buf[80];
	struct percpu_struct *cpu;

	DBGS(("recv_secondary_console_msg: TXRDY 0x%lx.\n", txrdy));

	mycpu = hard_smp_processor_id();

	for (i = 0; i < NR_CPUS; i++) {
		if (!(txrdy & (1UL << i)))
			continue;

		DBGS(("recv_secondary_console_msg: "
		      "TXRDY contains CPU %d.\n", i));

		cpu = (struct percpu_struct *)
			((char*)hwrpb
			 + hwrpb->processor_offset
			 + i * hwrpb->processor_size);

		DBGS(("recv_secondary_console_msg: on %d from %d"
		      " HALT_REASON 0x%lx FLAGS 0x%lx\n",
		      mycpu, i, cpu->halt_reason, cpu->flags));

		cnt = cpu->ipc_buffer[0] >> 32;
		if (cnt <= 0 || cnt >= 80)
			strcpy(buf, "<<< BOGUS MSG >>>");
		else {
			cp1 = (char *) &cpu->ipc_buffer[11];
			cp2 = buf;
			strcpy(cp2, cp1);

			while ((cp2 = strchr(cp2, '\r')) != 0) {
				*cp2 = ' ';
				if (cp2[1] == '\n')
					cp2[1] = ' ';
			}
		}

		DBGS((KERN_INFO "recv_secondary_console_msg: on %d "
		      "message is '%s'\n", mycpu, buf));
	}

	hwrpb->txrdy = 0;
}

/*
 * Convince the console to have a secondary cpu begin execution.
 */
static int __init
secondary_cpu_start(int cpuid, struct task_struct *idle)
{
	struct percpu_struct *cpu;
	struct pcb_struct *hwpcb, *ipcb;
	unsigned long timeout;

	cpu = (struct percpu_struct *)
		((char*)hwrpb
		 + hwrpb->processor_offset
		 + cpuid * hwrpb->processor_size);
	hwpcb = (struct pcb_struct *) cpu->hwpcb;
	ipcb = &idle->thread_info->pcb;

	/* Initialize the CPU's HWPCB to something just good enough for
	   us to get started.  Immediately after starting, we'll swpctx
	   to the target idle task's pcb.  Reuse the stack in the mean
	   time.  Precalculate the target PCBB.  */
	hwpcb->ksp = (unsigned long)ipcb + sizeof(union thread_union) - 16;
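	/* Note (editorial): ipcb sits at the base of the idle task's
	   thread_union, so the line above computes the top of its kernel
	   stack, less a small scratch margin.  */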
	hwpcb->usp = 0;
	hwpcb->ptbr = ipcb->ptbr;
	hwpcb->pcc = 0;
	hwpcb->asn = 0;
	hwpcb->unique = virt_to_phys(ipcb);
	hwpcb->flags = ipcb->flags;
	hwpcb->res1 = hwpcb->res2 = 0;

#if 0
	DBGS(("KSP 0x%lx PTBR 0x%lx VPTBR 0x%lx UNIQUE 0x%lx\n",
	      hwpcb->ksp, hwpcb->ptbr, hwrpb->vptb, hwpcb->unique));
#endif
	DBGS(("Starting secondary cpu %d: state 0x%lx pal_flags 0x%lx\n",
	      cpuid, idle->state, ipcb->flags));

	/* Setup HWRPB fields that SRM uses to activate secondary CPU */
	hwrpb->CPU_restart = __smp_callin;
	hwrpb->CPU_restart_data = (unsigned long) __smp_callin;

	/* Recalculate and update the HWRPB checksum */
	hwrpb_update_checksum(hwrpb);

	/*
	 * Send a "start" command to the specified processor.
	 */

	/* SRM III 3.4.1.3 */
	cpu->flags |= 0x22;	/* turn on Context Valid and Restart Capable */
	cpu->flags &= ~1;	/* turn off Bootstrap In Progress */
	wmb();

	send_secondary_console_msg("START\r\n", cpuid);

	/* Wait 10 seconds for an ACK from the console.  */
	timeout = jiffies + 10*HZ;
	while (time_before(jiffies, timeout)) {
		if (cpu->flags & 1)
			goto started;
		udelay(10);
		barrier();
	}
	printk(KERN_ERR "SMP: Processor %d failed to start.\n", cpuid);
	return -1;

 started:
	DBGS(("secondary_cpu_start: SUCCESS for CPU %d!!!\n", cpuid));
	return 0;
}

/*
 * Bring one cpu online.
 */
static int __init
smp_boot_one_cpu(int cpuid)
{
	struct task_struct *idle;
	unsigned long timeout;

	/* Cook up an idler for this guy.  Note that the address we
	   give to kernel_thread is irrelevant -- it's going to start
	   where HWRPB.CPU_restart says to start.  But this gets all
	   the other task-y sort of data structures set up like we
	   wish.  We can't use kernel_thread since we must avoid
	   rescheduling the child.  */
	idle = fork_idle(cpuid);
	if (IS_ERR(idle))
		panic("failed fork for CPU %d", cpuid);

	DBGS(("smp_boot_one_cpu: CPU %d state 0x%lx flags 0x%lx\n",
	      cpuid, idle->state, idle->flags));

	/* Signal the secondary to wait a moment.  */
	smp_secondary_alive = -1;

	/* Whirrr, whirrr, whirrrrrrrrr... */
	if (secondary_cpu_start(cpuid, idle))
		return -1;

	/* Notify the secondary CPU it can run calibrate_delay.  */
	mb();
	smp_secondary_alive = 0;

	/* We've been acked by the console; wait one second for
	   the task to start up for real.  */
	timeout = jiffies + 1*HZ;
	while (time_before(jiffies, timeout)) {
		if (smp_secondary_alive == 1)
			goto alive;
		udelay(10);
		barrier();
	}

	/* We failed to boot the CPU.  */

	printk(KERN_ERR "SMP: Processor %d is stuck.\n", cpuid);
	return -1;

 alive:
	/* Another "Red Snapper". */
	return 0;
}
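
/* Handshake summary (editorial gloss): smp_secondary_alive cycles
   through -1 (secondary must wait), 0 (boot CPU quiet, secondary may
   run calibrate_delay), and 1 (secondary fully up);
   wait_boot_cpu_to_stop() and the polling loop above are the two ends
   of that protocol.  */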

/*
 * Called from setup_arch.  Detect an SMP system and which processors
 * are present.
 */
void __init
setup_smp(void)
{
	struct percpu_struct *cpubase, *cpu;
	unsigned long i;

	if (boot_cpuid != 0) {
		printk(KERN_WARNING "SMP: Booting off cpu %d instead of 0?\n",
		       boot_cpuid);
	}

	if (hwrpb->nr_processors > 1) {
		int boot_cpu_palrev;

		DBGS(("setup_smp: nr_processors %ld\n",
		      hwrpb->nr_processors));

		cpubase = (struct percpu_struct *)
			((char*)hwrpb + hwrpb->processor_offset);
		boot_cpu_palrev = cpubase->pal_revision;

		for (i = 0; i < hwrpb->nr_processors; i++) {
			cpu = (struct percpu_struct *)
				((char *)cpubase + i*hwrpb->processor_size);
			if ((cpu->flags & 0x1cc) == 0x1cc) {
				smp_num_probed++;
				/* Assume here that "whami" == index */
				hwrpb_cpu_present_mask |= (1UL << i);
				cpu->pal_revision = boot_cpu_palrev;
			}

			DBGS(("setup_smp: CPU %d: flags 0x%lx type 0x%lx\n",
			      i, cpu->flags, cpu->type));
			DBGS(("setup_smp: CPU %d: PAL rev 0x%lx\n",
			      i, cpu->pal_revision));
		}
	} else {
		smp_num_probed = 1;
		hwrpb_cpu_present_mask = (1UL << boot_cpuid);
	}
	cpu_present_mask = cpumask_of_cpu(boot_cpuid);

	printk(KERN_INFO "SMP: %d CPUs probed -- cpu_present_mask = %lx\n",
	       smp_num_probed, hwrpb_cpu_present_mask);
}

/*
 * Called by smp_init to prepare the secondaries.
 */
void __init
smp_prepare_cpus(unsigned int max_cpus)
{
	int cpu_count, i;

	/* Take care of some initial bookkeeping.  */
	memset(ipi_data, 0, sizeof(ipi_data));

	current_thread_info()->cpu = boot_cpuid;

	smp_store_cpu_info(boot_cpuid);
	smp_tune_scheduling(boot_cpuid);
	smp_setup_percpu_timer(boot_cpuid);

	/* Nothing to do on a UP box, or when told not to.  */
	if (smp_num_probed == 1 || max_cpus == 0) {
		cpu_present_mask = cpumask_of_cpu(boot_cpuid);
		printk(KERN_INFO "SMP mode deactivated.\n");
		return;
	}

	printk(KERN_INFO "SMP starting up secondaries.\n");

	cpu_count = 1;
	for (i = 0; (i < NR_CPUS) && (cpu_count < max_cpus); i++) {
		if (i == boot_cpuid)
			continue;

		if (((hwrpb_cpu_present_mask >> i) & 1) == 0)
			continue;

		cpu_set(i, cpu_possible_map);
		cpu_count++;
	}

	smp_num_cpus = cpu_count;
}

void __devinit
smp_prepare_boot_cpu(void)
{
	/*
	 * Mark the boot cpu (current cpu) as both present and online
	 */
	cpu_set(smp_processor_id(), cpu_present_mask);
	cpu_set(smp_processor_id(), cpu_online_map);
}

int __devinit
__cpu_up(unsigned int cpu)
{
	smp_boot_one_cpu(cpu);

	return cpu_online(cpu) ? 0 : -ENOSYS;
}

void __init
smp_cpus_done(unsigned int max_cpus)
{
	int cpu;
	unsigned long bogosum = 0;

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		if (cpu_online(cpu))
			bogosum += cpu_data[cpu].loops_per_jiffy;

	printk(KERN_INFO "SMP: Total of %d processors activated "
	       "(%lu.%02lu BogoMIPS).\n",
	       num_online_cpus(),
	       (bogosum + 2500) / (500000/HZ),
	       ((bogosum + 2500) / (5000/HZ)) % 100);
}
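
/* Arithmetic gloss (editorial): BogoMIPS == loops_per_jiffy * HZ /
   500000, so bogosum / (500000/HZ) is the integer part and the second
   expression recovers two decimal digits; the +2500 term provides
   rounding.  */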

void
smp_percpu_timer_interrupt(struct pt_regs *regs)
{
	int cpu = smp_processor_id();
	unsigned long user = user_mode(regs);
	struct cpuinfo_alpha *data = &cpu_data[cpu];

	/* Record kernel PC.  */
	profile_tick(CPU_PROFILING, regs);

	if (!--data->prof_counter) {
		/* We need to make like a normal interrupt -- otherwise
		   timer interrupts ignore the global interrupt lock,
		   which would be a Bad Thing.  */
		irq_enter();

		update_process_times(user);

		data->prof_counter = data->prof_multiplier;

		irq_exit();
	}
}

int __init
setup_profiling_timer(unsigned int multiplier)
{
	return -EINVAL;
}

static void
send_ipi_message(cpumask_t to_whom, enum ipi_message_type operation)
{
	int i;

	mb();
	for_each_cpu_mask(i, to_whom)
		set_bit(operation, &ipi_data[i].bits);

	mb();
	for_each_cpu_mask(i, to_whom)
		wripir(i);
}
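
/* Ordering note (editorial): the first mb() makes any data the caller
   prepared visible before the message bit is set; the second makes the
   bit visible before wripir() raises the actual interprocessor
   interrupt via PALcode, so the target's handle_ipi() is guaranteed to
   see the bit.  */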

/* Structure and data for smp_call_function.  This is designed to
   minimize static memory requirements.  Plus it looks cleaner.  */

struct smp_call_struct {
	void (*func) (void *info);
	void *info;
	long wait;
	atomic_t unstarted_count;
	atomic_t unfinished_count;
};

static struct smp_call_struct *smp_call_function_data;

/* Atomically drop data into a shared pointer.  The pointer is free if
   it is initially zero.  If retry, spin until free.  */

static int
pointer_lock (void *lock, void *data, int retry)
{
	void *old, *tmp;

	mb();
 again:
	/* Compare and swap with zero.  */
	asm volatile (
	"1:	ldq_l	%0,%1\n"
	"	mov	%3,%2\n"
	"	bne	%0,2f\n"
	"	stq_c	%2,%1\n"
	"	beq	%2,1b\n"
	"2:"
	: "=&r"(old), "=m"(*(void **)lock), "=&r"(tmp)
	: "r"(data)
	: "memory");
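
	/* Note (editorial): ldq_l/stq_c is Alpha's load-locked/
	   store-conditional pair.  stq_c writes %2 only if nothing else
	   touched the location since the ldq_l, leaving %2 zero on
	   failure, which the beq retries.  A non-zero old value skips to
	   2f, and the C code below decides whether to spin and retry.  */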
	if (old == 0)
		return 0;
	if (! retry)
		return -EBUSY;

	while (*(void **)lock)
		barrier();
	goto again;
}

void
handle_ipi(struct pt_regs *regs)
{
	int this_cpu = smp_processor_id();
	unsigned long *pending_ipis = &ipi_data[this_cpu].bits;
	unsigned long ops;

#if 0
	DBGS(("handle_ipi: on CPU %d ops 0x%lx PC 0x%lx\n",
	      this_cpu, *pending_ipis, regs->pc));
#endif

	mb();	/* Order interrupt and bit testing. */
	while ((ops = xchg(pending_ipis, 0)) != 0) {
		mb();	/* Order bit clearing and data access. */
		do {
			unsigned long which;
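
			/* Note (editorial): extract one pending message
			   at a time.  ops & -ops isolates the lowest set
			   bit, and __ffs() turns it into the message
			   number, so each pass services one IPI type.  */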
			which = ops & -ops;
			ops &= ~which;
			which = __ffs(which);

			switch (which) {
			case IPI_RESCHEDULE:
				/* Reschedule callback.  Everything to be
				   done is done by the interrupt return
				   path.  */
				break;

			case IPI_CALL_FUNC:
			    {
				struct smp_call_struct *data;
				void (*func)(void *info);
				void *info;
				int wait;

				data = smp_call_function_data;
				func = data->func;
				info = data->info;
				wait = data->wait;

				/* Notify the sending CPU that the data has
				   been received, and execution is about to
				   begin.  */
				mb();
				atomic_dec (&data->unstarted_count);

				/* At this point the structure may be gone
				   unless wait is true.  */
				(*func)(info);

				/* Notify the sending CPU that the task is
				   done.  */
				mb();
				if (wait) atomic_dec (&data->unfinished_count);
				break;
			    }

			case IPI_CPU_STOP:
				halt();

			default:
				printk(KERN_CRIT "Unknown IPI on CPU %d: %lu\n",
				       this_cpu, which);
				break;
			}
		} while (ops);

		mb();	/* Order data access and bit testing. */
	}

	cpu_data[this_cpu].ipi_count++;

	if (hwrpb->txrdy)
		recv_secondary_console_msg();
}

void
smp_send_reschedule(int cpu)
{
#ifdef DEBUG_IPI_MSG
	if (cpu == hard_smp_processor_id())
		printk(KERN_WARNING
		       "smp_send_reschedule: Sending IPI to self.\n");
#endif
	send_ipi_message(cpumask_of_cpu(cpu), IPI_RESCHEDULE);
}

void
smp_send_stop(void)
{
	cpumask_t to_whom = cpu_possible_map;
	cpu_clear(smp_processor_id(), to_whom);
#ifdef DEBUG_IPI_MSG
	if (hard_smp_processor_id() != boot_cpu_id)
		printk(KERN_WARNING "smp_send_stop: Not on boot cpu.\n");
#endif
	send_ipi_message(to_whom, IPI_CPU_STOP);
}

/*
 * Run a function on all other CPUs.
 *  <func>	The function to run.  This must be fast and non-blocking.
 *  <info>	An arbitrary pointer to pass to the function.
 *  <retry>	If true, keep retrying until ready.
 *  <wait>	If true, wait until function has completed on other CPUs.
 *  [RETURNS]	0 on success, else a negative status code.
 *
 * Does not return until remote CPUs are nearly ready to execute <func>
 * or have executed it.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.
 */
int
smp_call_function_on_cpu (void (*func) (void *info), void *info, int retry,
			  int wait, cpumask_t to_whom)
{
	struct smp_call_struct data;
	unsigned long timeout;
	int num_cpus_to_call;

	/* Can deadlock when called with interrupts disabled */
	WARN_ON(irqs_disabled());

	data.func = func;
	data.info = info;
	data.wait = wait;

	cpu_clear(smp_processor_id(), to_whom);
	num_cpus_to_call = cpus_weight(to_whom);

	atomic_set(&data.unstarted_count, num_cpus_to_call);
	atomic_set(&data.unfinished_count, num_cpus_to_call);

	/* Acquire the smp_call_function_data mutex.  */
	if (pointer_lock(&smp_call_function_data, &data, retry))
		return -EBUSY;

	/* Send a message to the requested CPUs.  */
	send_ipi_message(to_whom, IPI_CALL_FUNC);

	/* Wait for a minimal response.  */
	timeout = jiffies + HZ;
	while (atomic_read (&data.unstarted_count) > 0
	       && time_before (jiffies, timeout))
		barrier();

	/* If there's no response yet, log a message but allow a longer
	 * timeout period -- if we get a response this time, log
	 * a message saying when we got it.
	 */
	if (atomic_read(&data.unstarted_count) > 0) {
		long start_time = jiffies;
		printk(KERN_ERR "%s: initial timeout -- trying long wait\n",
		       __FUNCTION__);
		timeout = jiffies + 30 * HZ;
		while (atomic_read(&data.unstarted_count) > 0
		       && time_before(jiffies, timeout))
			barrier();
		if (atomic_read(&data.unstarted_count) <= 0) {
			long delta = jiffies - start_time;
			printk(KERN_ERR
			       "%s: response %ld.%ld seconds into long wait\n",
			       __FUNCTION__, delta / HZ,
			       (100 * (delta - ((delta / HZ) * HZ))) / HZ);
		}
	}

	/* We either got one or timed out -- clear the lock.  */
	mb();
	smp_call_function_data = NULL;

	/*
	 * If after both the initial and long timeout periods we still don't
	 * have a response, something is very wrong...
	 */
	BUG_ON(atomic_read (&data.unstarted_count) > 0);

	/* Wait for a complete response, if needed.  */
	if (wait) {
		while (atomic_read (&data.unfinished_count) > 0)
			barrier();
	}

	return 0;
}

int
smp_call_function (void (*func) (void *info), void *info, int retry, int wait)
{
	return smp_call_function_on_cpu (func, info, retry, wait,
					 cpu_online_map);
}

static void
ipi_imb(void *ignored)
{
	imb();
}

void
smp_imb(void)
{
	/* Must wait for the other processors to flush their icache
	   before continuing.  */
	if (on_each_cpu(ipi_imb, NULL, 1, 1))
		printk(KERN_CRIT "smp_imb: timed out\n");
}

static void
ipi_flush_tlb_all(void *ignored)
{
	tbia();
}

void
flush_tlb_all(void)
{
	/* Although we don't have any data to pass, we do want to
	   synchronize with the other processors.  */
	if (on_each_cpu(ipi_flush_tlb_all, NULL, 1, 1)) {
		printk(KERN_CRIT "flush_tlb_all: timed out\n");
	}
}

#define asn_locked() (cpu_data[smp_processor_id()].asn_lock)

static void
ipi_flush_tlb_mm(void *x)
{
	struct mm_struct *mm = (struct mm_struct *) x;
	if (mm == current->active_mm && !asn_locked())
		flush_tlb_current(mm);
	else
		flush_tlb_other(mm);
}

void
flush_tlb_mm(struct mm_struct *mm)
{
	preempt_disable();

	if (mm == current->active_mm) {
		flush_tlb_current(mm);
		if (atomic_read(&mm->mm_users) <= 1) {
			int cpu, this_cpu = smp_processor_id();
			for (cpu = 0; cpu < NR_CPUS; cpu++) {
				if (!cpu_online(cpu) || cpu == this_cpu)
					continue;
				if (mm->context[cpu])
					mm->context[cpu] = 0;
			}
			preempt_enable();
			return;
		}
	}

	if (smp_call_function(ipi_flush_tlb_mm, mm, 1, 1)) {
		printk(KERN_CRIT "flush_tlb_mm: timed out\n");
	}

	preempt_enable();
}
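
/* Design note (editorial): when no other thread shares the mm, the
   cross-CPU IPI is skipped entirely.  Zeroing mm->context[cpu] instead
   forces those CPUs to allocate a fresh ASN if they ever reactivate
   this mm, which implicitly discards their stale TLB entries.  The
   same trick appears in flush_tlb_page() and flush_icache_user_range()
   below.  */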

struct flush_tlb_page_struct {
	struct vm_area_struct *vma;
	struct mm_struct *mm;
	unsigned long addr;
};

static void
ipi_flush_tlb_page(void *x)
{
	struct flush_tlb_page_struct *data = (struct flush_tlb_page_struct *)x;
	struct mm_struct * mm = data->mm;

	if (mm == current->active_mm && !asn_locked())
		flush_tlb_current_page(mm, data->vma, data->addr);
	else
		flush_tlb_other(mm);
}

void
flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
{
	struct flush_tlb_page_struct data;
	struct mm_struct *mm = vma->vm_mm;

	preempt_disable();

	if (mm == current->active_mm) {
		flush_tlb_current_page(mm, vma, addr);
		if (atomic_read(&mm->mm_users) <= 1) {
			int cpu, this_cpu = smp_processor_id();
			for (cpu = 0; cpu < NR_CPUS; cpu++) {
				if (!cpu_online(cpu) || cpu == this_cpu)
					continue;
				if (mm->context[cpu])
					mm->context[cpu] = 0;
			}
			preempt_enable();
			return;
		}
	}

	data.vma = vma;
	data.mm = mm;
	data.addr = addr;

	if (smp_call_function(ipi_flush_tlb_page, &data, 1, 1)) {
		printk(KERN_CRIT "flush_tlb_page: timed out\n");
	}

	preempt_enable();
}

void
flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	/* On the Alpha we always flush the whole user tlb.  */
	flush_tlb_mm(vma->vm_mm);
}

static void
ipi_flush_icache_page(void *x)
{
	struct mm_struct *mm = (struct mm_struct *) x;
	if (mm == current->active_mm && !asn_locked())
		__load_new_mm_context(mm);
	else
		flush_tlb_other(mm);
}

void
flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
			unsigned long addr, int len)
{
	struct mm_struct *mm = vma->vm_mm;

	if ((vma->vm_flags & VM_EXEC) == 0)
		return;

	preempt_disable();

	if (mm == current->active_mm) {
		__load_new_mm_context(mm);
		if (atomic_read(&mm->mm_users) <= 1) {
			int cpu, this_cpu = smp_processor_id();
			for (cpu = 0; cpu < NR_CPUS; cpu++) {
				if (!cpu_online(cpu) || cpu == this_cpu)
					continue;
				if (mm->context[cpu])
					mm->context[cpu] = 0;
			}
			preempt_enable();
			return;
		}
	}

	if (smp_call_function(ipi_flush_icache_page, mm, 1, 1)) {
		printk(KERN_CRIT "flush_icache_page: timed out\n");
	}

	preempt_enable();
}

#ifdef CONFIG_DEBUG_SPINLOCK
void
_raw_spin_unlock(spinlock_t * lock)
{
	mb();
	lock->lock = 0;

	lock->on_cpu = -1;
	lock->previous = NULL;
	lock->task = NULL;
	lock->base_file = "none";
	lock->line_no = 0;
}

void
debug_spin_lock(spinlock_t * lock, const char *base_file, int line_no)
{
	long tmp;
	long stuck;
	void *inline_pc = __builtin_return_address(0);
	unsigned long started = jiffies;
	int printed = 0;
	int cpu = smp_processor_id();

	stuck = 1L << 30;
 try_again:

	/* Use sub-sections to put the actual loop at the end
	   of this object file's text section so as to perfect
	   branch prediction.  */
	__asm__ __volatile__(
	"1:	ldl_l	%0,%1\n"
	"	subq	%2,1,%2\n"
	"	blbs	%0,2f\n"
	"	or	%0,1,%0\n"
	"	stl_c	%0,%1\n"
	"	beq	%0,3f\n"
	"4:	mb\n"
	".subsection 2\n"
	"2:	ldl	%0,%1\n"
	"	subq	%2,1,%2\n"
	"3:	blt	%2,4b	# debug\n"
	"	blbs	%0,2b\n"
	"	br	1b\n"
	".previous"
	: "=r" (tmp), "=m" (lock->lock), "=r" (stuck)
	: "1" (lock->lock), "2" (stuck) : "memory");

	if (stuck < 0) {
		printk(KERN_WARNING
		       "%s:%d spinlock stuck in %s at %p(%d)"
		       " owner %s at %p(%d) %s:%d\n",
		       base_file, line_no,
		       current->comm, inline_pc, cpu,
		       lock->task->comm, lock->previous,
		       lock->on_cpu, lock->base_file, lock->line_no);
		stuck = 1L << 36;
		printed = 1;
		goto try_again;
	}

	/* Exiting.  Got the lock.  */
	lock->on_cpu = cpu;
	lock->previous = inline_pc;
	lock->task = current;
	lock->base_file = base_file;
	lock->line_no = line_no;

	if (printed) {
		printk(KERN_WARNING
		       "%s:%d spinlock grabbed in %s at %p(%d) %ld ticks\n",
		       base_file, line_no, current->comm, inline_pc,
		       cpu, jiffies - started);
	}
}
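
/* Note (editorial): "stuck" is a countdown of roughly 2^30 spin
   iterations.  When it goes negative, the asm falls out at label 4,
   the current owner is reported, and the wait resumes with a larger
   budget (2^36).  */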

int
debug_spin_trylock(spinlock_t * lock, const char *base_file, int line_no)
{
	int ret;
	if ((ret = !test_and_set_bit(0, lock))) {
		lock->on_cpu = smp_processor_id();
		lock->previous = __builtin_return_address(0);
		lock->task = current;
	} else {
		lock->base_file = base_file;
		lock->line_no = line_no;
	}
	return ret;
}
#endif /* CONFIG_DEBUG_SPINLOCK */

#ifdef CONFIG_DEBUG_RWLOCK
void _raw_write_lock(rwlock_t * lock)
{
	long regx, regy;
	int stuck_lock, stuck_reader;
	void *inline_pc = __builtin_return_address(0);

 try_again:

	stuck_lock = 1<<30;
	stuck_reader = 1<<30;

	__asm__ __volatile__(
	"1:	ldl_l	%1,%0\n"
	"	blbs	%1,6f\n"
	"	blt	%1,8f\n"
	"	mov	1,%1\n"
	"	stl_c	%1,%0\n"
	"	beq	%1,6f\n"
	"4:	mb\n"
	".subsection 2\n"
	"6:	blt	%3,4b	# debug\n"
	"	subl	%3,1,%3	# debug\n"
	"	ldl	%1,%0\n"
	"	blbs	%1,6b\n"
	"8:	blt	%4,4b	# debug\n"
	"	subl	%4,1,%4	# debug\n"
	"	ldl	%1,%0\n"
	"	blt	%1,8b\n"
	"	br	1b\n"
	".previous"
	: "=m" (*(volatile int *)lock), "=&r" (regx), "=&r" (regy),
	  "=&r" (stuck_lock), "=&r" (stuck_reader)
	: "0" (*(volatile int *)lock), "3" (stuck_lock), "4" (stuck_reader)
	: "memory");

	if (stuck_lock < 0) {
		printk(KERN_WARNING "write_lock stuck at %p\n", inline_pc);
		goto try_again;
	}
	if (stuck_reader < 0) {
		printk(KERN_WARNING "write_lock stuck on readers at %p\n",
		       inline_pc);
		goto try_again;
	}
}
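
/* Encoding note (editorial): in this debug rwlock the low bit is the
   writer flag and each reader subtracts 2, driving the word negative;
   hence blbs waits on a writer and blt on outstanding readers, both
   here and in _raw_read_lock() below.  */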

void _raw_read_lock(rwlock_t * lock)
{
	long regx;
	int stuck_lock;
	void *inline_pc = __builtin_return_address(0);

 try_again:

	stuck_lock = 1<<30;

	__asm__ __volatile__(
	"1:	ldl_l	%1,%0;"
	"	blbs	%1,6f;"
	"	subl	%1,2,%1;"
	"	stl_c	%1,%0;"
	"	beq	%1,6f;"
	"4:	mb\n"
	".subsection 2\n"
	"6:	ldl	%1,%0;"
	"	blt	%2,4b	# debug\n"
	"	subl	%2,1,%2	# debug\n"
	"	blbs	%1,6b;"
	"	br	1b\n"
	".previous"
	: "=m" (*(volatile int *)lock), "=&r" (regx), "=&r" (stuck_lock)
	: "0" (*(volatile int *)lock), "2" (stuck_lock)
	: "memory");

	if (stuck_lock < 0) {
		printk(KERN_WARNING "read_lock stuck at %p\n", inline_pc);
		goto try_again;
	}
}
#endif /* CONFIG_DEBUG_RWLOCK */