/*
 * linux/arch/alpha/kernel/smp.c
 *
 * 2001-07-09 Phil Ezolt (Phillip.Ezolt@compaq.com)
 *            Renamed modified smp_call_function to smp_call_function_on_cpu()
 *            Created a function that conforms to the old calling convention
 *            of smp_call_function().
 *
 *            This is helpful for DCPI.
 */
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/threads.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/irq.h>
#include <linux/cache.h>
#include <linux/profile.h>
#include <linux/bitops.h>

#include <asm/hwrpb.h>
#include <asm/ptrace.h>
#include <asm/atomic.h>

#include <asm/io.h>
#include <asm/irq.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>

#include "proto.h"
#include "irq_impl.h"

#define DEBUG_SMP 0
#if DEBUG_SMP
#define DBGS(args)      printk args
#else
#define DBGS(args)
#endif
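/* Note the double parentheses in DBGS((...)) at the call sites below:
   they let a full printk argument list pass through the macro's single
   parameter.  */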
/* A collection of per-processor data.  */
struct cpuinfo_alpha cpu_data[NR_CPUS];
EXPORT_SYMBOL(cpu_data);

/* A collection of single bit ipi messages.  */
static struct {
        unsigned long bits ____cacheline_aligned;
} ipi_data[NR_CPUS] __cacheline_aligned;
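/* The ____cacheline_aligned member gives each CPU's IPI word its own
   cache line.  Senders write into the target CPU's entry, so packing
   several entries into one line would just bounce that line between
   the senders' caches.  */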
enum ipi_message_type {
        IPI_RESCHEDULE,
        IPI_CALL_FUNC,
        IPI_CPU_STOP,
};
/* Set to a secondary's cpuid when it comes online.  */
static int smp_secondary_alive __devinitdata = 0;

/* Which cpu ids came online.  */
cpumask_t cpu_online_map;

EXPORT_SYMBOL(cpu_online_map);

int smp_num_probed;             /* Internal processor count */
int smp_num_cpus = 1;           /* Number that came online.  */
EXPORT_SYMBOL(smp_num_cpus);

extern void calibrate_delay(void);
/*
 * Called by both boot and secondaries to move global data into
 * per-processor storage.
 */
static inline void __init
smp_store_cpu_info(int cpuid)
{
        cpu_data[cpuid].loops_per_jiffy = loops_per_jiffy;
        cpu_data[cpuid].last_asn = ASN_FIRST_VERSION;
        cpu_data[cpuid].need_new_asn = 0;
        cpu_data[cpuid].asn_lock = 0;
}
/*
 * Ideally sets up per-cpu profiling hooks.  Doesn't do much now...
 */
static inline void __init
smp_setup_percpu_timer(int cpuid)
{
        cpu_data[cpuid].prof_counter = 1;
        cpu_data[cpuid].prof_multiplier = 1;
}
static void __init
wait_boot_cpu_to_stop(int cpuid)
{
        unsigned long stop = jiffies + 10*HZ;

        while (time_before(jiffies, stop)) {
                if (!smp_secondary_alive)
                        return;
                barrier();
        }

        printk("wait_boot_cpu_to_stop: FAILED on CPU %d, hanging now\n", cpuid);
        for (;;)
                barrier();
}
/*
 * Where secondaries begin a life of C.
 */
void __init
smp_callin(void)
{
        int cpuid = hard_smp_processor_id();

        if (cpu_test_and_set(cpuid, cpu_online_map)) {
                printk("??, cpu 0x%x already present??\n", cpuid);
                BUG();
        }

        /* Turn on machine checks.  */
        wrmces(7);

        /* Set trap vectors.  */
        trap_init();

        /* Set interrupt vector.  */
        wrent(entInt, 0);

        /* Get our local ticker going.  */
        smp_setup_percpu_timer(cpuid);

        /* Call platform-specific callin, if specified.  */
        if (alpha_mv.smp_callin) alpha_mv.smp_callin();

        /* All kernel threads share the same mm context.  */
        atomic_inc(&init_mm.mm_count);
        current->active_mm = &init_mm;

        /* Must have completely accurate bogos.  */
        local_irq_enable();

        /* Wait for the boot CPU to stop, with irqs enabled, before
           running calibrate_delay.  */
        wait_boot_cpu_to_stop(cpuid);
        mb();
        calibrate_delay();

        smp_store_cpu_info(cpuid);
        /* Allow the master to continue only after we have written
           loops_per_jiffy.  */
        wmb();
        smp_secondary_alive = 1;

        DBGS(("smp_callin: commencing CPU %d current %p active_mm %p\n",
              cpuid, current, current->active_mm));

        /* Do nothing.  */
        cpu_idle();
}
/* Wait until hwrpb->txrdy is clear for cpu.  Return -1 on timeout.  */
static int __devinit
wait_for_txrdy (unsigned long cpumask)
{
        unsigned long timeout;

        if (!(hwrpb->txrdy & cpumask))
                return 0;

        timeout = jiffies + 10*HZ;
        while (time_before(jiffies, timeout)) {
                if (!(hwrpb->txrdy & cpumask))
                        return 0;
                udelay(10);
                barrier();
        }

        return -1;
}
/*
 * Send a message to a secondary's console.  "START" is one such
 * interesting message.  ;-)
 */
static void __init
send_secondary_console_msg(char *str, int cpuid)
{
        struct percpu_struct *cpu;
        register char *cp1, *cp2;
        unsigned long cpumask;
        size_t len;

        cpu = (struct percpu_struct *)
                ((char*)hwrpb
                 + hwrpb->processor_offset
                 + cpuid * hwrpb->processor_size);

        cpumask = (1UL << cpuid);
        if (wait_for_txrdy(cpumask))
                goto timeout;

        cp2 = str;
        len = strlen(cp2);
        *(unsigned int *)&cpu->ipc_buffer[0] = len;
        cp1 = (char *) &cpu->ipc_buffer[1];
        memcpy(cp1, cp2, len);

        /* atomic test and set */
        wmb();
        set_bit(cpuid, &hwrpb->rxrdy);

        if (wait_for_txrdy(cpumask))
                goto timeout;
        return;

 timeout:
        printk("Processor %x not ready\n", cpuid);
}
/*
 * A secondary console wants to send a message.  Receive it.
 */
static void
recv_secondary_console_msg(void)
{
        int mycpu, i, cnt;
        unsigned long txrdy = hwrpb->txrdy;
        char *cp1, *cp2, buf[80];
        struct percpu_struct *cpu;

        DBGS(("recv_secondary_console_msg: TXRDY 0x%lx.\n", txrdy));

        mycpu = hard_smp_processor_id();

        for (i = 0; i < NR_CPUS; i++) {
                if (!(txrdy & (1UL << i)))
                        continue;

                DBGS(("recv_secondary_console_msg: "
                      "TXRDY contains CPU %d.\n", i));

                cpu = (struct percpu_struct *)
                        ((char*)hwrpb
                         + hwrpb->processor_offset
                         + i * hwrpb->processor_size);

                DBGS(("recv_secondary_console_msg: on %d from %d"
                      " HALT_REASON 0x%lx FLAGS 0x%lx\n",
                      mycpu, i, cpu->halt_reason, cpu->flags));

                cnt = cpu->ipc_buffer[0] >> 32;
                if (cnt <= 0 || cnt >= 80)
                        strcpy(buf, "<<< BOGUS MSG >>>");
                else {
                        cp1 = (char *) &cpu->ipc_buffer[11];
                        cp2 = buf;
                        strcpy(cp2, cp1);

                        while ((cp2 = strchr(cp2, '\r')) != 0) {
                                *cp2 = ' ';
                                if (cp2[1] == '\n')
                                        cp2[1] = ' ';
                        }
                }

                DBGS((KERN_INFO "recv_secondary_console_msg: on %d "
                      "message is '%s'\n", mycpu, buf));
        }

        hwrpb->txrdy = 0;
}
/*
 * Convince the console to have a secondary cpu begin execution.
 */
static int __init
secondary_cpu_start(int cpuid, struct task_struct *idle)
{
        struct percpu_struct *cpu;
        struct pcb_struct *hwpcb, *ipcb;
        unsigned long timeout;

        cpu = (struct percpu_struct *)
                ((char*)hwrpb
                 + hwrpb->processor_offset
                 + cpuid * hwrpb->processor_size);
        hwpcb = (struct pcb_struct *) cpu->hwpcb;
        ipcb = &task_thread_info(idle)->pcb;

        /* Initialize the CPU's HWPCB to something just good enough for
           us to get started.  Immediately after starting, we'll swpctx
           to the target idle task's pcb.  Reuse the stack in the mean
           time.  Precalculate the target PCBB.  */
        hwpcb->ksp = (unsigned long)ipcb + sizeof(union thread_union) - 16;
        hwpcb->usp = 0;
        hwpcb->ptbr = ipcb->ptbr;
        hwpcb->pcc = 0;
        hwpcb->asn = 0;
        hwpcb->unique = virt_to_phys(ipcb);
        hwpcb->flags = ipcb->flags;
        hwpcb->res1 = hwpcb->res2 = 0;

#if 0
        DBGS(("KSP 0x%lx PTBR 0x%lx VPTBR 0x%lx UNIQUE 0x%lx\n",
              hwpcb->ksp, hwpcb->ptbr, hwrpb->vptb, hwpcb->unique));
#endif
        DBGS(("Starting secondary cpu %d: state 0x%lx pal_flags 0x%lx\n",
              cpuid, idle->state, ipcb->flags));

        /* Set up the HWRPB fields that SRM uses to activate the secondary
           CPU.  */
        hwrpb->CPU_restart = __smp_callin;
        hwrpb->CPU_restart_data = (unsigned long) __smp_callin;

        /* Recalculate and update the HWRPB checksum.  */
        hwrpb_update_checksum(hwrpb);

        /*
         * Send a "start" command to the specified processor.
         */

        /* SRM III 3.4.1.3 */
        cpu->flags |= 0x22;     /* turn on Context Valid and Restart Capable */
        cpu->flags &= ~1;       /* turn off Bootstrap In Progress */
        wmb();

        send_secondary_console_msg("START\r\n", cpuid);

        /* Wait 10 seconds for an ACK from the console.  */
        timeout = jiffies + 10*HZ;
        while (time_before(jiffies, timeout)) {
                if (cpu->flags & 1)
                        goto started;
                udelay(10);
                barrier();
        }

        printk(KERN_ERR "SMP: Processor %d failed to start.\n", cpuid);
        return -1;

 started:
        DBGS(("secondary_cpu_start: SUCCESS for CPU %d!!!\n", cpuid));
        return 0;
}
/*
 * Bring one cpu online.
 */
static int __cpuinit
smp_boot_one_cpu(int cpuid)
{
        struct task_struct *idle;
        unsigned long timeout;

        /* Cook up an idler for this guy.  Note that the address we
           give to kernel_thread is irrelevant -- it's going to start
           where HWRPB.CPU_restart says to start.  But this gets all
           the other task-y sort of data structures set up like we
           wish.  We can't use kernel_thread since we must avoid
           rescheduling the child.  */
        idle = fork_idle(cpuid);
        if (IS_ERR(idle))
                panic("failed fork for CPU %d", cpuid);

        DBGS(("smp_boot_one_cpu: CPU %d state 0x%lx flags 0x%lx\n",
              cpuid, idle->state, idle->flags));

        /* Signal the secondary to wait a moment.  */
        smp_secondary_alive = -1;

        /* Whirrr, whirrr, whirrrrrrrrr... */
        if (secondary_cpu_start(cpuid, idle))
                return -1;

        /* Notify the secondary CPU it can run calibrate_delay.  */
        mb();
        smp_secondary_alive = 0;

        /* We've been acked by the console; wait one second for
           the task to start up for real.  */
        timeout = jiffies + 1*HZ;
        while (time_before(jiffies, timeout)) {
                if (smp_secondary_alive == 1)
                        goto alive;
                udelay(10);
                barrier();
        }

        /* We failed to boot the CPU.  */

        printk(KERN_ERR "SMP: Processor %d is stuck.\n", cpuid);
        return -1;

 alive:
        /* Another "Red Snapper". */
        return 0;
}
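/* To summarize the handshake driven above: the boot CPU sets
   smp_secondary_alive to -1 before starting the secondary and to 0
   once the console ACKs; the secondary spins in wait_boot_cpu_to_stop
   until it reads 0, runs calibrate_delay, then stores 1 to report in.  */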
/*
 * Called from setup_arch.  Detect an SMP system and which processors
 * are present.
 */
void __init
setup_smp(void)
{
        struct percpu_struct *cpubase, *cpu;
        unsigned long i;

        if (boot_cpuid != 0) {
                printk(KERN_WARNING "SMP: Booting off cpu %d instead of 0?\n",
                       boot_cpuid);
        }

        if (hwrpb->nr_processors > 1) {
                int boot_cpu_palrev;

                DBGS(("setup_smp: nr_processors %ld\n",
                      hwrpb->nr_processors));

                cpubase = (struct percpu_struct *)
                        ((char*)hwrpb + hwrpb->processor_offset);
                boot_cpu_palrev = cpubase->pal_revision;

                for (i = 0; i < hwrpb->nr_processors; i++) {
                        cpu = (struct percpu_struct *)
                                ((char *)cpubase + i*hwrpb->processor_size);
                        if ((cpu->flags & 0x1cc) == 0x1cc) {
                                smp_num_probed++;
                                cpu_set(i, cpu_present_map);
                                cpu->pal_revision = boot_cpu_palrev;
                        }

                        DBGS(("setup_smp: CPU %d: flags 0x%lx type 0x%lx\n",
                              i, cpu->flags, cpu->type));
                        DBGS(("setup_smp: CPU %d: PAL rev 0x%lx\n",
                              i, cpu->pal_revision));
                }
        } else {
                smp_num_probed = 1;
        }

        printk(KERN_INFO "SMP: %d CPUs probed -- cpu_present_map = %lx\n",
               smp_num_probed, cpu_present_map.bits[0]);
}
/*
 * Called by smp_init to prepare the secondaries.
 */
void __init
smp_prepare_cpus(unsigned int max_cpus)
{
        /* Take care of some initial bookkeeping.  */
        memset(ipi_data, 0, sizeof(ipi_data));

        current_thread_info()->cpu = boot_cpuid;

        smp_store_cpu_info(boot_cpuid);
        smp_setup_percpu_timer(boot_cpuid);

        /* Nothing to do on a UP box, or when told not to.  */
        if (smp_num_probed == 1 || max_cpus == 0) {
                cpu_present_map = cpumask_of_cpu(boot_cpuid);
                printk(KERN_INFO "SMP mode deactivated.\n");
                return;
        }

        printk(KERN_INFO "SMP starting up secondaries.\n");

        smp_num_cpus = smp_num_probed;
}
void __devinit
smp_prepare_boot_cpu(void)
{
}

int __cpuinit
__cpu_up(unsigned int cpu)
{
        smp_boot_one_cpu(cpu);

        return cpu_online(cpu) ? 0 : -ENOSYS;
}
void __init
smp_cpus_done(unsigned int max_cpus)
{
        int cpu;
        unsigned long bogosum = 0;

        for (cpu = 0; cpu < NR_CPUS; cpu++)
                if (cpu_online(cpu))
                        bogosum += cpu_data[cpu].loops_per_jiffy;

        printk(KERN_INFO "SMP: Total of %d processors activated "
               "(%lu.%02lu BogoMIPS).\n",
               num_online_cpus(),
               (bogosum + 2500) / (500000/HZ),
               ((bogosum + 2500) / (5000/HZ)) % 100);
}
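/* For reference: a BogoMIPS is 500000 delay loops per second, so the
   total above works out to bogosum * HZ / 500000.  The two printk
   operands compute the integer part and the first two decimal places
   of that value.  */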
void
smp_percpu_timer_interrupt(struct pt_regs *regs)
{
        struct pt_regs *old_regs;
        int cpu = smp_processor_id();
        unsigned long user = user_mode(regs);
        struct cpuinfo_alpha *data = &cpu_data[cpu];

        old_regs = set_irq_regs(regs);

        /* Record kernel PC.  */
        profile_tick(CPU_PROFILING);

        if (!--data->prof_counter) {
                /* We need to make like a normal interrupt -- otherwise
                   timer interrupts ignore the global interrupt lock,
                   which would be a Bad Thing.  */
                irq_enter();

                update_process_times(user);

                data->prof_counter = data->prof_multiplier;

                irq_exit();
        }
        set_irq_regs(old_regs);
}
int
setup_profiling_timer(unsigned int multiplier)
{
        return -EINVAL;
}
static void
send_ipi_message(cpumask_t to_whom, enum ipi_message_type operation)
{
        int i;

        mb();
        for_each_cpu_mask(i, to_whom)
                set_bit(operation, &ipi_data[i].bits);

        mb();
        for_each_cpu_mask(i, to_whom)
                wripir(i);
}
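/* The first mb() above publishes the caller's payload (e.g. the
   smp_call_struct) before the IPI bit can be observed; the second
   orders the bit setting ahead of the wripir interrupt.  These pair
   with the mb() calls on the receiving side in handle_ipi.  */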
/* Structure and data for smp_call_function.  This is designed to
   minimize static memory requirements.  Plus it looks cleaner.  */

struct smp_call_struct {
        void (*func) (void *info);
        void *info;
        long wait;
        atomic_t unstarted_count;
        atomic_t unfinished_count;
};

static struct smp_call_struct *smp_call_function_data;
/* Atomically drop data into a shared pointer.  The pointer is free if
   it is initially zero.  If retry, spin until free.  */

static int
pointer_lock (void *lock, void *data, int retry)
{
        void *old, *tmp;

        mb();
 again:
        /* Compare and swap with zero.  */
        asm volatile (
        "1:     ldq_l   %0,%1\n"
        "       mov     %3,%2\n"
        "       bne     %0,2f\n"
        "       stq_c   %2,%1\n"
        "       beq     %2,1b\n"
        "2:"
        : "=&r"(old), "=m"(*(void **)lock), "=&r"(tmp)
        : "r"(data)
        : "memory");

        if (old == 0)
                return 0;
        if (! retry)
                return -EBUSY;

        while (*(void **)lock)
                barrier();
        goto again;
}
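/* For readers new to Alpha: the sequence above is a load-locked /
   store-conditional compare-and-swap.  ldq_l loads the pointer and
   arms the lock flag, bne bails out if the value was non-zero
   (already owned), stq_c stores only if the location was untouched
   since the ldq_l, and beq retries when that conditional store
   fails.  */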
void
handle_ipi(struct pt_regs *regs)
{
        int this_cpu = smp_processor_id();
        unsigned long *pending_ipis = &ipi_data[this_cpu].bits;
        unsigned long ops;

#if 0
        DBGS(("handle_ipi: on CPU %d ops 0x%lx PC 0x%lx\n",
              this_cpu, *pending_ipis, regs->pc));
#endif

        mb();   /* Order interrupt and bit testing. */
        while ((ops = xchg(pending_ipis, 0)) != 0) {
                mb();   /* Order bit clearing and data access. */
                do {
                        unsigned long which;

                        /* Peel off the lowest set bit and map it back
                           to its ipi_message_type.  */
                        which = ops & -ops;
                        ops &= ~which;
                        which = __ffs(which);

                        switch (which) {
                        case IPI_RESCHEDULE:
                                /* Reschedule callback.  Everything to be done
                                   is done by the interrupt return path.  */
                                break;

                        case IPI_CALL_FUNC:
                            {
                                struct smp_call_struct *data;
                                void (*func)(void *info);
                                void *info;
                                int wait;

                                data = smp_call_function_data;
                                func = data->func;
                                info = data->info;
                                wait = data->wait;

                                /* Notify the sending CPU that the data has
                                   been received, and execution is about to
                                   begin.  */
                                mb();
                                atomic_dec (&data->unstarted_count);

                                /* At this point the structure may be gone
                                   unless wait is true.  */
                                (*func)(info);

                                /* Notify the sending CPU that the task is
                                   done.  */
                                mb();
                                if (wait) atomic_dec (&data->unfinished_count);
                                break;
                            }

                        case IPI_CPU_STOP:
                                halt();

                        default:
                                printk(KERN_CRIT "Unknown IPI on CPU %d: %lu\n",
                                       this_cpu, which);
                                break;
                        }
                } while (ops);

                mb();   /* Order data access and bit testing. */
        }

        cpu_data[this_cpu].ipi_count++;

        if (hwrpb->txrdy)
                recv_secondary_console_msg();
}
void
smp_send_reschedule(int cpu)
{
#ifdef DEBUG_IPI_MSG
        if (cpu == hard_smp_processor_id())
                printk(KERN_WARNING
                       "smp_send_reschedule: Sending IPI to self.\n");
#endif
        send_ipi_message(cpumask_of_cpu(cpu), IPI_RESCHEDULE);
}
void
smp_send_stop(void)
{
        cpumask_t to_whom = cpu_possible_map;
        cpu_clear(smp_processor_id(), to_whom);
#ifdef DEBUG_IPI_MSG
        if (hard_smp_processor_id() != boot_cpu_id)
                printk(KERN_WARNING "smp_send_stop: Not on boot cpu.\n");
#endif
        send_ipi_message(to_whom, IPI_CPU_STOP);
}
/*
 * Run a function on all other CPUs.
 *  <func>      The function to run.  This must be fast and non-blocking.
 *  <info>      An arbitrary pointer to pass to the function.
 *  <retry>     If true, keep retrying until ready.
 *  <wait>      If true, wait until the function has completed on other CPUs.
 *  [RETURNS]   0 on success, else a negative status code.
 *
 * Does not return until the remote CPUs are nearly ready to execute
 * <func>, are executing it, or have already executed it.
 * You must not call this function with interrupts disabled, from a
 * hardware interrupt handler, or from a bottom half handler.
 */
int
smp_call_function_on_cpu (void (*func) (void *info), void *info, int retry,
                          int wait, cpumask_t to_whom)
{
        struct smp_call_struct data;
        unsigned long timeout;
        int num_cpus_to_call;

        /* Can deadlock when called with interrupts disabled.  */
        WARN_ON(irqs_disabled());

        data.func = func;
        data.info = info;
        data.wait = wait;

        cpu_clear(smp_processor_id(), to_whom);
        num_cpus_to_call = cpus_weight(to_whom);

        atomic_set(&data.unstarted_count, num_cpus_to_call);
        atomic_set(&data.unfinished_count, num_cpus_to_call);

        /* Acquire the smp_call_function_data mutex.  */
        if (pointer_lock(&smp_call_function_data, &data, retry))
                return -EBUSY;

        /* Send a message to the requested CPUs.  */
        send_ipi_message(to_whom, IPI_CALL_FUNC);

        /* Wait for a minimal response.  */
        timeout = jiffies + HZ;
        while (atomic_read (&data.unstarted_count) > 0
               && time_before (jiffies, timeout))
                barrier();

        /* If there's no response yet, log a message but allow a longer
         * timeout period -- if we get a response this time, log
         * a message saying when we got it.
         */
        if (atomic_read(&data.unstarted_count) > 0) {
                long start_time = jiffies;
                printk(KERN_ERR "%s: initial timeout -- trying long wait\n",
                       __FUNCTION__);
                timeout = jiffies + 30 * HZ;
                while (atomic_read(&data.unstarted_count) > 0
                       && time_before(jiffies, timeout))
                        barrier();
                if (atomic_read(&data.unstarted_count) <= 0) {
                        long delta = jiffies - start_time;
                        printk(KERN_ERR
                               "%s: response %ld.%ld seconds into long wait\n",
                               __FUNCTION__, delta / HZ,
                               (100 * (delta - ((delta / HZ) * HZ))) / HZ);
                }
        }

        /* We either got one or timed out -- clear the lock.  */
        mb();
        smp_call_function_data = NULL;

        /*
         * If after both the initial and long timeout periods we still don't
         * have a response, something is very wrong...
         */
        BUG_ON(atomic_read (&data.unstarted_count) > 0);

        /* Wait for a complete response, if needed.  */
        if (wait) {
                while (atomic_read (&data.unfinished_count) > 0)
                        barrier();
        }

        return 0;
}
EXPORT_SYMBOL(smp_call_function_on_cpu);
int
smp_call_function (void (*func) (void *info), void *info, int retry, int wait)
{
        return smp_call_function_on_cpu (func, info, retry, wait,
                                         cpu_online_map);
}
EXPORT_SYMBOL(smp_call_function);
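/* Typical usage, as in the TLB flush helpers below (a sketch):
 *
 *      if (smp_call_function(ipi_flush_tlb_mm, mm, 1, 1))
 *              printk(KERN_CRIT "flush_tlb_mm: timed out\n");
 *
 * i.e. retry the data lock, and wait for completion so that
 * stack-allocated arguments stay live until every CPU is done.  */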
static void
ipi_imb(void *ignored)
{
        imb();
}

void
smp_imb(void)
{
        /* Must wait for the other processors to flush their icache
           before continuing.  */
        if (on_each_cpu(ipi_imb, NULL, 1, 1))
                printk(KERN_CRIT "smp_imb: timed out\n");
}
EXPORT_SYMBOL(smp_imb);
static void
ipi_flush_tlb_all(void *ignored)
{
        tbia();
}

void
flush_tlb_all(void)
{
        /* Although we don't have any data to pass, we do want to
           synchronize with the other processors.  */
        if (on_each_cpu(ipi_flush_tlb_all, NULL, 1, 1)) {
                printk(KERN_CRIT "flush_tlb_all: timed out\n");
        }
}
#define asn_locked() (cpu_data[smp_processor_id()].asn_lock)

static void
ipi_flush_tlb_mm(void *x)
{
        struct mm_struct *mm = (struct mm_struct *) x;
        if (mm == current->active_mm && !asn_locked())
                flush_tlb_current(mm);
        else
                flush_tlb_other(mm);
}

void
flush_tlb_mm(struct mm_struct *mm)
{
        preempt_disable();

        if (mm == current->active_mm) {
                flush_tlb_current(mm);
                if (atomic_read(&mm->mm_users) <= 1) {
                        int cpu, this_cpu = smp_processor_id();
                        for (cpu = 0; cpu < NR_CPUS; cpu++) {
                                if (!cpu_online(cpu) || cpu == this_cpu)
                                        continue;
                                if (mm->context[cpu])
                                        mm->context[cpu] = 0;
                        }
                        preempt_enable();
                        return;
                }
        }

        if (smp_call_function(ipi_flush_tlb_mm, mm, 1, 1)) {
                printk(KERN_CRIT "flush_tlb_mm: timed out\n");
        }

        preempt_enable();
}
EXPORT_SYMBOL(flush_tlb_mm);
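/* Why the single-user fast path above may skip the cross-CPU flush:
   zeroing mm->context[cpu] discards that CPU's cached ASN, so if the
   mm is ever activated there again a fresh address space number is
   allocated and the stale TLB entries can never match.  */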
struct flush_tlb_page_struct {
        struct vm_area_struct *vma;
        struct mm_struct *mm;
        unsigned long addr;
};

static void
ipi_flush_tlb_page(void *x)
{
        struct flush_tlb_page_struct *data = (struct flush_tlb_page_struct *)x;
        struct mm_struct *mm = data->mm;

        if (mm == current->active_mm && !asn_locked())
                flush_tlb_current_page(mm, data->vma, data->addr);
        else
                flush_tlb_other(mm);
}

void
flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
{
        struct flush_tlb_page_struct data;
        struct mm_struct *mm = vma->vm_mm;

        preempt_disable();

        if (mm == current->active_mm) {
                flush_tlb_current_page(mm, vma, addr);
                if (atomic_read(&mm->mm_users) <= 1) {
                        int cpu, this_cpu = smp_processor_id();
                        for (cpu = 0; cpu < NR_CPUS; cpu++) {
                                if (!cpu_online(cpu) || cpu == this_cpu)
                                        continue;
                                if (mm->context[cpu])
                                        mm->context[cpu] = 0;
                        }
                        preempt_enable();
                        return;
                }
        }

        data.vma = vma;
        data.mm = mm;
        data.addr = addr;

        if (smp_call_function(ipi_flush_tlb_page, &data, 1, 1)) {
                printk(KERN_CRIT "flush_tlb_page: timed out\n");
        }

        preempt_enable();
}
EXPORT_SYMBOL(flush_tlb_page);
void
flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
        /* On the Alpha we always flush the whole user tlb.  */
        flush_tlb_mm(vma->vm_mm);
}
EXPORT_SYMBOL(flush_tlb_range);
static void
ipi_flush_icache_page(void *x)
{
        struct mm_struct *mm = (struct mm_struct *) x;
        if (mm == current->active_mm && !asn_locked())
                __load_new_mm_context(mm);
        else
                flush_tlb_other(mm);
}

void
flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
                        unsigned long addr, int len)
{
        struct mm_struct *mm = vma->vm_mm;

        if ((vma->vm_flags & VM_EXEC) == 0)
                return;

        preempt_disable();

        if (mm == current->active_mm) {
                __load_new_mm_context(mm);
                if (atomic_read(&mm->mm_users) <= 1) {
                        int cpu, this_cpu = smp_processor_id();
                        for (cpu = 0; cpu < NR_CPUS; cpu++) {
                                if (!cpu_online(cpu) || cpu == this_cpu)
                                        continue;
                                if (mm->context[cpu])
                                        mm->context[cpu] = 0;
                        }
                        preempt_enable();
                        return;
                }
        }

        if (smp_call_function(ipi_flush_icache_page, mm, 1, 1)) {
                printk(KERN_CRIT "flush_icache_page: timed out\n");
        }

        preempt_enable();
}