/*
 *	linux/arch/alpha/kernel/smp.c
 *
 * 2001-07-09 Phil Ezolt (Phillip.Ezolt@compaq.com)
 *            Renamed modified smp_call_function to smp_call_function_on_cpu()
 *            Created an function that conforms to the old calling convention
 *            of smp_call_function().
 *
 *            This is helpful for DCPI.
 */
13 #include <linux/errno.h>
14 #include <linux/kernel.h>
15 #include <linux/kernel_stat.h>
16 #include <linux/module.h>
17 #include <linux/sched.h>
19 #include <linux/threads.h>
20 #include <linux/smp.h>
21 #include <linux/smp_lock.h>
22 #include <linux/interrupt.h>
23 #include <linux/init.h>
24 #include <linux/delay.h>
25 #include <linux/spinlock.h>
26 #include <linux/irq.h>
27 #include <linux/cache.h>
28 #include <linux/profile.h>
30 #include <asm/hwrpb.h>
31 #include <asm/ptrace.h>
32 #include <asm/atomic.h>
36 #include <asm/bitops.h>
37 #include <asm/pgtable.h>
38 #include <asm/pgalloc.h>
39 #include <asm/mmu_context.h>
40 #include <asm/tlbflush.h>
48 #define DBGS(args) printk args
53 /* A collection of per-processor data. */
54 struct cpuinfo_alpha cpu_data
[NR_CPUS
];
56 /* A collection of single bit ipi messages. */
58 unsigned long bits ____cacheline_aligned
;
59 } ipi_data
[NR_CPUS
] __cacheline_aligned
;
/* IPI message codes; each gets one bit in ipi_data[cpu].bits.
   Enumerator names grounded by their uses in handle_ipi and the
   send_ipi_message callers below.  */
enum ipi_message_type {
	IPI_RESCHEDULE,
	IPI_CALL_FUNC,
	IPI_CPU_STOP,
};
67 /* Set to a secondary's cpuid when it comes online. */
68 static int smp_secondary_alive __initdata
= 0;
70 /* Which cpus ids came online. */
71 cpumask_t cpu_present_mask
;
72 cpumask_t cpu_online_map
;
74 EXPORT_SYMBOL(cpu_online_map
);
76 /* cpus reported in the hwrpb */
77 static unsigned long hwrpb_cpu_present_mask __initdata
= 0;
79 int smp_num_probed
; /* Internal processor count */
80 int smp_num_cpus
= 1; /* Number that came online. */
81 cycles_t cacheflush_time
;
82 unsigned long cache_decay_ticks
;
84 extern void calibrate_delay(void);
89 * Called by both boot and secondaries to move global data into
90 * per-processor storage.
92 static inline void __init
93 smp_store_cpu_info(int cpuid
)
95 cpu_data
[cpuid
].loops_per_jiffy
= loops_per_jiffy
;
96 cpu_data
[cpuid
].last_asn
= ASN_FIRST_VERSION
;
97 cpu_data
[cpuid
].need_new_asn
= 0;
98 cpu_data
[cpuid
].asn_lock
= 0;
102 * Ideally sets up per-cpu profiling hooks. Doesn't do much now...
104 static inline void __init
105 smp_setup_percpu_timer(int cpuid
)
107 cpu_data
[cpuid
].prof_counter
= 1;
108 cpu_data
[cpuid
].prof_multiplier
= 1;
112 wait_boot_cpu_to_stop(int cpuid
)
114 unsigned long stop
= jiffies
+ 10*HZ
;
116 while (time_before(jiffies
, stop
)) {
117 if (!smp_secondary_alive
)
122 printk("wait_boot_cpu_to_stop: FAILED on CPU %d, hanging now\n", cpuid
);
128 * Where secondaries begin a life of C.
133 int cpuid
= hard_smp_processor_id();
135 if (cpu_test_and_set(cpuid
, cpu_online_map
)) {
136 printk("??, cpu 0x%x already present??\n", cpuid
);
140 /* Turn on machine checks. */
143 /* Set trap vectors. */
146 /* Set interrupt vector. */
149 /* Get our local ticker going. */
150 smp_setup_percpu_timer(cpuid
);
152 /* Call platform-specific callin, if specified */
153 if (alpha_mv
.smp_callin
) alpha_mv
.smp_callin();
155 /* All kernel threads share the same mm context. */
156 atomic_inc(&init_mm
.mm_count
);
157 current
->active_mm
= &init_mm
;
159 /* Must have completely accurate bogos. */
162 /* Wait boot CPU to stop with irq enabled before running
164 wait_boot_cpu_to_stop(cpuid
);
168 smp_store_cpu_info(cpuid
);
169 /* Allow master to continue only after we written loops_per_jiffy. */
171 smp_secondary_alive
= 1;
173 DBGS(("smp_callin: commencing CPU %d current %p active_mm %p\n",
174 cpuid
, current
, current
->active_mm
));
182 * Rough estimation for SMP scheduling, this is the number of cycles it
183 * takes for a fully memory-limited process to flush the SMP-local cache.
185 * We are not told how much cache there is, so we have to guess.
188 smp_tune_scheduling (int cpuid
)
190 struct percpu_struct
*cpu
;
191 unsigned long on_chip_cache
; /* kB */
192 unsigned long freq
; /* Hz */
193 unsigned long bandwidth
= 350; /* MB/s */
195 cpu
= (struct percpu_struct
*)((char*)hwrpb
+ hwrpb
->processor_offset
196 + cpuid
* hwrpb
->processor_size
);
200 on_chip_cache
= 16 + 16;
205 on_chip_cache
= 8 + 8 + 96;
209 on_chip_cache
= 16 + 8;
215 on_chip_cache
= 64 + 64;
219 freq
= hwrpb
->cycle_freq
? : est_cycle_freq
;
221 cacheflush_time
= (freq
/ 1000000) * (on_chip_cache
<< 10) / bandwidth
;
222 cache_decay_ticks
= cacheflush_time
/ (freq
/ 1000) * HZ
/ 1000;
224 printk("per-CPU timeslice cutoff: %ld.%02ld usecs.\n",
225 cacheflush_time
/(freq
/1000000),
226 (cacheflush_time
*100/(freq
/1000000)) % 100);
227 printk("task migration cache decay timeout: %ld msecs.\n",
228 (cache_decay_ticks
+ 1) * 1000 / HZ
);
231 /* Wait until hwrpb->txrdy is clear for cpu. Return -1 on timeout. */
233 wait_for_txrdy (unsigned long cpumask
)
235 unsigned long timeout
;
237 if (!(hwrpb
->txrdy
& cpumask
))
240 timeout
= jiffies
+ 10*HZ
;
241 while (time_before(jiffies
, timeout
)) {
242 if (!(hwrpb
->txrdy
& cpumask
))
252 * Send a message to a secondary's console. "START" is one such
253 * interesting message. ;-)
256 send_secondary_console_msg(char *str
, int cpuid
)
258 struct percpu_struct
*cpu
;
259 register char *cp1
, *cp2
;
260 unsigned long cpumask
;
263 cpu
= (struct percpu_struct
*)
265 + hwrpb
->processor_offset
266 + cpuid
* hwrpb
->processor_size
);
268 cpumask
= (1UL << cpuid
);
269 if (wait_for_txrdy(cpumask
))
274 *(unsigned int *)&cpu
->ipc_buffer
[0] = len
;
275 cp1
= (char *) &cpu
->ipc_buffer
[1];
276 memcpy(cp1
, cp2
, len
);
278 /* atomic test and set */
280 set_bit(cpuid
, &hwrpb
->rxrdy
);
282 if (wait_for_txrdy(cpumask
))
287 printk("Processor %x not ready\n", cpuid
);
291 * A secondary console wants to send a message. Receive it.
294 recv_secondary_console_msg(void)
297 unsigned long txrdy
= hwrpb
->txrdy
;
298 char *cp1
, *cp2
, buf
[80];
299 struct percpu_struct
*cpu
;
301 DBGS(("recv_secondary_console_msg: TXRDY 0x%lx.\n", txrdy
));
303 mycpu
= hard_smp_processor_id();
305 for (i
= 0; i
< NR_CPUS
; i
++) {
306 if (!(txrdy
& (1UL << i
)))
309 DBGS(("recv_secondary_console_msg: "
310 "TXRDY contains CPU %d.\n", i
));
312 cpu
= (struct percpu_struct
*)
314 + hwrpb
->processor_offset
315 + i
* hwrpb
->processor_size
);
317 DBGS(("recv_secondary_console_msg: on %d from %d"
318 " HALT_REASON 0x%lx FLAGS 0x%lx\n",
319 mycpu
, i
, cpu
->halt_reason
, cpu
->flags
));
321 cnt
= cpu
->ipc_buffer
[0] >> 32;
322 if (cnt
<= 0 || cnt
>= 80)
323 strcpy(buf
, "<<< BOGUS MSG >>>");
325 cp1
= (char *) &cpu
->ipc_buffer
[11];
329 while ((cp2
= strchr(cp2
, '\r')) != 0) {
336 DBGS((KERN_INFO
"recv_secondary_console_msg: on %d "
337 "message is '%s'\n", mycpu
, buf
));
344 * Convince the console to have a secondary cpu begin execution.
347 secondary_cpu_start(int cpuid
, struct task_struct
*idle
)
349 struct percpu_struct
*cpu
;
350 struct pcb_struct
*hwpcb
, *ipcb
;
351 unsigned long timeout
;
353 cpu
= (struct percpu_struct
*)
355 + hwrpb
->processor_offset
356 + cpuid
* hwrpb
->processor_size
);
357 hwpcb
= (struct pcb_struct
*) cpu
->hwpcb
;
358 ipcb
= &idle
->thread_info
->pcb
;
360 /* Initialize the CPU's HWPCB to something just good enough for
361 us to get started. Immediately after starting, we'll swpctx
362 to the target idle task's pcb. Reuse the stack in the mean
363 time. Precalculate the target PCBB. */
364 hwpcb
->ksp
= (unsigned long)ipcb
+ sizeof(union thread_union
) - 16;
366 hwpcb
->ptbr
= ipcb
->ptbr
;
369 hwpcb
->unique
= virt_to_phys(ipcb
);
370 hwpcb
->flags
= ipcb
->flags
;
371 hwpcb
->res1
= hwpcb
->res2
= 0;
374 DBGS(("KSP 0x%lx PTBR 0x%lx VPTBR 0x%lx UNIQUE 0x%lx\n",
375 hwpcb
->ksp
, hwpcb
->ptbr
, hwrpb
->vptb
, hwpcb
->unique
));
377 DBGS(("Starting secondary cpu %d: state 0x%lx pal_flags 0x%lx\n",
378 cpuid
, idle
->state
, ipcb
->flags
));
380 /* Setup HWRPB fields that SRM uses to activate secondary CPU */
381 hwrpb
->CPU_restart
= __smp_callin
;
382 hwrpb
->CPU_restart_data
= (unsigned long) __smp_callin
;
384 /* Recalculate and update the HWRPB checksum */
385 hwrpb_update_checksum(hwrpb
);
388 * Send a "start" command to the specified processor.
391 /* SRM III 3.4.1.3 */
392 cpu
->flags
|= 0x22; /* turn on Context Valid and Restart Capable */
393 cpu
->flags
&= ~1; /* turn off Bootstrap In Progress */
396 send_secondary_console_msg("START\r\n", cpuid
);
398 /* Wait 10 seconds for an ACK from the console. */
399 timeout
= jiffies
+ 10*HZ
;
400 while (time_before(jiffies
, timeout
)) {
406 printk(KERN_ERR
"SMP: Processor %d failed to start.\n", cpuid
);
410 DBGS(("secondary_cpu_start: SUCCESS for CPU %d!!!\n", cpuid
));
415 * Bring one cpu online.
418 smp_boot_one_cpu(int cpuid
)
420 struct task_struct
*idle
;
421 unsigned long timeout
;
423 /* Cook up an idler for this guy. Note that the address we
424 give to kernel_thread is irrelevant -- it's going to start
425 where HWRPB.CPU_restart says to start. But this gets all
426 the other task-y sort of data structures set up like we
427 wish. We can't use kernel_thread since we must avoid
428 rescheduling the child. */
429 idle
= fork_idle(cpuid
);
431 panic("failed fork for CPU %d", cpuid
);
433 DBGS(("smp_boot_one_cpu: CPU %d state 0x%lx flags 0x%lx\n",
434 cpuid
, idle
->state
, idle
->flags
));
436 /* Signal the secondary to wait a moment. */
437 smp_secondary_alive
= -1;
439 /* Whirrr, whirrr, whirrrrrrrrr... */
440 if (secondary_cpu_start(cpuid
, idle
))
443 /* Notify the secondary CPU it can run calibrate_delay. */
445 smp_secondary_alive
= 0;
447 /* We've been acked by the console; wait one second for
448 the task to start up for real. */
449 timeout
= jiffies
+ 1*HZ
;
450 while (time_before(jiffies
, timeout
)) {
451 if (smp_secondary_alive
== 1)
457 /* We failed to boot the CPU. */
459 printk(KERN_ERR
"SMP: Processor %d is stuck.\n", cpuid
);
463 /* Another "Red Snapper". */
468 * Called from setup_arch. Detect an SMP system and which processors
474 struct percpu_struct
*cpubase
, *cpu
;
477 if (boot_cpuid
!= 0) {
478 printk(KERN_WARNING
"SMP: Booting off cpu %d instead of 0?\n",
482 if (hwrpb
->nr_processors
> 1) {
485 DBGS(("setup_smp: nr_processors %ld\n",
486 hwrpb
->nr_processors
));
488 cpubase
= (struct percpu_struct
*)
489 ((char*)hwrpb
+ hwrpb
->processor_offset
);
490 boot_cpu_palrev
= cpubase
->pal_revision
;
492 for (i
= 0; i
< hwrpb
->nr_processors
; i
++) {
493 cpu
= (struct percpu_struct
*)
494 ((char *)cpubase
+ i
*hwrpb
->processor_size
);
495 if ((cpu
->flags
& 0x1cc) == 0x1cc) {
497 /* Assume here that "whami" == index */
498 hwrpb_cpu_present_mask
|= (1UL << i
);
499 cpu
->pal_revision
= boot_cpu_palrev
;
502 DBGS(("setup_smp: CPU %d: flags 0x%lx type 0x%lx\n",
503 i
, cpu
->flags
, cpu
->type
));
504 DBGS(("setup_smp: CPU %d: PAL rev 0x%lx\n",
505 i
, cpu
->pal_revision
));
509 hwrpb_cpu_present_mask
= (1UL << boot_cpuid
);
511 cpu_present_mask
= cpumask_of_cpu(boot_cpuid
);
513 printk(KERN_INFO
"SMP: %d CPUs probed -- cpu_present_mask = %lx\n",
514 smp_num_probed
, hwrpb_cpu_present_mask
);
518 * Called by smp_init prepare the secondaries
521 smp_prepare_cpus(unsigned int max_cpus
)
525 /* Take care of some initial bookkeeping. */
526 memset(ipi_data
, 0, sizeof(ipi_data
));
528 current_thread_info()->cpu
= boot_cpuid
;
530 smp_store_cpu_info(boot_cpuid
);
531 smp_tune_scheduling(boot_cpuid
);
532 smp_setup_percpu_timer(boot_cpuid
);
534 /* Nothing to do on a UP box, or when told not to. */
535 if (smp_num_probed
== 1 || max_cpus
== 0) {
536 cpu_present_mask
= cpumask_of_cpu(boot_cpuid
);
537 printk(KERN_INFO
"SMP mode deactivated.\n");
541 printk(KERN_INFO
"SMP starting up secondaries.\n");
544 for (i
= 0; (i
< NR_CPUS
) && (cpu_count
< max_cpus
); i
++) {
548 if (((hwrpb_cpu_present_mask
>> i
) & 1) == 0)
551 cpu_set(i
, cpu_possible_map
);
555 smp_num_cpus
= cpu_count
;
559 smp_prepare_boot_cpu(void)
562 * Mark the boot cpu (current cpu) as both present and online
564 cpu_set(smp_processor_id(), cpu_present_mask
);
565 cpu_set(smp_processor_id(), cpu_online_map
);
569 __cpu_up(unsigned int cpu
)
571 smp_boot_one_cpu(cpu
);
573 return cpu_online(cpu
) ? 0 : -ENOSYS
;
577 smp_cpus_done(unsigned int max_cpus
)
580 unsigned long bogosum
= 0;
582 for(cpu
= 0; cpu
< NR_CPUS
; cpu
++)
584 bogosum
+= cpu_data
[cpu
].loops_per_jiffy
;
586 printk(KERN_INFO
"SMP: Total of %d processors activated "
587 "(%lu.%02lu BogoMIPS).\n",
589 (bogosum
+ 2500) / (500000/HZ
),
590 ((bogosum
+ 2500) / (5000/HZ
)) % 100);
595 smp_percpu_timer_interrupt(struct pt_regs
*regs
)
597 int cpu
= smp_processor_id();
598 unsigned long user
= user_mode(regs
);
599 struct cpuinfo_alpha
*data
= &cpu_data
[cpu
];
601 /* Record kernel PC. */
602 profile_tick(CPU_PROFILING
, regs
);
604 if (!--data
->prof_counter
) {
605 /* We need to make like a normal interrupt -- otherwise
606 timer interrupts ignore the global interrupt lock,
607 which would be a Bad Thing. */
610 update_process_times(user
);
612 data
->prof_counter
= data
->prof_multiplier
;
619 setup_profiling_timer(unsigned int multiplier
)
626 send_ipi_message(cpumask_t to_whom
, enum ipi_message_type operation
)
631 for_each_cpu_mask(i
, to_whom
)
632 set_bit(operation
, &ipi_data
[i
].bits
);
635 for_each_cpu_mask(i
, to_whom
)
639 /* Structure and data for smp_call_function. This is designed to
640 minimize static memory requirements. Plus it looks cleaner. */
642 struct smp_call_struct
{
643 void (*func
) (void *info
);
646 atomic_t unstarted_count
;
647 atomic_t unfinished_count
;
650 static struct smp_call_struct
*smp_call_function_data
;
652 /* Atomicly drop data into a shared pointer. The pointer is free if
653 it is initially locked. If retry, spin until free. */
656 pointer_lock (void *lock
, void *data
, int retry
)
662 /* Compare and swap with zero. */
670 : "=&r"(old
), "=m"(*(void **)lock
), "=&r"(tmp
)
679 while (*(void **)lock
)
685 handle_ipi(struct pt_regs
*regs
)
687 int this_cpu
= smp_processor_id();
688 unsigned long *pending_ipis
= &ipi_data
[this_cpu
].bits
;
692 DBGS(("handle_ipi: on CPU %d ops 0x%lx PC 0x%lx\n",
693 this_cpu
, *pending_ipis
, regs
->pc
));
696 mb(); /* Order interrupt and bit testing. */
697 while ((ops
= xchg(pending_ipis
, 0)) != 0) {
698 mb(); /* Order bit clearing and data access. */
704 which
= __ffs(which
);
708 /* Reschedule callback. Everything to be done
709 is done by the interrupt return path. */
714 struct smp_call_struct
*data
;
715 void (*func
)(void *info
);
719 data
= smp_call_function_data
;
724 /* Notify the sending CPU that the data has been
725 received, and execution is about to begin. */
727 atomic_dec (&data
->unstarted_count
);
729 /* At this point the structure may be gone unless
733 /* Notify the sending CPU that the task is done. */
735 if (wait
) atomic_dec (&data
->unfinished_count
);
743 printk(KERN_CRIT
"Unknown IPI on CPU %d: %lu\n",
749 mb(); /* Order data access and bit testing. */
752 cpu_data
[this_cpu
].ipi_count
++;
755 recv_secondary_console_msg();
759 smp_send_reschedule(int cpu
)
762 if (cpu
== hard_smp_processor_id())
764 "smp_send_reschedule: Sending IPI to self.\n");
766 send_ipi_message(cpumask_of_cpu(cpu
), IPI_RESCHEDULE
);
772 cpumask_t to_whom
= cpu_possible_map
;
773 cpu_clear(smp_processor_id(), to_whom
);
775 if (hard_smp_processor_id() != boot_cpu_id
)
776 printk(KERN_WARNING
"smp_send_stop: Not on boot cpu.\n");
778 send_ipi_message(to_whom
, IPI_CPU_STOP
);
782 * Run a function on all other CPUs.
783 * <func> The function to run. This must be fast and non-blocking.
784 * <info> An arbitrary pointer to pass to the function.
785 * <retry> If true, keep retrying until ready.
786 * <wait> If true, wait until function has completed on other CPUs.
787 * [RETURNS] 0 on success, else a negative status code.
789 * Does not return until remote CPUs are nearly ready to execute <func>
790 * or are or have executed.
791 * You must not call this function with disabled interrupts or from a
792 * hardware interrupt handler or from a bottom half handler.
796 smp_call_function_on_cpu (void (*func
) (void *info
), void *info
, int retry
,
797 int wait
, cpumask_t to_whom
)
799 struct smp_call_struct data
;
800 unsigned long timeout
;
801 int num_cpus_to_call
;
803 /* Can deadlock when called with interrupts disabled */
804 WARN_ON(irqs_disabled());
810 cpu_clear(smp_processor_id(), to_whom
);
811 num_cpus_to_call
= cpus_weight(to_whom
);
813 atomic_set(&data
.unstarted_count
, num_cpus_to_call
);
814 atomic_set(&data
.unfinished_count
, num_cpus_to_call
);
816 /* Acquire the smp_call_function_data mutex. */
817 if (pointer_lock(&smp_call_function_data
, &data
, retry
))
820 /* Send a message to the requested CPUs. */
821 send_ipi_message(to_whom
, IPI_CALL_FUNC
);
823 /* Wait for a minimal response. */
824 timeout
= jiffies
+ HZ
;
825 while (atomic_read (&data
.unstarted_count
) > 0
826 && time_before (jiffies
, timeout
))
829 /* If there's no response yet, log a message but allow a longer
830 * timeout period -- if we get a response this time, log
831 * a message saying when we got it..
833 if (atomic_read(&data
.unstarted_count
) > 0) {
834 long start_time
= jiffies
;
835 printk(KERN_ERR
"%s: initial timeout -- trying long wait\n",
837 timeout
= jiffies
+ 30 * HZ
;
838 while (atomic_read(&data
.unstarted_count
) > 0
839 && time_before(jiffies
, timeout
))
841 if (atomic_read(&data
.unstarted_count
) <= 0) {
842 long delta
= jiffies
- start_time
;
844 "%s: response %ld.%ld seconds into long wait\n",
845 __FUNCTION__
, delta
/ HZ
,
846 (100 * (delta
- ((delta
/ HZ
) * HZ
))) / HZ
);
850 /* We either got one or timed out -- clear the lock. */
852 smp_call_function_data
= NULL
;
855 * If after both the initial and long timeout periods we still don't
856 * have a response, something is very wrong...
858 BUG_ON(atomic_read (&data
.unstarted_count
) > 0);
860 /* Wait for a complete response, if needed. */
862 while (atomic_read (&data
.unfinished_count
) > 0)
870 smp_call_function (void (*func
) (void *info
), void *info
, int retry
, int wait
)
872 return smp_call_function_on_cpu (func
, info
, retry
, wait
,
877 ipi_imb(void *ignored
)
885 /* Must wait other processors to flush their icache before continue. */
886 if (on_each_cpu(ipi_imb
, NULL
, 1, 1))
887 printk(KERN_CRIT
"smp_imb: timed out\n");
891 ipi_flush_tlb_all(void *ignored
)
899 /* Although we don't have any data to pass, we do want to
900 synchronize with the other processors. */
901 if (on_each_cpu(ipi_flush_tlb_all
, NULL
, 1, 1)) {
902 printk(KERN_CRIT
"flush_tlb_all: timed out\n");
906 #define asn_locked() (cpu_data[smp_processor_id()].asn_lock)
909 ipi_flush_tlb_mm(void *x
)
911 struct mm_struct
*mm
= (struct mm_struct
*) x
;
912 if (mm
== current
->active_mm
&& !asn_locked())
913 flush_tlb_current(mm
);
919 flush_tlb_mm(struct mm_struct
*mm
)
923 if (mm
== current
->active_mm
) {
924 flush_tlb_current(mm
);
925 if (atomic_read(&mm
->mm_users
) <= 1) {
926 int cpu
, this_cpu
= smp_processor_id();
927 for (cpu
= 0; cpu
< NR_CPUS
; cpu
++) {
928 if (!cpu_online(cpu
) || cpu
== this_cpu
)
930 if (mm
->context
[cpu
])
931 mm
->context
[cpu
] = 0;
938 if (smp_call_function(ipi_flush_tlb_mm
, mm
, 1, 1)) {
939 printk(KERN_CRIT
"flush_tlb_mm: timed out\n");
/* Argument bundle for the single-page flush IPI.  */
struct flush_tlb_page_struct {
	struct vm_area_struct *vma;
	struct mm_struct *mm;
	unsigned long addr;
};
952 ipi_flush_tlb_page(void *x
)
954 struct flush_tlb_page_struct
*data
= (struct flush_tlb_page_struct
*)x
;
955 struct mm_struct
* mm
= data
->mm
;
957 if (mm
== current
->active_mm
&& !asn_locked())
958 flush_tlb_current_page(mm
, data
->vma
, data
->addr
);
964 flush_tlb_page(struct vm_area_struct
*vma
, unsigned long addr
)
966 struct flush_tlb_page_struct data
;
967 struct mm_struct
*mm
= vma
->vm_mm
;
971 if (mm
== current
->active_mm
) {
972 flush_tlb_current_page(mm
, vma
, addr
);
973 if (atomic_read(&mm
->mm_users
) <= 1) {
974 int cpu
, this_cpu
= smp_processor_id();
975 for (cpu
= 0; cpu
< NR_CPUS
; cpu
++) {
976 if (!cpu_online(cpu
) || cpu
== this_cpu
)
978 if (mm
->context
[cpu
])
979 mm
->context
[cpu
] = 0;
990 if (smp_call_function(ipi_flush_tlb_page
, &data
, 1, 1)) {
991 printk(KERN_CRIT
"flush_tlb_page: timed out\n");
998 flush_tlb_range(struct vm_area_struct
*vma
, unsigned long start
, unsigned long end
)
1000 /* On the Alpha we always flush the whole user tlb. */
1001 flush_tlb_mm(vma
->vm_mm
);
1005 ipi_flush_icache_page(void *x
)
1007 struct mm_struct
*mm
= (struct mm_struct
*) x
;
1008 if (mm
== current
->active_mm
&& !asn_locked())
1009 __load_new_mm_context(mm
);
1011 flush_tlb_other(mm
);
1015 flush_icache_user_range(struct vm_area_struct
*vma
, struct page
*page
,
1016 unsigned long addr
, int len
)
1018 struct mm_struct
*mm
= vma
->vm_mm
;
1020 if ((vma
->vm_flags
& VM_EXEC
) == 0)
1025 if (mm
== current
->active_mm
) {
1026 __load_new_mm_context(mm
);
1027 if (atomic_read(&mm
->mm_users
) <= 1) {
1028 int cpu
, this_cpu
= smp_processor_id();
1029 for (cpu
= 0; cpu
< NR_CPUS
; cpu
++) {
1030 if (!cpu_online(cpu
) || cpu
== this_cpu
)
1032 if (mm
->context
[cpu
])
1033 mm
->context
[cpu
] = 0;
1040 if (smp_call_function(ipi_flush_icache_page
, mm
, 1, 1)) {
1041 printk(KERN_CRIT
"flush_icache_page: timed out\n");
1047 #ifdef CONFIG_DEBUG_SPINLOCK
1049 _raw_spin_unlock(spinlock_t
* lock
)
1055 lock
->previous
= NULL
;
1057 lock
->base_file
= "none";
1062 debug_spin_lock(spinlock_t
* lock
, const char *base_file
, int line_no
)
1066 void *inline_pc
= __builtin_return_address(0);
1067 unsigned long started
= jiffies
;
1069 int cpu
= smp_processor_id();
1074 /* Use sub-sections to put the actual loop at the end
1075 of this object file's text section so as to perfect
1076 branch prediction. */
1077 __asm__
__volatile__(
1092 : "=r" (tmp
), "=m" (lock
->lock
), "=r" (stuck
)
1093 : "1" (lock
->lock
), "2" (stuck
) : "memory");
1097 "%s:%d spinlock stuck in %s at %p(%d)"
1098 " owner %s at %p(%d) %s:%d\n",
1100 current
->comm
, inline_pc
, cpu
,
1101 lock
->task
->comm
, lock
->previous
,
1102 lock
->on_cpu
, lock
->base_file
, lock
->line_no
);
1108 /* Exiting. Got the lock. */
1110 lock
->previous
= inline_pc
;
1111 lock
->task
= current
;
1112 lock
->base_file
= base_file
;
1113 lock
->line_no
= line_no
;
1117 "%s:%d spinlock grabbed in %s at %p(%d) %ld ticks\n",
1118 base_file
, line_no
, current
->comm
, inline_pc
,
1119 cpu
, jiffies
- started
);
1124 debug_spin_trylock(spinlock_t
* lock
, const char *base_file
, int line_no
)
1127 if ((ret
= !test_and_set_bit(0, lock
))) {
1128 lock
->on_cpu
= smp_processor_id();
1129 lock
->previous
= __builtin_return_address(0);
1130 lock
->task
= current
;
1132 lock
->base_file
= base_file
;
1133 lock
->line_no
= line_no
;
1137 #endif /* CONFIG_DEBUG_SPINLOCK */
1139 #ifdef CONFIG_DEBUG_RWLOCK
1140 void _raw_write_lock(rwlock_t
* lock
)
1143 int stuck_lock
, stuck_reader
;
1144 void *inline_pc
= __builtin_return_address(0);
1149 stuck_reader
= 1<<30;
1151 __asm__
__volatile__(
1160 "6: blt %3,4b # debug\n"
1161 " subl %3,1,%3 # debug\n"
1164 "8: blt %4,4b # debug\n"
1165 " subl %4,1,%4 # debug\n"
1170 : "=m" (*(volatile int *)lock
), "=&r" (regx
), "=&r" (regy
),
1171 "=&r" (stuck_lock
), "=&r" (stuck_reader
)
1172 : "0" (*(volatile int *)lock
), "3" (stuck_lock
), "4" (stuck_reader
) : "memory");
1174 if (stuck_lock
< 0) {
1175 printk(KERN_WARNING
"write_lock stuck at %p\n", inline_pc
);
1178 if (stuck_reader
< 0) {
1179 printk(KERN_WARNING
"write_lock stuck on readers at %p\n",
1185 void _raw_read_lock(rwlock_t
* lock
)
1189 void *inline_pc
= __builtin_return_address(0);
1195 __asm__
__volatile__(
1204 " blt %2,4b # debug\n"
1205 " subl %2,1,%2 # debug\n"
1209 : "=m" (*(volatile int *)lock
), "=&r" (regx
), "=&r" (stuck_lock
)
1210 : "0" (*(volatile int *)lock
), "2" (stuck_lock
) : "memory");
1212 if (stuck_lock
< 0) {
1213 printk(KERN_WARNING
"read_lock stuck at %p\n", inline_pc
);
1217 #endif /* CONFIG_DEBUG_RWLOCK */