/*
 *	linux/arch/alpha/kernel/smp.c
 */
5 #include <linux/errno.h>
6 #include <linux/kernel.h>
7 #include <linux/kernel_stat.h>
8 #include <linux/sched.h>
10 #include <linux/threads.h>
11 #include <linux/smp.h>
12 #include <linux/smp_lock.h>
13 #include <linux/interrupt.h>
14 #include <linux/init.h>
15 #include <linux/delay.h>
16 #include <linux/spinlock.h>
17 #include <linux/irq.h>
19 #include <asm/hwrpb.h>
20 #include <asm/ptrace.h>
21 #include <asm/atomic.h>
25 #include <asm/bitops.h>
26 #include <asm/pgtable.h>
27 #include <asm/pgalloc.h>
28 #include <asm/hardirq.h>
29 #include <asm/softirq.h>
30 #include <asm/mmu_context.h>
32 #define __KERNEL_SYSCALLS__
33 #include <asm/unistd.h>
/* Debug print helper: call as DBGS(("fmt", args)) -- the double parens
   carry the whole printk argument list through the single macro arg.  */
#define DBGS(args)	printk args
46 /* A collection of per-processor data. */
47 struct cpuinfo_alpha cpu_data
[NR_CPUS
];
49 /* A collection of single bit ipi messages. */
51 unsigned long bits ____cacheline_aligned
;
52 } ipi_data
[NR_CPUS
] __cacheline_aligned
;
/* IPI message kinds; each is a bit index into ipi_data[cpu].bits.
   The three names below are the ones tested in handle_ipi().  */
enum ipi_message_type {
	IPI_RESCHEDULE,
	IPI_CALL_FUNC,
	IPI_CPU_STOP,
};
60 spinlock_t kernel_flag
= SPIN_LOCK_UNLOCKED
;
62 /* Set to a secondary's cpuid when it comes online. */
63 static unsigned long smp_secondary_alive
;
65 /* Which cpus ids came online. */
66 unsigned long cpu_present_mask
;
68 /* cpus reported in the hwrpb */
69 static unsigned long hwrpb_cpu_present_mask __initdata
= 0;
71 static int max_cpus
= -1; /* Command-line limitation. */
72 int smp_num_probed
; /* Internal processor count */
73 int smp_num_cpus
= 1; /* Number that came online. */
74 int smp_threads_ready
; /* True once the per process idle is forked. */
75 cycles_t cacheflush_time
;
77 int __cpu_number_map
[NR_CPUS
];
78 int __cpu_logical_map
[NR_CPUS
];
80 extern void calibrate_delay(void);
81 extern asmlinkage
void entInt(void);
84 static int __init
nosmp(char *str
)
90 __setup("nosmp", nosmp
);
92 static int __init
maxcpus(char *str
)
94 get_option(&str
, &max_cpus
);
98 __setup("maxcpus", maxcpus
);
102 * Called by both boot and secondaries to move global data into
103 * per-processor storage.
105 static inline void __init
106 smp_store_cpu_info(int cpuid
)
108 cpu_data
[cpuid
].loops_per_sec
= loops_per_sec
;
109 cpu_data
[cpuid
].last_asn
= ASN_FIRST_VERSION
;
110 cpu_data
[cpuid
].need_new_asn
= 0;
111 cpu_data
[cpuid
].asn_lock
= 0;
112 local_irq_count(cpuid
) = 0;
113 local_bh_count(cpuid
) = 0;
117 * Ideally sets up per-cpu profiling hooks. Doesn't do much now...
119 static inline void __init
120 smp_setup_percpu_timer(int cpuid
)
122 cpu_data
[cpuid
].prof_counter
= 1;
123 cpu_data
[cpuid
].prof_multiplier
= 1;
127 * Where secondaries begin a life of C.
132 int cpuid
= hard_smp_processor_id();
134 DBGS(("CALLIN %d state 0x%lx\n", cpuid
, current
->state
));
136 /* Turn on machine checks. */
139 /* Set trap vectors. */
142 /* Set interrupt vector. */
145 /* Setup the scheduler for this processor. */
148 /* ??? This should be in init_idle. */
149 atomic_inc(&init_mm
.mm_count
);
150 current
->active_mm
= &init_mm
;
152 /* Get our local ticker going. */
153 smp_setup_percpu_timer(cpuid
);
155 /* Must have completely accurate bogos. */
158 smp_store_cpu_info(cpuid
);
160 /* Allow master to continue. */
162 smp_secondary_alive
= cpuid
;
164 /* Wait for the go code. */
165 while (!smp_threads_ready
)
168 DBGS(("smp_callin: commencing CPU %d current %p\n",
177 * Rough estimation for SMP scheduling, this is the number of cycles it
178 * takes for a fully memory-limited process to flush the SMP-local cache.
180 * We are not told how much cache there is, so we have to guess.
183 smp_tune_scheduling (void)
185 struct percpu_struct
*cpu
;
186 unsigned long on_chip_cache
;
189 cpu
= (struct percpu_struct
*)((char*)hwrpb
+ hwrpb
->processor_offset
);
193 on_chip_cache
= 16 + 16;
198 on_chip_cache
= 8 + 8 + 96;
202 on_chip_cache
= 16 + 8;
206 on_chip_cache
= 64 + 64;
210 on_chip_cache
= 8 + 8;
214 freq
= hwrpb
->cycle_freq
? : est_cycle_freq
;
217 /* Magic estimation stolen from x86 port. */
218 cacheflush_time
= freq
/ 1024L * on_chip_cache
/ 5000L;
220 printk("Using heuristic of %d cycles.\n",
223 /* Magic value to force potential preemption of other CPUs. */
224 cacheflush_time
= INT_MAX
;
226 printk("Using heuristic of %d cycles.\n",
232 * Send a message to a secondary's console. "START" is one such
233 * interesting message. ;-)
236 send_secondary_console_msg(char *str
, int cpuid
)
238 struct percpu_struct
*cpu
;
239 register char *cp1
, *cp2
;
240 unsigned long cpumask
;
244 cpu
= (struct percpu_struct
*)
246 + hwrpb
->processor_offset
247 + cpuid
* hwrpb
->processor_size
);
249 cpumask
= (1L << cpuid
);
250 if (hwrpb
->txrdy
& cpumask
)
256 *(unsigned int *)&cpu
->ipc_buffer
[0] = len
;
257 cp1
= (char *) &cpu
->ipc_buffer
[1];
258 memcpy(cp1
, cp2
, len
);
260 /* atomic test and set */
262 set_bit(cpuid
, &hwrpb
->rxrdy
);
264 if (hwrpb
->txrdy
& cpumask
)
270 /* Wait one second. Note that jiffies aren't ticking yet. */
271 for (timeout
= 100000; timeout
> 0; --timeout
) {
272 if (!(hwrpb
->txrdy
& cpumask
))
280 /* Wait one second. */
281 for (timeout
= 100000; timeout
> 0; --timeout
) {
282 if (!(hwrpb
->txrdy
& cpumask
))
290 printk("Processor %x not ready\n", cpuid
);
295 * A secondary console wants to send a message. Receive it.
298 recv_secondary_console_msg(void)
301 unsigned long txrdy
= hwrpb
->txrdy
;
302 char *cp1
, *cp2
, buf
[80];
303 struct percpu_struct
*cpu
;
305 DBGS(("recv_secondary_console_msg: TXRDY 0x%lx.\n", txrdy
));
307 mycpu
= hard_smp_processor_id();
309 for (i
= 0; i
< NR_CPUS
; i
++) {
310 if (!(txrdy
& (1L << i
)))
313 DBGS(("recv_secondary_console_msg: "
314 "TXRDY contains CPU %d.\n", i
));
316 cpu
= (struct percpu_struct
*)
318 + hwrpb
->processor_offset
319 + i
* hwrpb
->processor_size
);
321 DBGS(("recv_secondary_console_msg: on %d from %d"
322 " HALT_REASON 0x%lx FLAGS 0x%lx\n",
323 mycpu
, i
, cpu
->halt_reason
, cpu
->flags
));
325 cnt
= cpu
->ipc_buffer
[0] >> 32;
326 if (cnt
<= 0 || cnt
>= 80)
327 strcpy(buf
, "<<< BOGUS MSG >>>");
329 cp1
= (char *) &cpu
->ipc_buffer
[11];
333 while ((cp2
= strchr(cp2
, '\r')) != 0) {
340 DBGS((KERN_INFO
"recv_secondary_console_msg: on %d "
341 "message is '%s'\n", mycpu
, buf
));
348 * Convince the console to have a secondary cpu begin execution.
351 secondary_cpu_start(int cpuid
, struct task_struct
*idle
)
353 struct percpu_struct
*cpu
;
354 struct pcb_struct
*hwpcb
;
357 cpu
= (struct percpu_struct
*)
359 + hwrpb
->processor_offset
360 + cpuid
* hwrpb
->processor_size
);
361 hwpcb
= (struct pcb_struct
*) cpu
->hwpcb
;
363 /* Initialize the CPU's HWPCB to something just good enough for
364 us to get started. Immediately after starting, we'll swpctx
365 to the target idle task's ptb. Reuse the stack in the mean
366 time. Precalculate the target PCBB. */
367 hwpcb
->ksp
= (unsigned long) idle
+ sizeof(union task_union
) - 16;
369 hwpcb
->ptbr
= idle
->thread
.ptbr
;
372 hwpcb
->unique
= virt_to_phys(&idle
->thread
);
373 hwpcb
->flags
= idle
->thread
.pal_flags
;
374 hwpcb
->res1
= hwpcb
->res2
= 0;
377 DBGS(("KSP 0x%lx PTBR 0x%lx VPTBR 0x%lx UNIQUE 0x%lx\n",
378 hwpcb
->ksp
, hwpcb
->ptbr
, hwrpb
->vptb
, hwcpb
->unique
));
380 DBGS(("Starting secondary cpu %d: state 0x%lx pal_flags 0x%lx\n",
381 cpuid
, idle
->state
, idle
->thread
.pal_flags
));
383 /* Setup HWRPB fields that SRM uses to activate secondary CPU */
384 hwrpb
->CPU_restart
= __smp_callin
;
385 hwrpb
->CPU_restart_data
= (unsigned long) __smp_callin
;
387 /* Recalculate and update the HWRPB checksum */
388 hwrpb_update_checksum(hwrpb
);
391 * Send a "start" command to the specified processor.
394 /* SRM III 3.4.1.3 */
395 cpu
->flags
|= 0x22; /* turn on Context Valid and Restart Capable */
396 cpu
->flags
&= ~1; /* turn off Bootstrap In Progress */
399 send_secondary_console_msg("START\r\n", cpuid
);
401 /* Wait 1 second for an ACK from the console. Note that jiffies
402 aren't ticking yet. */
403 for (timeout
= 100000; timeout
> 0; timeout
--) {
409 printk(KERN_ERR
"SMP: Processor %d failed to start.\n", cpuid
);
413 DBGS(("secondary_cpu_start: SUCCESS for CPU %d!!!\n", cpuid
));
417 static int __init
fork_by_hand(void)
421 * don't care about the regs settings since
422 * we'll never reschedule the forked task.
424 return do_fork(CLONE_VM
|CLONE_PID
, 0, ®s
, 0);
428 * Bring one cpu online.
431 smp_boot_one_cpu(int cpuid
, int cpunum
)
433 struct task_struct
*idle
;
436 /* Cook up an idler for this guy. Note that the address we give
437 to kernel_thread is irrelevant -- it's going to start where
438 HWRPB.CPU_restart says to start. But this gets all the other
439 task-y sort of data structures set up like we wish. */
441 * We can't use kernel_thread since we must avoid to
442 * reschedule the child.
444 if (fork_by_hand() < 0)
445 panic("failed fork for CPU %d", cpuid
);
447 idle
= init_task
.prev_task
;
449 panic("No idle process for CPU %d", cpuid
);
451 idle
->processor
= cpuid
;
452 __cpu_logical_map
[cpunum
] = cpuid
;
453 __cpu_number_map
[cpuid
] = cpunum
;
454 idle
->has_cpu
= 1; /* we schedule the first task manually */
456 del_from_runqueue(idle
);
457 unhash_process(idle
);
458 init_tasks
[cpunum
] = idle
;
460 DBGS(("smp_boot_one_cpu: CPU %d state 0x%lx flags 0x%lx\n",
461 cpuid
, idle
->state
, idle
->flags
));
463 /* The secondary will change this once it is happy. Note that
464 secondary_cpu_start contains the necessary memory barrier. */
465 smp_secondary_alive
= -1;
467 /* Whirrr, whirrr, whirrrrrrrrr... */
468 if (secondary_cpu_start(cpuid
, idle
))
471 /* We've been acked by the console; wait one second for the task
472 to start up for real. Note that jiffies aren't ticking yet. */
473 for (timeout
= 0; timeout
< 100000; timeout
++) {
474 if (smp_secondary_alive
!= -1)
480 /* we must invalidate our stuff as we failed to boot the CPU */
481 __cpu_logical_map
[cpunum
] = -1;
482 __cpu_number_map
[cpuid
] = -1;
484 /* the idle task is local to us so free it as we don't use it */
485 free_task_struct(idle
);
487 printk(KERN_ERR
"SMP: Processor %d is stuck.\n", cpuid
);
491 /* Another "Red Snapper". */
496 * Called from setup_arch. Detect an SMP system and which processors
502 struct percpu_struct
*cpubase
, *cpu
;
505 if (boot_cpuid
!= 0) {
506 printk(KERN_WARNING
"SMP: Booting off cpu %d instead of 0?\n",
510 if (hwrpb
->nr_processors
> 1) {
513 DBGS(("setup_smp: nr_processors %ld\n",
514 hwrpb
->nr_processors
));
516 cpubase
= (struct percpu_struct
*)
517 ((char*)hwrpb
+ hwrpb
->processor_offset
);
518 boot_cpu_palrev
= cpubase
->pal_revision
;
520 for (i
= 0; i
< hwrpb
->nr_processors
; i
++ ) {
521 cpu
= (struct percpu_struct
*)
522 ((char *)cpubase
+ i
*hwrpb
->processor_size
);
523 if ((cpu
->flags
& 0x1cc) == 0x1cc) {
525 /* Assume here that "whami" == index */
526 hwrpb_cpu_present_mask
|= (1L << i
);
527 cpu
->pal_revision
= boot_cpu_palrev
;
530 DBGS(("setup_smp: CPU %d: flags 0x%lx type 0x%lx\n",
531 i
, cpu
->flags
, cpu
->type
));
532 DBGS(("setup_smp: CPU %d: PAL rev 0x%lx\n",
533 i
, cpu
->pal_revision
));
537 hwrpb_cpu_present_mask
= (1L << boot_cpuid
);
539 cpu_present_mask
= 1L << boot_cpuid
;
541 printk(KERN_INFO
"SMP: %d CPUs probed -- cpu_present_mask = %lx\n",
542 smp_num_probed
, hwrpb_cpu_present_mask
);
546 * Called by smp_init bring all the secondaries online and hold them.
552 unsigned long bogosum
;
554 /* Take care of some initial bookkeeping. */
555 memset(__cpu_number_map
, -1, sizeof(__cpu_number_map
));
556 memset(__cpu_logical_map
, -1, sizeof(__cpu_logical_map
));
557 memset(ipi_data
, 0, sizeof(ipi_data
));
559 __cpu_number_map
[boot_cpuid
] = 0;
560 __cpu_logical_map
[0] = boot_cpuid
;
561 current
->processor
= boot_cpuid
;
563 smp_store_cpu_info(boot_cpuid
);
564 smp_tune_scheduling();
565 smp_setup_percpu_timer(boot_cpuid
);
569 /* ??? This should be in init_idle. */
570 atomic_inc(&init_mm
.mm_count
);
571 current
->active_mm
= &init_mm
;
573 /* Nothing to do on a UP box, or when told not to. */
574 if (smp_num_probed
== 1 || max_cpus
== 0) {
575 printk(KERN_INFO
"SMP mode deactivated.\n");
579 printk(KERN_INFO
"SMP starting up secondaries.\n");
582 for (i
= 0; i
< NR_CPUS
; i
++) {
586 if (((hwrpb_cpu_present_mask
>> i
) & 1) == 0)
589 if (smp_boot_one_cpu(i
, cpu_count
))
592 cpu_present_mask
|= 1L << i
;
596 if (cpu_count
== 1) {
597 printk(KERN_ERR
"SMP: Only one lonely processor alive.\n");
602 for (i
= 0; i
< NR_CPUS
; i
++) {
603 if (cpu_present_mask
& (1L << i
))
604 bogosum
+= cpu_data
[i
].loops_per_sec
;
606 printk(KERN_INFO
"SMP: Total of %d processors activated "
607 "(%lu.%02lu BogoMIPS).\n",
608 cpu_count
, (bogosum
+ 2500) / 500000,
609 ((bogosum
+ 2500) / 5000) % 100);
611 smp_num_cpus
= cpu_count
;
615 * Called by smp_init to release the blocking online cpus once they
621 /* smp_init sets smp_threads_ready -- that's enough. */
627 smp_percpu_timer_interrupt(struct pt_regs
*regs
)
629 int cpu
= smp_processor_id();
630 unsigned long user
= user_mode(regs
);
631 struct cpuinfo_alpha
*data
= &cpu_data
[cpu
];
633 /* Record kernel PC. */
635 alpha_do_profile(regs
->pc
);
637 if (!--data
->prof_counter
) {
638 /* We need to make like a normal interrupt -- otherwise
639 timer interrupts ignore the global interrupt lock,
640 which would be a Bad Thing. */
641 irq_enter(cpu
, RTC_IRQ
);
643 update_process_times(user
);
645 data
->prof_counter
= data
->prof_multiplier
;
646 irq_exit(cpu
, RTC_IRQ
);
651 setup_profiling_timer(unsigned int multiplier
)
658 send_ipi_message(unsigned long to_whom
, enum ipi_message_type operation
)
662 /* Reduce the number of memory barriers by doing two loops,
663 one to set the bits, one to invoke the interrupts. */
665 mb(); /* Order out-of-band data and bit setting. */
667 for (i
= 0, j
= 1; i
< NR_CPUS
; ++i
, j
<<= 1) {
669 set_bit(operation
, &ipi_data
[i
].bits
);
672 mb(); /* Order bit setting and interrupt. */
674 for (i
= 0, j
= 1; i
< NR_CPUS
; ++i
, j
<<= 1) {
680 /* Structure and data for smp_call_function. This is designed to
681 minimize static memory requirements. Plus it looks cleaner. */
683 struct smp_call_struct
{
684 void (*func
) (void *info
);
687 atomic_t unstarted_count
;
688 atomic_t unfinished_count
;
691 static struct smp_call_struct
*smp_call_function_data
;
693 /* Atomicly drop data into a shared pointer. The pointer is free if
694 it is initially locked. If retry, spin until free. */
697 pointer_lock (void *lock
, void *data
, int retry
)
703 /* Compare and swap with zero. */
711 : "=&r"(old
), "=m"(*(void **)lock
), "=&r"(tmp
)
720 while (*(void **)lock
)
726 handle_ipi(struct pt_regs
*regs
)
728 int this_cpu
= smp_processor_id();
729 unsigned long *pending_ipis
= &ipi_data
[this_cpu
].bits
;
733 DBGS(("handle_ipi: on CPU %d ops 0x%lx PC 0x%lx\n",
734 this_cpu
, *pending_ipis
, regs
->pc
));
737 mb(); /* Order interrupt and bit testing. */
738 while ((ops
= xchg(pending_ipis
, 0)) != 0) {
739 mb(); /* Order bit clearing and data access. */
747 if (which
== IPI_RESCHEDULE
) {
748 /* Reschedule callback. Everything to be done
749 is done by the interrupt return path. */
751 else if (which
== IPI_CALL_FUNC
) {
752 struct smp_call_struct
*data
;
753 void (*func
)(void *info
);
757 data
= smp_call_function_data
;
762 /* Notify the sending CPU that the data has been
763 received, and execution is about to begin. */
765 atomic_dec (&data
->unstarted_count
);
767 /* At this point the structure may be gone unless
771 /* Notify the sending CPU that the task is done. */
773 if (wait
) atomic_dec (&data
->unfinished_count
);
775 else if (which
== IPI_CPU_STOP
) {
779 printk(KERN_CRIT
"Unknown IPI on CPU %d: %lu\n",
784 mb(); /* Order data access and bit testing. */
787 cpu_data
[this_cpu
].ipi_count
++;
790 recv_secondary_console_msg();
794 smp_send_reschedule(int cpu
)
797 if (cpu
== hard_smp_processor_id())
799 "smp_send_reschedule: Sending IPI to self.\n");
801 send_ipi_message(1L << cpu
, IPI_RESCHEDULE
);
807 unsigned long to_whom
= cpu_present_mask
^ (1L << smp_processor_id());
809 if (hard_smp_processor_id() != boot_cpu_id
)
810 printk(KERN_WARNING
"smp_send_stop: Not on boot cpu.\n");
812 send_ipi_message(to_whom
, IPI_CPU_STOP
);
816 * Run a function on all other CPUs.
817 * <func> The function to run. This must be fast and non-blocking.
818 * <info> An arbitrary pointer to pass to the function.
819 * <retry> If true, keep retrying until ready.
820 * <wait> If true, wait until function has completed on other CPUs.
821 * [RETURNS] 0 on success, else a negative status code.
823 * Does not return until remote CPUs are nearly ready to execute <func>
824 * or are or have executed.
828 smp_call_function (void (*func
) (void *info
), void *info
, int retry
, int wait
)
830 unsigned long to_whom
= cpu_present_mask
^ (1L << smp_processor_id());
831 struct smp_call_struct data
;
837 atomic_set(&data
.unstarted_count
, smp_num_cpus
- 1);
838 atomic_set(&data
.unfinished_count
, smp_num_cpus
- 1);
840 /* Aquire the smp_call_function_data mutex. */
841 if (pointer_lock(&smp_call_function_data
, &data
, retry
))
844 /* Send a message to all other CPUs. */
845 send_ipi_message(to_whom
, IPI_CALL_FUNC
);
847 /* Wait for a minimal response. */
848 timeout
= jiffies
+ HZ
;
849 while (atomic_read (&data
.unstarted_count
) > 0
850 && time_before (jiffies
, timeout
))
853 /* We either got one or timed out -- clear the lock. */
855 smp_call_function_data
= 0;
856 if (atomic_read (&data
.unstarted_count
) > 0)
859 /* Wait for a complete response, if needed. */
861 while (atomic_read (&data
.unfinished_count
) > 0)
/* IPI target: flush this cpu's instruction cache.  */
static void
ipi_imb(void *ignored)
{
	imb();
}
877 /* Must wait other processors to flush their icache before continue. */
878 if (smp_call_function(ipi_imb
, NULL
, 1, 1))
879 printk(KERN_CRIT
"smp_imb: timed out\n");
/* IPI target: invalidate this cpu's entire TLB.  */
static void
ipi_flush_tlb_all(void *ignored)
{
	tbia();
}
893 /* Although we don't have any data to pass, we do want to
894 synchronize with the other processors. */
895 if (smp_call_function(ipi_flush_tlb_all
, NULL
, 1, 1)) {
896 printk(KERN_CRIT
"flush_tlb_all: timed out\n");
/* True while this cpu's ASN bookkeeping is being updated.  */
#define asn_locked() (cpu_data[smp_processor_id()].asn_lock)
905 ipi_flush_tlb_mm(void *x
)
907 struct mm_struct
*mm
= (struct mm_struct
*) x
;
908 if (mm
== current
->active_mm
&& !asn_locked())
909 flush_tlb_current(mm
);
915 flush_tlb_mm(struct mm_struct
*mm
)
917 if (mm
== current
->active_mm
) {
918 flush_tlb_current(mm
);
919 if (atomic_read(&mm
->mm_users
) <= 1) {
920 int i
, cpu
, this_cpu
= smp_processor_id();
921 for (i
= 0; i
< smp_num_cpus
; i
++) {
922 cpu
= cpu_logical_map(i
);
925 if (mm
->context
[cpu
])
926 mm
->context
[cpu
] = 0;
932 if (smp_call_function(ipi_flush_tlb_mm
, mm
, 1, 1)) {
933 printk(KERN_CRIT
"flush_tlb_mm: timed out\n");
/* Argument bundle for ipi_flush_tlb_page (smp_call_function takes a
   single void* payload).  */
struct flush_tlb_page_struct {
	struct vm_area_struct *vma;
	struct mm_struct *mm;
	unsigned long addr;
};
944 ipi_flush_tlb_page(void *x
)
946 struct flush_tlb_page_struct
*data
= (struct flush_tlb_page_struct
*)x
;
947 struct mm_struct
* mm
= data
->mm
;
949 if (mm
== current
->active_mm
&& !asn_locked())
950 flush_tlb_current_page(mm
, data
->vma
, data
->addr
);
956 flush_tlb_page(struct vm_area_struct
*vma
, unsigned long addr
)
958 struct flush_tlb_page_struct data
;
959 struct mm_struct
*mm
= vma
->vm_mm
;
961 if (mm
== current
->active_mm
) {
962 flush_tlb_current_page(mm
, vma
, addr
);
963 if (atomic_read(&mm
->mm_users
) <= 1) {
964 int i
, cpu
, this_cpu
= smp_processor_id();
965 for (i
= 0; i
< smp_num_cpus
; i
++) {
966 cpu
= cpu_logical_map(i
);
969 if (mm
->context
[cpu
])
970 mm
->context
[cpu
] = 0;
980 if (smp_call_function(ipi_flush_tlb_page
, &data
, 1, 1)) {
981 printk(KERN_CRIT
"flush_tlb_page: timed out\n");
/* Flush a range of translations on all cpus.  */
void
flush_tlb_range(struct mm_struct *mm, unsigned long start, unsigned long end)
{
	/* On the Alpha we always flush the whole user tlb.  */
	flush_tlb_mm(mm);
}
993 ipi_flush_icache_page(void *x
)
995 struct mm_struct
*mm
= (struct mm_struct
*) x
;
996 if (mm
== current
->active_mm
&& !asn_locked())
997 __load_new_mm_context(mm
);
1003 flush_icache_page(struct vm_area_struct
*vma
, struct page
*page
)
1005 struct mm_struct
*mm
= vma
->vm_mm
;
1007 if ((vma
->vm_flags
& VM_EXEC
) == 0)
1010 if (mm
== current
->active_mm
) {
1011 __load_new_mm_context(mm
);
1012 if (atomic_read(&mm
->mm_users
) <= 1) {
1013 int i
, cpu
, this_cpu
= smp_processor_id();
1014 for (i
= 0; i
< smp_num_cpus
; i
++) {
1015 cpu
= cpu_logical_map(i
);
1016 if (cpu
== this_cpu
)
1018 if (mm
->context
[cpu
])
1019 mm
->context
[cpu
] = 0;
1025 if (smp_call_function(ipi_flush_icache_page
, mm
, 1, 1)) {
1026 printk(KERN_CRIT
"flush_icache_page: timed out\n");
1031 smp_info(char *buffer
)
1033 return sprintf(buffer
,
1034 "cpus active\t\t: %d\n"
1035 "cpu active mask\t\t: %016lx\n",
1036 smp_num_cpus
, cpu_present_mask
);
1041 spin_unlock(spinlock_t
* lock
)
1047 lock
->previous
= NULL
;
1049 lock
->base_file
= "none";
1054 debug_spin_lock(spinlock_t
* lock
, const char *base_file
, int line_no
)
1058 void *inline_pc
= __builtin_return_address(0);
1059 unsigned long started
= jiffies
;
1061 int cpu
= smp_processor_id();
1066 /* Use sub-sections to put the actual loop at the end
1067 of this object file's text section so as to perfect
1068 branch prediction. */
1069 __asm__
__volatile__(
1084 : "=r" (tmp
), "=m" (lock
->lock
), "=r" (stuck
)
1085 : "1" (lock
->lock
), "2" (stuck
) : "memory");
1089 "%s:%d spinlock stuck in %s at %p(%d)"
1090 " owner %s at %p(%d) %s:%d\n",
1092 current
->comm
, inline_pc
, cpu
,
1093 lock
->task
->comm
, lock
->previous
,
1094 lock
->on_cpu
, lock
->base_file
, lock
->line_no
);
1100 /* Exiting. Got the lock. */
1102 lock
->previous
= inline_pc
;
1103 lock
->task
= current
;
1104 lock
->base_file
= base_file
;
1105 lock
->line_no
= line_no
;
1109 "%s:%d spinlock grabbed in %s at %p(%d) %ld ticks\n",
1110 base_file
, line_no
, current
->comm
, inline_pc
,
1111 cpu
, jiffies
- started
);
1116 debug_spin_trylock(spinlock_t
* lock
, const char *base_file
, int line_no
)
1119 if ((ret
= !test_and_set_bit(0, lock
))) {
1120 lock
->on_cpu
= smp_processor_id();
1121 lock
->previous
= __builtin_return_address(0);
1122 lock
->task
= current
;
1124 lock
->base_file
= base_file
;
1125 lock
->line_no
= line_no
;
1129 #endif /* DEBUG_SPINLOCK */
1132 void write_lock(rwlock_t
* lock
)
1135 int stuck_lock
, stuck_reader
;
1136 void *inline_pc
= __builtin_return_address(0);
1141 stuck_reader
= 1<<26;
1143 __asm__
__volatile__(
1152 "6: blt %3,4b # debug\n"
1153 " subl %3,1,%3 # debug\n"
1156 "8: blt %4,4b # debug\n"
1157 " subl %4,1,%4 # debug\n"
1162 : "=m" (*(volatile int *)lock
), "=&r" (regx
), "=&r" (regy
),
1163 "=&r" (stuck_lock
), "=&r" (stuck_reader
)
1164 : "0" (*(volatile int *)lock
), "3" (stuck_lock
), "4" (stuck_reader
) : "memory");
1166 if (stuck_lock
< 0) {
1167 printk(KERN_WARNING
"write_lock stuck at %p\n", inline_pc
);
1170 if (stuck_reader
< 0) {
1171 printk(KERN_WARNING
"write_lock stuck on readers at %p\n",
1177 void read_lock(rwlock_t
* lock
)
1181 void *inline_pc
= __builtin_return_address(0);
1187 __asm__
__volatile__(
1196 " blt %2,4b # debug\n"
1197 " subl %2,1,%2 # debug\n"
1201 : "=m" (*(volatile int *)lock
), "=&r" (regx
), "=&r" (stuck_lock
)
1202 : "0" (*(volatile int *)lock
), "2" (stuck_lock
) : "memory");
1204 if (stuck_lock
< 0) {
1205 printk(KERN_WARNING
"read_lock stuck at %p\n", inline_pc
);
1209 #endif /* DEBUG_RWLOCK */